Squashed 'third_party/eigen/' content from commit 61d72f6

Change-Id: Iccc90fa0b55ab44037f018046d2fcffd90d9d025
git-subtree-dir: third_party/eigen
git-subtree-split: 61d72f6383cfa842868c53e30e087b0258177257
diff --git a/Eigen/src/CMakeLists.txt b/Eigen/src/CMakeLists.txt
new file mode 100644
index 0000000..c326f37
--- /dev/null
+++ b/Eigen/src/CMakeLists.txt
@@ -0,0 +1,7 @@
+file(GLOB Eigen_src_subdirectories "*")
+escape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
+foreach(f ${Eigen_src_subdirectories})
+  if(NOT f MATCHES "\\.txt" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+" )
+    add_subdirectory(${f})
+  endif()
+endforeach()
diff --git a/Eigen/src/Cholesky/CMakeLists.txt b/Eigen/src/Cholesky/CMakeLists.txt
new file mode 100644
index 0000000..d01488b
--- /dev/null
+++ b/Eigen/src/Cholesky/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Cholesky_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Cholesky_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Cholesky COMPONENT Devel
+  )
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
new file mode 100644
index 0000000..e01ae82
--- /dev/null
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -0,0 +1,611 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Keir Mierle <mierle@gmail.com>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2011 Timothy E. Holy <tim.holy@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LDLT_H
+#define EIGEN_LDLT_H
+
+namespace Eigen { 
+
+namespace internal {
+  template<typename MatrixType, int UpLo> struct LDLT_Traits;
+
+  // PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef
+  enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite };
+}
+
+/** \ingroup Cholesky_Module
+  *
+  * \class LDLT
+  *
+  * \brief Robust Cholesky decomposition of a matrix with pivoting
+  *
+  * \param MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition
+  * \param UpLo the triangular part that will be used for the decomposition: Lower (default) or Upper.
+  *             The other triangular part won't be read.
+  *
+  * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite
+  * matrix \f$ A \f$ such that \f$ A =  P^TLDL^*P \f$, where P is a permutation matrix, L
+  * is lower triangular with a unit diagonal and D is a diagonal matrix.
+  *
+  * The decomposition uses pivoting to ensure stability, so that L will have
+  * zeros in the bottom right n - rank(A) submatrix. Avoiding the square root
+  * on D also stabilizes the computation.
+  *
+  * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky
+  * decomposition to determine whether a system of equations has a solution.
+  *
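+  * A minimal usage sketch (illustrative values; assumes <Eigen/Dense> is included):
+  * \code
+  * Eigen::Matrix2d A;
+  * A << 2, 1,
+  *      1, 3;                                // symmetric positive definite
+  * Eigen::LDLT<Eigen::Matrix2d> ldlt(A);
+  * Eigen::Vector2d x = ldlt.solve(Eigen::Vector2d(1, 2)); // solves A x = b
+  * \endcode
+  *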
+  * \sa MatrixBase::ldlt(), class LLT
+  */
+template<typename _MatrixType, int _UpLo> class LDLT
+{
+  public:
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options & ~RowMajorBit, // these are the options for the TmpMatrixType, we need a ColMajor matrix here!
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+      UpLo = _UpLo
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Scalar, RowsAtCompileTime, 1, Options, MaxRowsAtCompileTime, 1> TmpMatrixType;
+
+    typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
+    typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
+
+    typedef internal::LDLT_Traits<MatrixType,UpLo> Traits;
+
+    /** \brief Default Constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via LDLT::compute(const MatrixType&).
+      */
+    LDLT() 
+      : m_matrix(), 
+        m_transpositions(), 
+        m_sign(internal::ZeroSign),
+        m_isInitialized(false) 
+    {}
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa LDLT()
+      */
+    LDLT(Index size)
+      : m_matrix(size, size),
+        m_transpositions(size),
+        m_temporary(size),
+        m_sign(internal::ZeroSign),
+        m_isInitialized(false)
+    {}
+
+    /** \brief Constructor with decomposition
+      *
+      * This calculates the decomposition for the input \a matrix.
+      * \sa LDLT(Index size)
+      */
+    LDLT(const MatrixType& matrix)
+      : m_matrix(matrix.rows(), matrix.cols()),
+        m_transpositions(matrix.rows()),
+        m_temporary(matrix.rows()),
+        m_sign(internal::ZeroSign),
+        m_isInitialized(false)
+    {
+      compute(matrix);
+    }
+
+    /** Clear any existing decomposition
+     * \sa rankUpdate(w,sigma)
+     */
+    void setZero()
+    {
+      m_isInitialized = false;
+    }
+
+    /** \returns a view of the upper triangular matrix U */
+    inline typename Traits::MatrixU matrixU() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return Traits::getU(m_matrix);
+    }
+
+    /** \returns a view of the lower triangular matrix L */
+    inline typename Traits::MatrixL matrixL() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return Traits::getL(m_matrix);
+    }
+
+    /** \returns the permutation matrix P as a transposition sequence.
+      */
+    inline const TranspositionType& transpositionsP() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return m_transpositions;
+    }
+
+    /** \returns the coefficients of the diagonal matrix D */
+    inline Diagonal<const MatrixType> vectorD() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return m_matrix.diagonal();
+    }
+
+    /** \returns true if the matrix is positive (semidefinite) */
+    inline bool isPositive() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign;
+    }
+    
+    #ifdef EIGEN2_SUPPORT
+    inline bool isPositiveDefinite() const
+    {
+      return isPositive();
+    }
+    #endif
+
+    /** \returns true if the matrix is negative (semidefinite) */
+    inline bool isNegative(void) const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign;
+    }
+
+    /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> .
+      *
+      * \note_about_checking_solutions
+      *
+      * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$
+      * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, 
+      * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then
+      * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the
+      * least-squares solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
+      * computes the least-squares solution of \f$ A x = b \f$ if \f$ A \f$ is singular.
+      *
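+      * A minimal sketch (illustrative; any selfadjoint matrix whose LDLT has been computed will do):
+      * \code
+      * Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
+      * A = A * A.transpose();                  // make A symmetric positive semidefinite
+      * Eigen::VectorXd b = Eigen::VectorXd::Random(4);
+      * Eigen::VectorXd x = A.ldlt().solve(b);  // or in place: b = A.ldlt().solve(b)
+      * \endcode
+      *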
+      * \sa MatrixBase::ldlt()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<LDLT, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      eigen_assert(m_matrix.rows()==b.rows()
+                && "LDLT::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<LDLT, Rhs>(*this, b.derived());
+    }
+
+    #ifdef EIGEN2_SUPPORT
+    template<typename OtherDerived, typename ResultType>
+    bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
+    {
+      *result = this->solve(b);
+      return true;
+    }
+    #endif
+
+    template<typename Derived>
+    bool solveInPlace(MatrixBase<Derived> &bAndX) const;
+
+    LDLT& compute(const MatrixType& matrix);
+
+    template <typename Derived>
+    LDLT& rankUpdate(const MatrixBase<Derived>& w, const RealScalar& alpha=1);
+
+    /** \returns the internal LDLT decomposition matrix
+      *
+      * TODO: document the storage layout
+      */
+    inline const MatrixType& matrixLDLT() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return m_matrix;
+    }
+
+    MatrixType reconstructedMatrix() const;
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "LDLT is not initialized.");
+      return Success;
+    }
+
+  protected:
+    
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+
+    /** \internal
+      * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
+      * The strict upper part is used during the decomposition, the strict lower
+      * part corresponds to the coefficients of L (its diagonal is equal to 1 and
+      * is not stored), and the diagonal entries correspond to D.
+      */
+    MatrixType m_matrix;
+    TranspositionType m_transpositions;
+    TmpMatrixType m_temporary;
+    internal::SignMatrix m_sign;
+    bool m_isInitialized;
+};
+
+namespace internal {
+
+template<int UpLo> struct ldlt_inplace;
+
+template<> struct ldlt_inplace<Lower>
+{
+  template<typename MatrixType, typename TranspositionType, typename Workspace>
+  static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
+  {
+    using std::abs;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    eigen_assert(mat.rows()==mat.cols());
+    const Index size = mat.rows();
+
+    if (size <= 1)
+    {
+      transpositions.setIdentity();
+      if (numext::real(mat.coeff(0,0)) > 0) sign = PositiveSemiDef;
+      else if (numext::real(mat.coeff(0,0)) < 0) sign = NegativeSemiDef;
+      else sign = ZeroSign;
+      return true;
+    }
+
+    for (Index k = 0; k < size; ++k)
+    {
+      // Find largest diagonal element
+      Index index_of_biggest_in_corner;
+      mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
+      index_of_biggest_in_corner += k;
+
+      transpositions.coeffRef(k) = index_of_biggest_in_corner;
+      if(k != index_of_biggest_in_corner)
+      {
+        // apply the transposition while taking care to consider only
+        // the lower triangular part
+        Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element
+        mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k));
+        mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s));
+        std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner));
+        for(int i=k+1;i<index_of_biggest_in_corner;++i)
+        {
+          Scalar tmp = mat.coeffRef(i,k);
+          mat.coeffRef(i,k) = numext::conj(mat.coeffRef(index_of_biggest_in_corner,i));
+          mat.coeffRef(index_of_biggest_in_corner,i) = numext::conj(tmp);
+        }
+        if(NumTraits<Scalar>::IsComplex)
+          mat.coeffRef(index_of_biggest_in_corner,k) = numext::conj(mat.coeff(index_of_biggest_in_corner,k));
+      }
+
+      // partition the matrix:
+      //       A00 |  -  |  -
+      // lu  = A10 | A11 |  -
+      //       A20 | A21 | A22
+      Index rs = size - k - 1;
+      Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1);
+      Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
+      Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);
+
+      if(k>0)
+      {
+        temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint();
+        mat.coeffRef(k,k) -= (A10 * temp.head(k)).value();
+        if(rs>0)
+          A21.noalias() -= A20 * temp.head(k);
+      }
+      
+      // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot
+      // was smaller than the cutoff value. However, since LDLT is not rank-revealing,
+      // we should only make sure we do not introduce INF or NaN values.
+      // LAPACK also uses 0 as the cutoff value.
+      RealScalar realAkk = numext::real(mat.coeffRef(k,k));
+      if((rs>0) && (abs(realAkk) > RealScalar(0)))
+        A21 /= realAkk;
+
+      if (sign == PositiveSemiDef) {
+        if (realAkk < 0) sign = Indefinite;
+      } else if (sign == NegativeSemiDef) {
+        if (realAkk > 0) sign = Indefinite;
+      } else if (sign == ZeroSign) {
+        if (realAkk > 0) sign = PositiveSemiDef;
+        else if (realAkk < 0) sign = NegativeSemiDef;
+      }
+    }
+
+    return true;
+  }
+
+  // Reference for the algorithm: Davis and Hager, "Multiple Rank
+  // Modifications of a Sparse Cholesky Factorization" (Algorithm 1)
+  // Trivial rearrangements of their computations (Timothy E. Holy)
+  // allow their algorithm to work for rank-1 updates even if the
+  // original matrix is not of full rank.
+  // Here only rank-1 updates are implemented, to reduce the
+  // requirement for intermediate storage and improve accuracy
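+  // In outline, with alpha initialized to 1, each column j performs
+  //   gamma   = d_j * alpha + sigma * |w_j|^2
+  //   d_j    <- d_j + sigma * |w_j|^2 / alpha   (using the previous alpha)
+  //   alpha  <- alpha + sigma * |w_j|^2 / d_j   (using the previous d_j)
+  //   w      <- w - w_j * L(:,j)                              (trailing part)
+  //   L(:,j) <- L(:,j) + (sigma * conj(w_j) / gamma) * w      (updated trailing part)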
+  template<typename MatrixType, typename WDerived>
+  static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, const typename MatrixType::RealScalar& sigma=1)
+  {
+    using numext::isfinite;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    const Index size = mat.rows();
+    eigen_assert(mat.cols() == size && w.size()==size);
+
+    RealScalar alpha = 1;
+
+    // Apply the update
+    for (Index j = 0; j < size; j++)
+    {
+      // Check for termination due to an original decomposition of low-rank
+      if (!(isfinite)(alpha))
+        break;
+
+      // Update the diagonal terms
+      RealScalar dj = numext::real(mat.coeff(j,j));
+      Scalar wj = w.coeff(j);
+      RealScalar swj2 = sigma*numext::abs2(wj);
+      RealScalar gamma = dj*alpha + swj2;
+
+      mat.coeffRef(j,j) += swj2/alpha;
+      alpha += swj2/dj;
+
+
+      // Update the terms of L
+      Index rs = size-j-1;
+      w.tail(rs) -= wj * mat.col(j).tail(rs);
+      if(gamma != 0)
+        mat.col(j).tail(rs) += (sigma*numext::conj(wj)/gamma)*w.tail(rs);
+    }
+    return true;
+  }
+
+  template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
+  static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, const typename MatrixType::RealScalar& sigma=1)
+  {
+    // Apply the permutation to the input w
+    tmp = transpositions * w;
+
+    return ldlt_inplace<Lower>::updateInPlace(mat,tmp,sigma);
+  }
+};
+
+template<> struct ldlt_inplace<Upper>
+{
+  template<typename MatrixType, typename TranspositionType, typename Workspace>
+  static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
+  {
+    Transpose<MatrixType> matt(mat);
+    return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
+  }
+
+  template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>
+  static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, const typename MatrixType::RealScalar& sigma=1)
+  {
+    Transpose<MatrixType> matt(mat);
+    return ldlt_inplace<Lower>::update(matt, transpositions, tmp, w.conjugate(), sigma);
+  }
+};
+
+template<typename MatrixType> struct LDLT_Traits<MatrixType,Lower>
+{
+  typedef const TriangularView<const MatrixType, UnitLower> MatrixL;
+  typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitUpper> MatrixU;
+  static inline MatrixL getL(const MatrixType& m) { return m; }
+  static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+};
+
+template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
+{
+  typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitLower> MatrixL;
+  typedef const TriangularView<const MatrixType, UnitUpper> MatrixU;
+  static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); }
+  static inline MatrixU getU(const MatrixType& m) { return m; }
+};
+
+} // end namespace internal
+
+/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix
+  */
+template<typename MatrixType, int _UpLo>
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
+{
+  check_template_parameters();
+  
+  eigen_assert(a.rows()==a.cols());
+  const Index size = a.rows();
+
+  m_matrix = a;
+
+  m_transpositions.resize(size);
+  m_isInitialized = false;
+  m_temporary.resize(size);
+  m_sign = internal::ZeroSign;
+
+  internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);
+
+  m_isInitialized = true;
+  return *this;
+}
+
+/** Update the LDLT decomposition:  given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T.
+ * \param w a vector to be incorporated into the decomposition.
+ * \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column vectors. Optional; default value is +1.
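+ *
+ * A minimal sketch of an update followed by the matching downdate (illustrative):
+ * \code
+ * Eigen::MatrixXd A = Eigen::MatrixXd::Identity(3,3);
+ * Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
+ * Eigen::VectorXd w = Eigen::VectorXd::Ones(3);
+ * ldlt.rankUpdate(w,  1.0);  // now factors A + w w^T
+ * ldlt.rankUpdate(w, -1.0);  // downdate back to A
+ * \endcode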
+ * \sa setZero()
+  */
+template<typename MatrixType, int _UpLo>
+template<typename Derived>
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
+{
+  const Index size = w.rows();
+  if (m_isInitialized)
+  {
+    eigen_assert(m_matrix.rows()==size);
+  }
+  else
+  {    
+    m_matrix.resize(size,size);
+    m_matrix.setZero();
+    m_transpositions.resize(size);
+    for (Index i = 0; i < size; i++)
+      m_transpositions.coeffRef(i) = i;
+    m_temporary.resize(size);
+    m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef;
+    m_isInitialized = true;
+  }
+
+  internal::ldlt_inplace<UpLo>::update(m_matrix, m_transpositions, m_temporary, w, sigma);
+
+  return *this;
+}
+
+namespace internal {
+template<typename _MatrixType, int _UpLo, typename Rhs>
+struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
+  : solve_retval_base<LDLT<_MatrixType,_UpLo>, Rhs>
+{
+  typedef LDLT<_MatrixType,_UpLo> LDLTType;
+  EIGEN_MAKE_SOLVE_HELPERS(LDLTType,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    eigen_assert(rhs().rows() == dec().matrixLDLT().rows());
+    // dst = P b
+    dst = dec().transpositionsP() * rhs();
+
+    // dst = L^-1 (P b)
+    dec().matrixL().solveInPlace(dst);
+
+    // dst = D^-1 (L^-1 P b)
+    // more precisely, use pseudo-inverse of D (see bug 241)
+    using std::abs;
+    using std::max;
+    typedef typename LDLTType::MatrixType MatrixType;
+    typedef typename LDLTType::RealScalar RealScalar;
+    const typename Diagonal<const MatrixType>::RealReturnType vectorD(dec().vectorD());
+    // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
+    // as motivated by LAPACK's xGELSS:
+    // RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() *NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
+    // However, LDLT is not rank-revealing, and so adjusting the tolerance with respect to the highest
+    // diagonal element is not well justified and leads to numerical issues in some cases.
+    // Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
+    RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();
+    
+    for (Index i = 0; i < vectorD.size(); ++i) {
+      if(abs(vectorD(i)) > tolerance)
+        dst.row(i) /= vectorD(i);
+      else
+        dst.row(i).setZero();
+    }
+
+    // dst = L^-T (D^-1 L^-1 P b)
+    dec().matrixU().solveInPlace(dst);
+
+    // dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b
+    dst = dec().transpositionsP().transpose() * dst;
+  }
+};
+}
+
+/** \internal use x = ldlt_object.solve(x);
+  *
+  * This is the \em in-place version of solve().
+  *
+  * \param bAndX represents both the right-hand side matrix b and result x.
+  *
+  * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD.
+  *
+  * This version avoids a copy when the right hand side matrix b is not
+  * needed anymore.
+  *
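+  * A minimal sketch (illustrative):
+  * \code
+  * Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
+  * A = A * A.transpose();           // make A symmetric positive semidefinite
+  * Eigen::VectorXd b = Eigen::VectorXd::Random(3);
+  * A.ldlt().solveInPlace(b);        // b now holds the solution x
+  * \endcode
+  *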
+  * \sa LDLT::solve(), MatrixBase::ldlt()
+  */
+template<typename MatrixType,int _UpLo>
+template<typename Derived>
+bool LDLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const
+{
+  eigen_assert(m_isInitialized && "LDLT is not initialized.");
+  eigen_assert(m_matrix.rows() == bAndX.rows());
+
+  bAndX = this->solve(bAndX);
+
+  return true;
+}
+
+/** \returns the matrix represented by the decomposition,
+ * i.e., it returns the product: P^T L D L^* P.
+ * This function is provided for debug purpose. */
+template<typename MatrixType, int _UpLo>
+MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const
+{
+  eigen_assert(m_isInitialized && "LDLT is not initialized.");
+  const Index size = m_matrix.rows();
+  MatrixType res(size,size);
+
+  // P
+  res.setIdentity();
+  res = transpositionsP() * res;
+  // L^* P
+  res = matrixU() * res;
+  // D(L^*P)
+  res = vectorD().real().asDiagonal() * res;
+  // L(DL^*P)
+  res = matrixL() * res;
+  // P^T (LDL^*P)
+  res = transpositionsP().transpose() * res;
+
+  return res;
+}
+
+/** \cholesky_module
+  * \returns the Cholesky decomposition with full pivoting without square root of \c *this
+  */
+template<typename MatrixType, unsigned int UpLo>
+inline const LDLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
+SelfAdjointView<MatrixType, UpLo>::ldlt() const
+{
+  return LDLT<PlainObject,UpLo>(m_matrix);
+}
+
+/** \cholesky_module
+  * \returns the Cholesky decomposition with full pivoting without square root of \c *this
+  */
+template<typename Derived>
+inline const LDLT<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::ldlt() const
+{
+  return LDLT<PlainObject>(derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_LDLT_H
diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h
new file mode 100644
index 0000000..59723a6
--- /dev/null
+++ b/Eigen/src/Cholesky/LLT.h
@@ -0,0 +1,498 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LLT_H
+#define EIGEN_LLT_H
+
+namespace Eigen { 
+
+namespace internal{
+template<typename MatrixType, int UpLo> struct LLT_Traits;
+}
+
+/** \ingroup Cholesky_Module
+  *
+  * \class LLT
+  *
+  * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features
+  *
+  * \param MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition
+  * \param UpLo the triangular part that will be used for the decomposition: Lower (default) or Upper.
+  *             The other triangular part won't be read.
+  *
+  * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite
+  * matrix A such that A = LL^* = U^*U, where L is lower triangular.
+  *
+  * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b,
+  * for that purpose we recommend the Cholesky decomposition without square root (LDLT), which is more
+  * stable and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other
+  * situations like generalised eigenproblems with Hermitian matrices.
+  *
+  * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive definite matrices;
+  * use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine whether a system of equations
+  * has a solution.
+  *
+  * Example: \include LLT_example.cpp
+  * Output: \verbinclude LLT_example.out
+  *    
+  * \sa MatrixBase::llt(), class LDLT
+  */
+ /* HEY THIS DOX IS DISABLED BECAUSE THERE'S A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH)
+  * Note that during the decomposition, only the upper triangular part of A is considered. Therefore,
+  * the strict lower part does not have to store correct values.
+  */
+template<typename _MatrixType, int _UpLo> class LLT
+{
+  public:
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    enum {
+      PacketSize = internal::packet_traits<Scalar>::size,
+      AlignmentMask = int(PacketSize)-1,
+      UpLo = _UpLo
+    };
+
+    typedef internal::LLT_Traits<MatrixType,UpLo> Traits;
+
+    /**
+      * \brief Default Constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via LLT::compute(const MatrixType&).
+      */
+    LLT() : m_matrix(), m_isInitialized(false) {}
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa LLT()
+      */
+    LLT(Index size) : m_matrix(size, size),
+                    m_isInitialized(false) {}
+
+    LLT(const MatrixType& matrix)
+      : m_matrix(matrix.rows(), matrix.cols()),
+        m_isInitialized(false)
+    {
+      compute(matrix);
+    }
+
+    /** \returns a view of the upper triangular matrix U */
+    inline typename Traits::MatrixU matrixU() const
+    {
+      eigen_assert(m_isInitialized && "LLT is not initialized.");
+      return Traits::getU(m_matrix);
+    }
+
+    /** \returns a view of the lower triangular matrix L */
+    inline typename Traits::MatrixL matrixL() const
+    {
+      eigen_assert(m_isInitialized && "LLT is not initialized.");
+      return Traits::getL(m_matrix);
+    }
+
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * Since this LLT class assumes that the matrix A is invertible, the solution
+      * theoretically exists and is unique regardless of b.
+      *
+      * Example: \include LLT_solve.cpp
+      * Output: \verbinclude LLT_solve.out
+      *
+      * \sa solveInPlace(), MatrixBase::llt()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<LLT, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "LLT is not initialized.");
+      eigen_assert(m_matrix.rows()==b.rows()
+                && "LLT::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<LLT, Rhs>(*this, b.derived());
+    }
+
+    #ifdef EIGEN2_SUPPORT
+    template<typename OtherDerived, typename ResultType>
+    bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
+    {
+      *result = this->solve(b);
+      return true;
+    }
+    
+    bool isPositiveDefinite() const { return true; }
+    #endif
+
+    template<typename Derived>
+    void solveInPlace(MatrixBase<Derived> &bAndX) const;
+
+    LLT& compute(const MatrixType& matrix);
+
+    /** \returns the LLT decomposition matrix
+      *
+      * TODO: document the storage layout
+      */
+    inline const MatrixType& matrixLLT() const
+    {
+      eigen_assert(m_isInitialized && "LLT is not initialized.");
+      return m_matrix;
+    }
+
+    MatrixType reconstructedMatrix() const;
+
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "LLT is not initialized.");
+      return m_info;
+    }
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+
+    template<typename VectorType>
+    LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
+
+  protected:
+    
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+    
+    /** \internal
+      * Used to compute and store L
+      * The strict upper part is not used and not even initialized.
+      */
+    MatrixType m_matrix;
+    bool m_isInitialized;
+    ComputationInfo m_info;
+};
+
+namespace internal {
+
+template<typename Scalar, int UpLo> struct llt_inplace;
+
+template<typename MatrixType, typename VectorType>
+static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma)
+{
+  using std::sqrt;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::ColXpr ColXpr;
+  typedef typename internal::remove_all<ColXpr>::type ColXprCleaned;
+  typedef typename ColXprCleaned::SegmentReturnType ColXprSegment;
+  typedef Matrix<Scalar,Dynamic,1> TempVectorType;
+  typedef typename TempVectorType::SegmentReturnType TempVecSegment;
+
+  Index n = mat.cols();
+  eigen_assert(mat.rows()==n && vec.size()==n);
+
+  TempVectorType temp;
+
+  if(sigma>0)
+  {
+    // This version is based on Givens rotations.
+    // It is faster than the other one below, but only works for updates,
+    // i.e., for sigma > 0
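+    // Conceptually it zeroes temp = sqrt(sigma)*vec against the diagonal of L,
+    // one Givens rotation per column, keeping the product
+    // [L, temp] [L, temp]^* = L L^* + sigma * vec vec^* invariant.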
+    temp = sqrt(sigma) * vec;
+
+    for(Index i=0; i<n; ++i)
+    {
+      JacobiRotation<Scalar> g;
+      g.makeGivens(mat(i,i), -temp(i), &mat(i,i));
+
+      Index rs = n-i-1;
+      if(rs>0)
+      {
+        ColXprSegment x(mat.col(i).tail(rs));
+        TempVecSegment y(temp.tail(rs));
+        apply_rotation_in_the_plane(x, y, g);
+      }
+    }
+  }
+  else
+  {
+    temp = vec;
+    RealScalar beta = 1;
+    for(Index j=0; j<n; ++j)
+    {
+      RealScalar Ljj = numext::real(mat.coeff(j,j));
+      RealScalar dj = numext::abs2(Ljj);
+      Scalar wj = temp.coeff(j);
+      RealScalar swj2 = sigma*numext::abs2(wj);
+      RealScalar gamma = dj*beta + swj2;
+
+      RealScalar x = dj + swj2/beta;
+      if (x<=RealScalar(0))
+        return j;
+      RealScalar nLjj = sqrt(x);
+      mat.coeffRef(j,j) = nLjj;
+      beta += swj2/dj;
+
+      // Update the terms of L
+      Index rs = n-j-1;
+      if(rs)
+      {
+        temp.tail(rs) -= (wj/Ljj) * mat.col(j).tail(rs);
+        if(gamma != 0)
+          mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*numext::conj(wj)/gamma)*temp.tail(rs);
+      }
+    }
+  }
+  return -1;
+}
+
+template<typename Scalar> struct llt_inplace<Scalar, Lower>
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  template<typename MatrixType>
+  static typename MatrixType::Index unblocked(MatrixType& mat)
+  {
+    using std::sqrt;
+    typedef typename MatrixType::Index Index;
+    
+    eigen_assert(mat.rows()==mat.cols());
+    const Index size = mat.rows();
+    for(Index k = 0; k < size; ++k)
+    {
+      Index rs = size-k-1; // remaining size
+
+      Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1);
+      Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);
+      Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);
+
+      RealScalar x = numext::real(mat.coeff(k,k));
+      if (k>0) x -= A10.squaredNorm();
+      if (x<=RealScalar(0))
+        return k;
+      mat.coeffRef(k,k) = x = sqrt(x);
+      if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint();
+      if (rs>0) A21 *= RealScalar(1)/x;
+    }
+    return -1;
+  }
+
+  template<typename MatrixType>
+  static typename MatrixType::Index blocked(MatrixType& m)
+  {
+    typedef typename MatrixType::Index Index;
+    eigen_assert(m.rows()==m.cols());
+    Index size = m.rows();
+    if(size<32)
+      return unblocked(m);
+
+    Index blockSize = size/8;
+    blockSize = (blockSize/16)*16;
+    blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128));
+
+    for (Index k=0; k<size; k+=blockSize)
+    {
+      // partition the matrix:
+      //       A00 |  -  |  -
+      // lu  = A10 | A11 |  -
+      //       A20 | A21 | A22
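+      // Each step factors A11 in place, solves the panel A21 against L11^*
+      // from the right, and downdates the trailing block: A22 <- A22 - A21 A21^*.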
+      Index bs = (std::min)(blockSize, size-k);
+      Index rs = size - k - bs;
+      Block<MatrixType,Dynamic,Dynamic> A11(m,k,   k,   bs,bs);
+      Block<MatrixType,Dynamic,Dynamic> A21(m,k+bs,k,   rs,bs);
+      Block<MatrixType,Dynamic,Dynamic> A22(m,k+bs,k+bs,rs,rs);
+
+      Index ret;
+      if((ret=unblocked(A11))>=0) return k+ret;
+      if(rs>0) A11.adjoint().template triangularView<Upper>().template solveInPlace<OnTheRight>(A21);
+      if(rs>0) A22.template selfadjointView<Lower>().rankUpdate(A21,-1); // bottleneck
+    }
+    return -1;
+  }
+
+  template<typename MatrixType, typename VectorType>
+  static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)
+  {
+    return Eigen::internal::llt_rank_update_lower(mat, vec, sigma);
+  }
+};
+  
+template<typename Scalar> struct llt_inplace<Scalar, Upper>
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+
+  template<typename MatrixType>
+  static EIGEN_STRONG_INLINE typename MatrixType::Index unblocked(MatrixType& mat)
+  {
+    Transpose<MatrixType> matt(mat);
+    return llt_inplace<Scalar, Lower>::unblocked(matt);
+  }
+  template<typename MatrixType>
+  static EIGEN_STRONG_INLINE typename MatrixType::Index blocked(MatrixType& mat)
+  {
+    Transpose<MatrixType> matt(mat);
+    return llt_inplace<Scalar, Lower>::blocked(matt);
+  }
+  template<typename MatrixType, typename VectorType>
+  static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)
+  {
+    Transpose<MatrixType> matt(mat);
+    return llt_inplace<Scalar, Lower>::rankUpdate(matt, vec.conjugate(), sigma);
+  }
+};
+
+template<typename MatrixType> struct LLT_Traits<MatrixType,Lower>
+{
+  typedef const TriangularView<const MatrixType, Lower> MatrixL;
+  typedef const TriangularView<const typename MatrixType::AdjointReturnType, Upper> MatrixU;
+  static inline MatrixL getL(const MatrixType& m) { return m; }
+  static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+  static bool inplace_decomposition(MatrixType& m)
+  { return llt_inplace<typename MatrixType::Scalar, Lower>::blocked(m)==-1; }
+};
+
+template<typename MatrixType> struct LLT_Traits<MatrixType,Upper>
+{
+  typedef const TriangularView<const typename MatrixType::AdjointReturnType, Lower> MatrixL;
+  typedef const TriangularView<const MatrixType, Upper> MatrixU;
+  static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); }
+  static inline MatrixU getU(const MatrixType& m) { return m; }
+  static bool inplace_decomposition(MatrixType& m)
+  { return llt_inplace<typename MatrixType::Scalar, Upper>::blocked(m)==-1; }
+};
+
+} // end namespace internal
+
+/** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix
+  *
+  * \returns a reference to *this
+  *
+  * Example: \include TutorialLinAlgComputeTwice.cpp
+  * Output: \verbinclude TutorialLinAlgComputeTwice.out
+  */
+template<typename MatrixType, int _UpLo>
+LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a)
+{
+  check_template_parameters();
+  
+  eigen_assert(a.rows()==a.cols());
+  const Index size = a.rows();
+  m_matrix.resize(size, size);
+  m_matrix = a;
+
+  m_isInitialized = true;
+  bool ok = Traits::inplace_decomposition(m_matrix);
+  m_info = ok ? Success : NumericalIssue;
+
+  return *this;
+}
+
+/** Performs a rank one update (or downdate) of the current decomposition.
+  * If A = LL^* before the rank one update,
+  * then after it we have LL^* = A + sigma * v v^* where \a v must be a vector
+  * of the same dimension.
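+  *
+  * A minimal sketch (illustrative; the downdated matrix must remain positive definite):
+  * \code
+  * Eigen::MatrixXd A = 4.0 * Eigen::MatrixXd::Identity(3,3);
+  * Eigen::LLT<Eigen::MatrixXd> llt(A);
+  * Eigen::VectorXd v = Eigen::VectorXd::Ones(3);
+  * llt.rankUpdate(v,  1.0);  // factors A + v v^T
+  * llt.rankUpdate(v, -1.0);  // downdate back to A
+  * \endcode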
+  */
+template<typename _MatrixType, int _UpLo>
+template<typename VectorType>
+LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType);
+  eigen_assert(v.size()==m_matrix.cols());
+  eigen_assert(m_isInitialized);
+  if(internal::llt_inplace<typename MatrixType::Scalar, UpLo>::rankUpdate(m_matrix,v,sigma)>=0)
+    m_info = NumericalIssue;
+  else
+    m_info = Success;
+
+  return *this;
+}
+    
+namespace internal {
+template<typename _MatrixType, int UpLo, typename Rhs>
+struct solve_retval<LLT<_MatrixType, UpLo>, Rhs>
+  : solve_retval_base<LLT<_MatrixType, UpLo>, Rhs>
+{
+  typedef LLT<_MatrixType,UpLo> LLTType;
+  EIGEN_MAKE_SOLVE_HELPERS(LLTType,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dst = rhs();
+    dec().solveInPlace(dst);
+  }
+};
+}
+
+/** \internal use x = llt_object.solve(x);
+  * 
+  * This is the \em in-place version of solve().
+  *
+  * \param bAndX represents both the right-hand side matrix b and result x.
+  *
+  * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD.
+  *
+  * This version avoids a copy when the right hand side matrix b is not
+  * needed anymore.
+  *
+  * \sa LLT::solve(), MatrixBase::llt()
+  */
+template<typename MatrixType, int _UpLo>
+template<typename Derived>
+void LLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const
+{
+  eigen_assert(m_isInitialized && "LLT is not initialized.");
+  eigen_assert(m_matrix.rows()==bAndX.rows());
+  matrixL().solveInPlace(bAndX);
+  matrixU().solveInPlace(bAndX);
+}
+
+/** \returns the matrix represented by the decomposition,
+ * i.e., it returns the product: L L^*.
+ * This function is provided for debug purpose. */
+template<typename MatrixType, int _UpLo>
+MatrixType LLT<MatrixType,_UpLo>::reconstructedMatrix() const
+{
+  eigen_assert(m_isInitialized && "LLT is not initialized.");
+  return matrixL() * matrixL().adjoint().toDenseMatrix();
+}
+
+/** \cholesky_module
+  * \returns the LLT decomposition of \c *this
+  */
+template<typename Derived>
+inline const LLT<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::llt() const
+{
+  return LLT<PlainObject>(derived());
+}
+
+/** \cholesky_module
+  * \returns the LLT decomposition of \c *this
+  */
+template<typename MatrixType, unsigned int UpLo>
+inline const LLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
+SelfAdjointView<MatrixType, UpLo>::llt() const
+{
+  return LLT<PlainObject,UpLo>(m_matrix);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_LLT_H
diff --git a/Eigen/src/Cholesky/LLT_MKL.h b/Eigen/src/Cholesky/LLT_MKL.h
new file mode 100644
index 0000000..66675d7
--- /dev/null
+++ b/Eigen/src/Cholesky/LLT_MKL.h
@@ -0,0 +1,102 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *     LLt decomposition based on LAPACKE_?potrf function.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_LLT_MKL_H
+#define EIGEN_LLT_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+#include <iostream>
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Scalar> struct mkl_llt;
+
+#define EIGEN_MKL_LLT(EIGTYPE, MKLTYPE, MKLPREFIX) \
+template<> struct mkl_llt<EIGTYPE> \
+{ \
+  template<typename MatrixType> \
+  static inline typename MatrixType::Index potrf(MatrixType& m, char uplo) \
+  { \
+    lapack_int matrix_order; \
+    lapack_int size, lda, info, StorageOrder; \
+    EIGTYPE* a; \
+    eigen_assert(m.rows()==m.cols()); \
+    /* Set up parameters for ?potrf */ \
+    size = m.rows(); \
+    StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \
+    matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
+    a = &(m.coeffRef(0,0)); \
+    lda = m.outerStride(); \
+\
+    info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \
+    info = (info==0) ? -1 : info>0 ? info-1 : size; \
+    return info; \
+  } \
+}; \
+template<> struct llt_inplace<EIGTYPE, Lower> \
+{ \
+  template<typename MatrixType> \
+  static typename MatrixType::Index blocked(MatrixType& m) \
+  { \
+    return mkl_llt<EIGTYPE>::potrf(m, 'L'); \
+  } \
+  template<typename MatrixType, typename VectorType> \
+  static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
+  { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \
+}; \
+template<> struct llt_inplace<EIGTYPE, Upper> \
+{ \
+  template<typename MatrixType> \
+  static typename MatrixType::Index blocked(MatrixType& m) \
+  { \
+    return mkl_llt<EIGTYPE>::potrf(m, 'U'); \
+  } \
+  template<typename MatrixType, typename VectorType> \
+  static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
+  { \
+    Transpose<MatrixType> matt(mat); \
+    return llt_inplace<EIGTYPE, Lower>::rankUpdate(matt, vec.conjugate(), sigma); \
+  } \
+};
+
+EIGEN_MKL_LLT(double, double, d)
+EIGEN_MKL_LLT(float, float, s)
+EIGEN_MKL_LLT(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_LLT(scomplex, MKL_Complex8, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_LLT_MKL_H
diff --git a/Eigen/src/CholmodSupport/CMakeLists.txt b/Eigen/src/CholmodSupport/CMakeLists.txt
new file mode 100644
index 0000000..814dfa6
--- /dev/null
+++ b/Eigen/src/CholmodSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_CholmodSupport_SRCS "*.h")
+
+INSTALL(FILES 
+  ${Eigen_CholmodSupport_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/CholmodSupport COMPONENT Devel
+  )
diff --git a/Eigen/src/CholmodSupport/CholmodSupport.h b/Eigen/src/CholmodSupport/CholmodSupport.h
new file mode 100644
index 0000000..c449960
--- /dev/null
+++ b/Eigen/src/CholmodSupport/CholmodSupport.h
@@ -0,0 +1,607 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CHOLMODSUPPORT_H
+#define EIGEN_CHOLMODSUPPORT_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Scalar, typename CholmodType>
+void cholmod_configure_matrix(CholmodType& mat)
+{
+  if (internal::is_same<Scalar,float>::value)
+  {
+    mat.xtype = CHOLMOD_REAL;
+    mat.dtype = CHOLMOD_SINGLE;
+  }
+  else if (internal::is_same<Scalar,double>::value)
+  {
+    mat.xtype = CHOLMOD_REAL;
+    mat.dtype = CHOLMOD_DOUBLE;
+  }
+  else if (internal::is_same<Scalar,std::complex<float> >::value)
+  {
+    mat.xtype = CHOLMOD_COMPLEX;
+    mat.dtype = CHOLMOD_SINGLE;
+  }
+  else if (internal::is_same<Scalar,std::complex<double> >::value)
+  {
+    mat.xtype = CHOLMOD_COMPLEX;
+    mat.dtype = CHOLMOD_DOUBLE;
+  }
+  else
+  {
+    eigen_assert(false && "Scalar type not supported by CHOLMOD");
+  }
+}
+
+} // namespace internal
+
+/** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object.
+  * Note that the data are shared.
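+  *
+  * A minimal sketch (illustrative; assumes the CholmodSupport module is included):
+  * \code
+  * Eigen::SparseMatrix<double> A(10,10);
+  * // ... fill A ...
+  * cholmod_sparse cA = Eigen::viewAsCholmod(A);  // shares A's buffers, no copy
+  * \endcode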
+  */
+template<typename _Scalar, int _Options, typename _Index>
+cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
+{
+  cholmod_sparse res;
+  res.nzmax   = mat.nonZeros();
+  res.nrow    = mat.rows();
+  res.ncol    = mat.cols();
+  res.p       = mat.outerIndexPtr();
+  res.i       = mat.innerIndexPtr();
+  res.x       = mat.valuePtr();
+  res.z       = 0;
+  res.sorted  = 1;
+  if(mat.isCompressed())
+  {
+    res.packed  = 1;
+    res.nz = 0;
+  }
+  else
+  {
+    res.packed  = 0;
+    res.nz = mat.innerNonZeroPtr();
+  }
+
+  res.dtype   = 0;
+  res.stype   = -1;
+  
+  if (internal::is_same<_Index,int>::value)
+  {
+    res.itype = CHOLMOD_INT;
+  }
+  else if (internal::is_same<_Index,UF_long>::value)
+  {
+    res.itype = CHOLMOD_LONG;
+  }
+  else
+  {
+    eigen_assert(false && "Index type not supported yet");
+  }
+
+  // setup res.xtype
+  internal::cholmod_configure_matrix<_Scalar>(res);
+  
+  res.stype = 0;
+  
+  return res;
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+const cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat)
+{
+  cholmod_sparse res = viewAsCholmod(mat.const_cast_derived());
+  return res;
+}
+
+/** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix.
+  * The data are not copied but shared. */
+template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo>
+cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat)
+{
+  cholmod_sparse res = viewAsCholmod(mat.matrix().const_cast_derived());
+  
+  if(UpLo==Upper) res.stype =  1;
+  if(UpLo==Lower) res.stype = -1;
+
+  return res;
+}
+
+/** Returns a view of the Eigen \b dense matrix \a mat as Cholmod dense matrix.
+  * The data are not copied but shared. */
+template<typename Derived>
+cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat)
+{
+  EIGEN_STATIC_ASSERT((internal::traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+  typedef typename Derived::Scalar Scalar;
+
+  cholmod_dense res;
+  res.nrow   = mat.rows();
+  res.ncol   = mat.cols();
+  res.nzmax  = res.nrow * res.ncol;
+  res.d      = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride();
+  res.x      = (void*)(mat.derived().data());
+  res.z      = 0;
+
+  internal::cholmod_configure_matrix<Scalar>(res);
+
+  return res;
+}
+
+/** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix.
+  * The data are not copied but shared. */
+template<typename Scalar, int Flags, typename Index>
+MappedSparseMatrix<Scalar,Flags,Index> viewAsEigen(cholmod_sparse& cm)
+{
+  return MappedSparseMatrix<Scalar,Flags,Index>
+         (cm.nrow, cm.ncol, static_cast<Index*>(cm.p)[cm.ncol],
+          static_cast<Index*>(cm.p), static_cast<Index*>(cm.i),static_cast<Scalar*>(cm.x) );
+}
+
+enum CholmodMode {
+  CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt
+};
+
+
+/** \ingroup CholmodSupport_Module
+  * \class CholmodBase
+  * \brief The base class for the direct Cholesky factorization of Cholmod
+  * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT
+  */
+template<typename _MatrixType, int _UpLo, typename Derived>
+class CholmodBase : internal::noncopyable
+{
+  public:
+    typedef _MatrixType MatrixType;
+    enum { UpLo = _UpLo };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef MatrixType CholMatrixType;
+    typedef typename MatrixType::Index Index;
+
+  public:
+
+    CholmodBase()
+      : m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
+    {
+      m_shiftOffset[0] = m_shiftOffset[1] = RealScalar(0.0);
+      cholmod_start(&m_cholmod);
+    }
+
+    CholmodBase(const MatrixType& matrix)
+      : m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
+    {
+      m_shiftOffset[0] = m_shiftOffset[1] = RealScalar(0.0);
+      cholmod_start(&m_cholmod);
+      compute(matrix);
+    }
+
+    ~CholmodBase()
+    {
+      if(m_cholmodFactor)
+        cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+      cholmod_finish(&m_cholmod);
+    }
+    
+    inline Index cols() const { return m_cholmodFactor->n; }
+    inline Index rows() const { return m_cholmodFactor->n; }
+    
+    Derived& derived() { return *static_cast<Derived*>(this); }
+    const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+
+    /** Computes the sparse Cholesky decomposition of \a matrix */
+    Derived& compute(const MatrixType& matrix)
+    {
+      analyzePattern(matrix);
+      factorize(matrix);
+      return derived();
+    }
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<CholmodBase, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "LLT is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<CholmodBase, Rhs>(*this, b.derived());
+    }
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<CholmodBase, Rhs>
+    solve(const SparseMatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "LLT is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::sparse_solve_retval<CholmodBase, Rhs>(*this, b.derived());
+    }
+    
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving for several problems having the same structure.
+      * 
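+      * A minimal sketch of the analyze-once / factorize-many pattern (illustrative;
+      * \c A and \c b are assumed to be a filled sparse matrix and right-hand side):
+      * \code
+      * Eigen::CholmodSimplicialLLT<Eigen::SparseMatrix<double> > solver;
+      * solver.analyzePattern(A);   // symbolic step, done once per sparsity pattern
+      * solver.factorize(A);        // numeric step, repeated as the values of A change
+      * Eigen::VectorXd x = solver.solve(b);
+      * \endcode
+      *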
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& matrix)
+    {
+      if(m_cholmodFactor)
+      {
+        cholmod_free_factor(&m_cholmodFactor, &m_cholmod);
+        m_cholmodFactor = 0;
+      }
+      cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
+      m_cholmodFactor = cholmod_analyze(&A, &m_cholmod);
+      
+      this->m_isInitialized = true;
+      this->m_info = Success;
+      m_analysisIsOk = true;
+      m_factorizationIsOk = false;
+    }
+    
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& matrix)
+    {
+      eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+      cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
+      cholmod_factorize_p(&A, m_shiftOffset, 0, 0, m_cholmodFactor, &m_cholmod);
+      
+      // If the factorization failed, minor is the column at which it did. On success minor == n.
+      this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue);
+      m_factorizationIsOk = true;
+    }
+    
+    /** Returns a reference to the Cholmod configuration structure to get full control over the performed operations.
+     *  See the Cholmod user guide for details. */
+    cholmod_common& cholmod() { return m_cholmod; }
+    
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal */
+    template<typename Rhs,typename Dest>
+    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+    {
+      eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+      const Index size = m_cholmodFactor->n;
+      EIGEN_UNUSED_VARIABLE(size);
+      eigen_assert(size==b.rows());
+
+      // note: cd stands for Cholmod Dense
+      Rhs& b_ref(b.const_cast_derived());
+      cholmod_dense b_cd = viewAsCholmod(b_ref);
+      cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod);
+      if(!x_cd)
+      {
+        this->m_info = NumericalIssue;
+      }
+      // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
+      dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
+      cholmod_free_dense(&x_cd, &m_cholmod);
+    }
+    
+    /** \internal */
+    template<typename RhsScalar, int RhsOptions, typename RhsIndex, typename DestScalar, int DestOptions, typename DestIndex>
+    void _solve(const SparseMatrix<RhsScalar,RhsOptions,RhsIndex> &b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+    {
+      eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+      const Index size = m_cholmodFactor->n;
+      EIGEN_UNUSED_VARIABLE(size);
+      eigen_assert(size==b.rows());
+
+      // note: cs stands for Cholmod Sparse
+      cholmod_sparse b_cs = viewAsCholmod(b);
+      cholmod_sparse* x_cs = cholmod_spsolve(CHOLMOD_A, m_cholmodFactor, &b_cs, &m_cholmod);
+      if(!x_cs)
+      {
+        this->m_info = NumericalIssue;
+      }
+      // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
+      dest = viewAsEigen<DestScalar,DestOptions,DestIndex>(*x_cs);
+      cholmod_free_sparse(&x_cs, &m_cholmod);
+    }
+    #endif // EIGEN_PARSED_BY_DOXYGEN
+    
+    
+    /** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization.
+      *
+      * During the numerical factorization, an offset term is added to the diagonal coefficients:\n
+      * \c d_ii = \a offset + \c d_ii
+      *
+      * The default is \a offset=0.
+      *
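+      * A minimal sketch (\c solver is a hypothetical Cholmod-based solver instance):
+      * \code
+      * solver.setShift(1e-8);  // hypothetical regularization: d_ii becomes 1e-8 + d_ii
+      * solver.compute(A);
+      * \endcode
+      *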
+      * \returns a reference to \c *this.
+      */
+    Derived& setShift(const RealScalar& offset)
+    {
+      m_shiftOffset[0] = offset;
+      return derived();
+    }
+    
+    template<typename Stream>
+    void dumpMemory(Stream& /*s*/)
+    {}
+    
+  protected:
+    mutable cholmod_common m_cholmod;
+    cholmod_factor* m_cholmodFactor;
+    RealScalar m_shiftOffset[2];
+    mutable ComputationInfo m_info;
+    bool m_isInitialized;
+    int m_factorizationIsOk;
+    int m_analysisIsOk;
+};
+
+/** \ingroup CholmodSupport_Module
+  * \class CholmodSimplicialLLT
+  * \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod
+  *
+  * This class can be used to solve sparse linear problems A.X = B via a simplicial LL^T Cholesky factorization
+  * using the Cholmod library.
+  * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class, and therefore has little practical interest.
+  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+  * X and B can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  *
+  * This class supports all kinds of SparseMatrix<>: row- or column-major; upper, lower, or both; compressed or non-compressed.
+  *
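+  * A minimal usage sketch (assuming a selfadjoint positive-definite
+  * \c SparseMatrix<double> \c A and a dense right-hand side \c b):
+  * \code
+  * CholmodSimplicialLLT<SparseMatrix<double> > solver(A);
+  * VectorXd x;
+  * if(solver.info() == Success)
+  *   x = solver.solve(b);
+  * \endcode
+  *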
+  * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLLT
+  */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> >
+{
+    typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base;
+    using Base::m_cholmod;
+    
+  public:
+    
+    typedef _MatrixType MatrixType;
+    
+    CholmodSimplicialLLT() : Base() { init(); }
+
+    CholmodSimplicialLLT(const MatrixType& matrix) : Base()
+    {
+      init();
+      compute(matrix);
+    }
+
+    ~CholmodSimplicialLLT() {}
+  protected:
+    void init()
+    {
+      m_cholmod.final_asis = 0;
+      m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+      m_cholmod.final_ll = 1;
+    }
+};
+
+
+/** \ingroup CholmodSupport_Module
+  * \class CholmodSimplicialLDLT
+  * \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod
+  *
+  * This class can be used to solve sparse linear problems A.X = B via a simplicial LDL^T Cholesky factorization
+  * using the Cholmod library.
+  * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class, and therefore has little practical interest.
+  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+  * X and B can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  *
+  * This class supports all kinds of SparseMatrix<>: row- or column-major; upper, lower, or both; compressed or non-compressed.
+  *
+  * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLDLT
+  */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> >
+{
+    typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base;
+    using Base::m_cholmod;
+    
+  public:
+    
+    typedef _MatrixType MatrixType;
+    
+    CholmodSimplicialLDLT() : Base() { init(); }
+
+    CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
+    {
+      init();
+      compute(matrix);
+    }
+
+    ~CholmodSimplicialLDLT() {}
+  protected:
+    void init()
+    {
+      m_cholmod.final_asis = 1;
+      m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+    }
+};
+
+/** \ingroup CholmodSupport_Module
+  * \class CholmodSupernodalLLT
+  * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod
+  *
+  * This class can be used to solve sparse linear problems A.X = B via a supernodal LL^T Cholesky factorization
+  * using the Cholmod library.
+  * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM.
+  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+  * X and B can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  *
+  * This class supports all kinds of SparseMatrix<>: row- or column-major; upper, lower, or both; compressed or non-compressed.
+  *
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> >
+{
+    typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base;
+    using Base::m_cholmod;
+    
+  public:
+    
+    typedef _MatrixType MatrixType;
+    
+    CholmodSupernodalLLT() : Base() { init(); }
+
+    CholmodSupernodalLLT(const MatrixType& matrix) : Base()
+    {
+      init();
+      compute(matrix);
+    }
+
+    ~CholmodSupernodalLLT() {}
+  protected:
+    void init()
+    {
+      m_cholmod.final_asis = 1;
+      m_cholmod.supernodal = CHOLMOD_SUPERNODAL;
+    }
+};
+
+/** \ingroup CholmodSupport_Module
+  * \class CholmodDecomposition
+  * \brief A general Cholesky factorization and solver based on Cholmod
+  *
+  * This class can be used to solve sparse linear problems A.X = B via an LL^T or LDL^T Cholesky factorization
+  * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
+  * X and B can be either dense or sparse.
+  *
+  * This variant permits changing the underlying Cholesky method at runtime.
+  * On the other hand, it does not provide access to the result of the factorization.
+  * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  *
+  * This class supports all kinds of SparseMatrix<>: row- or column-major; upper, lower, or both; compressed or non-compressed.
+  *
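+  * A minimal sketch of selecting the method at runtime (assumes \c A is a
+  * selfadjoint positive-definite \c SparseMatrix<double>):
+  * \code
+  * CholmodDecomposition<SparseMatrix<double> > solver;
+  * solver.setMode(CholmodSupernodalLLt);  // or CholmodAuto, CholmodSimplicialLLt, CholmodLDLt
+  * solver.compute(A);
+  * \endcode
+  *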
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename _MatrixType, int _UpLo = Lower>
+class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> >
+{
+    typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base;
+    using Base::m_cholmod;
+    
+  public:
+    
+    typedef _MatrixType MatrixType;
+    
+    CholmodDecomposition() : Base() { init(); }
+
+    CholmodDecomposition(const MatrixType& matrix) : Base()
+    {
+      init();
+      compute(matrix);
+    }
+
+    ~CholmodDecomposition() {}
+    
+    void setMode(CholmodMode mode)
+    {
+      switch(mode)
+      {
+        case CholmodAuto:
+          m_cholmod.final_asis = 1;
+          m_cholmod.supernodal = CHOLMOD_AUTO;
+          break;
+        case CholmodSimplicialLLt:
+          m_cholmod.final_asis = 0;
+          m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+          m_cholmod.final_ll = 1;
+          break;
+        case CholmodSupernodalLLt:
+          m_cholmod.final_asis = 1;
+          m_cholmod.supernodal = CHOLMOD_SUPERNODAL;
+          break;
+        case CholmodLDLt:
+          m_cholmod.final_asis = 1;
+          m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;
+          break;
+        default:
+          break;
+      }
+    }
+  protected:
+    void init()
+    {
+      m_cholmod.final_asis = 1;
+      m_cholmod.supernodal = CHOLMOD_AUTO;
+    }
+};
+
+namespace internal {
+  
+template<typename _MatrixType, int _UpLo, typename Derived, typename Rhs>
+struct solve_retval<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+  : solve_retval_base<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+{
+  typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+template<typename _MatrixType, int _UpLo, typename Derived, typename Rhs>
+struct sparse_solve_retval<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+  : sparse_solve_retval_base<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
+{
+  typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CHOLMODSUPPORT_H
diff --git a/Eigen/src/Core/Array.h b/Eigen/src/Core/Array.h
new file mode 100644
index 0000000..0ab03ef
--- /dev/null
+++ b/Eigen/src/Core/Array.h
@@ -0,0 +1,308 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ARRAY_H
+#define EIGEN_ARRAY_H
+
+namespace Eigen {
+
+/** \class Array 
+  * \ingroup Core_Module
+  *
+  * \brief General-purpose arrays with easy API for coefficient-wise operations
+  *
+  * The %Array class is very similar to the Matrix class. It provides
+  * general-purpose one- and two-dimensional arrays. The difference between the
+  * %Array and the %Matrix class is primarily in the API: the API for the
+  * %Array class provides easy access to coefficient-wise operations, while the
+  * API for the %Matrix class provides easy access to linear-algebra
+  * operations.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN.
+  *
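+  * For instance, the following sketch adds a scalar and multiplies two arrays
+  * coefficient-wise, something that would require \c .array() detours on matrices:
+  * \code
+  * ArrayXXf a(2,2), b(2,2);
+  * a << 1, 2, 3, 4;
+  * b << 5, 6, 7, 8;
+  * ArrayXXf c = a * b + 2.0f;  // coefficient-wise product, then add 2 to every coefficient
+  * \endcode
+  *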
+  * \sa \ref TutorialArrayClass, \ref TopicClassHierarchy
+  */
+namespace internal {
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct traits<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > : traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+{
+  typedef ArrayXpr XprKind;
+  typedef ArrayBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > XprBase;
+};
+}
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+class Array
+  : public PlainObjectBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+{
+  public:
+
+    typedef PlainObjectBase<Array> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Array)
+
+    enum { Options = _Options };
+    typedef typename Base::PlainObject PlainObject;
+
+  protected:
+    template <typename Derived, typename OtherDerived, bool IsVector>
+    friend struct internal::conservative_resize_like_impl;
+
+    using Base::m_storage;
+
+  public:
+
+    using Base::base;
+    using Base::coeff;
+    using Base::coeffRef;
+
+    /**
+      * The usage of
+      *   using Base::operator=;
+      * fails on MSVC. Since the code below works with both GCC and MSVC, we skip
+      * the 'using' declaration. This workaround is needed only for operator=.
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Array& operator=(const EigenBase<OtherDerived> &other)
+    {
+      return Base::operator=(other);
+    }
+
+    /** Copies the value of the expression \a other into \c *this with automatic resizing.
+      *
+      * *this might be resized to match the dimensions of \a other. If *this was a null array (not already initialized),
+      * it will be initialized.
+      *
+      * Note that copying a row-vector into a vector (and conversely) is allowed.
+      * The resizing, if any, is then done in the appropriate way so that row-vectors
+      * remain row-vectors and vectors remain vectors.
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Array& operator=(const ArrayBase<OtherDerived>& other)
+    {
+      return Base::_set(other);
+    }
+
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    EIGEN_STRONG_INLINE Array& operator=(const Array& other)
+    {
+      return Base::_set(other);
+    }
+
+    /** Default constructor.
+      *
+      * For fixed-size arrays, does nothing.
+      *
+      * For dynamic-size arrays, creates an empty array of size 0. Does not allocate any memory. Such an array
+      * is called a null array. This constructor is the unique way to create null arrays: resizing
+      * an array to 0 is not supported.
+      *
+      * \sa resize(Index,Index)
+      */
+    EIGEN_STRONG_INLINE Array() : Base()
+    {
+      Base::_check_template_params();
+      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    // FIXME is it still needed ??
+    /** \internal */
+    Array(internal::constructor_without_unaligned_array_assert)
+      : Base(internal::constructor_without_unaligned_array_assert())
+    {
+      Base::_check_template_params();
+      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+    }
+#endif
+
+    /** Constructs a vector or row-vector with the given dimension. \only_for_vectors
+      *
+      * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
+      * it is redundant to pass the dimension here, so it makes more sense to use the default
+      * constructor Array() instead.
+      */
+    EIGEN_STRONG_INLINE explicit Array(Index dim)
+      : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
+    {
+      Base::_check_template_params();
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Array)
+      eigen_assert(dim >= 0);
+      eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim);
+      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename T0, typename T1>
+    EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1)
+    {
+      Base::_check_template_params();
+      this->template _init2<T0,T1>(val0, val1);
+    }
+    #else
+    /** constructs an uninitialized array with \a rows rows and \a cols columns.
+      *
+      * This is useful for dynamic-size arrays. For fixed-size arrays,
+      * it is redundant to pass these parameters, so one should use the default constructor
+      * Array() instead. */
+    Array(Index rows, Index cols);
+    /** constructs an initialized 2D vector with given coefficients */
+    Array(const Scalar& val0, const Scalar& val1);
+    #endif
+
+    /** constructs an initialized 3D vector with given coefficients */
+    EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2)
+    {
+      Base::_check_template_params();
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3)
+      m_storage.data()[0] = val0;
+      m_storage.data()[1] = val1;
+      m_storage.data()[2] = val2;
+    }
+    /** constructs an initialized 4D vector with given coefficients */
+    EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3)
+    {
+      Base::_check_template_params();
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4)
+      m_storage.data()[0] = val0;
+      m_storage.data()[1] = val1;
+      m_storage.data()[2] = val2;
+      m_storage.data()[3] = val3;
+    }
+
+    explicit Array(const Scalar *data);
+
+    /** Constructor copying the value of the expression \a other */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Array(const ArrayBase<OtherDerived>& other)
+             : Base(other.rows() * other.cols(), other.rows(), other.cols())
+    {
+      Base::_check_template_params();
+      Base::_set_noalias(other);
+    }
+    /** Copy constructor */
+    EIGEN_STRONG_INLINE Array(const Array& other)
+            : Base(other.rows() * other.cols(), other.rows(), other.cols())
+    {
+      Base::_check_template_params();
+      Base::_set_noalias(other);
+    }
+    /** Copy constructor with in-place evaluation */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Array(const ReturnByValue<OtherDerived>& other)
+    {
+      Base::_check_template_params();
+      Base::resize(other.rows(), other.cols());
+      other.evalTo(*this);
+    }
+
+    /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Array(const EigenBase<OtherDerived> &other)
+      : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
+    {
+      Base::_check_template_params();
+      Base::_resize_to_match(other);
+      *this = other;
+    }
+
+    /** Override MatrixBase::swap() since for dynamic-sized matrices of the same type it is enough to swap the
+      * data pointers.
+      */
+    template<typename OtherDerived>
+    void swap(ArrayBase<OtherDerived> const & other)
+    { this->_swap(other.derived()); }
+
+    inline Index innerStride() const { return 1; }
+    inline Index outerStride() const { return this->innerSize(); }
+
+    #ifdef EIGEN_ARRAY_PLUGIN
+    #include EIGEN_ARRAY_PLUGIN
+    #endif
+
+  private:
+
+    template<typename MatrixType, typename OtherDerived, bool SwapPointers>
+    friend struct internal::matrix_swap_impl;
+};
+
+/** \defgroup arraytypedefs Global array typedefs
+  * \ingroup Core_Module
+  *
+  * Eigen defines several typedef shortcuts for most common 1D and 2D array types.
+  *
+  * The general patterns are the following:
+  *
+  * \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2, \c 3, \c 4 for fixed-size square arrays or \c X for dynamic size,
+  * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd
+  * for complex double.
+  *
+  * For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size array of floats.
+  *
+  * There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is
+  * a fixed-size 1D array of 4 complex floats.
+  *
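+  * A short sketch using these typedefs:
+  * \code
+  * Array33d rot;        // fixed-size 3x3 array of doubles
+  * ArrayXf  signal(10); // dynamic-size 1D array of floats
+  * Array4cf quad;       // fixed-size 1D array of 4 complex floats
+  * \endcode
+  *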
+  * \sa class Array
+  */
+
+#define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)   \
+/** \ingroup arraytypedefs */                                    \
+typedef Array<Type, Size, Size> Array##SizeSuffix##SizeSuffix##TypeSuffix;  \
+/** \ingroup arraytypedefs */                                    \
+typedef Array<Type, Size, 1>    Array##SizeSuffix##TypeSuffix;
+
+#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size)         \
+/** \ingroup arraytypedefs */                                    \
+typedef Array<Type, Size, Dynamic> Array##Size##X##TypeSuffix;  \
+/** \ingroup arraytypedefs */                                    \
+typedef Array<Type, Dynamic, Size> Array##X##Size##TypeSuffix;
+
+#define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \
+EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \
+EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4)
+
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int,                  i)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float,                f)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double,               d)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<float>,  cf)
+EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
+
+#undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES
+#undef EIGEN_MAKE_ARRAY_TYPEDEFS
+
+#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \
+using Eigen::Array##SizeSuffix##SizeSuffix##TypeSuffix; \
+using Eigen::Array##SizeSuffix##TypeSuffix;
+
+#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \
+
+#define EIGEN_USING_ARRAY_TYPEDEFS \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \
+EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd)
+
+} // end namespace Eigen
+
+#endif // EIGEN_ARRAY_H
diff --git a/Eigen/src/Core/ArrayBase.h b/Eigen/src/Core/ArrayBase.h
new file mode 100644
index 0000000..3885260
--- /dev/null
+++ b/Eigen/src/Core/ArrayBase.h
@@ -0,0 +1,228 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ARRAYBASE_H
+#define EIGEN_ARRAYBASE_H
+
+namespace Eigen { 
+
+template<typename ExpressionType> class MatrixWrapper;
+
+/** \class ArrayBase
+  * \ingroup Core_Module
+  *
+  * \brief Base class for all 1D and 2D arrays, and related expressions
+  *
+  * An array is similar to a dense vector or matrix. While matrices are mathematical
+  * objects with well-defined linear algebra operators, an array is just a collection
+  * of scalar values arranged in a one- or two-dimensional fashion. As the main consequence,
+  * all operations applied to an array are performed coefficient-wise. Furthermore,
+  * arrays support scalar math functions of the C++ standard library (e.g., std::sin(x)), and convenient
+  * constructors that make it easy to write generic code working for both scalar values
+  * and arrays.
+  *
+  * This class is the base that is inherited by all array expression types.
+  *
+  * \tparam Derived is the derived type, e.g., an array or an expression type.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN.
+  *
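+  * For instance, code written against plain arithmetic can often be reused unchanged on
+  * arrays, since every operator acts coefficient-wise (a sketch; \c polyEval is a
+  * hypothetical helper):
+  * \code
+  * template<typename T> T polyEval(const T& x) { return x*x + 2*x; }
+  * // polyEval(3.0) == 15.0; polyEval on an ArrayXd applies the polynomial per coefficient
+  * \endcode
+  *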
+  * \sa class MatrixBase, \ref TopicClassHierarchy
+  */
+template<typename Derived> class ArrayBase
+  : public DenseBase<Derived>
+{
+  public:
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** The base class for a given storage type. */
+    typedef ArrayBase StorageBaseType;
+
+    typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;
+
+    using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
+                typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
+
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    typedef DenseBase<Derived> Base;
+    using Base::RowsAtCompileTime;
+    using Base::ColsAtCompileTime;
+    using Base::SizeAtCompileTime;
+    using Base::MaxRowsAtCompileTime;
+    using Base::MaxColsAtCompileTime;
+    using Base::MaxSizeAtCompileTime;
+    using Base::IsVectorAtCompileTime;
+    using Base::Flags;
+    using Base::CoeffReadCost;
+
+    using Base::derived;
+    using Base::const_cast_derived;
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::coeff;
+    using Base::coeffRef;
+    using Base::lazyAssign;
+    using Base::operator=;
+    using Base::operator+=;
+    using Base::operator-=;
+    using Base::operator*=;
+    using Base::operator/=;
+
+    typedef typename Base::CoeffReturnType CoeffReturnType;
+
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal the plain matrix type corresponding to this expression. Note that it is not necessarily
+      * exactly the return type of eval(): in the case of plain matrices, the return type of eval() is a const
+      * reference to a matrix, not a matrix! It is however guaranteed that the return type of eval() is either
+      * PlainObject or const PlainObject&.
+      */
+    typedef Array<typename internal::traits<Derived>::Scalar,
+                internal::traits<Derived>::RowsAtCompileTime,
+                internal::traits<Derived>::ColsAtCompileTime,
+                AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),
+                internal::traits<Derived>::MaxRowsAtCompileTime,
+                internal::traits<Derived>::MaxColsAtCompileTime
+          > PlainObject;
+
+
+    /** \internal Represents a matrix with all coefficients equal to one another. */
+    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Derived> ConstantReturnType;
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase
+#   include "../plugins/CommonCwiseUnaryOps.h"
+#   include "../plugins/MatrixCwiseUnaryOps.h"
+#   include "../plugins/ArrayCwiseUnaryOps.h"
+#   include "../plugins/CommonCwiseBinaryOps.h"
+#   include "../plugins/MatrixCwiseBinaryOps.h"
+#   include "../plugins/ArrayCwiseBinaryOps.h"
+#   ifdef EIGEN_ARRAYBASE_PLUGIN
+#     include EIGEN_ARRAYBASE_PLUGIN
+#   endif
+#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+    /** Special case of the template operator=, in order to prevent the compiler
+      * from generating a default operator= (issue hit with g++ 4.1)
+      */
+    Derived& operator=(const ArrayBase& other)
+    {
+      return internal::assign_selector<Derived,Derived>::run(derived(), other.derived());
+    }
+
+    Derived& operator+=(const Scalar& scalar)
+    { return *this = derived() + scalar; }
+    Derived& operator-=(const Scalar& scalar)
+    { return *this = derived() - scalar; }
+
+    template<typename OtherDerived>
+    Derived& operator+=(const ArrayBase<OtherDerived>& other);
+    template<typename OtherDerived>
+    Derived& operator-=(const ArrayBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    Derived& operator*=(const ArrayBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    Derived& operator/=(const ArrayBase<OtherDerived>& other);
+
+  public:
+    ArrayBase<Derived>& array() { return *this; }
+    const ArrayBase<Derived>& array() const { return *this; }
+
+    /** \returns an \link Eigen::MatrixBase Matrix \endlink expression of this array
+      * \sa MatrixBase::array() */
+    MatrixWrapper<Derived> matrix() { return derived(); }
+    const MatrixWrapper<const Derived> matrix() const { return derived(); }
+
+//     template<typename Dest>
+//     inline void evalTo(Dest& dst) const { dst = matrix(); }
+
+  protected:
+    ArrayBase() : Base() {}
+
+  private:
+    explicit ArrayBase(Index);
+    ArrayBase(Index,Index);
+    template<typename OtherDerived> explicit ArrayBase(const ArrayBase<OtherDerived>&);
+  protected:
+    // mixing arrays and matrices is not legal
+    template<typename OtherDerived> Derived& operator+=(const MatrixBase<OtherDerived>& )
+    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
+    // mixing arrays and matrices is not legal
+    template<typename OtherDerived> Derived& operator-=(const MatrixBase<OtherDerived>& )
+    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
+};
+
+/** replaces \c *this by \c *this - \a other.
+  *
+  * \returns a reference to \c *this
+  */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+ArrayBase<Derived>::operator-=(const ArrayBase<OtherDerived> &other)
+{
+  SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, Derived, OtherDerived> tmp(derived());
+  tmp = other.derived();
+  return derived();
+}
+
+/** replaces \c *this by \c *this + \a other.
+  *
+  * \returns a reference to \c *this
+  */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+ArrayBase<Derived>::operator+=(const ArrayBase<OtherDerived>& other)
+{
+  SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, Derived, OtherDerived> tmp(derived());
+  tmp = other.derived();
+  return derived();
+}
+
+/** replaces \c *this by \c *this * \a other coefficient-wise.
+  *
+  * \returns a reference to \c *this
+  */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+ArrayBase<Derived>::operator*=(const ArrayBase<OtherDerived>& other)
+{
+  SelfCwiseBinaryOp<internal::scalar_product_op<Scalar>, Derived, OtherDerived> tmp(derived());
+  tmp = other.derived();
+  return derived();
+}
+
+/** replaces \c *this by \c *this / \a other coefficient-wise.
+  *
+  * \returns a reference to \c *this
+  */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+ArrayBase<Derived>::operator/=(const ArrayBase<OtherDerived>& other)
+{
+  SelfCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, Derived, OtherDerived> tmp(derived());
+  tmp = other.derived();
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_ARRAYBASE_H
diff --git a/Eigen/src/Core/ArrayWrapper.h b/Eigen/src/Core/ArrayWrapper.h
new file mode 100644
index 0000000..b4641e2
--- /dev/null
+++ b/Eigen/src/Core/ArrayWrapper.h
@@ -0,0 +1,264 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ARRAYWRAPPER_H
+#define EIGEN_ARRAYWRAPPER_H
+
+namespace Eigen { 
+
+/** \class ArrayWrapper
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a mathematical vector or matrix as an array object
+  *
+  * This class is the return type of MatrixBase::array(), and most of the time
+  * this is the only way it is used.
+  *
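+  * A typical sketch (assuming a \c MatrixXd \c m):
+  * \code
+  * ArrayXXd a = m.array() * m.array();  // coefficient-wise square of a matrix
+  * \endcode
+  *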
+  * \sa MatrixBase::array(), class MatrixWrapper
+  */
+
+namespace internal {
+template<typename ExpressionType>
+struct traits<ArrayWrapper<ExpressionType> >
+  : public traits<typename remove_all<typename ExpressionType::Nested>::type >
+{
+  typedef ArrayXpr XprKind;
+  // Let's remove NestByRefBit
+  enum {
+    Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
+    Flags = Flags0 & ~NestByRefBit
+  };
+};
+}
+
+template<typename ExpressionType>
+class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >
+{
+  public:
+    typedef ArrayBase<ArrayWrapper> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper)
+
+    typedef typename internal::conditional<
+                       internal::is_lvalue<ExpressionType>::value,
+                       Scalar,
+                       const Scalar
+                     >::type ScalarWithConstIfNotLvalue;
+
+    typedef typename internal::nested<ExpressionType>::type NestedExpressionType;
+
+    inline ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {}
+
+    inline Index rows() const { return m_expression.rows(); }
+    inline Index cols() const { return m_expression.cols(); }
+    inline Index outerStride() const { return m_expression.outerStride(); }
+    inline Index innerStride() const { return m_expression.innerStride(); }
+
+    inline ScalarWithConstIfNotLvalue* data() { return m_expression.const_cast_derived().data(); }
+    inline const Scalar* data() const { return m_expression.data(); }
+
+    inline CoeffReturnType coeff(Index rowId, Index colId) const
+    {
+      return m_expression.coeff(rowId, colId);
+    }
+
+    inline Scalar& coeffRef(Index rowId, Index colId)
+    {
+      return m_expression.const_cast_derived().coeffRef(rowId, colId);
+    }
+
+    inline const Scalar& coeffRef(Index rowId, Index colId) const
+    {
+      return m_expression.const_cast_derived().coeffRef(rowId, colId);
+    }
+
+    inline CoeffReturnType coeff(Index index) const
+    {
+      return m_expression.coeff(index);
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index rowId, Index colId) const
+    {
+      return m_expression.template packet<LoadMode>(rowId, colId);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
+    {
+      m_expression.const_cast_derived().template writePacket<LoadMode>(rowId, colId, val);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index index) const
+    {
+      return m_expression.template packet<LoadMode>(index);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& val)
+    {
+      m_expression.const_cast_derived().template writePacket<LoadMode>(index, val);
+    }
+
+    template<typename Dest>
+    inline void evalTo(Dest& dst) const { dst = m_expression; }
+
+    const typename internal::remove_all<NestedExpressionType>::type& 
+    nestedExpression() const 
+    {
+      return m_expression;
+    }
+
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index)  */
+    void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); }
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index,Index)*/
+    void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); }
+
+  protected:
+    NestedExpressionType m_expression;
+};
+
+/** \class MatrixWrapper
+  * \ingroup Core_Module
+  *
+  * \brief Expression of an array as a mathematical vector or matrix
+  *
+  * This class is the return type of ArrayBase::matrix(), and most of the time
+  * this is the only way it is used.
+  *
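+  * A typical sketch (assuming an \c ArrayXXd \c a with matching dimensions):
+  * \code
+  * MatrixXd p = a.matrix() * a.matrix().transpose();  // true matrix product
+  * \endcode
+  *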
+  * \sa MatrixBase::matrix(), class ArrayWrapper
+  */
+
+namespace internal {
+template<typename ExpressionType>
+struct traits<MatrixWrapper<ExpressionType> >
+ : public traits<typename remove_all<typename ExpressionType::Nested>::type >
+{
+  typedef MatrixXpr XprKind;
+  // Let's remove NestByRefBit
+  enum {
+    Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
+    Flags = Flags0 & ~NestByRefBit
+  };
+};
+}
+
+template<typename ExpressionType>
+class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >
+{
+  public:
+    typedef MatrixBase<MatrixWrapper<ExpressionType> > Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper)
+
+    typedef typename internal::conditional<
+                       internal::is_lvalue<ExpressionType>::value,
+                       Scalar,
+                       const Scalar
+                     >::type ScalarWithConstIfNotLvalue;
+
+    typedef typename internal::nested<ExpressionType>::type NestedExpressionType;
+
+    inline MatrixWrapper(ExpressionType& a_matrix) : m_expression(a_matrix) {}
+
+    inline Index rows() const { return m_expression.rows(); }
+    inline Index cols() const { return m_expression.cols(); }
+    inline Index outerStride() const { return m_expression.outerStride(); }
+    inline Index innerStride() const { return m_expression.innerStride(); }
+
+    inline ScalarWithConstIfNotLvalue* data() { return m_expression.const_cast_derived().data(); }
+    inline const Scalar* data() const { return m_expression.data(); }
+
+    inline CoeffReturnType coeff(Index rowId, Index colId) const
+    {
+      return m_expression.coeff(rowId, colId);
+    }
+
+    inline Scalar& coeffRef(Index rowId, Index colId)
+    {
+      return m_expression.const_cast_derived().coeffRef(rowId, colId);
+    }
+
+    inline const Scalar& coeffRef(Index rowId, Index colId) const
+    {
+      return m_expression.derived().coeffRef(rowId, colId);
+    }
+
+    inline CoeffReturnType coeff(Index index) const
+    {
+      return m_expression.coeff(index);
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index rowId, Index colId) const
+    {
+      return m_expression.template packet<LoadMode>(rowId, colId);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
+    {
+      m_expression.const_cast_derived().template writePacket<LoadMode>(rowId, colId, val);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index index) const
+    {
+      return m_expression.template packet<LoadMode>(index);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& val)
+    {
+      m_expression.const_cast_derived().template writePacket<LoadMode>(index, val);
+    }
+
+    const typename internal::remove_all<NestedExpressionType>::type& 
+    nestedExpression() const 
+    {
+      return m_expression;
+    }
+
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index)  */
+    void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); }
+    /** Forwards the resizing request to the nested expression
+      * \sa DenseBase::resize(Index,Index)*/
+    void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); }
+
+  protected:
+    NestedExpressionType m_expression;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_ARRAYWRAPPER_H
diff --git a/Eigen/src/Core/Assign.h b/Eigen/src/Core/Assign.h
new file mode 100644
index 0000000..bcfc261
--- /dev/null
+++ b/Eigen/src/Core/Assign.h
@@ -0,0 +1,590 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net>
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ASSIGN_H
+#define EIGEN_ASSIGN_H
+
+namespace Eigen {
+
+namespace internal {
+
+/***************************************************************************
+* Part 1 : the logic deciding a strategy for traversal and unrolling       *
+***************************************************************************/
+
+template <typename Derived, typename OtherDerived>
+struct assign_traits
+{
+public:
+  enum {
+    DstIsAligned = Derived::Flags & AlignedBit,
+    DstHasDirectAccess = Derived::Flags & DirectAccessBit,
+    SrcIsAligned = OtherDerived::Flags & AlignedBit,
+    JointAlignment = bool(DstIsAligned) && bool(SrcIsAligned) ? Aligned : Unaligned
+  };
+
+private:
+  enum {
+    InnerSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::SizeAtCompileTime)
+              : int(Derived::Flags)&RowMajorBit ? int(Derived::ColsAtCompileTime)
+              : int(Derived::RowsAtCompileTime),
+    InnerMaxSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::MaxSizeAtCompileTime)
+              : int(Derived::Flags)&RowMajorBit ? int(Derived::MaxColsAtCompileTime)
+              : int(Derived::MaxRowsAtCompileTime),
+    MaxSizeAtCompileTime = Derived::SizeAtCompileTime,
+    PacketSize = packet_traits<typename Derived::Scalar>::size
+  };
+
+  enum {
+    StorageOrdersAgree = (int(Derived::IsRowMajor) == int(OtherDerived::IsRowMajor)),
+    MightVectorize = StorageOrdersAgree
+                  && (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit),
+    MayInnerVectorize  = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0
+                       && int(DstIsAligned) && int(SrcIsAligned),
+    MayLinearize = StorageOrdersAgree && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit),
+    MayLinearVectorize = MightVectorize && MayLinearize && DstHasDirectAccess
+                       && (DstIsAligned || MaxSizeAtCompileTime == Dynamic),
+      /* If the destination isn't aligned, we have to do runtime checks and we don't unroll,
+         so it's only good for large enough sizes. */
+    MaySliceVectorize  = MightVectorize && DstHasDirectAccess
+                       && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=3*PacketSize)
+      /* slice vectorization can be slow, so we only want it if the slices are big, which is
+         indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block
+         in a fixed-size matrix */
+  };
+
+public:
+  enum {
+    Traversal = int(MayInnerVectorize)  ? int(InnerVectorizedTraversal)
+              : int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
+              : int(MaySliceVectorize)  ? int(SliceVectorizedTraversal)
+              : int(MayLinearize)       ? int(LinearTraversal)
+                                        : int(DefaultTraversal),
+    Vectorized = int(Traversal) == InnerVectorizedTraversal
+              || int(Traversal) == LinearVectorizedTraversal
+              || int(Traversal) == SliceVectorizedTraversal
+  };
+
+private:
+  enum {
+    UnrollingLimit      = EIGEN_UNROLLING_LIMIT * (Vectorized ? int(PacketSize) : 1),
+    MayUnrollCompletely = int(Derived::SizeAtCompileTime) != Dynamic
+                       && int(OtherDerived::CoeffReadCost) != Dynamic
+                       && int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit),
+    MayUnrollInner      = int(InnerSize) != Dynamic
+                       && int(OtherDerived::CoeffReadCost) != Dynamic
+                       && int(InnerSize) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit)
+  };
+
+public:
+  enum {
+    Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal))
+                ? (
+                    int(MayUnrollCompletely) ? int(CompleteUnrolling)
+                  : int(MayUnrollInner)      ? int(InnerUnrolling)
+                                             : int(NoUnrolling)
+                  )
+              : int(Traversal) == int(LinearVectorizedTraversal)
+                ? ( bool(MayUnrollCompletely) && bool(DstIsAligned) ? int(CompleteUnrolling) : int(NoUnrolling) )
+              : int(Traversal) == int(LinearTraversal)
+                ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) : int(NoUnrolling) )
+              : int(NoUnrolling)
+  };
+
+#ifdef EIGEN_DEBUG_ASSIGN
+  static void debug()
+  {
+    EIGEN_DEBUG_VAR(DstIsAligned)
+    EIGEN_DEBUG_VAR(SrcIsAligned)
+    EIGEN_DEBUG_VAR(JointAlignment)
+    EIGEN_DEBUG_VAR(InnerSize)
+    EIGEN_DEBUG_VAR(InnerMaxSize)
+    EIGEN_DEBUG_VAR(PacketSize)
+    EIGEN_DEBUG_VAR(StorageOrdersAgree)
+    EIGEN_DEBUG_VAR(MightVectorize)
+    EIGEN_DEBUG_VAR(MayLinearize)
+    EIGEN_DEBUG_VAR(MayInnerVectorize)
+    EIGEN_DEBUG_VAR(MayLinearVectorize)
+    EIGEN_DEBUG_VAR(MaySliceVectorize)
+    EIGEN_DEBUG_VAR(Traversal)
+    EIGEN_DEBUG_VAR(UnrollingLimit)
+    EIGEN_DEBUG_VAR(MayUnrollCompletely)
+    EIGEN_DEBUG_VAR(MayUnrollInner)
+    EIGEN_DEBUG_VAR(Unrolling)
+  }
+#endif
+};
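+
+// A minimal sketch of inspecting the decisions above (assumes a translation unit
+// that defines EIGEN_DEBUG_ASSIGN before including any Eigen header):
+//
+//   #define EIGEN_DEBUG_ASSIGN
+//   #include <Eigen/Core>
+//   int main()
+//   {
+//     // prints DstIsAligned, Traversal, Unrolling, ... for this assignment pair
+//     Eigen::internal::assign_traits<Eigen::Matrix4f, Eigen::Matrix4f>::debug();
+//   }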
+
+/***************************************************************************
+* Part 2 : meta-unrollers
+***************************************************************************/
+
+/************************
+*** Default traversal ***
+************************/
+
+template<typename Derived1, typename Derived2, int Index, int Stop>
+struct assign_DefaultTraversal_CompleteUnrolling
+{
+  enum {
+    outer = Index / Derived1::InnerSizeAtCompileTime,
+    inner = Index % Derived1::InnerSizeAtCompileTime
+  };
+
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
+  {
+    dst.copyCoeffByOuterInner(outer, inner, src);
+    assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Stop>
+struct assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
+{
+  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {}
+};
+
+template<typename Derived1, typename Derived2, int Index, int Stop>
+struct assign_DefaultTraversal_InnerUnrolling
+{
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, typename Derived1::Index outer)
+  {
+    dst.copyCoeffByOuterInner(outer, Index, src);
+    assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src, outer);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Stop>
+struct assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, Stop, Stop>
+{
+  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, typename Derived1::Index) {}
+};
+
+/***********************
+*** Linear traversal ***
+***********************/
+
+template<typename Derived1, typename Derived2, int Index, int Stop>
+struct assign_LinearTraversal_CompleteUnrolling
+{
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
+  {
+    dst.copyCoeff(Index, src);
+    assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Stop>
+struct assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
+{
+  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {}
+};
+
+/**************************
+*** Inner vectorization ***
+**************************/
+
+template<typename Derived1, typename Derived2, int Index, int Stop>
+struct assign_innervec_CompleteUnrolling
+{
+  enum {
+    outer = Index / Derived1::InnerSizeAtCompileTime,
+    inner = Index % Derived1::InnerSizeAtCompileTime,
+    JointAlignment = assign_traits<Derived1,Derived2>::JointAlignment
+  };
+
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
+  {
+    dst.template copyPacketByOuterInner<Derived2, Aligned, JointAlignment>(outer, inner, src);
+    assign_innervec_CompleteUnrolling<Derived1, Derived2,
+      Index+packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Stop>
+struct assign_innervec_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
+{
+  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {}
+};
+
+template<typename Derived1, typename Derived2, int Index, int Stop>
+struct assign_innervec_InnerUnrolling
+{
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, typename Derived1::Index outer)
+  {
+    dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, Index, src);
+    assign_innervec_InnerUnrolling<Derived1, Derived2,
+      Index+packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src, outer);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Stop>
+struct assign_innervec_InnerUnrolling<Derived1, Derived2, Stop, Stop>
+{
+  static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, typename Derived1::Index) {}
+};
+
+/***************************************************************************
+* Part 3 : implementation of all cases
+***************************************************************************/
+
+template<typename Derived1, typename Derived2,
+         int Traversal = assign_traits<Derived1, Derived2>::Traversal,
+         int Unrolling = assign_traits<Derived1, Derived2>::Unrolling,
+         int Version = Specialized>
+struct assign_impl;
+
+/************************
+*** Default traversal ***
+************************/
+
+template<typename Derived1, typename Derived2, int Unrolling, int Version>
+struct assign_impl<Derived1, Derived2, InvalidTraversal, Unrolling, Version>
+{
+  static inline void run(Derived1 &, const Derived2 &) { }
+};
+
+template<typename Derived1, typename Derived2, int Version>
+struct assign_impl<Derived1, Derived2, DefaultTraversal, NoUnrolling, Version>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    const Index innerSize = dst.innerSize();
+    const Index outerSize = dst.outerSize();
+    for(Index outer = 0; outer < outerSize; ++outer)
+      for(Index inner = 0; inner < innerSize; ++inner)
+        dst.copyCoeffByOuterInner(outer, inner, src);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Version>
+struct assign_impl<Derived1, Derived2, DefaultTraversal, CompleteUnrolling, Version>
+{
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
+  {
+    assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
+      ::run(dst, src);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Version>
+struct assign_impl<Derived1, Derived2, DefaultTraversal, InnerUnrolling, Version>
+{
+  typedef typename Derived1::Index Index;
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
+  {
+    const Index outerSize = dst.outerSize();
+    for(Index outer = 0; outer < outerSize; ++outer)
+      assign_DefaultTraversal_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
+        ::run(dst, src, outer);
+  }
+};
+
+/***********************
+*** Linear traversal ***
+***********************/
+
+template<typename Derived1, typename Derived2, int Version>
+struct assign_impl<Derived1, Derived2, LinearTraversal, NoUnrolling, Version>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    const Index size = dst.size();
+    for(Index i = 0; i < size; ++i)
+      dst.copyCoeff(i, src);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Version>
+struct assign_impl<Derived1, Derived2, LinearTraversal, CompleteUnrolling, Version>
+{
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
+  {
+    assign_LinearTraversal_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
+      ::run(dst, src);
+  }
+};
+
+/**************************
+*** Inner vectorization ***
+**************************/
+
+template<typename Derived1, typename Derived2, int Version>
+struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, NoUnrolling, Version>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    const Index innerSize = dst.innerSize();
+    const Index outerSize = dst.outerSize();
+    const Index packetSize = packet_traits<typename Derived1::Scalar>::size;
+    for(Index outer = 0; outer < outerSize; ++outer)
+      for(Index inner = 0; inner < innerSize; inner+=packetSize)
+        dst.template copyPacketByOuterInner<Derived2, Aligned, Aligned>(outer, inner, src);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Version>
+struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, CompleteUnrolling, Version>
+{
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
+  {
+    assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
+      ::run(dst, src);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Version>
+struct assign_impl<Derived1, Derived2, InnerVectorizedTraversal, InnerUnrolling, Version>
+{
+  typedef typename Derived1::Index Index;
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
+  {
+    const Index outerSize = dst.outerSize();
+    for(Index outer = 0; outer < outerSize; ++outer)
+      assign_innervec_InnerUnrolling<Derived1, Derived2, 0, Derived1::InnerSizeAtCompileTime>
+        ::run(dst, src, outer);
+  }
+};
+
+/***************************
+*** Linear vectorization ***
+***************************/
+
+template <bool IsAligned = false>
+struct unaligned_assign_impl
+{
+  template <typename Derived, typename OtherDerived>
+  static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, typename Derived::Index, typename Derived::Index) {}
+};
+
+template <>
+struct unaligned_assign_impl<false>
+{
+  // MSVC must not inline this function. If it does, it fails to optimize the
+  // packet access path.
+#ifdef _MSC_VER
+  template <typename Derived, typename OtherDerived>
+  static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
+#else
+  template <typename Derived, typename OtherDerived>
+  static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end)
+#endif
+  {
+    for (typename Derived::Index index = start; index < end; ++index)
+      dst.copyCoeff(index, src);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Version>
+struct assign_impl<Derived1, Derived2, LinearVectorizedTraversal, NoUnrolling, Version>
+{
+  typedef typename Derived1::Index Index;
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
+  {
+    const Index size = dst.size();
+    typedef packet_traits<typename Derived1::Scalar> PacketTraits;
+    enum {
+      packetSize = PacketTraits::size,
+      dstAlignment = PacketTraits::AlignedOnScalar ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
+      srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
+    };
+    const Index alignedStart = assign_traits<Derived1,Derived2>::DstIsAligned ? 0
+                             : internal::first_aligned(&dst.coeffRef(0), size);
+    const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
+
+    unaligned_assign_impl<assign_traits<Derived1,Derived2>::DstIsAligned!=0>::run(src,dst,0,alignedStart);
+
+    for(Index index = alignedStart; index < alignedEnd; index += packetSize)
+    {
+      dst.template copyPacket<Derived2, dstAlignment, srcAlignment>(index, src);
+    }
+
+    unaligned_assign_impl<>::run(src,dst,alignedEnd,size);
+  }
+};
+
+template<typename Derived1, typename Derived2, int Version>
+struct assign_impl<Derived1, Derived2, LinearVectorizedTraversal, CompleteUnrolling, Version>
+{
+  typedef typename Derived1::Index Index;
+  static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src)
+  {
+    enum { size = Derived1::SizeAtCompileTime,
+           packetSize = packet_traits<typename Derived1::Scalar>::size,
+           alignedSize = (size/packetSize)*packetSize };
+
+    assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, alignedSize>::run(dst, src);
+    assign_DefaultTraversal_CompleteUnrolling<Derived1, Derived2, alignedSize, size>::run(dst, src);
+  }
+};
+
+/**************************
+*** Slice vectorization ***
+***************************/
+
+template<typename Derived1, typename Derived2, int Version>
+struct assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling, Version>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    typedef typename Derived1::Scalar Scalar;
+    typedef packet_traits<Scalar> PacketTraits;
+    enum {
+      packetSize = PacketTraits::size,
+      alignable = PacketTraits::AlignedOnScalar,
+      dstIsAligned = assign_traits<Derived1,Derived2>::DstIsAligned,
+      dstAlignment = alignable ? Aligned : int(dstIsAligned),
+      srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
+    };
+    const Scalar *dst_ptr = &dst.coeffRef(0,0);
+    if((!bool(dstIsAligned)) && (Index(dst_ptr) % sizeof(Scalar))>0)
+    {
+      // the pointer is not aligned on the scalar size, so alignment is not possible
+      return assign_impl<Derived1,Derived2,DefaultTraversal,NoUnrolling>::run(dst, src);
+    }
+    const Index packetAlignedMask = packetSize - 1;
+    const Index innerSize = dst.innerSize();
+    const Index outerSize = dst.outerSize();
+    const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
+    Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 0 : internal::first_aligned(dst_ptr, innerSize);
+
+    for(Index outer = 0; outer < outerSize; ++outer)
+    {
+      const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
+      // do the non-vectorizable part of the assignment
+      for(Index inner = 0; inner<alignedStart ; ++inner)
+        dst.copyCoeffByOuterInner(outer, inner, src);
+
+      // do the vectorizable part of the assignment
+      for(Index inner = alignedStart; inner<alignedEnd; inner+=packetSize)
+        dst.template copyPacketByOuterInner<Derived2, dstAlignment, Unaligned>(outer, inner, src);
+
+      // do the non-vectorizable part of the assignment
+      for(Index inner = alignedEnd; inner<innerSize ; ++inner)
+        dst.copyCoeffByOuterInner(outer, inner, src);
+
+      alignedStart = std::min<Index>((alignedStart+alignedStep)%packetSize, innerSize);
+    }
+  }
+};
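+
+// [Editor's sketch, not part of the original source] How the per-column
+// realignment above behaves, assuming packetSize == 4 and an outer stride of
+// 6 scalars: alignedStep = (4 - 6 % 4) & 3 = 2, so if column k starts its
+// vectorized range at inner index s, column k+1 starts at (s + 2) % 4,
+// which is exactly the offset that restores packet alignment after the
+// 6-scalar jump between consecutive columns.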
+
+} // end namespace internal
+
+/***************************************************************************
+* Part 4 : implementation of DenseBase methods
+***************************************************************************/
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>
+  ::lazyAssign(const DenseBase<OtherDerived>& other)
+{
+  enum{
+    SameType = internal::is_same<typename Derived::Scalar,typename OtherDerived::Scalar>::value
+  };
+
+  EIGEN_STATIC_ASSERT_LVALUE(Derived)
+  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)
+  EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+#ifdef EIGEN_DEBUG_ASSIGN
+  internal::assign_traits<Derived, OtherDerived>::debug();
+#endif
+  eigen_assert(rows() == other.rows() && cols() == other.cols());
+  internal::assign_impl<Derived, OtherDerived, int(SameType) ? int(internal::assign_traits<Derived, OtherDerived>::Traversal)
+                                                       : int(InvalidTraversal)>::run(derived(),other.derived());
+#ifndef EIGEN_NO_DEBUG
+  checkTransposeAliasing(other.derived());
+#endif
+  return derived();
+}
+
+namespace internal {
+
+template<typename Derived, typename OtherDerived,
+         bool EvalBeforeAssigning = (int(internal::traits<OtherDerived>::Flags) & EvalBeforeAssigningBit) != 0,
+         bool NeedToTranspose = ((int(Derived::RowsAtCompileTime) == 1 && int(OtherDerived::ColsAtCompileTime) == 1)
+                              |   // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
+                                  // revert to || as soon as not needed anymore.
+                                  (int(Derived::ColsAtCompileTime) == 1 && int(OtherDerived::RowsAtCompileTime) == 1))
+                              && int(Derived::SizeAtCompileTime) != 1>
+struct assign_selector;
+
+template<typename Derived, typename OtherDerived>
+struct assign_selector<Derived,OtherDerived,false,false> {
+  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); }
+  template<typename ActualDerived, typename ActualOtherDerived>
+  static EIGEN_STRONG_INLINE Derived& evalTo(ActualDerived& dst, const ActualOtherDerived& other) { other.evalTo(dst); return dst; }
+};
+template<typename Derived, typename OtherDerived>
+struct assign_selector<Derived,OtherDerived,true,false> {
+  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); }
+};
+template<typename Derived, typename OtherDerived>
+struct assign_selector<Derived,OtherDerived,false,true> {
+  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); }
+  template<typename ActualDerived, typename ActualOtherDerived>
+  static EIGEN_STRONG_INLINE Derived& evalTo(ActualDerived& dst, const ActualOtherDerived& other) { Transpose<ActualDerived> dstTrans(dst); other.evalTo(dstTrans); return dst; }
+};
+template<typename Derived, typename OtherDerived>
+struct assign_selector<Derived,OtherDerived,true,true> {
+  static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); }
+};
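+
+// [Editor's sketch, not part of the original source] Which specialization of
+// assign_selector fires for a few representative assignments, assuming plain
+// dynamic-size dense objects:
+//   VectorXf v; RowVectorXf r; MatrixXf m;
+//   v = 2*v;  // <.., false, false>: plain lazyAssign
+//   v = r;    // <.., false, true >: lazyAssign(r.transpose())
+//   m = m*m;  // <.., true,  false>: the product carries
+//             // EvalBeforeAssigningBit, so lazyAssign((m*m).eval())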
+
+} // end namespace internal
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase<OtherDerived>& other)
+{
+  return internal::assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase& other)
+{
+  return internal::assign_selector<Derived,Derived>::run(derived(), other.derived());
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const MatrixBase& other)
+{
+  return internal::assign_selector<Derived,Derived>::run(derived(), other.derived());
+}
+
+template<typename Derived>
+template <typename OtherDerived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const DenseBase<OtherDerived>& other)
+{
+  return internal::assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
+}
+
+template<typename Derived>
+template <typename OtherDerived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other)
+{
+  return internal::assign_selector<Derived,OtherDerived,false>::evalTo(derived(), other.derived());
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
+{
+  return internal::assign_selector<Derived,OtherDerived,false>::evalTo(derived(), other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_ASSIGN_H
diff --git a/Eigen/src/Core/Assign_MKL.h b/Eigen/src/Core/Assign_MKL.h
new file mode 100644
index 0000000..7772951
--- /dev/null
+++ b/Eigen/src/Core/Assign_MKL.h
@@ -0,0 +1,224 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin()
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_ASSIGN_VML_H
+#define EIGEN_ASSIGN_VML_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Op> struct vml_call
+{ enum { IsSupported = 0 }; };
+
+template<typename Dst, typename Src, typename UnaryOp>
+class vml_assign_traits
+{
+  private:
+    enum {
+      DstHasDirectAccess = Dst::Flags & DirectAccessBit,
+      SrcHasDirectAccess = Src::Flags & DirectAccessBit,
+
+      StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)),
+      InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime)
+                : int(Dst::Flags)&RowMajorBit ? int(Dst::ColsAtCompileTime)
+                : int(Dst::RowsAtCompileTime),
+      InnerMaxSize  = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime)
+                    : int(Dst::Flags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime)
+                    : int(Dst::MaxRowsAtCompileTime),
+      MaxSizeAtCompileTime = Dst::SizeAtCompileTime,
+
+      MightEnableVml =  vml_call<UnaryOp>::IsSupported && StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess
+                     && Src::InnerStrideAtCompileTime==1 && Dst::InnerStrideAtCompileTime==1,
+      MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit),
+      VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize,
+      LargeEnough = VmlSize==Dynamic || VmlSize>=EIGEN_MKL_VML_THRESHOLD,
+      MayEnableVml = MightEnableVml && LargeEnough,
+      MayLinearize = MayEnableVml && MightLinearize
+    };
+  public:
+    enum {
+      Traversal = MayLinearize ? LinearVectorizedTraversal
+                : MayEnableVml ? InnerVectorizedTraversal
+                : DefaultTraversal
+    };
+};
+
+template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling,
+         int VmlTraversal = vml_assign_traits<Derived1, Derived2, UnaryOp>::Traversal >
+struct vml_assign_impl
+  : assign_impl<Derived1, Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>
+{
+};
+
+template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling>
+struct vml_assign_impl<Derived1, Derived2, UnaryOp, Traversal, Unrolling, InnerVectorizedTraversal>
+{
+  typedef typename Derived1::Scalar Scalar;
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1& dst, const CwiseUnaryOp<UnaryOp, Derived2>& src)
+  {
+    // in case we want to (or have to) skip VML at runtime we can call:
+    // assign_impl<Derived1,Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>::run(dst,src);
+    const Index innerSize = dst.innerSize();
+    const Index outerSize = dst.outerSize();
+    for(Index outer = 0; outer < outerSize; ++outer) {
+      const Scalar *src_ptr = src.IsRowMajor ?  &(src.nestedExpression().coeffRef(outer,0)) :
+                                                &(src.nestedExpression().coeffRef(0, outer));
+      Scalar *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer));
+      vml_call<UnaryOp>::run(src.functor(), innerSize, src_ptr, dst_ptr );
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2, typename UnaryOp, int Traversal, int Unrolling>
+struct vml_assign_impl<Derived1, Derived2, UnaryOp, Traversal, Unrolling, LinearVectorizedTraversal>
+{
+  static inline void run(Derived1& dst, const CwiseUnaryOp<UnaryOp, Derived2>& src)
+  {
+    // in case we want to (or have to) skip VML at runtime we can call:
+    // assign_impl<Derived1,Eigen::CwiseUnaryOp<UnaryOp, Derived2>,Traversal,Unrolling,BuiltIn>::run(dst,src);
+    vml_call<UnaryOp>::run(src.functor(), dst.size(), src.nestedExpression().data(), dst.data() );
+  }
+};
+
+// Macros
+
+#define EIGEN_MKL_VML_SPECIALIZE_ASSIGN(TRAVERSAL,UNROLLING) \
+  template<typename Derived1, typename Derived2, typename UnaryOp> \
+  struct assign_impl<Derived1, Eigen::CwiseUnaryOp<UnaryOp, Derived2>, TRAVERSAL, UNROLLING, Specialized>  {  \
+    static inline void run(Derived1 &dst, const Eigen::CwiseUnaryOp<UnaryOp, Derived2> &src) { \
+      vml_assign_impl<Derived1,Derived2,UnaryOp,TRAVERSAL,UNROLLING>::run(dst, src); \
+    } \
+  };
+
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,InnerUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,InnerUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,CompleteUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,NoUnrolling)
+EIGEN_MKL_VML_SPECIALIZE_ASSIGN(SliceVectorizedTraversal,NoUnrolling)
+
+
+#if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1)
+#define  EIGEN_MKL_VML_MODE VML_HA
+#else
+#define  EIGEN_MKL_VML_MODE VML_LA
+#endif
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE)     \
+  template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > {               \
+    enum { IsSupported = 1 };                                                    \
+    static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& /*func*/,        \
+                            int size, const EIGENTYPE* src, EIGENTYPE* dst) {    \
+      VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst);                           \
+    }                                                                            \
+  };
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE)  \
+  template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > {               \
+    enum { IsSupported = 1 };                                                    \
+    static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& /*func*/,        \
+                            int size, const EIGENTYPE* src, EIGENTYPE* dst) {    \
+      MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE;                                    \
+      VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst, vmlMode);                  \
+    }                                                                            \
+  };
+
+#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE)       \
+  template<> struct vml_call< scalar_##EIGENOP##_op<EIGENTYPE> > {               \
+    enum { IsSupported = 1 };                                                    \
+    static inline void run( const scalar_##EIGENOP##_op<EIGENTYPE>& func,        \
+                          int size, const EIGENTYPE* src, EIGENTYPE* dst) {      \
+      EIGENTYPE exponent = func.m_exponent;                                      \
+      MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE;                                    \
+      VMLOP(&size, (const VMLTYPE*)src, (const VMLTYPE*)&exponent,               \
+                        (VMLTYPE*)dst, &vmlMode);                                \
+    }                                                                            \
+  };
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP)                   \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vs##VMLOP, float, float)             \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vd##VMLOP, double, double)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP)                \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vc##VMLOP, scomplex, MKL_Complex8)   \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vz##VMLOP, dcomplex, MKL_Complex16)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP)                        \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP)                         \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP)
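+
+// [Editor's sketch, not part of the original source] For instance,
+// EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(sin, Sin) would expand, for the
+// float case, to roughly:
+//
+//   template<> struct vml_call< scalar_sin_op<float> > {
+//     enum { IsSupported = 1 };
+//     static inline void run(const scalar_sin_op<float>&,
+//                            int size, const float* src, float* dst) {
+//       vsSin(size, src, dst);  // MKL VML vectorized sine
+//     }
+//   };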
+
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP)                \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vms##VMLOP, float, float)         \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmd##VMLOP, double, double)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP)             \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmc##VMLOP, scomplex, MKL_Complex8)  \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmz##VMLOP, dcomplex, MKL_Complex16)
+
+#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(EIGENOP, VMLOP)                     \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP)                      \
+  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP)
+
+
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sin,  Sin)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(asin, Asin)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(cos,  Cos)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(acos, Acos)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(tan,  Tan)
+//EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs,  Abs)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(exp,  Exp)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(log,  Ln)
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sqrt, Sqrt)
+
+EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr)
+
+// The vm*powx functions are not available in the Windows version of MKL.
+#ifndef _WIN32
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmspowx_, float, float)
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdpowx_, double, double)
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcpowx_, scomplex, MKL_Complex8)
+EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzpowx_, dcomplex, MKL_Complex16)
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_ASSIGN_VML_H
diff --git a/Eigen/src/Core/BandMatrix.h b/Eigen/src/Core/BandMatrix.h
new file mode 100644
index 0000000..ffd7fe8
--- /dev/null
+++ b/Eigen/src/Core/BandMatrix.h
@@ -0,0 +1,334 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BANDMATRIX_H
+#define EIGEN_BANDMATRIX_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Derived>
+class BandMatrixBase : public EigenBase<Derived>
+{
+  public:
+
+    enum {
+      Flags = internal::traits<Derived>::Flags,
+      CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+      MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
+      Supers = internal::traits<Derived>::Supers,
+      Subs   = internal::traits<Derived>::Subs,
+      Options = internal::traits<Derived>::Options
+    };
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
+    typedef typename DenseMatrixType::Index Index;
+    typedef typename internal::traits<Derived>::CoefficientsType CoefficientsType;
+    typedef EigenBase<Derived> Base;
+
+  protected:
+    enum {
+      DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic))
+                            ? 1 + Supers + Subs
+                            : Dynamic,
+      SizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime)
+    };
+
+  public:
+    
+    using Base::derived;
+    using Base::rows;
+    using Base::cols;
+
+    /** \returns the number of super diagonals */
+    inline Index supers() const { return derived().supers(); }
+
+    /** \returns the number of sub diagonals */
+    inline Index subs() const { return derived().subs(); }
+    
+    /** \returns an expression of the underlying coefficient matrix */
+    inline const CoefficientsType& coeffs() const { return derived().coeffs(); }
+    
+    /** \returns an expression of the underlying coefficient matrix */
+    inline CoefficientsType& coeffs() { return derived().coeffs(); }
+
+    /** \returns a vector expression of the \a i -th column;
+      * only the meaningful part is returned.
+      * \warning the internal storage must be column major. */
+    inline Block<CoefficientsType,Dynamic,1> col(Index i)
+    {
+      EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+      Index start = 0;
+      Index len = coeffs().rows();
+      if (i<=supers())
+      {
+        start = supers()-i;
+        len = (std::min)(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i)));
+      }
+      else if (i>=rows()-subs())
+        len = std::max<Index>(0,coeffs().rows() - (i + 1 - rows() + subs()));
+      return Block<CoefficientsType,Dynamic,1>(coeffs(), start, i, len, 1);
+    }
+
+    /** \returns a vector expression of the main diagonal */
+    inline Block<CoefficientsType,1,SizeAtCompileTime> diagonal()
+    { return Block<CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }
+
+    /** \returns a vector expression of the main diagonal (const version) */
+    inline const Block<const CoefficientsType,1,SizeAtCompileTime> diagonal() const
+    { return Block<const CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }
+
+    template<int Index> struct DiagonalIntReturnType {
+      enum {
+        ReturnOpposite = (Options&SelfAdjoint) && (((Index)>0 && Supers==0) || ((Index)<0 && Subs==0)),
+        Conjugate = ReturnOpposite && NumTraits<Scalar>::IsComplex,
+        ActualIndex = ReturnOpposite ? -Index : Index,
+        DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic)
+                     ? Dynamic
+                     : (ActualIndex<0
+                     ? EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime, RowsAtCompileTime + ActualIndex)
+                     : EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime - ActualIndex))
+      };
+      typedef Block<CoefficientsType,1, DiagonalSize> BuildType;
+      typedef typename internal::conditional<Conjugate,
+                 CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>,BuildType >,
+                 BuildType>::type Type;
+    };
+
+    /** \returns a vector expression of the \a N -th sub or super diagonal */
+    template<int N> inline typename DiagonalIntReturnType<N>::Type diagonal()
+    {
+      return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));
+    }
+
+    /** \returns a vector expression of the \a N -th sub or super diagonal */
+    template<int N> inline const typename DiagonalIntReturnType<N>::Type diagonal() const
+    {
+      return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));
+    }
+
+    /** \returns a vector expression of the \a i -th sub or super diagonal */
+    inline Block<CoefficientsType,1,Dynamic> diagonal(Index i)
+    {
+      eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers()));
+      return Block<CoefficientsType,1,Dynamic>(coeffs(), supers()-i, std::max<Index>(0,i), 1, diagonalLength(i));
+    }
+
+    /** \returns a vector expression of the \a i -th sub or super diagonal */
+    inline const Block<const CoefficientsType,1,Dynamic> diagonal(Index i) const
+    {
+      eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers()));
+      return Block<const CoefficientsType,1,Dynamic>(coeffs(), supers()-i, std::max<Index>(0,i), 1, diagonalLength(i));
+    }
+    
+    template<typename Dest> inline void evalTo(Dest& dst) const
+    {
+      dst.resize(rows(),cols());
+      dst.setZero();
+      dst.diagonal() = diagonal();
+      for (Index i=1; i<=supers();++i)
+        dst.diagonal(i) = diagonal(i);
+      for (Index i=1; i<=subs();++i)
+        dst.diagonal(-i) = diagonal(-i);
+    }
+
+    DenseMatrixType toDenseMatrix() const
+    {
+      DenseMatrixType res(rows(),cols());
+      evalTo(res);
+      return res;
+    }
+
+  protected:
+
+    inline Index diagonalLength(Index i) const
+    { return i<0 ? (std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); }
+};
+
+/**
+  * \class BandMatrix
+  * \ingroup Core_Module
+  *
+  * \brief Represents a rectangular matrix with a banded storage
+  *
+  * \param _Scalar Numeric type, i.e. float, double, int
+  * \param Rows Number of rows, or \b Dynamic
+  * \param Cols Number of columns, or \b Dynamic
+  * \param Supers Number of super diagonals
+  * \param Subs Number of sub diagonals
+  * \param _Options A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint.
+  *                 The former controls \ref TopicStorageOrders "storage order", and defaults to
+  *                 column-major. The latter controls whether the matrix represents a selfadjoint
+  *                 matrix, in which case either Supers or Subs has to be null.
+  *
+  * \sa class TridiagonalMatrix
+  */
+
+template<typename _Scalar, int _Rows, int _Cols, int _Supers, int _Subs, int _Options>
+struct traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> >
+{
+  typedef _Scalar Scalar;
+  typedef Dense StorageKind;
+  typedef DenseIndex Index;
+  enum {
+    CoeffReadCost = NumTraits<Scalar>::ReadCost,
+    RowsAtCompileTime = _Rows,
+    ColsAtCompileTime = _Cols,
+    MaxRowsAtCompileTime = _Rows,
+    MaxColsAtCompileTime = _Cols,
+    Flags = LvalueBit,
+    Supers = _Supers,
+    Subs = _Subs,
+    Options = _Options,
+    DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic
+  };
+  typedef Matrix<Scalar,DataRowsAtCompileTime,ColsAtCompileTime,Options&RowMajor?RowMajor:ColMajor> CoefficientsType;
+};
+
+template<typename _Scalar, int Rows, int Cols, int Supers, int Subs, int Options>
+class BandMatrix : public BandMatrixBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Options> >
+{
+  public:
+
+    typedef typename internal::traits<BandMatrix>::Scalar Scalar;
+    typedef typename internal::traits<BandMatrix>::Index Index;
+    typedef typename internal::traits<BandMatrix>::CoefficientsType CoefficientsType;
+
+    inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs)
+      : m_coeffs(1+supers+subs,cols),
+        m_rows(rows), m_supers(supers), m_subs(subs)
+    {
+    }
+
+    /** \returns the number of rows */
+    inline Index rows() const { return m_rows.value(); }
+
+    /** \returns the number of columns */
+    inline Index cols() const { return m_coeffs.cols(); }
+
+    /** \returns the number of super diagonals */
+    inline Index supers() const { return m_supers.value(); }
+
+    /** \returns the number of sub diagonals */
+    inline Index subs() const { return m_subs.value(); }
+
+    inline const CoefficientsType& coeffs() const { return m_coeffs; }
+    inline CoefficientsType& coeffs() { return m_coeffs; }
+
+  protected:
+
+    CoefficientsType m_coeffs;
+    internal::variable_if_dynamic<Index, Rows>   m_rows;
+    internal::variable_if_dynamic<Index, Supers> m_supers;
+    internal::variable_if_dynamic<Index, Subs>   m_subs;
+};
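+
+// [Editor's sketch, not part of the original source] A minimal usage example
+// of the class above (it lives in namespace internal and is mostly used by
+// Eigen itself). A 6x6 band matrix with 2 super and 1 sub diagonal stores its
+// coefficients in a compact (2+1+1) x 6 array:
+//
+//   internal::BandMatrix<float,Dynamic,Dynamic,Dynamic,Dynamic> B(6,6,2,1);
+//   B.diagonal().setOnes();        // main diagonal
+//   B.diagonal(2).setConstant(5);  // second super diagonal
+//   B.diagonal(-1).setZero();      // first sub diagonal
+//   MatrixXf dense = B.toDenseMatrix();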
+
+template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options>
+class BandMatrixWrapper;
+
+template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options>
+struct traits<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> >
+{
+  typedef typename _CoefficientsType::Scalar Scalar;
+  typedef typename _CoefficientsType::StorageKind StorageKind;
+  typedef typename _CoefficientsType::Index Index;
+  enum {
+    CoeffReadCost = internal::traits<_CoefficientsType>::CoeffReadCost,
+    RowsAtCompileTime = _Rows,
+    ColsAtCompileTime = _Cols,
+    MaxRowsAtCompileTime = _Rows,
+    MaxColsAtCompileTime = _Cols,
+    Flags = LvalueBit,
+    Supers = _Supers,
+    Subs = _Subs,
+    Options = _Options,
+    DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic
+  };
+  typedef _CoefficientsType CoefficientsType;
+};
+
+template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options>
+class BandMatrixWrapper : public BandMatrixBase<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> >
+{
+  public:
+
+    typedef typename internal::traits<BandMatrixWrapper>::Scalar Scalar;
+    typedef typename internal::traits<BandMatrixWrapper>::CoefficientsType CoefficientsType;
+    typedef typename internal::traits<BandMatrixWrapper>::Index Index;
+
+    inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs)
+      : m_coeffs(coeffs),
+        m_rows(rows), m_supers(supers), m_subs(subs)
+    {
+      EIGEN_UNUSED_VARIABLE(cols);
+      //internal::assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows());
+    }
+
+    /** \returns the number of rows */
+    inline Index rows() const { return m_rows.value(); }
+
+    /** \returns the number of columns */
+    inline Index cols() const { return m_coeffs.cols(); }
+
+    /** \returns the number of super diagonals */
+    inline Index supers() const { return m_supers.value(); }
+
+    /** \returns the number of sub diagonals */
+    inline Index subs() const { return m_subs.value(); }
+
+    inline const CoefficientsType& coeffs() const { return m_coeffs; }
+
+  protected:
+
+    const CoefficientsType& m_coeffs;
+    internal::variable_if_dynamic<Index, _Rows>   m_rows;
+    internal::variable_if_dynamic<Index, _Supers> m_supers;
+    internal::variable_if_dynamic<Index, _Subs>   m_subs;
+};
+
+/**
+  * \class TridiagonalMatrix
+  * \ingroup Core_Module
+  *
+  * \brief Represents a tridiagonal matrix with a compact banded storage
+  *
+  * \param _Scalar Numeric type, i.e. float, double, int
+  * \param Size Number of rows and cols, or \b Dynamic
+  * \param _Options Can be 0 or \b SelfAdjoint
+  *
+  * \sa class BandMatrix
+  */
+template<typename Scalar, int Size, int Options>
+class TridiagonalMatrix : public BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor>
+{
+    typedef BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor> Base;
+    typedef typename Base::Index Index;
+  public:
+    TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {}
+
+    inline typename Base::template DiagonalIntReturnType<1>::Type super()
+    { return Base::template diagonal<1>(); }
+    inline const typename Base::template DiagonalIntReturnType<1>::Type super() const
+    { return Base::template diagonal<1>(); }
+    inline typename Base::template DiagonalIntReturnType<-1>::Type sub()
+    { return Base::template diagonal<-1>(); }
+    inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const
+    { return Base::template diagonal<-1>(); }
+  protected:
+};
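+
+// [Editor's sketch, not part of the original source] TridiagonalMatrix in
+// action, e.g. for the classical 1D Laplacian stencil of size n:
+//
+//   internal::TridiagonalMatrix<double,Dynamic> T(n);
+//   T.diagonal().setConstant(2.0);
+//   T.super().setConstant(-1.0);
+//   T.sub().setConstant(-1.0);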
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BANDMATRIX_H
diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h
new file mode 100644
index 0000000..8278944
--- /dev/null
+++ b/Eigen/src/Core/Block.h
@@ -0,0 +1,406 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BLOCK_H
+#define EIGEN_BLOCK_H
+
+namespace Eigen { 
+
+/** \class Block
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a fixed-size or dynamic-size block
+  *
+  * \param XprType the type of the expression in which we are taking a block
+  * \param BlockRows the number of rows of the block we are taking at compile time (optional)
+  * \param BlockCols the number of columns of the block we are taking at compile time (optional)
+  *
+  * This class represents an expression of either a fixed-size or dynamic-size block. It is the return
+  * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and
+  * most of the time this is the only way it is used.
+  *
+  * However, if you want to directly manipulate block expressions,
+  * for instance if you want to write a function returning such an expression, you
+  * will need to use this class.
+  *
+  * Here is an example illustrating the dynamic case:
+  * \include class_Block.cpp
+  * Output: \verbinclude class_Block.out
+  *
+  * \note Even though this expression has dynamic size, in the case where \a XprType
+  * has fixed size, this expression inherits a fixed maximal size which means that evaluating
+  * it does not cause a dynamic memory allocation.
+  *
+  * Here is an example illustrating the fixed-size case:
+  * \include class_FixedBlock.cpp
+  * Output: \verbinclude class_FixedBlock.out
+  *
+  * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock
+  */
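+
+// [Editor's sketch, not part of the original source] The two ways of forming
+// a block, matching the dynamic- and fixed-size constructors declared below:
+//
+//   MatrixXf m(10,10);
+//   Block<MatrixXf>     b1(m, 2, 3, 4, 5);  // dynamic-size 4x5 block at (2,3)
+//   Block<MatrixXf,4,5> b2(m, 2, 3);        // fixed-size  4x5 block at (2,3)
+//
+// Equivalently, via the public API: m.block(2,3,4,5) and m.block<4,5>(2,3).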
+
+namespace internal {
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprType>
+{
+  typedef typename traits<XprType>::Scalar Scalar;
+  typedef typename traits<XprType>::StorageKind StorageKind;
+  typedef typename traits<XprType>::XprKind XprKind;
+  typedef typename nested<XprType>::type XprTypeNested;
+  typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
+  enum{
+    MatrixRows = traits<XprType>::RowsAtCompileTime,
+    MatrixCols = traits<XprType>::ColsAtCompileTime,
+    RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows,
+    ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols,
+    MaxRowsAtCompileTime = BlockRows==0 ? 0
+                         : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime)
+                         : int(traits<XprType>::MaxRowsAtCompileTime),
+    MaxColsAtCompileTime = BlockCols==0 ? 0
+                         : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
+                         : int(traits<XprType>::MaxColsAtCompileTime),
+    XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
+    IsDense = is_same<StorageKind,Dense>::value,
+    IsRowMajor = (IsDense&&MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
+               : (IsDense&&MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
+               : XprTypeIsRowMajor,
+    HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
+    InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
+    InnerStrideAtCompileTime = HasSameStorageOrderAsXprType
+                             ? int(inner_stride_at_compile_time<XprType>::ret)
+                             : int(outer_stride_at_compile_time<XprType>::ret),
+    OuterStrideAtCompileTime = HasSameStorageOrderAsXprType
+                             ? int(outer_stride_at_compile_time<XprType>::ret)
+                             : int(inner_stride_at_compile_time<XprType>::ret),
+    MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits<Scalar>::size) == 0)
+                       && (InnerStrideAtCompileTime == 1)
+                        ? PacketAccessBit : 0,
+    MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0,
+    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (traits<XprType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
+    FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
+    FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
+    Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
+                                        DirectAccessBit |
+                                        MaskPacketAccessBit |
+                                        MaskAlignedBit),
+    Flags = Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit
+  };
+};
+
+template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false,
+         bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class BlockImpl_dense;
+         
+} // end namespace internal
+
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, typename StorageKind> class BlockImpl;
+
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class Block
+  : public BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind>
+{
+    typedef BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> Impl;
+  public:
+    //typedef typename Impl::Base Base;
+    typedef Impl Base;
+    EIGEN_GENERIC_PUBLIC_INTERFACE(Block)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
+  
+    /** Column or Row constructor
+      */
+    inline Block(XprType& xpr, Index i) : Impl(xpr,i)
+    {
+      eigen_assert( (i>=0) && (
+          ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())
+        ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols())));
+    }
+
+    /** Fixed-size constructor
+      */
+    inline Block(XprType& xpr, Index a_startRow, Index a_startCol)
+      : Impl(xpr, a_startRow, a_startCol)
+    {
+      EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)
+      eigen_assert(a_startRow >= 0 && BlockRows >= 1 && a_startRow + BlockRows <= xpr.rows()
+             && a_startCol >= 0 && BlockCols >= 1 && a_startCol + BlockCols <= xpr.cols());
+    }
+
+    /** Dynamic-size constructor
+      */
+    inline Block(XprType& xpr,
+          Index a_startRow, Index a_startCol,
+          Index blockRows, Index blockCols)
+      : Impl(xpr, a_startRow, a_startCol, blockRows, blockCols)
+    {
+      eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
+          && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
+      eigen_assert(a_startRow >= 0 && blockRows >= 0 && a_startRow  <= xpr.rows() - blockRows
+          && a_startCol >= 0 && blockCols >= 0 && a_startCol <= xpr.cols() - blockCols);
+    }
+};
+         
+// The generic default implementation for dense blocks simply forwards to the internal::BlockImpl_dense
+// that must be specialized for direct and non-direct access...
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense>
+  : public internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel>
+{
+    typedef internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> Impl;
+    typedef typename XprType::Index Index;
+  public:
+    typedef Impl Base;
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
+    inline BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {}
+    inline BlockImpl(XprType& xpr, Index a_startRow, Index a_startCol) : Impl(xpr, a_startRow, a_startCol) {}
+    inline BlockImpl(XprType& xpr, Index a_startRow, Index a_startCol, Index blockRows, Index blockCols)
+      : Impl(xpr, a_startRow, a_startCol, blockRows, blockCols) {}
+};
+
+namespace internal {
+
+/** \internal Internal implementation of dense Blocks in the general case. */
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess> class BlockImpl_dense
+  : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel> >::type
+{
+    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
+  public:
+
+    typedef typename internal::dense_xpr_base<BlockType>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)
+
+    class InnerIterator;
+
+    /** Column or Row constructor
+      */
+    inline BlockImpl_dense(XprType& xpr, Index i)
+      : m_xpr(xpr),
+        // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,
+        // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,
+        // all other cases are invalid.
+        // The case of a 1x1 matrix seems ambiguous, but the result is the same anyway.
+        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
+        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
+        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
+        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
+    {}
+
+    /** Fixed-size constructor
+      */
+    inline BlockImpl_dense(XprType& xpr, Index a_startRow, Index a_startCol)
+      : m_xpr(xpr), m_startRow(a_startRow), m_startCol(a_startCol),
+                    m_blockRows(BlockRows), m_blockCols(BlockCols)
+    {}
+
+    /** Dynamic-size constructor
+      */
+    inline BlockImpl_dense(XprType& xpr,
+          Index a_startRow, Index a_startCol,
+          Index blockRows, Index blockCols)
+      : m_xpr(xpr), m_startRow(a_startRow), m_startCol(a_startCol),
+                    m_blockRows(blockRows), m_blockCols(blockCols)
+    {}
+
+    inline Index rows() const { return m_blockRows.value(); }
+    inline Index cols() const { return m_blockCols.value(); }
+
+    inline Scalar& coeffRef(Index rowId, Index colId)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(XprType)
+      return m_xpr.const_cast_derived()
+               .coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
+    }
+
+    inline const Scalar& coeffRef(Index rowId, Index colId) const
+    {
+      return m_xpr.derived()
+               .coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());
+    }
+
+    EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const
+    {
+      return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value());
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(XprType)
+      return m_xpr.const_cast_derived()
+             .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                       m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      return m_xpr.const_cast_derived()
+             .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                       m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+
+    inline const CoeffReturnType coeff(Index index) const
+    {
+      return m_xpr
+             .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                    m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+
+    template<int LoadMode>
+    inline PacketScalar packet(Index rowId, Index colId) const
+    {
+      return m_xpr.template packet<Unaligned>
+              (rowId + m_startRow.value(), colId + m_startCol.value());
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
+    {
+      m_xpr.const_cast_derived().template writePacket<Unaligned>
+              (rowId + m_startRow.value(), colId + m_startCol.value(), val);
+    }
+
+    template<int LoadMode>
+    inline PacketScalar packet(Index index) const
+    {
+      return m_xpr.template packet<Unaligned>
+              (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& val)
+    {
+      m_xpr.const_cast_derived().template writePacket<Unaligned>
+         (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+          m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val);
+    }
+
+    #ifdef EIGEN_PARSED_BY_DOXYGEN
+    /** \sa MapBase::data() */
+    inline const Scalar* data() const;
+    inline Index innerStride() const;
+    inline Index outerStride() const;
+    #endif
+
+    const typename internal::remove_all<typename XprType::Nested>::type& nestedExpression() const 
+    { 
+      return m_xpr; 
+    }
+      
+    Index startRow() const 
+    { 
+      return m_startRow.value(); 
+    }
+      
+    Index startCol() const 
+    { 
+      return m_startCol.value(); 
+    }
+
+  protected:
+
+    const typename XprType::Nested m_xpr;
+    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
+    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
+};
+
+/** \internal Internal implementation of dense Blocks in the direct access case.*/
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
+  : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel> >
+{
+    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
+  public:
+
+    typedef MapBase<BlockType> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)
+
+    /** Column or Row constructor
+      */
+    inline BlockImpl_dense(XprType& xpr, Index i)
+      : Base(internal::const_cast_ptr(&xpr.coeffRef(
+              (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0,
+              (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0)),
+             BlockRows==1 ? 1 : xpr.rows(),
+             BlockCols==1 ? 1 : xpr.cols()),
+        m_xpr(xpr)
+    {
+      init();
+    }
+
+    /** Fixed-size constructor
+      */
+    inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
+      : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol))), m_xpr(xpr)
+    {
+      init();
+    }
+
+    /** Dynamic-size constructor
+      */
+    inline BlockImpl_dense(XprType& xpr,
+          Index startRow, Index startCol,
+          Index blockRows, Index blockCols)
+      : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol)), blockRows, blockCols),
+        m_xpr(xpr)
+    {
+      init();
+    }
+
+    const typename internal::remove_all<typename XprType::Nested>::type& nestedExpression() const 
+    { 
+      return m_xpr; 
+    }
+      
+    /** \sa MapBase::innerStride() */
+    inline Index innerStride() const
+    {
+      return internal::traits<BlockType>::HasSameStorageOrderAsXprType
+             ? m_xpr.innerStride()
+             : m_xpr.outerStride();
+    }
+
+    /** \sa MapBase::outerStride() */
+    inline Index outerStride() const
+    {
+      return m_outerStride;
+    }
+
+  #ifndef __SUNPRO_CC
+  // FIXME sunstudio is not friendly with the above friend...
+  // META-FIXME there is no 'friend' keyword around here. Is this obsolete?
+  protected:
+  #endif
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal used by allowAligned() */
+    inline BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)
+      : Base(data, blockRows, blockCols), m_xpr(xpr)
+    {
+      init();
+    }
+    #endif
+
+  protected:
+    void init()
+    {
+      m_outerStride = internal::traits<BlockType>::HasSameStorageOrderAsXprType
+                    ? m_xpr.outerStride()
+                    : m_xpr.innerStride();
+    }
+
+    typename XprType::Nested m_xpr;
+    Index m_outerStride;
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BLOCK_H
diff --git a/Eigen/src/Core/BooleanRedux.h b/Eigen/src/Core/BooleanRedux.h
new file mode 100644
index 0000000..be9f48a
--- /dev/null
+++ b/Eigen/src/Core/BooleanRedux.h
@@ -0,0 +1,154 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ALLANDANY_H
+#define EIGEN_ALLANDANY_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Derived, int UnrollCount>
+struct all_unroller
+{
+  enum {
+    col = (UnrollCount-1) / Derived::RowsAtCompileTime,
+    row = (UnrollCount-1) % Derived::RowsAtCompileTime
+  };
+
+  static inline bool run(const Derived &mat)
+  {
+    return all_unroller<Derived, UnrollCount-1>::run(mat) && mat.coeff(row, col);
+  }
+};
+
+template<typename Derived>
+struct all_unroller<Derived, 0>
+{
+  static inline bool run(const Derived &/*mat*/) { return true; }
+};
+
+template<typename Derived>
+struct all_unroller<Derived, Dynamic>
+{
+  static inline bool run(const Derived &) { return false; }
+};
+
+template<typename Derived, int UnrollCount>
+struct any_unroller
+{
+  enum {
+    col = (UnrollCount-1) / Derived::RowsAtCompileTime,
+    row = (UnrollCount-1) % Derived::RowsAtCompileTime
+  };
+
+  static inline bool run(const Derived &mat)
+  {
+    return any_unroller<Derived, UnrollCount-1>::run(mat) || mat.coeff(row, col);
+  }
+};
+
+template<typename Derived>
+struct any_unroller<Derived, 0>
+{
+  static inline bool run(const Derived & /*mat*/) { return false; }
+};
+
+template<typename Derived>
+struct any_unroller<Derived, Dynamic>
+{
+  static inline bool run(const Derived &) { return false; }
+};
+
+} // end namespace internal
+
+/** \returns true if all coefficients are true
+  *
+  * Example: \include MatrixBase_all.cpp
+  * Output: \verbinclude MatrixBase_all.out
+  *
+  * \sa any(), Cwise::operator<()
+  */
+template<typename Derived>
+inline bool DenseBase<Derived>::all() const
+{
+  enum {
+    unroll = SizeAtCompileTime != Dynamic
+          && CoeffReadCost != Dynamic
+          && NumTraits<Scalar>::AddCost != Dynamic
+          && SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
+  };
+  if(unroll)
+    return internal::all_unroller<Derived, unroll ? int(SizeAtCompileTime) : Dynamic>::run(derived());
+  else
+  {
+    for(Index j = 0; j < cols(); ++j)
+      for(Index i = 0; i < rows(); ++i)
+        if (!coeff(i, j)) return false;
+    return true;
+  }
+}
+
+/** \returns true if at least one coefficient is true
+  *
+  * \sa all()
+  */
+template<typename Derived>
+inline bool DenseBase<Derived>::any() const
+{
+  enum {
+    unroll = SizeAtCompileTime != Dynamic
+          && CoeffReadCost != Dynamic
+          && NumTraits<Scalar>::AddCost != Dynamic
+          && SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT
+  };
+  if(unroll)
+    return internal::any_unroller<Derived, unroll ? int(SizeAtCompileTime) : Dynamic>::run(derived());
+  else
+  {
+    for(Index j = 0; j < cols(); ++j)
+      for(Index i = 0; i < rows(); ++i)
+        if (coeff(i, j)) return true;
+    return false;
+  }
+}
+
+/** \returns the number of coefficients which evaluate to true
+  *
+  * \sa all(), any()
+  */
+template<typename Derived>
+inline typename DenseBase<Derived>::Index DenseBase<Derived>::count() const
+{
+  return derived().template cast<bool>().template cast<Index>().sum();
+}
+
+/** \returns true if \c *this contains at least one Not A Number (NaN).
+  *
+  * \sa allFinite()
+  */
+template<typename Derived>
+inline bool DenseBase<Derived>::hasNaN() const
+{
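+  // Relies on the IEEE-754 rule that NaN compares unequal to everything,
+  // including itself: x == x is false exactly when x is NaN.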
+  return !((derived().array()==derived().array()).all());
+}
+
+/** \returns true if \c *this contains only finite numbers, i.e., no NaN and no +/-INF values.
+  *
+  * \sa hasNaN()
+  */
+template<typename Derived>
+inline bool DenseBase<Derived>::allFinite() const
+{
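+  // x - x is NaN whenever x is NaN or +/-inf, so checking the difference
+  // for NaNs catches both kinds of non-finite values at once.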
+  return !((derived()-derived()).hasNaN());
+}
+    
+} // end namespace Eigen
+
+#endif // EIGEN_ALLANDANY_H
diff --git a/Eigen/src/Core/CMakeLists.txt b/Eigen/src/Core/CMakeLists.txt
new file mode 100644
index 0000000..2346fc2
--- /dev/null
+++ b/Eigen/src/Core/CMakeLists.txt
@@ -0,0 +1,10 @@
+FILE(GLOB Eigen_Core_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Core_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core COMPONENT Devel
+  )
+
+ADD_SUBDIRECTORY(products)
+ADD_SUBDIRECTORY(util)
+ADD_SUBDIRECTORY(arch)
diff --git a/Eigen/src/Core/CommaInitializer.h b/Eigen/src/Core/CommaInitializer.h
new file mode 100644
index 0000000..a036d8c
--- /dev/null
+++ b/Eigen/src/Core/CommaInitializer.h
@@ -0,0 +1,154 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMMAINITIALIZER_H
+#define EIGEN_COMMAINITIALIZER_H
+
+namespace Eigen { 
+
+/** \class CommaInitializer
+  * \ingroup Core_Module
+  *
+  * \brief Helper class used by the comma initializer operator
+  *
+  * This class is internally used to implement the comma initializer feature. It is
+  * the return type of MatrixBase::operator<<, and most of the time this is the only
+  * way it is used.
+  *
+  * \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished()
+  */
+template<typename XprType>
+struct CommaInitializer
+{
+  typedef typename XprType::Scalar Scalar;
+  typedef typename XprType::Index Index;
+
+  inline CommaInitializer(XprType& xpr, const Scalar& s)
+    : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)
+  {
+    m_xpr.coeffRef(0,0) = s;
+  }
+
+  template<typename OtherDerived>
+  inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)
+    : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
+  {
+    m_xpr.block(0, 0, other.rows(), other.cols()) = other;
+  }
+
+  /* Copy/Move constructor which transfers ownership. This is crucial in the
+   * absence of return value optimization to avoid assertions during destruction. */
+  // FIXME in C++11 mode this could be replaced by a proper RValue constructor
+  inline CommaInitializer(const CommaInitializer& o)
+  : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
+    // Mark original object as finished. In the absence of rvalue references we need to const_cast:
+    const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
+    const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
+    const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
+  }
+
+  /* inserts a scalar value in the target matrix */
+  CommaInitializer& operator,(const Scalar& s)
+  {
+    if (m_col==m_xpr.cols())
+    {
+      m_row+=m_currentBlockRows;
+      m_col = 0;
+      m_currentBlockRows = 1;
+      eigen_assert(m_row<m_xpr.rows()
+        && "Too many rows passed to comma initializer (operator<<)");
+    }
+    eigen_assert(m_col<m_xpr.cols()
+      && "Too many coefficients passed to comma initializer (operator<<)");
+    eigen_assert(m_currentBlockRows==1);
+    m_xpr.coeffRef(m_row, m_col++) = s;
+    return *this;
+  }
+
+  /* inserts a matrix expression in the target matrix */
+  template<typename OtherDerived>
+  CommaInitializer& operator,(const DenseBase<OtherDerived>& other)
+  {
+    if(other.cols()==0 || other.rows()==0)
+      return *this;
+    if (m_col==m_xpr.cols())
+    {
+      m_row+=m_currentBlockRows;
+      m_col = 0;
+      m_currentBlockRows = other.rows();
+      eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()
+        && "Too many rows passed to comma initializer (operator<<)");
+    }
+    eigen_assert(m_col<m_xpr.cols()
+      && "Too many coefficients passed to comma initializer (operator<<)");
+    eigen_assert(m_currentBlockRows==other.rows());
+    if (OtherDerived::SizeAtCompileTime != Dynamic)
+      m_xpr.template block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
+                              OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1>
+                    (m_row, m_col) = other;
+    else
+      m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other;
+    m_col += other.cols();
+    return *this;
+  }
+
+  inline ~CommaInitializer()
+  {
+    eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows()
+         && m_col == m_xpr.cols()
+         && "Too few coefficients passed to comma initializer (operator<<)");
+  }
+
+  /** \returns the built matrix once all its coefficients have been set.
+    * Calling finished() is 100% optional. Its purpose is to allow writing expressions
+    * like this:
+    * \code
+    * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
+    * \endcode
+    */
+  inline XprType& finished() { return m_xpr; }
+
+  XprType& m_xpr;   // target expression
+  Index m_row;              // current row id
+  Index m_col;              // current col id
+  Index m_currentBlockRows; // current block height
+};
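+
+// A minimal usage sketch (illustrative only, not part of this header; assumes
+// <Eigen/Dense> is included and the Eigen namespace is in scope). Coefficients
+// are consumed in row-major order, matrix sub-expressions fill whole blocks,
+// and the destructor asserts that exactly rows()*cols() coefficients were set:
+//
+//   void comma_init_demo()
+//   {
+//     Matrix3i m;
+//     m << 1, 2, 3,
+//          4, 5, 6,
+//          7, 8, 9;                                    // 9 coefficients for 3x3
+//     Vector3i v = (Vector3i() << 1, 2, 3).finished(); // temporary + finished()
+//   }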
+
+/** \anchor MatrixBaseCommaInitRef
+  * Convenient operator to set the coefficients of a matrix.
+  *
+  * The coefficients must be provided in row-major order and exactly match
+  * the size of the matrix. Otherwise an assertion is raised.
+  *
+  * Example: \include MatrixBase_set.cpp
+  * Output: \verbinclude MatrixBase_set.out
+  * 
+  * \note According to the C++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.
+  *
+  * \sa CommaInitializer::finished(), class CommaInitializer
+  */
+template<typename Derived>
+inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)
+{
+  return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);
+}
+
+/** \sa operator<<(const Scalar&) */
+template<typename Derived>
+template<typename OtherDerived>
+inline CommaInitializer<Derived>
+DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)
+{
+  return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMMAINITIALIZER_H
diff --git a/Eigen/src/Core/CoreIterators.h b/Eigen/src/Core/CoreIterators.h
new file mode 100644
index 0000000..6da4683
--- /dev/null
+++ b/Eigen/src/Core/CoreIterators.h
@@ -0,0 +1,61 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COREITERATORS_H
+#define EIGEN_COREITERATORS_H
+
+namespace Eigen { 
+
+/* This file contains the generic InnerIterator definition for the dense expressions defined in Eigen/Core
+ */
+
+/** \ingroup SparseCore_Module
+  * \class InnerIterator
+  * \brief An InnerIterator allows iterating over the elements of a sparse (or dense) matrix or expression
+  *
+  * todo
+  */
+
+// generic version for dense matrix and expressions
+template<typename Derived> class DenseBase<Derived>::InnerIterator
+{
+  protected:
+    typedef typename Derived::Scalar Scalar;
+    typedef typename Derived::Index Index;
+
+    enum { IsRowMajor = (Derived::Flags&RowMajorBit)==RowMajorBit };
+  public:
+    EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, Index outer)
+      : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.innerSize())
+    {}
+
+    EIGEN_STRONG_INLINE Scalar value() const
+    {
+      return (IsRowMajor) ? m_expression.coeff(m_outer, m_inner)
+                          : m_expression.coeff(m_inner, m_outer);
+    }
+
+    EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; }
+
+    EIGEN_STRONG_INLINE Index index() const { return m_inner; }
+    inline Index row() const { return IsRowMajor ? m_outer : index(); }
+    inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+    EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
+
+  protected:
+    const Derived& m_expression;
+    Index m_inner;
+    const Index m_outer;
+    const Index m_end;
+};
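+
+// A minimal sketch (illustrative only, not part of this header): summing one
+// outer slice (a column, for the default column-major storage) of a dense
+// matrix with the InnerIterator defined above:
+//
+//   double column_sum(const MatrixXd& m, MatrixXd::Index j)
+//   {
+//     double sum = 0;
+//     for (MatrixXd::InnerIterator it(m, j); it; ++it)
+//       sum += it.value();                 // it.row(), it.col(), it.index()
+//     return sum;
+//   }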
+
+} // end namespace Eigen
+
+#endif // EIGEN_COREITERATORS_H
diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h
new file mode 100644
index 0000000..586f77a
--- /dev/null
+++ b/Eigen/src/Core/CwiseBinaryOp.h
@@ -0,0 +1,229 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CWISE_BINARY_OP_H
+#define EIGEN_CWISE_BINARY_OP_H
+
+namespace Eigen {
+
+/** \class CwiseBinaryOp
+  * \ingroup Core_Module
+  *
+  * \brief Generic expression where a coefficient-wise binary operator is applied to two expressions
+  *
+  * \param BinaryOp template functor implementing the operator
+  * \param Lhs the type of the left-hand side
+  * \param Rhs the type of the right-hand side
+  *
+  * This class represents an expression where a coefficient-wise binary operator is applied to two expressions.
+  * It is the return type of binary operators, by which we mean only those binary operators where
+  * both the left-hand side and the right-hand side are Eigen expressions.
+  * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp.
+  *
+  * Most of the time, this is the only way that it is used, so you typically don't have to name
+  * CwiseBinaryOp types explicitly.
+  *
+  * \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp
+  */
+
+namespace internal {
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+  // we must not inherit from traits<Lhs> since it has
+  // the potential to cause problems with MSVC
+  typedef typename remove_all<Lhs>::type Ancestor;
+  typedef typename traits<Ancestor>::XprKind XprKind;
+  enum {
+    RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,
+    ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,
+    MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime
+  };
+
+  // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor),
+  // we still want to handle the case when the result type is different.
+  typedef typename result_of<
+                     BinaryOp(
+                       typename Lhs::Scalar,
+                       typename Rhs::Scalar
+                     )
+                   >::type Scalar;
+  typedef typename promote_storage_type<typename traits<Lhs>::StorageKind,
+                                           typename traits<Rhs>::StorageKind>::ret StorageKind;
+  typedef typename promote_index_type<typename traits<Lhs>::Index,
+                                         typename traits<Rhs>::Index>::type Index;
+  typedef typename Lhs::Nested LhsNested;
+  typedef typename Rhs::Nested RhsNested;
+  typedef typename remove_reference<LhsNested>::type _LhsNested;
+  typedef typename remove_reference<RhsNested>::type _RhsNested;
+  enum {
+    LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+    RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+    LhsFlags = _LhsNested::Flags,
+    RhsFlags = _RhsNested::Flags,
+    SameType = is_same<typename _LhsNested::Scalar,typename _RhsNested::Scalar>::value,
+    StorageOrdersAgree = (int(Lhs::Flags)&RowMajorBit)==(int(Rhs::Flags)&RowMajorBit),
+    Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
+        HereditaryBits
+      | (int(LhsFlags) & int(RhsFlags) &
+           ( AlignedBit
+           | (StorageOrdersAgree ? LinearAccessBit : 0)
+           | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
+           )
+        )
+     ),
+    Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
+    CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits<BinaryOp>::Cost
+  };
+};
+} // end namespace internal
+
+// we require Lhs and Rhs to have the same scalar type. Currently there is no example of a binary functor
+// that would take two operands of different types. If there were such an example, then this check should be
+// moved to the BinaryOp functors, on a per-case basis. This would however require a change in the BinaryOp functors, as
+// currently they take only one typename Scalar template parameter.
+// It is tempting to always allow mixing different types, but remember that this is often impossible in the vectorized paths.
+// So allowing different types gives very unexpected errors once vectorization is enabled, e.g. when the user tries to
+// add a float matrix to a double matrix.
+#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
+  EIGEN_STATIC_ASSERT((internal::functor_is_product_like<BINOP>::ret \
+                        ? int(internal::scalar_product_traits<LHS, RHS>::Defined) \
+                        : int(internal::is_same<LHS, RHS>::value)), \
+    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>
+class CwiseBinaryOpImpl;
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOp : internal::no_assignment_operator,
+  public CwiseBinaryOpImpl<
+          BinaryOp, Lhs, Rhs,
+          typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
+                                           typename internal::traits<Rhs>::StorageKind>::ret>
+{
+  public:
+
+    typedef typename CwiseBinaryOpImpl<
+        BinaryOp, Lhs, Rhs,
+        typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
+                                         typename internal::traits<Rhs>::StorageKind>::ret>::Base Base;
+    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)
+
+    typedef typename internal::nested<Lhs>::type LhsNested;
+    typedef typename internal::nested<Rhs>::type RhsNested;
+    typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
+    typedef typename internal::remove_reference<RhsNested>::type _RhsNested;
+
+    EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp())
+      : m_lhs(aLhs), m_rhs(aRhs), m_functor(func)
+    {
+      EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar);
+      // require the sizes to match
+      EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)
+      eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols());
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const {
+      // return the row count of the fixed-size operand when available, to enable compile-time optimizations
+      if (internal::traits<typename internal::remove_all<LhsNested>::type>::RowsAtCompileTime==Dynamic)
+        return m_rhs.rows();
+      else
+        return m_lhs.rows();
+    }
+    EIGEN_STRONG_INLINE Index cols() const {
+      // return the column count of the fixed-size operand when available, to enable compile-time optimizations
+      if (internal::traits<typename internal::remove_all<LhsNested>::type>::ColsAtCompileTime==Dynamic)
+        return m_rhs.cols();
+      else
+        return m_lhs.cols();
+    }
+
+    /** \returns the left hand side nested expression */
+    const _LhsNested& lhs() const { return m_lhs; }
+    /** \returns the right hand side nested expression */
+    const _RhsNested& rhs() const { return m_rhs; }
+    /** \returns the functor representing the binary operation */
+    const BinaryOp& functor() const { return m_functor; }
+
+  protected:
+    LhsNested m_lhs;
+    RhsNested m_rhs;
+    const BinaryOp m_functor;
+};
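+
+// A minimal sketch (illustrative only, not part of this header) of obtaining a
+// CwiseBinaryOp via MatrixBase::binaryExpr() with a hypothetical user-defined
+// functor; the nested result_type typedef helps Eigen deduce the result scalar
+// on pre-C++11 compilers:
+//
+//   struct CoeffMaxOp {
+//     typedef float result_type;
+//     float operator()(float a, float b) const { return a > b ? a : b; }
+//   };
+//
+//   MatrixXf coeff_max(const MatrixXf& a, const MatrixXf& b)
+//   {
+//     return a.binaryExpr(b, CoeffMaxOp()); // lazy; evaluated on assignment
+//   }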
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Dense>
+  : public internal::dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type
+{
+    typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
+  public:
+
+    typedef typename internal::dense_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE( Derived )
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const
+    {
+      return derived().functor()(derived().lhs().coeff(rowId, colId),
+                                 derived().rhs().coeff(rowId, colId));
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
+    {
+      return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(rowId, colId),
+                                          derived().rhs().template packet<LoadMode>(rowId, colId));
+    }
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
+    {
+      return derived().functor()(derived().lhs().coeff(index),
+                                 derived().rhs().coeff(index));
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
+    {
+      return derived().functor().packetOp(derived().lhs().template packet<LoadMode>(index),
+                                          derived().rhs().template packet<LoadMode>(index));
+    }
+};
+
+/** replaces \c *this by \c *this - \a other.
+  *
+  * \returns a reference to \c *this
+  */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
+{
+  SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, Derived, OtherDerived> tmp(derived());
+  tmp = other.derived();
+  return derived();
+}
+
+/** replaces \c *this by \c *this + \a other.
+  *
+  * \returns a reference to \c *this
+  */
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)
+{
+  SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, Derived, OtherDerived> tmp(derived());
+  tmp = other.derived();
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_CWISE_BINARY_OP_H
diff --git a/Eigen/src/Core/CwiseNullaryOp.h b/Eigen/src/Core/CwiseNullaryOp.h
new file mode 100644
index 0000000..a93bab2
--- /dev/null
+++ b/Eigen/src/Core/CwiseNullaryOp.h
@@ -0,0 +1,864 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CWISE_NULLARY_OP_H
+#define EIGEN_CWISE_NULLARY_OP_H
+
+namespace Eigen {
+
+/** \class CwiseNullaryOp
+  * \ingroup Core_Module
+  *
+  * \brief Generic expression of a matrix where all coefficients are defined by a functor
+  *
+  * \param NullaryOp template functor implementing the operator
+  * \param PlainObjectType the underlying plain matrix/array type
+  *
+  * This class represents an expression of a generic nullary operator.
+  * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods,
+  * and most of the time this is the only way it is used.
+  *
+  * However, if you want to write a function returning such an expression, you
+  * will need to use this class.
+  *
+  * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr()
+  */
+
+namespace internal {
+template<typename NullaryOp, typename PlainObjectType>
+struct traits<CwiseNullaryOp<NullaryOp, PlainObjectType> > : traits<PlainObjectType>
+{
+  enum {
+    Flags = (traits<PlainObjectType>::Flags
+      & (  HereditaryBits
+         | (functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
+         | (functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0)))
+      | (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
+    CoeffReadCost = functor_traits<NullaryOp>::Cost
+  };
+};
+}
+
+template<typename NullaryOp, typename PlainObjectType>
+class CwiseNullaryOp : internal::no_assignment_operator,
+  public internal::dense_xpr_base< CwiseNullaryOp<NullaryOp, PlainObjectType> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<CwiseNullaryOp>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)
+
+    CwiseNullaryOp(Index nbRows, Index nbCols, const NullaryOp& func = NullaryOp())
+      : m_rows(nbRows), m_cols(nbCols), m_functor(func)
+    {
+      eigen_assert(nbRows >= 0
+            && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == nbRows)
+            &&  nbCols >= 0
+            && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == nbCols));
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); }
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const
+    {
+      return m_functor(rowId, colId);
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
+    {
+      return m_functor.packetOp(rowId, colId);
+    }
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
+    {
+      return m_functor(index);
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
+    {
+      return m_functor.packetOp(index);
+    }
+
+    /** \returns the functor representing the nullary operation */
+    const NullaryOp& functor() const { return m_functor; }
+
+  protected:
+    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
+    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
+    const NullaryOp m_functor;
+};
+
+
+/** \returns an expression of a matrix defined by a custom functor \a func
+  *
+  * The parameters \a rows and \a cols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this MatrixBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a rows and \a cols as arguments, so the variant
+  * NullaryExpr(const CustomNullaryOp&) should be used instead.
+  *
+  * The template parameter \a CustomNullaryOp is the type of the functor.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+template<typename CustomNullaryOp>
+EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
+DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func)
+{
+  return CwiseNullaryOp<CustomNullaryOp, Derived>(rows, cols, func);
+}
+
+/** \returns an expression of a matrix defined by a custom functor \a func
+  *
+  * The parameter \a size is the size of the returned vector.
+  * Must be compatible with this MatrixBase type.
+  *
+  * \only_for_vectors
+  *
+  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
+  * it is redundant to pass \a size as argument, so the variant
+  * NullaryExpr(const CustomNullaryOp&) should be used instead.
+  *
+  * The template parameter \a CustomNullaryOp is the type of the functor.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+template<typename CustomNullaryOp>
+EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
+DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  if(RowsAtCompileTime == 1) return CwiseNullaryOp<CustomNullaryOp, Derived>(1, size, func);
+  else return CwiseNullaryOp<CustomNullaryOp, Derived>(size, 1, func);
+}
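+
+// A minimal sketch (illustrative only, not part of this header) of a
+// hypothetical custom nullary functor used with NullaryExpr(). Both call
+// operators are provided so that every coefficient-access path compiles
+// (for a vector, one of the two indices is always 0):
+//
+//   struct SquaresOp {
+//     typedef double result_type;
+//     double operator()(DenseIndex i) const { return double(i) * double(i); }
+//     double operator()(DenseIndex r, DenseIndex c) const { return (*this)(r + c); }
+//   };
+//
+//   VectorXd squares(DenseIndex n)
+//   {
+//     return VectorXd::NullaryExpr(n, SquaresOp()); // 0, 1, 4, 9, ...
+//   }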
+
+/** \returns an expression of a matrix defined by a custom functor \a func
+  *
+  * This variant is only for fixed-size DenseBase types. For dynamic-size types, you
+  * need to use the variants taking size arguments.
+  *
+  * The template parameter \a CustomNullaryOp is the type of the functor.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+template<typename CustomNullaryOp>
+EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, Derived>
+DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
+{
+  return CwiseNullaryOp<CustomNullaryOp, Derived>(RowsAtCompileTime, ColsAtCompileTime, func);
+}
+
+/** \returns an expression of a constant matrix of value \a value
+  *
+  * The parameters \a nbRows and \a nbCols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this DenseBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a nbRows and \a nbCols as arguments, so
+  * Constant(const Scalar&) should be used instead.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Constant(Index nbRows, Index nbCols, const Scalar& value)
+{
+  return DenseBase<Derived>::NullaryExpr(nbRows, nbCols, internal::scalar_constant_op<Scalar>(value));
+}
+
+/** \returns an expression of a constant matrix of value \a value
+  *
+  * The parameter \a size is the size of the returned vector.
+  * Must be compatible with this DenseBase type.
+  *
+  * \only_for_vectors
+  *
+  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
+  * it is redundant to pass \a size as argument, so
+  * Constant(const Scalar&) should be used instead.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Constant(Index size, const Scalar& value)
+{
+  return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value));
+}
+
+/** \returns an expression of a constant matrix of value \a value
+  *
+  * This variant is only for fixed-size DenseBase types. For dynamic-size types, you
+  * need to use the variants taking size arguments.
+  *
+  * \sa class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Constant(const Scalar& value)
+{
+  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+  return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op<Scalar>(value));
+}
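+
+// A few illustrative uses of the Constant() variants above (a sketch, not part
+// of this header):
+//
+//   MatrixXf a = MatrixXf::Constant(2, 3, 0.5f); // dynamic size: pass rows/cols
+//   VectorXf b = VectorXf::Constant(4, 0.5f);    // dynamic vector: pass size
+//   Matrix3f c = Matrix3f::Constant(0.5f);       // fixed size: no size argument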
+
+/**
+  * \brief Returns an expression of a linearly spaced vector.
+  *
+  * The function generates 'size' equally spaced values in the closed interval [low,high].
+  * This particular version of LinSpaced() uses sequential access, i.e. vector access is
+  * assumed to be a(0), a(1), ..., a(size-1). This assumption allows for better vectorization
+  * and yields faster code than the random access version.
+  *
+  * When size is set to 1, a vector of length 1 containing 'high' is returned.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include DenseBase_LinSpaced_seq.cpp
+  * Output: \verbinclude DenseBase_LinSpaced_seq.out
+  *
+  * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Index,Scalar,Scalar), CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,false>(low,high,size));
+}
+
+/**
+  * \copydoc DenseBase::LinSpaced(Sequential_t, Index, const Scalar&, const Scalar&)
+  * Special version for fixed size types which does not require the size parameter.
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::SequentialLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+  return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,false>(low,high,Derived::SizeAtCompileTime));
+}
+
+/**
+  * \brief Returns an expression of a linearly spaced vector.
+  *
+  * The function generates 'size' equally spaced values in the closed interval [low,high].
+  * When size is set to 1, a vector of length 1 containing 'high' is returned.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include DenseBase_LinSpaced.cpp
+  * Output: \verbinclude DenseBase_LinSpaced.out
+  *
+  * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Sequential_t,Index,const Scalar&,const Scalar&), CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,true>(low,high,size));
+}
+
+/**
+  * \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&)
+  * Special version for fixed size types which does not require the size parameter.
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType
+DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+  return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,true>(low,high,Derived::SizeAtCompileTime));
+}
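+
+// Illustrative uses of LinSpaced() (a sketch, not part of this header). Values
+// span the closed interval [low,high]; the Sequential variant promises
+// front-to-back access in exchange for better vectorization:
+//
+//   VectorXd v = VectorXd::LinSpaced(5, 0.0, 1.0);             // 0 .25 .5 .75 1
+//   VectorXd w = VectorXd::LinSpaced(Sequential, 5, 0.0, 1.0); // sequential access
+//   Vector4f f = Vector4f::LinSpaced(0.f, 1.f);                // fixed size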
+
+/** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */
+template<typename Derived>
+bool DenseBase<Derived>::isApproxToConstant
+(const Scalar& val, const RealScalar& prec) const
+{
+  for(Index j = 0; j < cols(); ++j)
+    for(Index i = 0; i < rows(); ++i)
+      if(!internal::isApprox(this->coeff(i, j), val, prec))
+        return false;
+  return true;
+}
+
+/** This is just an alias for isApproxToConstant().
+  *
+  * \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */
+template<typename Derived>
+bool DenseBase<Derived>::isConstant
+(const Scalar& val, const RealScalar& prec) const
+{
+  return isApproxToConstant(val, prec);
+}
+
+/** Alias for setConstant(): sets all coefficients in this expression to \a val.
+  *
+  * \sa setConstant(), Constant(), class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val)
+{
+  setConstant(val);
+}
+
+/** Sets all coefficients in this expression to \a val.
+  *
+  * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val)
+{
+  return derived() = Constant(rows(), cols(), val);
+}
+
+/** Resizes to the given \a size, and sets all coefficients in this expression to the given \a val.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include Matrix_setConstant_int.cpp
+  * Output: \verbinclude Matrix_setConstant_int.out
+  *
+  * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setConstant(Index size, const Scalar& val)
+{
+  resize(size);
+  return setConstant(val);
+}
+
+/** Resizes to the given size, and sets all coefficients in this expression to the given \a val.
+  *
+  * \param nbRows the new number of rows
+  * \param nbCols the new number of columns
+  * \param val the value to which all coefficients are set
+  *
+  * Example: \include Matrix_setConstant_int_int.cpp
+  * Output: \verbinclude Matrix_setConstant_int_int.out
+  *
+  * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setConstant(Index nbRows, Index nbCols, const Scalar& val)
+{
+  resize(nbRows, nbCols);
+  return setConstant(val);
+}
+
+/**
+  * \brief Sets a linearly spaced vector.
+  *
+  * The function generates 'size' equally spaced values in the closed interval [low,high].
+  * When size is set to 1, a vector of length 1 containing 'high' is returned.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include DenseBase_setLinSpaced.cpp
+  * Output: \verbinclude DenseBase_setLinSpaced.out
+  *
+  * \sa CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar,false>(low,high,newSize));
+}
+
+/**
+  * \brief Sets a linearly spaced vector.
+  *
+  * The function fills *this with equally spaced values in the closed interval [low,high].
+  * When the size equals 1, the vector contains only 'high'.
+  *
+  * \only_for_vectors
+  *
+  * \sa setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return setLinSpaced(size(), low, high);
+}
+
+// zero:
+
+/** \returns an expression of a zero matrix.
+  *
+  * The parameters \a nbRows and \a nbCols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this MatrixBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a nbRows and \a nbCols as arguments, so Zero() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_zero_int_int.cpp
+  * Output: \verbinclude MatrixBase_zero_int_int.out
+  *
+  * \sa Zero(), Zero(Index)
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Zero(Index nbRows, Index nbCols)
+{
+  return Constant(nbRows, nbCols, Scalar(0));
+}
+
+/** \returns an expression of a zero vector.
+  *
+  * The parameter \a size is the size of the returned vector.
+  * Must be compatible with this MatrixBase type.
+  *
+  * \only_for_vectors
+  *
+  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
+  * it is redundant to pass \a size as argument, so Zero() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_zero_int.cpp
+  * Output: \verbinclude MatrixBase_zero_int.out
+  *
+  * \sa Zero(), Zero(Index,Index)
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Zero(Index size)
+{
+  return Constant(size, Scalar(0));
+}
+
+/** \returns an expression of a fixed-size zero matrix or vector.
+  *
+  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
+  * need to use the variants taking size arguments.
+  *
+  * Example: \include MatrixBase_zero.cpp
+  * Output: \verbinclude MatrixBase_zero.out
+  *
+  * \sa Zero(Index), Zero(Index,Index)
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Zero()
+{
+  return Constant(Scalar(0));
+}
+
+/** \returns true if *this is approximately equal to the zero matrix,
+  *          within the precision given by \a prec.
+  *
+  * Example: \include MatrixBase_isZero.cpp
+  * Output: \verbinclude MatrixBase_isZero.out
+  *
+  * \sa class CwiseNullaryOp, Zero()
+  */
+template<typename Derived>
+bool DenseBase<Derived>::isZero(const RealScalar& prec) const
+{
+  for(Index j = 0; j < cols(); ++j)
+    for(Index i = 0; i < rows(); ++i)
+      if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast<Scalar>(1), prec))
+        return false;
+  return true;
+}
+
+/** Sets all coefficients in this expression to zero.
+  *
+  * Example: \include MatrixBase_setZero.cpp
+  * Output: \verbinclude MatrixBase_setZero.out
+  *
+  * \sa class CwiseNullaryOp, Zero()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()
+{
+  return setConstant(Scalar(0));
+}
+
+/** Resizes to the given \a size, and sets all coefficients in this expression to zero.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include Matrix_setZero_int.cpp
+  * Output: \verbinclude Matrix_setZero_int.out
+  *
+  * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setZero(Index newSize)
+{
+  resize(newSize);
+  return setConstant(Scalar(0));
+}
+
+/** Resizes to the given size, and sets all coefficients in this expression to zero.
+  *
+  * \param nbRows the new number of rows
+  * \param nbCols the new number of columns
+  *
+  * Example: \include Matrix_setZero_int_int.cpp
+  * Output: \verbinclude Matrix_setZero_int_int.out
+  *
+  * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setZero(Index nbRows, Index nbCols)
+{
+  resize(nbRows, nbCols);
+  return setConstant(Scalar(0));
+}
+
+// ones:
+
+/** \returns an expression of a matrix where all coefficients equal one.
+  *
+  * The parameters \a nbRows and \a nbCols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this MatrixBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a nbRows and \a nbCols as arguments, so Ones() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_ones_int_int.cpp
+  * Output: \verbinclude MatrixBase_ones_int_int.out
+  *
+  * \sa Ones(), Ones(Index), isOnes(), class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Ones(Index nbRows, Index nbCols)
+{
+  return Constant(nbRows, nbCols, Scalar(1));
+}
+
+/** \returns an expression of a vector where all coefficients equal one.
+  *
+  * The parameter \a newSize is the size of the returned vector.
+  * Must be compatible with this MatrixBase type.
+  *
+  * \only_for_vectors
+  *
+  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
+  * it is redundant to pass \a newSize as argument, so Ones() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_ones_int.cpp
+  * Output: \verbinclude MatrixBase_ones_int.out
+  *
+  * \sa Ones(), Ones(Index,Index), isOnes(), class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Ones(Index newSize)
+{
+  return Constant(newSize, Scalar(1));
+}
+
+/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one.
+  *
+  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
+  * need to use the variants taking size arguments.
+  *
+  * Example: \include MatrixBase_ones.cpp
+  * Output: \verbinclude MatrixBase_ones.out
+  *
+  * \sa Ones(Index), Ones(Index,Index), isOnes(), class CwiseNullaryOp
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType
+DenseBase<Derived>::Ones()
+{
+  return Constant(Scalar(1));
+}
+
+/** \returns true if *this is approximately equal to the matrix where all coefficients
+  *          are equal to 1, within the precision given by \a prec.
+  *
+  * Example: \include MatrixBase_isOnes.cpp
+  * Output: \verbinclude MatrixBase_isOnes.out
+  *
+  * \sa class CwiseNullaryOp, Ones()
+  */
+template<typename Derived>
+bool DenseBase<Derived>::isOnes
+(const RealScalar& prec) const
+{
+  return isApproxToConstant(Scalar(1), prec);
+}
+
+/** Sets all coefficients in this expression to one.
+  *
+  * Example: \include MatrixBase_setOnes.cpp
+  * Output: \verbinclude MatrixBase_setOnes.out
+  *
+  * \sa class CwiseNullaryOp, Ones()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()
+{
+  return setConstant(Scalar(1));
+}
+
+/** Resizes to the given \a newSize, and sets all coefficients in this expression to one.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include Matrix_setOnes_int.cpp
+  * Output: \verbinclude Matrix_setOnes_int.out
+  *
+  * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setOnes(Index newSize)
+{
+  resize(newSize);
+  return setConstant(Scalar(1));
+}
+
+/** Resizes to the given size, and sets all coefficients in this expression to one.
+  *
+  * \param nbRows the new number of rows
+  * \param nbCols the new number of columns
+  *
+  * Example: \include Matrix_setOnes_int_int.cpp
+  * Output: \verbinclude Matrix_setOnes_int_int.out
+  *
+  * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setOnes(Index nbRows, Index nbCols)
+{
+  resize(nbRows, nbCols);
+  return setConstant(Scalar(1));
+}
+
+// Identity:
+
+/** \returns an expression of the identity matrix (not necessarily square).
+  *
+  * The parameters \a nbRows and \a nbCols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this MatrixBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a nbRows and \a nbCols as arguments, so Identity() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_identity_int_int.cpp
+  * Output: \verbinclude MatrixBase_identity_int_int.out
+  *
+  * \sa Identity(), setIdentity(), isIdentity()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
+MatrixBase<Derived>::Identity(Index nbRows, Index nbCols)
+{
+  return DenseBase<Derived>::NullaryExpr(nbRows, nbCols, internal::scalar_identity_op<Scalar>());
+}
+
+/** \returns an expression of the identity matrix (not necessarily square).
+  *
+  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
+  * need to use the variant taking size arguments.
+  *
+  * Example: \include MatrixBase_identity.cpp
+  * Output: \verbinclude MatrixBase_identity.out
+  *
+  * \sa Identity(Index,Index), setIdentity(), isIdentity()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType
+MatrixBase<Derived>::Identity()
+{
+  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+  return MatrixBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op<Scalar>());
+}
+
+/** \returns true if *this is approximately equal to the identity matrix
+  *          (not necessarily square),
+  *          within the precision given by \a prec.
+  *
+  * Example: \include MatrixBase_isIdentity.cpp
+  * Output: \verbinclude MatrixBase_isIdentity.out
+  *
+  * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity()
+  */
+template<typename Derived>
+bool MatrixBase<Derived>::isIdentity
+(const RealScalar& prec) const
+{
+  for(Index j = 0; j < cols(); ++j)
+  {
+    for(Index i = 0; i < rows(); ++i)
+    {
+      if(i == j)
+      {
+        if(!internal::isApprox(this->coeff(i, j), static_cast<Scalar>(1), prec))
+          return false;
+      }
+      else
+      {
+        if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast<RealScalar>(1), prec))
+          return false;
+      }
+    }
+  }
+  return true;
+}
+
+namespace internal {
+
+template<typename Derived, bool Big = (Derived::SizeAtCompileTime>=16)>
+struct setIdentity_impl
+{
+  static EIGEN_STRONG_INLINE Derived& run(Derived& m)
+  {
+    return m = Derived::Identity(m.rows(), m.cols());
+  }
+};
+
+template<typename Derived>
+struct setIdentity_impl<Derived, true>
+{
+  typedef typename Derived::Index Index;
+  static EIGEN_STRONG_INLINE Derived& run(Derived& m)
+  {
+    m.setZero();
+    const Index size = (std::min)(m.rows(), m.cols());
+    for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);
+    return m;
+  }
+};
+
+} // end namespace internal
+
+/** Writes the identity expression (not necessarily square) into *this.
+  *
+  * Example: \include MatrixBase_setIdentity.cpp
+  * Output: \verbinclude MatrixBase_setIdentity.out
+  *
+  * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()
+{
+  return internal::setIdentity_impl<Derived>::run(derived());
+}
+
+/** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this.
+  *
+  * \param nbRows the new number of rows
+  * \param nbCols the new number of columns
+  *
+  * Example: \include Matrix_setIdentity_int_int.cpp
+  * Output: \verbinclude Matrix_setIdentity_int_int.out
+  *
+  * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index nbRows, Index nbCols)
+{
+  derived().resize(nbRows, nbCols);
+  return setIdentity();
+}
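+
+// Illustrative uses (a sketch, not part of this header). Note the dispatch in
+// internal::setIdentity_impl above: fixed-size matrices with at least 16
+// coefficients are zeroed and get their diagonal filled in a loop, while
+// smaller and dynamic-size matrices assign the Identity() expression:
+//
+//   Matrix4f a; a.setIdentity();     // 16 coefficients: setZero + diagonal loop
+//   MatrixXd b; b.setIdentity(3, 4); // resize, then rectangular identity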
+
+/** \returns an expression of the i-th unit (basis) vector.
+  *
+  * \only_for_vectors
+  *
+  * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index newSize, Index i)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return BasisReturnType(SquareMatrixType::Identity(newSize,newSize), i);
+}
+
+/** \returns an expression of the i-th unit (basis) vector.
+  *
+  * \only_for_vectors
+  *
+  * This variant is for fixed-size vector only.
+  *
+  * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return BasisReturnType(SquareMatrixType::Identity(),i);
+}
+
+/** \returns an expression of the X axis unit vector (1{,0}^*)
+  *
+  * \only_for_vectors
+  *
+  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX()
+{ return Derived::Unit(0); }
+
+/** \returns an expression of the Y axis unit vector (0,1{,0}^*)
+  *
+  * \only_for_vectors
+  *
+  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitZ(), MatrixBase::UnitW()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY()
+{ return Derived::Unit(1); }
+
+/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*)
+  *
+  * \only_for_vectors
+  *
+  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitW()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ()
+{ return Derived::Unit(2); }
+
+/** \returns an expression of the W axis unit vector (0,0,0,1)
+  *
+  * \only_for_vectors
+  *
+  * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW()
+{ return Derived::Unit(3); }
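+
+// Illustrative uses of the unit-vector factories (a sketch, not part of this
+// header):
+//
+//   Vector4f w  = Vector4f::UnitW();    // (0, 0, 0, 1)
+//   Vector3f y  = Vector3f::UnitY();    // (0, 1, 0)
+//   VectorXd e2 = VectorXd::Unit(5, 2); // size-5 vector with a 1 at index 2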
+
+} // end namespace Eigen
+
+#endif // EIGEN_CWISE_NULLARY_OP_H
diff --git a/Eigen/src/Core/CwiseUnaryOp.h b/Eigen/src/Core/CwiseUnaryOp.h
new file mode 100644
index 0000000..f2de749
--- /dev/null
+++ b/Eigen/src/Core/CwiseUnaryOp.h
@@ -0,0 +1,126 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CWISE_UNARY_OP_H
+#define EIGEN_CWISE_UNARY_OP_H
+
+namespace Eigen { 
+
+/** \class CwiseUnaryOp
+  * \ingroup Core_Module
+  *
+  * \brief Generic expression where a coefficient-wise unary operator is applied to an expression
+  *
+  * \param UnaryOp template functor implementing the operator
+  * \param XprType the type of the expression to which we are applying the unary operator
+  *
+  * This class represents an expression where a unary operator is applied to an expression.
+  * It is the return type of all operations taking exactly 1 input expression, regardless of the
+  * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix
+  * is considered unary, because only the right-hand side is an expression, and its
+  * return type is a specialization of CwiseUnaryOp.
+  *
+  * Most of the time, this is the only way that it is used, so you typically don't have to name
+  * CwiseUnaryOp types explicitly.
+  *
+  * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp
+  */
+
+namespace internal {
+template<typename UnaryOp, typename XprType>
+struct traits<CwiseUnaryOp<UnaryOp, XprType> >
+ : traits<XprType>
+{
+  typedef typename result_of<
+                     UnaryOp(typename XprType::Scalar)
+                   >::type Scalar;
+  typedef typename XprType::Nested XprTypeNested;
+  typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;
+  enum {
+    Flags = _XprTypeNested::Flags & (
+      HereditaryBits | LinearAccessBit | AlignedBit
+      | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
+    CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits<UnaryOp>::Cost
+  };
+};
+}
+
+template<typename UnaryOp, typename XprType, typename StorageKind>
+class CwiseUnaryOpImpl;
+
+template<typename UnaryOp, typename XprType>
+class CwiseUnaryOp : internal::no_assignment_operator,
+  public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>
+{
+  public:
+
+    typedef typename CwiseUnaryOpImpl<UnaryOp, XprType,typename internal::traits<XprType>::StorageKind>::Base Base;
+    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)
+
+    inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())
+      : m_xpr(xpr), m_functor(func) {}
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); }
+
+    /** \returns the functor representing the unary operation */
+    const UnaryOp& functor() const { return m_functor; }
+
+    /** \returns the nested expression */
+    const typename internal::remove_all<typename XprType::Nested>::type&
+    nestedExpression() const { return m_xpr; }
+
+    /** \returns the nested expression */
+    typename internal::remove_all<typename XprType::Nested>::type&
+    nestedExpression() { return m_xpr.const_cast_derived(); }
+
+  protected:
+    typename XprType::Nested m_xpr;
+    const UnaryOp m_functor;
+};
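+
+// A minimal sketch (illustrative only, not part of this header) of obtaining a
+// CwiseUnaryOp via MatrixBase::unaryExpr() with a hypothetical user-defined
+// functor:
+//
+//   struct Clamp01Op {
+//     typedef float result_type;
+//     float operator()(float x) const { return x < 0.f ? 0.f : (x > 1.f ? 1.f : x); }
+//   };
+//
+//   MatrixXf clamp01(const MatrixXf& m)
+//   {
+//     return m.unaryExpr(Clamp01Op()); // lazy; evaluated on assignment
+//   }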
+
+// This is the generic implementation for dense storage.
+// It can be used for any expression types implementing the dense concept.
+template<typename UnaryOp, typename XprType>
+class CwiseUnaryOpImpl<UnaryOp,XprType,Dense>
+  : public internal::dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type
+{
+  public:
+
+    typedef CwiseUnaryOp<UnaryOp, XprType> Derived;
+    typedef typename internal::dense_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index rowId, Index colId) const
+    {
+      return derived().functor()(derived().nestedExpression().coeff(rowId, colId));
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
+    {
+      return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(rowId, colId));
+    }
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
+    {
+      return derived().functor()(derived().nestedExpression().coeff(index));
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
+    {
+      return derived().functor().packetOp(derived().nestedExpression().template packet<LoadMode>(index));
+    }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_CWISE_UNARY_OP_H
diff --git a/Eigen/src/Core/CwiseUnaryView.h b/Eigen/src/Core/CwiseUnaryView.h
new file mode 100644
index 0000000..b2638d3
--- /dev/null
+++ b/Eigen/src/Core/CwiseUnaryView.h
@@ -0,0 +1,139 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CWISE_UNARY_VIEW_H
+#define EIGEN_CWISE_UNARY_VIEW_H
+
+namespace Eigen {
+
+/** \class CwiseUnaryView
+  * \ingroup Core_Module
+  *
+  * \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector
+  *
+  * \param ViewOp template functor implementing the view
+  * \param MatrixType the type of the matrix to which we are applying the unary operator
+  *
+  * This class represents an lvalue expression of a generic unary view operator of a matrix or a vector.
+  * It is the return type of real() and imag(), and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp
+  */
+
+namespace internal {
+template<typename ViewOp, typename MatrixType>
+struct traits<CwiseUnaryView<ViewOp, MatrixType> >
+ : traits<MatrixType>
+{
+  typedef typename result_of<
+                     ViewOp(typename traits<MatrixType>::Scalar)
+                   >::type Scalar;
+  typedef typename MatrixType::Nested MatrixTypeNested;
+  typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+  enum {
+    Flags = (traits<_MatrixTypeNested>::Flags & (HereditaryBits | LvalueBit | LinearAccessBit | DirectAccessBit)),
+    CoeffReadCost = traits<_MatrixTypeNested>::CoeffReadCost + functor_traits<ViewOp>::Cost,
+    MatrixTypeInnerStride =  inner_stride_at_compile_time<MatrixType>::ret,
+    // need to cast the sizeof's from size_t to int explicitly, otherwise:
+    // "error: no integral type can represent all of the enumerator values"
+    InnerStrideAtCompileTime = MatrixTypeInnerStride == Dynamic
+                             ? int(Dynamic)
+                             : int(MatrixTypeInnerStride) * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)),
+    OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret == Dynamic
+                             ? int(Dynamic)
+                             : outer_stride_at_compile_time<MatrixType>::ret * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar))
+  };
+};
+}
+
+template<typename ViewOp, typename MatrixType, typename StorageKind>
+class CwiseUnaryViewImpl;
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryView : public CwiseUnaryViewImpl<ViewOp, MatrixType, typename internal::traits<MatrixType>::StorageKind>
+{
+  public:
+
+    typedef typename CwiseUnaryViewImpl<ViewOp, MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;
+    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView)
+
+    inline CwiseUnaryView(const MatrixType& mat, const ViewOp& func = ViewOp())
+      : m_matrix(mat), m_functor(func) {}
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView)
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); }
+
+    /** \returns the functor representing the unary operation */
+    const ViewOp& functor() const { return m_functor; }
+
+    /** \returns the nested expression */
+    const typename internal::remove_all<typename MatrixType::Nested>::type&
+    nestedExpression() const { return m_matrix; }
+
+    /** \returns the nested expression */
+    typename internal::remove_all<typename MatrixType::Nested>::type&
+    nestedExpression() { return m_matrix.const_cast_derived(); }
+
+  protected:
+    // FIXME changed from MatrixType::Nested because of a weird compilation error with sun CC
+    typename internal::nested<MatrixType>::type m_matrix;
+    ViewOp m_functor;
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>
+  : public internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type
+{
+  public:
+
+    typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
+    typedef typename internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type Base;
+
+    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)
+    
+    inline Scalar* data() { return &coeffRef(0); }
+    inline const Scalar* data() const { return &coeff(0); }
+
+    inline Index innerStride() const
+    {
+      return derived().nestedExpression().innerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar);
+    }
+
+    inline Index outerStride() const
+    {
+      return derived().nestedExpression().outerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar);
+    }
+
+    EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
+    {
+      return derived().functor()(derived().nestedExpression().coeff(row, col));
+    }
+
+    EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+    {
+      return derived().functor()(derived().nestedExpression().coeff(index));
+    }
+
+    EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
+    {
+      return derived().functor()(const_cast_derived().nestedExpression().coeffRef(row, col));
+    }
+
+    EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
+    {
+      return derived().functor()(const_cast_derived().nestedExpression().coeffRef(index));
+    }
+};
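+
+// A minimal sketch (illustrative only, not part of this header): real() and
+// imag() return writable CwiseUnaryView expressions, so they can appear on the
+// left-hand side of an assignment:
+//
+//   void unary_view_demo()
+//   {
+//     MatrixXcf m = MatrixXcf::Zero(2, 2);
+//     m.real().setConstant(1.f); // write through the view
+//     m.imag()(0, 0) = 2.f;      // single-coefficient lvalue access
+//   }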
+
+} // end namespace Eigen
+
+#endif // EIGEN_CWISE_UNARY_VIEW_H
diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h
new file mode 100644
index 0000000..32f60d8
--- /dev/null
+++ b/Eigen/src/Core/DenseBase.h
@@ -0,0 +1,525 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DENSEBASE_H
+#define EIGEN_DENSEBASE_H
+
+namespace Eigen {
+
+namespace internal {
+  
+// The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type.
+// This dummy function simply checks this at compile time.
+static inline void check_DenseIndex_is_signed() {
+  EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); 
+}
+
+} // end namespace internal
+  
+/** \class DenseBase
+  * \ingroup Core_Module
+  *
+  * \brief Base class for all dense matrices, vectors, and arrays
+  *
+  * This class is the base that is inherited by all dense objects (matrices, vectors, arrays,
+  * and related expression types). The common Eigen API for dense objects is contained in this class.
+  *
+  * \tparam Derived is the derived type, e.g., a matrix type or an expression.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN.
+  *
+  * \sa \ref TopicClassHierarchy
+  */
+template<typename Derived> class DenseBase
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+  : public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
+                                     typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>
+#else
+  : public DenseCoeffsBase<Derived>
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+{
+  public:
+    using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
+                typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
+
+    class InnerIterator;
+
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+
+    /** \brief The type of indices 
+      * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE.
+      * \sa \ref TopicPreprocessorDirectives.
+      */
+    typedef typename internal::traits<Derived>::Index Index; 
+
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    typedef DenseCoeffsBase<Derived> Base;
+    using Base::derived;
+    using Base::const_cast_derived;
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::rowIndexByOuterInner;
+    using Base::colIndexByOuterInner;
+    using Base::coeff;
+    using Base::coeffByOuterInner;
+    using Base::packet;
+    using Base::packetByOuterInner;
+    using Base::writePacket;
+    using Base::writePacketByOuterInner;
+    using Base::coeffRef;
+    using Base::coeffRefByOuterInner;
+    using Base::copyCoeff;
+    using Base::copyCoeffByOuterInner;
+    using Base::copyPacket;
+    using Base::copyPacketByOuterInner;
+    using Base::operator();
+    using Base::operator[];
+    using Base::x;
+    using Base::y;
+    using Base::z;
+    using Base::w;
+    using Base::stride;
+    using Base::innerStride;
+    using Base::outerStride;
+    using Base::rowStride;
+    using Base::colStride;
+    typedef typename Base::CoeffReturnType CoeffReturnType;
+
+    enum {
+
+      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+        /**< The number of rows at compile-time. This is just a copy of the value provided
+          * by the \a Derived type. If a value is not known at compile-time,
+          * it is set to the \a Dynamic constant.
+          * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
+
+      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+        /**< The number of columns at compile-time. This is just a copy of the value provided
+          * by the \a Derived type. If a value is not known at compile-time,
+          * it is set to the \a Dynamic constant.
+          * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
+
+
+      SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
+                                                   internal::traits<Derived>::ColsAtCompileTime>::ret),
+        /**< This is equal to the number of coefficients, i.e. the number of
+          * rows times the number of columns, or to \a Dynamic if this is not
+          * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
+
+      MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
+        /**< This value is equal to the maximum possible number of rows that this expression
+          * might have. If this expression might have an arbitrarily high number of rows,
+          * this value is set to \a Dynamic.
+          *
+          * This value is useful to know when evaluating an expression, in order to determine
+          * whether it is possible to avoid doing a dynamic memory allocation.
+          *
+          * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime
+          */
+
+      MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,
+        /**< This value is equal to the maximum possible number of columns that this expression
+          * might have. If this expression might have an arbitrarily high number of columns,
+          * this value is set to \a Dynamic.
+          *
+          * This value is useful to know when evaluating an expression, in order to determine
+          * whether it is possible to avoid doing a dynamic memory allocation.
+          *
+          * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime
+          */
+
+      MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,
+                                                      internal::traits<Derived>::MaxColsAtCompileTime>::ret),
+        /**< This value is equal to the maximum possible number of coefficients that this expression
+          * might have. If this expression might have an arbitrarily high number of coefficients,
+          * this value is set to \a Dynamic.
+          *
+          * This value is useful to know when evaluating an expression, in order to determine
+          * whether it is possible to avoid doing a dynamic memory allocation.
+          *
+          * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime
+          */
+
+      IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1
+                           || internal::traits<Derived>::MaxColsAtCompileTime == 1,
+        /**< This is set to true if either the number of rows or the number of
+          * columns is known at compile-time to be equal to 1. Indeed, in that case,
+          * we are dealing with a column-vector (if there is only one column) or with
+          * a row-vector (if there is only one row). */
+
+      Flags = internal::traits<Derived>::Flags,
+        /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
+          * constructed from this one. See the \ref flags "list of flags".
+          */
+
+      IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */
+
+      InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
+                             : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
+
+      CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+        /**< This is a rough measure of how expensive it is to read one coefficient from
+          * this expression.
+          */
+
+      InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret,
+      OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret
+    };
+
+    enum { ThisConstantIsPrivateInPlainObjectBase };
+
+    /** \returns the number of nonzero coefficients, which is in practice the number
+      * of stored coefficients. */
+    inline Index nonZeros() const { return size(); }
+
+    /** \returns the outer size.
+      *
+      * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension
+      * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a
+      * column-major matrix, and the number of rows for a row-major matrix. */
+    Index outerSize() const
+    {
+      return IsVectorAtCompileTime ? 1
+           : int(IsRowMajor) ? this->rows() : this->cols();
+    }
+
+    /** \returns the inner size.
+      *
+      * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension
+      * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a 
+      * column-major matrix, and the number of columns for a row-major matrix. */
+    Index innerSize() const
+    {
+      return IsVectorAtCompileTime ? this->size()
+           : int(IsRowMajor) ? this->cols() : this->rows();
+    }
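+
+    // Example (illustrative): for a column-major MatrixXd m(3,4),
+    // m.outerSize() == 4 (columns) and m.innerSize() == 3 (rows); with
+    // row-major storage the roles are exchanged, and for a VectorXd v(5),
+    // v.outerSize() == 1 and v.innerSize() == 5.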
+
+    /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
+      * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
+      * nothing else.
+      */
+    void resize(Index newSize)
+    {
+      EIGEN_ONLY_USED_FOR_DEBUG(newSize);
+      eigen_assert(newSize == this->size()
+                && "DenseBase::resize() does not actually allow to resize.");
+    }
+    /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are
+      * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does
+      * nothing else.
+      */
+    void resize(Index nbRows, Index nbCols)
+    {
+      EIGEN_ONLY_USED_FOR_DEBUG(nbRows);
+      EIGEN_ONLY_USED_FOR_DEBUG(nbCols);
+      eigen_assert(nbRows == this->rows() && nbCols == this->cols()
+                && "DenseBase::resize() does not actually allow to resize.");
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+    /** \internal Represents a matrix with all coefficients equal to one another. */
+    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Derived> ConstantReturnType;
+    /** \internal Represents a vector with linearly spaced coefficients that allows sequential access only. */
+    typedef CwiseNullaryOp<internal::linspaced_op<Scalar,false>,Derived> SequentialLinSpacedReturnType;
+    /** \internal Represents a vector with linearly spaced coefficients that allows random access. */
+    typedef CwiseNullaryOp<internal::linspaced_op<Scalar,true>,Derived> RandomAccessLinSpacedReturnType;
+    /** \internal the return type of MatrixBase::eigenvalues() */
+    typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;
+
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+    /** Copies \a other into *this. \returns a reference to *this. */
+    template<typename OtherDerived>
+    Derived& operator=(const DenseBase<OtherDerived>& other);
+
+    /** Special case of the template operator=, in order to prevent the compiler
+      * from generating a default operator= (issue hit with g++ 4.1)
+      */
+    Derived& operator=(const DenseBase& other);
+
+    template<typename OtherDerived>
+    Derived& operator=(const EigenBase<OtherDerived> &other);
+
+    template<typename OtherDerived>
+    Derived& operator+=(const EigenBase<OtherDerived> &other);
+
+    template<typename OtherDerived>
+    Derived& operator-=(const EigenBase<OtherDerived> &other);
+
+    template<typename OtherDerived>
+    Derived& operator=(const ReturnByValue<OtherDerived>& func);
+
+    /** \internal Copies \a other into *this without evaluating other. \returns a reference to *this. */
+    template<typename OtherDerived>
+    Derived& lazyAssign(const DenseBase<OtherDerived>& other);
+
+    /** \internal Evaluates \a other into *this. \returns a reference to *this. */
+    template<typename OtherDerived>
+    Derived& lazyAssign(const ReturnByValue<OtherDerived>& other);
+
+    CommaInitializer<Derived> operator<< (const Scalar& s);
+
+    template<unsigned int Added,unsigned int Removed>
+    const Flagged<Derived, Added, Removed> flagged() const;
+
+    template<typename OtherDerived>
+    CommaInitializer<Derived> operator<< (const DenseBase<OtherDerived>& other);
+
+    Eigen::Transpose<Derived> transpose();
+    typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;
+    ConstTransposeReturnType transpose() const;
+    void transposeInPlace();
+#ifndef EIGEN_NO_DEBUG
+  protected:
+    template<typename OtherDerived>
+    void checkTransposeAliasing(const OtherDerived& other) const;
+  public:
+#endif
+
+
+    static const ConstantReturnType
+    Constant(Index rows, Index cols, const Scalar& value);
+    static const ConstantReturnType
+    Constant(Index size, const Scalar& value);
+    static const ConstantReturnType
+    Constant(const Scalar& value);
+
+    static const SequentialLinSpacedReturnType
+    LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high);
+    static const RandomAccessLinSpacedReturnType
+    LinSpaced(Index size, const Scalar& low, const Scalar& high);
+    static const SequentialLinSpacedReturnType
+    LinSpaced(Sequential_t, const Scalar& low, const Scalar& high);
+    static const RandomAccessLinSpacedReturnType
+    LinSpaced(const Scalar& low, const Scalar& high);
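+
+    // Example (illustrative): VectorXf::LinSpaced(4, 0.f, 3.f) yields the
+    // vector (0, 1, 2, 3); the overloads taking a Sequential_t tag produce
+    // the same coefficients but only guarantee sequential access.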
+
+    template<typename CustomNullaryOp>
+    static const CwiseNullaryOp<CustomNullaryOp, Derived>
+    NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func);
+    template<typename CustomNullaryOp>
+    static const CwiseNullaryOp<CustomNullaryOp, Derived>
+    NullaryExpr(Index size, const CustomNullaryOp& func);
+    template<typename CustomNullaryOp>
+    static const CwiseNullaryOp<CustomNullaryOp, Derived>
+    NullaryExpr(const CustomNullaryOp& func);
+
+    static const ConstantReturnType Zero(Index rows, Index cols);
+    static const ConstantReturnType Zero(Index size);
+    static const ConstantReturnType Zero();
+    static const ConstantReturnType Ones(Index rows, Index cols);
+    static const ConstantReturnType Ones(Index size);
+    static const ConstantReturnType Ones();
+
+    void fill(const Scalar& value);
+    Derived& setConstant(const Scalar& value);
+    Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high);
+    Derived& setLinSpaced(const Scalar& low, const Scalar& high);
+    Derived& setZero();
+    Derived& setOnes();
+    Derived& setRandom();
+
+    template<typename OtherDerived>
+    bool isApprox(const DenseBase<OtherDerived>& other,
+                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isMuchSmallerThan(const RealScalar& other,
+                           const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    template<typename OtherDerived>
+    bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,
+                           const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+
+    bool isApproxToConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isZero(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isOnes(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    
+    inline bool hasNaN() const;
+    inline bool allFinite() const;
+
+    inline Derived& operator*=(const Scalar& other);
+    inline Derived& operator/=(const Scalar& other);
+
+    typedef typename internal::add_const_on_value_type<typename internal::eval<Derived>::type>::type EvalReturnType;
+    /** \returns the matrix or vector obtained by evaluating this expression.
+      *
+      * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
+      * a const reference, in order to avoid a useless copy.
+      */
+    EIGEN_STRONG_INLINE EvalReturnType eval() const
+    {
+      // Even though MSVC does not honor strong inlining when the return type
+      // is a dynamic matrix, we desperately need strong inlining for fixed
+      // size types on MSVC.
+      return typename internal::eval<Derived>::type(derived());
+    }
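+
+    // Example (illustrative): eval() is typically used to force the
+    // evaluation of an expression into a temporary, e.g. to avoid aliasing:
+    //
+    //   Eigen::MatrixXd a = Eigen::MatrixXd::Random(3,3);
+    //   a = (a + a.transpose()).eval();  // evaluate before writing back into a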
+
+    /** swaps *this with the expression \a other.
+      *
+      */
+    template<typename OtherDerived>
+    void swap(const DenseBase<OtherDerived>& other,
+              int = OtherDerived::ThisConstantIsPrivateInPlainObjectBase)
+    {
+      SwapWrapper<Derived>(derived()).lazyAssign(other.derived());
+    }
+
+    /** swaps *this with the matrix or array \a other.
+      *
+      */
+    template<typename OtherDerived>
+    void swap(PlainObjectBase<OtherDerived>& other)
+    {
+      SwapWrapper<Derived>(derived()).lazyAssign(other.derived());
+    }
+
+
+    inline const NestByValue<Derived> nestByValue() const;
+    inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
+    inline ForceAlignedAccess<Derived> forceAlignedAccess();
+    template<bool Enable> inline const typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf() const;
+    template<bool Enable> inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();
+
+    Scalar sum() const;
+    Scalar mean() const;
+    Scalar trace() const;
+
+    Scalar prod() const;
+
+    typename internal::traits<Derived>::Scalar minCoeff() const;
+    typename internal::traits<Derived>::Scalar maxCoeff() const;
+
+    template<typename IndexType>
+    typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const;
+    template<typename IndexType>
+    typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const;
+    template<typename IndexType>
+    typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const;
+    template<typename IndexType>
+    typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const;
+
+    template<typename BinaryOp>
+    typename internal::result_of<BinaryOp(typename internal::traits<Derived>::Scalar)>::type
+    redux(const BinaryOp& func) const;
+
+    template<typename Visitor>
+    void visit(Visitor& func) const;
+
+    inline const WithFormat<Derived> format(const IOFormat& fmt) const;
+
+    /** \returns the unique coefficient of a 1x1 expression */
+    CoeffReturnType value() const
+    {
+      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+      eigen_assert(this->rows() == 1 && this->cols() == 1);
+      return derived().coeff(0,0);
+    }
+
+    bool all(void) const;
+    bool any(void) const;
+    Index count() const;
+
+    typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType;
+    typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType;
+    typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType;
+    typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType;
+
+    ConstRowwiseReturnType rowwise() const;
+    RowwiseReturnType rowwise();
+    ConstColwiseReturnType colwise() const;
+    ColwiseReturnType colwise();
+
+    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(Index rows, Index cols);
+    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(Index size);
+    static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random();
+
+    template<typename ThenDerived,typename ElseDerived>
+    const Select<Derived,ThenDerived,ElseDerived>
+    select(const DenseBase<ThenDerived>& thenMatrix,
+           const DenseBase<ElseDerived>& elseMatrix) const;
+
+    template<typename ThenDerived>
+    inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
+    select(const DenseBase<ThenDerived>& thenMatrix, const typename ThenDerived::Scalar& elseScalar) const;
+
+    template<typename ElseDerived>
+    inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
+    select(const typename ElseDerived::Scalar& thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;
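+
+    // Example (illustrative): clamp the negative entries of a matrix to zero
+    // by selecting coefficient-wise between the matrix and a scalar:
+    //
+    //   Eigen::MatrixXd m = Eigen::MatrixXd::Random(3,3);
+    //   m = (m.array() >= 0).select(m, 0.0);  // keep m where >= 0, else 0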
+
+    template<int p> RealScalar lpNorm() const;
+
+    template<int RowFactor, int ColFactor>
+    inline const Replicate<Derived,RowFactor,ColFactor> replicate() const;
+    
+    typedef Replicate<Derived,Dynamic,Dynamic> ReplicateReturnType;
+    inline const ReplicateReturnType replicate(Index rowFactor,Index colFactor) const;
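+
+    // Example (illustrative): Eigen::Vector3i(1,2,3).replicate(2,2) yields a
+    // 6x2 matrix consisting of a 2x2 tiling of the vector.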
+
+    typedef Reverse<Derived, BothDirections> ReverseReturnType;
+    typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
+    ReverseReturnType reverse();
+    ConstReverseReturnType reverse() const;
+    void reverseInPlace();
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase
+#   include "../plugins/BlockMethods.h"
+#   ifdef EIGEN_DENSEBASE_PLUGIN
+#     include EIGEN_DENSEBASE_PLUGIN
+#   endif
+#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+#ifdef EIGEN2_SUPPORT
+
+    Block<Derived> corner(CornerType type, Index cRows, Index cCols);
+    const Block<Derived> corner(CornerType type, Index cRows, Index cCols) const;
+    template<int CRows, int CCols>
+    Block<Derived, CRows, CCols> corner(CornerType type);
+    template<int CRows, int CCols>
+    const Block<Derived, CRows, CCols> corner(CornerType type) const;
+
+#endif // EIGEN2_SUPPORT
+
+
+    // disable the use of evalTo for dense objects with a nice compilation error
+    template<typename Dest> inline void evalTo(Dest& ) const
+    {
+      EIGEN_STATIC_ASSERT((internal::is_same<Dest,void>::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);
+    }
+
+  protected:
+    /** Default constructor. Do nothing. */
+    DenseBase()
+    {
+      /* Just checks for self-consistency of the flags.
+       * Only do it when debugging Eigen, as this borders on the paranoid and could slow compilation down.
+       */
+#ifdef EIGEN_INTERNAL_DEBUGGING
+      EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor))
+                        && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))),
+                          INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION)
+#endif
+    }
+
+  private:
+    explicit DenseBase(int);
+    DenseBase(int,int);
+    template<typename OtherDerived> explicit DenseBase(const DenseBase<OtherDerived>&);
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_DENSEBASE_H
diff --git a/Eigen/src/Core/DenseCoeffsBase.h b/Eigen/src/Core/DenseCoeffsBase.h
new file mode 100644
index 0000000..3c890f2
--- /dev/null
+++ b/Eigen/src/Core/DenseCoeffsBase.h
@@ -0,0 +1,754 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DENSECOEFFSBASE_H
+#define EIGEN_DENSECOEFFSBASE_H
+
+namespace Eigen {
+
+namespace internal {
+template<typename T> struct add_const_on_value_type_if_arithmetic
+{
+  typedef typename conditional<is_arithmetic<T>::value, T, typename add_const_on_value_type<T>::type>::type type;
+};
+}
+
+/** \brief Base class providing read-only coefficient access to matrices and arrays.
+  * \ingroup Core_Module
+  * \tparam Derived Type of the derived class
+  * \tparam #ReadOnlyAccessors Constant indicating read-only access
+  *
+  * This class defines the \c operator() \c const function and friends, which can be used to read specific
+  * entries of a matrix or array.
+  * 
+  * \sa DenseCoeffsBase<Derived, WriteAccessors>, DenseCoeffsBase<Derived, DirectAccessors>,
+  *     \ref TopicClassHierarchy
+  */
+template<typename Derived>
+class DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>
+{
+  public:
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+
+    // Explanation for this CoeffReturnType typedef.
+    // - This is the return type of the coeff() method.
+    // - The LvalueBit means that we can offer a coeffRef() method, i.e. that we can get references
+    // to coefficients, and hence that coeff() can return a const reference (as opposed to returning a value).
+    // - The is_arithmetic check is required since "const int", "const double", etc. will cause warnings on some systems,
+    // while the declaration of "const T", where T is a non-arithmetic type, does not. Always returning "const Scalar&" is
+    // not possible, since the underlying expression might not offer a valid address for the reference to refer to.
+    typedef typename internal::conditional<bool(internal::traits<Derived>::Flags&LvalueBit),
+                         const Scalar&,
+                         typename internal::conditional<internal::is_arithmetic<Scalar>::value, Scalar, const Scalar>::type
+                     >::type CoeffReturnType;
+
+    typedef typename internal::add_const_on_value_type_if_arithmetic<
+                         typename internal::packet_traits<Scalar>::type
+                     >::type PacketReturnType;
+
+    typedef EigenBase<Derived> Base;
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::derived;
+
+    EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const
+    {
+      return int(Derived::RowsAtCompileTime) == 1 ? 0
+          : int(Derived::ColsAtCompileTime) == 1 ? inner
+          : int(Derived::Flags)&RowMajorBit ? outer
+          : inner;
+    }
+
+    EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const
+    {
+      return int(Derived::ColsAtCompileTime) == 1 ? 0
+          : int(Derived::RowsAtCompileTime) == 1 ? inner
+          : int(Derived::Flags)&RowMajorBit ? inner
+          : outer;
+    }
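+
+    // Example (illustrative): for a column-major matrix the outer index runs
+    // over columns and the inner index over rows, so
+    // rowIndexByOuterInner(outer, inner) == inner and
+    // colIndexByOuterInner(outer, inner) == outer; row-major storage swaps
+    // the two mappings.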
+
+    /** Short version: don't use this function, use
+      * \link operator()(Index,Index) const \endlink instead.
+      *
+      * Long version: this function is similar to
+      * \link operator()(Index,Index) const \endlink, but without the assertion.
+      * Use this for limiting the performance cost of debugging code when doing
+      * repeated coefficient access. Only use this when it is guaranteed that the
+      * parameters \a row and \a col are in range.
+      *
+      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+      * function equivalent to \link operator()(Index,Index) const \endlink.
+      *
+      * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const
+      */
+    EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      return derived().coeff(row, col);
+    }
+
+    EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
+    {
+      return coeff(rowIndexByOuterInner(outer, inner),
+                   colIndexByOuterInner(outer, inner));
+    }
+
+    /** \returns the coefficient at the given row and column.
+      *
+      * \sa operator()(Index,Index), operator[](Index)
+      */
+    EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const
+    {
+      eigen_assert(row >= 0 && row < rows()
+          && col >= 0 && col < cols());
+      return derived().coeff(row, col);
+    }
+
+    /** Short version: don't use this function, use
+      * \link operator[](Index) const \endlink instead.
+      *
+      * Long version: this function is similar to
+      * \link operator[](Index) const \endlink, but without the assertion.
+      * Use this for limiting the performance cost of debugging code when doing
+      * repeated coefficient access. Only use this when it is guaranteed that the
+      * parameter \a index is in range.
+      *
+      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+      * function equivalent to \link operator[](Index) const \endlink.
+      *
+      * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const
+      */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    coeff(Index index) const
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      return derived().coeff(index);
+    }
+
+
+    /** \returns the coefficient at the given index.
+      *
+      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+      *
+      * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
+      * z() const, w() const
+      */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    operator[](Index index) const
+    {
+      #ifndef EIGEN2_SUPPORT
+      EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
+                          THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
+      #endif
+      eigen_assert(index >= 0 && index < size());
+      return derived().coeff(index);
+    }
+
+    /** \returns the coefficient at the given index.
+      *
+      * This is synonymous to operator[](Index) const.
+      *
+      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+      *
+      * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const,
+      * z() const, w() const
+      */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    operator()(Index index) const
+    {
+      eigen_assert(index >= 0 && index < size());
+      return derived().coeff(index);
+    }
+
+    /** equivalent to operator[](0).  */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    x() const { return (*this)[0]; }
+
+    /** equivalent to operator[](1).  */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    y() const { return (*this)[1]; }
+
+    /** equivalent to operator[](2).  */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    z() const { return (*this)[2]; }
+
+    /** equivalent to operator[](3).  */
+
+    EIGEN_STRONG_INLINE CoeffReturnType
+    w() const { return (*this)[3]; }
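+
+    // Example (illustrative): for Eigen::Vector4f v(1,2,3,4),
+    // v.x() == 1, v.y() == 2, v.z() == 3 and v.w() == 4.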
+
+    /** \internal
+      * \returns the packet of coefficients starting at the given row and column. It is your responsibility
+      * to ensure that a packet really starts there. This method is only available on expressions having the
+      * PacketAccessBit.
+      *
+      * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
+      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+      * starting at an address which is a multiple of the packet size.
+      */
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                      && col >= 0 && col < cols());
+      return derived().template packet<LoadMode>(row,col);
+    }
+
+
+    /** \internal */
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const
+    {
+      return packet<LoadMode>(rowIndexByOuterInner(outer, inner),
+                              colIndexByOuterInner(outer, inner));
+    }
+
+    /** \internal
+      * \returns the packet of coefficients starting at the given index. It is your responsibility
+      * to ensure that a packet really starts there. This method is only available on expressions having the
+      * PacketAccessBit and the LinearAccessBit.
+      *
+      * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
+      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+      * starting at an address which is a multiple of the packet size.
+      */
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      return derived().template packet<LoadMode>(index);
+    }
+
+  protected:
+    // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase.
+    // But some methods are only available in the DirectAccess case.
+    // So we add dummy methods here with these names, so that "using... " doesn't fail.
+    // It's not private so that the child class DenseBase can access them, and it's not public
+    // either since it's an implementation detail, so it has to be protected.
+    void coeffRef();
+    void coeffRefByOuterInner();
+    void writePacket();
+    void writePacketByOuterInner();
+    void copyCoeff();
+    void copyCoeffByOuterInner();
+    void copyPacket();
+    void copyPacketByOuterInner();
+    void stride();
+    void innerStride();
+    void outerStride();
+    void rowStride();
+    void colStride();
+};
+
+/** \brief Base class providing read/write coefficient access to matrices and arrays.
+  * \ingroup Core_Module
+  * \tparam Derived Type of the derived class
+  * \tparam #WriteAccessors Constant indicating read/write access
+  *
+  * This class defines the non-const \c operator() function and friends, which can be used to write specific
+  * entries of a matrix or array. This class inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which
+  * defines the const variant for reading specific entries.
+  * 
+  * \sa DenseCoeffsBase<Derived, DirectAccessors>, \ref TopicClassHierarchy
+  */
+template<typename Derived>
+class DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors>
+{
+  public:
+
+    typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
+
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    using Base::coeff;
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::derived;
+    using Base::rowIndexByOuterInner;
+    using Base::colIndexByOuterInner;
+    using Base::operator[];
+    using Base::operator();
+    using Base::x;
+    using Base::y;
+    using Base::z;
+    using Base::w;
+
+    /** Short version: don't use this function, use
+      * \link operator()(Index,Index) \endlink instead.
+      *
+      * Long version: this function is similar to
+      * \link operator()(Index,Index) \endlink, but without the assertion.
+      * Use this for limiting the performance cost of debugging code when doing
+      * repeated coefficient access. Only use this when it is guaranteed that the
+      * parameters \a row and \a col are in range.
+      *
+      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+      * function equivalent to \link operator()(Index,Index) \endlink.
+      *
+      * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)
+      */
+    EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      return derived().coeffRef(row, col);
+    }
+
+    EIGEN_STRONG_INLINE Scalar&
+    coeffRefByOuterInner(Index outer, Index inner)
+    {
+      return coeffRef(rowIndexByOuterInner(outer, inner),
+                      colIndexByOuterInner(outer, inner));
+    }
+
+    /** \returns a reference to the coefficient at the given row and column.
+      *
+      * \sa operator[](Index)
+      */
+
+    EIGEN_STRONG_INLINE Scalar&
+    operator()(Index row, Index col)
+    {
+      eigen_assert(row >= 0 && row < rows()
+          && col >= 0 && col < cols());
+      return derived().coeffRef(row, col);
+    }
+
+
+    /** Short version: don't use this function, use
+      * \link operator[](Index) \endlink instead.
+      *
+      * Long version: this function is similar to
+      * \link operator[](Index) \endlink, but without the assertion.
+      * Use this for limiting the performance cost of debugging code when doing
+      * repeated coefficient access. Only use this when it is guaranteed that the
+      * parameter \a index is in range.
+      *
+      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
+      * function equivalent to \link operator[](Index) \endlink.
+      *
+      * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)
+      */
+
+    EIGEN_STRONG_INLINE Scalar&
+    coeffRef(Index index)
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      return derived().coeffRef(index);
+    }
+
+    /** \returns a reference to the coefficient at the given index.
+      *
+      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+      *
+      * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
+      */
+
+    EIGEN_STRONG_INLINE Scalar&
+    operator[](Index index)
+    {
+      #ifndef EIGEN2_SUPPORT
+      EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,
+                          THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)
+      #endif
+      eigen_assert(index >= 0 && index < size());
+      return derived().coeffRef(index);
+    }
+
+    /** \returns a reference to the coefficient at the given index.
+      *
+      * This is synonymous to operator[](Index).
+      *
+      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
+      *
+      * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()
+      */
+
+    EIGEN_STRONG_INLINE Scalar&
+    operator()(Index index)
+    {
+      eigen_assert(index >= 0 && index < size());
+      return derived().coeffRef(index);
+    }
+
+    /** equivalent to operator[](0).  */
+
+    EIGEN_STRONG_INLINE Scalar&
+    x() { return (*this)[0]; }
+
+    /** equivalent to operator[](1).  */
+
+    EIGEN_STRONG_INLINE Scalar&
+    y() { return (*this)[1]; }
+
+    /** equivalent to operator[](2).  */
+
+    EIGEN_STRONG_INLINE Scalar&
+    z() { return (*this)[2]; }
+
+    /** equivalent to operator[](3).  */
+
+    EIGEN_STRONG_INLINE Scalar&
+    w() { return (*this)[3]; }
+
+    /** \internal
+      * Stores the given packet of coefficients, at the given row and column of this expression. It is your responsibility
+      * to ensure that a packet really starts there. This method is only available on expressions having the
+      * PacketAccessBit.
+      *
+      * The \a StoreMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select
+      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+      * starting at an address which is a multiple of the packet size.
+      */
+
+    template<int StoreMode>
+    EIGEN_STRONG_INLINE void writePacket
+    (Index row, Index col, const typename internal::packet_traits<Scalar>::type& val)
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      derived().template writePacket<StoreMode>(row,col,val);
+    }
+
+
+    /** \internal */
+    template<int StoreMode>
+    EIGEN_STRONG_INLINE void writePacketByOuterInner
+    (Index outer, Index inner, const typename internal::packet_traits<Scalar>::type& val)
+    {
+      writePacket<StoreMode>(rowIndexByOuterInner(outer, inner),
+                            colIndexByOuterInner(outer, inner),
+                            val);
+    }
+
+    /** \internal
+      * Stores the given packet of coefficients, at the given index in this expression. It is your responsibility
+      * to ensure that a packet really starts there. This method is only available on expressions having the
+      * PacketAccessBit and the LinearAccessBit.
+      *
+      * The \a StoreMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
+      * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
+      * starting at an address which is a multiple of the packet size.
+      */
+    template<int StoreMode>
+    EIGEN_STRONG_INLINE void writePacket
+    (Index index, const typename internal::packet_traits<Scalar>::type& val)
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      derived().template writePacket<StoreMode>(index,val);
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+    /** \internal Copies the coefficient at position (row,col) of other into *this.
+      *
+      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+      * with usual assignments.
+      *
+      * Outside of this internal usage, this method is probably of no use. It is hidden from the public API documentation.
+      */
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      derived().coeffRef(row, col) = other.derived().coeff(row, col);
+    }
+
+    /** \internal Copies the coefficient at the given index of other into *this.
+      *
+      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+      * with usual assignments.
+      *
+      * Outside of this internal usage, this method is probably of no use. It is hidden from the public API documentation.
+      */
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      derived().coeffRef(index) = other.derived().coeff(index);
+    }
+
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void copyCoeffByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
+    {
+      const Index row = rowIndexByOuterInner(outer,inner);
+      const Index col = colIndexByOuterInner(outer,inner);
+      // derived() is important here: copyCoeff() may be reimplemented in Derived!
+      derived().copyCoeff(row, col, other);
+    }
+
+    /** \internal Copies the packet at position (row,col) of other into *this.
+      *
+      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+      * with usual assignments.
+      *
+      * Outside of this internal usage, this method is probably of no use. It is hidden from the public API documentation.
+      */
+
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
+    {
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      derived().template writePacket<StoreMode>(row, col,
+        other.derived().template packet<LoadMode>(row, col));
+    }
+
+    /** \internal Copies the packet at the given index of other into *this.
+      *
+      * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code
+      * with usual assignments.
+      *
+      * Outside of this internal usage, this method is probably of no use. It is hidden from the public API documentation.
+      */
+
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase<OtherDerived>& other)
+    {
+      eigen_internal_assert(index >= 0 && index < size());
+      derived().template writePacket<StoreMode>(index,
+        other.derived().template packet<LoadMode>(index));
+    }
+
+    /** \internal */
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase<OtherDerived>& other)
+    {
+      const Index row = rowIndexByOuterInner(outer,inner);
+      const Index col = colIndexByOuterInner(outer,inner);
+      // derived() is important here: copyCoeff() may be reimplemented in Derived!
+      derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other);
+    }
+#endif
+
+};
+
+/** \brief Base class providing direct read-only coefficient access to matrices and arrays.
+  * \ingroup Core_Module
+  * \tparam Derived Type of the derived class
+  * \tparam #DirectAccessors Constant indicating direct access
+  *
+  * This class defines functions to work with strides which can be used to access entries directly. This class
+  * inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using
+  * \c operator() .
+  *
+  * \sa \ref TopicClassHierarchy
+  */
+template<typename Derived>
+class DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors>
+{
+  public:
+
+    typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::derived;
+
+    /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
+      *
+      * \sa outerStride(), rowStride(), colStride()
+      */
+    inline Index innerStride() const
+    {
+      return derived().innerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
+      *          in a column-major matrix).
+      *
+      * \sa innerStride(), rowStride(), colStride()
+      */
+    inline Index outerStride() const
+    {
+      return derived().outerStride();
+    }
+
+    // FIXME: shall we remove it?
+    inline Index stride() const
+    {
+      return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive rows.
+      *
+      * \sa innerStride(), outerStride(), colStride()
+      */
+    inline Index rowStride() const
+    {
+      return Derived::IsRowMajor ? outerStride() : innerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive columns.
+      *
+      * \sa innerStride(), outerStride(), rowStride()
+      */
+    inline Index colStride() const
+    {
+      return Derived::IsRowMajor ? innerStride() : outerStride();
+    }
+};
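+
+// Example (an illustrative sketch, assuming <Eigen/Dense> is included): for a
+// plain column-major MatrixXd m(3,4), m.innerStride() == 1 and
+// m.outerStride() == 3, while a strided map such as
+//
+//   Eigen::Map<Eigen::MatrixXd, 0, Eigen::OuterStride<> >
+//       view(data, 3, 4, Eigen::OuterStride<>(5));
+//
+// reports view.outerStride() == 5 (here "data" is a hypothetical double*
+// pointing to a buffer large enough for the strided layout).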
+
+/** \brief Base class providing direct read/write coefficient access to matrices and arrays.
+  * \ingroup Core_Module
+  * \tparam Derived Type of the derived class
+  * \tparam #DirectWriteAccessors Constant indicating direct access
+  *
+  * This class defines functions to work with strides which can be used to access entries directly. This class
+  * inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using
+  * \c operator().
+  *
+  * \sa \ref TopicClassHierarchy
+  */
+template<typename Derived>
+class DenseCoeffsBase<Derived, DirectWriteAccessors>
+  : public DenseCoeffsBase<Derived, WriteAccessors>
+{
+  public:
+
+    typedef DenseCoeffsBase<Derived, WriteAccessors> Base;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::derived;
+
+    /** \returns the pointer increment between two consecutive elements within a slice in the inner direction.
+      *
+      * \sa outerStride(), rowStride(), colStride()
+      */
+    inline Index innerStride() const
+    {
+      return derived().innerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns
+      *          in a column-major matrix).
+      *
+      * \sa innerStride(), rowStride(), colStride()
+      */
+    inline Index outerStride() const
+    {
+      return derived().outerStride();
+    }
+
+    // FIXME: shall we remove it?
+    inline Index stride() const
+    {
+      return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive rows.
+      *
+      * \sa innerStride(), outerStride(), colStride()
+      */
+    inline Index rowStride() const
+    {
+      return Derived::IsRowMajor ? outerStride() : innerStride();
+    }
+
+    /** \returns the pointer increment between two consecutive columns.
+      *
+      * \sa innerStride(), outerStride(), rowStride()
+      */
+    inline Index colStride() const
+    {
+      return Derived::IsRowMajor ? innerStride() : outerStride();
+    }
+};
+
+namespace internal {
+
+template<typename Derived, bool JustReturnZero>
+struct first_aligned_impl
+{
+  static inline typename Derived::Index run(const Derived&)
+  { return 0; }
+};
+
+template<typename Derived>
+struct first_aligned_impl<Derived, false>
+{
+  static inline typename Derived::Index run(const Derived& m)
+  {
+    return internal::first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size());
+  }
+};
+
+/** \internal \returns the index of the first element of the array that is well aligned for vectorization.
+  *
+  * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more
+  * documentation.
+  */
+template<typename Derived>
+static inline typename Derived::Index first_aligned(const Derived& m)
+{
+  return first_aligned_impl
+          <Derived, (Derived::Flags & AlignedBit) || !(Derived::Flags & DirectAccessBit)>
+          ::run(m);
+}
+
+template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
+struct inner_stride_at_compile_time
+{
+  enum { ret = traits<Derived>::InnerStrideAtCompileTime };
+};
+
+template<typename Derived>
+struct inner_stride_at_compile_time<Derived, false>
+{
+  enum { ret = 0 };
+};
+
+template<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>
+struct outer_stride_at_compile_time
+{
+  enum { ret = traits<Derived>::OuterStrideAtCompileTime };
+};
+
+template<typename Derived>
+struct outer_stride_at_compile_time<Derived, false>
+{
+  enum { ret = 0 };
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_DENSECOEFFSBASE_H
diff --git a/Eigen/src/Core/DenseStorage.h b/Eigen/src/Core/DenseStorage.h
new file mode 100644
index 0000000..a72738e
--- /dev/null
+++ b/Eigen/src/Core/DenseStorage.h
@@ -0,0 +1,339 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIXSTORAGE_H
+#define EIGEN_MATRIXSTORAGE_H
+
+#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+  #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_DENSE_STORAGE_CTOR_PLUGIN;
+#else
+  #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+struct constructor_without_unaligned_array_assert {};
+
+template<typename T, int Size> void check_static_allocation_size()
+{
+  // if EIGEN_STACK_ALLOCATION_LIMIT is defined as 0, there is no limit
+  #if EIGEN_STACK_ALLOCATION_LIMIT
+  EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+  #endif
+}
+
+/** \internal
+  * Static array. If the MatrixOrArrayOptions require auto-alignment, the array is automatically
+  * aligned to a 16-byte boundary whenever its total size is a multiple of 16 bytes.
+  */
+template <typename T, int Size, int MatrixOrArrayOptions,
+          int Alignment = (MatrixOrArrayOptions&DontAlign) ? 0
+                        : (((Size*sizeof(T))%16)==0) ? 16
+                        : 0 >
+struct plain_array
+{
+  T array[Size];
+
+  plain_array() 
+  { 
+    check_static_allocation_size<T,Size>();
+  }
+
+  plain_array(constructor_without_unaligned_array_assert) 
+  { 
+    check_static_allocation_size<T,Size>();
+  }
+};
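+
+// Example (illustrative): with the default options, plain_array<float,4,0>
+// occupies 16 bytes and therefore selects the 16-byte-aligned specialization
+// below (Alignment == 16), whereas plain_array<float,3,0> (12 bytes)
+// resolves to this unaligned primary template.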
+
+#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
+  #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
+#elif EIGEN_GNUC_AT_LEAST(4,7) 
+  // GCC 4.7 is too aggressive in its optimizations and removes the alignment test, based on the fact that the array is declared to be aligned.
+  // See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900
+  // Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined:
+  template<typename PtrType>
+  EIGEN_ALWAYS_INLINE PtrType eigen_unaligned_array_assert_workaround_gcc47(PtrType array) { return array; }
+  #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \
+    eigen_assert((reinterpret_cast<size_t>(eigen_unaligned_array_assert_workaround_gcc47(array)) & sizemask) == 0 \
+              && "this assertion is explained here: " \
+              "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \
+              " **** READ THIS WEB PAGE !!! ****");
+#else
+  #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \
+    eigen_assert((reinterpret_cast<size_t>(array) & sizemask) == 0 \
+              && "this assertion is explained here: " \
+              "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \
+              " **** READ THIS WEB PAGE !!! ****");
+#endif
+
+template <typename T, int Size, int MatrixOrArrayOptions>
+struct plain_array<T, Size, MatrixOrArrayOptions, 16>
+{
+  EIGEN_USER_ALIGN16 T array[Size];
+
+  plain_array() 
+  { 
+    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf);
+    check_static_allocation_size<T,Size>();
+  }
+
+  plain_array(constructor_without_unaligned_array_assert) 
+  { 
+    check_static_allocation_size<T,Size>();
+  }
+};
+
+template <typename T, int MatrixOrArrayOptions, int Alignment>
+struct plain_array<T, 0, MatrixOrArrayOptions, Alignment>
+{
+  EIGEN_USER_ALIGN16 T array[1];
+  plain_array() {}
+  plain_array(constructor_without_unaligned_array_assert) {}
+};
+
+} // end namespace internal
+
+/** \internal
+  *
+  * \class DenseStorage
+  * \ingroup Core_Module
+  *
+  * \brief Stores the data of a matrix
+  *
+  * This class stores the data of fixed-size, dynamic-size or mixed matrices
+  * in a way as compact as possible.
+  *
+  * \sa Matrix
+  */
+template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage;
+
+// purely fixed-size matrix
+template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage
+{
+    internal::plain_array<T,Size,_Options> m_data;
+  public:
+    inline DenseStorage() {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+      : m_data(internal::constructor_without_unaligned_array_assert()) {}
+    inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
+    static inline DenseIndex rows(void) {return _Rows;}
+    static inline DenseIndex cols(void) {return _Cols;}
+    inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+    inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
+    inline const T *data() const { return m_data.array; }
+    inline T *data() { return m_data.array; }
+};
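+
+// Example (illustrative): a fixed-size type such as Matrix4f resolves to
+// DenseStorage<float, 16, 4, 4, 0>, i.e. a plain 16-element array with no
+// runtime bookkeeping of the dimensions.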
+
+// null matrix
+template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
+{
+  public:
+    inline DenseStorage() {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
+    inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+    inline void swap(DenseStorage& ) {}
+    static inline DenseIndex rows(void) {return _Rows;}
+    static inline DenseIndex cols(void) {return _Cols;}
+    inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+    inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
+    inline const T *data() const { return 0; }
+    inline T *data() { return 0; }
+};
+
+// more specializations for null matrices; these are necessary to resolve ambiguities
+template<typename T, int _Options> class DenseStorage<T, 0, Dynamic, Dynamic, _Options>
+: public DenseStorage<T, 0, 0, 0, _Options> { };
+
+template<typename T, int _Rows, int _Options> class DenseStorage<T, 0, _Rows, Dynamic, _Options>
+: public DenseStorage<T, 0, 0, 0, _Options> { };
+
+template<typename T, int _Cols, int _Options> class DenseStorage<T, 0, Dynamic, _Cols, _Options>
+: public DenseStorage<T, 0, 0, 0, _Options> { };
+
+// dynamic-size matrix with fixed-size storage
+template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic, Dynamic, _Options>
+{
+    internal::plain_array<T,Size,_Options> m_data;
+    DenseIndex m_rows;
+    DenseIndex m_cols;
+  public:
+    inline DenseStorage() : m_rows(0), m_cols(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+      : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
+    inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
+    inline void swap(DenseStorage& other)
+    { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
+    inline DenseIndex rows() const {return m_rows;}
+    inline DenseIndex cols() const {return m_cols;}
+    inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+    inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+    inline const T *data() const { return m_data.array; }
+    inline T *data() { return m_data.array; }
+};
+
+// dynamic-size matrix with fixed-size storage and fixed width
+template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Size, Dynamic, _Cols, _Options>
+{
+    internal::plain_array<T,Size,_Options> m_data;
+    DenseIndex m_rows;
+  public:
+    inline DenseStorage() : m_rows(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+      : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
+    inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
+    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+    inline DenseIndex rows(void) const {return m_rows;}
+    inline DenseIndex cols(void) const {return _Cols;}
+    inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+    inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+    inline const T *data() const { return m_data.array; }
+    inline T *data() { return m_data.array; }
+};
+
+// dynamic-size matrix with fixed-size storage and fixed height
+template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Size, _Rows, Dynamic, _Options>
+{
+    internal::plain_array<T,Size,_Options> m_data;
+    DenseIndex m_cols;
+  public:
+    inline DenseStorage() : m_cols(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+      : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
+    inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
+    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+    inline DenseIndex rows(void) const {return _Rows;}
+    inline DenseIndex cols(void) const {return m_cols;}
+    inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+    inline void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+    inline const T *data() const { return m_data.array; }
+    inline T *data() { return m_data.array; }
+};
+
+// purely dynamic matrix.
+template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynamic, _Options>
+{
+    T *m_data;
+    DenseIndex m_rows;
+    DenseIndex m_cols;
+  public:
+    inline DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+       : m_data(0), m_rows(0), m_cols(0) {}
+    inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+      : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows), m_cols(nbCols)
+    { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
+    inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
+    inline void swap(DenseStorage& other)
+    { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
+    inline DenseIndex rows(void) const {return m_rows;}
+    inline DenseIndex cols(void) const {return m_cols;}
+    inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+    {
+      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
+      m_rows = nbRows;
+      m_cols = nbCols;
+    }
+    void resize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+    {
+      if(size != m_rows*m_cols)
+      {
+        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols);
+        if (size)
+          m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
+        else
+          m_data = 0;
+        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+      }
+      m_rows = nbRows;
+      m_cols = nbCols;
+    }
+    inline const T *data() const { return m_data; }
+    inline T *data() { return m_data; }
+};
+
+// matrix with dynamic width and fixed height (so that matrix has dynamic size).
+template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Rows, Dynamic, _Options>
+{
+    T *m_data;
+    DenseIndex m_cols;
+  public:
+    inline DenseStorage() : m_data(0), m_cols(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
+    inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
+    { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
+    inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
+    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+    static inline DenseIndex rows(void) {return _Rows;}
+    inline DenseIndex cols(void) const {return m_cols;}
+    inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
+    {
+      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
+      m_cols = nbCols;
+    }
+    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex nbCols)
+    {
+      if(size != _Rows*m_cols)
+      {
+        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols);
+        if (size)
+          m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
+        else
+          m_data = 0;
+        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+      }
+      m_cols = nbCols;
+    }
+    inline const T *data() const { return m_data; }
+    inline T *data() { return m_data; }
+};
+
+// matrix with dynamic height and fixed width (so that matrix has dynamic size).
+template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dynamic, _Cols, _Options>
+{
+    T *m_data;
+    DenseIndex m_rows;
+  public:
+    inline DenseStorage() : m_data(0), m_rows(0) {}
+    inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
+    inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
+    { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
+    inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
+    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+    inline DenseIndex rows(void) const {return m_rows;}
+    static inline DenseIndex cols(void) {return _Cols;}
+    inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
+    {
+      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
+      m_rows = nbRows;
+    }
+    EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex nbRows, DenseIndex)
+    {
+      if(size != m_rows*_Cols)
+      {
+        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows);
+        if (size)
+          m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);
+        else
+          m_data = 0;
+        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN
+      }
+      m_rows = nbRows;
+    }
+    inline const T *data() const { return m_data; }
+    inline T *data() { return m_data; }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_H
diff --git a/Eigen/src/Core/Diagonal.h b/Eigen/src/Core/Diagonal.h
new file mode 100644
index 0000000..68cf6d4
--- /dev/null
+++ b/Eigen/src/Core/Diagonal.h
@@ -0,0 +1,237 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DIAGONAL_H
+#define EIGEN_DIAGONAL_H
+
+namespace Eigen { 
+
+/** \class Diagonal
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix
+  *
+  * \param MatrixType the type of the object in which we are taking a sub/main/super diagonal
+  * \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal.
+  *              A positive value means a superdiagonal, a negative value means a subdiagonal.
+  *              You can also use DynamicIndex so the index can be set at runtime.
+  *
+  * The matrix is not required to be square.
+  *
+  * This class represents an expression of the main diagonal, or any sub/super diagonal,
+  * of a matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index),
+  * and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index)
+  */
+
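+// A minimal usage sketch of the diagonal expressions documented above
+// (illustrative values):
+//
+//   Eigen::Matrix3d m = Eigen::Matrix3d::Random();
+//   m.diagonal();      // main diagonal, a 3-coefficient vector expression
+//   m.diagonal<1>();   // first superdiagonal, 2 coefficients
+//   m.diagonal(-1);    // first subdiagonal, index chosen at runtime
+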
+namespace internal {
+template<typename MatrixType, int DiagIndex>
+struct traits<Diagonal<MatrixType,DiagIndex> >
+ : traits<MatrixType>
+{
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+  typedef typename MatrixType::StorageKind StorageKind;
+  enum {
+    RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic
+                      : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
+                                              MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
+    ColsAtCompileTime = 1,
+    MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
+                         : DiagIndex == DynamicIndex ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime,
+                                                                              MatrixType::MaxColsAtCompileTime)
+                         : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),
+                                                 MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),
+    MaxColsAtCompileTime = 1,
+    MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
+    Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit,
+    CoeffReadCost = _MatrixTypeNested::CoeffReadCost,
+    MatrixTypeOuterStride = outer_stride_at_compile_time<MatrixType>::ret,
+    InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1,
+    OuterStrideAtCompileTime = 0
+  };
+};
+}
+
+template<typename MatrixType, int _DiagIndex> class Diagonal
+   : public internal::dense_xpr_base< Diagonal<MatrixType,_DiagIndex> >::type
+{
+  public:
+
+    enum { DiagIndex = _DiagIndex };
+    typedef typename internal::dense_xpr_base<Diagonal>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)
+
+    inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {}
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)
+
+    inline Index rows() const
+    { return m_index.value()<0 ? (std::min<Index>)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min<Index>)(m_matrix.rows(),m_matrix.cols()-m_index.value()); }
+
+    inline Index cols() const { return 1; }
+
+    inline Index innerStride() const
+    {
+      return m_matrix.outerStride() + 1;
+    }
+
+    inline Index outerStride() const
+    {
+      return 0;
+    }
+
+    typedef typename internal::conditional<
+                       internal::is_lvalue<MatrixType>::value,
+                       Scalar,
+                       const Scalar
+                     >::type ScalarWithConstIfNotLvalue;
+
+    inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); }
+    inline const Scalar* data() const { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); }
+
+    inline Scalar& coeffRef(Index row, Index)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+      return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset());
+    }
+
+    inline const Scalar& coeffRef(Index row, Index) const
+    {
+      return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset());
+    }
+
+    inline CoeffReturnType coeff(Index row, Index) const
+    {
+      return m_matrix.coeff(row+rowOffset(), row+colOffset());
+    }
+
+    inline Scalar& coeffRef(Index idx)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+      return m_matrix.const_cast_derived().coeffRef(idx+rowOffset(), idx+colOffset());
+    }
+
+    inline const Scalar& coeffRef(Index idx) const
+    {
+      return m_matrix.const_cast_derived().coeffRef(idx+rowOffset(), idx+colOffset());
+    }
+
+    inline CoeffReturnType coeff(Index idx) const
+    {
+      return m_matrix.coeff(idx+rowOffset(), idx+colOffset());
+    }
+
+    const typename internal::remove_all<typename MatrixType::Nested>::type& 
+    nestedExpression() const 
+    {
+      return m_matrix;
+    }
+
+    int index() const
+    {
+      return m_index.value();
+    }
+
+  protected:
+    typename MatrixType::Nested m_matrix;
+    const internal::variable_if_dynamicindex<Index, DiagIndex> m_index;
+
+  private:
+    // some compilers may fail to optimize std::max etc in case of compile-time constants...
+    EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }
+    EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }
+    EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }
+    // trigger a compile-time error if someone tries to call packet()
+    template<int LoadMode> typename MatrixType::PacketReturnType packet(Index) const;
+    template<int LoadMode> typename MatrixType::PacketReturnType packet(Index,Index) const;
+};
+
+/** \returns an expression of the main diagonal of the matrix \c *this
+  *
+  * \c *this is not required to be square.
+  *
+  * Example: \include MatrixBase_diagonal.cpp
+  * Output: \verbinclude MatrixBase_diagonal.out
+  *
+  * \sa class Diagonal */
+template<typename Derived>
+inline typename MatrixBase<Derived>::DiagonalReturnType
+MatrixBase<Derived>::diagonal()
+{
+  return derived();
+}
+
+/** This is the const version of diagonal(). */
+template<typename Derived>
+inline typename MatrixBase<Derived>::ConstDiagonalReturnType
+MatrixBase<Derived>::diagonal() const
+{
+  return ConstDiagonalReturnType(derived());
+}
+
+/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
+  *
+  * \c *this is not required to be square.
+  *
+  * The template parameter \a DiagIndex represents a superdiagonal if \a DiagIndex > 0
+  * and a subdiagonal if \a DiagIndex < 0. \a DiagIndex == 0 is the main diagonal.
+  *
+  * Example: \include MatrixBase_diagonal_int.cpp
+  * Output: \verbinclude MatrixBase_diagonal_int.out
+  *
+  * \sa MatrixBase::diagonal(), class Diagonal */
+template<typename Derived>
+inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
+MatrixBase<Derived>::diagonal(Index index)
+{
+  return DiagonalDynamicIndexReturnType(derived(), index);
+}
+
+/** This is the const version of diagonal(Index). */
+template<typename Derived>
+inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
+MatrixBase<Derived>::diagonal(Index index) const
+{
+  return ConstDiagonalDynamicIndexReturnType(derived(), index);
+}
+
+/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
+  *
+  * \c *this is not required to be square.
+  *
+  * The template parameter \a DiagIndex represents a superdiagonal if \a DiagIndex > 0
+  * and a subdiagonal if \a DiagIndex < 0. \a DiagIndex == 0 is the main diagonal.
+  *
+  * Example: \include MatrixBase_diagonal_template_int.cpp
+  * Output: \verbinclude MatrixBase_diagonal_template_int.out
+  *
+  * \sa MatrixBase::diagonal(), class Diagonal */
+template<typename Derived>
+template<int Index>
+inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index>::Type
+MatrixBase<Derived>::diagonal()
+{
+  return derived();
+}
+
+/** This is the const version of diagonal<int>(). */
+template<typename Derived>
+template<int Index>
+inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index>::Type
+MatrixBase<Derived>::diagonal() const
+{
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_DIAGONAL_H
diff --git a/Eigen/src/Core/DiagonalMatrix.h b/Eigen/src/Core/DiagonalMatrix.h
new file mode 100644
index 0000000..e6c220f
--- /dev/null
+++ b/Eigen/src/Core/DiagonalMatrix.h
@@ -0,0 +1,313 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DIAGONALMATRIX_H
+#define EIGEN_DIAGONALMATRIX_H
+
+namespace Eigen { 
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename Derived>
+class DiagonalBase : public EigenBase<Derived>
+{
+  public:
+    typedef typename internal::traits<Derived>::DiagonalVectorType DiagonalVectorType;
+    typedef typename DiagonalVectorType::Scalar Scalar;
+    typedef typename DiagonalVectorType::RealScalar RealScalar;
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+
+    enum {
+      RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+      ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+      MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
+      MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,
+      IsVectorAtCompileTime = 0,
+      Flags = 0
+    };
+
+    typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime> DenseMatrixType;
+    typedef DenseMatrixType DenseType;
+    typedef DiagonalMatrix<Scalar,DiagonalVectorType::SizeAtCompileTime,DiagonalVectorType::MaxSizeAtCompileTime> PlainObject;
+
+    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    inline Derived& derived() { return *static_cast<Derived*>(this); }
+
+    DenseMatrixType toDenseMatrix() const { return derived(); }
+    template<typename DenseDerived>
+    void evalTo(MatrixBase<DenseDerived> &other) const;
+    template<typename DenseDerived>
+    void addTo(MatrixBase<DenseDerived> &other) const
+    { other.diagonal() += diagonal(); }
+    template<typename DenseDerived>
+    void subTo(MatrixBase<DenseDerived> &other) const
+    { other.diagonal() -= diagonal(); }
+
+    inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }
+    inline DiagonalVectorType& diagonal() { return derived().diagonal(); }
+
+    inline Index rows() const { return diagonal().size(); }
+    inline Index cols() const { return diagonal().size(); }
+
+    /** \returns the diagonal matrix product of \c *this by the matrix \a matrix.
+      */
+    template<typename MatrixDerived>
+    const DiagonalProduct<MatrixDerived, Derived, OnTheLeft>
+    operator*(const MatrixBase<MatrixDerived> &matrix) const
+    {
+      return DiagonalProduct<MatrixDerived, Derived, OnTheLeft>(matrix.derived(), derived());
+    }
+
+    inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType> >
+    inverse() const
+    {
+      return diagonal().cwiseInverse();
+    }
+    
+    inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> >
+    operator*(const Scalar& scalar) const
+    {
+      return diagonal() * scalar;
+    }
+    friend inline const DiagonalWrapper<const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DiagonalVectorType> >
+    operator*(const Scalar& scalar, const DiagonalBase& other)
+    {
+      return other.diagonal() * scalar;
+    }
+    
+    #ifdef EIGEN2_SUPPORT
+    template<typename OtherDerived>
+    bool isApprox(const DiagonalBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+    {
+      return diagonal().isApprox(other.diagonal(), precision);
+    }
+    template<typename OtherDerived>
+    bool isApprox(const MatrixBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+    {
+      return toDenseMatrix().isApprox(other, precision);
+    }
+    #endif
+};
+
+template<typename Derived>
+template<typename DenseDerived>
+void DiagonalBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
+{
+  other.setZero();
+  other.diagonal() = diagonal();
+}
+#endif
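+
+// Sketch of what evalTo() produces (illustrative values, using the public API
+// declared in this file):
+//
+//   Eigen::DiagonalMatrix<double,2> D(2.0, 3.0);
+//   Eigen::Matrix2d dense = D.toDenseMatrix();   // [2 0; 0 3]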
+
+/** \class DiagonalMatrix
+  * \ingroup Core_Module
+  *
+  * \brief Represents a diagonal matrix with its storage
+  *
+  * \param _Scalar the type of coefficients
+  * \param SizeAtCompileTime the dimension of the matrix, or Dynamic
+  * \param MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. This parameter is optional and defaults
+  *        to SizeAtCompileTime. Most of the time, you do not need to specify it.
+  *
+  * \sa class DiagonalWrapper
+  */
+
+namespace internal {
+template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>
+struct traits<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >
+ : traits<Matrix<_Scalar,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+  typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType;
+  typedef Dense StorageKind;
+  typedef DenseIndex Index;
+  enum {
+    Flags = LvalueBit
+  };
+};
+}
+template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>
+class DiagonalMatrix
+  : public DiagonalBase<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+  public:
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef typename internal::traits<DiagonalMatrix>::DiagonalVectorType DiagonalVectorType;
+    typedef const DiagonalMatrix& Nested;
+    typedef _Scalar Scalar;
+    typedef typename internal::traits<DiagonalMatrix>::StorageKind StorageKind;
+    typedef typename internal::traits<DiagonalMatrix>::Index Index;
+    #endif
+
+  protected:
+
+    DiagonalVectorType m_diagonal;
+
+  public:
+
+    /** const version of diagonal(). */
+    inline const DiagonalVectorType& diagonal() const { return m_diagonal; }
+    /** \returns a reference to the stored vector of diagonal coefficients. */
+    inline DiagonalVectorType& diagonal() { return m_diagonal; }
+
+    /** Default constructor without initialization */
+    inline DiagonalMatrix() {}
+
+    /** Constructs a diagonal matrix with given dimension  */
+    inline DiagonalMatrix(Index dim) : m_diagonal(dim) {}
+
+    /** 2D constructor. */
+    inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {}
+
+    /** 3D constructor. */
+    inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {}
+
+    /** Copy constructor. */
+    template<typename OtherDerived>
+    inline DiagonalMatrix(const DiagonalBase<OtherDerived>& other) : m_diagonal(other.diagonal()) {}
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** Copy constructor. Prevents a default copy constructor from hiding the other templated constructor. */
+    inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {}
+    #endif
+
+    /** generic constructor from expression of the diagonal coefficients */
+    template<typename OtherDerived>
+    explicit inline DiagonalMatrix(const MatrixBase<OtherDerived>& other) : m_diagonal(other)
+    {}
+
+    /** Copy operator. */
+    template<typename OtherDerived>
+    DiagonalMatrix& operator=(const DiagonalBase<OtherDerived>& other)
+    {
+      m_diagonal = other.diagonal();
+      return *this;
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    DiagonalMatrix& operator=(const DiagonalMatrix& other)
+    {
+      m_diagonal = other.diagonal();
+      return *this;
+    }
+    #endif
+
+    /** Resizes to given size. */
+    inline void resize(Index size) { m_diagonal.resize(size); }
+    /** Sets all coefficients to zero. */
+    inline void setZero() { m_diagonal.setZero(); }
+    /** Resizes and sets all coefficients to zero. */
+    inline void setZero(Index size) { m_diagonal.setZero(size); }
+    /** Sets this matrix to be the identity matrix of the current size. */
+    inline void setIdentity() { m_diagonal.setOnes(); }
+    /** Sets this matrix to be the identity matrix of the given size. */
+    inline void setIdentity(Index size) { m_diagonal.setOnes(size); }
+};
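+
+// Minimal usage sketch (illustrative values): left-multiplying by a diagonal
+// matrix scales the rows of the dense factor.
+//
+//   Eigen::DiagonalMatrix<double,3> D(1.0, 2.0, 3.0);
+//   Eigen::Matrix3d A = Eigen::Matrix3d::Ones();
+//   Eigen::Matrix3d B = D * A;   // row i of A is scaled by D.diagonal()(i)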
+
+/** \class DiagonalWrapper
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a diagonal matrix
+  *
+  * \param _DiagonalVectorType the type of the vector of diagonal coefficients
+  *
+  * This class is an expression of a diagonal matrix; it does not store its own vector of diagonal
+  * coefficients, but instead wraps an existing vector expression. It is the return type of MatrixBase::asDiagonal()
+  * and most of the time this is the only way that it is used.
+  *
+  * \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal()
+  */
+
+namespace internal {
+template<typename _DiagonalVectorType>
+struct traits<DiagonalWrapper<_DiagonalVectorType> >
+{
+  typedef _DiagonalVectorType DiagonalVectorType;
+  typedef typename DiagonalVectorType::Scalar Scalar;
+  typedef typename DiagonalVectorType::Index Index;
+  typedef typename DiagonalVectorType::StorageKind StorageKind;
+  enum {
+    RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    MaxRowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    MaxColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,
+    Flags =  traits<DiagonalVectorType>::Flags & LvalueBit
+  };
+};
+}
+
+template<typename _DiagonalVectorType>
+class DiagonalWrapper
+  : public DiagonalBase<DiagonalWrapper<_DiagonalVectorType> >, internal::no_assignment_operator
+{
+  public:
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef _DiagonalVectorType DiagonalVectorType;
+    typedef DiagonalWrapper Nested;
+    #endif
+
+    /** Constructor from expression of diagonal coefficients to wrap. */
+    inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {}
+
+    /** \returns a const reference to the wrapped expression of diagonal coefficients. */
+    const DiagonalVectorType& diagonal() const { return m_diagonal; }
+
+  protected:
+    typename DiagonalVectorType::Nested m_diagonal;
+};
+
+/** \returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients
+  *
+  * \only_for_vectors
+  *
+  * Example: \include MatrixBase_asDiagonal.cpp
+  * Output: \verbinclude MatrixBase_asDiagonal.out
+  *
+  * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal()
+  **/
+template<typename Derived>
+inline const DiagonalWrapper<const Derived>
+MatrixBase<Derived>::asDiagonal() const
+{
+  return derived();
+}
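+
+// For example (illustrative values):
+//
+//   Eigen::Vector3d v(1.0, 2.0, 3.0);
+//   Eigen::Matrix3d m = v.asDiagonal();   // dense 3x3 matrix with v on its diagonal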
+
+/** \returns true if *this is approximately equal to a diagonal matrix,
+  *          within the precision given by \a prec.
+  *
+  * Example: \include MatrixBase_isDiagonal.cpp
+  * Output: \verbinclude MatrixBase_isDiagonal.out
+  *
+  * \sa asDiagonal()
+  */
+template<typename Derived>
+bool MatrixBase<Derived>::isDiagonal(const RealScalar& prec) const
+{
+  using std::abs;
+  if(cols() != rows()) return false;
+  RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
+  for(Index j = 0; j < cols(); ++j)
+  {
+    RealScalar absOnDiagonal = abs(coeff(j,j));
+    if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
+  }
+  for(Index j = 0; j < cols(); ++j)
+    for(Index i = 0; i < j; ++i)
+    {
+      if(!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false;
+      if(!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false;
+    }
+  return true;
+}
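+
+// The test above is relative: an off-diagonal coefficient is acceptable when it
+// is much smaller than the largest absolute diagonal entry. For instance
+// (illustrative values, default precision):
+//
+//   Eigen::Matrix2d m;
+//   m << 1e4,  1e-10,
+//        0.0,  1e4;
+//   bool d = m.isDiagonal();   // true: 1e-10 is negligible against 1e4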
+
+} // end namespace Eigen
+
+#endif // EIGEN_DIAGONALMATRIX_H
diff --git a/Eigen/src/Core/DiagonalProduct.h b/Eigen/src/Core/DiagonalProduct.h
new file mode 100644
index 0000000..00f8f29
--- /dev/null
+++ b/Eigen/src/Core/DiagonalProduct.h
@@ -0,0 +1,130 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DIAGONALPRODUCT_H
+#define EIGEN_DIAGONALPRODUCT_H
+
+namespace Eigen { 
+
+namespace internal {
+template<typename MatrixType, typename DiagonalType, int ProductOrder>
+struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
+ : traits<MatrixType>
+{
+  typedef typename scalar_product_traits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;
+  enum {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+
+    _StorageOrder = MatrixType::Flags & RowMajorBit ? RowMajor : ColMajor,
+    _ScalarAccessOnDiag =  !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft)
+                          ||(int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheRight)),
+    _SameTypes = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value,
+    // FIXME: currently the scalar types must match, but in the future the following rule should be used instead:
+    //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
+    _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
+    _LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0,
+
+    Flags = ((HereditaryBits|_LinearAccessMask|AlignedBit) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
+    CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
+  };
+};
+}
+
+template<typename MatrixType, typename DiagonalType, int ProductOrder>
+class DiagonalProduct : internal::no_assignment_operator,
+                        public MatrixBase<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
+{
+  public:
+
+    typedef MatrixBase<DiagonalProduct> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(DiagonalProduct)
+
+    inline DiagonalProduct(const MatrixType& matrix, const DiagonalType& diagonal)
+      : m_matrix(matrix), m_diagonal(diagonal)
+    {
+      eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols()));
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); }
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
+    {
+      return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col);
+    }
+    
+    EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const
+    {
+      enum {
+        StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor
+      };
+      return coeff(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx);
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const
+    {
+      enum {
+        StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor
+      };
+      const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col;
+      return packet_impl<LoadMode>(row,col,indexInDiagonalVector,typename internal::conditional<
+        ((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft)
+       ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type());
+    }
+    
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index idx) const
+    {
+      enum {
+        StorageOrder = int(MatrixType::Flags) & RowMajorBit ? RowMajor : ColMajor
+      };
+      return packet<LoadMode>(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx);
+    }
+
+  protected:
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::true_type) const
+    {
+      return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
+                     internal::pset1<PacketScalar>(m_diagonal.diagonal().coeff(id)));
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::false_type) const
+    {
+      enum {
+        InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,
+        DiagonalVectorPacketLoadMode = (LoadMode == Aligned && (((InnerSize%16) == 0) || (int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit)==AlignedBit) ? Aligned : Unaligned)
+      };
+      return internal::pmul(m_matrix.template packet<LoadMode>(row, col),
+                     m_diagonal.diagonal().template packet<DiagonalVectorPacketLoadMode>(id));
+    }
+
+    typename MatrixType::Nested m_matrix;
+    typename DiagonalType::Nested m_diagonal;
+};
+
+/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal.
+  */
+template<typename Derived>
+template<typename DiagonalDerived>
+inline const DiagonalProduct<Derived, DiagonalDerived, OnTheRight>
+MatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &a_diagonal) const
+{
+  return DiagonalProduct<Derived, DiagonalDerived, OnTheRight>(derived(), a_diagonal.derived());
+}
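+
+// For example (illustrative values), right-multiplication scales the columns:
+//
+//   Eigen::Matrix3d A = Eigen::Matrix3d::Ones();
+//   Eigen::Vector3d v(1.0, 2.0, 3.0);
+//   Eigen::Matrix3d B = A * v.asDiagonal();   // column j of A is scaled by v(j)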
+
+} // end namespace Eigen
+
+#endif // EIGEN_DIAGONALPRODUCT_H
diff --git a/Eigen/src/Core/Dot.h b/Eigen/src/Core/Dot.h
new file mode 100644
index 0000000..9d7651f
--- /dev/null
+++ b/Eigen/src/Core/Dot.h
@@ -0,0 +1,263 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008, 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DOT_H
+#define EIGEN_DOT_H
+
+namespace Eigen { 
+
+namespace internal {
+
+// helper function for dot(). The problem is that if we put that in the body of dot(), then upon calling dot
+// with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE
+// looking at the static assertions. Thus this is a trick to get better compile errors.
+template<typename T, typename U,
+// the NeedToTranspose condition here is taken straight from Assign.h
+         bool NeedToTranspose = T::IsVectorAtCompileTime
+                && U::IsVectorAtCompileTime
+                && ((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1)
+                      |  // FIXME: | instead of || to silence GCC 4.4.0's warning "suggest parentheses around &&".
+                         // Revert to || as soon as it is no longer needed.
+                    (int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1))
+>
+struct dot_nocheck
+{
+  typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar;
+  static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+  {
+    return a.template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum();
+  }
+};
+
+template<typename T, typename U>
+struct dot_nocheck<T, U, true>
+{
+  typedef typename scalar_product_traits<typename traits<T>::Scalar,typename traits<U>::Scalar>::ReturnType ResScalar;
+  static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)
+  {
+    return a.transpose().template binaryExpr<scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> >(b).sum();
+  }
+};
+
+} // end namespace internal
+
+/** \returns the dot product of *this with other.
+  *
+  * \only_for_vectors
+  *
+  * \note If the scalar type is complex numbers, then this function returns the hermitian
+  * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the
+  * second variable.
+  *
+  * \sa squaredNorm(), norm()
+  */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
+MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+  typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func;
+  EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar);
+
+  eigen_assert(size() == other.size());
+
+  return internal::dot_nocheck<Derived,OtherDerived>::run(*this, other);
+}
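+
+// Illustration of the convention above for complex vectors: the result is
+// conjugate-linear in *this and linear in the argument.
+//
+//   Eigen::Vector2cf a, b;
+//   a.setRandom(); b.setRandom();
+//   std::complex<float> s = a.dot(b);   // equals sum_i conj(a(i)) * b(i)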
+
+#ifdef EIGEN2_SUPPORT
+/** \returns the dot product of *this with other, with the Eigen2 convention that the dot product is linear in the first variable
+  * (conjugating the second variable). Of course this only makes a difference in the complex case.
+  *
+  * This method is only available in EIGEN2_SUPPORT mode.
+  *
+  * \only_for_vectors
+  *
+  * \sa dot()
+  */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+MatrixBase<Derived>::eigen2_dot(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+  eigen_assert(size() == other.size());
+
+  return internal::dot_nocheck<OtherDerived,Derived>::run(other,*this);
+}
+#endif
+
+
+//---------- implementation of L2 norm and related functions ----------
+
+/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm.
+  * In both cases, it is the sum of the squares of all the matrix entries.
+  * For vectors, this is also equal to the dot product of \c *this with itself.
+  *
+  * \sa dot(), norm()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const
+{
+  return numext::real((*this).cwiseAbs2().sum());
+}
+
+/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm.
+  * In both cases, it is the square root of the sum of the squares of all the matrix entries.
+  * For vectors, this is also equal to the square root of the dot product of \c *this with itself.
+  *
+  * \sa dot(), squaredNorm()
+  */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
+{
+  using std::sqrt;
+  return sqrt(squaredNorm());
+}
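+
+// Illustrative relation between the two functions above:
+//
+//   Eigen::Vector3d v(1.0, 2.0, 2.0);
+//   double n2 = v.squaredNorm();   // 9.0, equal to v.dot(v)
+//   double n  = v.norm();          // 3.0, equal to std::sqrt(n2)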
+
+/** \returns an expression of the quotient of *this by its own norm.
+  *
+  * \only_for_vectors
+  *
+  * \sa norm(), normalize()
+  */
+template<typename Derived>
+inline const typename MatrixBase<Derived>::PlainObject
+MatrixBase<Derived>::normalized() const
+{
+  typedef typename internal::nested<Derived>::type Nested;
+  typedef typename internal::remove_reference<Nested>::type _Nested;
+  _Nested n(derived());
+  return n / n.norm();
+}
+
+/** Normalizes the vector, i.e. divides it by its own norm.
+  *
+  * \only_for_vectors
+  *
+  * \sa norm(), normalized()
+  */
+template<typename Derived>
+inline void MatrixBase<Derived>::normalize()
+{
+  *this /= norm();
+}
+
+//---------- implementation of other norms ----------
+
+namespace internal {
+
+template<typename Derived, int p>
+struct lpNorm_selector
+{
+  typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;
+  static inline RealScalar run(const MatrixBase<Derived>& m)
+  {
+    using std::pow;
+    return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p);
+  }
+};
+
+template<typename Derived>
+struct lpNorm_selector<Derived, 1>
+{
+  static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
+  {
+    return m.cwiseAbs().sum();
+  }
+};
+
+template<typename Derived>
+struct lpNorm_selector<Derived, 2>
+{
+  static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
+  {
+    return m.norm();
+  }
+};
+
+template<typename Derived>
+struct lpNorm_selector<Derived, Infinity>
+{
+  static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)
+  {
+    return m.cwiseAbs().maxCoeff();
+  }
+};
+
+} // end namespace internal
+
+/** \returns the \f$ \ell^p \f$ norm of *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values
+  *          of the coefficients of *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$
+  *          norm, that is the maximum of the absolute values of the coefficients of *this.
+  *
+  * \sa norm()
+  */
+template<typename Derived>
+template<int p>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::lpNorm() const
+{
+  return internal::lpNorm_selector<Derived, p>::run(*this);
+}
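+
+// For instance (illustrative values):
+//
+//   Eigen::Vector3d v(1.0, -2.0, 3.0);
+//   v.lpNorm<1>();                 // 6.0, the sum of absolute values
+//   v.lpNorm<2>();                 // same as v.norm(), sqrt(14.0)
+//   v.lpNorm<Eigen::Infinity>();   // 3.0, the maximum absolute value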
+
+//---------- implementation of isOrthogonal / isUnitary ----------
+
+/** \returns true if *this is approximately orthogonal to \a other,
+  *          within the precision given by \a prec.
+  *
+  * Example: \include MatrixBase_isOrthogonal.cpp
+  * Output: \verbinclude MatrixBase_isOrthogonal.out
+  */
+template<typename Derived>
+template<typename OtherDerived>
+bool MatrixBase<Derived>::isOrthogonal
+(const MatrixBase<OtherDerived>& other, const RealScalar& prec) const
+{
+  typename internal::nested<Derived,2>::type nested(derived());
+  typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
+  return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();
+}
+
+/** \returns true if *this is approximately a unitary matrix,
+  *          within the precision given by \a prec. In the case where the \a Scalar
+  *          type is real numbers, a unitary matrix is an orthogonal matrix, whence the name.
+  *
+  * \note This can be used to check whether a family of vectors forms an orthonormal basis.
+  *       Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an
+  *       orthonormal basis.
+  *
+  * Example: \include MatrixBase_isUnitary.cpp
+  * Output: \verbinclude MatrixBase_isUnitary.out
+  */
+template<typename Derived>
+bool MatrixBase<Derived>::isUnitary(const RealScalar& prec) const
+{
+  typename Derived::Nested nested(derived());
+  for(Index i = 0; i < cols(); ++i)
+  {
+    if(!internal::isApprox(nested.col(i).squaredNorm(), static_cast<RealScalar>(1), prec))
+      return false;
+    for(Index j = 0; j < i; ++j)
+      if(!internal::isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast<Scalar>(1), prec))
+        return false;
+  }
+  return true;
+}
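+
+// For example (illustrative), a rotation matrix is unitary:
+//
+//   Eigen::Matrix2d R;
+//   R << 0.0, -1.0,
+//        1.0,  0.0;               // rotation by pi/2
+//   bool u = R.isUnitary();       // true: columns are orthonormal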
+
+} // end namespace Eigen
+
+#endif // EIGEN_DOT_H
diff --git a/Eigen/src/Core/EigenBase.h b/Eigen/src/Core/EigenBase.h
new file mode 100644
index 0000000..fadb458
--- /dev/null
+++ b/Eigen/src/Core/EigenBase.h
@@ -0,0 +1,131 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EIGENBASE_H
+#define EIGEN_EIGENBASE_H
+
+namespace Eigen {
+
+/** Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T).
+  *
+  * In other words, an EigenBase object is an object that can be copied into a MatrixBase.
+  *
+  * Besides MatrixBase-derived classes, this also includes special matrix classes such as diagonal matrices, etc.
+  *
+  * Notice that this class is trivial; it is only used to disambiguate overloaded functions.
+  *
+  * \sa \ref TopicClassHierarchy
+  */
+template<typename Derived> struct EigenBase
+{
+//   typedef typename internal::plain_matrix_type<Derived>::type PlainObject;
+
+  typedef typename internal::traits<Derived>::StorageKind StorageKind;
+  typedef typename internal::traits<Derived>::Index Index;
+
+  /** \returns a reference to the derived object */
+  Derived& derived() { return *static_cast<Derived*>(this); }
+  /** \returns a const reference to the derived object */
+  const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+  inline Derived& const_cast_derived() const
+  { return *static_cast<Derived*>(const_cast<EigenBase*>(this)); }
+  inline const Derived& const_derived() const
+  { return *static_cast<const Derived*>(this); }
+
+  /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
+  inline Index rows() const { return derived().rows(); }
+  /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
+  inline Index cols() const { return derived().cols(); }
+  /** \returns the number of coefficients, which is rows()*cols().
+    * \sa rows(), cols(), SizeAtCompileTime. */
+  inline Index size() const { return rows() * cols(); }
+
+  /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */
+  template<typename Dest> inline void evalTo(Dest& dst) const
+  { derived().evalTo(dst); }
+
+  /** \internal Don't use it, but do the equivalent: \code dst += *this; \endcode */
+  template<typename Dest> inline void addTo(Dest& dst) const
+  {
+    // This is the default implementation,
+    // derived class can reimplement it in a more optimized way.
+    typename Dest::PlainObject res(rows(),cols());
+    evalTo(res);
+    dst += res;
+  }
+
+  /** \internal Don't use it, but do the equivalent: \code dst -= *this; \endcode */
+  template<typename Dest> inline void subTo(Dest& dst) const
+  {
+    // This is the default implementation,
+    // derived class can reimplement it in a more optimized way.
+    typename Dest::PlainObject res(rows(),cols());
+    evalTo(res);
+    dst -= res;
+  }
+
+  /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheRight(*this); \endcode */
+  template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const
+  {
+    // This is the default implementation,
+    // derived class can reimplement it in a more optimized way.
+    dst = dst * this->derived();
+  }
+
+  /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheLeft(*this); \endcode */
+  template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const
+  {
+    // This is the default implementation,
+    // derived class can reimplement it in a more optimized way.
+    dst = this->derived() * dst;
+  }
+
+};
+
+/***************************************************************************
+* Implementation of matrix base methods
+***************************************************************************/
+
+/** \brief Copies the generic expression \a other into *this.
+  *
+  * \details The expression must provide a (templated) evalTo(Derived& dst) const
+  * function which does the actual job. In practice, this allows any user to write
+  * their own special matrix class without having to modify MatrixBase.
+  *
+  * \returns a reference to *this.
+  */
+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
+{
+  other.derived().evalTo(derived());
+  return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
+{
+  other.derived().addTo(derived());
+  return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
+{
+  other.derived().subTo(derived());
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_EIGENBASE_H
diff --git a/Eigen/src/Core/Flagged.h b/Eigen/src/Core/Flagged.h
new file mode 100644
index 0000000..1f2955f
--- /dev/null
+++ b/Eigen/src/Core/Flagged.h
@@ -0,0 +1,140 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FLAGGED_H
+#define EIGEN_FLAGGED_H
+
+namespace Eigen { 
+
+/** \class Flagged
+  * \ingroup Core_Module
+  *
+  * \brief Expression with modified flags
+  *
+  * \param ExpressionType the type of the object of which we are modifying the flags
+  * \param Added the flags added to the expression
+  * \param Removed the flags removed from the expression (has priority over Added).
+  *
+  * This class represents an expression whose flags have been modified.
+  * It is the return type of MatrixBase::flagged()
+  * and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::flagged()
+  */
+
+namespace internal {
+template<typename ExpressionType, unsigned int Added, unsigned int Removed>
+struct traits<Flagged<ExpressionType, Added, Removed> > : traits<ExpressionType>
+{
+  enum { Flags = (ExpressionType::Flags | Added) & ~Removed };
+};
+}
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged
+  : public MatrixBase<Flagged<ExpressionType, Added, Removed> >
+{
+  public:
+
+    typedef MatrixBase<Flagged> Base;
+    
+    EIGEN_DENSE_PUBLIC_INTERFACE(Flagged)
+    typedef typename internal::conditional<internal::must_nest_by_value<ExpressionType>::ret,
+        ExpressionType, const ExpressionType&>::type ExpressionTypeNested;
+    typedef typename ExpressionType::InnerIterator InnerIterator;
+
+    inline Flagged(const ExpressionType& matrix) : m_matrix(matrix) {}
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+    inline Index outerStride() const { return m_matrix.outerStride(); }
+    inline Index innerStride() const { return m_matrix.innerStride(); }
+
+    inline CoeffReturnType coeff(Index row, Index col) const
+    {
+      return m_matrix.coeff(row, col);
+    }
+
+    inline CoeffReturnType coeff(Index index) const
+    {
+      return m_matrix.coeff(index);
+    }
+    
+    inline const Scalar& coeffRef(Index row, Index col) const
+    {
+      return m_matrix.const_cast_derived().coeffRef(row, col);
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      return m_matrix.const_cast_derived().coeffRef(index);
+    }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      return m_matrix.const_cast_derived().coeffRef(row, col);
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      return m_matrix.const_cast_derived().coeffRef(index);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index row, Index col) const
+    {
+      return m_matrix.template packet<LoadMode>(row, col);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index row, Index col, const PacketScalar& x)
+    {
+      m_matrix.const_cast_derived().template writePacket<LoadMode>(row, col, x);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index index) const
+    {
+      return m_matrix.template packet<LoadMode>(index);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& x)
+    {
+      m_matrix.const_cast_derived().template writePacket<LoadMode>(index, x);
+    }
+
+    const ExpressionType& _expression() const { return m_matrix; }
+
+    template<typename OtherDerived>
+    typename ExpressionType::PlainObject solveTriangular(const MatrixBase<OtherDerived>& other) const;
+
+    template<typename OtherDerived>
+    void solveTriangularInPlace(const MatrixBase<OtherDerived>& other) const;
+
+  protected:
+    ExpressionTypeNested m_matrix;
+};
+
+/** \returns an expression of *this with added and removed flags
+  *
+  * This is mostly for internal use.
+  *
+  * \sa class Flagged
+  */
+template<typename Derived>
+template<unsigned int Added,unsigned int Removed>
+inline const Flagged<Derived, Added, Removed>
+DenseBase<Derived>::flagged() const
+{
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_FLAGGED_H
diff --git a/Eigen/src/Core/ForceAlignedAccess.h b/Eigen/src/Core/ForceAlignedAccess.h
new file mode 100644
index 0000000..807c7a2
--- /dev/null
+++ b/Eigen/src/Core/ForceAlignedAccess.h
@@ -0,0 +1,146 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FORCEALIGNEDACCESS_H
+#define EIGEN_FORCEALIGNEDACCESS_H
+
+namespace Eigen {
+
+/** \class ForceAlignedAccess
+  * \ingroup Core_Module
+  *
+  * \brief Enforce aligned packet loads and stores regardless of what is requested
+  *
+  * \param ExpressionType the type of the object of which we are forcing aligned packet access
+  *
+  * This class is the return type of MatrixBase::forceAlignedAccess()
+  * and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::forceAlignedAccess()
+  */
+
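+// A minimal usage sketch (illustrative; the 16-byte alignment of 'data' is an
+// assumption made by the caller, Eigen cannot verify it):
+//   float* data = ...;                   // assumed 16-byte aligned by construction
+//   Eigen::Map<Eigen::Vector4f> v(data);
+//   v.forceAlignedAccess() *= 2.f;       // packet loads/stores are emitted as Aligned
+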
+namespace internal {
+template<typename ExpressionType>
+struct traits<ForceAlignedAccess<ExpressionType> > : public traits<ExpressionType>
+{};
+}
+
+template<typename ExpressionType> class ForceAlignedAccess
+  : public internal::dense_xpr_base< ForceAlignedAccess<ExpressionType> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<ForceAlignedAccess>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess)
+
+    inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {}
+
+    inline Index rows() const { return m_expression.rows(); }
+    inline Index cols() const { return m_expression.cols(); }
+    inline Index outerStride() const { return m_expression.outerStride(); }
+    inline Index innerStride() const { return m_expression.innerStride(); }
+
+    inline const CoeffReturnType coeff(Index row, Index col) const
+    {
+      return m_expression.coeff(row, col);
+    }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      return m_expression.const_cast_derived().coeffRef(row, col);
+    }
+
+    inline const CoeffReturnType coeff(Index index) const
+    {
+      return m_expression.coeff(index);
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index row, Index col) const
+    {
+      return m_expression.template packet<Aligned>(row, col);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index row, Index col, const PacketScalar& x)
+    {
+      m_expression.const_cast_derived().template writePacket<Aligned>(row, col, x);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index index) const
+    {
+      return m_expression.template packet<Aligned>(index);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& x)
+    {
+      m_expression.const_cast_derived().template writePacket<Aligned>(index, x);
+    }
+
+    operator const ExpressionType&() const { return m_expression; }
+
+  protected:
+    const ExpressionType& m_expression;
+
+  private:
+    ForceAlignedAccess& operator=(const ForceAlignedAccess&);
+};
+
+/** \returns an expression of *this with forced aligned access
+  * \sa forceAlignedAccessIf(),class ForceAlignedAccess
+  */
+template<typename Derived>
+inline const ForceAlignedAccess<Derived>
+MatrixBase<Derived>::forceAlignedAccess() const
+{
+  return ForceAlignedAccess<Derived>(derived());
+}
+
+/** \returns an expression of *this with forced aligned access
+  * \sa forceAlignedAccessIf(), class ForceAlignedAccess
+  */
+template<typename Derived>
+inline ForceAlignedAccess<Derived>
+MatrixBase<Derived>::forceAlignedAccess()
+{
+  return ForceAlignedAccess<Derived>(derived());
+}
+
+/** \returns an expression of *this with forced aligned access if \a Enable is true.
+  * \sa forceAlignedAccess(), class ForceAlignedAccess
+  */
+template<typename Derived>
+template<bool Enable>
+inline typename internal::add_const_on_value_type<typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type>::type
+MatrixBase<Derived>::forceAlignedAccessIf() const
+{
+  return derived();
+}
+
+/** \returns an expression of *this with forced aligned access if \a Enable is true.
+  * \sa forceAlignedAccess(), class ForceAlignedAccess
+  */
+template<typename Derived>
+template<bool Enable>
+inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type
+MatrixBase<Derived>::forceAlignedAccessIf()
+{
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_FORCEALIGNEDACCESS_H
diff --git a/Eigen/src/Core/Functors.h b/Eigen/src/Core/Functors.h
new file mode 100644
index 0000000..5f14c65
--- /dev/null
+++ b/Eigen/src/Core/Functors.h
@@ -0,0 +1,1026 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FUNCTORS_H
+#define EIGEN_FUNCTORS_H
+
+namespace Eigen {
+
+namespace internal {
+
+// associative functors:
+
+/** \internal
+  * \brief Template functor to compute the sum of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, MatrixBase::sum()
+  */
+template<typename Scalar> struct scalar_sum_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::padd(a,b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
+  { return internal::predux(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sum_op<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasAdd
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the product of two scalars
+  *
+  * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux()
+  */
+template<typename LhsScalar,typename RhsScalar> struct scalar_product_op {
+  enum {
+    // TODO vectorize mixed product
+    Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasMul && packet_traits<RhsScalar>::HasMul
+  };
+  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op)
+  EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pmul(a,b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const
+  { return internal::predux_mul(a); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_product_op<LhsScalar,RhsScalar> > {
+  enum {
+    Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost)/2, // rough estimate!
+    PacketAccess = scalar_product_op<LhsScalar,RhsScalar>::Vectorizable
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the conjugate product of two scalars
+  *
+  * This is a shortcut for conj(x) * y, which is needed for optimization purposes; in Eigen2 support mode, this becomes x * conj(y)
+  */
+template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op {
+
+  enum {
+    Conj = NumTraits<LhsScalar>::IsComplex
+  };
+  
+  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
+  
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op)
+  EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const
+  { return conj_helper<LhsScalar,RhsScalar,Conj,false>().pmul(a,b); }
+  
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return conj_helper<Packet,Packet,Conj,false>().pmul(a,b); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_conj_product_op<LhsScalar,RhsScalar> > {
+  enum {
+    Cost = NumTraits<LhsScalar>::MulCost,
+    PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMul
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the min of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class VectorwiseOp, MatrixBase::minCoeff()
+  */
+template<typename Scalar> struct scalar_min_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return (min)(a, b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pmin(a,b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
+  { return internal::predux_min(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_min_op<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasMin
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the max of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class VectorwiseOp, MatrixBase::maxCoeff()
+  */
+template<typename Scalar> struct scalar_max_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return (max)(a, b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pmax(a,b); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const
+  { return internal::predux_max(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_max_op<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasMax
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the hypot of two scalars
+  *
+  * \sa MatrixBase::stableNorm(), class Redux
+  */
+template<typename Scalar> struct scalar_hypot_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op)
+//   typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const
+  {
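+    // Stable evaluation of sqrt(_x^2 + _y^2): with p the larger and q the smaller
+    // of the two operands, compute p * sqrt(1 + (q/p)^2) so that the squares can
+    // neither overflow nor underflow. (Note: _x == _y == 0 yields 0/0, hence NaN.)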
+    using std::max;
+    using std::min;
+    using std::sqrt;
+    Scalar p = (max)(_x, _y);
+    Scalar q = (min)(_x, _y);
+    Scalar qp = q/p;
+    return p * sqrt(Scalar(1) + qp*qp);
+  }
+};
+template<typename Scalar>
+struct functor_traits<scalar_hypot_op<Scalar> > {
+  enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess=0 };
+};
+
+/** \internal
+  * \brief Template functor to compute the pow of two scalars
+  */
+template<typename Scalar, typename OtherScalar> struct scalar_binary_pow_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_binary_pow_op)
+  inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return numext::pow(a, b); }
+};
+template<typename Scalar, typename OtherScalar>
+struct functor_traits<scalar_binary_pow_op<Scalar,OtherScalar> > {
+  enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };
+};
+
+// other binary functors:
+
+/** \internal
+  * \brief Template functor to compute the difference of two scalars
+  *
+  * \sa class CwiseBinaryOp, MatrixBase::operator-
+  */
+template<typename Scalar> struct scalar_difference_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a - b; }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::psub(a,b); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_difference_op<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasSub
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the quotient of two scalars
+  *
+  * \sa class CwiseBinaryOp, Cwise::operator/()
+  */
+template<typename LhsScalar,typename RhsScalar> struct scalar_quotient_op {
+  enum {
+    // TODO vectorize mixed product
+    Vectorizable = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasDiv && packet_traits<RhsScalar>::HasDiv
+  };
+  typedef typename scalar_product_traits<LhsScalar,RhsScalar>::ReturnType result_type;
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)
+  EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const
+  { return internal::pdiv(a,b); }
+};
+template<typename LhsScalar,typename RhsScalar>
+struct functor_traits<scalar_quotient_op<LhsScalar,RhsScalar> > {
+  enum {
+    Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost), // rough estimate!
+    PacketAccess = scalar_quotient_op<LhsScalar,RhsScalar>::Vectorizable
+  };
+};
+
+
+
+/** \internal
+  * \brief Template functor to compute the and of two booleans
+  *
+  * \sa class CwiseBinaryOp, ArrayBase::operator&&
+  */
+struct scalar_boolean_and_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_and_op)
+  EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a && b; }
+};
+template<> struct functor_traits<scalar_boolean_and_op> {
+  enum {
+    Cost = NumTraits<bool>::AddCost,
+    PacketAccess = false
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the or of two booleans
+  *
+  * \sa class CwiseBinaryOp, ArrayBase::operator||
+  */
+struct scalar_boolean_or_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_or_op)
+  EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a || b; }
+};
+template<> struct functor_traits<scalar_boolean_or_op> {
+  enum {
+    Cost = NumTraits<bool>::AddCost,
+    PacketAccess = false
+  };
+};
+
+/** \internal
+  * \brief Template functors for comparison of two scalars
+  * \todo Implement packet-comparisons
+  */
+template<typename Scalar, ComparisonName cmp> struct scalar_cmp_op;
+
+template<typename Scalar, ComparisonName cmp>
+struct functor_traits<scalar_cmp_op<Scalar, cmp> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = false
+  };
+};
+
+template<ComparisonName Cmp, typename Scalar>
+struct result_of<scalar_cmp_op<Scalar, Cmp>(Scalar,Scalar)> {
+  typedef bool type;
+};
+
+
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_EQ> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a==b;}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LT> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<b;}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LE> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<=b;}
+};
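+// cmp_UNORD: "unordered", i.e. true iff at least one operand is NaN, in which case
+// (for floating point types) neither a<=b nor b<=a holds.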
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_UNORD> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return !(a<=b || b<=a);}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_NEQ> {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+  EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a!=b;}
+};
+
+// unary functors:
+
+/** \internal
+  * \brief Template functor to compute the opposite of a scalar
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::operator-
+  */
+template<typename Scalar> struct scalar_opposite_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_opposite_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pnegate(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_opposite_op<Scalar> >
+{ enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasNegate };
+};
+
+/** \internal
+  * \brief Template functor to compute the absolute value of a scalar
+  *
+  * \sa class CwiseUnaryOp, Cwise::abs
+  */
+template<typename Scalar> struct scalar_abs_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { using std::abs; return abs(a); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pabs(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_abs_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::AddCost,
+    PacketAccess = packet_traits<Scalar>::HasAbs
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the squared absolute value of a scalar
+  *
+  * \sa class CwiseUnaryOp, Cwise::abs2
+  */
+template<typename Scalar> struct scalar_abs2_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::abs2(a); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pmul(a,a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_abs2_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasAbs2 }; };
+
+/** \internal
+  * \brief Template functor to compute the conjugate of a complex value
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::conjugate()
+  */
+template<typename Scalar> struct scalar_conjugate_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op)
+  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { using numext::conj; return conj(a); }
+  template<typename Packet>
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_conjugate_op<Scalar> >
+{
+  enum {
+    Cost = NumTraits<Scalar>::IsComplex ? NumTraits<Scalar>::AddCost : 0,
+    PacketAccess = packet_traits<Scalar>::HasConj
+  };
+};
+
+/** \internal
+  * \brief Template functor to cast a scalar to another type
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::cast()
+  */
+template<typename Scalar, typename NewType>
+struct scalar_cast_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)
+  typedef NewType result_type;
+  EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return cast<Scalar, NewType>(a); }
+};
+template<typename Scalar, typename NewType>
+struct functor_traits<scalar_cast_op<Scalar,NewType> >
+{ enum { Cost = is_same<Scalar, NewType>::value ? 0 : NumTraits<NewType>::AddCost, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the real part of a complex
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::real()
+  */
+template<typename Scalar>
+struct scalar_real_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::real(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_real_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the imaginary part of a complex
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::imag()
+  */
+template<typename Scalar>
+struct scalar_imag_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::imag(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_imag_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the real part of a complex as a reference
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::real()
+  */
+template<typename Scalar>
+struct scalar_real_ref_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::real_ref(*const_cast<Scalar*>(&a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_real_ref_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to extract the imaginary part of a complex as a reference
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::imag()
+  */
+template<typename Scalar>
+struct scalar_imag_ref_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op)
+  typedef typename NumTraits<Scalar>::Real result_type;
+  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::imag_ref(*const_cast<Scalar*>(&a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_imag_ref_op<Scalar> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+/** \internal
+  *
+  * \brief Template functor to compute the exponential of a scalar
+  *
+  * \sa class CwiseUnaryOp, Cwise::exp()
+  */
+template<typename Scalar> struct scalar_exp_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op)
+  inline const Scalar operator() (const Scalar& a) const { using std::exp; return exp(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::pexp(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_exp_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasExp }; };
+
+/** \internal
+  *
+  * \brief Template functor to compute the logarithm of a scalar
+  *
+  * \sa class CwiseUnaryOp, Cwise::log()
+  */
+template<typename Scalar> struct scalar_log_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op)
+  inline const Scalar operator() (const Scalar& a) const { using std::log; return log(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::plog(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_log_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasLog }; };
+
+/** \internal
+  * \brief Template functor to multiply a scalar by a fixed other one
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::operator*, MatrixBase::operator/
+  */
+/* NOTE on why doing the pset1() in packetOp() *is* an optimization:
+ * it may seem better to declare m_other as a Packet and do the pset1() once
+ * in the constructor. However, in practice:
+ *  - GCC does not like m_other as a Packet and generates a load every time it needs it
+ *  - on the other hand GCC is able to move the pset1() outside the loop :)
+ *  - simpler code ;)
+ * (ICC and gcc 4.4 seem to perform well in both cases; the issue is visible with y = a*x + b*y)
+ */
+template<typename Scalar>
+struct scalar_multiple_op {
+  typedef typename packet_traits<Scalar>::type Packet;
+  // FIXME the default copy constructor seems bugged with std::complex<>
+  EIGEN_STRONG_INLINE scalar_multiple_op(const scalar_multiple_op& other) : m_other(other.m_other) { }
+  EIGEN_STRONG_INLINE scalar_multiple_op(const Scalar& other) : m_other(other) { }
+  EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; }
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pmul(a, pset1<Packet>(m_other)); }
+  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_multiple_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+template<typename Scalar1, typename Scalar2>
+struct scalar_multiple2_op {
+  typedef typename scalar_product_traits<Scalar1,Scalar2>::ReturnType result_type;
+  EIGEN_STRONG_INLINE scalar_multiple2_op(const scalar_multiple2_op& other) : m_other(other.m_other) { }
+  EIGEN_STRONG_INLINE scalar_multiple2_op(const Scalar2& other) : m_other(other) { }
+  EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a * m_other; }
+  typename add_const_on_value_type<typename NumTraits<Scalar2>::Nested>::type m_other;
+};
+template<typename Scalar1,typename Scalar2>
+struct functor_traits<scalar_multiple2_op<Scalar1,Scalar2> >
+{ enum { Cost = NumTraits<Scalar1>::MulCost, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to divide a scalar by a fixed other one
+  *
+  * This functor is used to implement the quotient of a matrix by
+  * a scalar where the scalar type is not necessarily a floating point type.
+  *
+  * \sa class CwiseUnaryOp, MatrixBase::operator/
+  */
+template<typename Scalar>
+struct scalar_quotient1_op {
+  typedef typename packet_traits<Scalar>::type Packet;
+  // FIXME the default copy constructor seems bugged with std::complex<>
+  EIGEN_STRONG_INLINE scalar_quotient1_op(const scalar_quotient1_op& other) : m_other(other.m_other) { }
+  EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other) : m_other(other) {}
+  EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; }
+  EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const
+  { return internal::pdiv(a, pset1<Packet>(m_other)); }
+  typename add_const_on_value_type<typename NumTraits<Scalar>::Nested>::type m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_quotient1_op<Scalar> >
+{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
+
+// nullary functors
+
+template<typename Scalar>
+struct scalar_constant_op {
+  typedef typename packet_traits<Scalar>::type Packet;
+  EIGEN_STRONG_INLINE scalar_constant_op(const scalar_constant_op& other) : m_other(other.m_other) { }
+  EIGEN_STRONG_INLINE scalar_constant_op(const Scalar& other) : m_other(other) { }
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index, Index = 0) const { return m_other; }
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Packet packetOp(Index, Index = 0) const { return internal::pset1<Packet>(m_other); }
+  const Scalar m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_constant_op<Scalar> >
+// FIXME replace this packet test by a safe one
+{ enum { Cost = 1, PacketAccess = packet_traits<Scalar>::Vectorizable, IsRepeatable = true }; };
+
+template<typename Scalar> struct scalar_identity_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_identity_op)
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const { return row==col ? Scalar(1) : Scalar(0); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_identity_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };
+
+template <typename Scalar, bool RandomAccess> struct linspaced_op_impl;
+
+// linear access for packet ops:
+// 1) initialization
+//   base = [low, ..., low] + ([step, ..., step] * [-size, ..., 0])
+// 2) each step (where size is 1 for coeff access or PacketSize for packet access)
+//   base += [size*step, ..., size*step]
+//
+// TODO: Perhaps it's better to initialize lazily (i.e. not in the constructor but
+//       in packetOp) in order to avoid the padd() in operator()?
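+//
+// Example (illustrative) with low=0, step=1 and PacketSize=4: m_base starts at
+// [-4,-3,-2,-1]; each packetOp() then adds m_packetStep=[4,4,4,4], yielding
+// [0,1,2,3], [4,5,6,7], ...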
+template <typename Scalar>
+struct linspaced_op_impl<Scalar,false>
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+
+  linspaced_op_impl(const Scalar& low, const Scalar& step) :
+  m_low(low), m_step(step),
+  m_packetStep(pset1<Packet>(packet_traits<Scalar>::size*step)),
+  m_base(padd(pset1<Packet>(low), pmul(pset1<Packet>(step),plset<Scalar>(-packet_traits<Scalar>::size)))) {}
+
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index i) const 
+  { 
+    m_base = padd(m_base, pset1<Packet>(m_step));
+    return m_low+Scalar(i)*m_step; 
+  }
+
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Packet packetOp(Index) const { return m_base = padd(m_base,m_packetStep); }
+
+  const Scalar m_low;
+  const Scalar m_step;
+  const Packet m_packetStep;
+  mutable Packet m_base;
+};
+
+// random access for packet ops:
+// 1) each step
+//   [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) )
+template <typename Scalar>
+struct linspaced_op_impl<Scalar,true>
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+
+  linspaced_op_impl(const Scalar& low, const Scalar& step) :
+  m_low(low), m_step(step),
+  m_lowPacket(pset1<Packet>(m_low)), m_stepPacket(pset1<Packet>(m_step)), m_interPacket(plset<Scalar>(0)) {}
+
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; }
+
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Packet packetOp(Index i) const
+  { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(Scalar(i)),m_interPacket))); }
+
+  const Scalar m_low;
+  const Scalar m_step;
+  const Packet m_lowPacket;
+  const Packet m_stepPacket;
+  const Packet m_interPacket;
+};
+
+// ----- Linspace functor ----------------------------------------------------------------
+
+// Forward declaration (we default to random access which does not really give
+// us a speed gain when using packet access but it allows using the functor in
+// nested expressions).
+template <typename Scalar, bool RandomAccess = true> struct linspaced_op;
+template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_op<Scalar,RandomAccess> >
+{ enum { Cost = 1, PacketAccess = packet_traits<Scalar>::HasSetLinear, IsRepeatable = true }; };
+template <typename Scalar, bool RandomAccess> struct linspaced_op
+{
+  typedef typename packet_traits<Scalar>::type Packet;
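+  // For num_steps==1 the step (high-low)/(num_steps-1) would divide by zero, so the
+  // single generated value is pinned to 'high' and the step is set to zero instead.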
+  linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {}
+
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }
+
+  // We need this function when assigning e.g. a RowVectorXd to a MatrixXd, since
+  // in that case row==0 and col is used for the actual iteration.
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const 
+  {
+    eigen_assert(col==0 || row==0);
+    return impl(col + row);
+  }
+
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Packet packetOp(Index i) const { return impl.packetOp(i); }
+
+  // We need this function when assigning e.g. a RowVectorXd to a MatrixXd, since
+  // in that case row==0 and col is used for the actual iteration.
+  template<typename Index>
+  EIGEN_STRONG_INLINE const Packet packetOp(Index row, Index col) const
+  {
+    eigen_assert(col==0 || row==0);
+    return impl.packetOp(col + row);
+  }
+
+  // This proxy object handles the actual required temporaries, the different
+  // implementations (random vs. sequential access) as well as the
+  // correct piping to size 2/4 packet operations.
+  const linspaced_op_impl<Scalar,RandomAccess> impl;
+};
+
+// All functors allow linear access, except scalar_identity_op. So we provide here a quick
+// meta-helper indicating whether a functor allows linear access, always answering 'yes'
+// except for scalar_identity_op.
+// FIXME move this to functor_traits adding a functor_default
+template<typename Functor> struct functor_has_linear_access { enum { ret = 1 }; };
+template<typename Scalar> struct functor_has_linear_access<scalar_identity_op<Scalar> > { enum { ret = 0 }; };
+
+// In Eigen, any binary op (Product, CwiseBinaryOp) requires the Lhs and Rhs to have the same scalar type, except for multiplication,
+// where the mixing of different types is handled by scalar_product_traits.
+// In particular, real * complex<real> is allowed.
+// FIXME move this to functor_traits adding a functor_default
+template<typename Functor> struct functor_is_product_like { enum { ret = 0 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_conj_product_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+template<typename LhsScalar,typename RhsScalar> struct functor_is_product_like<scalar_quotient_op<LhsScalar,RhsScalar> > { enum { ret = 1 }; };
+
+
+/** \internal
+  * \brief Template functor to add a scalar to a fixed other one
+  * \sa class CwiseUnaryOp, Array::operator+
+  */
+/* If you wonder why doing the pset1() in packetOp() is an optimization, check scalar_multiple_op */
+template<typename Scalar>
+struct scalar_add_op {
+  typedef typename packet_traits<Scalar>::type Packet;
+  // FIXME the default copy constructor seems bugged with std::complex<>
+  inline scalar_add_op(const scalar_add_op& other) : m_other(other.m_other) { }
+  inline scalar_add_op(const Scalar& other) : m_other(other) { }
+  inline Scalar operator() (const Scalar& a) const { return a + m_other; }
+  inline const Packet packetOp(const Packet& a) const
+  { return internal::padd(a, pset1<Packet>(m_other)); }
+  const Scalar m_other;
+};
+template<typename Scalar>
+struct functor_traits<scalar_add_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = packet_traits<Scalar>::HasAdd }; };
+
+/** \internal
+  * \brief Template functor to compute the square root of a scalar
+  * \sa class CwiseUnaryOp, Cwise::sqrt()
+  */
+template<typename Scalar> struct scalar_sqrt_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op)
+  inline const Scalar operator() (const Scalar& a) const { using std::sqrt; return sqrt(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sqrt_op<Scalar> >
+{ enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasSqrt
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the cosine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::cos()
+  */
+template<typename Scalar> struct scalar_cos_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op)
+  inline Scalar operator() (const Scalar& a) const { using std::cos; return cos(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::pcos(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_cos_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasCos
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the sine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::sin()
+  */
+template<typename Scalar> struct scalar_sin_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op)
+  inline const Scalar operator() (const Scalar& a) const { using std::sin; return sin(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::psin(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_sin_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasSin
+  };
+};
+
+
+/** \internal
+  * \brief Template functor to compute the tan of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::tan()
+  */
+template<typename Scalar> struct scalar_tan_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op)
+  inline const Scalar operator() (const Scalar& a) const { using std::tan; return tan(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::ptan(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_tan_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasTan
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the arc cosine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::acos()
+  */
+template<typename Scalar> struct scalar_acos_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op)
+  inline const Scalar operator() (const Scalar& a) const { using std::acos; return acos(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::pacos(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_acos_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasACos
+  };
+};
+
+/** \internal
+  * \brief Template functor to compute the arc sine of a scalar
+  * \sa class CwiseUnaryOp, ArrayBase::asin()
+  */
+template<typename Scalar> struct scalar_asin_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op)
+  inline const Scalar operator() (const Scalar& a) const { using std::asin; return asin(a); }
+  typedef typename packet_traits<Scalar>::type Packet;
+  inline Packet packetOp(const Packet& a) const { return internal::pasin(a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_asin_op<Scalar> >
+{
+  enum {
+    Cost = 5 * NumTraits<Scalar>::MulCost,
+    PacketAccess = packet_traits<Scalar>::HasASin
+  };
+};
+
+/** \internal
+  * \brief Template functor to raise a scalar to a power
+  * \sa class CwiseUnaryOp, Cwise::pow
+  */
+template<typename Scalar>
+struct scalar_pow_op {
+  // FIXME the default copy constructor seems bugged with std::complex<>
+  inline scalar_pow_op(const scalar_pow_op& other) : m_exponent(other.m_exponent) { }
+  inline scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {}
+  inline Scalar operator() (const Scalar& a) const { return numext::pow(a, m_exponent); }
+  const Scalar m_exponent;
+};
+template<typename Scalar>
+struct functor_traits<scalar_pow_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
+
+/** \internal
+  * \brief Template functor to compute the quotient between a scalar and array entries.
+  * \sa class CwiseUnaryOp, Cwise::inverse()
+  */
+template<typename Scalar>
+struct scalar_inverse_mult_op {
+  scalar_inverse_mult_op(const Scalar& other) : m_other(other) {}
+  inline Scalar operator() (const Scalar& a) const { return m_other / a; }
+  template<typename Packet>
+  inline const Packet packetOp(const Packet& a) const
+  { return internal::pdiv(pset1<Packet>(m_other),a); }
+  Scalar m_other;
+};
+
+/** \internal
+  * \brief Template functor to compute the inverse of a scalar
+  * \sa class CwiseUnaryOp, Cwise::inverse()
+  */
+template<typename Scalar>
+struct scalar_inverse_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_inverse_op)
+  inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; }
+  template<typename Packet>
+  inline const Packet packetOp(const Packet& a) const
+  { return internal::pdiv(pset1<Packet>(Scalar(1)),a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_inverse_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };
+
+/** \internal
+  * \brief Template functor to compute the square of a scalar
+  * \sa class CwiseUnaryOp, Cwise::square()
+  */
+template<typename Scalar>
+struct scalar_square_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op)
+  inline Scalar operator() (const Scalar& a) const { return a*a; }
+  template<typename Packet>
+  inline const Packet packetOp(const Packet& a) const
+  { return internal::pmul(a,a); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_square_op<Scalar> >
+{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+/** \internal
+  * \brief Template functor to compute the cube of a scalar
+  * \sa class CwiseUnaryOp, Cwise::cube()
+  */
+template<typename Scalar>
+struct scalar_cube_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op)
+  inline Scalar operator() (const Scalar& a) const { return a*a*a; }
+  template<typename Packet>
+  inline const Packet packetOp(const Packet& a) const
+  { return internal::pmul(a,pmul(a,a)); }
+};
+template<typename Scalar>
+struct functor_traits<scalar_cube_op<Scalar> >
+{ enum { Cost = 2*NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };
+
+// default functor traits for STL functors:
+
+template<typename T>
+struct functor_traits<std::multiplies<T> >
+{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::divides<T> >
+{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::plus<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::minus<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::negate<T> >
+{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_or<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_and<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::logical_not<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::greater<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::less<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::greater_equal<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::less_equal<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::equal_to<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::not_equal_to<T> >
+{ enum { Cost = 1, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::binder2nd<T> >
+{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::binder1st<T> >
+{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::unary_negate<T> >
+{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
+
+template<typename T>
+struct functor_traits<std::binary_negate<T> >
+{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };
+
+#ifdef EIGEN_STDEXT_SUPPORT
+
+template<typename T0,typename T1>
+struct functor_traits<std::project1st<T0,T1> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::project2nd<T0,T1> >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::select2nd<std::pair<T0,T1> > >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::select1st<std::pair<T0,T1> > >
+{ enum { Cost = 0, PacketAccess = false }; };
+
+template<typename T0,typename T1>
+struct functor_traits<std::unary_compose<T0,T1> >
+{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost, PacketAccess = false }; };
+
+template<typename T0,typename T1,typename T2>
+struct functor_traits<std::binary_compose<T0,T1,T2> >
+{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost + functor_traits<T2>::Cost, PacketAccess = false }; };
+
+#endif // EIGEN_STDEXT_SUPPORT
+
+// Allow adding new functors and specializations of functor_traits from outside Eigen.
+// This macro is really needed because functor_traits must be specialized after it is declared but before it is used...
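+// Illustrative usage (header name assumed): before including any Eigen header, do
+//   #define EIGEN_FUNCTORS_PLUGIN "MyFunctorsPlugin.h"
+// and define the extra functors and their functor_traits specializations in that file.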
+#ifdef EIGEN_FUNCTORS_PLUGIN
+#include EIGEN_FUNCTORS_PLUGIN
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_FUNCTORS_H
diff --git a/Eigen/src/Core/Fuzzy.h b/Eigen/src/Core/Fuzzy.h
new file mode 100644
index 0000000..fe63bd2
--- /dev/null
+++ b/Eigen/src/Core/Fuzzy.h
@@ -0,0 +1,150 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FUZZY_H
+#define EIGEN_FUZZY_H
+
+namespace Eigen { 
+
+namespace internal
+{
+
+template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
+struct isApprox_selector
+{
+  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)
+  {
+    using std::min;
+    typename internal::nested<Derived,2>::type nested(x);
+    typename internal::nested<OtherDerived,2>::type otherNested(y);
+    return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());
+  }
+};
+
+template<typename Derived, typename OtherDerived>
+struct isApprox_selector<Derived, OtherDerived, true>
+{
+  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&)
+  {
+    return x.matrix() == y.matrix();
+  }
+};
+
+template<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
+struct isMuchSmallerThan_object_selector
+{
+  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)
+  {
+    return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum();
+  }
+};
+
+template<typename Derived, typename OtherDerived>
+struct isMuchSmallerThan_object_selector<Derived, OtherDerived, true>
+{
+  static bool run(const Derived& x, const OtherDerived&, const typename Derived::RealScalar&)
+  {
+    return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
+  }
+};
+
+template<typename Derived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>
+struct isMuchSmallerThan_scalar_selector
+{
+  static bool run(const Derived& x, const typename Derived::RealScalar& y, const typename Derived::RealScalar& prec)
+  {
+    return x.cwiseAbs2().sum() <= numext::abs2(prec * y);
+  }
+};
+
+template<typename Derived>
+struct isMuchSmallerThan_scalar_selector<Derived, true>
+{
+  static bool run(const Derived& x, const typename Derived::RealScalar&, const typename Derived::RealScalar&)
+  {
+    return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();
+  }
+};
+
+} // end namespace internal
+
+
+/** \returns \c true if \c *this is approximately equal to \a other, within the precision
+  * determined by \a prec.
+  *
+  * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
+  * are considered to be approximately equal within precision \f$ p \f$ if
+  * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
+  * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka
+  * Frobenius norm, i.e. the L2 norm of the matrix taken as a vector of coefficients).
+  *
+  * \note Because this comparison is multiplicative, one can't use this function
+  * to check whether \c *this is approximately equal to the zero matrix or vector.
+  * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix
+  * or vector. If you want to test whether \c *this is zero, use isMuchSmallerThan(const
+  * RealScalar&, RealScalar) instead.
+  *
+  * \sa isMuchSmallerThan(const RealScalar&, RealScalar) const
+  */
+template<typename Derived>
+template<typename OtherDerived>
+bool DenseBase<Derived>::isApprox(
+  const DenseBase<OtherDerived>& other,
+  const RealScalar& prec
+) const
+{
+  return internal::isApprox_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
+}
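+
+// An illustrative check (values assumed): for double, the default precision is
+// NumTraits<double>::dummy_precision() == 1e-12, so
+//   Eigen::Vector2d v(1.0, 2.0), w(1.0 + 1e-13, 2.0);
+//   v.isApprox(w);   // true: ||v - w|| <= 1e-12 * min(||v||, ||w||)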
+
+/** \returns \c true if the norm of \c *this is much smaller than \a other,
+  * within the precision determined by \a prec.
+  *
+  * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
+  * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if
+  * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f]
+  *
+  * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason,
+  * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm
+  * of a reference matrix of the same dimensions.
+  *
+  * \sa isApprox(), isMuchSmallerThan(const DenseBase<OtherDerived>&, RealScalar) const
+  */
+template<typename Derived>
+bool DenseBase<Derived>::isMuchSmallerThan(
+  const typename NumTraits<Scalar>::Real& other,
+  const RealScalar& prec
+) const
+{
+  return internal::isMuchSmallerThan_scalar_selector<Derived>::run(derived(), other, prec);
+}
+
+/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other,
+  * within the precision determined by \a prec.
+  *
+  * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
+  * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if
+  * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f]
+  * For matrices, the comparison is done using the Hilbert-Schmidt norm.
+  *
+  * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const
+  */
+template<typename Derived>
+template<typename OtherDerived>
+bool DenseBase<Derived>::isMuchSmallerThan(
+  const DenseBase<OtherDerived>& other,
+  const RealScalar& prec
+) const
+{
+  return internal::isMuchSmallerThan_object_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_FUZZY_H
diff --git a/Eigen/src/Core/GeneralProduct.h b/Eigen/src/Core/GeneralProduct.h
new file mode 100644
index 0000000..9e805a8
--- /dev/null
+++ b/Eigen/src/Core/GeneralProduct.h
@@ -0,0 +1,635 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERAL_PRODUCT_H
+#define EIGEN_GENERAL_PRODUCT_H
+
+namespace Eigen { 
+
+/** \class GeneralProduct
+  * \ingroup Core_Module
+  *
+  * \brief Expression of the product of two general matrices or vectors
+  *
+  * \param LhsNested the type used to store the left-hand side
+  * \param RhsNested the type used to store the right-hand side
+  * \param ProductMode the type of the product
+  *
+  * This class represents an expression of the product of two general matrices.
+  * We call a matrix "general" when it is a dense matrix with full storage; this
+  * excludes, for instance, triangular, selfadjoint, and sparse matrices.
+  * It is the return type of the operator* between general matrices. Its template
+  * arguments are determined automatically by ProductReturnType. Therefore,
+  * GeneralProduct should never be used directly. To determine the result type of a
+  * function which involves a matrix product, use ProductReturnType::Type.
+  *
+  * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
+  */
+template<typename Lhs, typename Rhs, int ProductType = internal::product_type<Lhs,Rhs>::value>
+class GeneralProduct;
+
+enum {
+  Large = 2,
+  Small = 3
+};
+
+namespace internal {
+
+template<int Rows, int Cols, int Depth> struct product_type_selector;
+
+template<int Size, int MaxSize> struct product_size_category
+{
+  enum { is_large = MaxSize == Dynamic ||
+                    Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD,
+         value = is_large  ? Large
+               : Size == 1 ? 1
+                           : Small
+  };
+};
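+// For instance, a MatrixXf dimension has Size == Dynamic and MaxSize == Dynamic,
+// hence is_large is true and that dimension is categorized as Large.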
+
+template<typename Lhs, typename Rhs> struct product_type
+{
+  typedef typename remove_all<Lhs>::type _Lhs;
+  typedef typename remove_all<Rhs>::type _Rhs;
+  enum {
+    MaxRows  = _Lhs::MaxRowsAtCompileTime,
+    Rows  = _Lhs::RowsAtCompileTime,
+    MaxCols  = _Rhs::MaxColsAtCompileTime,
+    Cols  = _Rhs::ColsAtCompileTime,
+    MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime,
+                                           _Rhs::MaxRowsAtCompileTime),
+    Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime,
+                                        _Rhs::RowsAtCompileTime),
+    LargeThreshold = EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+  };
+
+  // the splitting into different lines of code here, introducing the _select enums and the typedef below,
+  // is to work around an internal compiler error with gcc 4.1 and 4.2.
+private:
+  enum {
+    rows_select = product_size_category<Rows,MaxRows>::value,
+    cols_select = product_size_category<Cols,MaxCols>::value,
+    depth_select = product_size_category<Depth,MaxDepth>::value
+  };
+  typedef product_type_selector<rows_select, cols_select, depth_select> selector;
+
+public:
+  enum {
+    value = selector::ret
+  };
+#ifdef EIGEN_DEBUG_PRODUCT
+  static void debug()
+  {
+      EIGEN_DEBUG_VAR(Rows);
+      EIGEN_DEBUG_VAR(Cols);
+      EIGEN_DEBUG_VAR(Depth);
+      EIGEN_DEBUG_VAR(rows_select);
+      EIGEN_DEBUG_VAR(cols_select);
+      EIGEN_DEBUG_VAR(depth_select);
+      EIGEN_DEBUG_VAR(value);
+  }
+#endif
+};
+
+
+/* The following allows selecting the kind of product at compile time
+ * based on the three dimensions of the product.
+ * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */
+// FIXME I'm not sure the current mapping is the ideal one.
+template<int M, int N>  struct product_type_selector<M,N,1>              { enum { ret = OuterProduct }; };
+template<int Depth>     struct product_type_selector<1,    1,    Depth>  { enum { ret = InnerProduct }; };
+template<>              struct product_type_selector<1,    1,    1>      { enum { ret = InnerProduct }; };
+template<>              struct product_type_selector<Small,1,    Small>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<1,    Small,Small>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<Small,Small,Small>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<Small, Small, 1>    { enum { ret = LazyCoeffBasedProductMode }; };
+template<>              struct product_type_selector<Small, Large, 1>    { enum { ret = LazyCoeffBasedProductMode }; };
+template<>              struct product_type_selector<Large, Small, 1>    { enum { ret = LazyCoeffBasedProductMode }; };
+template<>              struct product_type_selector<1,    Large,Small>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<1,    Large,Large>  { enum { ret = GemvProduct }; };
+template<>              struct product_type_selector<1,    Small,Large>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<Large,1,    Small>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<Large,1,    Large>  { enum { ret = GemvProduct }; };
+template<>              struct product_type_selector<Small,1,    Large>  { enum { ret = CoeffBasedProductMode }; };
+template<>              struct product_type_selector<Small,Small,Large>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Large,Small,Large>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Small,Large,Large>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Large,Large,Large>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Large,Small,Small>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Small,Large,Small>  { enum { ret = GemmProduct }; };
+template<>              struct product_type_selector<Large,Large,Small>  { enum { ret = GemmProduct }; };
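+
+// For instance, a Matrix4f * Vector4f product has Rows=4, Cols=1, Depth=4;
+// with the default EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD (20), both fixed
+// dimensions classify as Small, so the mapping above picks
+// product_type_selector<Small,1,Small>, i.e., CoeffBasedProductMode.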
+
+} // end namespace internal
+
+/** \class ProductReturnType
+  * \ingroup Core_Module
+  *
+  * \brief Helper class to get the correct and optimized returned type of operator*
+  *
+  * \param Lhs the type of the left-hand side
+  * \param Rhs the type of the right-hand side
+  * \param ProductMode the type of the product (determined automatically by internal::product_mode)
+  *
+  * This class defines the typename Type representing the optimized product expression
+  * between two matrix expressions. In practice, using ProductReturnType<Lhs,Rhs>::Type
+  * is the recommended way to define the result type of a function returning an expression
+  * which involves a matrix product. The class Product should never be
+  * used directly.
+  *
+  * \sa class Product, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
+  */
+template<typename Lhs, typename Rhs, int ProductType>
+struct ProductReturnType
+{
+  // TODO use the nested type to reduce instantiations ????
+//   typedef typename internal::nested<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
+//   typedef typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
+
+  typedef GeneralProduct<Lhs/*Nested*/, Rhs/*Nested*/, ProductType> Type;
+};
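+
+// For instance, a generic function returning a not-yet-evaluated product
+// expression can use this helper for its return type (`makeProduct` is only
+// an illustrative name):
+//
+//   template<typename Lhs, typename Rhs>
+//   const typename ProductReturnType<Lhs,Rhs>::Type
+//   makeProduct(const MatrixBase<Lhs>& a, const MatrixBase<Rhs>& b)
+//   { return a.derived() * b.derived(); }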
+
+template<typename Lhs, typename Rhs>
+struct ProductReturnType<Lhs,Rhs,CoeffBasedProductMode>
+{
+  typedef typename internal::nested<Lhs, Rhs::ColsAtCompileTime, typename internal::plain_matrix_type<Lhs>::type >::type LhsNested;
+  typedef typename internal::nested<Rhs, Lhs::RowsAtCompileTime, typename internal::plain_matrix_type<Rhs>::type >::type RhsNested;
+  typedef CoeffBasedProduct<LhsNested, RhsNested, EvalBeforeAssigningBit | EvalBeforeNestingBit> Type;
+};
+
+template<typename Lhs, typename Rhs>
+struct ProductReturnType<Lhs,Rhs,LazyCoeffBasedProductMode>
+{
+  typedef typename internal::nested<Lhs, Rhs::ColsAtCompileTime, typename internal::plain_matrix_type<Lhs>::type >::type LhsNested;
+  typedef typename internal::nested<Rhs, Lhs::RowsAtCompileTime, typename internal::plain_matrix_type<Rhs>::type >::type RhsNested;
+  typedef CoeffBasedProduct<LhsNested, RhsNested, NestByRefBit> Type;
+};
+
+// this is a workaround for sun CC
+template<typename Lhs, typename Rhs>
+struct LazyProductReturnType : public ProductReturnType<Lhs,Rhs,LazyCoeffBasedProductMode>
+{};
+
+/***********************************************************************
+*  Implementation of Inner Vector Vector Product
+***********************************************************************/
+
+// FIXME: maybe the "inner product" could return a Scalar
+// instead of a 1x1 matrix??
+// Pro: more natural for the user
+// Con: this could be a problem if, in a meta-unrolled algorithm, a matrix-matrix
+// product ends up as a row-vector times col-vector product... To tackle this use
+// case, we could have a specialization for Block<MatrixType,1,1> with an operator=(Scalar x);
+
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,InnerProduct> >
+ : traits<Matrix<typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1> >
+{};
+
+}
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, InnerProduct>
+  : internal::no_assignment_operator,
+    public Matrix<typename internal::scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1>
+{
+    typedef Matrix<typename internal::scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType,1,1> Base;
+  public:
+    GeneralProduct(const Lhs& lhs, const Rhs& rhs)
+    {
+      EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+      Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum();
+    }
+
+    /** Conversion to scalar */
+    operator const typename Base::Scalar() const {
+      return Base::coeff(0,0);
+    }
+};
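+
+// Thanks to the scalar conversion operator above, a 1x1 inner product can be
+// used directly where a scalar is expected, e.g.:
+//   Vector3f u, v;
+//   float s = u.transpose() * v; // GeneralProduct<...,InnerProduct>, converted to float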
+
+/***********************************************************************
+*  Implementation of Outer Vector Vector Product
+***********************************************************************/
+
+namespace internal {
+
+// Column major
+template<typename ProductType, typename Dest, typename Func>
+EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest& dest, const Func& func, const false_type&)
+{
+  typedef typename Dest::Index Index;
+  // FIXME make sure lhs is sequentially stored
+  // FIXME not very good if rhs is real and lhs complex while alpha is real too
+  const Index cols = dest.cols();
+  for (Index j=0; j<cols; ++j)
+    func(dest.col(j), prod.rhs().coeff(0,j) * prod.lhs());
+}
+
+// Row major
+template<typename ProductType, typename Dest, typename Func>
+EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest& dest, const Func& func, const true_type&) {
+  typedef typename Dest::Index Index;
+  // FIXME make sure rhs is sequentially stored
+  // FIXME not very good if lhs is real and rhs complex while alpha is real too
+  const Index rows = dest.rows();
+  for (Index i=0; i<rows; ++i)
+    func(dest.row(i), prod.lhs().coeff(i,0) * prod.rhs());
+}
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,OuterProduct> >
+ : traits<ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs> >
+{};
+
+}
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, OuterProduct>
+  : public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
+{
+    template<typename T> struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
+    
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
+
+    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {
+      EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::RealScalar, typename Rhs::RealScalar>::value),
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+    }
+    
+    struct set  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived()  = src; } };
+    struct add  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } };
+    struct sub  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } };
+    struct adds {
+      Scalar m_scale;
+      adds(const Scalar& s) : m_scale(s) {}
+      template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const {
+        dst.const_cast_derived() += m_scale * src;
+      }
+    };
+    
+    template<typename Dest>
+    inline void evalTo(Dest& dest) const {
+      internal::outer_product_selector_run(*this, dest, set(), IsRowMajor<Dest>());
+    }
+    
+    template<typename Dest>
+    inline void addTo(Dest& dest) const {
+      internal::outer_product_selector_run(*this, dest, add(), IsRowMajor<Dest>());
+    }
+
+    template<typename Dest>
+    inline void subTo(Dest& dest) const {
+      internal::outer_product_selector_run(*this, dest, sub(), IsRowMajor<Dest>());
+    }
+
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
+    {
+      internal::outer_product_selector_run(*this, dest, adds(alpha), IsRowMajor<Dest>());
+    }
+};
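+
+// For instance, an update such as `m.noalias() += alpha * (u * v.transpose())`
+// (u, v column vectors) reaches scaleAndAddTo() above, which dispatches to the
+// column-major or row-major kernel according to the storage order of m.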
+
+/***********************************************************************
+*  Implementation of General Matrix Vector Product
+***********************************************************************/
+
+/*  According to the shape/flags of the matrix we have to distinguish 3 different cases:
+ *   1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine
+ *   2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine
+ *   3 - all other cases are handled using a simple loop along the outer-storage direction.
+ *  Therefore we need a lower-level meta selector.
+ *  Furthermore, if the matrix is the rhs, then the product has to be transposed.
+ */
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,GemvProduct> >
+ : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemvProduct>, Lhs, Rhs> >
+{};
+
+template<int Side, int StorageOrder, bool BlasCompatible>
+struct gemv_selector;
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, GemvProduct>
+  : public ProductBase<GeneralProduct<Lhs,Rhs,GemvProduct>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
+
+    typedef typename Lhs::Scalar LhsScalar;
+    typedef typename Rhs::Scalar RhsScalar;
+
+    GeneralProduct(const Lhs& a_lhs, const Rhs& a_rhs) : Base(a_lhs,a_rhs)
+    {
+//       EIGEN_STATIC_ASSERT((internal::is_same<typename Lhs::Scalar, typename Rhs::Scalar>::value),
+//         YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+    }
+
+    enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight };
+    typedef typename internal::conditional<int(Side)==OnTheRight,_LhsNested,_RhsNested>::type MatrixType;
+
+    template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
+    {
+      eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols());
+      internal::gemv_selector<Side,(int(MatrixType::Flags)&RowMajorBit) ? RowMajor : ColMajor,
+                       bool(internal::blas_traits<MatrixType>::HasUsableDirectAccess)>::run(*this, dst, alpha);
+    }
+};
+
+namespace internal {
+
+// The vector is on the left => transposition
+template<int StorageOrder, bool BlasCompatible>
+struct gemv_selector<OnTheLeft,StorageOrder,BlasCompatible>
+{
+  template<typename ProductType, typename Dest>
+  static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
+  {
+    Transpose<Dest> destT(dest);
+    enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
+    gemv_selector<OnTheRight,OtherStorageOrder,BlasCompatible>
+      ::run(GeneralProduct<Transpose<const typename ProductType::_RhsNested>,Transpose<const typename ProductType::_LhsNested>, GemvProduct>
+        (prod.rhs().transpose(), prod.lhs().transpose()), destT, alpha);
+  }
+};
+
+template<typename Scalar,int Size,int MaxSize,bool Cond> struct gemv_static_vector_if;
+
+template<typename Scalar,int Size,int MaxSize>
+struct gemv_static_vector_if<Scalar,Size,MaxSize,false>
+{
+  EIGEN_STRONG_INLINE  Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; }
+};
+
+template<typename Scalar,int Size>
+struct gemv_static_vector_if<Scalar,Size,Dynamic,true>
+{
+  EIGEN_STRONG_INLINE Scalar* data() { return 0; }
+};
+
+template<typename Scalar,int Size,int MaxSize>
+struct gemv_static_vector_if<Scalar,Size,MaxSize,true>
+{
+  #if EIGEN_ALIGN_STATICALLY
+  internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize),0> m_data;
+  EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; }
+  #else
+  // Some architectures cannot align on the stack,
+  // so let's manually enforce alignment by allocating more data and returning the address of the first aligned element.
+  enum {
+    ForceAlignment  = internal::packet_traits<Scalar>::Vectorizable,
+    PacketSize      = internal::packet_traits<Scalar>::size
+  };
+  internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize)+(ForceAlignment?PacketSize:0),0> m_data;
+  EIGEN_STRONG_INLINE Scalar* data() {
+    return ForceAlignment
+            ? reinterpret_cast<Scalar*>((reinterpret_cast<size_t>(m_data.array) & ~(size_t(15))) + 16)
+            : m_data.array;
+  }
+  #endif
+};
+
+template<> struct gemv_selector<OnTheRight,ColMajor,true>
+{
+  template<typename ProductType, typename Dest>
+  static inline void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
+  {
+    typedef typename ProductType::Index Index;
+    typedef typename ProductType::LhsScalar   LhsScalar;
+    typedef typename ProductType::RhsScalar   RhsScalar;
+    typedef typename ProductType::Scalar      ResScalar;
+    typedef typename ProductType::RealScalar  RealScalar;
+    typedef typename ProductType::ActualLhsType ActualLhsType;
+    typedef typename ProductType::ActualRhsType ActualRhsType;
+    typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+    typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+    typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;
+
+    ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs());
+    ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+                                  * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+    enum {
+      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
+      // on the other hand, it is good for the cache to pack the vector anyway...
+      EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
+      ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
+      MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
+    };
+
+    gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
+
+    bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
+    bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
+    
+    RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
+
+    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
+                                                  evalToDest ? dest.data() : static_dest.data());
+    
+    if(!evalToDest)
+    {
+      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      int size = dest.size();
+      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      #endif
+      if(!alphaIsCompatible)
+      {
+        MappedDest(actualDestPtr, dest.size()).setZero();
+        compatibleAlpha = RhsScalar(1);
+      }
+      else
+        MappedDest(actualDestPtr, dest.size()) = dest;
+    }
+
+    general_matrix_vector_product
+      <Index,LhsScalar,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
+        actualLhs.rows(), actualLhs.cols(),
+        actualLhs.data(), actualLhs.outerStride(),
+        actualRhs.data(), actualRhs.innerStride(),
+        actualDestPtr, 1,
+        compatibleAlpha);
+
+    if (!evalToDest)
+    {
+      if(!alphaIsCompatible)
+        dest += actualAlpha * MappedDest(actualDestPtr, dest.size());
+      else
+        dest = MappedDest(actualDestPtr, dest.size());
+    }
+  }
+};
+
+template<> struct gemv_selector<OnTheRight,RowMajor,true>
+{
+  template<typename ProductType, typename Dest>
+  static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
+  {
+    typedef typename ProductType::LhsScalar LhsScalar;
+    typedef typename ProductType::RhsScalar RhsScalar;
+    typedef typename ProductType::Scalar    ResScalar;
+    typedef typename ProductType::Index Index;
+    typedef typename ProductType::ActualLhsType ActualLhsType;
+    typedef typename ProductType::ActualRhsType ActualRhsType;
+    typedef typename ProductType::_ActualRhsType _ActualRhsType;
+    typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+    typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+
+    typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+    typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+                                  * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+    enum {
+      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
+      // on the other hand, it is good for the cache to pack the vector anyway...
+      DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1
+    };
+
+    gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;
+
+    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),
+        DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());
+
+    if(!DirectlyUseRhs)
+    {
+      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      int size = actualRhs.size();
+      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      #endif
+      Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
+    }
+
+    general_matrix_vector_product
+      <Index,LhsScalar,RowMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
+        actualLhs.rows(), actualLhs.cols(),
+        actualLhs.data(), actualLhs.outerStride(),
+        actualRhsPtr, 1,
+        dest.data(), dest.innerStride(),
+        actualAlpha);
+  }
+};
+
+template<> struct gemv_selector<OnTheRight,ColMajor,false>
+{
+  template<typename ProductType, typename Dest>
+  static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
+  {
+    typedef typename Dest::Index Index;
+    // TODO make sure dest is sequentially stored in memory, otherwise use a temp
+    const Index size = prod.rhs().rows();
+    for(Index k=0; k<size; ++k)
+      dest += (alpha*prod.rhs().coeff(k)) * prod.lhs().col(k);
+  }
+};
+
+template<> struct gemv_selector<OnTheRight,RowMajor,false>
+{
+  template<typename ProductType, typename Dest>
+  static void run(const ProductType& prod, Dest& dest, const typename ProductType::Scalar& alpha)
+  {
+    typedef typename Dest::Index Index;
+    // TODO make sure rhs is sequentially stored in memory, otherwise use a temp
+    const Index rows = prod.rows();
+    for(Index i=0; i<rows; ++i)
+      dest.coeffRef(i) += alpha * (prod.lhs().row(i).cwiseProduct(prod.rhs().transpose())).sum();
+  }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of matrix base methods
+***************************************************************************/
+
+/** \returns the matrix product of \c *this and \a other.
+  *
+  * \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().
+  *
+  * \sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*()
+  */
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename ProductReturnType<Derived, OtherDerived>::Type
+MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+  // A note regarding the function declaration: in MSVC, this function will sometimes
+  // not be inlined since DenseStorage is an unwindable object for dynamic
+  // matrices and product types hold a member to store the result.
+  // Thus, tagging this function with EIGEN_STRONG_INLINE does not help.
+  enum {
+    ProductIsValid =  Derived::ColsAtCompileTime==Dynamic
+                   || OtherDerived::RowsAtCompileTime==Dynamic
+                   || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
+    AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
+    SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
+  };
+  // note to the lost user:
+  //    * for a dot product use: v1.dot(v2)
+  //    * for a coeff-wise product use: v1.cwiseProduct(v2)
+  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+    INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+    INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+#ifdef EIGEN_DEBUG_PRODUCT
+  internal::product_type<Derived,OtherDerived>::debug();
+#endif
+  return typename ProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+
+/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation.
+  *
+  * The returned product will behave like any other expression: the coefficients of the product will be
+  * computed one at a time, as requested. This might be useful in some extremely rare cases when only
+  * a small, non-coherent fraction of the result's coefficients has to be computed.
+  *
+  * \warning This version of the matrix product can be much slower. So use it only if you know
+  * what you are doing and have measured a true speed improvement.
+  *
+  * \sa operator*(const MatrixBase&)
+  */
+template<typename Derived>
+template<typename OtherDerived>
+const typename LazyProductReturnType<Derived,OtherDerived>::Type
+MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
+{
+  enum {
+    ProductIsValid =  Derived::ColsAtCompileTime==Dynamic
+                   || OtherDerived::RowsAtCompileTime==Dynamic
+                   || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
+    AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
+    SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
+  };
+  // note to the lost user:
+  //    * for a dot product use: v1.dot(v2)
+  //    * for a coeff-wise product use: v1.cwiseProduct(v2)
+  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+    INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+    INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+
+  return typename LazyProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
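+
+// For instance, `(a.lazyProduct(b)).coeff(i,j)` computes only the (i,j)
+// coefficient of the product, without evaluating the whole result into a
+// temporary.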
+
+} // end namespace Eigen
+
+#endif // EIGEN_PRODUCT_H
diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h
new file mode 100644
index 0000000..5f783eb
--- /dev/null
+++ b/Eigen/src/Core/GenericPacketMath.h
@@ -0,0 +1,350 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERIC_PACKET_MATH_H
+#define EIGEN_GENERIC_PACKET_MATH_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal
+  * \file GenericPacketMath.h
+  *
+  * Default implementation for types not supported by the vectorization.
+  * In practice these functions are provided to make the writing
+  * of generic vectorized code easier.
+  */
+
+#ifndef EIGEN_DEBUG_ALIGNED_LOAD
+#define EIGEN_DEBUG_ALIGNED_LOAD
+#endif
+
+#ifndef EIGEN_DEBUG_UNALIGNED_LOAD
+#define EIGEN_DEBUG_UNALIGNED_LOAD
+#endif
+
+#ifndef EIGEN_DEBUG_ALIGNED_STORE
+#define EIGEN_DEBUG_ALIGNED_STORE
+#endif
+
+#ifndef EIGEN_DEBUG_UNALIGNED_STORE
+#define EIGEN_DEBUG_UNALIGNED_STORE
+#endif
+
+struct default_packet_traits
+{
+  enum {
+    HasAdd    = 1,
+    HasSub    = 1,
+    HasMul    = 1,
+    HasNegate = 1,
+    HasAbs    = 1,
+    HasAbs2   = 1,
+    HasMin    = 1,
+    HasMax    = 1,
+    HasConj   = 1,
+    HasSetLinear = 1,
+
+    HasDiv    = 0,
+    HasSqrt   = 0,
+    HasExp    = 0,
+    HasLog    = 0,
+    HasPow    = 0,
+
+    HasSin    = 0,
+    HasCos    = 0,
+    HasTan    = 0,
+    HasASin   = 0,
+    HasACos   = 0,
+    HasATan   = 0
+  };
+};
+
+template<typename T> struct packet_traits : default_packet_traits
+{
+  typedef T type;
+  enum {
+    Vectorizable = 0,
+    size = 1,
+    AlignedOnScalar = 0
+  };
+  enum {
+    HasAdd    = 0,
+    HasSub    = 0,
+    HasMul    = 0,
+    HasNegate = 0,
+    HasAbs    = 0,
+    HasAbs2   = 0,
+    HasMin    = 0,
+    HasMax    = 0,
+    HasConj   = 0,
+    HasSetLinear = 0
+  };
+};
+
+/** \internal \returns a + b (coeff-wise) */
+template<typename Packet> inline Packet
+padd(const Packet& a,
+        const Packet& b) { return a+b; }
+
+/** \internal \returns a - b (coeff-wise) */
+template<typename Packet> inline Packet
+psub(const Packet& a,
+        const Packet& b) { return a-b; }
+
+/** \internal \returns -a (coeff-wise) */
+template<typename Packet> inline Packet
+pnegate(const Packet& a) { return -a; }
+
+/** \internal \returns conj(a) (coeff-wise) */
+template<typename Packet> inline Packet
+pconj(const Packet& a) { return numext::conj(a); }
+
+/** \internal \returns a * b (coeff-wise) */
+template<typename Packet> inline Packet
+pmul(const Packet& a,
+        const Packet& b) { return a*b; }
+
+/** \internal \returns a / b (coeff-wise) */
+template<typename Packet> inline Packet
+pdiv(const Packet& a,
+        const Packet& b) { return a/b; }
+
+/** \internal \returns the min of \a a and \a b  (coeff-wise) */
+template<typename Packet> inline Packet
+pmin(const Packet& a,
+        const Packet& b) { using std::min; return (min)(a, b); }
+
+/** \internal \returns the max of \a a and \a b  (coeff-wise) */
+template<typename Packet> inline Packet
+pmax(const Packet& a,
+        const Packet& b) { using std::max; return (max)(a, b); }
+
+/** \internal \returns the absolute value of \a a */
+template<typename Packet> inline Packet
+pabs(const Packet& a) { using std::abs; return abs(a); }
+
+/** \internal \returns the bitwise and of \a a and \a b */
+template<typename Packet> inline Packet
+pand(const Packet& a, const Packet& b) { return a & b; }
+
+/** \internal \returns the bitwise or of \a a and \a b */
+template<typename Packet> inline Packet
+por(const Packet& a, const Packet& b) { return a | b; }
+
+/** \internal \returns the bitwise xor of \a a and \a b */
+template<typename Packet> inline Packet
+pxor(const Packet& a, const Packet& b) { return a ^ b; }
+
+/** \internal \returns the bitwise andnot of \a a and \a b */
+template<typename Packet> inline Packet
+pandnot(const Packet& a, const Packet& b) { return a & (~b); }
+
+/** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */
+template<typename Packet> inline Packet
+pload(const typename unpacket_traits<Packet>::type* from) { return *from; }
+
+/** \internal \returns a packet version of \a *from, (un-aligned load) */
+template<typename Packet> inline Packet
+ploadu(const typename unpacket_traits<Packet>::type* from) { return *from; }
+
+/** \internal \returns a packet with elements of \a *from duplicated.
+  * For instance, for a packet of 8 elements, 4 scalars will be read from \a *from and
+  * duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]}
+  * Currently, this function is only used for scalar * complex products.
+  */
+template<typename Packet> inline Packet
+ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }
+
+/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */
+template<typename Packet> inline Packet
+pset1(const typename unpacket_traits<Packet>::type& a) { return a; }
+
+/** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */
+template<typename Scalar> inline typename packet_traits<Scalar>::type
+plset(const Scalar& a) { return a; }
+
+/** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */
+template<typename Scalar, typename Packet> inline void pstore(Scalar* to, const Packet& from)
+{ (*to) = from; }
+
+/** \internal copy the packet \a from to \a *to, (un-aligned store) */
+template<typename Scalar, typename Packet> inline void pstoreu(Scalar* to, const Packet& from)
+{ (*to) = from; }
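+
+/** \internal The primitives above make it possible to write kernels that are
+  * generic over the packet type. A minimal sketch (assuming aligned pointers
+  * and a size that is a multiple of the packet size; `axpy_sketch` is only an
+  * illustrative name):
+  *
+  *   template<typename Scalar>
+  *   void axpy_sketch(const Scalar& a, const Scalar* x, Scalar* y, int n)
+  *   {
+  *     typedef typename packet_traits<Scalar>::type Packet;
+  *     const int PacketSize = packet_traits<Scalar>::size;
+  *     const Packet pa = pset1<Packet>(a);
+  *     for(int i=0; i<n; i+=PacketSize)  // y[i] += a * x[i], packet-wise
+  *       pstore(y+i, padd(pmul(pa, pload<Packet>(x+i)), pload<Packet>(y+i)));
+  *   }
+  */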
+
+/** \internal tries to do cache prefetching of \a addr */
+template<typename Scalar> inline void prefetch(const Scalar* addr)
+{
+#if !defined(_MSC_VER)
+  __builtin_prefetch(addr);
+#endif
+}
+
+/** \internal \returns the first element of a packet */
+template<typename Packet> inline typename unpacket_traits<Packet>::type pfirst(const Packet& a)
+{ return a; }
+
+/** \internal \returns a packet where element i contains the sum of the elements of \a vecs[i] */
+template<typename Packet> inline Packet
+preduxp(const Packet* vecs) { return vecs[0]; }
+
+/** \internal \returns the sum of the elements of \a a*/
+template<typename Packet> inline typename unpacket_traits<Packet>::type predux(const Packet& a)
+{ return a; }
+
+/** \internal \returns the product of the elements of \a a*/
+template<typename Packet> inline typename unpacket_traits<Packet>::type predux_mul(const Packet& a)
+{ return a; }
+
+/** \internal \returns the min of the elements of \a a*/
+template<typename Packet> inline typename unpacket_traits<Packet>::type predux_min(const Packet& a)
+{ return a; }
+
+/** \internal \returns the max of the elements of \a a*/
+template<typename Packet> inline typename unpacket_traits<Packet>::type predux_max(const Packet& a)
+{ return a; }
+
+/** \internal \returns the reversed elements of \a a*/
+template<typename Packet> inline Packet preverse(const Packet& a)
+{ return a; }
+
+
+/** \internal \returns \a a with real and imaginary part flipped (for complex type only) */
+template<typename Packet> inline Packet pcplxflip(const Packet& a)
+{
+  // FIXME: uncomment the following in case we drop the internal imag and real functions.
+//   using std::imag;
+//   using std::real;
+  return Packet(imag(a),real(a));
+}
+
+/**************************
+* Special math functions
+***************************/
+
+/** \internal \returns the sine of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet psin(const Packet& a) { using std::sin; return sin(a); }
+
+/** \internal \returns the cosine of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pcos(const Packet& a) { using std::cos; return cos(a); }
+
+/** \internal \returns the tan of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet ptan(const Packet& a) { using std::tan; return tan(a); }
+
+/** \internal \returns the arc sine of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pasin(const Packet& a) { using std::asin; return asin(a); }
+
+/** \internal \returns the arc cosine of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pacos(const Packet& a) { using std::acos; return acos(a); }
+
+/** \internal \returns the exp of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet pexp(const Packet& a) { using std::exp; return exp(a); }
+
+/** \internal \returns the log of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet plog(const Packet& a) { using std::log; return log(a); }
+
+/** \internal \returns the square-root of \a a (coeff-wise) */
+template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+Packet psqrt(const Packet& a) { using std::sqrt; return sqrt(a); }
+
+/***************************************************************************
+* The following functions might not have to be overridden for vectorized types
+***************************************************************************/
+
+/** \internal copy a packet with constant coefficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned */
+// NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type)
+template<typename Packet>
+inline void pstore1(typename unpacket_traits<Packet>::type* to, const typename unpacket_traits<Packet>::type& a)
+{
+  pstore(to, pset1<Packet>(a));
+}
+
+/** \internal \returns a * b + c (coeff-wise) */
+template<typename Packet> inline Packet
+pmadd(const Packet&  a,
+         const Packet&  b,
+         const Packet&  c)
+{ return padd(pmul(a, b),c); }
+
+/** \internal \returns a packet version of \a *from.
+  * If LoadMode equals #Aligned, \a from must be 16 bytes aligned */
+template<typename Packet, int LoadMode>
+inline Packet ploadt(const typename unpacket_traits<Packet>::type* from)
+{
+  if(LoadMode == Aligned)
+    return pload<Packet>(from);
+  else
+    return ploadu<Packet>(from);
+}
+
+/** \internal copy the packet \a from to \a *to.
+  * If LoadMode equals #Aligned, \a to must be 16 bytes aligned */
+template<typename Scalar, typename Packet, int LoadMode>
+inline void pstoret(Scalar* to, const Packet& from)
+{
+  if(LoadMode == Aligned)
+    pstore(to, from);
+  else
+    pstoreu(to, from);
+}
+
+/** \internal default implementation of palign() allowing partial specialization */
+template<int Offset,typename PacketType>
+struct palign_impl
+{
+  // by default data are aligned, so there is nothing to be done :)
+  static inline void run(PacketType&, const PacketType&) {}
+};
+
+/** \internal update \a first using the concatenation of the last (packet_size - \a Offset) elements
+  * of \a first and the first \a Offset elements of \a second.
+  *
+  * This function is currently only used to optimize matrix-vector products on unaligned matrices.
+  * It takes 2 packets that represent a contiguous memory array, and returns a packet starting
+  * at the position \a Offset. For instance, for packets of 4 elements, we have:
+  *  Input:
+  *  - first = {f0,f1,f2,f3}
+  *  - second = {s0,s1,s2,s3}
+  * Output: 
+  *   - if Offset==0 then {f0,f1,f2,f3}
+  *   - if Offset==1 then {f1,f2,f3,s0}
+  *   - if Offset==2 then {f2,f3,s0,s1}
+  *   - if Offset==3 then {f3,s0,s1,s2}
+  */
+template<int Offset,typename PacketType>
+inline void palign(PacketType& first, const PacketType& second)
+{
+  palign_impl<Offset,PacketType>::run(first,second);
+}
+
+/***************************************************************************
+* Fast complex products (GCC generates a function call which is very slow)
+***************************************************************************/
+
+template<> inline std::complex<float> pmul(const std::complex<float>& a, const std::complex<float>& b)
+{ return std::complex<float>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }
+
+template<> inline std::complex<double> pmul(const std::complex<double>& a, const std::complex<double>& b)
+{ return std::complex<double>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERIC_PACKET_MATH_H
+
diff --git a/Eigen/src/Core/GlobalFunctions.h b/Eigen/src/Core/GlobalFunctions.h
new file mode 100644
index 0000000..2acf977
--- /dev/null
+++ b/Eigen/src/Core/GlobalFunctions.h
@@ -0,0 +1,92 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GLOBAL_FUNCTIONS_H
+#define EIGEN_GLOBAL_FUNCTIONS_H
+
+#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR) \
+  template<typename Derived> \
+  inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> \
+  NAME(const Eigen::ArrayBase<Derived>& x) { \
+    return x.derived(); \
+  }
+
+#define EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(NAME,FUNCTOR) \
+  \
+  template<typename Derived> \
+  struct NAME##_retval<ArrayBase<Derived> > \
+  { \
+    typedef const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> type; \
+  }; \
+  template<typename Derived> \
+  struct NAME##_impl<ArrayBase<Derived> > \
+  { \
+    static inline typename NAME##_retval<ArrayBase<Derived> >::type run(const Eigen::ArrayBase<Derived>& x) \
+    { \
+      return x.derived(); \
+    } \
+  };
+
+
+namespace Eigen
+{
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real,scalar_real_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag,scalar_imag_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(conj,scalar_conjugate_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin,scalar_sin_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos,scalar_cos_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin,scalar_asin_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acos,scalar_acos_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tan,scalar_tan_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp,scalar_exp_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log,scalar_log_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs,scalar_abs_op)
+  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt,scalar_sqrt_op)
+  
+  template<typename Derived>
+  inline const Eigen::CwiseUnaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar>, const Derived>
+  pow(const Eigen::ArrayBase<Derived>& x, const typename Derived::Scalar& exponent) {
+    return x.derived().pow(exponent);
+  }
+
+  template<typename Derived>
+  inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_binary_pow_op<typename Derived::Scalar, typename Derived::Scalar>, const Derived, const Derived>
+  pow(const Eigen::ArrayBase<Derived>& x, const Eigen::ArrayBase<Derived>& exponents) 
+  {
+    return Eigen::CwiseBinaryOp<Eigen::internal::scalar_binary_pow_op<typename Derived::Scalar, typename Derived::Scalar>, const Derived, const Derived>(
+      x.derived(),
+      exponents.derived()
+    );
+  }
+  
+  /** \brief Component-wise division of a scalar by array elements. */
+  template <typename Derived>
+  inline const Eigen::CwiseUnaryOp<Eigen::internal::scalar_inverse_mult_op<typename Derived::Scalar>, const Derived>
+    operator/(const typename Derived::Scalar& s, const Eigen::ArrayBase<Derived>& a)
+  {
+    return Eigen::CwiseUnaryOp<Eigen::internal::scalar_inverse_mult_op<typename Derived::Scalar>, const Derived>(
+      a.derived(),
+      Eigen::internal::scalar_inverse_mult_op<typename Derived::Scalar>(s)  
+    );
+  }
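+
+  // For instance, with `ArrayXd a` one can write `Eigen::sin(a)`,
+  // `Eigen::pow(a, 2.0)`, or `2.0 / a`; each returns a lightweight expression
+  // that is only evaluated upon assignment.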
+
+  namespace internal
+  {
+    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op)
+    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag,scalar_imag_op)
+    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2,scalar_abs2_op)
+  }
+}
+
+// TODO: cleanly disable those functions that are not supported on Array (numext::real_ref, internal::random, internal::isApprox...)
+
+#endif // EIGEN_GLOBAL_FUNCTIONS_H
diff --git a/Eigen/src/Core/IO.h b/Eigen/src/Core/IO.h
new file mode 100644
index 0000000..8d4bc59
--- /dev/null
+++ b/Eigen/src/Core/IO.h
@@ -0,0 +1,250 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_IO_H
+#define EIGEN_IO_H
+
+namespace Eigen { 
+
+enum { DontAlignCols = 1 };
+enum { StreamPrecision = -1,
+       FullPrecision = -2 };
+
+namespace internal {
+template<typename Derived>
+std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt);
+}
+
+/** \class IOFormat
+  * \ingroup Core_Module
+  *
+  * \brief Stores a set of parameters controlling the way matrices are printed
+  *
+  * List of available parameters:
+  *  - \b precision number of digits for floating point values, or one of the special constants \c StreamPrecision and \c FullPrecision.
+  *                 The default is the special value \c StreamPrecision which means to use the
+  *                 stream's own precision setting, as set for instance using \c cout.precision(3). The other special value
+  *                 \c FullPrecision means that the number of digits will be computed to match the full precision of each floating-point
+  *                 type.
+  *  - \b flags an OR-ed combination of flags; the default value is 0. The only currently available flag is \c DontAlignCols, which
+  *             disables the alignment of columns, resulting in faster code.
+  *  - \b coeffSeparator string printed between two coefficients of the same row
+  *  - \b rowSeparator string printed between two rows
+  *  - \b rowPrefix string printed at the beginning of each row
+  *  - \b rowSuffix string printed at the end of each row
+  *  - \b matPrefix string printed at the beginning of the matrix
+  *  - \b matSuffix string printed at the end of the matrix
+  *
+  * Example: \include IOFormat.cpp
+  * Output: \verbinclude IOFormat.out
+  *
+  * \sa DenseBase::format(), class WithFormat
+  */
+struct IOFormat
+{
+  /** Default constructor, see class IOFormat for the meaning of the parameters */
+  IOFormat(int _precision = StreamPrecision, int _flags = 0,
+    const std::string& _coeffSeparator = " ",
+    const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="",
+    const std::string& _matPrefix="", const std::string& _matSuffix="")
+  : matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator),
+    rowSpacer(""), coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags)
+  {
+    int i = int(matSuffix.length())-1;
+    while (i>=0 && matSuffix[i]!='\n')
+    {
+      rowSpacer += ' ';
+      i--;
+    }
+  }
+  std::string matPrefix, matSuffix;
+  std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer;
+  std::string coeffSeparator;
+  int precision;
+  int flags;
+};
+
+/** \class WithFormat
+  * \ingroup Core_Module
+  *
+  * \brief Pseudo expression providing matrix output with given format
+  *
+  * \param ExpressionType the type of the object on which IO stream operations are performed
+  *
+  * This class represents an expression with stream operators controlled by a given IOFormat.
+  * It is the return type of DenseBase::format()
+  * and most of the time this is the only way it is used.
+  *
+  * See class IOFormat for some examples.
+  *
+  * \sa DenseBase::format(), class IOFormat
+  */
+template<typename ExpressionType>
+class WithFormat
+{
+  public:
+
+    WithFormat(const ExpressionType& matrix, const IOFormat& format)
+      : m_matrix(matrix), m_format(format)
+    {}
+
+    friend std::ostream & operator << (std::ostream & s, const WithFormat& wf)
+    {
+      return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format);
+    }
+
+  protected:
+    const typename ExpressionType::Nested m_matrix;
+    IOFormat m_format;
+};
+
+/** \returns a WithFormat proxy object allowing to print a matrix with the given
+  * format \a fmt.
+  *
+  * See class IOFormat for some examples.
+  *
+  * \sa class IOFormat, class WithFormat
+  */
+template<typename Derived>
+inline const WithFormat<Derived>
+DenseBase<Derived>::format(const IOFormat& fmt) const
+{
+  return WithFormat<Derived>(derived(), fmt);
+}
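+
+// For instance, assuming a Matrix3d `m`:
+//   IOFormat CleanFmt(4, 0, ", ", "\n", "[", "]");
+//   std::cout << m.format(CleanFmt) << std::endl;
+// prints each row as "[x, y, z]" with 4 significant digits.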
+
+namespace internal {
+
+template<typename Scalar, bool IsInteger>
+struct significant_decimals_default_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline int run()
+  {
+    using std::ceil;
+    using std::log;
+    return cast<RealScalar,int>(ceil(-log(NumTraits<RealScalar>::epsilon())/log(RealScalar(10))));
+  }
+};
+
+template<typename Scalar>
+struct significant_decimals_default_impl<Scalar, true>
+{
+  static inline int run()
+  {
+    return 0;
+  }
+};
+
+template<typename Scalar>
+struct significant_decimals_impl
+  : significant_decimals_default_impl<Scalar, NumTraits<Scalar>::IsInteger>
+{};
+
+/** \internal
+  * print the matrix \a _m to the output stream \a s using the output format \a fmt */
+template<typename Derived>
+std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt)
+{
+  if(_m.size() == 0)
+  {
+    s << fmt.matPrefix << fmt.matSuffix;
+    return s;
+  }
+  
+  typename Derived::Nested m = _m;
+  typedef typename Derived::Scalar Scalar;
+  typedef typename Derived::Index Index;
+
+  Index width = 0;
+
+  std::streamsize explicit_precision;
+  if(fmt.precision == StreamPrecision)
+  {
+    explicit_precision = 0;
+  }
+  else if(fmt.precision == FullPrecision)
+  {
+    if (NumTraits<Scalar>::IsInteger)
+    {
+      explicit_precision = 0;
+    }
+    else
+    {
+      explicit_precision = significant_decimals_impl<Scalar>::run();
+    }
+  }
+  else
+  {
+    explicit_precision = fmt.precision;
+  }
+
+  std::streamsize old_precision = 0;
+  if(explicit_precision) old_precision = s.precision(explicit_precision);
+
+  bool align_cols = !(fmt.flags & DontAlignCols);
+  if(align_cols)
+  {
+    // compute the largest width
+    for(Index j = 0; j < m.cols(); ++j)
+      for(Index i = 0; i < m.rows(); ++i)
+      {
+        std::stringstream sstr;
+        sstr.copyfmt(s);
+        sstr << m.coeff(i,j);
+        width = std::max<Index>(width, Index(sstr.str().length()));
+      }
+  }
+  s << fmt.matPrefix;
+  for(Index i = 0; i < m.rows(); ++i)
+  {
+    if (i)
+      s << fmt.rowSpacer;
+    s << fmt.rowPrefix;
+    if(width) s.width(width);
+    s << m.coeff(i, 0);
+    for(Index j = 1; j < m.cols(); ++j)
+    {
+      s << fmt.coeffSeparator;
+      if (width) s.width(width);
+      s << m.coeff(i, j);
+    }
+    s << fmt.rowSuffix;
+    if( i < m.rows() - 1)
+      s << fmt.rowSeparator;
+  }
+  s << fmt.matSuffix;
+  if(explicit_precision) s.precision(old_precision);
+  return s;
+}
+
+} // end namespace internal
+
+/** \relates DenseBase
+  *
+  * Outputs the matrix, to the given stream.
+  *
+  * If you wish to print the matrix with a format different than the default, use DenseBase::format().
+  *
+  * It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers.
+  * If not defined, this will automatically be defined to Eigen::IOFormat(), that is, the Eigen::IOFormat with default parameters.
+  *
+  * \sa DenseBase::format()
+  */
+template<typename Derived>
+std::ostream & operator <<
+(std::ostream & s,
+ const DenseBase<Derived> & m)
+{
+  return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_IO_H
diff --git a/Eigen/src/Core/Map.h b/Eigen/src/Core/Map.h
new file mode 100644
index 0000000..f804c89
--- /dev/null
+++ b/Eigen/src/Core/Map.h
@@ -0,0 +1,192 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MAP_H
+#define EIGEN_MAP_H
+
+namespace Eigen { 
+
+/** \class Map
+  * \ingroup Core_Module
+  *
+  * \brief A matrix or vector expression mapping an existing array of data.
+  *
+  * \tparam PlainObjectType the equivalent matrix type of the mapped data
+  * \tparam MapOptions specifies whether the pointer is \c #Aligned, or \c #Unaligned.
+  *                The default is \c #Unaligned.
+  * \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout
+  *                   of an ordinary, contiguous array. This can be overridden by specifying strides.
+  *                   The type passed here must be a specialization of the Stride template, see examples below.
+  *
+  * This class represents a matrix or vector expression mapping an existing array of data.
+  * It can be used to let Eigen interface without any overhead with non-Eigen data structures,
+  * such as plain C arrays or structures from other libraries. By default, it assumes that the
+  * data is laid out contiguously in memory. You can however override this by explicitly specifying
+  * inner and outer strides.
+  *
+  * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix:
+  * \include Map_simple.cpp
+  * Output: \verbinclude Map_simple.out
+  *
+  * If you need to map non-contiguous arrays, you can do so by specifying strides:
+  *
+  * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer
+  * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time
+  * fixed value.
+  * \include Map_inner_stride.cpp
+  * Output: \verbinclude Map_inner_stride.out
+  *
+  * Here's an example of mapping an array while specifying an outer stride. Since we're mapping
+  * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns.
+  * In this example, the outer stride is specified as a runtime parameter. Note that \c OuterStride<> is
+  * a short version of \c OuterStride<Dynamic> because the default template parameter of OuterStride
+  * is \c Dynamic.
+  * \include Map_outer_stride.cpp
+  * Output: \verbinclude Map_outer_stride.out
+  *
+  * For more details and for an example of specifying both an inner and an outer stride, see class Stride.
+  *
+  * \b Tip: to change the array of data mapped by a Map object, you can use the C++
+  * placement new syntax:
+  *
+  * Example: \include Map_placement_new.cpp
+  * Output: \verbinclude Map_placement_new.out
+  *
+  * This class is the return type of PlainObjectBase::Map() but can also be used directly.
+  *
+  * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
+  */
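+
+// For instance, given `float data[6]`, `Map<Matrix<float,2,3> > m(data)` views
+// the array as a 2x3 column-major matrix without copying, and
+// `Map<VectorXf> v(data, 6)` views it as a dynamic-size vector.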
+
+namespace internal {
+template<typename PlainObjectType, int MapOptions, typename StrideType>
+struct traits<Map<PlainObjectType, MapOptions, StrideType> >
+  : public traits<PlainObjectType>
+{
+  typedef traits<PlainObjectType> TraitsBase;
+  typedef typename PlainObjectType::Index Index;
+  typedef typename PlainObjectType::Scalar Scalar;
+  enum {
+    InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
+                             ? int(PlainObjectType::InnerStrideAtCompileTime)
+                             : int(StrideType::InnerStrideAtCompileTime),
+    OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
+                             ? int(PlainObjectType::OuterStrideAtCompileTime)
+                             : int(StrideType::OuterStrideAtCompileTime),
+    HasNoInnerStride = InnerStrideAtCompileTime == 1,
+    HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
+    HasNoStride = HasNoInnerStride && HasNoOuterStride,
+    IsAligned = bool(EIGEN_ALIGN) && ((int(MapOptions)&Aligned)==Aligned),
+    IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
+    KeepsPacketAccess = bool(HasNoInnerStride)
+                        && ( bool(IsDynamicSize)
+                           || HasNoOuterStride
+                           || ( OuterStrideAtCompileTime!=Dynamic
+                           && ((static_cast<int>(sizeof(Scalar))*OuterStrideAtCompileTime)%16)==0 ) ),
+    Flags0 = TraitsBase::Flags & (~NestByRefBit),
+    Flags1 = IsAligned ? (int(Flags0) | AlignedBit) : (int(Flags0) & ~AlignedBit),
+    Flags2 = (bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime))
+           ? int(Flags1) : int(Flags1 & ~LinearAccessBit),
+    Flags3 = is_lvalue<PlainObjectType>::value ? int(Flags2) : (int(Flags2) & ~LvalueBit),
+    Flags = KeepsPacketAccess ? int(Flags3) : (int(Flags3) & ~PacketAccessBit)
+  };
+private:
+  enum { Options }; // Expressions don't have Options
+};
+}
+
+template<typename PlainObjectType, int MapOptions, typename StrideType> class Map
+  : public MapBase<Map<PlainObjectType, MapOptions, StrideType> >
+{
+  public:
+
+    typedef MapBase<Map> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Map)
+
+    typedef typename Base::PointerType PointerType;
+#if EIGEN2_SUPPORT_STAGE <= STAGE30_FULL_EIGEN3_API
+    typedef const Scalar* PointerArgType;
+    inline PointerType cast_to_pointer_type(PointerArgType ptr) { return const_cast<PointerType>(ptr); }
+#else
+    typedef PointerType PointerArgType;
+    inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; }
+#endif
+
+    inline Index innerStride() const
+    {
+      return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
+    }
+
+    inline Index outerStride() const
+    {
+      return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
+           : IsVectorAtCompileTime ? this->size()
+           : int(Flags)&RowMajorBit ? this->cols()
+           : this->rows();
+    }
+
+    /** Constructor in the fixed-size case.
+      *
+      * \param dataPtr pointer to the array to map
+      * \param a_stride optional Stride object, passing the strides.
+      */
+    inline Map(PointerArgType dataPtr, const StrideType& a_stride = StrideType())
+      : Base(cast_to_pointer_type(dataPtr)), m_stride(a_stride)
+    {
+      PlainObjectType::Base::_check_template_params();
+    }
+
+    /** Constructor in the dynamic-size vector case.
+      *
+      * \param dataPtr pointer to the array to map
+      * \param a_size the size of the vector expression
+      * \param a_stride optional Stride object, passing the strides.
+      */
+    inline Map(PointerArgType dataPtr, Index a_size, const StrideType& a_stride = StrideType())
+      : Base(cast_to_pointer_type(dataPtr), a_size), m_stride(a_stride)
+    {
+      PlainObjectType::Base::_check_template_params();
+    }
+
+    /** Constructor in the dynamic-size matrix case.
+      *
+      * \param dataPtr pointer to the array to map
+      * \param nbRows the number of rows of the matrix expression
+      * \param nbCols the number of columns of the matrix expression
+      * \param a_stride optional Stride object, passing the strides.
+      */
+    inline Map(PointerArgType dataPtr, Index nbRows, Index nbCols, const StrideType& a_stride = StrideType())
+      : Base(cast_to_pointer_type(dataPtr), nbRows, nbCols), m_stride(a_stride)
+    {
+      PlainObjectType::Base::_check_template_params();
+    }
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
+
+  protected:
+    StrideType m_stride;
+};
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+inline Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>
+  ::Array(const Scalar *data)
+{
+  this->_set_noalias(Eigen::Map<const Array>(data));
+}
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+inline Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>
+  ::Matrix(const Scalar *data)
+{
+  this->_set_noalias(Eigen::Map<const Matrix>(data));
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MAP_H
diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h
new file mode 100644
index 0000000..92e1141
--- /dev/null
+++ b/Eigen/src/Core/MapBase.h
@@ -0,0 +1,247 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MAPBASE_H
+#define EIGEN_MAPBASE_H
+
+#define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \
+      EIGEN_STATIC_ASSERT((int(internal::traits<Derived>::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \
+                          YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT)
+
+namespace Eigen { 
+
+/** \class MapBase
+  * \ingroup Core_Module
+  *
+  * \brief Base class for Map and Block expression with direct access
+  *
+  * \sa class Map, class Block
+  */
+template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
+  : public internal::dense_xpr_base<Derived>::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<Derived>::type Base;
+    enum {
+      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+      SizeAtCompileTime = Base::SizeAtCompileTime
+    };
+
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename internal::conditional<
+                         bool(internal::is_lvalue<Derived>::value),
+                         Scalar *,
+                         const Scalar *>::type
+                     PointerType;
+
+    using Base::derived;
+//    using Base::RowsAtCompileTime;
+//    using Base::ColsAtCompileTime;
+//    using Base::SizeAtCompileTime;
+    using Base::MaxRowsAtCompileTime;
+    using Base::MaxColsAtCompileTime;
+    using Base::MaxSizeAtCompileTime;
+    using Base::IsVectorAtCompileTime;
+    using Base::Flags;
+    using Base::IsRowMajor;
+
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::coeff;
+    using Base::coeffRef;
+    using Base::lazyAssign;
+    using Base::eval;
+
+    using Base::innerStride;
+    using Base::outerStride;
+    using Base::rowStride;
+    using Base::colStride;
+
+    // bug 217 - compile error on ICC 11.1
+    using Base::operator=;
+
+    typedef typename Base::CoeffReturnType CoeffReturnType;
+
+    inline Index rows() const { return m_rows.value(); }
+    inline Index cols() const { return m_cols.value(); }
+
+    /** Returns a pointer to the first coefficient of the matrix or vector.
+      *
+      * \note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride().
+      *
+      * \sa innerStride(), outerStride()
+      */
+    inline const Scalar* data() const { return m_data; }
+
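+    // Coefficient (rowId, colId) is addressed through the run-time strides:
+    // rowStride() and colStride() resolve to innerStride() or outerStride()
+    // depending on the storage order.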
+    inline const Scalar& coeff(Index rowId, Index colId) const
+    {
+      return m_data[colId * colStride() + rowId * rowStride()];
+    }
+
+    inline const Scalar& coeff(Index index) const
+    {
+      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+      return m_data[index * innerStride()];
+    }
+
+    inline const Scalar& coeffRef(Index rowId, Index colId) const
+    {
+      return this->m_data[colId * colStride() + rowId * rowStride()];
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+      return this->m_data[index * innerStride()];
+    }
+
+    template<int LoadMode>
+    inline PacketScalar packet(Index rowId, Index colId) const
+    {
+      return internal::ploadt<PacketScalar, LoadMode>
+               (m_data + (colId * colStride() + rowId * rowStride()));
+    }
+
+    template<int LoadMode>
+    inline PacketScalar packet(Index index) const
+    {
+      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+      return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
+    }
+
+    inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
+    {
+      EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
+      checkSanity();
+    }
+
+    inline MapBase(PointerType dataPtr, Index vecSize)
+            : m_data(dataPtr),
+              m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)),
+              m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime))
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+      eigen_assert(vecSize >= 0);
+      eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize);
+      checkSanity();
+    }
+
+    inline MapBase(PointerType dataPtr, Index nbRows, Index nbCols)
+            : m_data(dataPtr), m_rows(nbRows), m_cols(nbCols)
+    {
+      eigen_assert( (dataPtr == 0)
+              || (   nbRows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == nbRows)
+                  && nbCols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == nbCols)));
+      checkSanity();
+    }
+
+  protected:
+
+    void checkSanity() const
+    {
+      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(internal::traits<Derived>::Flags&PacketAccessBit,
+                                        internal::inner_stride_at_compile_time<Derived>::ret==1),
+                          PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
+      eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % 16) == 0)
+                   && "input pointer is not aligned on a 16 byte boundary");
+    }
+
+    PointerType m_data;
+    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;
+    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;
+};
+
+template<typename Derived> class MapBase<Derived, WriteAccessors>
+  : public MapBase<Derived, ReadOnlyAccessors>
+{
+    typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;
+  public:
+
+    typedef MapBase<Derived, ReadOnlyAccessors> Base;
+
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::PacketScalar PacketScalar;
+    typedef typename Base::Index Index;
+    typedef typename Base::PointerType PointerType;
+
+    using Base::derived;
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::coeff;
+    using Base::coeffRef;
+
+    using Base::innerStride;
+    using Base::outerStride;
+    using Base::rowStride;
+    using Base::colStride;
+
+    typedef typename internal::conditional<
+                    internal::is_lvalue<Derived>::value,
+                    Scalar,
+                    const Scalar
+                  >::type ScalarWithConstIfNotLvalue;
+
+    inline const Scalar* data() const { return this->m_data; }
+    inline ScalarWithConstIfNotLvalue* data() { return this->m_data; } // no const-cast here so non-const-correct code will give a compile error
+
+    inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col)
+    {
+      return this->m_data[col * colStride() + row * rowStride()];
+    }
+
+    inline ScalarWithConstIfNotLvalue& coeffRef(Index index)
+    {
+      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+      return this->m_data[index * innerStride()];
+    }
+
+    template<int StoreMode>
+    inline void writePacket(Index row, Index col, const PacketScalar& val)
+    {
+      internal::pstoret<Scalar, PacketScalar, StoreMode>
+               (this->m_data + (col * colStride() + row * rowStride()), val);
+    }
+
+    template<int StoreMode>
+    inline void writePacket(Index index, const PacketScalar& val)
+    {
+      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)
+      internal::pstoret<Scalar, PacketScalar, StoreMode>
+                (this->m_data + index * innerStride(), val);
+    }
+
+    explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {}
+    inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {}
+    inline MapBase(PointerType dataPtr, Index nbRows, Index nbCols) : Base(dataPtr, nbRows, nbCols) {}
+
+    Derived& operator=(const MapBase& other)
+    {
+      ReadOnlyMapBase::Base::operator=(other);
+      return derived();
+    }
+
+    // In theory we could simply refer to Base::Base::operator=, but MSVC does not like Base::Base,
+    // see bugs 821 and 920.
+    using ReadOnlyMapBase::Base::operator=;
+};
+
+#undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS
+
+} // end namespace Eigen
+
+#endif // EIGEN_MAPBASE_H
diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h
new file mode 100644
index 0000000..2bfc5eb
--- /dev/null
+++ b/Eigen/src/Core/MathFunctions.h
@@ -0,0 +1,768 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATHFUNCTIONS_H
+#define EIGEN_MATHFUNCTIONS_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal \struct global_math_functions_filtering_base
+  *
+  * What it does:
+  * Defines a typedef 'type' as follows:
+  * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then
+  *   global_math_functions_filtering_base<T>::type is a typedef for it.
+  * - otherwise, global_math_functions_filtering_base<T>::type is a typedef for T.
+  *
+  * How it's used:
+  * To allow defining the global math functions (like sin...) in certain cases, like Array expressions.
+  * When you write sin(array1+array2), the object array1+array2 has a complicated expression type; all you want to know
+  * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase<Derived>.
+  * So we must make sure to use sin_impl<ArrayBase<Derived> > and not sin_impl<Derived>, otherwise our partial specialization
+  * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it.
+  *
+  * How it's implemented:
+  * SFINAE in the style of enable_if. Highly susceptible to breaking compilers. With GCC it does work, but if you replace
+  * the typename dummy with an integer template parameter, it doesn't work anymore!
+  */
+
+template<typename T, typename dummy = void>
+struct global_math_functions_filtering_base
+{
+  typedef T type;
+};
+
+template<typename T> struct always_void { typedef void type; };
+
+template<typename T>
+struct global_math_functions_filtering_base
+  <T,
+   typename always_void<typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl>::type
+  >
+{
+  typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type;
+};
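+
+// For illustration: for a plain scalar like double, the filtered type is double
+// itself, while an expression type declaring
+//   typedef ArrayBase<Derived> Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;
+// is filtered to ArrayBase<Derived>, so that sin_impl<ArrayBase<Derived> > is picked up.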
+
+#define EIGEN_MATHFUNC_IMPL(func, scalar) Eigen::internal::func##_impl<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type>
+#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename Eigen::internal::func##_retval<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type>::type
+
+/****************************************************************************
+* Implementation of real                                                 *
+****************************************************************************/
+
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
+struct real_default_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x)
+  {
+    return x;
+  }
+};
+
+template<typename Scalar>
+struct real_default_impl<Scalar,true>
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x)
+  {
+    using std::real;
+    return real(x);
+  }
+};
+
+template<typename Scalar> struct real_impl : real_default_impl<Scalar> {};
+
+template<typename Scalar>
+struct real_retval
+{
+  typedef typename NumTraits<Scalar>::Real type;
+};
+
+
+/****************************************************************************
+* Implementation of imag                                                 *
+****************************************************************************/
+
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
+struct imag_default_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar&)
+  {
+    return RealScalar(0);
+  }
+};
+
+template<typename Scalar>
+struct imag_default_impl<Scalar,true>
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x)
+  {
+    using std::imag;
+    return imag(x);
+  }
+};
+
+template<typename Scalar> struct imag_impl : imag_default_impl<Scalar> {};
+
+template<typename Scalar>
+struct imag_retval
+{
+  typedef typename NumTraits<Scalar>::Real type;
+};
+
+/****************************************************************************
+* Implementation of real_ref                                             *
+****************************************************************************/
+
+template<typename Scalar>
+struct real_ref_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar& run(Scalar& x)
+  {
+    return reinterpret_cast<RealScalar*>(&x)[0];
+  }
+  static inline const RealScalar& run(const Scalar& x)
+  {
+    return reinterpret_cast<const RealScalar*>(&x)[0];
+  }
+};
+
+template<typename Scalar>
+struct real_ref_retval
+{
+  typedef typename NumTraits<Scalar>::Real & type;
+};
+
+/****************************************************************************
+* Implementation of imag_ref                                             *
+****************************************************************************/
+
+template<typename Scalar, bool IsComplex>
+struct imag_ref_default_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar& run(Scalar& x)
+  {
+    return reinterpret_cast<RealScalar*>(&x)[1];
+  }
+  static inline const RealScalar& run(const Scalar& x)
+  {
+    return reinterpret_cast<const RealScalar*>(&x)[1];
+  }
+};
+
+template<typename Scalar>
+struct imag_ref_default_impl<Scalar, false>
+{
+  static inline Scalar run(Scalar&)
+  {
+    return Scalar(0);
+  }
+  static inline const Scalar run(const Scalar&)
+  {
+    return Scalar(0);
+  }
+};
+
+template<typename Scalar>
+struct imag_ref_impl : imag_ref_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {};
+
+template<typename Scalar>
+struct imag_ref_retval
+{
+  typedef typename NumTraits<Scalar>::Real & type;
+};
+
+/****************************************************************************
+* Implementation of conj                                                 *
+****************************************************************************/
+
+template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>
+struct conj_impl
+{
+  static inline Scalar run(const Scalar& x)
+  {
+    return x;
+  }
+};
+
+template<typename Scalar>
+struct conj_impl<Scalar,true>
+{
+  static inline Scalar run(const Scalar& x)
+  {
+    using std::conj;
+    return conj(x);
+  }
+};
+
+template<typename Scalar>
+struct conj_retval
+{
+  typedef Scalar type;
+};
+
+/****************************************************************************
+* Implementation of abs2                                                 *
+****************************************************************************/
+
+template<typename Scalar>
+struct abs2_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x)
+  {
+    return x*x;
+  }
+};
+
+template<typename RealScalar>
+struct abs2_impl<std::complex<RealScalar> >
+{
+  static inline RealScalar run(const std::complex<RealScalar>& x)
+  {
+    return real(x)*real(x) + imag(x)*imag(x);
+  }
+};
+
+template<typename Scalar>
+struct abs2_retval
+{
+  typedef typename NumTraits<Scalar>::Real type;
+};
+
+/****************************************************************************
+* Implementation of norm1                                                *
+****************************************************************************/
+
+template<typename Scalar, bool IsComplex>
+struct norm1_default_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x)
+  {
+    using std::abs;
+    return abs(real(x)) + abs(imag(x));
+  }
+};
+
+template<typename Scalar>
+struct norm1_default_impl<Scalar, false>
+{
+  static inline Scalar run(const Scalar& x)
+  {
+    using std::abs;
+    return abs(x);
+  }
+};
+
+template<typename Scalar>
+struct norm1_impl : norm1_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {};
+
+template<typename Scalar>
+struct norm1_retval
+{
+  typedef typename NumTraits<Scalar>::Real type;
+};
+
+/****************************************************************************
+* Implementation of hypot                                                *
+****************************************************************************/
+
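+// Computes sqrt(x^2 + y^2) without intermediate overflow or underflow by
+// factoring out the larger magnitude p: p * sqrt(1 + (q/p)^2) with q <= p.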
+template<typename Scalar>
+struct hypot_impl
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline RealScalar run(const Scalar& x, const Scalar& y)
+  {
+    using std::max;
+    using std::min;
+    using std::abs;
+    using std::sqrt;
+    RealScalar _x = abs(x);
+    RealScalar _y = abs(y);
+    RealScalar p = (max)(_x, _y);
+    if(p==RealScalar(0)) return 0;
+    RealScalar q = (min)(_x, _y);
+    RealScalar qp = q/p;
+    return p * sqrt(RealScalar(1) + qp*qp);
+  }
+};
+
+template<typename Scalar>
+struct hypot_retval
+{
+  typedef typename NumTraits<Scalar>::Real type;
+};
+
+/****************************************************************************
+* Implementation of cast                                                 *
+****************************************************************************/
+
+template<typename OldType, typename NewType>
+struct cast_impl
+{
+  static inline NewType run(const OldType& x)
+  {
+    return static_cast<NewType>(x);
+  }
+};
+
+// here, for once, we're plainly returning NewType: we don't want cast to do weird things.
+
+template<typename OldType, typename NewType>
+inline NewType cast(const OldType& x)
+{
+  return cast_impl<OldType, NewType>::run(x);
+}
+
+/****************************************************************************
+* Implementation of atanh2                                                *
+****************************************************************************/
+
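+// atanh2(x,y) computes atanh(x/y) = 0.5 * log((y+x)/(y-x)); when |x/y| is tiny
+// the log argument is close to 1, so the Taylor expansion z + z^3/3 (z = x/y)
+// is used instead to preserve accuracy.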
+template<typename Scalar, bool IsInteger>
+struct atanh2_default_impl
+{
+  typedef Scalar retval;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline Scalar run(const Scalar& x, const Scalar& y)
+  {
+    using std::abs;
+    using std::log;
+    using std::sqrt;
+    Scalar z = x / y;
+    if (y == Scalar(0) || abs(z) > sqrt(NumTraits<RealScalar>::epsilon()))
+      return RealScalar(0.5) * log((y + x) / (y - x));
+    else
+      return z + z*z*z / RealScalar(3);
+  }
+};
+
+template<typename Scalar>
+struct atanh2_default_impl<Scalar, true>
+{
+  static inline Scalar run(const Scalar&, const Scalar&)
+  {
+    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)
+    return Scalar(0);
+  }
+};
+
+template<typename Scalar>
+struct atanh2_impl : atanh2_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar>
+struct atanh2_retval
+{
+  typedef Scalar type;
+};
+
+/****************************************************************************
+* Implementation of pow                                                  *
+****************************************************************************/
+
+template<typename Scalar, bool IsInteger>
+struct pow_default_impl
+{
+  typedef Scalar retval;
+  static inline Scalar run(const Scalar& x, const Scalar& y)
+  {
+    using std::pow;
+    return pow(x, y);
+  }
+};
+
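+// Integer case: exponentiation by squaring, scanning the bits of y from least
+// to most significant; needs only O(log y) multiplications.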
+template<typename Scalar>
+struct pow_default_impl<Scalar, true>
+{
+  static inline Scalar run(Scalar x, Scalar y)
+  {
+    Scalar res(1);
+    eigen_assert(!NumTraits<Scalar>::IsSigned || y >= 0);
+    if(y & 1) res *= x;
+    y >>= 1;
+    while(y)
+    {
+      x *= x;
+      if(y&1) res *= x;
+      y >>= 1;
+    }
+    return res;
+  }
+};
+
+template<typename Scalar>
+struct pow_impl : pow_default_impl<Scalar, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar>
+struct pow_retval
+{
+  typedef Scalar type;
+};
+
+/****************************************************************************
+* Implementation of random                                               *
+****************************************************************************/
+
+template<typename Scalar,
+         bool IsComplex,
+         bool IsInteger>
+struct random_default_impl {};
+
+template<typename Scalar>
+struct random_impl : random_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar>
+struct random_retval
+{
+  typedef Scalar type;
+};
+
+template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y);
+template<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random();
+
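+// Default floating-point case (neither complex nor integer): linearly map
+// std::rand()/RAND_MAX into the interval [x, y].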
+template<typename Scalar>
+struct random_default_impl<Scalar, false, false>
+{
+  static inline Scalar run(const Scalar& x, const Scalar& y)
+  {
+    return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX);
+  }
+  static inline Scalar run()
+  {
+    return run(Scalar(NumTraits<Scalar>::IsSigned ? -1 : 0), Scalar(1));
+  }
+};
+
+enum {
+  floor_log2_terminate,
+  floor_log2_move_up,
+  floor_log2_move_down,
+  floor_log2_bogus
+};
+
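+// Compile-time binary search for floor(log2(n)): the selector decides whether
+// to move into the lower or the upper half of [lower, upper], terminating once
+// the bracket is down to one bit position.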
+template<unsigned int n, int lower, int upper> struct floor_log2_selector
+{
+  enum { middle = (lower + upper) / 2,
+         value = (upper <= lower + 1) ? int(floor_log2_terminate)
+               : (n < (1 << middle)) ? int(floor_log2_move_down)
+               : (n==0) ? int(floor_log2_bogus)
+               : int(floor_log2_move_up)
+  };
+};
+
+template<unsigned int n,
+         int lower = 0,
+         int upper = sizeof(unsigned int) * CHAR_BIT - 1,
+         int selector = floor_log2_selector<n, lower, upper>::value>
+struct floor_log2 {};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_move_down>
+{
+  enum { value = floor_log2<n, lower, floor_log2_selector<n, lower, upper>::middle>::value };
+};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_move_up>
+{
+  enum { value = floor_log2<n, floor_log2_selector<n, lower, upper>::middle, upper>::value };
+};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_terminate>
+{
+  enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? lower+1 : lower };
+};
+
+template<unsigned int n, int lower, int upper>
+struct floor_log2<n, lower, upper, floor_log2_bogus>
+{
+  // no value, error at compile time
+};
+
+template<typename Scalar>
+struct random_default_impl<Scalar, false, true>
+{
+  typedef typename NumTraits<Scalar>::NonInteger NonInteger;
+
+  static inline Scalar run(const Scalar& x, const Scalar& y)
+  {
+    return x + Scalar((NonInteger(y)-x+1) * std::rand() / (RAND_MAX + NonInteger(1)));
+  }
+
+  static inline Scalar run()
+  {
+#ifdef EIGEN_MAKING_DOCS
+    return run(Scalar(NumTraits<Scalar>::IsSigned ? -10 : 0), Scalar(10));
+#else
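+    // Use the full range of Scalar: drop excess random bits via 'shift' when
+    // rand() yields more bits than Scalar can hold, and recenter signed types
+    // around zero via 'offset'.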
+    enum { rand_bits = floor_log2<(unsigned int)(RAND_MAX)+1>::value,
+           scalar_bits = sizeof(Scalar) * CHAR_BIT,
+           shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)),
+           offset = NumTraits<Scalar>::IsSigned ? (1 << (EIGEN_PLAIN_ENUM_MIN(rand_bits,scalar_bits)-1)) : 0
+    };
+    return Scalar((std::rand() >> shift) - offset);
+#endif
+  }
+};
+
+template<typename Scalar>
+struct random_default_impl<Scalar, true, false>
+{
+  static inline Scalar run(const Scalar& x, const Scalar& y)
+  {
+    return Scalar(random(real(x), real(y)),
+                  random(imag(x), imag(y)));
+  }
+  static inline Scalar run()
+  {
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    return Scalar(random<RealScalar>(), random<RealScalar>());
+  }
+};
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
+{
+  return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
+}
+
+} // end namespace internal
+
+/****************************************************************************
+* Generic math function                                                    *
+****************************************************************************/
+
+namespace numext {
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x);
+}  
+
+template<typename Scalar>
+inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x)
+{
+  return internal::real_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x)
+{
+  return internal::imag_ref_impl<Scalar>::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)
+{
+  return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(atanh2, Scalar) atanh2(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(atanh2, Scalar)::run(x, y);
+}
+
+template<typename Scalar>
+inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y)
+{
+  return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y);
+}
+
+// std::isfinite is non-standard (it was only standardized in C++11), so let's
+// define our own version, even though it is not very efficient.
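+// Note that NaN also yields false here, since both comparisons fail for NaN.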
+template<typename T> bool (isfinite)(const T& x)
+{
+  return x<NumTraits<T>::highest() && x>NumTraits<T>::lowest();
+}
+
+} // end namespace numext
+
+namespace internal {
+
+/****************************************************************************
+* Implementation of fuzzy comparisons                                       *
+****************************************************************************/
+
+template<typename Scalar,
+         bool IsComplex,
+         bool IsInteger>
+struct scalar_fuzzy_default_impl {};
+
+template<typename Scalar>
+struct scalar_fuzzy_default_impl<Scalar, false, false>
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  template<typename OtherScalar>
+  static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
+  {
+    using std::abs;
+    return abs(x) <= abs(y) * prec;
+  }
+  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
+  {
+    using std::min;
+    using std::abs;
+    return abs(x - y) <= (min)(abs(x), abs(y)) * prec;
+  }
+  static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec)
+  {
+    return x <= y || isApprox(x, y, prec);
+  }
+};
+
+template<typename Scalar>
+struct scalar_fuzzy_default_impl<Scalar, false, true>
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  template<typename OtherScalar>
+  static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&)
+  {
+    return x == Scalar(0);
+  }
+  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&)
+  {
+    return x == y;
+  }
+  static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&)
+  {
+    return x <= y;
+  }
+};
+
+template<typename Scalar>
+struct scalar_fuzzy_default_impl<Scalar, true, false>
+{
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  template<typename OtherScalar>
+  static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)
+  {
+    return numext::abs2(x) <= numext::abs2(y) * prec * prec;
+  }
+  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)
+  {
+    using std::min;
+    return numext::abs2(x - y) <= (min)(numext::abs2(x), numext::abs2(y)) * prec * prec;
+  }
+};
+
+template<typename Scalar>
+struct scalar_fuzzy_impl : scalar_fuzzy_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};
+
+template<typename Scalar, typename OtherScalar>
+inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y,
+                                   typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+  return scalar_fuzzy_impl<Scalar>::template isMuchSmallerThan<OtherScalar>(x, y, precision);
+}
+
+template<typename Scalar>
+inline bool isApprox(const Scalar& x, const Scalar& y,
+                          typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+  return scalar_fuzzy_impl<Scalar>::isApprox(x, y, precision);
+}
+
+template<typename Scalar>
+inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y,
+                                    typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+  return scalar_fuzzy_impl<Scalar>::isApproxOrLessThan(x, y, precision);
+}
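+
+// e.g. with double's default precision (dummy_precision() == 1e-12),
+// isApprox(1.0, 1.0 + 1e-13) is true since |x-y| <= min(|x|,|y|) * prec,
+// while isApprox(0.0, 1e-13) is false: no nonzero value is approximately 0.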
+
+/******************************************
+***  The special case of the  bool type ***
+******************************************/
+
+template<> struct random_impl<bool>
+{
+  static inline bool run()
+  {
+    return random<int>(0,1) != 0;
+  }
+};
+
+template<> struct scalar_fuzzy_impl<bool>
+{
+  typedef bool RealScalar;
+  
+  template<typename OtherScalar>
+  static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&)
+  {
+    return !x;
+  }
+  
+  static inline bool isApprox(bool x, bool y, bool)
+  {
+    return x == y;
+  }
+
+  static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&)
+  {
+    return (!x) || y;
+  }
+  
+};
+
+  
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATHFUNCTIONS_H
diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h
new file mode 100644
index 0000000..d7d0b5b
--- /dev/null
+++ b/Eigen/src/Core/Matrix.h
@@ -0,0 +1,405 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_H
+#define EIGEN_MATRIX_H
+
+namespace Eigen {
+
+/** \class Matrix
+  * \ingroup Core_Module
+  *
+  * \brief The matrix class, also used for vectors and row-vectors
+  *
+  * The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen.
+  * Vectors are matrices with one column, and row-vectors are matrices with one row.
+  *
+  * The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note").
+  *
+  * The first three template parameters are required:
+  * \tparam _Scalar \anchor matrix_tparam_scalar Numeric type, e.g. float, double, int or std::complex<float>.
+  *                 User-defined scalar types are supported as well (see \ref user_defined_scalars "here").
+  * \tparam _Rows Number of rows, or \b Dynamic
+  * \tparam _Cols Number of columns, or \b Dynamic
+  *
+  * The remaining template parameters are optional -- in most cases you don't have to worry about them.
+  * \tparam _Options \anchor matrix_tparam_options A combination of either \b #RowMajor or \b #ColMajor, and of either
+  *                 \b #AutoAlign or \b #DontAlign.
+  *                 The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required
+  *                 for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size.
+  * \tparam _MaxRows Maximum number of rows. Defaults to \a _Rows (\ref maxrows "note").
+  * \tparam _MaxCols Maximum number of columns. Defaults to \a _Cols (\ref maxrows "note").
+  *
+  * Eigen provides a number of typedefs covering the usual cases. Here are some examples:
+  *
+  * \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix<double, 2, 2>)
+  * \li \c Vector4f is a vector of 4 floats (\c Matrix<float, 4, 1>)
+  * \li \c RowVector3i is a row-vector of 3 ints (\c Matrix<int, 1, 3>)
+  *
+  * \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix<float, Dynamic, Dynamic>)
+  * \li \c VectorXf is a dynamic-size vector of floats (\c Matrix<float, Dynamic, 1>)
+  *
+  * \li \c Matrix2Xf is a partially fixed-size matrix of floats, with 2 rows and a dynamic number of columns (\c Matrix<float, 2, Dynamic>)
+  * \li \c MatrixX3d is a partially fixed-size matrix of doubles, with a dynamic number of rows and 3 columns (\c Matrix<double, Dynamic, 3>)
+  *
+  * See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs.
+  *
+  * You can access elements of vectors and matrices using normal subscripting:
+  *
+  * \code
+  * Eigen::VectorXd v(10);
+  * v[0] = 0.1;
+  * v[1] = 0.2;
+  * v(0) = 0.3;
+  * v(1) = 0.4;
+  *
+  * Eigen::MatrixXi m(10, 10);
+  * m(0, 1) = 1;
+  * m(0, 2) = 2;
+  * m(0, 3) = 3;
+  * \endcode
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_MATRIX_PLUGIN.
+  *
+  * <i><b>Some notes:</b></i>
+  *
+  * <dl>
+  * <dt><b>\anchor dense Dense versus sparse:</b></dt>
+  * <dd>This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the Sparse module.
+  *
+  * Dense matrices and vectors are plain arrays of coefficients: all the coefficients are stored in one ordinary contiguous array.
+  * This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero coefficients.</dd>
+  *
+  * <dt><b>\anchor fixedsize Fixed-size versus dynamic-size:</b></dt>
+  * <dd>Fixed-size means that the numbers of rows and columns are known at compile-time. In this case, Eigen allocates the array
+  * of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices, typically up to 4x4, sometimes up
+  * to 16x16. Larger matrices should be declared as dynamic-size even if one happens to know their size at compile-time.
+  *
+  * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they are runtime
+  * variables, and the array of coefficients is allocated dynamically on the heap.
+  *
+  * Note that \em dense matrices, be they Fixed-size or Dynamic-size, <em>do not</em> expand dynamically in the sense of a std::map.
+  * If you want this behavior, see the Sparse module.</dd>
+  *
+  * <dt><b>\anchor maxrows _MaxRows and _MaxCols:</b></dt>
+  * <dd>In most cases, one just leaves these parameters at their default values.
+  * These parameters specify the maximum numbers of rows and columns that the matrix may have. They are useful in cases
+  * when the exact numbers of rows and columns are not known at compile-time, but it is known at compile-time that they cannot
+  * exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case _MaxRows and _MaxCols
+  * are the dimensions of the original matrix, while _Rows and _Cols are Dynamic (see the example below).</dd>
+  * </dl>
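+  *
+  * For instance, the dynamic-size-block case mentioned above:
+  * \code
+  * Matrix4f m;
+  * // _Rows/_Cols are Dynamic but _MaxRows/_MaxCols are 4, so the block is
+  * // stored on the stack, without heap allocation:
+  * Matrix<float, Dynamic, Dynamic, ColMajor, 4, 4> c = m.topLeftCorner(2, 3);
+  * \endcode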
+  *
+  * \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy, 
+  * \ref TopicStorageOrders 
+  */
+
+namespace internal {
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+{
+  typedef _Scalar Scalar;
+  typedef Dense StorageKind;
+  typedef DenseIndex Index;
+  typedef MatrixXpr XprKind;
+  enum {
+    RowsAtCompileTime = _Rows,
+    ColsAtCompileTime = _Cols,
+    MaxRowsAtCompileTime = _MaxRows,
+    MaxColsAtCompileTime = _MaxCols,
+    Flags = compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret,
+    CoeffReadCost = NumTraits<Scalar>::ReadCost,
+    Options = _Options,
+    InnerStrideAtCompileTime = 1,
+    OuterStrideAtCompileTime = (Options&RowMajor) ? ColsAtCompileTime : RowsAtCompileTime
+  };
+};
+}
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+class Matrix
+  : public PlainObjectBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+{
+  public:
+
+    /** \brief Base class typedef.
+      * \sa PlainObjectBase
+      */
+    typedef PlainObjectBase<Matrix> Base;
+
+    enum { Options = _Options };
+
+    EIGEN_DENSE_PUBLIC_INTERFACE(Matrix)
+
+    typedef typename Base::PlainObject PlainObject;
+
+    using Base::base;
+    using Base::coeffRef;
+
+    /**
+      * \brief Assigns matrices to each other.
+      *
+      * \note This is a special case of the templated operator=. Its purpose is
+      * to prevent a default operator= from hiding the templated operator=.
+      *
+      * \callgraph
+      */
+    EIGEN_STRONG_INLINE Matrix& operator=(const Matrix& other)
+    {
+      return Base::_set(other);
+    }
+
+    /** \internal
+      * \brief Copies the value of the expression \a other into \c *this with automatic resizing.
+      *
+      * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
+      * it will be initialized.
+      *
+      * Note that copying a row-vector into a vector (and conversely) is allowed.
+      * The resizing, if any, is then done in the appropriate way so that row-vectors
+      * remain row-vectors and vectors remain vectors.
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Matrix& operator=(const MatrixBase<OtherDerived>& other)
+    {
+      return Base::_set(other);
+    }
+
+    /* Here, doxygen failed to copy the brief information when using \copydoc */
+
+    /**
+      * \brief Copies the generic expression \a other into *this.
+      * \copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other)
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase<OtherDerived> &other)
+    {
+      return Base::operator=(other);
+    }
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Matrix& operator=(const ReturnByValue<OtherDerived>& func)
+    {
+      return Base::operator=(func);
+    }
+
+    /** \brief Default constructor.
+      *
+      * For fixed-size matrices, does nothing.
+      *
+      * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix
+      * is called a null matrix. This constructor is the unique way to create null matrices: resizing
+      * a matrix to 0 is not supported.
+      *
+      * \sa resize(Index,Index)
+      */
+    EIGEN_STRONG_INLINE Matrix() : Base()
+    {
+      Base::_check_template_params();
+      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+    }
+
+    // FIXME is it still needed
+    Matrix(internal::constructor_without_unaligned_array_assert)
+      : Base(internal::constructor_without_unaligned_array_assert())
+    { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }
+
+    /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
+      *
+      * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
+      * it is redundant to pass the dimension here, so it makes more sense to use the default
+      * constructor Matrix() instead.
+      */
+    EIGEN_STRONG_INLINE explicit Matrix(Index dim)
+      : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
+    {
+      Base::_check_template_params();
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix)
+      eigen_assert(dim >= 0);
+      eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim);
+      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename T0, typename T1>
+    EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y)
+    {
+      Base::_check_template_params();
+      Base::template _init2<T0,T1>(x, y);
+    }
+    #else
+    /** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns.
+      *
+      * This is useful for dynamic-size matrices. For fixed-size matrices,
+      * it is redundant to pass these parameters, so one should use the default constructor
+      * Matrix() instead. */
+    Matrix(Index rows, Index cols);
+    /** \brief Constructs an initialized 2D vector with given coefficients */
+    Matrix(const Scalar& x, const Scalar& y);
+    #endif
+
+    /** \brief Constructs an initialized 3D vector with given coefficients */
+    EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z)
+    {
+      Base::_check_template_params();
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3)
+      m_storage.data()[0] = x;
+      m_storage.data()[1] = y;
+      m_storage.data()[2] = z;
+    }
+    /** \brief Constructs an initialized 4D vector with given coefficients */
+    EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w)
+    {
+      Base::_check_template_params();
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4)
+      m_storage.data()[0] = x;
+      m_storage.data()[1] = y;
+      m_storage.data()[2] = z;
+      m_storage.data()[3] = w;
+    }
+
+    explicit Matrix(const Scalar *data);
+
+    /** \brief Constructor copying the value of the expression \a other */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Matrix(const MatrixBase<OtherDerived>& other)
+             : Base(other.rows() * other.cols(), other.rows(), other.cols())
+    {
+      // This test resides here, to bring the error messages closer to the user. Normally, these checks
+      // are performed deeply within the library, thus causing long and scary error traces.
+      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+      Base::_check_template_params();
+      Base::_set_noalias(other);
+    }
+    /** \brief Copy constructor */
+    EIGEN_STRONG_INLINE Matrix(const Matrix& other)
+            : Base(other.rows() * other.cols(), other.rows(), other.cols())
+    {
+      Base::_check_template_params();
+      Base::_set_noalias(other);
+    }
+    /** \brief Copy constructor with in-place evaluation */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Matrix(const ReturnByValue<OtherDerived>& other)
+    {
+      Base::_check_template_params();
+      Base::resize(other.rows(), other.cols());
+      other.evalTo(*this);
+    }
+
+    /** \brief Copy constructor for generic expressions.
+      * \sa MatrixBase::operator=(const EigenBase<OtherDerived>&)
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Matrix(const EigenBase<OtherDerived> &other)
+      : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
+    {
+      Base::_check_template_params();
+      Base::_resize_to_match(other);
+      // FIXME/CHECK: isn't *this = other.derived() more efficient? It allows us to
+      //              go for pure _set() implementations, right?
+      *this = other;
+    }
+
+    /** \internal
+      * \brief Override MatrixBase::swap() since for dynamic-sized matrices
+      * of same type it is enough to swap the data pointers.
+      */
+    template<typename OtherDerived>
+    void swap(MatrixBase<OtherDerived> const & other)
+    { this->_swap(other.derived()); }
+
+    inline Index innerStride() const { return 1; }
+    inline Index outerStride() const { return this->innerSize(); }
+
+    /////////// Geometry module ///////////
+
+    template<typename OtherDerived>
+    explicit Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r);
+    template<typename OtherDerived>
+    Matrix& operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r);
+
+    #ifdef EIGEN2_SUPPORT
+    template<typename OtherDerived>
+    explicit Matrix(const eigen2_RotationBase<OtherDerived,ColsAtCompileTime>& r);
+    template<typename OtherDerived>
+    Matrix& operator=(const eigen2_RotationBase<OtherDerived,ColsAtCompileTime>& r);
+    #endif
+
+    // allow Matrix to be extended outside Eigen
+    #ifdef EIGEN_MATRIX_PLUGIN
+    #include EIGEN_MATRIX_PLUGIN
+    #endif
+
+  protected:
+    template <typename Derived, typename OtherDerived, bool IsVector>
+    friend struct internal::conservative_resize_like_impl;
+
+    using Base::m_storage;
+};
+
+/** \defgroup matrixtypedefs Global matrix typedefs
+  *
+  * \ingroup Core_Module
+  *
+  * Eigen defines several typedef shortcuts for most common matrix and vector types.
+  *
+  * The general patterns are the following:
+  *
+  * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size,
+  * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd
+  * for complex double.
+  *
+  * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of floats.
+  *
+  * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is
+  * a fixed-size vector of 4 complex floats.
+  *
+  * \sa class Matrix
+  */
+
+#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)   \
+/** \ingroup matrixtypedefs */                                    \
+typedef Matrix<Type, Size, Size> Matrix##SizeSuffix##TypeSuffix;  \
+/** \ingroup matrixtypedefs */                                    \
+typedef Matrix<Type, Size, 1>    Vector##SizeSuffix##TypeSuffix;  \
+/** \ingroup matrixtypedefs */                                    \
+typedef Matrix<Type, 1, Size>    RowVector##SizeSuffix##TypeSuffix;
+
+#define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size)         \
+/** \ingroup matrixtypedefs */                                    \
+typedef Matrix<Type, Size, Dynamic> Matrix##Size##X##TypeSuffix;  \
+/** \ingroup matrixtypedefs */                                    \
+typedef Matrix<Type, Dynamic, Size> Matrix##X##Size##TypeSuffix;
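+
+// e.g. EIGEN_MAKE_TYPEDEFS(float, f, 3, 3) defines Matrix3f, Vector3f and
+// RowVector3f, while EIGEN_MAKE_FIXED_TYPEDEFS(float, f, 3) defines the
+// partially fixed Matrix3Xf and MatrixX3f.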
+
+#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \
+EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \
+EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \
+EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 4)
+
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int,                  i)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float,                f)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double,               d)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<float>,  cf)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
+
+#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES
+#undef EIGEN_MAKE_TYPEDEFS
+#undef EIGEN_MAKE_FIXED_TYPEDEFS
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_H
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
new file mode 100644
index 0000000..b67a7c1
--- /dev/null
+++ b/Eigen/src/Core/MatrixBase.h
@@ -0,0 +1,554 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIXBASE_H
+#define EIGEN_MATRIXBASE_H
+
+namespace Eigen {
+
+/** \class MatrixBase
+  * \ingroup Core_Module
+  *
+  * \brief Base class for all dense matrices, vectors, and expressions
+  *
+  * This class is the base that is inherited by all matrix, vector, and related expression
+  * types. Most of the Eigen API is contained in this class, and its base classes. Other important
+  * classes for the Eigen API are Matrix, and VectorwiseOp.
+  *
+  * Note that some methods are defined in other modules such as the \ref LU_Module LU module
+  * for all functions related to matrix inversions.
+  *
+  * \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc.
+  *
+  * When writing a function taking Eigen objects as argument, if you want your function
+  * to take as argument any matrix, vector, or expression, just let it take a
+  * MatrixBase argument. As an example, here is a function printFirstRow which, given
+  * a matrix, vector, or expression \a x, prints the first row of \a x.
+  *
+  * \code
+    template<typename Derived>
+    void printFirstRow(const Eigen::MatrixBase<Derived>& x)
+    {
+      cout << x.row(0) << endl;
+    }
+  * \endcode
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN.
+  *
+  * \sa \ref TopicClassHierarchy
+  */
+template<typename Derived> class MatrixBase
+  : public DenseBase<Derived>
+{
+  public:
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef MatrixBase StorageBaseType;
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    typedef DenseBase<Derived> Base;
+    using Base::RowsAtCompileTime;
+    using Base::ColsAtCompileTime;
+    using Base::SizeAtCompileTime;
+    using Base::MaxRowsAtCompileTime;
+    using Base::MaxColsAtCompileTime;
+    using Base::MaxSizeAtCompileTime;
+    using Base::IsVectorAtCompileTime;
+    using Base::Flags;
+    using Base::CoeffReadCost;
+
+    using Base::derived;
+    using Base::const_cast_derived;
+    using Base::rows;
+    using Base::cols;
+    using Base::size;
+    using Base::coeff;
+    using Base::coeffRef;
+    using Base::lazyAssign;
+    using Base::eval;
+    using Base::operator+=;
+    using Base::operator-=;
+    using Base::operator*=;
+    using Base::operator/=;
+
+    typedef typename Base::CoeffReturnType CoeffReturnType;
+    typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType;
+    typedef typename Base::RowXpr RowXpr;
+    typedef typename Base::ColXpr ColXpr;
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** type of the equivalent square matrix */
+    typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
+                          EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+    /** \returns the size of the main diagonal, which is min(rows(),cols()).
+      * \sa rows(), cols(), SizeAtCompileTime. */
+    inline Index diagonalSize() const { return (std::min)(rows(),cols()); }
+
+    /** \brief The plain matrix type corresponding to this expression.
+      *
+      * This is not necessarily exactly the return type of eval(). In the case of plain matrices,
+      * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed
+      * that the return type of eval() is either PlainObject or const PlainObject&.
+      */
+    typedef Matrix<typename internal::traits<Derived>::Scalar,
+                internal::traits<Derived>::RowsAtCompileTime,
+                internal::traits<Derived>::ColsAtCompileTime,
+                AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),
+                internal::traits<Derived>::MaxRowsAtCompileTime,
+                internal::traits<Derived>::MaxColsAtCompileTime
+          > PlainObject;
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal Represents a matrix with all coefficients equal to one another*/
+    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Derived> ConstantReturnType;
+    /** \internal the return type of MatrixBase::adjoint() */
+    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+                        CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>,
+                        ConstTransposeReturnType
+                     >::type AdjointReturnType;
+    /** \internal Return type of eigenvalues() */
+    typedef Matrix<std::complex<RealScalar>, internal::traits<Derived>::ColsAtCompileTime, 1, ColMajor> EigenvaluesReturnType;
+    /** \internal the return type of identity */
+    typedef CwiseNullaryOp<internal::scalar_identity_op<Scalar>,Derived> IdentityReturnType;
+    /** \internal the return type of unit vectors */
+    typedef Block<const CwiseNullaryOp<internal::scalar_identity_op<Scalar>, SquareMatrixType>,
+                  internal::traits<Derived>::RowsAtCompileTime,
+                  internal::traits<Derived>::ColsAtCompileTime> BasisReturnType;
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase
+#   include "../plugins/CommonCwiseUnaryOps.h"
+#   include "../plugins/CommonCwiseBinaryOps.h"
+#   include "../plugins/MatrixCwiseUnaryOps.h"
+#   include "../plugins/MatrixCwiseBinaryOps.h"
+#   ifdef EIGEN_MATRIXBASE_PLUGIN
+#     include EIGEN_MATRIXBASE_PLUGIN
+#   endif
+#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+    /** Special case of the template operator=, in order to prevent the compiler
+      * from generating a default operator= (issue hit with g++ 4.1)
+      */
+    Derived& operator=(const MatrixBase& other);
+
+    // We cannot inherit here via Base::operator= since it is causing
+    // trouble with MSVC.
+
+    template <typename OtherDerived>
+    Derived& operator=(const DenseBase<OtherDerived>& other);
+
+    template <typename OtherDerived>
+    Derived& operator=(const EigenBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    Derived& operator=(const ReturnByValue<OtherDerived>& other);
+
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);
+
+    template<typename MatrixPower, typename Lhs, typename Rhs>
+    Derived& lazyAssign(const MatrixPowerProduct<MatrixPower, Lhs,Rhs>& other);
+
+    template<typename OtherDerived>
+    Derived& operator+=(const MatrixBase<OtherDerived>& other);
+    template<typename OtherDerived>
+    Derived& operator-=(const MatrixBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    const typename ProductReturnType<Derived,OtherDerived>::Type
+    operator*(const MatrixBase<OtherDerived> &other) const;
+
+    template<typename OtherDerived>
+    const typename LazyProductReturnType<Derived,OtherDerived>::Type
+    lazyProduct(const MatrixBase<OtherDerived> &other) const;
+
+    template<typename OtherDerived>
+    Derived& operator*=(const EigenBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    void applyOnTheLeft(const EigenBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    void applyOnTheRight(const EigenBase<OtherDerived>& other);
+
+    template<typename DiagonalDerived>
+    const DiagonalProduct<Derived, DiagonalDerived, OnTheRight>
+    operator*(const DiagonalBase<DiagonalDerived> &diagonal) const;
+
+    template<typename OtherDerived>
+    typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType
+    dot(const MatrixBase<OtherDerived>& other) const;
+
+    #ifdef EIGEN2_SUPPORT
+      template<typename OtherDerived>
+      Scalar eigen2_dot(const MatrixBase<OtherDerived>& other) const;
+    #endif
+
+    RealScalar squaredNorm() const;
+    RealScalar norm() const;
+    RealScalar stableNorm() const;
+    RealScalar blueNorm() const;
+    RealScalar hypotNorm() const;
+    const PlainObject normalized() const;
+    void normalize();
+
+    const AdjointReturnType adjoint() const;
+    void adjointInPlace();
+
+    typedef Diagonal<Derived> DiagonalReturnType;
+    DiagonalReturnType diagonal();
+    typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;
+    ConstDiagonalReturnType diagonal() const;
+
+    template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };
+    template<int Index> struct ConstDiagonalIndexReturnType { typedef const Diagonal<const Derived,Index> Type; };
+
+    template<int Index> typename DiagonalIndexReturnType<Index>::Type diagonal();
+    template<int Index> typename ConstDiagonalIndexReturnType<Index>::Type diagonal() const;
+    
+    typedef Diagonal<Derived,DynamicIndex> DiagonalDynamicIndexReturnType;
+    typedef typename internal::add_const<Diagonal<const Derived,DynamicIndex> >::type ConstDiagonalDynamicIndexReturnType;
+
+    DiagonalDynamicIndexReturnType diagonal(Index index);
+    ConstDiagonalDynamicIndexReturnType diagonal(Index index) const;
+
+    #ifdef EIGEN2_SUPPORT
+    template<unsigned int Mode> typename internal::eigen2_part_return_type<Derived, Mode>::type part();
+    template<unsigned int Mode> const typename internal::eigen2_part_return_type<Derived, Mode>::type part() const;
+    
+    // Huge hack: make Eigen2's matrix.part<Diagonal>() work in Eigen3. Problem: Diagonal is now a class template instead
+    // of an integer constant. Solution: overload the part() method template with respect to its template parameter list.
+    template<template<typename T, int N> class U>
+    const DiagonalWrapper<ConstDiagonalReturnType> part() const
+    { return diagonal().asDiagonal(); }
+    #endif // EIGEN2_SUPPORT
+
+    template<unsigned int Mode> struct TriangularViewReturnType { typedef TriangularView<Derived, Mode> Type; };
+    template<unsigned int Mode> struct ConstTriangularViewReturnType { typedef const TriangularView<const Derived, Mode> Type; };
+
+    template<unsigned int Mode> typename TriangularViewReturnType<Mode>::Type triangularView();
+    template<unsigned int Mode> typename ConstTriangularViewReturnType<Mode>::Type triangularView() const;
+
+    template<unsigned int UpLo> struct SelfAdjointViewReturnType { typedef SelfAdjointView<Derived, UpLo> Type; };
+    template<unsigned int UpLo> struct ConstSelfAdjointViewReturnType { typedef const SelfAdjointView<const Derived, UpLo> Type; };
+
+    template<unsigned int UpLo> typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();
+    template<unsigned int UpLo> typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
+
+    const SparseView<Derived> sparseView(const Scalar& m_reference = Scalar(0),
+                                         const typename NumTraits<Scalar>::Real& m_epsilon = NumTraits<Scalar>::dummy_precision()) const;
+    static const IdentityReturnType Identity();
+    static const IdentityReturnType Identity(Index rows, Index cols);
+    static const BasisReturnType Unit(Index size, Index i);
+    static const BasisReturnType Unit(Index i);
+    static const BasisReturnType UnitX();
+    static const BasisReturnType UnitY();
+    static const BasisReturnType UnitZ();
+    static const BasisReturnType UnitW();
+
+    const DiagonalWrapper<const Derived> asDiagonal() const;
+    const PermutationWrapper<const Derived> asPermutation() const;
+
+    Derived& setIdentity();
+    Derived& setIdentity(Index rows, Index cols);
+
+    bool isIdentity(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isDiagonal(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+
+    bool isUpperTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isLowerTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+
+    template<typename OtherDerived>
+    bool isOrthogonal(const MatrixBase<OtherDerived>& other,
+                      const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+    bool isUnitary(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
+
+    /** \returns true if all coefficients of \c *this and \a other are exactly equal.
+      * \warning When using floating point scalar values you should probably use a
+      *          fuzzy comparison such as isApprox() instead.
+      * \sa isApprox(), operator!= */
+    template<typename OtherDerived>
+    inline bool operator==(const MatrixBase<OtherDerived>& other) const
+    { return cwiseEqual(other).all(); }
+
+    /** \returns true if at least one pair of coefficients of \c *this and \a other is not exactly equal.
+      * \warning When using floating point scalar values you should probably use a
+      *          fuzzy comparison such as isApprox() instead.
+      * \sa isApprox(), operator== */
+    template<typename OtherDerived>
+    inline bool operator!=(const MatrixBase<OtherDerived>& other) const
+    { return cwiseNotEqual(other).any(); }
+
+    NoAlias<Derived,Eigen::MatrixBase > noalias();
+
+    inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
+    inline ForceAlignedAccess<Derived> forceAlignedAccess();
+    template<bool Enable> inline typename internal::add_const_on_value_type<typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type>::type forceAlignedAccessIf() const;
+    template<bool Enable> inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();
+
+    Scalar trace() const;
+
+/////////// Array module ///////////
+
+    template<int p> RealScalar lpNorm() const;
+
+    MatrixBase<Derived>& matrix() { return *this; }
+    const MatrixBase<Derived>& matrix() const { return *this; }
+
+    /** \returns an \link Eigen::ArrayBase Array \endlink expression of this matrix
+      * \sa ArrayBase::matrix() */
+    ArrayWrapper<Derived> array() { return derived(); }
+    const ArrayWrapper<const Derived> array() const { return derived(); }
+
+/////////// LU module ///////////
+
+    const FullPivLU<PlainObject> fullPivLu() const;
+    const PartialPivLU<PlainObject> partialPivLu() const;
+
+    #if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
+    const LU<PlainObject> lu() const;
+    #endif
+
+    #ifdef EIGEN2_SUPPORT
+    const LU<PlainObject> eigen2_lu() const;
+    #endif
+
+    #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+    const PartialPivLU<PlainObject> lu() const;
+    #endif
+    
+    #ifdef EIGEN2_SUPPORT
+    template<typename ResultType>
+    void computeInverse(MatrixBase<ResultType> *result) const {
+      *result = this->inverse();
+    }
+    #endif
+
+    const internal::inverse_impl<Derived> inverse() const;
+    template<typename ResultType>
+    void computeInverseAndDetWithCheck(
+      ResultType& inverse,
+      typename ResultType::Scalar& determinant,
+      bool& invertible,
+      const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
+    ) const;
+    template<typename ResultType>
+    void computeInverseWithCheck(
+      ResultType& inverse,
+      bool& invertible,
+      const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()
+    ) const;
+    Scalar determinant() const;
+
+/////////// Cholesky module ///////////
+
+    const LLT<PlainObject>  llt() const;
+    const LDLT<PlainObject> ldlt() const;
+
+/////////// QR module ///////////
+
+    const HouseholderQR<PlainObject> householderQr() const;
+    const ColPivHouseholderQR<PlainObject> colPivHouseholderQr() const;
+    const FullPivHouseholderQR<PlainObject> fullPivHouseholderQr() const;
+    
+    #ifdef EIGEN2_SUPPORT
+    const QR<PlainObject> qr() const;
+    #endif
+
+    EigenvaluesReturnType eigenvalues() const;
+    RealScalar operatorNorm() const;
+
+/////////// SVD module ///////////
+
+    JacobiSVD<PlainObject> jacobiSvd(unsigned int computationOptions = 0) const;
+
+    #ifdef EIGEN2_SUPPORT
+    SVD<PlainObject> svd() const;
+    #endif
+
+/////////// Geometry module ///////////
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /// \internal helper struct to form the return type of the cross product
+    template<typename OtherDerived> struct cross_product_return_type {
+      typedef typename internal::scalar_product_traits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType Scalar;
+      typedef Matrix<Scalar,MatrixBase::RowsAtCompileTime,MatrixBase::ColsAtCompileTime> type;
+    };
+    #endif // EIGEN_PARSED_BY_DOXYGEN
+    template<typename OtherDerived>
+    typename cross_product_return_type<OtherDerived>::type
+    cross(const MatrixBase<OtherDerived>& other) const;
+    template<typename OtherDerived>
+    PlainObject cross3(const MatrixBase<OtherDerived>& other) const;
+    PlainObject unitOrthogonal(void) const;
+    Matrix<Scalar,3,1> eulerAngles(Index a0, Index a1, Index a2) const;
+    
+    #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+    ScalarMultipleReturnType operator*(const UniformScaling<Scalar>& s) const;
+    // put this as separate enum value to work around possible GCC 4.3 bug (?)
+    enum { HomogeneousReturnTypeDirection = ColsAtCompileTime==1?Vertical:Horizontal };
+    typedef Homogeneous<Derived, HomogeneousReturnTypeDirection> HomogeneousReturnType;
+    HomogeneousReturnType homogeneous() const;
+    #endif
+    
+    enum {
+      SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1
+    };
+    typedef Block<const Derived,
+                  internal::traits<Derived>::ColsAtCompileTime==1 ? SizeMinusOne : 1,
+                  internal::traits<Derived>::ColsAtCompileTime==1 ? 1 : SizeMinusOne> ConstStartMinusOne;
+    typedef CwiseUnaryOp<internal::scalar_quotient1_op<typename internal::traits<Derived>::Scalar>,
+                const ConstStartMinusOne > HNormalizedReturnType;
+
+    const HNormalizedReturnType hnormalized() const;
+
+////////// Householder module ///////////
+
+    void makeHouseholderInPlace(Scalar& tau, RealScalar& beta);
+    template<typename EssentialPart>
+    void makeHouseholder(EssentialPart& essential,
+                         Scalar& tau, RealScalar& beta) const;
+    template<typename EssentialPart>
+    void applyHouseholderOnTheLeft(const EssentialPart& essential,
+                                   const Scalar& tau,
+                                   Scalar* workspace);
+    template<typename EssentialPart>
+    void applyHouseholderOnTheRight(const EssentialPart& essential,
+                                    const Scalar& tau,
+                                    Scalar* workspace);
+
+///////// Jacobi module /////////
+
+    template<typename OtherScalar>
+    void applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j);
+    template<typename OtherScalar>
+    void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);
+
+///////// MatrixFunctions module /////////
+
+    typedef typename internal::stem_function<Scalar>::type StemFunction;
+    const MatrixExponentialReturnValue<Derived> exp() const;
+    const MatrixFunctionReturnValue<Derived> matrixFunction(StemFunction f) const;
+    const MatrixFunctionReturnValue<Derived> cosh() const;
+    const MatrixFunctionReturnValue<Derived> sinh() const;
+    const MatrixFunctionReturnValue<Derived> cos() const;
+    const MatrixFunctionReturnValue<Derived> sin() const;
+    const MatrixSquareRootReturnValue<Derived> sqrt() const;
+    const MatrixLogarithmReturnValue<Derived> log() const;
+    const MatrixPowerReturnValue<Derived> pow(const RealScalar& p) const;
+
+#ifdef EIGEN2_SUPPORT
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    Derived& operator+=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+                                      EvalBeforeAssigningBit>& other);
+
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    Derived& operator-=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+                                      EvalBeforeAssigningBit>& other);
+
+    /** \deprecated because .lazy() is deprecated
+      * Overloaded for cache-friendly product evaluation */
+    template<typename OtherDerived>
+    Derived& lazyAssign(const Flagged<OtherDerived, 0, EvalBeforeAssigningBit>& other)
+    { return lazyAssign(other._expression()); }
+
+    template<unsigned int Added>
+    const Flagged<Derived, Added, 0> marked() const;
+    const Flagged<Derived, 0, EvalBeforeAssigningBit> lazy() const;
+
+    inline const Cwise<Derived> cwise() const;
+    inline Cwise<Derived> cwise();
+
+    VectorBlock<Derived> start(Index size);
+    const VectorBlock<const Derived> start(Index size) const;
+    VectorBlock<Derived> end(Index size);
+    const VectorBlock<const Derived> end(Index size) const;
+    template<int Size> VectorBlock<Derived,Size> start();
+    template<int Size> const VectorBlock<const Derived,Size> start() const;
+    template<int Size> VectorBlock<Derived,Size> end();
+    template<int Size> const VectorBlock<const Derived,Size> end() const;
+
+    Minor<Derived> minor(Index row, Index col);
+    const Minor<Derived> minor(Index row, Index col) const;
+#endif
+
+  protected:
+    MatrixBase() : Base() {}
+
+  private:
+    explicit MatrixBase(int);
+    MatrixBase(int,int);
+    template<typename OtherDerived> explicit MatrixBase(const MatrixBase<OtherDerived>&);
+  protected:
+    // mixing arrays and matrices is not legal
+    template<typename OtherDerived> Derived& operator+=(const ArrayBase<OtherDerived>& )
+    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
+    // mixing arrays and matrices is not legal
+    template<typename OtherDerived> Derived& operator-=(const ArrayBase<OtherDerived>& )
+    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
+};
+
+
+/***************************************************************************
+* Implementation of matrix base methods
+***************************************************************************/
+
+/** replaces \c *this by \c *this * \a other.
+  *
+  * \returns a reference to \c *this
+  *
+  * Example: \include MatrixBase_applyOnTheRight.cpp
+  * Output: \verbinclude MatrixBase_applyOnTheRight.out
+  */
+template<typename Derived>
+template<typename OtherDerived>
+inline Derived&
+MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
+{
+  other.derived().applyThisOnTheRight(derived());
+  return derived();
+}
+
+/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
+  *
+  * Example: \include MatrixBase_applyOnTheRight.cpp
+  * Output: \verbinclude MatrixBase_applyOnTheRight.out
+  */
+template<typename Derived>
+template<typename OtherDerived>
+inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
+{
+  other.derived().applyThisOnTheRight(derived());
+}
+
+/** replaces \c *this by \a other * \c *this.
+  *
+  * Example: \include MatrixBase_applyOnTheLeft.cpp
+  * Output: \verbinclude MatrixBase_applyOnTheLeft.out
+  */
+template<typename Derived>
+template<typename OtherDerived>
+inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
+{
+  other.derived().applyThisOnTheLeft(derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIXBASE_H
diff --git a/Eigen/src/Core/NestByValue.h b/Eigen/src/Core/NestByValue.h
new file mode 100644
index 0000000..a893b17
--- /dev/null
+++ b/Eigen/src/Core/NestByValue.h
@@ -0,0 +1,111 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_NESTBYVALUE_H
+#define EIGEN_NESTBYVALUE_H
+
+namespace Eigen {
+
+/** \class NestByValue
+  * \ingroup Core_Module
+  *
+  * \brief Expression which must be nested by value
+  *
+  * \param ExpressionType the type of the object of which we are requiring nesting-by-value
+  *
+  * This class is the return type of MatrixBase::nestByValue()
+  * and most of the time this is the only way it is used.
+  *
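+  * For instance (an illustrative sketch, not an official Eigen example), nesting by
+  * value forces a temporary to be copied into the enclosing expression rather than
+  * referenced:
+  * \code
+  * MatrixXd a = MatrixXd::Random(2,2), b = MatrixXd::Random(2,2);
+  * // the temporary (a + b) is stored by value inside the transpose expression
+  * MatrixXd c = (a + b).nestByValue().transpose();
+  * \endcode
+  *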
+  * \sa MatrixBase::nestByValue()
+  */
+
+namespace internal {
+template<typename ExpressionType>
+struct traits<NestByValue<ExpressionType> > : public traits<ExpressionType>
+{};
+}
+
+template<typename ExpressionType> class NestByValue
+  : public internal::dense_xpr_base< NestByValue<ExpressionType> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<NestByValue>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue)
+
+    inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {}
+
+    inline Index rows() const { return m_expression.rows(); }
+    inline Index cols() const { return m_expression.cols(); }
+    inline Index outerStride() const { return m_expression.outerStride(); }
+    inline Index innerStride() const { return m_expression.innerStride(); }
+
+    inline const CoeffReturnType coeff(Index row, Index col) const
+    {
+      return m_expression.coeff(row, col);
+    }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      return m_expression.const_cast_derived().coeffRef(row, col);
+    }
+
+    inline const CoeffReturnType coeff(Index index) const
+    {
+      return m_expression.coeff(index);
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index row, Index col) const
+    {
+      return m_expression.template packet<LoadMode>(row, col);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index row, Index col, const PacketScalar& x)
+    {
+      m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index index) const
+    {
+      return m_expression.template packet<LoadMode>(index);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& x)
+    {
+      m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
+    }
+
+    operator const ExpressionType&() const { return m_expression; }
+
+  protected:
+    const ExpressionType m_expression;
+};
+
+/** \returns an expression of \c *this that is guaranteed to be nested by value,
+  * i.e., enclosing expressions store a copy of it instead of a reference.
+  */
+template<typename Derived>
+inline const NestByValue<Derived>
+DenseBase<Derived>::nestByValue() const
+{
+  return NestByValue<Derived>(derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_NESTBYVALUE_H
diff --git a/Eigen/src/Core/NoAlias.h b/Eigen/src/Core/NoAlias.h
new file mode 100644
index 0000000..768bfb1
--- /dev/null
+++ b/Eigen/src/Core/NoAlias.h
@@ -0,0 +1,134 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_NOALIAS_H
+#define EIGEN_NOALIAS_H
+
+namespace Eigen {
+
+/** \class NoAlias
+  * \ingroup Core_Module
+  *
+  * \brief Pseudo expression providing an operator = assuming no aliasing
+  *
+  * \param ExpressionType the type of the object on which to do the lazy assignment
+  *
+  * This class represents an expression with special assignment operators
+  * assuming no aliasing between the target expression and the source expression.
+  * More precisely, it makes it possible to bypass the EvalBeforeAssigningBit flag of the source expression.
+  * It is the return type of MatrixBase::noalias()
+  * and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::noalias()
+  */
+template<typename ExpressionType, template <typename> class StorageBase>
+class NoAlias
+{
+    typedef typename ExpressionType::Scalar Scalar;
+  public:
+    NoAlias(ExpressionType& expression) : m_expression(expression) {}
+
+    /** Behaves like MatrixBase::lazyAssign(other)
+      * \sa MatrixBase::lazyAssign() */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other)
+    { return internal::assign_selector<ExpressionType,OtherDerived,false>::run(m_expression,other.derived()); }
+
+    /** \sa MatrixBase::operator+= */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other)
+    {
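+      // wrap the destination in a SelfCwiseBinaryOp pseudo-expression so the sum
+      // is applied coefficient-wise in place, skipping the aliasing-protection
+      // temporary that the source expression could otherwise trigger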
+      typedef SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, ExpressionType, OtherDerived> SelfAdder;
+      SelfAdder tmp(m_expression);
+      typedef typename internal::nested<OtherDerived>::type OtherDerivedNested;
+      typedef typename internal::remove_all<OtherDerivedNested>::type _OtherDerivedNested;
+      internal::assign_selector<SelfAdder,_OtherDerivedNested,false>::run(tmp,OtherDerivedNested(other.derived()));
+      return m_expression;
+    }
+
+    /** \sa MatrixBase::operator-= */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other)
+    {
+      typedef SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, ExpressionType, OtherDerived> SelfSubtractor;
+      SelfSubtractor tmp(m_expression);
+      typedef typename internal::nested<OtherDerived>::type OtherDerivedNested;
+      typedef typename internal::remove_all<OtherDerivedNested>::type _OtherDerivedNested;
+      internal::assign_selector<SelfSubtractor,_OtherDerivedNested,false>::run(tmp,OtherDerivedNested(other.derived()));
+      return m_expression;
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE ExpressionType& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+    { other.derived().addTo(m_expression); return m_expression; }
+
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE ExpressionType& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+    { other.derived().subTo(m_expression); return m_expression; }
+
+    template<typename Lhs, typename Rhs, int NestingFlags>
+    EIGEN_STRONG_INLINE ExpressionType& operator+=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
+    { return m_expression.derived() += CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
+
+    template<typename Lhs, typename Rhs, int NestingFlags>
+    EIGEN_STRONG_INLINE ExpressionType& operator-=(const CoeffBasedProduct<Lhs,Rhs,NestingFlags>& other)
+    { return m_expression.derived() -= CoeffBasedProduct<Lhs,Rhs,NestByRefBit>(other.lhs(), other.rhs()); }
+    
+    template<typename OtherDerived>
+    ExpressionType& operator=(const ReturnByValue<OtherDerived>& func)
+    { return m_expression = func; }
+#endif
+
+    ExpressionType& expression() const
+    {
+      return m_expression;
+    }
+
+  protected:
+    ExpressionType& m_expression;
+};
+
+/** \returns a pseudo expression of \c *this with an operator= assuming
+  * no aliasing between \c *this and the source expression.
+  *
+  * More precisely, noalias() makes it possible to bypass the EvalBeforeAssigningBit flag.
+  * Currently, even though several expressions may alias, only product
+  * expressions have this flag. Therefore, noalias() is only useful when
+  * the source expression contains a matrix product.
+  *
+  * Here are some examples where noalias is useful:
+  * \code
+  * D.noalias()  = A * B;
+  * D.noalias() += A.transpose() * B;
+  * D.noalias() -= 2 * A * B.adjoint();
+  * \endcode
+  *
+  * On the other hand the following example will lead to a \b wrong result:
+  * \code
+  * A.noalias() = A * B;
+  * \endcode
+  * because the result matrix A is also an operand of the matrix product. Therefore,
+  * there is no alternative to evaluating A * B in a temporary, which is the default
+  * behavior when you write:
+  * \code
+  * A = A * B;
+  * \endcode
+  *
+  * \sa class NoAlias
+  */
+template<typename Derived>
+NoAlias<Derived,MatrixBase> MatrixBase<Derived>::noalias()
+{
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_NOALIAS_H
diff --git a/Eigen/src/Core/NumTraits.h b/Eigen/src/Core/NumTraits.h
new file mode 100644
index 0000000..bac9e50
--- /dev/null
+++ b/Eigen/src/Core/NumTraits.h
@@ -0,0 +1,150 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_NUMTRAITS_H
+#define EIGEN_NUMTRAITS_H
+
+namespace Eigen {
+
+/** \class NumTraits
+  * \ingroup Core_Module
+  *
+  * \brief Holds information about the various numeric (i.e. scalar) types allowed by Eigen.
+  *
+  * \param T the numeric type at hand
+  *
+  * This class stores enums, typedefs and static methods giving information about a numeric type.
+  *
+  * The provided data consists of:
+  * \li A typedef \a Real, giving the "real part" type of \a T. If \a T is already real,
+  *     then \a Real is just a typedef to \a T. If \a T is \c std::complex<U> then \a Real
+  *     is a typedef to \a U.
+  * \li A typedef \a NonInteger, giving the type that should be used for operations producing non-integral values,
+  *     such as quotients, square roots, etc. If \a T is a floating-point type, then this typedef just gives
+  *     \a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to
+  *     take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is
+  *     only intended as a helper for code that needs to explicitly promote types.
+  * \li A typedef \a Nested giving the type to use to nest a value inside of the expression tree. If you don't know what
+  *     this means, just use \a T here.
+  * \li An enum value \a IsComplex. It is equal to 1 if \a T is a \c std::complex
+  *     type, and to 0 otherwise.
+  * \li An enum value \a IsInteger. It is equal to \c 1 if \a T is an integer type such as \c int,
+  *     and to \c 0 otherwise.
+  * \li Enum values ReadCost, AddCost and MulCost representing a rough estimate of the number of CPU cycles needed
+  *     by move / add / mul instructions respectively, assuming the data is already stored in CPU registers.
+  *     These estimates should stay deliberately rough; no architecture-specific tuning is needed.
+  * \li An enum value \a IsSigned. It is equal to \c 1 if \a T is a signed type and to 0 if \a T is unsigned.
+  * \li An enum value \a RequireInitialization. It is equal to \c 1 if the constructor of the numeric type \a T must
+  *     be called, and to 0 if it is safe not to call it. Default is 0 if \a T is an arithmetic type, and 1 otherwise.
+  * \li An epsilon() function which, unlike std::numeric_limits::epsilon(), returns a \a Real instead of a \a T.
+  * \li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default
+  *     value by the fuzzy comparison operators.
+  * \li highest() and lowest() functions returning the highest and lowest possible values respectively.
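+  *
+  * As an illustrative sketch (the scalar type \c MyScalar below is hypothetical),
+  * a user-defined real-like scalar type can be made available to Eigen by
+  * specializing NumTraits:
+  * \code
+  * namespace Eigen {
+  *   template<> struct NumTraits<MyScalar> : GenericNumTraits<MyScalar>
+  *   {
+  *     typedef MyScalar Real;
+  *     typedef MyScalar NonInteger;
+  *     typedef MyScalar Nested;
+  *     enum {
+  *       IsComplex = 0, IsInteger = 0, IsSigned = 1,
+  *       RequireInitialization = 1,
+  *       ReadCost = 1, AddCost = 3, MulCost = 3
+  *     };
+  *   };
+  * }
+  * \endcode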
+  */
+
+template<typename T> struct GenericNumTraits
+{
+  enum {
+    IsInteger = std::numeric_limits<T>::is_integer,
+    IsSigned = std::numeric_limits<T>::is_signed,
+    IsComplex = 0,
+    RequireInitialization = internal::is_arithmetic<T>::value ? 0 : 1,
+    ReadCost = 1,
+    AddCost = 1,
+    MulCost = 1
+  };
+
+  typedef T Real;
+  typedef typename internal::conditional<
+                     IsInteger,
+                     typename internal::conditional<sizeof(T)<=2, float, double>::type,
+                     T
+                   >::type NonInteger;
+  typedef T Nested;
+
+  static inline Real epsilon() { return std::numeric_limits<T>::epsilon(); }
+  static inline Real dummy_precision()
+  {
+    // make sure to override this for floating-point types
+    return Real(0);
+  }
+  static inline T highest() { return (std::numeric_limits<T>::max)(); }
+  static inline T lowest()  { return IsInteger ? (std::numeric_limits<T>::min)() : (-(std::numeric_limits<T>::max)()); }
+  
+#ifdef EIGEN2_SUPPORT
+  enum {
+    HasFloatingPoint = !IsInteger
+  };
+  typedef NonInteger FloatingPoint;
+#endif
+};
+
+template<typename T> struct NumTraits : GenericNumTraits<T>
+{};
+
+template<> struct NumTraits<float>
+  : GenericNumTraits<float>
+{
+  static inline float dummy_precision() { return 1e-5f; }
+};
+
+template<> struct NumTraits<double> : GenericNumTraits<double>
+{
+  static inline double dummy_precision() { return 1e-12; }
+};
+
+template<> struct NumTraits<long double>
+  : GenericNumTraits<long double>
+{
+  static inline long double dummy_precision() { return 1e-15l; }
+};
+
+template<typename _Real> struct NumTraits<std::complex<_Real> >
+  : GenericNumTraits<std::complex<_Real> >
+{
+  typedef _Real Real;
+  enum {
+    IsComplex = 1,
+    RequireInitialization = NumTraits<_Real>::RequireInitialization,
+    ReadCost = 2 * NumTraits<_Real>::ReadCost,
+    AddCost = 2 * NumTraits<Real>::AddCost,
+    MulCost = 4 * NumTraits<Real>::MulCost + 2 * NumTraits<Real>::AddCost
+  };
+
+  static inline Real epsilon() { return NumTraits<Real>::epsilon(); }
+  static inline Real dummy_precision() { return NumTraits<Real>::dummy_precision(); }
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+struct NumTraits<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
+{
+  typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> ArrayType;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Array<RealScalar, Rows, Cols, Options, MaxRows, MaxCols> Real;
+  typedef typename NumTraits<Scalar>::NonInteger NonIntegerScalar;
+  typedef Array<NonIntegerScalar, Rows, Cols, Options, MaxRows, MaxCols> NonInteger;
+  typedef ArrayType & Nested;
+  
+  enum {
+    IsComplex = NumTraits<Scalar>::IsComplex,
+    IsInteger = NumTraits<Scalar>::IsInteger,
+    IsSigned  = NumTraits<Scalar>::IsSigned,
+    RequireInitialization = 1,
+    ReadCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::ReadCost,
+    AddCost  = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::AddCost,
+    MulCost  = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::MulCost
+  };
+  
+  static inline RealScalar epsilon() { return NumTraits<RealScalar>::epsilon(); }
+  static inline RealScalar dummy_precision() { return NumTraits<RealScalar>::dummy_precision(); }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_NUMTRAITS_H
diff --git a/Eigen/src/Core/PermutationMatrix.h b/Eigen/src/Core/PermutationMatrix.h
new file mode 100644
index 0000000..85ffae2
--- /dev/null
+++ b/Eigen/src/Core/PermutationMatrix.h
@@ -0,0 +1,721 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PERMUTATIONMATRIX_H
+#define EIGEN_PERMUTATIONMATRIX_H
+
+namespace Eigen { 
+
+template<int RowCol,typename IndicesType,typename MatrixType, typename StorageKind> class PermutedImpl;
+
+/** \class PermutationBase
+  * \ingroup Core_Module
+  *
+  * \brief Base class for permutations
+  *
+  * \param Derived the derived class
+  *
+  * This class is the base class for all expressions representing a permutation matrix,
+  * internally stored as a vector of integers.
+  * The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix
+  * \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have:
+  *  \f[ P_\sigma(e_i) = e_{\sigma(i)}. \f]
+  * This convention ensures that for any two permutations \f$ \sigma, \tau \f$, we have:
+  *  \f[ P_{\sigma\circ\tau} = P_\sigma P_\tau. \f]
+  *
+  * Permutation matrices are square and invertible.
+  *
+  * Notice that in addition to the member functions and operators listed here, there are also non-member
+  * operator* overloads to multiply any kind of permutation object with any kind of matrix expression (MatrixBase)
+  * on either side.
+  *
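+  * For example (illustrative), a permutation is applied to the rows of a matrix by
+  * multiplying on the left:
+  * \code
+  * PermutationMatrix<3> P;
+  * P.indices() << 2, 0, 1;   // row i of A becomes row P.indices()[i] of the result
+  * Matrix3d A = Matrix3d::Random();
+  * Matrix3d B = P * A;
+  * \endcode
+  *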
+  * \sa class PermutationMatrix, class PermutationWrapper
+  */
+
+namespace internal {
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed=false>
+struct permut_matrix_product_retval;
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed=false>
+struct permut_sparsematrix_product_retval;
+enum PermPermProduct_t {PermPermProduct};
+
+} // end namespace internal
+
+template<typename Derived>
+class PermutationBase : public EigenBase<Derived>
+{
+    typedef internal::traits<Derived> Traits;
+    typedef EigenBase<Derived> Base;
+  public:
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef typename Traits::IndicesType IndicesType;
+    enum {
+      Flags = Traits::Flags,
+      CoeffReadCost = Traits::CoeffReadCost,
+      RowsAtCompileTime = Traits::RowsAtCompileTime,
+      ColsAtCompileTime = Traits::ColsAtCompileTime,
+      MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
+    };
+    typedef typename Traits::Scalar Scalar;
+    typedef typename Traits::Index Index;
+    typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime,0,MaxRowsAtCompileTime,MaxColsAtCompileTime>
+            DenseMatrixType;
+    typedef PermutationMatrix<IndicesType::SizeAtCompileTime,IndicesType::MaxSizeAtCompileTime,Index>
+            PlainPermutationType;
+    using Base::derived;
+    #endif
+
+    /** Copies the other permutation into *this */
+    template<typename OtherDerived>
+    Derived& operator=(const PermutationBase<OtherDerived>& other)
+    {
+      indices() = other.indices();
+      return derived();
+    }
+
+    /** Assignment from the Transpositions \a tr */
+    template<typename OtherDerived>
+    Derived& operator=(const TranspositionsBase<OtherDerived>& tr)
+    {
+      setIdentity(tr.size());
+      for(Index k=size()-1; k>=0; --k)
+        applyTranspositionOnTheRight(k,tr.coeff(k));
+      return derived();
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    Derived& operator=(const PermutationBase& other)
+    {
+      indices() = other.indices();
+      return derived();
+    }
+    #endif
+
+    /** \returns the number of rows */
+    inline Index rows() const { return Index(indices().size()); }
+
+    /** \returns the number of columns */
+    inline Index cols() const { return Index(indices().size()); }
+
+    /** \returns the size of a side of the respective square matrix, i.e., the number of indices */
+    inline Index size() const { return Index(indices().size()); }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename DenseDerived>
+    void evalTo(MatrixBase<DenseDerived>& other) const
+    {
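+      // build the dense representation: column i carries a single 1 at row
+      // indices(i), matching the convention P(e_i) = e_{sigma(i)}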
+      other.setZero();
+      for (int i=0; i<rows();++i)
+        other.coeffRef(indices().coeff(i),i) = typename DenseDerived::Scalar(1);
+    }
+    #endif
+
+    /** \returns a Matrix object initialized from this permutation matrix. Notice that it
+      * is inefficient to return this Matrix object by value. For efficiency, favor using
+      * the Matrix constructor taking EigenBase objects.
+      */
+    DenseMatrixType toDenseMatrix() const
+    {
+      return derived();
+    }
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return derived().indices(); }
+    /** \returns a reference to the stored array representing the permutation. */
+    IndicesType& indices() { return derived().indices(); }
+
+    /** Resizes to given size.
+      */
+    inline void resize(Index newSize)
+    {
+      indices().resize(newSize);
+    }
+
+    /** Sets *this to be the identity permutation matrix */
+    void setIdentity()
+    {
+      for(Index i = 0; i < size(); ++i)
+        indices().coeffRef(i) = i;
+    }
+
+    /** Sets *this to be the identity permutation matrix of given size.
+      */
+    void setIdentity(Index newSize)
+    {
+      resize(newSize);
+      setIdentity();
+    }
+
+    /** Multiplies *this by the transposition \f$(ij)\f$ on the left.
+      *
+      * \returns a reference to *this.
+      *
+      * \warning This is much slower than applyTranspositionOnTheRight(int,int):
+      * this has linear complexity and requires a lot of branching.
+      *
+      * \sa applyTranspositionOnTheRight(int,int)
+      */
+    Derived& applyTranspositionOnTheLeft(Index i, Index j)
+    {
+      eigen_assert(i>=0 && j>=0 && i<size() && j<size());
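+      // left-multiplying by the transposition (i,j) swaps the images i and j,
+      // so every occurrence of i or j in the index array must be relabeled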
+      for(Index k = 0; k < size(); ++k)
+      {
+        if(indices().coeff(k) == i) indices().coeffRef(k) = j;
+        else if(indices().coeff(k) == j) indices().coeffRef(k) = i;
+      }
+      return derived();
+    }
+
+    /** Multiplies *this by the transposition \f$(ij)\f$ on the right.
+      *
+      * \returns a reference to *this.
+      *
+      * This is a fast operation: it only consists of swapping two indices.
+      *
+      * \sa applyTranspositionOnTheLeft(int,int)
+      */
+    Derived& applyTranspositionOnTheRight(Index i, Index j)
+    {
+      eigen_assert(i>=0 && j>=0 && i<size() && j<size());
+      std::swap(indices().coeffRef(i), indices().coeffRef(j));
+      return derived();
+    }
+
+    /** \returns the inverse permutation matrix.
+      *
+      * \note \note_try_to_help_rvo
+      */
+    inline Transpose<PermutationBase> inverse() const
+    { return derived(); }
+    /** \returns the transposed permutation matrix.
+      *
+      * \note \note_try_to_help_rvo
+      */
+    inline Transpose<PermutationBase> transpose() const
+    { return derived(); }
+
+    /**** multiplication helpers to hopefully get RVO ****/
+
+  
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+  protected:
+    template<typename OtherDerived>
+    void assignTranspose(const PermutationBase<OtherDerived>& other)
+    {
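+      // the transpose of a permutation matrix is its inverse, so invert the index map:
+      // if other sends i to other.indices()[i], *this must send other.indices()[i] back to i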
+      for (int i=0; i<rows();++i) indices().coeffRef(other.indices().coeff(i)) = i;
+    }
+    template<typename Lhs,typename Rhs>
+    void assignProduct(const Lhs& lhs, const Rhs& rhs)
+    {
+      eigen_assert(lhs.cols() == rhs.rows());
+      for (int i=0; i<rows();++i) indices().coeffRef(i) = lhs.indices().coeff(rhs.indices().coeff(i));
+    }
+#endif
+
+  public:
+
+    /** \returns the product permutation matrix.
+      *
+      * \note \note_try_to_help_rvo
+      */
+    template<typename Other>
+    inline PlainPermutationType operator*(const PermutationBase<Other>& other) const
+    { return PlainPermutationType(internal::PermPermProduct, derived(), other.derived()); }
+
+    /** \returns the product of a permutation with another inverse permutation.
+      *
+      * \note \note_try_to_help_rvo
+      */
+    template<typename Other>
+    inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other) const
+    { return PlainPermutationType(internal::PermPermProduct, *this, other.eval()); }
+
+    /** \returns the product of an inverse permutation with another permutation.
+      *
+      * \note \note_try_to_help_rvo
+      */
+    template<typename Other> friend
+    inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other, const PermutationBase& perm)
+    { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }
+    
+    /** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation.
+      *
+      * This is an O(\c n) procedure allocating a buffer of \c n booleans.
+      */
+    Index determinant() const
+    {
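+      // compute the parity from the cycle decomposition: each cycle of length L
+      // is a product of L-1 transpositions, so it contributes a factor (-1)^(L-1)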
+      Index res = 1;
+      Index n = size();
+      Matrix<bool,RowsAtCompileTime,1,0,MaxRowsAtCompileTime> mask(n);
+      mask.fill(false);
+      Index r = 0;
+      while(r < n)
+      {
+        // search for the next seed
+        while(r<n && mask[r]) r++;
+        if(r>=n)
+          break;
+        // we got one, let's follow it until we are back to the seed
+        Index k0 = r++;
+        mask.coeffRef(k0) = true;
+        for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k))
+        {
+          mask.coeffRef(k) = true;
+          res = -res;
+        }
+      }
+      return res;
+    }
+
+  protected:
+
+};
+
+/** \class PermutationMatrix
+  * \ingroup Core_Module
+  *
+  * \brief Permutation matrix
+  *
+  * \param SizeAtCompileTime the number of rows/cols, or Dynamic
+  * \param MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
+  * \param IndexType the integer type of the indices
+  *
+  * This class represents a permutation matrix, internally stored as a vector of integers.
+  *
+  * \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix
+  */
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+struct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType> >
+ : traits<Matrix<IndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+  typedef IndexType Index;
+  typedef Matrix<IndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+class PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType> >
+{
+    typedef PermutationBase<PermutationMatrix> Base;
+    typedef internal::traits<PermutationMatrix> Traits;
+  public:
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef typename Traits::IndicesType IndicesType;
+    #endif
+
+    inline PermutationMatrix()
+    {}
+
+    /** Constructs an uninitialized permutation matrix of given size.
+      */
+    inline PermutationMatrix(int size) : m_indices(size)
+    {}
+
+    /** Copy constructor. */
+    template<typename OtherDerived>
+    inline PermutationMatrix(const PermutationBase<OtherDerived>& other)
+      : m_indices(other.indices()) {}
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** Standard copy constructor. Defined only to prevent a default copy constructor
+      * from hiding the other templated constructor */
+    inline PermutationMatrix(const PermutationMatrix& other) : m_indices(other.indices()) {}
+    #endif
+
+    /** Generic constructor from an expression of the indices. The indices
+      * array has the meaning that the permutation sends each integer i to indices[i].
+      *
+      * \warning It is your responsibility to check that the indices array that you pass actually
+      * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the
+      * array's size.
+      */
+    template<typename Other>
+    explicit inline PermutationMatrix(const MatrixBase<Other>& a_indices) : m_indices(a_indices)
+    {}
+
+    /** Convert the Transpositions \a tr to a permutation matrix */
+    template<typename Other>
+    explicit PermutationMatrix(const TranspositionsBase<Other>& tr)
+      : m_indices(tr.size())
+    {
+      *this = tr;
+    }
+
+    /** Copies the other permutation into *this */
+    template<typename Other>
+    PermutationMatrix& operator=(const PermutationBase<Other>& other)
+    {
+      m_indices = other.indices();
+      return *this;
+    }
+
+    /** Assignment from the Transpositions \a tr */
+    template<typename Other>
+    PermutationMatrix& operator=(const TranspositionsBase<Other>& tr)
+    {
+      return Base::operator=(tr.derived());
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    PermutationMatrix& operator=(const PermutationMatrix& other)
+    {
+      m_indices = other.m_indices;
+      return *this;
+    }
+    #endif
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return m_indices; }
+    /** \returns a reference to the stored array representing the permutation. */
+    IndicesType& indices() { return m_indices; }
+
+
+    /**** multiplication helpers to hopefully get RVO ****/
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename Other>
+    PermutationMatrix(const Transpose<PermutationBase<Other> >& other)
+      : m_indices(other.nestedPermutation().size())
+    {
+      for (int i=0; i<m_indices.size();++i) m_indices.coeffRef(other.nestedPermutation().indices().coeff(i)) = i;
+    }
+    template<typename Lhs,typename Rhs>
+    PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs)
+      : m_indices(lhs.indices().size())
+    {
+      Base::assignProduct(lhs,rhs);
+    }
+#endif
+
+  protected:
+
+    IndicesType m_indices;
+};
+
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
+struct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess> >
+ : traits<Matrix<IndexType,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >
+{
+  typedef IndexType Index;
+  typedef Map<const Matrix<IndexType, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, _PacketAccess> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
+class Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess>
+  : public PermutationBase<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, IndexType>,_PacketAccess> >
+{
+    typedef PermutationBase<Map> Base;
+    typedef internal::traits<Map> Traits;
+  public:
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef typename Traits::IndicesType IndicesType;
+    typedef typename IndicesType::Scalar Index;
+    #endif
+
+    inline Map(const Index* indicesPtr)
+      : m_indices(indicesPtr)
+    {}
+
+    inline Map(const Index* indicesPtr, Index size)
+      : m_indices(indicesPtr,size)
+    {}
+
+    /** Copies the other permutation into *this */
+    template<typename Other>
+    Map& operator=(const PermutationBase<Other>& other)
+    { return Base::operator=(other.derived()); }
+
+    /** Assignment from the Transpositions \a tr */
+    template<typename Other>
+    Map& operator=(const TranspositionsBase<Other>& tr)
+    { return Base::operator=(tr.derived()); }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    Map& operator=(const Map& other)
+    {
+      m_indices = other.m_indices;
+      return *this;
+    }
+    #endif
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return m_indices; }
+    /** \returns a reference to the stored array representing the permutation. */
+    IndicesType& indices() { return m_indices; }
+
+  protected:
+
+    IndicesType m_indices;
+};
+
+/** \class PermutationWrapper
+  * \ingroup Core_Module
+  *
+  * \brief Class to view a vector of integers as a permutation matrix
+  *
+  * \param _IndicesType the type of the vector of integers (can be any compatible expression)
+  *
+  * This class makes it possible to view any vector expression of integers as a permutation matrix.
+  *
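+  * For example (illustrative), MatrixBase::asPermutation() permutes the rows of a
+  * matrix according to a plain vector of indices:
+  * \code
+  * VectorXi idx(3);
+  * idx << 2, 0, 1;
+  * Matrix3d A = Matrix3d::Random();
+  * Matrix3d B = idx.asPermutation() * A;  // row i of A goes to row idx[i] of B
+  * \endcode
+  *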
+  * \sa class PermutationBase, class PermutationMatrix
+  */
+
+struct PermutationStorage {};
+
+template<typename _IndicesType> class TranspositionsWrapper;
+namespace internal {
+template<typename _IndicesType>
+struct traits<PermutationWrapper<_IndicesType> >
+{
+  typedef PermutationStorage StorageKind;
+  typedef typename _IndicesType::Scalar Scalar;
+  typedef typename _IndicesType::Scalar Index;
+  typedef _IndicesType IndicesType;
+  enum {
+    RowsAtCompileTime = _IndicesType::SizeAtCompileTime,
+    ColsAtCompileTime = _IndicesType::SizeAtCompileTime,
+    MaxRowsAtCompileTime = IndicesType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = IndicesType::MaxColsAtCompileTime,
+    Flags = 0,
+    CoeffReadCost = _IndicesType::CoeffReadCost
+  };
+};
+}
+
+template<typename _IndicesType>
+class PermutationWrapper : public PermutationBase<PermutationWrapper<_IndicesType> >
+{
+    typedef PermutationBase<PermutationWrapper> Base;
+    typedef internal::traits<PermutationWrapper> Traits;
+  public:
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef typename Traits::IndicesType IndicesType;
+    #endif
+
+    inline PermutationWrapper(const IndicesType& a_indices)
+      : m_indices(a_indices)
+    {}
+
+    /** const version of indices(). */
+    const typename internal::remove_all<typename IndicesType::Nested>::type&
+    indices() const { return m_indices; }
+
+  protected:
+
+    typename IndicesType::Nested m_indices;
+};
+
+/** \returns the matrix with the permutation applied to the columns.
+  */
+template<typename Derived, typename PermutationDerived>
+inline const internal::permut_matrix_product_retval<PermutationDerived, Derived, OnTheRight>
+operator*(const MatrixBase<Derived>& matrix,
+          const PermutationBase<PermutationDerived> &permutation)
+{
+  return internal::permut_matrix_product_retval
+           <PermutationDerived, Derived, OnTheRight>
+           (permutation.derived(), matrix.derived());
+}
+
+/** \returns the matrix with the permutation applied to the rows.
+  */
+template<typename Derived, typename PermutationDerived>
+inline const internal::permut_matrix_product_retval
+               <PermutationDerived, Derived, OnTheLeft>
+operator*(const PermutationBase<PermutationDerived> &permutation,
+          const MatrixBase<Derived>& matrix)
+{
+  return internal::permut_matrix_product_retval
+           <PermutationDerived, Derived, OnTheLeft>
+           (permutation.derived(), matrix.derived());
+}
+
+namespace internal {
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct traits<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+  typedef typename MatrixType::PlainObject ReturnType;
+};
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct permut_matrix_product_retval
+ : public ReturnByValue<permut_matrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+    typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+    typedef typename MatrixType::Index Index;
+
+    permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
+      : m_permutation(perm), m_matrix(matrix)
+    {}
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+
+    template<typename Dest> inline void evalTo(Dest& dst) const
+    {
+      const Index n = Side==OnTheLeft ? rows() : cols();
+      // FIXME we need an is_same for expression that is not sensitive to constness. For instance
+      // is_same_xpr<Block<const Matrix>, Block<Matrix> >::value should be true.
+      if(    is_same<MatrixTypeNestedCleaned,Dest>::value
+          && blas_traits<MatrixTypeNestedCleaned>::HasUsableDirectAccess
+          && blas_traits<Dest>::HasUsableDirectAccess
+          && extract_data(dst) == extract_data(m_matrix))
+      {
+        // apply the permutation inplace
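+        // dst aliases m_matrix, so we walk each cycle of the permutation and
+        // swap rows/columns along it; the boolean mask marks visited positions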
+        Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());
+        mask.fill(false);
+        Index r = 0;
+        while(r < m_permutation.size())
+        {
+          // search for the next seed
+          while(r<m_permutation.size() && mask[r]) r++;
+          if(r>=m_permutation.size())
+            break;
+          // we got one, let's follow it until we are back to the seed
+          Index k0 = r++;
+          Index kPrev = k0;
+          mask.coeffRef(k0) = true;
+          for(Index k=m_permutation.indices().coeff(k0); k!=k0; k=m_permutation.indices().coeff(k))
+          {
+                  Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>(dst, k)
+            .swap(Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>
+                       (dst,((Side==OnTheLeft) ^ Transposed) ? k0 : kPrev));
+
+            mask.coeffRef(k) = true;
+            kPrev = k;
+          }
+        }
+      }
+      else
+      {
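+        // no aliasing between source and destination: each row (or column) of the
+        // source is copied directly into its permuted position in dst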
+        for(int i = 0; i < n; ++i)
+        {
+          Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>
+               (dst, ((Side==OnTheLeft) ^ Transposed) ? m_permutation.indices().coeff(i) : i)
+
+          =
+
+          Block<const MatrixTypeNestedCleaned,Side==OnTheLeft ? 1 : MatrixType::RowsAtCompileTime,Side==OnTheRight ? 1 : MatrixType::ColsAtCompileTime>
+               (m_matrix, ((Side==OnTheRight) ^ Transposed) ? m_permutation.indices().coeff(i) : i);
+        }
+      }
+    }
+
+  protected:
+    const PermutationType& m_permutation;
+    typename MatrixType::Nested m_matrix;
+};
+
+/* Template partial specialization for transposed/inverse permutations */
+
+template<typename Derived>
+struct traits<Transpose<PermutationBase<Derived> > >
+ : traits<Derived>
+{};
+
+} // end namespace internal
+
+template<typename Derived>
+class Transpose<PermutationBase<Derived> >
+  : public EigenBase<Transpose<PermutationBase<Derived> > >
+{
+    typedef Derived PermutationType;
+    typedef typename PermutationType::IndicesType IndicesType;
+    typedef typename PermutationType::PlainPermutationType PlainPermutationType;
+  public:
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    typedef internal::traits<PermutationType> Traits;
+    typedef typename Derived::DenseMatrixType DenseMatrixType;
+    enum {
+      Flags = Traits::Flags,
+      CoeffReadCost = Traits::CoeffReadCost,
+      RowsAtCompileTime = Traits::RowsAtCompileTime,
+      ColsAtCompileTime = Traits::ColsAtCompileTime,
+      MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = Traits::MaxColsAtCompileTime
+    };
+    typedef typename Traits::Scalar Scalar;
+    #endif
+
+    Transpose(const PermutationType& p) : m_permutation(p) {}
+
+    inline int rows() const { return m_permutation.rows(); }
+    inline int cols() const { return m_permutation.cols(); }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename DenseDerived>
+    void evalTo(MatrixBase<DenseDerived>& other) const
+    {
+      other.setZero();
+      for (int i=0; i<rows();++i)
+        other.coeffRef(i, m_permutation.indices().coeff(i)) = typename DenseDerived::Scalar(1);
+    }
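+    // Note: evalTo() above places a 1 at (i, m_permutation.indices()[i]);
+    // this dense form is the transpose of the dense form of m_permutation
+    // itself, as expected for the inverse of a permutation.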
+    #endif
+
+    /** \return the equivalent permutation matrix */
+    PlainPermutationType eval() const { return *this; }
+
+    DenseMatrixType toDenseMatrix() const { return *this; }
+
+    /** \returns the matrix with the inverse permutation applied to the columns.
+      */
+    template<typename OtherDerived> friend
+    inline const internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheRight, true>
+    operator*(const MatrixBase<OtherDerived>& matrix, const Transpose& trPerm)
+    {
+      return internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheRight, true>(trPerm.m_permutation, matrix.derived());
+    }
+
+    /** \returns the matrix with the inverse permutation applied to the rows.
+      */
+    template<typename OtherDerived>
+    inline const internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheLeft, true>
+    operator*(const MatrixBase<OtherDerived>& matrix) const
+    {
+      return internal::permut_matrix_product_retval<PermutationType, OtherDerived, OnTheLeft, true>(m_permutation, matrix.derived());
+    }
+
+    const PermutationType& nestedPermutation() const { return m_permutation; }
+
+  protected:
+    const PermutationType& m_permutation;
+};
+
+template<typename Derived>
+const PermutationWrapper<const Derived> MatrixBase<Derived>::asPermutation() const
+{
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_PERMUTATIONMATRIX_H
diff --git a/Eigen/src/Core/PlainObjectBase.h b/Eigen/src/Core/PlainObjectBase.h
new file mode 100644
index 0000000..8a3e454
--- /dev/null
+++ b/Eigen/src/Core/PlainObjectBase.h
@@ -0,0 +1,792 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DENSESTORAGEBASE_H
+#define EIGEN_DENSESTORAGEBASE_H
+
+#if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO)
+# define EIGEN_INITIALIZE_COEFFS
+# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0);
+#elif defined(EIGEN_INITIALIZE_MATRICES_BY_NAN)
+# define EIGEN_INITIALIZE_COEFFS
+# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=std::numeric_limits<Scalar>::quiet_NaN();
+#else
+# undef EIGEN_INITIALIZE_COEFFS
+# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+template<int MaxSizeAtCompileTime> struct check_rows_cols_for_overflow {
+  template<typename Index>
+  static EIGEN_ALWAYS_INLINE void run(Index, Index)
+  {
+  }
+};
+
+template<> struct check_rows_cols_for_overflow<Dynamic> {
+  template<typename Index>
+  static EIGEN_ALWAYS_INLINE void run(Index rows, Index cols)
+  {
+    // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242
+    // we assume Index is signed
+    Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1;
+    bool error = (rows == 0 || cols == 0) ? false
+               : (rows > max_index / cols);
+    if (error)
+      throw_std_bad_alloc();
+  }
+};
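+// Worked example of the check above (illustrative): with a 32-bit signed
+// Index, max_index is 2^31 - 1 = 2147483647; asking for a 100000 x 30000
+// matrix gives rows > max_index / cols (100000 > 71582), so
+// throw_std_bad_alloc() is called instead of silently overflowing rows*cols.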
+
+template <typename Derived,
+          typename OtherDerived = Derived,
+          bool IsVector = bool(Derived::IsVectorAtCompileTime) && bool(OtherDerived::IsVectorAtCompileTime)>
+struct conservative_resize_like_impl;
+
+template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl;
+
+} // end namespace internal
+
+/** \class PlainObjectBase
+  * \brief %Dense storage base class for matrices and arrays.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN.
+  *
+  * \sa \ref TopicClassHierarchy
+  */
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+namespace internal {
+
+// this is a workaround for doxygen not being able to understand the inheritance logic
+// when it is hidden by the dense_xpr_base helper struct.
+template<typename Derived> struct dense_xpr_base_dispatcher_for_doxygen;// : public MatrixBase<Derived> {};
+/** This class is just a workaround for Doxygen and it does not actually exist. */
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct dense_xpr_base_dispatcher_for_doxygen<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+    : public MatrixBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > {};
+/** This class is just a workaround for Doxygen and it does not actually exist. */
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct dense_xpr_base_dispatcher_for_doxygen<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
+    : public ArrayBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > {};
+
+} // namespace internal
+
+template<typename Derived>
+class PlainObjectBase : public internal::dense_xpr_base_dispatcher_for_doxygen<Derived>
+#else
+template<typename Derived>
+class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
+#endif
+{
+  public:
+    enum { Options = internal::traits<Derived>::Options };
+    typedef typename internal::dense_xpr_base<Derived>::type Base;
+
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef Derived DenseType;
+
+    using Base::RowsAtCompileTime;
+    using Base::ColsAtCompileTime;
+    using Base::SizeAtCompileTime;
+    using Base::MaxRowsAtCompileTime;
+    using Base::MaxColsAtCompileTime;
+    using Base::MaxSizeAtCompileTime;
+    using Base::IsVectorAtCompileTime;
+    using Base::Flags;
+
+    template<typename PlainObjectType, int MapOptions, typename StrideType> friend class Eigen::Map;
+    friend  class Eigen::Map<Derived, Unaligned>;
+    typedef Eigen::Map<Derived, Unaligned>  MapType;
+    friend  class Eigen::Map<const Derived, Unaligned>;
+    typedef const Eigen::Map<const Derived, Unaligned> ConstMapType;
+    friend  class Eigen::Map<Derived, Aligned>;
+    typedef Eigen::Map<Derived, Aligned> AlignedMapType;
+    friend  class Eigen::Map<const Derived, Aligned>;
+    typedef const Eigen::Map<const Derived, Aligned> ConstAlignedMapType;
+    template<typename StrideType> struct StridedMapType { typedef Eigen::Map<Derived, Unaligned, StrideType> type; };
+    template<typename StrideType> struct StridedConstMapType { typedef Eigen::Map<const Derived, Unaligned, StrideType> type; };
+    template<typename StrideType> struct StridedAlignedMapType { typedef Eigen::Map<Derived, Aligned, StrideType> type; };
+    template<typename StrideType> struct StridedConstAlignedMapType { typedef Eigen::Map<const Derived, Aligned, StrideType> type; };
+
+  protected:
+    DenseStorage<Scalar, Base::MaxSizeAtCompileTime, Base::RowsAtCompileTime, Base::ColsAtCompileTime, Options> m_storage;
+
+  public:
+    enum { NeedsToAlign = SizeAtCompileTime != Dynamic && (internal::traits<Derived>::Flags & AlignedBit) != 0 };
+    EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
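+    // NeedsToAlign holds for fixed-size objects whose storage requires packet
+    // alignment; the macro above then overloads operator new/delete so that
+    // heap-allocated objects also honor that alignment.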
+
+    Base& base() { return *static_cast<Base*>(this); }
+    const Base& base() const { return *static_cast<const Base*>(this); }
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); }
+
+    EIGEN_STRONG_INLINE const Scalar& coeff(Index rowId, Index colId) const
+    {
+      if(Flags & RowMajorBit)
+        return m_storage.data()[colId + rowId * m_storage.cols()];
+      else // column-major
+        return m_storage.data()[rowId + colId * m_storage.rows()];
+    }
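+    // Storage-order note: for a row-major layout the linear index of (r,c) is
+    // c + r*cols(), for a column-major layout it is r + c*rows(); the same
+    // mapping is used by all the coeff/packet accessors below.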
+
+    EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
+    {
+      return m_storage.data()[index];
+    }
+
+    EIGEN_STRONG_INLINE Scalar& coeffRef(Index rowId, Index colId)
+    {
+      if(Flags & RowMajorBit)
+        return m_storage.data()[colId + rowId * m_storage.cols()];
+      else // column-major
+        return m_storage.data()[rowId + colId * m_storage.rows()];
+    }
+
+    EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
+    {
+      return m_storage.data()[index];
+    }
+
+    EIGEN_STRONG_INLINE const Scalar& coeffRef(Index rowId, Index colId) const
+    {
+      if(Flags & RowMajorBit)
+        return m_storage.data()[colId + rowId * m_storage.cols()];
+      else // column-major
+        return m_storage.data()[rowId + colId * m_storage.rows()];
+    }
+
+    EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const
+    {
+      return m_storage.data()[index];
+    }
+
+    /** \internal */
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const
+    {
+      return internal::ploadt<PacketScalar, LoadMode>
+               (m_storage.data() + (Flags & RowMajorBit
+                                   ? colId + rowId * m_storage.cols()
+                                   : rowId + colId * m_storage.rows()));
+    }
+
+    /** \internal */
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const
+    {
+      return internal::ploadt<PacketScalar, LoadMode>(m_storage.data() + index);
+    }
+
+    /** \internal */
+    template<int StoreMode>
+    EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val)
+    {
+      internal::pstoret<Scalar, PacketScalar, StoreMode>
+              (m_storage.data() + (Flags & RowMajorBit
+                                   ? colId + rowId * m_storage.cols()
+                                   : rowId + colId * m_storage.rows()), val);
+    }
+
+    /** \internal */
+    template<int StoreMode>
+    EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val)
+    {
+      internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, val);
+    }
+
+    /** \returns a const pointer to the data array of this matrix */
+    EIGEN_STRONG_INLINE const Scalar *data() const
+    { return m_storage.data(); }
+
+    /** \returns a pointer to the data array of this matrix */
+    EIGEN_STRONG_INLINE Scalar *data()
+    { return m_storage.data(); }
+
+    /** Resizes \c *this to a \a rows x \a cols matrix.
+      *
+      * This method is intended for dynamic-size matrices, although it is legal to call it on any
+      * matrix as long as fixed dimensions are left unchanged. If you only want to change the number
+      * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t).
+      *
+      * If the current number of coefficients of \c *this exactly matches the
+      * product \a rows * \a cols, then no memory allocation is performed and
+      * the current values are left unchanged. In all other cases, including
+      * shrinking, the data is reallocated and all previous values are lost.
+      *
+      * Example: \include Matrix_resize_int_int.cpp
+      * Output: \verbinclude Matrix_resize_int_int.out
+      *
+      * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t)
+      */
+    EIGEN_STRONG_INLINE void resize(Index nbRows, Index nbCols)
+    {
+      eigen_assert(   EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,nbRows==RowsAtCompileTime)
+                   && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,nbCols==ColsAtCompileTime)
+                   && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,nbRows<=MaxRowsAtCompileTime)
+                   && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,nbCols<=MaxColsAtCompileTime)
+                   && nbRows>=0 && nbCols>=0 && "Invalid sizes when resizing a matrix or array.");
+      internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(nbRows, nbCols);
+      #ifdef EIGEN_INITIALIZE_COEFFS
+        Index size = nbRows*nbCols;
+        bool size_changed = size != this->size();
+        m_storage.resize(size, nbRows, nbCols);
+        if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+      #else
+        m_storage.resize(nbRows*nbCols, nbRows, nbCols);
+      #endif
+    }
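+    // Illustrative sketch (assuming a dynamic-size MatrixXd):
+    //   MatrixXd m(2,6);
+    //   m.resize(3,4); // 12 coefficients either way: no reallocation,
+    //                  // values kept (reinterpreted in the new shape)
+    //   m.resize(5,5); // size changes: reallocates, previous values lost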
+
+    /** Resizes \c *this to a vector of length \a size
+      *
+      * \only_for_vectors. This method does not work for
+      * partially dynamic matrices when the static dimension is anything other
+      * than 1. For example it will not work with Matrix<double, 2, Dynamic>.
+      *
+      * Example: \include Matrix_resize_int.cpp
+      * Output: \verbinclude Matrix_resize_int.out
+      *
+      * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t)
+      */
+    inline void resize(Index size)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase)
+      eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime==Dynamic || size<=MaxSizeAtCompileTime)) || SizeAtCompileTime == size) && size>=0);
+      #ifdef EIGEN_INITIALIZE_COEFFS
+        bool size_changed = size != this->size();
+      #endif
+      if(RowsAtCompileTime == 1)
+        m_storage.resize(size, 1, size);
+      else
+        m_storage.resize(size, size, 1);
+      #ifdef EIGEN_INITIALIZE_COEFFS
+        if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+      #endif
+    }
+
+    /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the special value \c NoChange
+      * as in the example below.
+      *
+      * Example: \include Matrix_resize_NoChange_int.cpp
+      * Output: \verbinclude Matrix_resize_NoChange_int.out
+      *
+      * \sa resize(Index,Index)
+      */
+    inline void resize(NoChange_t, Index nbCols)
+    {
+      resize(rows(), nbCols);
+    }
+
+    /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange
+      * as in the example below.
+      *
+      * Example: \include Matrix_resize_int_NoChange.cpp
+      * Output: \verbinclude Matrix_resize_int_NoChange.out
+      *
+      * \sa resize(Index,Index)
+      */
+    inline void resize(Index nbRows, NoChange_t)
+    {
+      resize(nbRows, cols());
+    }
+
+    /** Resizes \c *this to have the same dimensions as \a other.
+      * Takes care of doing all the checking that's needed.
+      *
+      * Note that copying a row-vector into a vector (and conversely) is allowed.
+      * The resizing, if any, is then done in the appropriate way so that row-vectors
+      * remain row-vectors and vectors remain vectors.
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)
+    {
+      const OtherDerived& other = _other.derived();
+      internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.rows(), other.cols());
+      const Index othersize = other.rows()*other.cols();
+      if(RowsAtCompileTime == 1)
+      {
+        eigen_assert(other.rows() == 1 || other.cols() == 1);
+        resize(1, othersize);
+      }
+      else if(ColsAtCompileTime == 1)
+      {
+        eigen_assert(other.rows() == 1 || other.cols() == 1);
+        resize(othersize, 1);
+      }
+      else resize(other.rows(), other.cols());
+    }
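+    // E.g. (sketch): resizing a VectorXd to match a 1x4 RowVectorXd yields a
+    // 4x1 vector, so vectors stay vectors and row-vectors stay row-vectors.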
+
+    /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+      *
+      * The method is intended for matrices of dynamic size. If you only want to change the number
+      * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
+      * conservativeResize(Index, NoChange_t).
+      *
+      * Matrices are resized relative to the top-left element. In case values need to be 
+      * appended to the matrix they will be uninitialized.
+      */
+    EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, Index nbCols)
+    {
+      internal::conservative_resize_like_impl<Derived>::run(*this, nbRows, nbCols);
+    }
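+    // Illustrative sketch (assuming a dynamic-size MatrixXd):
+    //   MatrixXd m = MatrixXd::Ones(2,2);
+    //   m.conservativeResize(3,3); // m.topLeftCorner(2,2) is still all ones;
+    //                              // the new row and column are uninitialized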
+
+    /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+      *
+      * As opposed to conservativeResize(Index rows, Index cols), this version leaves
+      * the number of columns unchanged.
+      *
+      * In case the matrix is growing, new rows will be uninitialized.
+      */
+    EIGEN_STRONG_INLINE void conservativeResize(Index nbRows, NoChange_t)
+    {
+      // Note: see the comment in conservativeResize(Index,Index)
+      conservativeResize(nbRows, cols());
+    }
+
+    /** Resizes the matrix to \a rows x \a cols while leaving old values untouched.
+      *
+      * As opposed to conservativeResize(Index rows, Index cols), this version leaves
+      * the number of rows unchanged.
+      *
+      * In case the matrix is growing, new columns will be uninitialized.
+      */
+    EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index nbCols)
+    {
+      // Note: see the comment in conservativeResize(Index,Index)
+      conservativeResize(rows(), nbCols);
+    }
+
+    /** Resizes the vector to \a size while retaining old values.
+      *
+      * \only_for_vectors. This method does not work for
+      * partially dynamic matrices when the static dimension is anything other
+      * than 1. For example it will not work with Matrix<double, 2, Dynamic>.
+      *
+      * When values are appended, they will be uninitialized.
+      */
+    EIGEN_STRONG_INLINE void conservativeResize(Index size)
+    {
+      internal::conservative_resize_like_impl<Derived>::run(*this, size);
+    }
+
+    /** Resizes the matrix to \a rows x \a cols of \c other, while leaving old values untouched.
+      *
+      * The method is intended for matrices of dynamic size. If you only want to change the number
+      * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or
+      * conservativeResize(Index, NoChange_t).
+      *
+      * Matrices are resized relative to the top-left element. In case values need to be 
+      * appended to the matrix they will be copied from \c other.
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase<OtherDerived>& other)
+    {
+      internal::conservative_resize_like_impl<Derived,OtherDerived>::run(*this, other);
+    }
+
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other)
+    {
+      return _set(other);
+    }
+
+    /** \sa MatrixBase::lazyAssign() */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase<OtherDerived>& other)
+    {
+      _resize_to_match(other);
+      return Base::lazyAssign(other.derived());
+    }
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue<OtherDerived>& func)
+    {
+      resize(func.rows(), func.cols());
+      return Base::operator=(func);
+    }
+
+    EIGEN_STRONG_INLINE PlainObjectBase() : m_storage()
+    {
+//       _check_template_params();
+//       EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    // FIXME is it still needed ?
+    /** \internal */
+    PlainObjectBase(internal::constructor_without_unaligned_array_assert)
+      : m_storage(internal::constructor_without_unaligned_array_assert())
+    {
+//       _check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+    }
+#endif
+
+    EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols)
+      : m_storage(a_size, nbRows, nbCols)
+    {
+//       _check_template_params();
+//       EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
+    }
+
+    /** \copydoc MatrixBase::operator=(const EigenBase<OtherDerived>&)
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived> &other)
+    {
+      _resize_to_match(other);
+      Base::operator=(other.derived());
+      return this->derived();
+    }
+
+    /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase<OtherDerived> &other)
+      : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
+    {
+      _check_template_params();
+      internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.derived().rows(), other.derived().cols());
+      Base::operator=(other.derived());
+    }
+
+    /** \name Map
+      * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects,
+      * while the MapAligned() functions return aligned Map objects and thus should be called only with 16-byte-aligned
+      * \a data pointers.
+      *
+      * \see class Map
+      */
+    //@{
+    static inline ConstMapType Map(const Scalar* data)
+    { return ConstMapType(data); }
+    static inline MapType Map(Scalar* data)
+    { return MapType(data); }
+    static inline ConstMapType Map(const Scalar* data, Index size)
+    { return ConstMapType(data, size); }
+    static inline MapType Map(Scalar* data, Index size)
+    { return MapType(data, size); }
+    static inline ConstMapType Map(const Scalar* data, Index rows, Index cols)
+    { return ConstMapType(data, rows, cols); }
+    static inline MapType Map(Scalar* data, Index rows, Index cols)
+    { return MapType(data, rows, cols); }
+
+    static inline ConstAlignedMapType MapAligned(const Scalar* data)
+    { return ConstAlignedMapType(data); }
+    static inline AlignedMapType MapAligned(Scalar* data)
+    { return AlignedMapType(data); }
+    static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size)
+    { return ConstAlignedMapType(data, size); }
+    static inline AlignedMapType MapAligned(Scalar* data, Index size)
+    { return AlignedMapType(data, size); }
+    static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols)
+    { return ConstAlignedMapType(data, rows, cols); }
+    static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols)
+    { return AlignedMapType(data, rows, cols); }
+
+    template<int Outer, int Inner>
+    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+
+    template<int Outer, int Inner>
+    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, const Stride<Outer, Inner>& stride)
+    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index size, const Stride<Outer, Inner>& stride)
+    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+    template<int Outer, int Inner>
+    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)
+    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }
+    //@}
+
+    using Base::setConstant;
+    Derived& setConstant(Index size, const Scalar& value);
+    Derived& setConstant(Index rows, Index cols, const Scalar& value);
+
+    using Base::setZero;
+    Derived& setZero(Index size);
+    Derived& setZero(Index rows, Index cols);
+
+    using Base::setOnes;
+    Derived& setOnes(Index size);
+    Derived& setOnes(Index rows, Index cols);
+
+    using Base::setRandom;
+    Derived& setRandom(Index size);
+    Derived& setRandom(Index rows, Index cols);
+
+    #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN
+    #include EIGEN_PLAINOBJECTBASE_PLUGIN
+    #endif
+
+  protected:
+    /** \internal Resizes *this in preparation for assigning \a other to it.
+      * Takes care of doing all the checking that's needed.
+      *
+      * Note that copying a row-vector into a vector (and conversely) is allowed.
+      * The resizing, if any, is then done in the appropriate way so that row-vectors
+      * remain row-vectors and vectors remain vectors.
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase<OtherDerived>& other)
+    {
+      #ifdef EIGEN_NO_AUTOMATIC_RESIZING
+      eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size())
+                 : (rows() == other.rows() && cols() == other.cols())))
+        && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
+      EIGEN_ONLY_USED_FOR_DEBUG(other);
+      if(this->size()==0)
+        resizeLike(other);
+      #else
+      resizeLike(other);
+      #endif
+    }
+
+    /**
+      * \brief Copies the value of the expression \a other into \c *this with automatic resizing.
+      *
+      * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized),
+      * it will be initialized.
+      *
+      * Note that copying a row-vector into a vector (and conversely) is allowed.
+      * The resizing, if any, is then done in the appropriate way so that row-vectors
+      * remain row-vectors and vectors remain vectors.
+      *
+      * \sa operator=(const MatrixBase<OtherDerived>&), _set_noalias()
+      *
+      * \internal
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Derived& _set(const DenseBase<OtherDerived>& other)
+    {
+      _set_selector(other.derived(), typename internal::conditional<static_cast<bool>(int(OtherDerived::Flags) & EvalBeforeAssigningBit), internal::true_type, internal::false_type>::type());
+      return this->derived();
+    }
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::true_type&) { _set_noalias(other.eval()); }
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::false_type&) { _set_noalias(other); }
+
+    /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which
+      * is the case when creating a new matrix) so one can enforce lazy evaluation.
+      *
+      * \sa operator=(const MatrixBase<OtherDerived>&), _set()
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase<OtherDerived>& other)
+    {
+      // I don't think we need this resize call since lazyAssign will resize anyway,
+      // and lazyAssign will be called by the assign selector.
+      //_resize_to_match(other);
+      // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because
+      // it wouldn't allow copying a row-vector into a column-vector.
+      return internal::assign_selector<Derived,OtherDerived,false>::run(this->derived(), other.derived());
+    }
+
+    template<typename T0, typename T1>
+    EIGEN_STRONG_INLINE void _init2(Index nbRows, Index nbCols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)
+    {
+      EIGEN_STATIC_ASSERT(bool(NumTraits<T0>::IsInteger) &&
+                          bool(NumTraits<T1>::IsInteger),
+                          FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)
+      resize(nbRows,nbCols);
+    }
+    template<typename T0, typename T1>
+    EIGEN_STRONG_INLINE void _init2(const Scalar& val0, const Scalar& val1, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)
+      m_storage.data()[0] = val0;
+      m_storage.data()[1] = val1;
+    }
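+    // Dispatch note: these two _init2 overloads disambiguate two-argument
+    // construction. When SizeAtCompileTime != 2 the arguments must be integer
+    // sizes (e.g. MatrixXd(3,4)); for a fixed size of exactly 2 they are the
+    // coefficient values themselves (e.g. Vector2d(0.5, 1.5)).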
+
+    template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
+    friend struct internal::matrix_swap_impl;
+
+    /** \internal generic implementation of swap for dense storage; for dynamic-sized matrices of the same type it is enough to swap the
+      * data pointers.
+      */
+    template<typename OtherDerived>
+    void _swap(DenseBase<OtherDerived> const & other)
+    {
+      enum { SwapPointers = internal::is_same<Derived, OtherDerived>::value && Base::SizeAtCompileTime==Dynamic };
+      internal::matrix_swap_impl<Derived, OtherDerived, bool(SwapPointers)>::run(this->derived(), other.const_cast_derived());
+    }
+
+  public:
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    static EIGEN_STRONG_INLINE void _check_template_params()
+    {
+      EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (Options&RowMajor)==RowMajor)
+                        && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (Options&RowMajor)==0)
+                        && ((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0))
+                        && ((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0))
+                        && ((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0))
+                        && ((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0))
+                        && (MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime==Dynamic)
+                        && (MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime==Dynamic)
+                        && (Options & (DontAlign|RowMajor)) == Options),
+        INVALID_MATRIX_TEMPLATE_PARAMETERS)
+    }
+#endif
+
+private:
+    enum { ThisConstantIsPrivateInPlainObjectBase };
+};
+
+namespace internal {
+
+template <typename Derived, typename OtherDerived, bool IsVector>
+struct conservative_resize_like_impl
+{
+  typedef typename Derived::Index Index;
+  static void run(DenseBase<Derived>& _this, Index rows, Index cols)
+  {
+    if (_this.rows() == rows && _this.cols() == cols) return;
+    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
+
+    if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows
+         (!Derived::IsRowMajor && _this.rows() == rows) )  // column-major and we change only the number of columns
+    {
+      internal::check_rows_cols_for_overflow<Derived::MaxSizeAtCompileTime>::run(rows, cols);
+      _this.derived().m_storage.conservativeResize(rows*cols,rows,cols);
+    }
+    else
+    {
+      // The storage order does not allow us to use reallocation.
+      typename Derived::PlainObject tmp(rows,cols);
+      const Index common_rows = (std::min)(rows, _this.rows());
+      const Index common_cols = (std::min)(cols, _this.cols());
+      tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
+      _this.derived().swap(tmp);
+    }
+  }
+
+  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
+  {
+    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
+
+    // Note: there is room for improvement here. Basically, for conservativeResize(Index,Index),
+    // neither RowsAtCompileTime nor ColsAtCompileTime must be Dynamic. If only one of the
+    // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or
+    // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like
+    // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good.
+    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
+    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)
+
+    if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows
+         (!Derived::IsRowMajor && _this.rows() == other.rows()) )  // column-major and we change only the number of columns
+    {
+      const Index new_rows = other.rows() - _this.rows();
+      const Index new_cols = other.cols() - _this.cols();
+      _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols());
+      if (new_rows>0)
+        _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows);
+      else if (new_cols>0)
+        _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols);
+    }
+    else
+    {
+      // The storage order does not allow us to use reallocation.
+      typename Derived::PlainObject tmp(other);
+      const Index common_rows = (std::min)(tmp.rows(), _this.rows());
+      const Index common_cols = (std::min)(tmp.cols(), _this.cols());
+      tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);
+      _this.derived().swap(tmp);
+    }
+  }
+};
+
+// Here, the specialization for vectors inherits from the general matrix case
+// to allow calling .conservativeResize(rows,cols) on vectors.
+template <typename Derived, typename OtherDerived>
+struct conservative_resize_like_impl<Derived,OtherDerived,true>
+  : conservative_resize_like_impl<Derived,OtherDerived,false>
+{
+  using conservative_resize_like_impl<Derived,OtherDerived,false>::run;
+  
+  typedef typename Derived::Index Index;
+  static void run(DenseBase<Derived>& _this, Index size)
+  {
+    const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;
+    const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1;
+    _this.derived().m_storage.conservativeResize(size,new_rows,new_cols);
+  }
+
+  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)
+  {
+    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;
+
+    const Index num_new_elements = other.size() - _this.size();
+
+    const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows();
+    const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1;
+    _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);
+
+    if (num_new_elements > 0)
+      _this.tail(num_new_elements) = other.tail(num_new_elements);
+  }
+};
+
+template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>
+struct matrix_swap_impl
+{
+  static inline void run(MatrixTypeA& a, MatrixTypeB& b)
+  {
+    a.base().swap(b);
+  }
+};
+
+template<typename MatrixTypeA, typename MatrixTypeB>
+struct matrix_swap_impl<MatrixTypeA, MatrixTypeB, true>
+{
+  static inline void run(MatrixTypeA& a, MatrixTypeB& b)
+  {
+    static_cast<typename MatrixTypeA::Base&>(a).m_storage.swap(static_cast<typename MatrixTypeB::Base&>(b).m_storage);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_DENSESTORAGEBASE_H
diff --git a/Eigen/src/Core/ProductBase.h b/Eigen/src/Core/ProductBase.h
new file mode 100644
index 0000000..cf74470
--- /dev/null
+++ b/Eigen/src/Core/ProductBase.h
@@ -0,0 +1,290 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PRODUCTBASE_H
+#define EIGEN_PRODUCTBASE_H
+
+namespace Eigen { 
+
+/** \class ProductBase
+  * \ingroup Core_Module
+  *
+  */
+
+namespace internal {
+template<typename Derived, typename _Lhs, typename _Rhs>
+struct traits<ProductBase<Derived,_Lhs,_Rhs> >
+{
+  typedef MatrixXpr XprKind;
+  typedef typename remove_all<_Lhs>::type Lhs;
+  typedef typename remove_all<_Rhs>::type Rhs;
+  typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+  typedef typename promote_storage_type<typename traits<Lhs>::StorageKind,
+                                           typename traits<Rhs>::StorageKind>::ret StorageKind;
+  typedef typename promote_index_type<typename traits<Lhs>::Index,
+                                         typename traits<Rhs>::Index>::type Index;
+  enum {
+    RowsAtCompileTime = traits<Lhs>::RowsAtCompileTime,
+    ColsAtCompileTime = traits<Rhs>::ColsAtCompileTime,
+    MaxRowsAtCompileTime = traits<Lhs>::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = traits<Rhs>::MaxColsAtCompileTime,
+    Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0)
+          | EvalBeforeNestingBit | EvalBeforeAssigningBit | NestByRefBit,
+                  // Note that EvalBeforeNestingBit and NestByRefBit
+                  // are not used in practice because nested is overloaded for products
+    CoeffReadCost = 0 // FIXME why is it needed ?
+  };
+};
+}
+
+#define EIGEN_PRODUCT_PUBLIC_INTERFACE(Derived) \
+  typedef ProductBase<Derived, Lhs, Rhs > Base; \
+  EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
+  typedef typename Base::LhsNested LhsNested; \
+  typedef typename Base::_LhsNested _LhsNested; \
+  typedef typename Base::LhsBlasTraits LhsBlasTraits; \
+  typedef typename Base::ActualLhsType ActualLhsType; \
+  typedef typename Base::_ActualLhsType _ActualLhsType; \
+  typedef typename Base::RhsNested RhsNested; \
+  typedef typename Base::_RhsNested _RhsNested; \
+  typedef typename Base::RhsBlasTraits RhsBlasTraits; \
+  typedef typename Base::ActualRhsType ActualRhsType; \
+  typedef typename Base::_ActualRhsType _ActualRhsType; \
+  using Base::m_lhs; \
+  using Base::m_rhs;
+
+template<typename Derived, typename Lhs, typename Rhs>
+class ProductBase : public MatrixBase<Derived>
+{
+  public:
+    typedef MatrixBase<Derived> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(ProductBase)
+    
+    typedef typename Lhs::Nested LhsNested;
+    typedef typename internal::remove_all<LhsNested>::type _LhsNested;
+    typedef internal::blas_traits<_LhsNested> LhsBlasTraits;
+    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
+    typedef typename internal::remove_all<ActualLhsType>::type _ActualLhsType;
+    typedef typename internal::traits<Lhs>::Scalar LhsScalar;
+
+    typedef typename Rhs::Nested RhsNested;
+    typedef typename internal::remove_all<RhsNested>::type _RhsNested;
+    typedef internal::blas_traits<_RhsNested> RhsBlasTraits;
+    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
+    typedef typename internal::remove_all<ActualRhsType>::type _ActualRhsType;
+    typedef typename internal::traits<Rhs>::Scalar RhsScalar;
+
+    // Diagonal of a product: no need to evaluate the arguments because they are going to be evaluated only once
+    typedef CoeffBasedProduct<LhsNested, RhsNested, 0> FullyLazyCoeffBaseProductType;
+
+  public:
+
+#ifndef EIGEN_NO_MALLOC
+    typedef typename Base::PlainObject BasePlainObject;
+    typedef Matrix<Scalar,RowsAtCompileTime==1?1:Dynamic,ColsAtCompileTime==1?1:Dynamic,BasePlainObject::Options> DynPlainObject;
+    typedef typename internal::conditional<(BasePlainObject::SizeAtCompileTime==Dynamic) || (BasePlainObject::SizeAtCompileTime*int(sizeof(Scalar)) < int(EIGEN_STACK_ALLOCATION_LIMIT)),
+                                           BasePlainObject, DynPlainObject>::type PlainObject;
+#else
+    typedef typename Base::PlainObject PlainObject;
+#endif
+
+    ProductBase(const Lhs& a_lhs, const Rhs& a_rhs)
+      : m_lhs(a_lhs), m_rhs(a_rhs)
+    {
+      eigen_assert(a_lhs.cols() == a_rhs.rows()
+        && "invalid matrix product"
+        && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
+    }
+
+    inline Index rows() const { return m_lhs.rows(); }
+    inline Index cols() const { return m_rhs.cols(); }
+
+    template<typename Dest>
+    inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); }
+
+    template<typename Dest>
+    inline void addTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(1)); }
+
+    template<typename Dest>
+    inline void subTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(-1)); }
+
+    template<typename Dest>
+    inline void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { derived().scaleAndAddTo(dst,alpha); }
+
+    const _LhsNested& lhs() const { return m_lhs; }
+    const _RhsNested& rhs() const { return m_rhs; }
+
+    // Implicit conversion to the nested type (triggers the evaluation of the product)
+    operator const PlainObject& () const
+    {
+      m_result.resize(m_lhs.rows(), m_rhs.cols());
+      derived().evalTo(m_result);
+      return m_result;
+    }
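+    // E.g. (sketch): passing 'A*B' to a function taking 'const MatrixXd&'
+    // goes through this conversion, so the product is evaluated exactly once
+    // into m_result.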
+
+    const Diagonal<const FullyLazyCoeffBaseProductType,0> diagonal() const
+    { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
+
+    template<int Index>
+    const Diagonal<FullyLazyCoeffBaseProductType,Index> diagonal() const
+    { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); }
+
+    const Diagonal<FullyLazyCoeffBaseProductType,Dynamic> diagonal(Index index) const
+    { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); }
+
+    // restrict coeff accessors to 1x1 expressions. No need to care about mutators here since this isn't an lvalue expression
+    typename Base::CoeffReturnType coeff(Index row, Index col) const
+    {
+#ifdef EIGEN2_SUPPORT
+      return lhs().row(row).cwiseProduct(rhs().col(col).transpose()).sum();
+#else
+      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+      eigen_assert(this->rows() == 1 && this->cols() == 1);
+      Matrix<Scalar,1,1> result = *this;
+      return result.coeff(row,col);
+#endif
+    }
+
+    typename Base::CoeffReturnType coeff(Index i) const
+    {
+      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+      eigen_assert(this->rows() == 1 && this->cols() == 1);
+      Matrix<Scalar,1,1> result = *this;
+      return result.coeff(i);
+    }
+
+    const Scalar& coeffRef(Index row, Index col) const
+    {
+      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+      eigen_assert(this->rows() == 1 && this->cols() == 1);
+      return derived().coeffRef(row,col);
+    }
+
+    const Scalar& coeffRef(Index i) const
+    {
+      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
+      eigen_assert(this->rows() == 1 && this->cols() == 1);
+      return derived().coeffRef(i);
+    }
+
+  protected:
+
+    LhsNested m_lhs;
+    RhsNested m_rhs;
+
+    mutable PlainObject m_result;
+};
+
+// here we need to overload the nested rule for products
+// such that the nested type is a const reference to a plain matrix
+namespace internal {
+template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
+struct nested<GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
+{
+  typedef typename GeneralProduct<Lhs,Rhs,Mode>::PlainObject const& type;
+};
+template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
+struct nested<const GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
+{
+  typedef typename GeneralProduct<Lhs,Rhs,Mode>::PlainObject const& type;
+};
+}
+
+template<typename NestedProduct>
+class ScaledProduct;
+
+// Note that these operator* functions are not defined as member
+// functions of ProductBase because, otherwise, we would have to
+// redefine all the overloads defined in MatrixBase. Furthermore, using
+// "using Base::operator*" would not work with MSVC.
+//
+// Also note that here we accept any compatible scalar types
+template<typename Derived,typename Lhs,typename Rhs>
+const ScaledProduct<Derived>
+operator*(const ProductBase<Derived,Lhs,Rhs>& prod, const typename Derived::Scalar& x)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
+
+template<typename Derived,typename Lhs,typename Rhs>
+typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
+                      const ScaledProduct<Derived> >::type
+operator*(const ProductBase<Derived,Lhs,Rhs>& prod, const typename Derived::RealScalar& x)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
+
+
+template<typename Derived,typename Lhs,typename Rhs>
+const ScaledProduct<Derived>
+operator*(const typename Derived::Scalar& x,const ProductBase<Derived,Lhs,Rhs>& prod)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
+
+template<typename Derived,typename Lhs,typename Rhs>
+typename internal::enable_if<!internal::is_same<typename Derived::Scalar,typename Derived::RealScalar>::value,
+                      const ScaledProduct<Derived> >::type
+operator*(const typename Derived::RealScalar& x,const ProductBase<Derived,Lhs,Rhs>& prod)
+{ return ScaledProduct<Derived>(prod.derived(), x); }
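+// E.g. (sketch): both 's * (A*B)' and '(A*B) * s' build a ScaledProduct that
+// folds the scalar into the underlying scaleAndAddTo() call, avoiding a
+// separate rescaling pass over an evaluated product.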
+
+namespace internal {
+template<typename NestedProduct>
+struct traits<ScaledProduct<NestedProduct> >
+ : traits<ProductBase<ScaledProduct<NestedProduct>,
+                         typename NestedProduct::_LhsNested,
+                         typename NestedProduct::_RhsNested> >
+{
+  typedef typename traits<NestedProduct>::StorageKind StorageKind;
+};
+}
+
+template<typename NestedProduct>
+class ScaledProduct
+  : public ProductBase<ScaledProduct<NestedProduct>,
+                       typename NestedProduct::_LhsNested,
+                       typename NestedProduct::_RhsNested>
+{
+  public:
+    typedef ProductBase<ScaledProduct<NestedProduct>,
+                       typename NestedProduct::_LhsNested,
+                       typename NestedProduct::_RhsNested> Base;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::PlainObject PlainObject;
+//     EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct)
+
+    ScaledProduct(const NestedProduct& prod, const Scalar& x)
+    : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {}
+
+    template<typename Dest>
+    inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst, Scalar(1)); }
+
+    template<typename Dest>
+    inline void addTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(1)); }
+
+    template<typename Dest>
+    inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); }
+
+    template<typename Dest>
+    inline void scaleAndAddTo(Dest& dst, const Scalar& a_alpha) const { m_prod.derived().scaleAndAddTo(dst,a_alpha * m_alpha); }
+
+    const Scalar& alpha() const { return m_alpha; }
+    
+  protected:
+    const NestedProduct& m_prod;
+    Scalar m_alpha;
+};
+
+/** \internal
+  * Overloaded to perform an efficient C = (A*B).lazy() */
+template<typename Derived>
+template<typename ProductDerived, typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+{
+  other.derived().evalTo(derived());
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_PRODUCTBASE_H
diff --git a/Eigen/src/Core/Random.h b/Eigen/src/Core/Random.h
new file mode 100644
index 0000000..480fea4
--- /dev/null
+++ b/Eigen/src/Core/Random.h
@@ -0,0 +1,152 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_RANDOM_H
+#define EIGEN_RANDOM_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Scalar> struct scalar_random_op {
+  EIGEN_EMPTY_STRUCT_CTOR(scalar_random_op)
+  template<typename Index>
+  inline const Scalar operator() (Index, Index = 0) const { return random<Scalar>(); }
+};
+
+template<typename Scalar>
+struct functor_traits<scalar_random_op<Scalar> >
+{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false, IsRepeatable = false }; };
+
+} // end namespace internal
+
+/** \returns a random matrix expression
+  *
+  * The parameters \a rows and \a cols are the number of rows and of columns of
+  * the returned matrix. Must be compatible with this MatrixBase type.
+  *
+  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
+  * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_random_int_int.cpp
+  * Output: \verbinclude MatrixBase_random_int_int.out
+  *
+  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+  * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
+  * behavior with expressions involving random matrices.
+  *
+  * \sa MatrixBase::setRandom(), MatrixBase::Random(Index), MatrixBase::Random()
+  */
+template<typename Derived>
+inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
+DenseBase<Derived>::Random(Index rows, Index cols)
+{
+  return NullaryExpr(rows, cols, internal::scalar_random_op<Scalar>());
+}
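+// Illustrative sketch (assuming dynamic-size types); coefficients are drawn
+// via internal::random<Scalar>(), which for floating-point scalars yields
+// values in [-1,1]:
+//   MatrixXd m = MatrixXd::Random(3,3);
+//   VectorXd v = VectorXd::Random(5);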
+
+/** \returns a random vector expression
+  *
+  * The parameter \a size is the size of the returned vector.
+  * Must be compatible with this MatrixBase type.
+  *
+  * \only_for_vectors
+  *
+  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
+  * it is redundant to pass \a size as argument, so Random() should be used
+  * instead.
+  *
+  * Example: \include MatrixBase_random_int.cpp
+  * Output: \verbinclude MatrixBase_random_int.out
+  *
+  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+  * a temporary vector whenever it is nested in a larger expression. This prevents unexpected
+  * behavior with expressions involving random matrices.
+  *
+  * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random()
+  */
+template<typename Derived>
+inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
+DenseBase<Derived>::Random(Index size)
+{
+  return NullaryExpr(size, internal::scalar_random_op<Scalar>());
+}
+
+/** \returns a fixed-size random matrix or vector expression
+  *
+  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
+  * need to use the variants taking size arguments.
+  *
+  * Example: \include MatrixBase_random.cpp
+  * Output: \verbinclude MatrixBase_random.out
+  *
+  * This expression has the "evaluate before nesting" flag so that it will be evaluated into
+  * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected
+  * behavior with expressions involving random matrices.
+  *
+  * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random(Index)
+  */
+template<typename Derived>
+inline const CwiseNullaryOp<internal::scalar_random_op<typename internal::traits<Derived>::Scalar>, Derived>
+DenseBase<Derived>::Random()
+{
+  return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_random_op<Scalar>());
+}
+
+/** Sets all coefficients in this expression to random values.
+  *
+  * Example: \include MatrixBase_setRandom.cpp
+  * Output: \verbinclude MatrixBase_setRandom.out
+  *
+  * \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)
+  */
+template<typename Derived>
+inline Derived& DenseBase<Derived>::setRandom()
+{
+  return *this = Random(rows(), cols());
+}
+
+/** Resizes to the given \a newSize, and sets all coefficients in this expression to random values.
+  *
+  * \only_for_vectors
+  *
+  * Example: \include Matrix_setRandom_int.cpp
+  * Output: \verbinclude Matrix_setRandom_int.out
+  *
+  * \sa MatrixBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, MatrixBase::Random()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setRandom(Index newSize)
+{
+  resize(newSize);
+  return setRandom();
+}
+
+/** Resizes to the given size, and sets all coefficients in this expression to random values.
+  *
+  * \param nbRows the new number of rows
+  * \param nbCols the new number of columns
+  *
+  * Example: \include Matrix_setRandom_int_int.cpp
+  * Output: \verbinclude Matrix_setRandom_int_int.out
+  *
+  * \sa MatrixBase::setRandom(), setRandom(Index), class CwiseNullaryOp, MatrixBase::Random()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+PlainObjectBase<Derived>::setRandom(Index nbRows, Index nbCols)
+{
+  resize(nbRows, nbCols);
+  return setRandom();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_RANDOM_H
diff --git a/Eigen/src/Core/Redux.h b/Eigen/src/Core/Redux.h
new file mode 100644
index 0000000..50548fa
--- /dev/null
+++ b/Eigen/src/Core/Redux.h
@@ -0,0 +1,408 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REDUX_H
+#define EIGEN_REDUX_H
+
+namespace Eigen { 
+
+namespace internal {
+
+// TODO
+//  * implement other kinds of vectorization
+//  * factorize code
+
+/***************************************************************************
+* Part 1 : the logic deciding a strategy for vectorization and unrolling
+***************************************************************************/
+
+template<typename Func, typename Derived>
+struct redux_traits
+{
+public:
+  enum {
+    PacketSize = packet_traits<typename Derived::Scalar>::size,
+    InnerMaxSize = int(Derived::IsRowMajor)
+                 ? Derived::MaxColsAtCompileTime
+                 : Derived::MaxRowsAtCompileTime
+  };
+
+  enum {
+    MightVectorize = (int(Derived::Flags)&ActualPacketAccessBit)
+                  && (functor_traits<Func>::PacketAccess),
+    MayLinearVectorize = MightVectorize && (int(Derived::Flags)&LinearAccessBit),
+    MaySliceVectorize  = MightVectorize && int(InnerMaxSize)>=3*PacketSize
+  };
+
+public:
+  enum {
+    Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
+              : int(MaySliceVectorize)  ? int(SliceVectorizedTraversal)
+                                        : int(DefaultTraversal)
+  };
+
+public:
+  enum {
+    Cost = (  Derived::SizeAtCompileTime == Dynamic
+           || Derived::CoeffReadCost == Dynamic
+           || (Derived::SizeAtCompileTime!=1 && functor_traits<Func>::Cost == Dynamic)
+           ) ? Dynamic
+           : Derived::SizeAtCompileTime * Derived::CoeffReadCost
+               + (Derived::SizeAtCompileTime-1) * functor_traits<Func>::Cost,
+    UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
+  };
+
+public:
+  enum {
+    Unrolling = Cost != Dynamic && Cost <= UnrollingLimit
+              ? CompleteUnrolling
+              : NoUnrolling
+  };
+};
+
+/***************************************************************************
+* Part 2 : unrollers
+***************************************************************************/
+
+/*** no vectorization ***/
+
+template<typename Func, typename Derived, int Start, int Length>
+struct redux_novec_unroller
+{
+  enum {
+    HalfLength = Length/2
+  };
+
+  typedef typename Derived::Scalar Scalar;
+
+  static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
+  {
+    return func(redux_novec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),
+                redux_novec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func));
+  }
+};
+
+template<typename Func, typename Derived, int Start>
+struct redux_novec_unroller<Func, Derived, Start, 1>
+{
+  enum {
+    outer = Start / Derived::InnerSizeAtCompileTime,
+    inner = Start % Derived::InnerSizeAtCompileTime
+  };
+
+  typedef typename Derived::Scalar Scalar;
+
+  static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func&)
+  {
+    return mat.coeffByOuterInner(outer, inner);
+  }
+};
+
+// This is actually dead code and will never be called: for length 0,
+// run() is never invoked at all. It is only required to prevent
+// false warnings regarding failed inlining.
+template<typename Func, typename Derived, int Start>
+struct redux_novec_unroller<Func, Derived, Start, 0>
+{
+  typedef typename Derived::Scalar Scalar;
+  static EIGEN_STRONG_INLINE Scalar run(const Derived&, const Func&) { return Scalar(); }
+};
+
+/*** vectorization ***/
+
+template<typename Func, typename Derived, int Start, int Length>
+struct redux_vec_unroller
+{
+  enum {
+    PacketSize = packet_traits<typename Derived::Scalar>::size,
+    HalfLength = Length/2
+  };
+
+  typedef typename Derived::Scalar Scalar;
+  typedef typename packet_traits<Scalar>::type PacketScalar;
+
+  static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func& func)
+  {
+    return func.packetOp(
+            redux_vec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),
+            redux_vec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func) );
+  }
+};
+
+template<typename Func, typename Derived, int Start>
+struct redux_vec_unroller<Func, Derived, Start, 1>
+{
+  enum {
+    index = Start * packet_traits<typename Derived::Scalar>::size,
+    outer = index / int(Derived::InnerSizeAtCompileTime),
+    inner = index % int(Derived::InnerSizeAtCompileTime),
+    alignment = (Derived::Flags & AlignedBit) ? Aligned : Unaligned
+  };
+
+  typedef typename Derived::Scalar Scalar;
+  typedef typename packet_traits<Scalar>::type PacketScalar;
+
+  static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func&)
+  {
+    return mat.template packetByOuterInner<alignment>(outer, inner);
+  }
+};
+
+/***************************************************************************
+* Part 3 : implementation of all cases
+***************************************************************************/
+
+template<typename Func, typename Derived,
+         int Traversal = redux_traits<Func, Derived>::Traversal,
+         int Unrolling = redux_traits<Func, Derived>::Unrolling
+>
+struct redux_impl;
+
+template<typename Func, typename Derived>
+struct redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>
+{
+  typedef typename Derived::Scalar Scalar;
+  typedef typename Derived::Index Index;
+  static EIGEN_STRONG_INLINE Scalar run(const Derived& mat, const Func& func)
+  {
+    eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
+    Scalar res;
+    res = mat.coeffByOuterInner(0, 0);
+    for(Index i = 1; i < mat.innerSize(); ++i)
+      res = func(res, mat.coeffByOuterInner(0, i));
+    for(Index i = 1; i < mat.outerSize(); ++i)
+      for(Index j = 0; j < mat.innerSize(); ++j)
+        res = func(res, mat.coeffByOuterInner(i, j));
+    return res;
+  }
+};
+
+template<typename Func, typename Derived>
+struct redux_impl<Func,Derived, DefaultTraversal, CompleteUnrolling>
+  : public redux_novec_unroller<Func,Derived, 0, Derived::SizeAtCompileTime>
+{};
+
+template<typename Func, typename Derived>
+struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
+{
+  typedef typename Derived::Scalar Scalar;
+  typedef typename packet_traits<Scalar>::type PacketScalar;
+  typedef typename Derived::Index Index;
+
+  static Scalar run(const Derived& mat, const Func& func)
+  {
+    const Index size = mat.size();
+    eigen_assert(size && "you are using an empty matrix");
+    const Index packetSize = packet_traits<Scalar>::size;
+    const Index alignedStart = internal::first_aligned(mat);
+    enum {
+      alignment = bool(Derived::Flags & DirectAccessBit) || bool(Derived::Flags & AlignedBit)
+                ? Aligned : Unaligned
+    };
+    const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize);
+    const Index alignedSize = ((size-alignedStart)/(packetSize))*(packetSize);
+    const Index alignedEnd2 = alignedStart + alignedSize2;
+    const Index alignedEnd  = alignedStart + alignedSize;
+    Scalar res;
+    if(alignedSize)
+    {
+      PacketScalar packet_res0 = mat.template packet<alignment>(alignedStart);
+      if(alignedSize>packetSize) // we have at least two packets to partly unroll the loop
+      {
+        PacketScalar packet_res1 = mat.template packet<alignment>(alignedStart+packetSize);
+        for(Index index = alignedStart + 2*packetSize; index < alignedEnd2; index += 2*packetSize)
+        {
+          packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment>(index));
+          packet_res1 = func.packetOp(packet_res1, mat.template packet<alignment>(index+packetSize));
+        }
+
+        packet_res0 = func.packetOp(packet_res0,packet_res1);
+        if(alignedEnd>alignedEnd2)
+          packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment>(alignedEnd2));
+      }
+      res = func.predux(packet_res0);
+
+      for(Index index = 0; index < alignedStart; ++index)
+        res = func(res,mat.coeff(index));
+
+      for(Index index = alignedEnd; index < size; ++index)
+        res = func(res,mat.coeff(index));
+    }
+    else // too small to vectorize anything.
+         // since this is dynamic-size, it is inefficient for such small sizes anyway, so don't try to optimize.
+    {
+      res = mat.coeff(0);
+      for(Index index = 1; index < size; ++index)
+        res = func(res,mat.coeff(index));
+    }
+
+    return res;
+  }
+};
+
+template<typename Func, typename Derived>
+struct redux_impl<Func, Derived, SliceVectorizedTraversal, NoUnrolling>
+{
+  typedef typename Derived::Scalar Scalar;
+  typedef typename packet_traits<Scalar>::type PacketScalar;
+  typedef typename Derived::Index Index;
+
+  static Scalar run(const Derived& mat, const Func& func)
+  {
+    eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
+    const Index innerSize = mat.innerSize();
+    const Index outerSize = mat.outerSize();
+    enum {
+      packetSize = packet_traits<Scalar>::size
+    };
+    const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize;
+    Scalar res;
+    if(packetedInnerSize)
+    {
+      PacketScalar packet_res = mat.template packet<Unaligned>(0,0);
+      for(Index j=0; j<outerSize; ++j)
+        for(Index i=(j==0?packetSize:0); i<packetedInnerSize; i+=Index(packetSize))
+          packet_res = func.packetOp(packet_res, mat.template packetByOuterInner<Unaligned>(j,i));
+
+      res = func.predux(packet_res);
+      for(Index j=0; j<outerSize; ++j)
+        for(Index i=packetedInnerSize; i<innerSize; ++i)
+          res = func(res, mat.coeffByOuterInner(j,i));
+    }
+    else // too small to vectorize anything.
+         // since this is dynamic-size, it is inefficient for such small sizes anyway, so don't try to optimize.
+    {
+      res = redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>::run(mat, func);
+    }
+
+    return res;
+  }
+};
+
+template<typename Func, typename Derived>
+struct redux_impl<Func, Derived, LinearVectorizedTraversal, CompleteUnrolling>
+{
+  typedef typename Derived::Scalar Scalar;
+  typedef typename packet_traits<Scalar>::type PacketScalar;
+  enum {
+    PacketSize = packet_traits<Scalar>::size,
+    Size = Derived::SizeAtCompileTime,
+    VectorizedSize = (Size / PacketSize) * PacketSize
+  };
+  static EIGEN_STRONG_INLINE Scalar run(const Derived& mat, const Func& func)
+  {
+    eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
+    Scalar res = func.predux(redux_vec_unroller<Func, Derived, 0, Size / PacketSize>::run(mat,func));
+    if (VectorizedSize != Size)
+      res = func(res,redux_novec_unroller<Func, Derived, VectorizedSize, Size-VectorizedSize>::run(mat,func));
+    return res;
+  }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Part 4 : public API
+***************************************************************************/
+
+
+/** \returns the result of a full redux operation on the whole matrix or vector using \a func
+  *
+  * The template parameter \a Func is the type of the functor \a func, which must be
+  * an associative operator. Both current STL and TR1 functor styles are handled.
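+  *
+  * A minimal usage sketch (for illustration only; requires the <functional> header):
+  * \code
+  * Matrix2f m;
+  * m << 1, 2,
+  *      3, 4;
+  * float s = m.redux(std::plus<float>()); // 10, equivalent to m.sum()
+  * \endcode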
+  *
+  * \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()
+  */
+template<typename Derived>
+template<typename Func>
+EIGEN_STRONG_INLINE typename internal::result_of<Func(typename internal::traits<Derived>::Scalar)>::type
+DenseBase<Derived>::redux(const Func& func) const
+{
+  typedef typename internal::remove_all<typename Derived::Nested>::type ThisNested;
+  return internal::redux_impl<Func, ThisNested>
+            ::run(derived(), func);
+}
+
+/** \returns the minimum of all coefficients of \c *this.
+  * \warning the result is undefined if \c *this contains NaN.
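+  *
+  * A minimal sketch (illustrative values):
+  * \code
+  * Vector3f v(3.f, 1.f, 2.f);
+  * float lo = v.minCoeff(); // 1
+  * \endcode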
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::minCoeff() const
+{
+  return this->redux(Eigen::internal::scalar_min_op<Scalar>());
+}
+
+/** \returns the maximum of all coefficients of \c *this.
+  * \warning the result is undefined if \c *this contains NaN.
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::maxCoeff() const
+{
+  return this->redux(Eigen::internal::scalar_max_op<Scalar>());
+}
+
+/** \returns the sum of all coefficients of *this
+  *
+  * \sa trace(), prod(), mean()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::sum() const
+{
+  if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
+    return Scalar(0);
+  return this->redux(Eigen::internal::scalar_sum_op<Scalar>());
+}
+
+/** \returns the mean of all coefficients of *this
+  *
+  * \sa trace(), prod(), sum()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::mean() const
+{
+  return Scalar(this->redux(Eigen::internal::scalar_sum_op<Scalar>())) / Scalar(this->size());
+}
+
+/** \returns the product of all coefficients of *this
+  *
+  * Example: \include MatrixBase_prod.cpp
+  * Output: \verbinclude MatrixBase_prod.out
+  *
+  * \sa sum(), mean(), trace()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::prod() const
+{
+  if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
+    return Scalar(1);
+  return this->redux(Eigen::internal::scalar_product_op<Scalar>());
+}
+
+/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal.
+  *
+  * \c *this can be any matrix, not necessarily square.
+  *
+  * \sa diagonal(), sum()
+  */
+template<typename Derived>
+EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
+MatrixBase<Derived>::trace() const
+{
+  return derived().diagonal().sum();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_REDUX_H
diff --git a/Eigen/src/Core/Ref.h b/Eigen/src/Core/Ref.h
new file mode 100644
index 0000000..f53674c
--- /dev/null
+++ b/Eigen/src/Core/Ref.h
@@ -0,0 +1,269 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REF_H
+#define EIGEN_REF_H
+
+namespace Eigen { 
+
+template<typename Derived> class RefBase;
+template<typename PlainObjectType, int Options = 0,
+         typename StrideType = typename internal::conditional<PlainObjectType::IsVectorAtCompileTime,InnerStride<1>,OuterStride<> >::type > class Ref;
+
+/** \class Ref
+  * \ingroup Core_Module
+  *
+  * \brief A matrix or vector expression mapping an existing expression
+  *
+  * \tparam PlainObjectType the equivalent matrix type of the mapped data
+  * \tparam Options specifies whether the pointer is \c #Aligned, or \c #Unaligned.
+  *                The default is \c #Unaligned.
+  * \tparam StrideType optionally specifies strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1),
+  *                   but accepts a variable outer stride (leading dimension).
+  *                   This can be overridden by specifying strides.
+  *                   The type passed here must be a specialization of the Stride template; see examples below.
+  *
+  * This class makes it possible to write non-template functions taking Eigen objects as parameters while limiting the number of copies.
+  * A Ref<> object can represent either a const expression or an l-value:
+  * \code
+  * // in-out argument:
+  * void foo1(Ref<VectorXf> x);
+  *
+  * // read-only const argument:
+  * void foo2(const Ref<const VectorXf>& x);
+  * \endcode
+  *
+  * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation error is triggered.
+  * By default, a Ref<VectorXf> can reference any dense vector expression of float having a contiguous memory layout.
+  * Likewise, a Ref<MatrixXf> can reference any column-major dense matrix expression of float whose columns' elements are contiguously stored, with
+  * possibly a constant gap between columns; i.e., the inner stride must be equal to 1, but the outer stride (or leading dimension)
+  * can be greater than the number of rows.
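+  *
+  * For instance, a sub-matrix satisfies these constraints (a sketch; \c bar is a hypothetical function):
+  * \code
+  * void bar(Ref<MatrixXf> M);
+  * MatrixXf B(10,10);
+  * bar(B.block(1,1,4,4)); // OK: inner stride is 1, outer stride is B.outerStride()
+  * \endcode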
+  *
+  * In the const case, if the input expression does not match the above requirement, then it is evaluated into a temporary before being passed to the function.
+  * Here are some examples:
+  * \code
+  * MatrixXf A;
+  * VectorXf a;
+  * foo1(a.head(10));            // OK
+  * foo1(A.col(1));              // OK
+  * foo1(A.row(2));              // compilation error because here innerstride!=1
+  * foo2(A.row(2));              // The row is copied into a contiguous temporary
+  * foo2(2*a);                   // The expression is evaluated into a temporary
+  * foo2(A.col(1).segment(2,4)); // No temporary
+  * \endcode
+  *
+  * The range of inputs that can be referenced without a temporary can be enlarged using the last two template parameters.
+  * Here is an example accepting an innerstride!=1:
+  * \code
+  * // in-out argument:
+  * void foo3(Ref<VectorXf,0,InnerStride<> > x);
+  * foo3(A.row());              // OK
+  * \endcode
+  * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involve more
+  * expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one can write overloads internally calling a
+  * template function, e.g.:
+  * \code
+  * // in the .h:
+  * void foo(const Ref<MatrixXf>& A);
+  * void foo(const Ref<MatrixXf,0,Stride<> >& A);
+  *
+  * // in the .cpp:
+  * template<typename TypeOfA> void foo_impl(const TypeOfA& A) {
+  *     ... // crazy code goes here
+  * }
+  * void foo(const Ref<MatrixXf>& A) { foo_impl(A); }
+  * void foo(const Ref<MatrixXf,0,Stride<> >& A) { foo_impl(A); }
+  * \endcode
+  *
+  *
+  * \sa PlainObjectBase::Map(), \ref TopicStorageOrders
+  */
+
+namespace internal {
+
+template<typename _PlainObjectType, int _Options, typename _StrideType>
+struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
+  : public traits<Map<_PlainObjectType, _Options, _StrideType> >
+{
+  typedef _PlainObjectType PlainObjectType;
+  typedef _StrideType StrideType;
+  enum {
+    Options = _Options,
+    Flags = traits<Map<_PlainObjectType, _Options, _StrideType> >::Flags | NestByRefBit
+  };
+
+  template<typename Derived> struct match {
+    enum {
+      HasDirectAccess = internal::has_direct_access<Derived>::ret,
+      StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
+      InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic)
+                      || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime)
+                      || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1),
+      OuterStrideMatch = Derived::IsVectorAtCompileTime
+                      || int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
+      AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits<Derived>::Flags&AlignedBit)==AlignedBit),
+      ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,
+      MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch
+    };
+    typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
+  };
+  
+};
+
+template<typename Derived>
+struct traits<RefBase<Derived> > : public traits<Derived> {};
+
+}
+
+template<typename Derived> class RefBase
+ : public MapBase<Derived>
+{
+  typedef typename internal::traits<Derived>::PlainObjectType PlainObjectType;
+  typedef typename internal::traits<Derived>::StrideType StrideType;
+
+public:
+
+  typedef MapBase<Derived> Base;
+  EIGEN_DENSE_PUBLIC_INTERFACE(RefBase)
+
+  inline Index innerStride() const
+  {
+    return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;
+  }
+
+  inline Index outerStride() const
+  {
+    return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()
+         : IsVectorAtCompileTime ? this->size()
+         : int(Flags)&RowMajorBit ? this->cols()
+         : this->rows();
+  }
+
+  RefBase()
+    : Base(0,RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime),
+      // Stride<> does not allow default ctor for Dynamic strides, so let's initialize it with dummy values:
+      m_stride(StrideType::OuterStrideAtCompileTime==Dynamic?0:StrideType::OuterStrideAtCompileTime,
+               StrideType::InnerStrideAtCompileTime==Dynamic?0:StrideType::InnerStrideAtCompileTime)
+  {}
+  
+  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase)
+
+protected:
+
+  typedef Stride<StrideType::OuterStrideAtCompileTime,StrideType::InnerStrideAtCompileTime> StrideBase;
+
+  template<typename Expression>
+  void construct(Expression& expr)
+  {
+    if(PlainObjectType::RowsAtCompileTime==1)
+    {
+      eigen_assert(expr.rows()==1 || expr.cols()==1);
+      ::new (static_cast<Base*>(this)) Base(expr.data(), 1, expr.size());
+    }
+    else if(PlainObjectType::ColsAtCompileTime==1)
+    {
+      eigen_assert(expr.rows()==1 || expr.cols()==1);
+      ::new (static_cast<Base*>(this)) Base(expr.data(), expr.size(), 1);
+    }
+    else
+      ::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols());
+    
+    if(Expression::IsVectorAtCompileTime && (!PlainObjectType::IsVectorAtCompileTime) && ((Expression::Flags&RowMajorBit)!=(PlainObjectType::Flags&RowMajorBit)))
+      ::new (&m_stride) StrideBase(expr.innerStride(), StrideType::InnerStrideAtCompileTime==0?0:1);
+    else
+      ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
+                                   StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());    
+  }
+
+  StrideBase m_stride;
+};
+
+
+template<typename PlainObjectType, int Options, typename StrideType> class Ref
+  : public RefBase<Ref<PlainObjectType, Options, StrideType> >
+{
+  private:
+    typedef internal::traits<Ref> Traits;
+    template<typename Derived>
+    inline Ref(const PlainObjectBase<Derived>& expr,
+               typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0);
+  public:
+
+    typedef RefBase<Ref> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
+
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename Derived>
+    inline Ref(PlainObjectBase<Derived>& expr,
+               typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
+    {
+      EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+      Base::construct(expr.derived());
+    }
+    template<typename Derived>
+    inline Ref(const DenseBase<Derived>& expr,
+               typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
+    #else
+    template<typename Derived>
+    inline Ref(DenseBase<Derived>& expr)
+    #endif
+    {
+      EIGEN_STATIC_ASSERT(static_cast<bool>(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
+      EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
+      enum { THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY = Derived::ThisConstantIsPrivateInPlainObjectBase};
+      Base::construct(expr.const_cast_derived());
+    }
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref)
+
+};
+
+// this is the const ref version
+template<typename TPlainObjectType, int Options, typename StrideType> class Ref<const TPlainObjectType, Options, StrideType>
+  : public RefBase<Ref<const TPlainObjectType, Options, StrideType> >
+{
+    typedef internal::traits<Ref> Traits;
+  public:
+
+    typedef RefBase<Ref> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
+
+    template<typename Derived>
+    inline Ref(const DenseBase<Derived>& expr,
+               typename internal::enable_if<bool(Traits::template match<Derived>::ScalarTypeMatch),Derived>::type* = 0)
+    {
+//      std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n";
+//      std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
+//      std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
+      construct(expr.derived(), typename Traits::template match<Derived>::type());
+    }
+
+  protected:
+
+    template<typename Expression>
+    void construct(const Expression& expr,internal::true_type)
+    {
+      Base::construct(expr);
+    }
+
+    template<typename Expression>
+    void construct(const Expression& expr, internal::false_type)
+    {
+      m_object.lazyAssign(expr);
+      Base::construct(m_object);
+    }
+
+  protected:
+    TPlainObjectType m_object;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_REF_H
diff --git a/Eigen/src/Core/Replicate.h b/Eigen/src/Core/Replicate.h
new file mode 100644
index 0000000..ac4537c
--- /dev/null
+++ b/Eigen/src/Core/Replicate.h
@@ -0,0 +1,177 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REPLICATE_H
+#define EIGEN_REPLICATE_H
+
+namespace Eigen { 
+
+/**
+  * \class Replicate
+  * \ingroup Core_Module
+  *
+  * \brief Expression of the multiple replication of a matrix or vector
+  *
+  * \param MatrixType the type of the object we are replicating
+  *
+  * This class represents an expression of the multiple replication of a matrix or vector.
+  * It is the return type of DenseBase::replicate() and most of the time
+  * this is the only way it is used.
+  *
+  * \sa DenseBase::replicate()
+  */
+
+namespace internal {
+template<typename MatrixType,int RowFactor,int ColFactor>
+struct traits<Replicate<MatrixType,RowFactor,ColFactor> >
+ : traits<MatrixType>
+{
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename traits<MatrixType>::StorageKind StorageKind;
+  typedef typename traits<MatrixType>::XprKind XprKind;
+  enum {
+    Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
+  };
+  typedef typename nested<MatrixType,Factor>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+  enum {
+    RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic
+                      ? Dynamic
+                      : RowFactor * MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = ColFactor==Dynamic || int(MatrixType::ColsAtCompileTime)==Dynamic
+                      ? Dynamic
+                      : ColFactor * MatrixType::ColsAtCompileTime,
+   //FIXME we don't propagate the max sizes !!!
+    MaxRowsAtCompileTime = RowsAtCompileTime,
+    MaxColsAtCompileTime = ColsAtCompileTime,
+    IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1
+               : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0
+               : (MatrixType::Flags & RowMajorBit) ? 1 : 0,
+    Flags = (_MatrixTypeNested::Flags & HereditaryBits & ~RowMajorBit) | (IsRowMajor ? RowMajorBit : 0),
+    CoeffReadCost = _MatrixTypeNested::CoeffReadCost
+  };
+};
+}
+
+template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
+  : public internal::dense_xpr_base< Replicate<MatrixType,RowFactor,ColFactor> >::type
+{
+    typedef typename internal::traits<Replicate>::MatrixTypeNested MatrixTypeNested;
+    typedef typename internal::traits<Replicate>::_MatrixTypeNested _MatrixTypeNested;
+  public:
+
+    typedef typename internal::dense_xpr_base<Replicate>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)
+
+    template<typename OriginalMatrixType>
+    inline explicit Replicate(const OriginalMatrixType& a_matrix)
+      : m_matrix(a_matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor)
+    {
+      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
+                          THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
+      eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic);
+    }
+
+    template<typename OriginalMatrixType>
+    inline Replicate(const OriginalMatrixType& a_matrix, Index rowFactor, Index colFactor)
+      : m_matrix(a_matrix), m_rowFactor(rowFactor), m_colFactor(colFactor)
+    {
+      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),
+                          THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)
+    }
+
+    inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }
+    inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }
+
+    inline Scalar coeff(Index rowId, Index colId) const
+    {
+      // try to avoid using modulo; this is a pure optimization strategy
+      const Index actual_row  = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
+                            : RowFactor==1 ? rowId
+                            : rowId%m_matrix.rows();
+      const Index actual_col  = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
+                            : ColFactor==1 ? colId
+                            : colId%m_matrix.cols();
+
+      return m_matrix.coeff(actual_row, actual_col);
+    }
+    template<int LoadMode>
+    inline PacketScalar packet(Index rowId, Index colId) const
+    {
+      const Index actual_row  = internal::traits<MatrixType>::RowsAtCompileTime==1 ? 0
+                            : RowFactor==1 ? rowId
+                            : rowId%m_matrix.rows();
+      const Index actual_col  = internal::traits<MatrixType>::ColsAtCompileTime==1 ? 0
+                            : ColFactor==1 ? colId
+                            : colId%m_matrix.cols();
+
+      return m_matrix.template packet<LoadMode>(actual_row, actual_col);
+    }
+
+    const _MatrixTypeNested& nestedExpression() const
+    { 
+      return m_matrix; 
+    }
+
+  protected:
+    MatrixTypeNested m_matrix;
+    const internal::variable_if_dynamic<Index, RowFactor> m_rowFactor;
+    const internal::variable_if_dynamic<Index, ColFactor> m_colFactor;
+};
+
+/**
+  * \return an expression of the replication of \c *this
+  *
+  * Example: \include MatrixBase_replicate.cpp
+  * Output: \verbinclude MatrixBase_replicate.out
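+  *
+  * A minimal inline sketch (illustrative values):
+  * \code
+  * Vector3i v(1, 2, 3);
+  * MatrixXi m = v.replicate<1,2>(); // 3x2: v placed side by side twice
+  * \endcode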
+  *
+  * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate
+  */
+template<typename Derived>
+template<int RowFactor, int ColFactor>
+const Replicate<Derived,RowFactor,ColFactor>
+DenseBase<Derived>::replicate() const
+{
+  return Replicate<Derived,RowFactor,ColFactor>(derived());
+}
+
+/**
+  * \return an expression of the replication of \c *this
+  *
+  * Example: \include MatrixBase_replicate_int_int.cpp
+  * Output: \verbinclude MatrixBase_replicate_int_int.out
+  *
+  * \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
+  */
+template<typename Derived>
+const typename DenseBase<Derived>::ReplicateReturnType
+DenseBase<Derived>::replicate(Index rowFactor,Index colFactor) const
+{
+  return Replicate<Derived,Dynamic,Dynamic>(derived(),rowFactor,colFactor);
+}
+
+/**
+  * \return an expression of the replication of each column (or row) of \c *this
+  *
+  * Example: \include DirectionWise_replicate_int.cpp
+  * Output: \verbinclude DirectionWise_replicate_int.out
+  *
+  * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate
+  */
+template<typename ExpressionType, int Direction>
+const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
+VectorwiseOp<ExpressionType,Direction>::replicate(Index factor) const
+{
+  return typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType
+          (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_REPLICATE_H
diff --git a/Eigen/src/Core/ReturnByValue.h b/Eigen/src/Core/ReturnByValue.h
new file mode 100644
index 0000000..f635598
--- /dev/null
+++ b/Eigen/src/Core/ReturnByValue.h
@@ -0,0 +1,99 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_RETURNBYVALUE_H
+#define EIGEN_RETURNBYVALUE_H
+
+namespace Eigen {
+
+/** \class ReturnByValue
+  * \ingroup Core_Module
+  *
+  */
+
+namespace internal {
+
+template<typename Derived>
+struct traits<ReturnByValue<Derived> >
+  : public traits<typename traits<Derived>::ReturnType>
+{
+  enum {
+    // We're disabling the DirectAccess because e.g. the constructor of
+    // the Block-with-DirectAccess expression requires to have a coeffRef method.
+    // Also, we don't want to have to implement the stride stuff.
+    Flags = (traits<typename traits<Derived>::ReturnType>::Flags
+             | EvalBeforeNestingBit) & ~DirectAccessBit
+  };
+};
+
+/* The ReturnByValue object doesn't even have a coeff() method.
+ * So the only way that nesting it in an expression can work is by evaluating it into a plain matrix.
+ * So internal::nested always gives the plain return matrix type.
+ *
+ * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ??
+ */
+template<typename Derived,int n,typename PlainObject>
+struct nested<ReturnByValue<Derived>, n, PlainObject>
+{
+  typedef typename traits<Derived>::ReturnType type;
+};
+
+} // end namespace internal
+
+template<typename Derived> class ReturnByValue
+  : internal::no_assignment_operator, public internal::dense_xpr_base< ReturnByValue<Derived> >::type
+{
+  public:
+    typedef typename internal::traits<Derived>::ReturnType ReturnType;
+
+    typedef typename internal::dense_xpr_base<ReturnByValue>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue)
+
+    template<typename Dest>
+    inline void evalTo(Dest& dst) const
+    { static_cast<const Derived*>(this)->evalTo(dst); }
+    inline Index rows() const { return static_cast<const Derived*>(this)->rows(); }
+    inline Index cols() const { return static_cast<const Derived*>(this)->cols(); }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+#define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT
+    class Unusable{
+      Unusable(const Unusable&) {}
+      Unusable& operator=(const Unusable&) {return *this;}
+    };
+    const Unusable& coeff(Index) const { return *reinterpret_cast<const Unusable*>(this); }
+    const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }
+    Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
+    Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }
+    template<int LoadMode>  Unusable& packet(Index) const;
+    template<int LoadMode>  Unusable& packet(Index, Index) const;
+#endif
+};
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
+{
+  other.evalTo(derived());
+  return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::lazyAssign(const ReturnByValue<OtherDerived>& other)
+{
+  other.evalTo(derived());
+  return derived();
+}
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_RETURNBYVALUE_H
diff --git a/Eigen/src/Core/Reverse.h b/Eigen/src/Core/Reverse.h
new file mode 100644
index 0000000..e30ae3d
--- /dev/null
+++ b/Eigen/src/Core/Reverse.h
@@ -0,0 +1,224 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Ricard Marxer <email@ricardmarxer.com>
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REVERSE_H
+#define EIGEN_REVERSE_H
+
+namespace Eigen { 
+
+/** \class Reverse
+  * \ingroup Core_Module
+  *
+  * \brief Expression of the reverse of a vector or matrix
+  *
+  * \param MatrixType the type of the object of which we are taking the reverse
+  *
+  * This class represents an expression of the reverse of a vector or matrix.
+  * It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse()
+  * and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::reverse(), VectorwiseOp::reverse()
+  */
+
+namespace internal {
+
+template<typename MatrixType, int Direction>
+struct traits<Reverse<MatrixType, Direction> >
+ : traits<MatrixType>
+{
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename traits<MatrixType>::StorageKind StorageKind;
+  typedef typename traits<MatrixType>::XprKind XprKind;
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+  enum {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+
+    // let's enable LinearAccess only with vectorization because of the product overhead
+    LinearAccess = ( (Direction==BothDirections) && (int(_MatrixTypeNested::Flags)&PacketAccessBit) )
+                 ? LinearAccessBit : 0,
+
+    Flags = int(_MatrixTypeNested::Flags) & (HereditaryBits | LvalueBit | PacketAccessBit | LinearAccess),
+
+    CoeffReadCost = _MatrixTypeNested::CoeffReadCost
+  };
+};
+
+template<typename PacketScalar, bool ReversePacket> struct reverse_packet_cond
+{
+  static inline PacketScalar run(const PacketScalar& x) { return preverse(x); }
+};
+
+template<typename PacketScalar> struct reverse_packet_cond<PacketScalar,false>
+{
+  static inline PacketScalar run(const PacketScalar& x) { return x; }
+};
+
+} // end namespace internal 
+
+template<typename MatrixType, int Direction> class Reverse
+  : public internal::dense_xpr_base< Reverse<MatrixType, Direction> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<Reverse>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Reverse)
+    using Base::IsRowMajor;
+
+    // next line is necessary because otherwise the const version of operator()
+    // is hidden by the non-const version defined in this file
+    using Base::operator(); 
+
+  protected:
+    enum {
+      PacketSize = internal::packet_traits<Scalar>::size,
+      IsColMajor = !IsRowMajor,
+      ReverseRow = (Direction == Vertical)   || (Direction == BothDirections),
+      ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
+      OffsetRow  = ReverseRow && IsColMajor ? PacketSize : 1,
+      OffsetCol  = ReverseCol && IsRowMajor ? PacketSize : 1,
+      ReversePacket = (Direction == BothDirections)
+                    || ((Direction == Vertical)   && IsColMajor)
+                    || ((Direction == Horizontal) && IsRowMajor)
+    };
+    typedef internal::reverse_packet_cond<PacketScalar,ReversePacket> reverse_packet;
+  public:
+
+    inline Reverse(const MatrixType& matrix) : m_matrix(matrix) { }
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse)
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+
+    inline Index innerStride() const
+    {
+      return -m_matrix.innerStride();
+    }
+
+    inline Scalar& operator()(Index row, Index col)
+    {
+      eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols());
+      return coeffRef(row, col);
+    }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      return m_matrix.const_cast_derived().coeffRef(ReverseRow ? m_matrix.rows() - row - 1 : row,
+                                                    ReverseCol ? m_matrix.cols() - col - 1 : col);
+    }
+
+    inline CoeffReturnType coeff(Index row, Index col) const
+    {
+      return m_matrix.coeff(ReverseRow ? m_matrix.rows() - row - 1 : row,
+                            ReverseCol ? m_matrix.cols() - col - 1 : col);
+    }
+
+    inline CoeffReturnType coeff(Index index) const
+    {
+      return m_matrix.coeff(m_matrix.size() - index - 1);
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      return m_matrix.const_cast_derived().coeffRef(m_matrix.size() - index - 1);
+    }
+
+    inline Scalar& operator()(Index index)
+    {
+      eigen_assert(index >= 0 && index < m_matrix.size());
+      return coeffRef(index);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index row, Index col) const
+    {
+      return reverse_packet::run(m_matrix.template packet<LoadMode>(
+                                    ReverseRow ? m_matrix.rows() - row - OffsetRow : row,
+                                    ReverseCol ? m_matrix.cols() - col - OffsetCol : col));
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index row, Index col, const PacketScalar& x)
+    {
+      m_matrix.const_cast_derived().template writePacket<LoadMode>(
+                                      ReverseRow ? m_matrix.rows() - row - OffsetRow : row,
+                                      ReverseCol ? m_matrix.cols() - col - OffsetCol : col,
+                                      reverse_packet::run(x));
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index index) const
+    {
+      return internal::preverse(m_matrix.template packet<LoadMode>( m_matrix.size() - index - PacketSize ));
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& x)
+    {
+      m_matrix.const_cast_derived().template writePacket<LoadMode>(m_matrix.size() - index - PacketSize, internal::preverse(x));
+    }
+
+    const typename internal::remove_all<typename MatrixType::Nested>::type& 
+    nestedExpression() const 
+    {
+      return m_matrix;
+    }
+
+  protected:
+    typename MatrixType::Nested m_matrix;
+};
+
+/** \returns an expression of the reverse of *this.
+  *
+  * Example: \include MatrixBase_reverse.cpp
+  * Output: \verbinclude MatrixBase_reverse.out
+  *
+  */
+template<typename Derived>
+inline typename DenseBase<Derived>::ReverseReturnType
+DenseBase<Derived>::reverse()
+{
+  return derived();
+}
+
+/** This is the const version of reverse(). */
+template<typename Derived>
+inline const typename DenseBase<Derived>::ConstReverseReturnType
+DenseBase<Derived>::reverse() const
+{
+  return derived();
+}
+
+/** This is the "in place" version of reverse: it reverses \c *this.
+  *
+  * In most cases it is probably better to simply use the reversed expression
+  * of a matrix. However, when reversing the matrix data itself is really needed,
+  * then this "in-place" version is probably the right choice because it provides
+  * the following additional features:
+  *  - less error prone: doing the same operation with .reverse() requires special care:
+  *    \code m = m.reverse().eval(); \endcode
+  *  - this API makes it possible to avoid creating a temporary (the current implementation creates a temporary, but that could be avoided using swap)
+  *  - it allows future optimizations (cache friendliness, etc.)
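+  *
+  * A minimal sketch (illustrative values):
+  * \code
+  * Vector3f v(1.f, 2.f, 3.f);
+  * v.reverseInPlace(); // v is now (3, 2, 1)
+  * \endcode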
+  *
+  * \sa reverse() */
+template<typename Derived>
+inline void DenseBase<Derived>::reverseInPlace()
+{
+  derived() = derived().reverse().eval();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_REVERSE_H
diff --git a/Eigen/src/Core/Select.h b/Eigen/src/Core/Select.h
new file mode 100644
index 0000000..87993bb
--- /dev/null
+++ b/Eigen/src/Core/Select.h
@@ -0,0 +1,162 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SELECT_H
+#define EIGEN_SELECT_H
+
+namespace Eigen { 
+
+/** \class Select
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a coefficient-wise version of the C++ ternary operator ?:
+  *
+  * \param ConditionMatrixType the type of the \em condition expression which must be a boolean matrix
+  * \param ThenMatrixType the type of the \em then expression
+  * \param ElseMatrixType the type of the \em else expression
+  *
+  * This class represents an expression of a coefficient-wise version of the C++ ternary operator ?:.
+  * It is the return type of DenseBase::select() and most of the time this is the only way it is used.
+  *
+  * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const
+  */
+
+namespace internal {
+template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
+struct traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
+ : traits<ThenMatrixType>
+{
+  typedef typename traits<ThenMatrixType>::Scalar Scalar;
+  typedef Dense StorageKind;
+  typedef typename traits<ThenMatrixType>::XprKind XprKind;
+  typedef typename ConditionMatrixType::Nested ConditionMatrixNested;
+  typedef typename ThenMatrixType::Nested ThenMatrixNested;
+  typedef typename ElseMatrixType::Nested ElseMatrixNested;
+  enum {
+    RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime,
+    Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits,
+    CoeffReadCost = traits<typename remove_all<ConditionMatrixNested>::type>::CoeffReadCost
+                  + EIGEN_SIZE_MAX(traits<typename remove_all<ThenMatrixNested>::type>::CoeffReadCost,
+                                   traits<typename remove_all<ElseMatrixNested>::type>::CoeffReadCost)
+  };
+};
+}
+
+template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
+class Select : internal::no_assignment_operator,
+  public internal::dense_xpr_base< Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<Select>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Select)
+
+    Select(const ConditionMatrixType& a_conditionMatrix,
+           const ThenMatrixType& a_thenMatrix,
+           const ElseMatrixType& a_elseMatrix)
+      : m_condition(a_conditionMatrix), m_then(a_thenMatrix), m_else(a_elseMatrix)
+    {
+      eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());
+      eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());
+    }
+
+    Index rows() const { return m_condition.rows(); }
+    Index cols() const { return m_condition.cols(); }
+
+    const Scalar coeff(Index i, Index j) const
+    {
+      if (m_condition.coeff(i,j))
+        return m_then.coeff(i,j);
+      else
+        return m_else.coeff(i,j);
+    }
+
+    const Scalar coeff(Index i) const
+    {
+      if (m_condition.coeff(i))
+        return m_then.coeff(i);
+      else
+        return m_else.coeff(i);
+    }
+
+    const ConditionMatrixType& conditionMatrix() const
+    {
+      return m_condition;
+    }
+
+    const ThenMatrixType& thenMatrix() const
+    {
+      return m_then;
+    }
+
+    const ElseMatrixType& elseMatrix() const
+    {
+      return m_else;
+    }
+
+  protected:
+    typename ConditionMatrixType::Nested m_condition;
+    typename ThenMatrixType::Nested m_then;
+    typename ElseMatrixType::Nested m_else;
+};
+
+
+/** \returns a matrix where each coefficient (i,j) is equal to \a thenMatrix(i,j)
+  * if \c *this(i,j), and \a elseMatrix(i,j) otherwise.
+  *
+  * Example: \include MatrixBase_select.cpp
+  * Output: \verbinclude MatrixBase_select.out
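+  *
+  * A minimal inline sketch of the common idiom (\c m is assumed to be a MatrixXf):
+  * \code
+  * // clamp negative entries of m to zero
+  * MatrixXf r = (m.array() >= 0).select(m, 0);
+  * \endcode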
+  *
+  * \sa class Select
+  */
+template<typename Derived>
+template<typename ThenDerived,typename ElseDerived>
+inline const Select<Derived,ThenDerived,ElseDerived>
+DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
+                            const DenseBase<ElseDerived>& elseMatrix) const
+{
+  return Select<Derived,ThenDerived,ElseDerived>(derived(), thenMatrix.derived(), elseMatrix.derived());
+}
+
+/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
+  * the \em else expression being a scalar value.
+  *
+  * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
+  */
+template<typename Derived>
+template<typename ThenDerived>
+inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
+DenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,
+                           const typename ThenDerived::Scalar& elseScalar) const
+{
+  return Select<Derived,ThenDerived,typename ThenDerived::ConstantReturnType>(
+    derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar));
+}
+
+/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with
+  * the \em then expression being a scalar value.
+  *
+  * \sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select
+  */
+template<typename Derived>
+template<typename ElseDerived>
+inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
+DenseBase<Derived>::select(const typename ElseDerived::Scalar& thenScalar,
+                           const DenseBase<ElseDerived>& elseMatrix) const
+{
+  return Select<Derived,typename ElseDerived::ConstantReturnType,ElseDerived>(
+    derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELECT_H
diff --git a/Eigen/src/Core/SelfAdjointView.h b/Eigen/src/Core/SelfAdjointView.h
new file mode 100644
index 0000000..6fa7cd1
--- /dev/null
+++ b/Eigen/src/Core/SelfAdjointView.h
@@ -0,0 +1,314 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SELFADJOINTMATRIX_H
+#define EIGEN_SELFADJOINTMATRIX_H
+
+namespace Eigen { 
+
+/** \class SelfAdjointView
+  * \ingroup Core_Module
+  *
+  *
+  * \brief Expression of a selfadjoint matrix from a triangular part of a dense matrix
+  *
+  * \param MatrixType the type of the dense matrix storing the coefficients
+  * \param TriangularPart can be either \c #Lower or \c #Upper
+  *
+  * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
+  * with given dense storage of the coefficients. It is the return type of MatrixBase::selfadjointView()
+  * and most of the time this is the only way that it is used.
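+  *
+  * A minimal sketch:
+  * \code
+  * Matrix3f m = Matrix3f::Random();
+  * // interpret the lower triangle of m as a selfadjoint (symmetric) matrix
+  * Matrix3f s = m.selfadjointView<Lower>();
+  * \endcode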
+  *
+  * \sa class TriangularBase, MatrixBase::selfadjointView()
+  */
+
+namespace internal {
+template<typename MatrixType, unsigned int UpLo>
+struct traits<SelfAdjointView<MatrixType, UpLo> > : traits<MatrixType>
+{
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
+  typedef MatrixType ExpressionType;
+  typedef typename MatrixType::PlainObject DenseMatrixType;
+  enum {
+    Mode = UpLo | SelfAdjoint,
+    Flags =  MatrixTypeNestedCleaned::Flags & (HereditaryBits)
+           & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit)), // FIXME these flags should be preserved
+    CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost
+  };
+};
+}
+
+template <typename Lhs, int LhsMode, bool LhsIsVector,
+          typename Rhs, int RhsMode, bool RhsIsVector>
+struct SelfadjointProductMatrix;
+
+// FIXME could also be called SelfAdjointWrapper to be consistent with DiagonalWrapper ??
+template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
+  : public TriangularBase<SelfAdjointView<MatrixType, UpLo> >
+{
+  public:
+
+    typedef TriangularBase<SelfAdjointView> Base;
+    typedef typename internal::traits<SelfAdjointView>::MatrixTypeNested MatrixTypeNested;
+    typedef typename internal::traits<SelfAdjointView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned;
+
+    /** \brief The type of coefficients in this matrix */
+    typedef typename internal::traits<SelfAdjointView>::Scalar Scalar; 
+
+    typedef typename MatrixType::Index Index;
+
+    enum {
+      Mode = internal::traits<SelfAdjointView>::Mode
+    };
+    typedef typename MatrixType::PlainObject PlainObject;
+
+    inline SelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
+    {}
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+    inline Index outerStride() const { return m_matrix.outerStride(); }
+    inline Index innerStride() const { return m_matrix.innerStride(); }
+
+    /** \sa MatrixBase::coeff()
+      * \warning the coordinates must fit into the referenced triangular part
+      */
+    inline Scalar coeff(Index row, Index col) const
+    {
+      Base::check_coordinates_internal(row, col);
+      return m_matrix.coeff(row, col);
+    }
+
+    /** \sa MatrixBase::coeffRef()
+      * \warning the coordinates must fit into the referenced triangular part
+      */
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      Base::check_coordinates_internal(row, col);
+      return m_matrix.const_cast_derived().coeffRef(row, col);
+    }
+
+    /** \internal */
+    const MatrixTypeNestedCleaned& _expression() const { return m_matrix; }
+
+    const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
+    MatrixTypeNestedCleaned& nestedExpression() { return *const_cast<MatrixTypeNestedCleaned*>(&m_matrix); }
+
+    /** Efficient self-adjoint matrix times vector/matrix product */
+    template<typename OtherDerived>
+    SelfadjointProductMatrix<MatrixType,Mode,false,OtherDerived,0,OtherDerived::IsVectorAtCompileTime>
+    operator*(const MatrixBase<OtherDerived>& rhs) const
+    {
+      return SelfadjointProductMatrix
+              <MatrixType,Mode,false,OtherDerived,0,OtherDerived::IsVectorAtCompileTime>
+              (m_matrix, rhs.derived());
+    }
+
+    /** Efficient vector/matrix times self-adjoint matrix product */
+    template<typename OtherDerived> friend
+    SelfadjointProductMatrix<OtherDerived,0,OtherDerived::IsVectorAtCompileTime,MatrixType,Mode,false>
+    operator*(const MatrixBase<OtherDerived>& lhs, const SelfAdjointView& rhs)
+    {
+      return SelfadjointProductMatrix
+              <OtherDerived,0,OtherDerived::IsVectorAtCompileTime,MatrixType,Mode,false>
+              (lhs.derived(),rhs.m_matrix);
+    }
+
+    /** Perform a symmetric rank 2 update of the selfadjoint matrix \c *this:
+      * \f$ this = this + \alpha u v^* + conj(\alpha) v u^* \f$
+      * \returns a reference to \c *this
+      *
+      * The vectors \a u and \a v \b must be column vectors, however they can be
+      * adjoint expressions without any overhead. Only the meaningful triangular
+      * part of the matrix is updated, the rest is left unchanged.
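+      *
+      * A minimal sketch (illustrative values; for a real scalar type, conj(alpha) equals alpha):
+      * \code
+      * Matrix3f m = Matrix3f::Zero();
+      * Vector3f u = Vector3f::Random(), v = Vector3f::Random();
+      * m.selfadjointView<Lower>().rankUpdate(u, v, 2.f); // m += 2*u*v^T + 2*v*u^T (lower part only)
+      * \endcode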
+      *
+      * \sa rankUpdate(const MatrixBase<DerivedU>&, Scalar)
+      */
+    template<typename DerivedU, typename DerivedV>
+    SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha = Scalar(1));
+
+    /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
+      * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
+      *
+      * \returns a reference to \c *this
+      *
+      * Note that to perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
+      * call this function with u.adjoint().
+      *
+      * \sa rankUpdate(const MatrixBase<DerivedU>&, const MatrixBase<DerivedV>&, Scalar)
+      */
+    template<typename DerivedU>
+    SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
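+
+    // A minimal usage sketch (assuming dynamic-size double matrices) of the two
+    // rankUpdate() overloads declared above:
+    //
+    //   Eigen::MatrixXd A = Eigen::MatrixXd::Zero(3,3);
+    //   Eigen::VectorXd u = Eigen::VectorXd::Random(3), v = Eigen::VectorXd::Random(3);
+    //   A.selfadjointView<Eigen::Lower>().rankUpdate(u, v);   // A += u v^* + v u^*
+    //   A.selfadjointView<Eigen::Lower>().rankUpdate(u, 2.0); // A += 2 u u^*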
+
+/////////// Cholesky module ///////////
+
+    const LLT<PlainObject, UpLo> llt() const;
+    const LDLT<PlainObject, UpLo> ldlt() const;
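+
+    // Hedged sketch (illustrative only): solving A x = b through one of the
+    // Cholesky factorizations declared above, assuming A is positive definite:
+    //
+    //   Eigen::MatrixXd A;  // symmetric positive definite, filled elsewhere
+    //   Eigen::VectorXd b;  // right-hand side, filled elsewhere
+    //   Eigen::VectorXd x = A.selfadjointView<Eigen::Lower>().llt().solve(b);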
+
+/////////// Eigenvalue module ///////////
+
+    /** Real part of #Scalar */
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    /** Return type of eigenvalues() */
+    typedef Matrix<RealScalar, internal::traits<MatrixType>::ColsAtCompileTime, 1> EigenvaluesReturnType;
+
+    EigenvaluesReturnType eigenvalues() const;
+    RealScalar operatorNorm() const;
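+
+    // Illustrative sketch (assuming a dynamic-size double matrix): eigenvalues()
+    // returns the real eigenvalues and operatorNorm() the L2 operator norm,
+    // i.e. the largest eigenvalue in absolute value:
+    //
+    //   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
+    //   A = (A + A.transpose()).eval(); // make it symmetric
+    //   Eigen::VectorXd ev  = A.selfadjointView<Eigen::Lower>().eigenvalues();
+    //   double          nrm = A.selfadjointView<Eigen::Lower>().operatorNorm();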
+    
+    #ifdef EIGEN2_SUPPORT
+    template<typename OtherDerived>
+    SelfAdjointView& operator=(const MatrixBase<OtherDerived>& other)
+    {
+      enum {
+        OtherPart = UpLo == Upper ? StrictlyLower : StrictlyUpper
+      };
+      m_matrix.const_cast_derived().template triangularView<UpLo>() = other;
+      m_matrix.const_cast_derived().template triangularView<OtherPart>() = other.adjoint();
+      return *this;
+    }
+    template<typename OtherMatrixType, unsigned int OtherMode>
+    SelfAdjointView& operator=(const TriangularView<OtherMatrixType, OtherMode>& other)
+    {
+      enum {
+        OtherPart = UpLo == Upper ? StrictlyLower : StrictlyUpper
+      };
+      m_matrix.const_cast_derived().template triangularView<UpLo>() = other.toDenseMatrix();
+      m_matrix.const_cast_derived().template triangularView<OtherPart>() = other.toDenseMatrix().adjoint();
+      return *this;
+    }
+    #endif
+
+  protected:
+    MatrixTypeNested m_matrix;
+};
+
+
+// template<typename OtherDerived, typename MatrixType, unsigned int UpLo>
+// internal::selfadjoint_matrix_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo> >
+// operator*(const MatrixBase<OtherDerived>& lhs, const SelfAdjointView<MatrixType,UpLo>& rhs)
+// {
+//   return internal::matrix_selfadjoint_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo> >(lhs.derived(),rhs);
+// }
+
+// selfadjoint to dense matrix
+
+namespace internal {
+
+template<typename Derived1, typename Derived2, int UnrollCount, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), UnrollCount, ClearOpposite>
+{
+  enum {
+    col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
+    row = (UnrollCount-1) % Derived1::RowsAtCompileTime
+  };
+
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Upper), UnrollCount-1, ClearOpposite>::run(dst, src);
+
+    if(row == col)
+      dst.coeffRef(row, col) = numext::real(src.coeff(row, col));
+    else if(row < col)
+      dst.coeffRef(col, row) = numext::conj(dst.coeffRef(row, col) = src.coeff(row, col));
+  }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Upper, 0, ClearOpposite>
+{
+  static inline void run(Derived1 &, const Derived2 &) {}
+};
+
+template<typename Derived1, typename Derived2, int UnrollCount, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), UnrollCount, ClearOpposite>
+{
+  enum {
+    col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
+    row = (UnrollCount-1) % Derived1::RowsAtCompileTime
+  };
+
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    triangular_assignment_selector<Derived1, Derived2, (SelfAdjoint|Lower), UnrollCount-1, ClearOpposite>::run(dst, src);
+
+    if(row == col)
+      dst.coeffRef(row, col) = numext::real(src.coeff(row, col));
+    else if(row > col)
+      dst.coeffRef(col, row) = numext::conj(dst.coeffRef(row, col) = src.coeff(row, col));
+  }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower, 0, ClearOpposite>
+{
+  static inline void run(Derived1 &, const Derived2 &) {}
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Upper, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      for(Index i = 0; i < j; ++i)
+      {
+        dst.copyCoeff(i, j, src);
+        dst.coeffRef(j,i) = numext::conj(dst.coeff(i,j));
+      }
+      dst.copyCoeff(j, j, src);
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, SelfAdjoint|Lower, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index i = 0; i < dst.rows(); ++i)
+    {
+      for(Index j = 0; j < i; ++j)
+      {
+        dst.copyCoeff(i, j, src);
+        dst.coeffRef(j,i) = numext::conj(dst.coeff(i,j));
+      }
+      dst.copyCoeff(i, i, src);
+    }
+  }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of MatrixBase methods
+***************************************************************************/
+
+template<typename Derived>
+template<unsigned int UpLo>
+typename MatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type
+MatrixBase<Derived>::selfadjointView() const
+{
+  return derived();
+}
+
+template<typename Derived>
+template<unsigned int UpLo>
+typename MatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type
+MatrixBase<Derived>::selfadjointView()
+{
+  return derived();
+}
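+
+// Hedged usage note (not from the original file): selfadjointView() copies
+// nothing; it returns a view interpreting one triangular half of the matrix as
+// a self-adjoint matrix. Assigning the view to a dense matrix goes through the
+// triangular_assignment_selector specializations above, e.g.:
+//
+//   Eigen::Matrix3d m = Eigen::Matrix3d::Random();
+//   // reads only the upper triangle of m; the lower one is filled by symmetry
+//   Eigen::Matrix3d s = m.selfadjointView<Eigen::Upper>();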
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELFADJOINTMATRIX_H
diff --git a/Eigen/src/Core/SelfCwiseBinaryOp.h b/Eigen/src/Core/SelfCwiseBinaryOp.h
new file mode 100644
index 0000000..22f3047
--- /dev/null
+++ b/Eigen/src/Core/SelfCwiseBinaryOp.h
@@ -0,0 +1,197 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SELFCWISEBINARYOP_H
+#define EIGEN_SELFCWISEBINARYOP_H
+
+namespace Eigen { 
+
+/** \class SelfCwiseBinaryOp
+  * \ingroup Core_Module
+  *
+  * \internal
+  *
+  * \brief Internal helper class for optimizing operators like +=, -=
+  *
+  * This is a pseudo expression class re-implementing the copyCoeff/copyPacket
+  * methods to directly perform +=/-= operations in an optimal way. In particular,
+  * this makes it possible to ensure that the input/output data are loaded only once using
+  * aligned packet loads.
+  *
+  * \sa class SwapWrapper for a similar trick.
+  */
+
+namespace internal {
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct traits<SelfCwiseBinaryOp<BinaryOp,Lhs,Rhs> >
+  : traits<CwiseBinaryOp<BinaryOp,Lhs,Rhs> >
+{
+  enum {
+    // Note that it is still a good idea to preserve the DirectAccessBit
+    // so that assign can correctly align the data.
+    Flags = traits<CwiseBinaryOp<BinaryOp,Lhs,Rhs> >::Flags | (Lhs::Flags&DirectAccessBit) | (Lhs::Flags&LvalueBit),
+    OuterStrideAtCompileTime = Lhs::OuterStrideAtCompileTime,
+    InnerStrideAtCompileTime = Lhs::InnerStrideAtCompileTime
+  };
+};
+}
+
+template<typename BinaryOp, typename Lhs, typename Rhs> class SelfCwiseBinaryOp
+  : public internal::dense_xpr_base< SelfCwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<SelfCwiseBinaryOp>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(SelfCwiseBinaryOp)
+
+    typedef typename internal::packet_traits<Scalar>::type Packet;
+
+    inline SelfCwiseBinaryOp(Lhs& xpr, const BinaryOp& func = BinaryOp()) : m_matrix(xpr), m_functor(func) {}
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+    inline Index outerStride() const { return m_matrix.outerStride(); }
+    inline Index innerStride() const { return m_matrix.innerStride(); }
+    inline const Scalar* data() const { return m_matrix.data(); }
+
+    // note that this function is needed by assign to correctly align loads/stores
+    // TODO make Assign use .data()
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(Lhs)
+      return m_matrix.const_cast_derived().coeffRef(row, col);
+    }
+    inline const Scalar& coeffRef(Index row, Index col) const
+    {
+      return m_matrix.coeffRef(row, col);
+    }
+
+    // note that this function is needed by assign to correctly align loads/stores
+    // TODO make Assign use .data()
+    inline Scalar& coeffRef(Index index)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(Lhs)
+      return m_matrix.const_cast_derived().coeffRef(index);
+    }
+    inline const Scalar& coeffRef(Index index) const
+    {
+      return m_matrix.const_cast_derived().coeffRef(index);
+    }
+
+    template<typename OtherDerived>
+    void copyCoeff(Index row, Index col, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(row >= 0 && row < rows()
+                         && col >= 0 && col < cols());
+      Scalar& tmp = m_matrix.coeffRef(row,col);
+      tmp = m_functor(tmp, _other.coeff(row,col));
+    }
+
+    template<typename OtherDerived>
+    void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(index >= 0 && index < m_matrix.size());
+      Scalar& tmp = m_matrix.coeffRef(index);
+      tmp = m_functor(tmp, _other.coeff(index));
+    }
+
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    void copyPacket(Index row, Index col, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(row >= 0 && row < rows()
+                        && col >= 0 && col < cols());
+      m_matrix.template writePacket<StoreMode>(row, col,
+        m_functor.packetOp(m_matrix.template packet<StoreMode>(row, col),_other.template packet<LoadMode>(row, col)) );
+    }
+
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    void copyPacket(Index index, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(index >= 0 && index < m_matrix.size());
+      m_matrix.template writePacket<StoreMode>(index,
+        m_functor.packetOp(m_matrix.template packet<StoreMode>(index),_other.template packet<LoadMode>(index)) );
+    }
+
+    // reimplement lazyAssign to handle complex *= real
+    // see CwiseBinaryOp ctor for details
+    template<typename RhsDerived>
+    EIGEN_STRONG_INLINE SelfCwiseBinaryOp& lazyAssign(const DenseBase<RhsDerived>& rhs)
+    {
+      EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs,RhsDerived)
+      EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename RhsDerived::Scalar);
+      
+    #ifdef EIGEN_DEBUG_ASSIGN
+      internal::assign_traits<SelfCwiseBinaryOp, RhsDerived>::debug();
+    #endif
+      eigen_assert(rows() == rhs.rows() && cols() == rhs.cols());
+      internal::assign_impl<SelfCwiseBinaryOp, RhsDerived>::run(*this,rhs.derived());
+    #ifndef EIGEN_NO_DEBUG
+      this->checkTransposeAliasing(rhs.derived());
+    #endif
+      return *this;
+    }
+    
+    // overloaded to honor evaluation of special matrices
+    // maybe another solution would be to not use SelfCwiseBinaryOp
+    // in the first place...
+    SelfCwiseBinaryOp& operator=(const Rhs& _rhs)
+    {
+      typename internal::nested<Rhs>::type rhs(_rhs);
+      return Base::operator=(rhs);
+    }
+
+    Lhs& expression() const 
+    { 
+      return m_matrix;
+    }
+
+    const BinaryOp& functor() const 
+    { 
+      return m_functor;
+    }
+
+  protected:
+    Lhs& m_matrix;
+    const BinaryOp& m_functor;
+
+  private:
+    SelfCwiseBinaryOp& operator=(const SelfCwiseBinaryOp&);
+};
+
+template<typename Derived>
+inline Derived& DenseBase<Derived>::operator*=(const Scalar& other)
+{
+  typedef typename Derived::PlainObject PlainObject;
+  SelfCwiseBinaryOp<internal::scalar_product_op<Scalar>, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
+  tmp = PlainObject::Constant(rows(),cols(),other);
+  return derived();
+}
+
+template<typename Derived>
+inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
+{
+  typedef typename internal::conditional<NumTraits<Scalar>::IsInteger,
+                                        internal::scalar_quotient_op<Scalar>,
+                                        internal::scalar_product_op<Scalar> >::type BinOp;
+  typedef typename Derived::PlainObject PlainObject;
+  SelfCwiseBinaryOp<BinOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
+  Scalar actual_other;
+  if(NumTraits<Scalar>::IsInteger)  actual_other = other;
+  else                              actual_other = Scalar(1)/other;
+  tmp = PlainObject::Constant(rows(),cols(), actual_other);
+  return derived();
+}
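+
+// Behavioural note with a hedged example: for non-integer scalar types the
+// division above is rewritten as a multiplication by the reciprocal, so
+//
+//   Eigen::VectorXf v = Eigen::VectorXf::Ones(8);
+//   v /= 4.0f; // internally performed as v *= 0.25f
+//
+// whereas for integer scalars a true coefficient-wise quotient is used.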
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELFCWISEBINARYOP_H
diff --git a/Eigen/src/Core/SolveTriangular.h b/Eigen/src/Core/SolveTriangular.h
new file mode 100644
index 0000000..ef17f28
--- /dev/null
+++ b/Eigen/src/Core/SolveTriangular.h
@@ -0,0 +1,260 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SOLVETRIANGULAR_H
+#define EIGEN_SOLVETRIANGULAR_H
+
+namespace Eigen { 
+
+namespace internal {
+
+// Forward declarations:
+// The following two routines are implemented in the products/TriangularSolver*.h files
+template<typename LhsScalar, typename RhsScalar, typename Index, int Side, int Mode, bool Conjugate, int StorageOrder>
+struct triangular_solve_vector;
+
+template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder>
+struct triangular_solve_matrix;
+
+// small helper struct extracting some traits on the underlying solver operation
+template<typename Lhs, typename Rhs, int Side>
+class trsolve_traits
+{
+  private:
+    enum {
+      RhsIsVectorAtCompileTime = (Side==OnTheLeft ? Rhs::ColsAtCompileTime : Rhs::RowsAtCompileTime)==1
+    };
+  public:
+    enum {
+      Unrolling   = (RhsIsVectorAtCompileTime && Rhs::SizeAtCompileTime != Dynamic && Rhs::SizeAtCompileTime <= 8)
+                  ? CompleteUnrolling : NoUnrolling,
+      RhsVectors  = RhsIsVectorAtCompileTime ? 1 : Dynamic
+    };
+};
+
+template<typename Lhs, typename Rhs,
+  int Side, // can be OnTheLeft/OnTheRight
+  int Mode, // can be Upper/Lower | UnitDiag
+  int Unrolling = trsolve_traits<Lhs,Rhs,Side>::Unrolling,
+  int RhsVectors = trsolve_traits<Lhs,Rhs,Side>::RhsVectors
+  >
+struct triangular_solver_selector;
+
+template<typename Lhs, typename Rhs, int Side, int Mode>
+struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,1>
+{
+  typedef typename Lhs::Scalar LhsScalar;
+  typedef typename Rhs::Scalar RhsScalar;
+  typedef blas_traits<Lhs> LhsProductTraits;
+  typedef typename LhsProductTraits::ExtractType ActualLhsType;
+  typedef Map<Matrix<RhsScalar,Dynamic,1>, Aligned> MappedRhs;
+  static void run(const Lhs& lhs, Rhs& rhs)
+  {
+    ActualLhsType actualLhs = LhsProductTraits::extract(lhs);
+
+    // FIXME find a way to allow an inner stride if packet_traits<Scalar>::size==1
+
+    bool useRhsDirectly = Rhs::InnerStrideAtCompileTime==1 || rhs.innerStride()==1;
+
+    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhs,rhs.size(),
+                                                  (useRhsDirectly ? rhs.data() : 0));
+                                                  
+    if(!useRhsDirectly)
+      MappedRhs(actualRhs,rhs.size()) = rhs;
+
+    triangular_solve_vector<LhsScalar, RhsScalar, typename Lhs::Index, Side, Mode, LhsProductTraits::NeedToConjugate,
+                            (int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor>
+      ::run(actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhs);
+
+    if(!useRhsDirectly)
+      rhs = MappedRhs(actualRhs, rhs.size());
+  }
+};
+
+// the rhs is a matrix
+template<typename Lhs, typename Rhs, int Side, int Mode>
+struct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,Dynamic>
+{
+  typedef typename Rhs::Scalar Scalar;
+  typedef typename Rhs::Index Index;
+  typedef blas_traits<Lhs> LhsProductTraits;
+  typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType;
+
+  static void run(const Lhs& lhs, Rhs& rhs)
+  {
+    typename internal::add_const_on_value_type<ActualLhsType>::type actualLhs = LhsProductTraits::extract(lhs);
+
+    const Index size = lhs.rows();
+    const Index othersize = Side==OnTheLeft? rhs.cols() : rhs.rows();
+
+    typedef internal::gemm_blocking_space<(Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
+              Rhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxRowsAtCompileTime,4> BlockingType;
+
+    BlockingType blocking(rhs.rows(), rhs.cols(), size);
+
+    triangular_solve_matrix<Scalar,Index,Side,Mode,LhsProductTraits::NeedToConjugate,(int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor,
+                               (Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor>
+      ::run(size, othersize, &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride(), blocking);
+  }
+};
+
+/***************************************************************************
+* meta-unrolling implementation
+***************************************************************************/
+
+template<typename Lhs, typename Rhs, int Mode, int Index, int Size,
+         bool Stop = Index==Size>
+struct triangular_solver_unroller;
+
+template<typename Lhs, typename Rhs, int Mode, int Index, int Size>
+struct triangular_solver_unroller<Lhs,Rhs,Mode,Index,Size,false> {
+  enum {
+    IsLower = ((Mode&Lower)==Lower),
+    I = IsLower ? Index : Size - Index - 1,
+    S = IsLower ? 0     : I+1
+  };
+  static void run(const Lhs& lhs, Rhs& rhs)
+  {
+    if (Index>0)
+      rhs.coeffRef(I) -= lhs.row(I).template segment<Index>(S).transpose()
+                         .cwiseProduct(rhs.template segment<Index>(S)).sum();
+
+    if(!(Mode & UnitDiag))
+      rhs.coeffRef(I) /= lhs.coeff(I,I);
+
+    triangular_solver_unroller<Lhs,Rhs,Mode,Index+1,Size>::run(lhs,rhs);
+  }
+};
+
+template<typename Lhs, typename Rhs, int Mode, int Index, int Size>
+struct triangular_solver_unroller<Lhs,Rhs,Mode,Index,Size,true> {
+  static void run(const Lhs&, Rhs&) {}
+};
+
+template<typename Lhs, typename Rhs, int Mode>
+struct triangular_solver_selector<Lhs,Rhs,OnTheLeft,Mode,CompleteUnrolling,1> {
+  static void run(const Lhs& lhs, Rhs& rhs)
+  { triangular_solver_unroller<Lhs,Rhs,Mode,0,Rhs::SizeAtCompileTime>::run(lhs,rhs); }
+};
+
+template<typename Lhs, typename Rhs, int Mode>
+struct triangular_solver_selector<Lhs,Rhs,OnTheRight,Mode,CompleteUnrolling,1> {
+  static void run(const Lhs& lhs, Rhs& rhs)
+  {
+    Transpose<const Lhs> trLhs(lhs);
+    Transpose<Rhs> trRhs(rhs);
+    
+    triangular_solver_unroller<Transpose<const Lhs>,Transpose<Rhs>,
+                              ((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag),
+                              0,Rhs::SizeAtCompileTime>::run(trLhs,trRhs);
+  }
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* TriangularView methods
+***************************************************************************/
+
+/** "in-place" version of TriangularView::solve() where the result is written in \a other
+  *
+  * \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
+  * This function will const_cast it, so constness isn't honored here.
+  *
+  * See TriangularView::solve() for the details.
+  */
+template<typename MatrixType, unsigned int Mode>
+template<int Side, typename OtherDerived>
+void TriangularView<MatrixType,Mode>::solveInPlace(const MatrixBase<OtherDerived>& _other) const
+{
+  OtherDerived& other = _other.const_cast_derived();
+  eigen_assert( cols() == rows() && ((Side==OnTheLeft && cols() == other.rows()) || (Side==OnTheRight && cols() == other.cols())) );
+  eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
+
+  enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit  && OtherDerived::IsVectorAtCompileTime };
+  typedef typename internal::conditional<copy,
+    typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+  OtherCopy otherCopy(other);
+
+  internal::triangular_solver_selector<MatrixType, typename internal::remove_reference<OtherCopy>::type,
+    Side, Mode>::run(nestedExpression(), otherCopy);
+
+  if (copy)
+    other = otherCopy;
+}
+
+/** \returns the product of the inverse of \c *this with \a other, \a *this being triangular.
+  *
+  * This function computes the inverse-matrix matrix product inverse(\c *this) * \a other if
+  * \a Side==OnTheLeft (the default), or the right-inverse-multiply  \a other * inverse(\c *this) if
+  * \a Side==OnTheRight.
+  *
+  * The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the
+  * diagonal must be non-zero). It works as a forward (resp. backward) substitution if \c *this
+  * is a lower (resp. upper) triangular matrix.
+  *
+  * Example: \include MatrixBase_marked.cpp
+  * Output: \verbinclude MatrixBase_marked.out
+  *
+  * This function returns an expression of the inverse-multiply and can work in-place if it is assigned
+  * to the same matrix or vector \a other.
+  *
+  * For users coming from BLAS, this function (and more specifically solveInPlace()) offers
+  * all the operations supported by the \c *TRSV and \c *TRSM BLAS routines.
+  *
+  * \sa TriangularView::solveInPlace()
+  */
+template<typename Derived, unsigned int Mode>
+template<int Side, typename Other>
+const internal::triangular_solve_retval<Side,TriangularView<Derived,Mode>,Other>
+TriangularView<Derived,Mode>::solve(const MatrixBase<Other>& other) const
+{
+  return internal::triangular_solve_retval<Side,TriangularView,Other>(*this, other.derived());
+}
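+
+// A minimal usage sketch (assuming dynamic-size double matrices) of the
+// triangular solver entry points implemented above:
+//
+//   Eigen::MatrixXd L = Eigen::MatrixXd::Random(3,3); // only its lower part is read below
+//   Eigen::VectorXd b = Eigen::VectorXd::Random(3);
+//   Eigen::VectorXd x = L.triangularView<Eigen::Lower>().solve(b); // solves L x = b
+//   L.triangularView<Eigen::Lower>().solveInPlace(b);              // overwrites b with x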
+
+namespace internal {
+
+
+template<int Side, typename TriangularType, typename Rhs>
+struct traits<triangular_solve_retval<Side, TriangularType, Rhs> >
+{
+  typedef typename internal::plain_matrix_type_column_major<Rhs>::type ReturnType;
+};
+
+template<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval
+ : public ReturnByValue<triangular_solve_retval<Side, TriangularType, Rhs> >
+{
+  typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned;
+  typedef ReturnByValue<triangular_solve_retval> Base;
+  typedef typename Base::Index Index;
+
+  triangular_solve_retval(const TriangularType& tri, const Rhs& rhs)
+    : m_triangularMatrix(tri), m_rhs(rhs)
+  {}
+
+  inline Index rows() const { return m_rhs.rows(); }
+  inline Index cols() const { return m_rhs.cols(); }
+
+  template<typename Dest> inline void evalTo(Dest& dst) const
+  {
+    if(!(is_same<RhsNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_rhs)))
+      dst = m_rhs;
+    m_triangularMatrix.template solveInPlace<Side>(dst);
+  }
+
+  protected:
+    const TriangularType& m_triangularMatrix;
+    typename Rhs::Nested m_rhs;
+};
+
+} // namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SOLVETRIANGULAR_H
diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h
new file mode 100644
index 0000000..389d942
--- /dev/null
+++ b/Eigen/src/Core/StableNorm.h
@@ -0,0 +1,203 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STABLENORM_H
+#define EIGEN_STABLENORM_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename ExpressionType, typename Scalar>
+inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
+{
+  using std::max;
+  Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
+  
+  if (maxCoeff>scale)
+  {
+    ssq = ssq * numext::abs2(scale/maxCoeff);
+    Scalar tmp = Scalar(1)/maxCoeff;
+    if(tmp > NumTraits<Scalar>::highest())
+    {
+      invScale = NumTraits<Scalar>::highest();
+      scale = Scalar(1)/invScale;
+    }
+    else
+    {
+      scale = maxCoeff;
+      invScale = tmp;
+    }
+  }
+  
+  // TODO if the maxCoeff is much much smaller than the current scale,
+  // then we can neglect this sub vector
+  if(scale>Scalar(0)) // if scale==0, then bl is 0 
+    ssq += (bl*invScale).squaredNorm();
+}
+
+template<typename Derived>
+inline typename NumTraits<typename traits<Derived>::Scalar>::Real
+blueNorm_impl(const EigenBase<Derived>& _vec)
+{
+  typedef typename Derived::RealScalar RealScalar;  
+  typedef typename Derived::Index Index;
+  using std::pow;
+  using std::min;
+  using std::max;
+  using std::sqrt;
+  using std::abs;
+  const Derived& vec(_vec.derived());
+  static bool initialized = false;
+  static RealScalar b1, b2, s1m, s2m, overfl, rbig, relerr;
+  if(!initialized)
+  {
+    int ibeta, it, iemin, iemax, iexp;
+    RealScalar eps;
+    // This program calculates the machine-dependent constants
+    // b1, b2, s1m, s2m, relerr, overfl
+    // from the "basic" machine-dependent numbers
+    // nbig, ibeta, it, iemin, iemax, rbig.
+    // The following define the basic machine-dependent constants.
+    // For portability, the PORT subprograms "i1mach" and "r1mach"
+    // are used. For any specific computer, each of the assignment
+    // statements can be replaced.
+    ibeta = std::numeric_limits<RealScalar>::radix;                 // base for floating-point numbers
+    it    = std::numeric_limits<RealScalar>::digits;                // number of base-beta digits in mantissa
+    iemin = std::numeric_limits<RealScalar>::min_exponent;          // minimum exponent
+    iemax = std::numeric_limits<RealScalar>::max_exponent;          // maximum exponent
+    rbig  = (std::numeric_limits<RealScalar>::max)();               // largest floating-point number
+
+    iexp  = -((1-iemin)/2);
+    b1    = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // lower boundary of midrange
+    iexp  = (iemax + 1 - it)/2;
+    b2    = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // upper boundary of midrange
+
+    iexp  = (2-iemin)/2;
+    s1m   = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // scaling factor for lower range
+    iexp  = - ((iemax+it)/2);
+    s2m   = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // scaling factor for upper range
+
+    overfl  = rbig*s2m;                                             // overflow boundary for abig
+    eps     = RealScalar(pow(double(ibeta), 1-it));
+    relerr  = sqrt(eps);                                            // tolerance for neglecting asml
+    initialized = true;
+  }
+  Index n = vec.size();
+  RealScalar ab2 = b2 / RealScalar(n);
+  RealScalar asml = RealScalar(0);
+  RealScalar amed = RealScalar(0);
+  RealScalar abig = RealScalar(0);
+  for(typename Derived::InnerIterator it(vec, 0); it; ++it)
+  {
+    RealScalar ax = abs(it.value());
+    if(ax > ab2)     abig += numext::abs2(ax*s2m);
+    else if(ax < b1) asml += numext::abs2(ax*s1m);
+    else             amed += numext::abs2(ax);
+  }
+  if(abig > RealScalar(0))
+  {
+    abig = sqrt(abig);
+    if(abig > overfl)
+    {
+      return rbig;
+    }
+    if(amed > RealScalar(0))
+    {
+      abig = abig/s2m;
+      amed = sqrt(amed);
+    }
+    else
+      return abig/s2m;
+  }
+  else if(asml > RealScalar(0))
+  {
+    if (amed > RealScalar(0))
+    {
+      abig = sqrt(amed);
+      amed = sqrt(asml) / s1m;
+    }
+    else
+      return sqrt(asml)/s1m;
+  }
+  else
+    return sqrt(amed);
+  asml = (min)(abig, amed);
+  abig = (max)(abig, amed);
+  if(asml <= abig*relerr)
+    return abig;
+  else
+    return abig * sqrt(RealScalar(1) + numext::abs2(asml/abig));
+}
+
+} // end namespace internal
+
+/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
+  * This version uses a blockwise two-pass algorithm:
+  *  1 - find the absolute largest coefficient \c s
+  *  2 - compute \f$ s \Vert \frac{*this}{s} \Vert \f$ in a standard way
+  *
+  * For architecture/scalar types supporting vectorization, this version
+  * is faster than blueNorm(). Otherwise the blueNorm() is much faster.
+  *
+  * \sa norm(), blueNorm(), hypotNorm()
+  */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::stableNorm() const
+{
+  using std::min;
+  using std::sqrt;
+  const Index blockSize = 4096;
+  RealScalar scale(0);
+  RealScalar invScale(1);
+  RealScalar ssq(0); // sum of square
+  enum {
+    Alignment = (int(Flags)&DirectAccessBit) || (int(Flags)&AlignedBit) ? 1 : 0
+  };
+  Index n = size();
+  Index bi = internal::first_aligned(derived());
+  if (bi>0)
+    internal::stable_norm_kernel(this->head(bi), ssq, scale, invScale);
+  for (; bi<n; bi+=blockSize)
+    internal::stable_norm_kernel(this->segment(bi,(min)(blockSize, n - bi)).template forceAlignedAccessIf<Alignment>(), ssq, scale, invScale);
+  return scale * sqrt(ssq);
+}
+
+/** \returns the \em l2 norm of \c *this using Blue's algorithm:
+  * A Portable Fortran Program to Find the Euclidean Norm of a Vector,
+  * ACM TOMS, Vol 4, Issue 1, 1978.
+  *
+  * For architecture/scalar types without vectorization, this version
+  * is much faster than stableNorm(). Otherwise the stableNorm() is faster.
+  *
+  * \sa norm(), stableNorm(), hypotNorm()
+  */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::blueNorm() const
+{
+  return internal::blueNorm_impl(*this);
+}
+
+/** \returns the \em l2 norm of \c *this avoiding underflow and overflow.
+  * This version uses a concatenation of hypot() calls, and it is very slow.
+  *
+  * \sa norm(), stableNorm()
+  */
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+MatrixBase<Derived>::hypotNorm() const
+{
+  return this->cwiseAbs().redux(internal::scalar_hypot_op<RealScalar>());
+}
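+
+// Illustrative sketch (not from the original file) contrasting the robust norms
+// above with the plain norm() on a vector whose squared norm underflows:
+//
+//   Eigen::VectorXd v(2);
+//   v << 1e-200, 1e-200;
+//   v.norm();       // 0: the intermediate squaredNorm() underflows in double
+//   v.stableNorm(); // ~1.414e-200, thanks to the blockwise rescaling above
+//   v.blueNorm();   // same value, via Blue's three-accumulator algorithm
+//   v.hypotNorm();  // same value, via repeated hypot() calls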
+
+} // end namespace Eigen
+
+#endif // EIGEN_STABLENORM_H
diff --git a/Eigen/src/Core/Stride.h b/Eigen/src/Core/Stride.h
new file mode 100644
index 0000000..1e3f5fe
--- /dev/null
+++ b/Eigen/src/Core/Stride.h
@@ -0,0 +1,108 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STRIDE_H
+#define EIGEN_STRIDE_H
+
+namespace Eigen { 
+
+/** \class Stride
+  * \ingroup Core_Module
+  *
+  * \brief Holds strides information for Map
+  *
+  * This class holds the strides information for mapping arrays with strides with class Map.
+  *
+  * It holds two values: the inner stride and the outer stride.
+  *
+  * The inner stride is the pointer increment between two consecutive entries within a given row of a
+  * row-major matrix or within a given column of a column-major matrix.
+  *
+  * The outer stride is the pointer increment between two consecutive rows of a row-major matrix or
+  * between two consecutive columns of a column-major matrix.
+  *
+  * These two values can be passed either at compile-time as template parameters, or at runtime as
+  * arguments to the constructor.
+  *
+  * Indeed, this class takes two template parameters:
+  *  \param _OuterStrideAtCompileTime the outer stride, or Dynamic if you want to specify it at runtime.
+  *  \param _InnerStrideAtCompileTime the inner stride, or Dynamic if you want to specify it at runtime.
+  *
+  * Here is an example:
+  * \include Map_general_stride.cpp
+  * Output: \verbinclude Map_general_stride.out
+  *
+  * \sa class InnerStride, class OuterStride, \ref TopicStorageOrders
+  */
+template<int _OuterStrideAtCompileTime, int _InnerStrideAtCompileTime>
+class Stride
+{
+  public:
+    typedef DenseIndex Index;
+    enum {
+      InnerStrideAtCompileTime = _InnerStrideAtCompileTime,
+      OuterStrideAtCompileTime = _OuterStrideAtCompileTime
+    };
+
+    /** Default constructor, for use when strides are fixed at compile time */
+    Stride()
+      : m_outer(OuterStrideAtCompileTime), m_inner(InnerStrideAtCompileTime)
+    {
+      eigen_assert(InnerStrideAtCompileTime != Dynamic && OuterStrideAtCompileTime != Dynamic);
+    }
+
+    /** Constructor allowing the strides to be passed at runtime */
+    Stride(Index outerStride, Index innerStride)
+      : m_outer(outerStride), m_inner(innerStride)
+    {
+      eigen_assert(innerStride>=0 && outerStride>=0);
+    }
+
+    /** Copy constructor */
+    Stride(const Stride& other)
+      : m_outer(other.outer()), m_inner(other.inner())
+    {}
+
+    /** \returns the outer stride */
+    inline Index outer() const { return m_outer.value(); }
+    /** \returns the inner stride */
+    inline Index inner() const { return m_inner.value(); }
+
+  protected:
+    internal::variable_if_dynamic<Index, OuterStrideAtCompileTime> m_outer;
+    internal::variable_if_dynamic<Index, InnerStrideAtCompileTime> m_inner;
+};
+
+/** \brief Convenience specialization of Stride to specify only an inner stride.
+  * See class Map for some examples */
+template<int Value = Dynamic>
+class InnerStride : public Stride<0, Value>
+{
+    typedef Stride<0, Value> Base;
+  public:
+    typedef DenseIndex Index;
+    InnerStride() : Base() {}
+    InnerStride(Index v) : Base(0, v) {}
+};
+
+/** \brief Convenience specialization of Stride to specify only an outer stride.
+  * See class Map for some examples */
+template<int Value = Dynamic>
+class OuterStride : public Stride<Value, 0>
+{
+    typedef Stride<Value, 0> Base;
+  public:
+    typedef DenseIndex Index;
+    OuterStride() : Base() {}
+    OuterStride(Index v) : Base(v,0) {}
+};
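+
+// A minimal usage sketch (assuming a plain C array of floats) showing how the
+// stride classes above are consumed by Map: view a 3x2 column-major matrix
+// whose columns are 6 floats apart and whose entries are 2 floats apart:
+//
+//   float data[12] = {0};
+//   Eigen::Map<Eigen::MatrixXf, 0, Eigen::Stride<6,2> > m(data, 3, 2);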
+
+} // end namespace Eigen
+
+#endif // EIGEN_STRIDE_H
diff --git a/Eigen/src/Core/Swap.h b/Eigen/src/Core/Swap.h
new file mode 100644
index 0000000..bf58bd5
--- /dev/null
+++ b/Eigen/src/Core/Swap.h
@@ -0,0 +1,126 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SWAP_H
+#define EIGEN_SWAP_H
+
+namespace Eigen { 
+
+/** \class SwapWrapper
+  * \ingroup Core_Module
+  *
+  * \internal
+  *
+  * \brief Internal helper class for swapping two expressions
+  */
+namespace internal {
+template<typename ExpressionType>
+struct traits<SwapWrapper<ExpressionType> > : traits<ExpressionType> {};
+}
+
+template<typename ExpressionType> class SwapWrapper
+  : public internal::dense_xpr_base<SwapWrapper<ExpressionType> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<SwapWrapper>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(SwapWrapper)
+    typedef typename internal::packet_traits<Scalar>::type Packet;
+
+    inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {}
+
+    inline Index rows() const { return m_expression.rows(); }
+    inline Index cols() const { return m_expression.cols(); }
+    inline Index outerStride() const { return m_expression.outerStride(); }
+    inline Index innerStride() const { return m_expression.innerStride(); }
+    
+    typedef typename internal::conditional<
+                       internal::is_lvalue<ExpressionType>::value,
+                       Scalar,
+                       const Scalar
+                     >::type ScalarWithConstIfNotLvalue;
+                     
+    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }
+    inline const Scalar* data() const { return m_expression.data(); }
+
+    inline Scalar& coeffRef(Index rowId, Index colId)
+    {
+      return m_expression.const_cast_derived().coeffRef(rowId, colId);
+    }
+
+    inline Scalar& coeffRef(Index index)
+    {
+      return m_expression.const_cast_derived().coeffRef(index);
+    }
+
+    inline Scalar& coeffRef(Index rowId, Index colId) const
+    {
+      return m_expression.coeffRef(rowId, colId);
+    }
+
+    inline Scalar& coeffRef(Index index) const
+    {
+      return m_expression.coeffRef(index);
+    }
+
+    template<typename OtherDerived>
+    void copyCoeff(Index rowId, Index colId, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(rowId >= 0 && rowId < rows()
+                         && colId >= 0 && colId < cols());
+      Scalar tmp = m_expression.coeff(rowId, colId);
+      m_expression.coeffRef(rowId, colId) = _other.coeff(rowId, colId);
+      _other.coeffRef(rowId, colId) = tmp;
+    }
+
+    template<typename OtherDerived>
+    void copyCoeff(Index index, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(index >= 0 && index < m_expression.size());
+      Scalar tmp = m_expression.coeff(index);
+      m_expression.coeffRef(index) = _other.coeff(index);
+      _other.coeffRef(index) = tmp;
+    }
+
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    void copyPacket(Index rowId, Index colId, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(rowId >= 0 && rowId < rows()
+                        && colId >= 0 && colId < cols());
+      Packet tmp = m_expression.template packet<StoreMode>(rowId, colId);
+      m_expression.template writePacket<StoreMode>(rowId, colId,
+        _other.template packet<LoadMode>(rowId, colId)
+      );
+      _other.template writePacket<LoadMode>(rowId, colId, tmp);
+    }
+
+    template<typename OtherDerived, int StoreMode, int LoadMode>
+    void copyPacket(Index index, const DenseBase<OtherDerived>& other)
+    {
+      OtherDerived& _other = other.const_cast_derived();
+      eigen_internal_assert(index >= 0 && index < m_expression.size());
+      Packet tmp = m_expression.template packet<StoreMode>(index);
+      m_expression.template writePacket<StoreMode>(index,
+        _other.template packet<LoadMode>(index)
+      );
+      _other.template writePacket<LoadMode>(index, tmp);
+    }
+
+    ExpressionType& expression() const { return m_expression; }
+
+  protected:
+    ExpressionType& m_expression;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SWAP_H
diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h
new file mode 100644
index 0000000..22096ea
--- /dev/null
+++ b/Eigen/src/Core/Transpose.h
@@ -0,0 +1,419 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRANSPOSE_H
+#define EIGEN_TRANSPOSE_H
+
+namespace Eigen { 
+
+/** \class Transpose
+  * \ingroup Core_Module
+  *
+  * \brief Expression of the transpose of a matrix
+  *
+  * \param MatrixType the type of the object of which we are taking the transpose
+  *
+  * This class represents an expression of the transpose of a matrix.
+  * It is the return type of MatrixBase::transpose() and MatrixBase::adjoint()
+  * and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::transpose(), MatrixBase::adjoint()
+  */
+
+namespace internal {
+template<typename MatrixType>
+struct traits<Transpose<MatrixType> > : traits<MatrixType>
+{
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedPlain;
+  typedef typename traits<MatrixType>::StorageKind StorageKind;
+  typedef typename traits<MatrixType>::XprKind XprKind;
+  enum {
+    RowsAtCompileTime = MatrixType::ColsAtCompileTime,
+    ColsAtCompileTime = MatrixType::RowsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,
+    Flags0 = MatrixTypeNestedPlain::Flags & ~(LvalueBit | NestByRefBit),
+    Flags1 = Flags0 | FlagsLvalueBit,
+    Flags = Flags1 ^ RowMajorBit,
+    CoeffReadCost = MatrixTypeNestedPlain::CoeffReadCost,
+    InnerStrideAtCompileTime = inner_stride_at_compile_time<MatrixType>::ret,
+    OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret
+  };
+};
+}
+
+template<typename MatrixType, typename StorageKind> class TransposeImpl;
+
+template<typename MatrixType> class Transpose
+  : public TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>
+{
+  public:
+
+    typedef typename TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;
+    EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)
+
+    inline Transpose(MatrixType& a_matrix) : m_matrix(a_matrix) {}
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
+
+    inline Index rows() const { return m_matrix.cols(); }
+    inline Index cols() const { return m_matrix.rows(); }
+
+    /** \returns the nested expression */
+    const typename internal::remove_all<typename MatrixType::Nested>::type&
+    nestedExpression() const { return m_matrix; }
+
+    /** \returns the nested expression */
+    typename internal::remove_all<typename MatrixType::Nested>::type&
+    nestedExpression() { return m_matrix.const_cast_derived(); }
+
+  protected:
+    typename MatrixType::Nested m_matrix;
+};
+
+namespace internal {
+
+template<typename MatrixType, bool HasDirectAccess = has_direct_access<MatrixType>::ret>
+struct TransposeImpl_base
+{
+  typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;
+};
+
+template<typename MatrixType>
+struct TransposeImpl_base<MatrixType, false>
+{
+  typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;
+};
+
+} // end namespace internal
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Dense>
+  : public internal::TransposeImpl_base<MatrixType>::type
+{
+  public:
+
+    typedef typename internal::TransposeImpl_base<MatrixType>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl)
+
+    inline Index innerStride() const { return derived().nestedExpression().innerStride(); }
+    inline Index outerStride() const { return derived().nestedExpression().outerStride(); }
+
+    typedef typename internal::conditional<
+                       internal::is_lvalue<MatrixType>::value,
+                       Scalar,
+                       const Scalar
+                     >::type ScalarWithConstIfNotLvalue;
+
+    inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }
+    inline const Scalar* data() const { return derived().nestedExpression().data(); }
+
+    inline ScalarWithConstIfNotLvalue& coeffRef(Index rowId, Index colId)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+      return derived().nestedExpression().const_cast_derived().coeffRef(colId, rowId);
+    }
+
+    inline ScalarWithConstIfNotLvalue& coeffRef(Index index)
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+      return derived().nestedExpression().const_cast_derived().coeffRef(index);
+    }
+
+    inline const Scalar& coeffRef(Index rowId, Index colId) const
+    {
+      return derived().nestedExpression().coeffRef(colId, rowId);
+    }
+
+    inline const Scalar& coeffRef(Index index) const
+    {
+      return derived().nestedExpression().coeffRef(index);
+    }
+
+    inline CoeffReturnType coeff(Index rowId, Index colId) const
+    {
+      return derived().nestedExpression().coeff(colId, rowId);
+    }
+
+    inline CoeffReturnType coeff(Index index) const
+    {
+      return derived().nestedExpression().coeff(index);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index rowId, Index colId) const
+    {
+      return derived().nestedExpression().template packet<LoadMode>(colId, rowId);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index rowId, Index colId, const PacketScalar& x)
+    {
+      derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(colId, rowId, x);
+    }
+
+    template<int LoadMode>
+    inline const PacketScalar packet(Index index) const
+    {
+      return derived().nestedExpression().template packet<LoadMode>(index);
+    }
+
+    template<int LoadMode>
+    inline void writePacket(Index index, const PacketScalar& x)
+    {
+      derived().nestedExpression().const_cast_derived().template writePacket<LoadMode>(index, x);
+    }
+};
+
+/** \returns an expression of the transpose of *this.
+  *
+  * Example: \include MatrixBase_transpose.cpp
+  * Output: \verbinclude MatrixBase_transpose.out
+  *
+  * \warning If you want to replace a matrix by its own transpose, do \b NOT do this:
+  * \code
+  * m = m.transpose(); // bug!!! caused by aliasing effect
+  * \endcode
+  * Instead, use the transposeInPlace() method:
+  * \code
+  * m.transposeInPlace();
+  * \endcode
+  * which gives Eigen good opportunities for optimization, or alternatively you can also do:
+  * \code
+  * m = m.transpose().eval();
+  * \endcode
+  *
+  * \sa transposeInPlace(), adjoint() */
+template<typename Derived>
+inline Transpose<Derived>
+DenseBase<Derived>::transpose()
+{
+  return derived();
+}
+
+/** This is the const version of transpose().
+  *
+  * Make sure you read the warning for transpose() !
+  *
+  * \sa transposeInPlace(), adjoint() */
+template<typename Derived>
+inline typename DenseBase<Derived>::ConstTransposeReturnType
+DenseBase<Derived>::transpose() const
+{
+  return ConstTransposeReturnType(derived());
+}
+
+/** \returns an expression of the adjoint (i.e. conjugate transpose) of *this.
+  *
+  * Example: \include MatrixBase_adjoint.cpp
+  * Output: \verbinclude MatrixBase_adjoint.out
+  *
+  * \warning If you want to replace a matrix by its own adjoint, do \b NOT do this:
+  * \code
+  * m = m.adjoint(); // bug!!! caused by aliasing effect
+  * \endcode
+  * Instead, use the adjointInPlace() method:
+  * \code
+  * m.adjointInPlace();
+  * \endcode
+  * which gives Eigen good opportunities for optimization, or alternatively you can also do:
+  * \code
+  * m = m.adjoint().eval();
+  * \endcode
+  *
+  * \sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */
+template<typename Derived>
+inline const typename MatrixBase<Derived>::AdjointReturnType
+MatrixBase<Derived>::adjoint() const
+{
+  return this->transpose(); // in the complex case, the .conjugate() is implicit here
+                            // due to implicit conversion to return type
+}
+
+/***************************************************************************
+* "in place" transpose implementation
+***************************************************************************/
+
+namespace internal {
+
+template<typename MatrixType,
+  bool IsSquare = (MatrixType::RowsAtCompileTime == MatrixType::ColsAtCompileTime) && MatrixType::RowsAtCompileTime!=Dynamic>
+struct inplace_transpose_selector;
+
+template<typename MatrixType>
+struct inplace_transpose_selector<MatrixType,true> { // square matrix
+  static void run(MatrixType& m) {
+    m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
+  }
+};
+
+template<typename MatrixType>
+struct inplace_transpose_selector<MatrixType,false> { // non square matrix
+  static void run(MatrixType& m) {
+    if (m.rows()==m.cols())
+      m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());
+    else
+      m = m.transpose().eval();
+  }
+};
+
+} // end namespace internal
+
+/** This is the "in place" version of transpose(): it replaces \c *this by its own transpose.
+  * Thus, doing
+  * \code
+  * m.transposeInPlace();
+  * \endcode
+  * has the same effect on m as doing
+  * \code
+  * m = m.transpose().eval();
+  * \endcode
+  * and is faster and also safer because in the latter line of code, forgetting the eval() results
+  * in a bug caused by \ref TopicAliasing "aliasing".
+  *
+  * Notice however that this method is only useful if you want to replace a matrix by its own transpose.
+  * If you just need the transpose of a matrix, use transpose().
+  *
+  * \note if the matrix is not square, then \c *this must be a resizable matrix. 
+  * This excludes (non-square) fixed-size matrices, block-expressions and maps.
+  *
+  * \sa transpose(), adjoint(), adjointInPlace() */
+template<typename Derived>
+inline void DenseBase<Derived>::transposeInPlace()
+{
+  eigen_assert((rows() == cols() || (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic))
+               && "transposeInPlace() called on a non-square non-resizable matrix");
+  internal::inplace_transpose_selector<Derived>::run(derived());
+}
+
+/***************************************************************************
+* "in place" adjoint implementation
+***************************************************************************/
+
+/** This is the "in place" version of adjoint(): it replaces \c *this by its own transpose.
+  * Thus, doing
+  * \code
+  * m.adjointInPlace();
+  * \endcode
+  * has the same effect on m as doing
+  * \code
+  * m = m.adjoint().eval();
+  * \endcode
+  * and is faster and also safer because in the latter line of code, forgetting the eval() results
+  * in a bug caused by aliasing.
+  *
+  * Notice however that this method is only useful if you want to replace a matrix by its own adjoint.
+  * If you just need the adjoint of a matrix, use adjoint().
+  *
+  * \note if the matrix is not square, then \c *this must be a resizable matrix.
+  * This excludes (non-square) fixed-size matrices, block-expressions and maps.
+  *
+  * \sa transpose(), adjoint(), transposeInPlace() */
+template<typename Derived>
+inline void MatrixBase<Derived>::adjointInPlace()
+{
+  derived() = adjoint().eval();
+}
+
+#ifndef EIGEN_NO_DEBUG
+
+// The following is to detect aliasing problems in most common cases.
+
+namespace internal {
+
+template<typename BinOp,typename NestedXpr,typename Rhs>
+struct blas_traits<SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> >
+ : blas_traits<NestedXpr>
+{
+  typedef SelfCwiseBinaryOp<BinOp,NestedXpr,Rhs> XprType;
+  static inline const XprType extract(const XprType& x) { return x; }
+};
+
+template<bool DestIsTransposed, typename OtherDerived>
+struct check_transpose_aliasing_compile_time_selector
+{
+  enum { ret = bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed };
+};
+
+template<bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>
+struct check_transpose_aliasing_compile_time_selector<DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >
+{
+  enum { ret =    bool(blas_traits<DerivedA>::IsTransposed) != DestIsTransposed
+               || bool(blas_traits<DerivedB>::IsTransposed) != DestIsTransposed
+  };
+};
+
+template<typename Scalar, bool DestIsTransposed, typename OtherDerived>
+struct check_transpose_aliasing_run_time_selector
+{
+  static bool run(const Scalar* dest, const OtherDerived& src)
+  {
+    return (bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src));
+  }
+};
+
+template<typename Scalar, bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>
+struct check_transpose_aliasing_run_time_selector<Scalar,DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >
+{
+  static bool run(const Scalar* dest, const CwiseBinaryOp<BinOp,DerivedA,DerivedB>& src)
+  {
+    return ((blas_traits<DerivedA>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.lhs())))
+        || ((blas_traits<DerivedB>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.rhs())));
+  }
+};
+
+// The following selector, checkTransposeAliasing_impl, based on MightHaveTransposeAliasing,
+// exists because ICC emits a warning when the condition controlling the assert is known at
+// compile time. This is actually a good warning: in expressions that don't do any transposing,
+// the condition is known at compile time to be false, and using that, we can avoid generating
+// the code of the assert again and again for all the expressions that don't need it.
+
+template<typename Derived, typename OtherDerived,
+         bool MightHaveTransposeAliasing
+                 = check_transpose_aliasing_compile_time_selector
+                     <blas_traits<Derived>::IsTransposed,OtherDerived>::ret
+        >
+struct checkTransposeAliasing_impl
+{
+    static void run(const Derived& dst, const OtherDerived& other)
+    {
+        eigen_assert((!check_transpose_aliasing_run_time_selector
+                      <typename Derived::Scalar,blas_traits<Derived>::IsTransposed,OtherDerived>
+                      ::run(extract_data(dst), other))
+          && "aliasing detected during transposition, use transposeInPlace() "
+             "or evaluate the rhs into a temporary using .eval()");
+
+    }
+};
+
+template<typename Derived, typename OtherDerived>
+struct checkTransposeAliasing_impl<Derived, OtherDerived, false>
+{
+    static void run(const Derived&, const OtherDerived&)
+    {
+    }
+};
+
+} // end namespace internal
+
+template<typename Derived>
+template<typename OtherDerived>
+void DenseBase<Derived>::checkTransposeAliasing(const OtherDerived& other) const
+{
+    internal::checkTransposeAliasing_impl<Derived, OtherDerived>::run(derived(), other);
+}
+#endif
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRANSPOSE_H
diff --git a/Eigen/src/Core/Transpositions.h b/Eigen/src/Core/Transpositions.h
new file mode 100644
index 0000000..e4ba075
--- /dev/null
+++ b/Eigen/src/Core/Transpositions.h
@@ -0,0 +1,436 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRANSPOSITIONS_H
+#define EIGEN_TRANSPOSITIONS_H
+
+namespace Eigen { 
+
+/** \class Transpositions
+  * \ingroup Core_Module
+  *
+  * \brief Represents a sequence of transpositions (row/column interchange)
+  *
+  * \param SizeAtCompileTime the number of transpositions, or Dynamic
+  * \param MaxSizeAtCompileTime the maximum number of transpositions, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.
+  *
+  * This class represents a permutation transformation as a sequence of \em n transpositions
+  * \f$[T_{n-1} \ldots T_{i} \ldots T_{0}]\f$. It is internally stored as a vector of integers \c indices.
+  * Each transposition \f$ T_{i} \f$ applied on the left of a matrix (\f$ T_{i} M\f$) interchanges
+  * the rows \c i and \c indices[i] of the matrix \c M.
+  * A transposition applied on the right (e.g., \f$ M T_{i}\f$) yields a column interchange.
+  *
+  * Compared to the class PermutationMatrix, such a sequence of transpositions is what is
+  * naturally computed by a decomposition with pivoting, and it is faster to apply in-place.
+  * 
+  * To apply a sequence of transpositions to a matrix, simply use the operator * as in the following example:
+  * \code
+  * Transpositions<Dynamic> tr;
+  * MatrixXf mat;
+  * mat = tr * mat;
+  * \endcode
+  * In this example, we detect that the matrix appears on both sides, and so the transpositions
+  * are applied in-place without any temporary or extra copy.
+  *
+  * \sa class PermutationMatrix
+  */
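+
+// A minimal usage sketch (assuming `using namespace Eigen` and the dynamically-sized
+// variant Transpositions<Dynamic>):
+//
+//   Transpositions<Dynamic> tr(2);
+//   tr[0] = 1; tr[1] = 1;  // T_0 swaps rows 0 and 1; T_1 leaves row 1 in place
+//   MatrixXf A(2,2);
+//   A = tr * A;            // row interchanges, performed in-place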
+
+namespace internal {
+template<typename TranspositionType, typename MatrixType, int Side, bool Transposed=false> struct transposition_matrix_product_retval;
+}
+
+template<typename Derived>
+class TranspositionsBase
+{
+    typedef internal::traits<Derived> Traits;
+    
+  public:
+
+    typedef typename Traits::IndicesType IndicesType;
+    typedef typename IndicesType::Scalar Index;
+
+    Derived& derived() { return *static_cast<Derived*>(this); }
+    const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+    /** Copies the \a other transpositions into \c *this */
+    template<typename OtherDerived>
+    Derived& operator=(const TranspositionsBase<OtherDerived>& other)
+    {
+      indices() = other.indices();
+      return derived();
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    Derived& operator=(const TranspositionsBase& other)
+    {
+      indices() = other.indices();
+      return derived();
+    }
+    #endif
+
+    /** \returns the number of transpositions */
+    inline Index size() const { return indices().size(); }
+
+    /** Direct access to the underlying index vector */
+    inline const Index& coeff(Index i) const { return indices().coeff(i); }
+    /** Direct access to the underlying index vector */
+    inline Index& coeffRef(Index i) { return indices().coeffRef(i); }
+    /** Direct access to the underlying index vector */
+    inline const Index& operator()(Index i) const { return indices()(i); }
+    /** Direct access to the underlying index vector */
+    inline Index& operator()(Index i) { return indices()(i); }
+    /** Direct access to the underlying index vector */
+    inline const Index& operator[](Index i) const { return indices()(i); }
+    /** Direct access to the underlying index vector */
+    inline Index& operator[](Index i) { return indices()(i); }
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return derived().indices(); }
+    /** \returns a reference to the stored array representing the transpositions. */
+    IndicesType& indices() { return derived().indices(); }
+
+    /** Resizes to given size. */
+    inline void resize(int newSize)
+    {
+      indices().resize(newSize);
+    }
+
+    /** Sets \c *this to represent the identity transformation */
+    void setIdentity()
+    {
+      for(int i = 0; i < indices().size(); ++i)
+        coeffRef(i) = i;
+    }
+
+    // FIXME: do we want such methods ?
+    // might be useful when the target matrix expression is complex, e.g.:
+    // object.matrix().block(..,..,..,..) = trans * object.matrix().block(..,..,..,..);
+    /*
+    template<typename MatrixType>
+    void applyForwardToRows(MatrixType& mat) const
+    {
+      for(Index k=0 ; k<size() ; ++k)
+        if(m_indices(k)!=k)
+          mat.row(k).swap(mat.row(m_indices(k)));
+    }
+
+    template<typename MatrixType>
+    void applyBackwardToRows(MatrixType& mat) const
+    {
+      for(Index k=size()-1 ; k>=0 ; --k)
+        if(m_indices(k)!=k)
+          mat.row(k).swap(mat.row(m_indices(k)));
+    }
+    */
+
+    /** \returns the inverse transformation */
+    inline Transpose<TranspositionsBase> inverse() const
+    { return Transpose<TranspositionsBase>(derived()); }
+
+    /** \returns the transpose transformation */
+    inline Transpose<TranspositionsBase> transpose() const
+    { return Transpose<TranspositionsBase>(derived()); }
+
+  protected:
+};
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+struct traits<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
+{
+  typedef IndexType Index;
+  typedef Matrix<Index, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType>
+class Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType> >
+{
+    typedef internal::traits<Transpositions> Traits;
+  public:
+
+    typedef TranspositionsBase<Transpositions> Base;
+    typedef typename Traits::IndicesType IndicesType;
+    typedef typename IndicesType::Scalar Index;
+
+    inline Transpositions() {}
+
+    /** Copy constructor. */
+    template<typename OtherDerived>
+    inline Transpositions(const TranspositionsBase<OtherDerived>& other)
+      : m_indices(other.indices()) {}
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** Standard copy constructor. Defined only to prevent a default copy constructor
+      * from hiding the other templated constructor */
+    inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {}
+    #endif
+
+    /** Generic constructor from expression of the transposition indices. */
+    template<typename Other>
+    explicit inline Transpositions(const MatrixBase<Other>& a_indices) : m_indices(a_indices)
+    {}
+
+    /** Copies the \a other transpositions into \c *this */
+    template<typename OtherDerived>
+    Transpositions& operator=(const TranspositionsBase<OtherDerived>& other)
+    {
+      return Base::operator=(other);
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    Transpositions& operator=(const Transpositions& other)
+    {
+      m_indices = other.m_indices;
+      return *this;
+    }
+    #endif
+
+    /** Constructs an uninitialized sequence of transpositions of given size.
+      */
+    inline Transpositions(Index size) : m_indices(size)
+    {}
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return m_indices; }
+    /** \returns a reference to the stored array representing the transpositions. */
+    IndicesType& indices() { return m_indices; }
+
+  protected:
+
+    IndicesType m_indices;
+};
+
+
+namespace internal {
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int _PacketAccess>
+struct traits<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,_PacketAccess> >
+{
+  typedef IndexType Index;
+  typedef Map<const Matrix<Index,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1>, _PacketAccess> IndicesType;
+};
+}
+
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename IndexType, int PacketAccess>
+class Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess>
+ : public TranspositionsBase<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,IndexType>,PacketAccess> >
+{
+    typedef internal::traits<Map> Traits;
+  public:
+
+    typedef TranspositionsBase<Map> Base;
+    typedef typename Traits::IndicesType IndicesType;
+    typedef typename IndicesType::Scalar Index;
+
+    inline Map(const Index* indicesPtr)
+      : m_indices(indicesPtr)
+    {}
+
+    inline Map(const Index* indicesPtr, Index size)
+      : m_indices(indicesPtr,size)
+    {}
+
+    /** Copies the \a other transpositions into \c *this */
+    template<typename OtherDerived>
+    Map& operator=(const TranspositionsBase<OtherDerived>& other)
+    {
+      return Base::operator=(other);
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    Map& operator=(const Map& other)
+    {
+      m_indices = other.m_indices;
+      return *this;
+    }
+    #endif
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return m_indices; }
+    
+    /** \returns a reference to the stored array representing the transpositions. */
+    IndicesType& indices() { return m_indices; }
+
+  protected:
+
+    IndicesType m_indices;
+};
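+
+// A minimal sketch of the Map specialization above, wrapping user-owned index storage
+// (assuming `using namespace Eigen` and the default IndexType, int):
+//
+//   int indices[3] = {1, 2, 2};
+//   Map<Transpositions<Dynamic> > tr(indices, 3);
+//   MatrixXf A(3,3);
+//   A = tr * A;            // same semantics as a plain Transpositions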
+
+namespace internal {
+template<typename _IndicesType>
+struct traits<TranspositionsWrapper<_IndicesType> >
+{
+  typedef typename _IndicesType::Scalar Index;
+  typedef _IndicesType IndicesType;
+};
+}
+
+template<typename _IndicesType>
+class TranspositionsWrapper
+ : public TranspositionsBase<TranspositionsWrapper<_IndicesType> >
+{
+    typedef internal::traits<TranspositionsWrapper> Traits;
+  public:
+
+    typedef TranspositionsBase<TranspositionsWrapper> Base;
+    typedef typename Traits::IndicesType IndicesType;
+    typedef typename IndicesType::Scalar Index;
+
+    inline TranspositionsWrapper(IndicesType& a_indices)
+      : m_indices(a_indices)
+    {}
+
+    /** Copies the \a other transpositions into \c *this */
+    template<typename OtherDerived>
+    TranspositionsWrapper& operator=(const TranspositionsBase<OtherDerived>& other)
+    {
+      return Base::operator=(other);
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is a special case of the templated operator=. Its purpose is to
+      * prevent a default operator= from hiding the templated operator=.
+      */
+    TranspositionsWrapper& operator=(const TranspositionsWrapper& other)
+    {
+      m_indices = other.m_indices;
+      return *this;
+    }
+    #endif
+
+    /** const version of indices(). */
+    const IndicesType& indices() const { return m_indices; }
+
+    /** \returns a reference to the stored array representing the transpositions. */
+    IndicesType& indices() { return m_indices; }
+
+  protected:
+
+    const typename IndicesType::Nested m_indices;
+};
+
+/** \returns the \a matrix with the \a transpositions applied to the columns.
+  */
+template<typename Derived, typename TranspositionsDerived>
+inline const internal::transposition_matrix_product_retval<TranspositionsDerived, Derived, OnTheRight>
+operator*(const MatrixBase<Derived>& matrix,
+          const TranspositionsBase<TranspositionsDerived> &transpositions)
+{
+  return internal::transposition_matrix_product_retval
+           <TranspositionsDerived, Derived, OnTheRight>
+           (transpositions.derived(), matrix.derived());
+}
+
+/** \returns the \a matrix with the \a transpositions applied to the rows.
+  */
+template<typename Derived, typename TranspositionDerived>
+inline const internal::transposition_matrix_product_retval
+               <TranspositionDerived, Derived, OnTheLeft>
+operator*(const TranspositionsBase<TranspositionDerived> &transpositions,
+          const MatrixBase<Derived>& matrix)
+{
+  return internal::transposition_matrix_product_retval
+           <TranspositionDerived, Derived, OnTheLeft>
+           (transpositions.derived(), matrix.derived());
+}
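+
+// Taken together, the two overloads above give matrix-like syntax (a minimal sketch):
+//
+//   A = tr * A;   // OnTheLeft:  interchanges rows
+//   A = A * tr;   // OnTheRight: interchanges columns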
+
+namespace internal {
+
+template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
+struct traits<transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
+{
+  typedef typename MatrixType::PlainObject ReturnType;
+};
+
+template<typename TranspositionType, typename MatrixType, int Side, bool Transposed>
+struct transposition_matrix_product_retval
+ : public ReturnByValue<transposition_matrix_product_retval<TranspositionType, MatrixType, Side, Transposed> >
+{
+    typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+    typedef typename TranspositionType::Index Index;
+
+    transposition_matrix_product_retval(const TranspositionType& tr, const MatrixType& matrix)
+      : m_transpositions(tr), m_matrix(matrix)
+    {}
+
+    inline int rows() const { return m_matrix.rows(); }
+    inline int cols() const { return m_matrix.cols(); }
+
+    template<typename Dest> inline void evalTo(Dest& dst) const
+    {
+      const int size = m_transpositions.size();
+      Index j = 0;
+
+      if(!(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix)))
+        dst = m_matrix;
+
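+      // Apply the transpositions in order, or in reverse order when evaluating
+      // the transposed (i.e., inverse) sequence.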
+      for(int k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k)
+        if((j=m_transpositions.coeff(k))!=k)
+        {
+          if(Side==OnTheLeft)
+            dst.row(k).swap(dst.row(j));
+          else if(Side==OnTheRight)
+            dst.col(k).swap(dst.col(j));
+        }
+    }
+
+  protected:
+    const TranspositionType& m_transpositions;
+    typename MatrixType::Nested m_matrix;
+};
+
+} // end namespace internal
+
+/* Template partial specialization for transposed/inverse transpositions */
+
+template<typename TranspositionsDerived>
+class Transpose<TranspositionsBase<TranspositionsDerived> >
+{
+    typedef TranspositionsDerived TranspositionType;
+    typedef typename TranspositionType::IndicesType IndicesType;
+  public:
+
+    Transpose(const TranspositionType& t) : m_transpositions(t) {}
+
+    inline int size() const { return m_transpositions.size(); }
+
+    /** \returns the \a matrix with the inverse transpositions applied to the columns.
+      */
+    template<typename Derived> friend
+    inline const internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>
+    operator*(const MatrixBase<Derived>& matrix, const Transpose& trt)
+    {
+      return internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheRight, true>(trt.m_transpositions, matrix.derived());
+    }
+
+    /** \returns the \a matrix with the inverse transpositions applied to the rows.
+      */
+    template<typename Derived>
+    inline const internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>
+    operator*(const MatrixBase<Derived>& matrix) const
+    {
+      return internal::transposition_matrix_product_retval<TranspositionType, Derived, OnTheLeft, true>(m_transpositions, matrix.derived());
+    }
+
+  protected:
+    const TranspositionType& m_transpositions;
+};
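+
+// A minimal sketch of applying the inverse sequence via the specialization above:
+//
+//   A = tr * A;             // apply the transpositions
+//   A = tr.inverse() * A;   // undo them: same swaps, applied in reverse order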
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRANSPOSITIONS_H
diff --git a/Eigen/src/Core/TriangularMatrix.h b/Eigen/src/Core/TriangularMatrix.h
new file mode 100644
index 0000000..4d65392
--- /dev/null
+++ b/Eigen/src/Core/TriangularMatrix.h
@@ -0,0 +1,839 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRIANGULARMATRIX_H
+#define EIGEN_TRIANGULARMATRIX_H
+
+namespace Eigen { 
+
+namespace internal {
+  
+template<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval;
+  
+}
+
+/** \internal
+  *
+  * \class TriangularBase
+  * \ingroup Core_Module
+  *
+  * \brief Base class for triangular part in a matrix
+  */
+template<typename Derived> class TriangularBase : public EigenBase<Derived>
+{
+  public:
+
+    enum {
+      Mode = internal::traits<Derived>::Mode,
+      CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+      MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime
+    };
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::traits<Derived>::DenseMatrixType DenseMatrixType;
+    typedef DenseMatrixType DenseType;
+
+    inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); }
+
+    inline Index rows() const { return derived().rows(); }
+    inline Index cols() const { return derived().cols(); }
+    inline Index outerStride() const { return derived().outerStride(); }
+    inline Index innerStride() const { return derived().innerStride(); }
+
+    inline Scalar coeff(Index row, Index col) const  { return derived().coeff(row,col); }
+    inline Scalar& coeffRef(Index row, Index col) { return derived().coeffRef(row,col); }
+
+    /** \see MatrixBase::copyCoeff(row,col)
+      */
+    template<typename Other>
+    EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, Other& other)
+    {
+      derived().coeffRef(row, col) = other.coeff(row, col);
+    }
+
+    inline Scalar operator()(Index row, Index col) const
+    {
+      check_coordinates(row, col);
+      return coeff(row,col);
+    }
+    inline Scalar& operator()(Index row, Index col)
+    {
+      check_coordinates(row, col);
+      return coeffRef(row,col);
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    inline Derived& derived() { return *static_cast<Derived*>(this); }
+    #endif // not EIGEN_PARSED_BY_DOXYGEN
+
+    template<typename DenseDerived>
+    void evalTo(MatrixBase<DenseDerived> &other) const;
+    template<typename DenseDerived>
+    void evalToLazy(MatrixBase<DenseDerived> &other) const;
+
+    DenseMatrixType toDenseMatrix() const
+    {
+      DenseMatrixType res(rows(), cols());
+      evalToLazy(res);
+      return res;
+    }
+
+  protected:
+
+    void check_coordinates(Index row, Index col) const
+    {
+      EIGEN_ONLY_USED_FOR_DEBUG(row);
+      EIGEN_ONLY_USED_FOR_DEBUG(col);
+      eigen_assert(col>=0 && col<cols() && row>=0 && row<rows());
+      const int mode = int(Mode) & ~SelfAdjoint;
+      EIGEN_ONLY_USED_FOR_DEBUG(mode);
+      eigen_assert((mode==Upper && col>=row)
+                || (mode==Lower && col<=row)
+                || ((mode==StrictlyUpper || mode==UnitUpper) && col>row)
+                || ((mode==StrictlyLower || mode==UnitLower) && col<row));
+    }
+
+    #ifdef EIGEN_INTERNAL_DEBUGGING
+    void check_coordinates_internal(Index row, Index col) const
+    {
+      check_coordinates(row, col);
+    }
+    #else
+    void check_coordinates_internal(Index , Index ) const {}
+    #endif
+
+};
+
+/** \class TriangularView
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a triangular part in a matrix
+  *
+  * \param MatrixType the type of the object in which we are taking the triangular part
+  * \param Mode the kind of triangular matrix expression to construct. Can be #Upper,
+  *             #Lower, #UnitUpper, #UnitLower, #StrictlyUpper, or #StrictlyLower.
+  *             This is in fact a bit field; it must have either #Upper or #Lower, 
+  *             and additionally it may have #UnitDiag or #ZeroDiag or neither.
+  *
+  * This class represents a triangular part of a matrix, not necessarily square. Strictly speaking, for rectangular
+  * matrices one should speak of "trapezoid" parts. This class is the return type
+  * of MatrixBase::triangularView() and most of the time this is the only way it is used.
+  *
+  * \sa MatrixBase::triangularView()
+  */
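+
+// A minimal usage sketch (assuming `using namespace Eigen`):
+//
+//   MatrixXd A(3,3), B(3,3);
+//   A.triangularView<Upper>() = B;           // copies only the upper triangular part of B
+//   MatrixXd U = A.triangularView<Upper>();  // dense copy; the lower part is set to zero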
+namespace internal {
+template<typename MatrixType, unsigned int _Mode>
+struct traits<TriangularView<MatrixType, _Mode> > : traits<MatrixType>
+{
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
+  typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
+  typedef MatrixType ExpressionType;
+  typedef typename MatrixType::PlainObject DenseMatrixType;
+  enum {
+    Mode = _Mode,
+    Flags = (MatrixTypeNestedCleaned::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) | Mode,
+    CoeffReadCost = MatrixTypeNestedCleaned::CoeffReadCost
+  };
+};
+}
+
+template<int Mode, bool LhsIsTriangular,
+         typename Lhs, bool LhsIsVector,
+         typename Rhs, bool RhsIsVector>
+struct TriangularProduct;
+
+template<typename _MatrixType, unsigned int _Mode> class TriangularView
+  : public TriangularBase<TriangularView<_MatrixType, _Mode> >
+{
+  public:
+
+    typedef TriangularBase<TriangularView> Base;
+    typedef typename internal::traits<TriangularView>::Scalar Scalar;
+
+    typedef _MatrixType MatrixType;
+    typedef typename internal::traits<TriangularView>::DenseMatrixType DenseMatrixType;
+    typedef DenseMatrixType PlainObject;
+
+  protected:
+    typedef typename internal::traits<TriangularView>::MatrixTypeNested MatrixTypeNested;
+    typedef typename internal::traits<TriangularView>::MatrixTypeNestedNonRef MatrixTypeNestedNonRef;
+    typedef typename internal::traits<TriangularView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned;
+
+    typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;
+    
+  public:
+    using Base::evalToLazy;
+  
+
+    typedef typename internal::traits<TriangularView>::StorageKind StorageKind;
+    typedef typename internal::traits<TriangularView>::Index Index;
+
+    enum {
+      Mode = _Mode,
+      TransposeMode = (Mode & Upper ? Lower : 0)
+                    | (Mode & Lower ? Upper : 0)
+                    | (Mode & (UnitDiag))
+                    | (Mode & (ZeroDiag))
+    };
+
+    inline TriangularView(const MatrixType& matrix) : m_matrix(matrix)
+    {}
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+    inline Index outerStride() const { return m_matrix.outerStride(); }
+    inline Index innerStride() const { return m_matrix.innerStride(); }
+
+    /** \sa MatrixBase::operator+=() */
+    template<typename Other> TriangularView&  operator+=(const DenseBase<Other>& other) { return *this = m_matrix + other.derived(); }
+    /** \sa MatrixBase::operator-=() */
+    template<typename Other> TriangularView&  operator-=(const DenseBase<Other>& other) { return *this = m_matrix - other.derived(); }
+    /** \sa MatrixBase::operator*=() */
+    TriangularView&  operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = m_matrix * other; }
+    /** \sa MatrixBase::operator/=() */
+    TriangularView&  operator/=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = m_matrix / other; }
+
+    /** \sa MatrixBase::fill() */
+    void fill(const Scalar& value) { setConstant(value); }
+    /** \sa MatrixBase::setConstant() */
+    TriangularView& setConstant(const Scalar& value)
+    { return *this = MatrixType::Constant(rows(), cols(), value); }
+    /** \sa MatrixBase::setZero() */
+    TriangularView& setZero() { return setConstant(Scalar(0)); }
+    /** \sa MatrixBase::setOnes() */
+    TriangularView& setOnes() { return setConstant(Scalar(1)); }
+
+    /** \sa MatrixBase::coeff()
+      * \warning the coordinates must fit into the referenced triangular part
+      */
+    inline Scalar coeff(Index row, Index col) const
+    {
+      Base::check_coordinates_internal(row, col);
+      return m_matrix.coeff(row, col);
+    }
+
+    /** \sa MatrixBase::coeffRef()
+      * \warning the coordinates must fit into the referenced triangular part
+      */
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      Base::check_coordinates_internal(row, col);
+      return m_matrix.const_cast_derived().coeffRef(row, col);
+    }
+
+    const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
+    MatrixTypeNestedCleaned& nestedExpression() { return *const_cast<MatrixTypeNestedCleaned*>(&m_matrix); }
+
+    /** Assigns a triangular matrix to a triangular part of a dense matrix */
+    template<typename OtherDerived>
+    TriangularView& operator=(const TriangularBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    TriangularView& operator=(const MatrixBase<OtherDerived>& other);
+
+    TriangularView& operator=(const TriangularView& other)
+    { return *this = other.nestedExpression(); }
+
+    template<typename OtherDerived>
+    void lazyAssign(const TriangularBase<OtherDerived>& other);
+
+    template<typename OtherDerived>
+    void lazyAssign(const MatrixBase<OtherDerived>& other);
+
+    /** \sa MatrixBase::conjugate() */
+    inline TriangularView<MatrixConjugateReturnType,Mode> conjugate()
+    { return m_matrix.conjugate(); }
+    /** \sa MatrixBase::conjugate() const */
+    inline const TriangularView<MatrixConjugateReturnType,Mode> conjugate() const
+    { return m_matrix.conjugate(); }
+
+    /** \sa MatrixBase::adjoint() const */
+    inline const TriangularView<const typename MatrixType::AdjointReturnType,TransposeMode> adjoint() const
+    { return m_matrix.adjoint(); }
+
+    /** \sa MatrixBase::transpose() */
+    inline TriangularView<Transpose<MatrixType>,TransposeMode> transpose()
+    {
+      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)
+      return m_matrix.const_cast_derived().transpose();
+    }
+    /** \sa MatrixBase::transpose() const */
+    inline const TriangularView<Transpose<MatrixType>,TransposeMode> transpose() const
+    {
+      return m_matrix.transpose();
+    }
+
+    /** Efficient triangular matrix times vector/matrix product */
+    template<typename OtherDerived>
+    TriangularProduct<Mode, true, MatrixType, false, OtherDerived, OtherDerived::ColsAtCompileTime==1>
+    operator*(const MatrixBase<OtherDerived>& rhs) const
+    {
+      return TriangularProduct
+              <Mode, true, MatrixType, false, OtherDerived, OtherDerived::ColsAtCompileTime==1>
+              (m_matrix, rhs.derived());
+    }
+
+    /** Efficient vector/matrix times triangular matrix product */
+    template<typename OtherDerived> friend
+    TriangularProduct<Mode, false, OtherDerived, OtherDerived::RowsAtCompileTime==1, MatrixType, false>
+    operator*(const MatrixBase<OtherDerived>& lhs, const TriangularView& rhs)
+    {
+      return TriangularProduct
+              <Mode, false, OtherDerived, OtherDerived::RowsAtCompileTime==1, MatrixType, false>
+              (lhs.derived(),rhs.m_matrix);
+    }
+
+    #ifdef EIGEN2_SUPPORT
+    template<typename OtherDerived>
+    struct eigen2_product_return_type
+    {
+      typedef typename TriangularView<MatrixType,Mode>::DenseMatrixType DenseMatrixType;
+      typedef typename OtherDerived::PlainObject::DenseType OtherPlainObject;
+      typedef typename ProductReturnType<DenseMatrixType, OtherPlainObject>::Type ProdRetType;
+      typedef typename ProdRetType::PlainObject type;
+    };
+    template<typename OtherDerived>
+    const typename eigen2_product_return_type<OtherDerived>::type
+    operator*(const EigenBase<OtherDerived>& rhs) const
+    {
+      typename OtherDerived::PlainObject::DenseType rhsPlainObject;
+      rhs.evalTo(rhsPlainObject);
+      return this->toDenseMatrix() * rhsPlainObject;
+    }
+    template<typename OtherMatrixType>
+    bool isApprox(const TriangularView<OtherMatrixType, Mode>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+    {
+      return this->toDenseMatrix().isApprox(other.toDenseMatrix(), precision);
+    }
+    template<typename OtherDerived>
+    bool isApprox(const MatrixBase<OtherDerived>& other, typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision()) const
+    {
+      return this->toDenseMatrix().isApprox(other, precision);
+    }
+    #endif // EIGEN2_SUPPORT
+
+    template<int Side, typename Other>
+    inline const internal::triangular_solve_retval<Side,TriangularView, Other>
+    solve(const MatrixBase<Other>& other) const;
+
+    template<int Side, typename OtherDerived>
+    void solveInPlace(const MatrixBase<OtherDerived>& other) const;
+
+    template<typename Other>
+    inline const internal::triangular_solve_retval<OnTheLeft,TriangularView, Other> 
+    solve(const MatrixBase<Other>& other) const
+    { return solve<OnTheLeft>(other); }
+
+    template<typename OtherDerived>
+    void solveInPlace(const MatrixBase<OtherDerived>& other) const
+    { return solveInPlace<OnTheLeft>(other); }
+
+    const SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView() const
+    {
+      EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR);
+      return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
+    }
+    SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView()
+    {
+      EIGEN_STATIC_ASSERT((Mode&UnitDiag)==0,PROGRAMMING_ERROR);
+      return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);
+    }
+
+    template<typename OtherDerived>
+    void swap(TriangularBase<OtherDerived> const & other)
+    {
+      TriangularView<SwapWrapper<MatrixType>,Mode>(const_cast<MatrixType&>(m_matrix)).lazyAssign(other.derived());
+    }
+
+    template<typename OtherDerived>
+    void swap(MatrixBase<OtherDerived> const & other)
+    {
+      SwapWrapper<MatrixType> swapper(const_cast<MatrixType&>(m_matrix));
+      TriangularView<SwapWrapper<MatrixType>,Mode>(swapper).lazyAssign(other.derived());
+    }
+
+    Scalar determinant() const
+    {
+      if (Mode & UnitDiag)
+        return 1;
+      else if (Mode & ZeroDiag)
+        return 0;
+      else
+        return m_matrix.diagonal().prod();
+    }
+    
+    // TODO simplify the following:
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE TriangularView& operator=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+    {
+      setZero();
+      return assignProduct(other.derived(),1);
+    }
+    
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE TriangularView& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+    {
+      return assignProduct(other.derived(),1);
+    }
+    
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE TriangularView& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
+    {
+      return assignProduct(other.derived(),-1);
+    }
+    
+    
+    template<typename ProductDerived>
+    EIGEN_STRONG_INLINE TriangularView& operator=(const ScaledProduct<ProductDerived>& other)
+    {
+      setZero();
+      return assignProduct(other.derived(),other.alpha());
+    }
+    
+    template<typename ProductDerived>
+    EIGEN_STRONG_INLINE TriangularView& operator+=(const ScaledProduct<ProductDerived>& other)
+    {
+      return assignProduct(other.derived(),other.alpha());
+    }
+    
+    template<typename ProductDerived>
+    EIGEN_STRONG_INLINE TriangularView& operator-=(const ScaledProduct<ProductDerived>& other)
+    {
+      return assignProduct(other.derived(),-other.alpha());
+    }
+    
+  protected:
+    
+    template<typename ProductDerived, typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE TriangularView& assignProduct(const ProductBase<ProductDerived, Lhs,Rhs>& prod, const Scalar& alpha);
+    
+    template<int Mode, bool LhsIsTriangular,
+         typename Lhs, bool LhsIsVector,
+         typename Rhs, bool RhsIsVector>
+    EIGEN_STRONG_INLINE TriangularView& assignProduct(const TriangularProduct<Mode, LhsIsTriangular, Lhs, LhsIsVector, Rhs, RhsIsVector>& prod, const Scalar& alpha)
+    {
+      lazyAssign(alpha*prod.eval());
+      return *this;
+    }
+
+    MatrixTypeNested m_matrix;
+};
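+
+// A minimal sketch of triangular solving through the view (assuming `using namespace
+// Eigen` and a lower triangular part with non-zero diagonal):
+//
+//   MatrixXd L(3,3);  VectorXd b(3);
+//   VectorXd x = L.triangularView<Lower>().solve(b);  // forward substitution
+//   L.triangularView<Lower>().solveInPlace(b);        // overwrites b with the solution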
+
+/***************************************************************************
+* Implementation of triangular evaluation/assignment
+***************************************************************************/
+
+namespace internal {
+
+template<typename Derived1, typename Derived2, unsigned int Mode, int UnrollCount, bool ClearOpposite>
+struct triangular_assignment_selector
+{
+  enum {
+    col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
+    row = (UnrollCount-1) % Derived1::RowsAtCompileTime
+  };
+  
+  typedef typename Derived1::Scalar Scalar;
+
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    triangular_assignment_selector<Derived1, Derived2, Mode, UnrollCount-1, ClearOpposite>::run(dst, src);
+
+    eigen_assert( Mode == Upper || Mode == Lower
+            || Mode == StrictlyUpper || Mode == StrictlyLower
+            || Mode == UnitUpper || Mode == UnitLower);
+    if((Mode == Upper && row <= col)
+    || (Mode == Lower && row >= col)
+    || (Mode == StrictlyUpper && row < col)
+    || (Mode == StrictlyLower && row > col)
+    || (Mode == UnitUpper && row < col)
+    || (Mode == UnitLower && row > col))
+      dst.copyCoeff(row, col, src);
+    else if(ClearOpposite)
+    {
+      if (Mode&UnitDiag && row==col)
+        dst.coeffRef(row, col) = Scalar(1);
+      else
+        dst.coeffRef(row, col) = Scalar(0);
+    }
+  }
+};
+
+// prevent buggy user code from causing an infinite recursion
+template<typename Derived1, typename Derived2, unsigned int Mode, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, Mode, 0, ClearOpposite>
+{
+  static inline void run(Derived1 &, const Derived2 &) {}
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, Upper, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  typedef typename Derived1::Scalar Scalar;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      Index maxi = (std::min)(j, dst.rows()-1);
+      for(Index i = 0; i <= maxi; ++i)
+        dst.copyCoeff(i, j, src);
+      if (ClearOpposite)
+        for(Index i = maxi+1; i < dst.rows(); ++i)
+          dst.coeffRef(i, j) = Scalar(0);
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, Lower, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      for(Index i = j; i < dst.rows(); ++i)
+        dst.copyCoeff(i, j, src);
+      Index maxi = (std::min)(j, dst.rows());
+      if (ClearOpposite)
+        for(Index i = 0; i < maxi; ++i)
+          dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, StrictlyUpper, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  typedef typename Derived1::Scalar Scalar;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      Index maxi = (std::min)(j, dst.rows());
+      for(Index i = 0; i < maxi; ++i)
+        dst.copyCoeff(i, j, src);
+      if (ClearOpposite)
+        for(Index i = maxi; i < dst.rows(); ++i)
+          dst.coeffRef(i, j) = Scalar(0);
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, StrictlyLower, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      for(Index i = j+1; i < dst.rows(); ++i)
+        dst.copyCoeff(i, j, src);
+      Index maxi = (std::min)(j, dst.rows()-1);
+      if (ClearOpposite)
+        for(Index i = 0; i <= maxi; ++i)
+          dst.coeffRef(i, j) = static_cast<typename Derived1::Scalar>(0);
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, UnitUpper, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      Index maxi = (std::min)(j, dst.rows());
+      for(Index i = 0; i < maxi; ++i)
+        dst.copyCoeff(i, j, src);
+      if (ClearOpposite)
+      {
+        for(Index i = maxi+1; i < dst.rows(); ++i)
+          dst.coeffRef(i, j) = 0;
+      }
+    }
+    dst.diagonal().setOnes();
+  }
+};
+template<typename Derived1, typename Derived2, bool ClearOpposite>
+struct triangular_assignment_selector<Derived1, Derived2, UnitLower, Dynamic, ClearOpposite>
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1 &dst, const Derived2 &src)
+  {
+    for(Index j = 0; j < dst.cols(); ++j)
+    {
+      Index maxi = (std::min)(j, dst.rows());
+      for(Index i = maxi+1; i < dst.rows(); ++i)
+        dst.copyCoeff(i, j, src);
+      if (ClearOpposite)
+      {
+        for(Index i = 0; i < maxi; ++i)
+          dst.coeffRef(i, j) = 0;
+      }
+    }
+    dst.diagonal().setOnes();
+  }
+};
+
+} // end namespace internal
+
+// FIXME should we keep that possibility
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+inline TriangularView<MatrixType, Mode>&
+TriangularView<MatrixType, Mode>::operator=(const MatrixBase<OtherDerived>& other)
+{
+  if(OtherDerived::Flags & EvalBeforeAssigningBit)
+  {
+    typename internal::plain_matrix_type<OtherDerived>::type other_evaluated(other.rows(), other.cols());
+    other_evaluated.template triangularView<Mode>().lazyAssign(other.derived());
+    lazyAssign(other_evaluated);
+  }
+  else
+    lazyAssign(other.derived());
+  return *this;
+}
+
+// FIXME should we keep that possibility
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+void TriangularView<MatrixType, Mode>::lazyAssign(const MatrixBase<OtherDerived>& other)
+{
+  enum {
+    unroll = MatrixType::SizeAtCompileTime != Dynamic
+          && internal::traits<OtherDerived>::CoeffReadCost != Dynamic
+          && MatrixType::SizeAtCompileTime*internal::traits<OtherDerived>::CoeffReadCost/2 <= EIGEN_UNROLLING_LIMIT
+  };
+  eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
+
+  internal::triangular_assignment_selector
+    <MatrixType, OtherDerived, int(Mode),
+    unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic,
+    false // do not change the opposite triangular part
+    >::run(m_matrix.const_cast_derived(), other.derived());
+}
+
+
+
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+inline TriangularView<MatrixType, Mode>&
+TriangularView<MatrixType, Mode>::operator=(const TriangularBase<OtherDerived>& other)
+{
+  eigen_assert(Mode == int(OtherDerived::Mode));
+  if(internal::traits<OtherDerived>::Flags & EvalBeforeAssigningBit)
+  {
+    typename OtherDerived::DenseMatrixType other_evaluated(other.rows(), other.cols());
+    other_evaluated.template triangularView<Mode>().lazyAssign(other.derived().nestedExpression());
+    lazyAssign(other_evaluated);
+  }
+  else
+    lazyAssign(other.derived().nestedExpression());
+  return *this;
+}
+
+template<typename MatrixType, unsigned int Mode>
+template<typename OtherDerived>
+void TriangularView<MatrixType, Mode>::lazyAssign(const TriangularBase<OtherDerived>& other)
+{
+  enum {
+    unroll = MatrixType::SizeAtCompileTime != Dynamic
+                   && internal::traits<OtherDerived>::CoeffReadCost != Dynamic
+                   && MatrixType::SizeAtCompileTime * internal::traits<OtherDerived>::CoeffReadCost / 2
+                        <= EIGEN_UNROLLING_LIMIT
+  };
+  eigen_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
+
+  internal::triangular_assignment_selector
+    <MatrixType, OtherDerived, int(Mode),
+    unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic,
+    false // preserve the opposite triangular part
+    >::run(m_matrix.const_cast_derived(), other.derived().nestedExpression());
+}
+
+/***************************************************************************
+* Implementation of TriangularBase methods
+***************************************************************************/
+
+/** Assigns a triangular or selfadjoint matrix to a dense matrix.
+  * If the matrix is triangular, the opposite part is set to zero. */
+template<typename Derived>
+template<typename DenseDerived>
+void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const
+{
+  if(internal::traits<Derived>::Flags & EvalBeforeAssigningBit)
+  {
+    typename internal::plain_matrix_type<Derived>::type other_evaluated(rows(), cols());
+    evalToLazy(other_evaluated);
+    other.derived().swap(other_evaluated);
+  }
+  else
+    evalToLazy(other.derived());
+}
+
+/** Assigns a triangular or selfadjoint matrix to a dense matrix.
+  * If the matrix is triangular, the opposite part is set to zero. */
+template<typename Derived>
+template<typename DenseDerived>
+void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const
+{
+  enum {
+    unroll = DenseDerived::SizeAtCompileTime != Dynamic
+                   && internal::traits<Derived>::CoeffReadCost != Dynamic
+                   && DenseDerived::SizeAtCompileTime * internal::traits<Derived>::CoeffReadCost / 2
+                        <= EIGEN_UNROLLING_LIMIT
+  };
+  other.derived().resize(this->rows(), this->cols());
+
+  internal::triangular_assignment_selector
+    <DenseDerived, typename internal::traits<Derived>::MatrixTypeNestedCleaned, Derived::Mode,
+    unroll ? int(DenseDerived::SizeAtCompileTime) : Dynamic,
+    true // clear the opposite triangular part
+    >::run(other.derived(), derived().nestedExpression());
+}
+
+/***************************************************************************
+* Implementation of TriangularView methods
+***************************************************************************/
+
+/***************************************************************************
+* Implementation of MatrixBase methods
+***************************************************************************/
+
+#ifdef EIGEN2_SUPPORT
+
+// implementation of part<>(), including the SelfAdjoint case.
+
+namespace internal {
+template<typename MatrixType, unsigned int Mode>
+struct eigen2_part_return_type
+{
+  typedef TriangularView<MatrixType, Mode> type;
+};
+
+template<typename MatrixType>
+struct eigen2_part_return_type<MatrixType, SelfAdjoint>
+{
+  typedef SelfAdjointView<MatrixType, Upper> type;
+};
+}
+
+/** \deprecated use MatrixBase::triangularView() */
+template<typename Derived>
+template<unsigned int Mode>
+const typename internal::eigen2_part_return_type<Derived, Mode>::type MatrixBase<Derived>::part() const
+{
+  return derived();
+}
+
+/** \deprecated use MatrixBase::triangularView() */
+template<typename Derived>
+template<unsigned int Mode>
+typename internal::eigen2_part_return_type<Derived, Mode>::type MatrixBase<Derived>::part()
+{
+  return derived();
+}
+#endif
+
+/**
+  * \returns an expression of a triangular view extracted from the current matrix
+  *
+  * The parameter \a Mode can have the following values: \c #Upper, \c #StrictlyUpper, \c #UnitUpper,
+  * \c #Lower, \c #StrictlyLower, \c #UnitLower.
+  *
+  * Example: \include MatrixBase_extract.cpp
+  * Output: \verbinclude MatrixBase_extract.out
+  *
+  * \sa class TriangularView
+  */
+template<typename Derived>
+template<unsigned int Mode>
+typename MatrixBase<Derived>::template TriangularViewReturnType<Mode>::Type
+MatrixBase<Derived>::triangularView()
+{
+  return derived();
+}
+
+/** This is the const version of MatrixBase::triangularView() */
+template<typename Derived>
+template<unsigned int Mode>
+typename MatrixBase<Derived>::template ConstTriangularViewReturnType<Mode>::Type
+MatrixBase<Derived>::triangularView() const
+{
+  return derived();
+}
+
+/** \returns true if *this is approximately equal to an upper triangular matrix,
+  *          within the precision given by \a prec.
+  *
+  * \sa isLowerTriangular()
+  */
+template<typename Derived>
+bool MatrixBase<Derived>::isUpperTriangular(const RealScalar& prec) const
+{
+  using std::abs;
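+  // First pass: find the largest magnitude over the upper part; it scales the
+  // tolerance. Second pass: every coefficient strictly below the diagonal must be
+  // negligible relative to that scale.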
+  RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
+  for(Index j = 0; j < cols(); ++j)
+  {
+    Index maxi = (std::min)(j, rows()-1);
+    for(Index i = 0; i <= maxi; ++i)
+    {
+      RealScalar absValue = abs(coeff(i,j));
+      if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue;
+    }
+  }
+  RealScalar threshold = maxAbsOnUpperPart * prec;
+  for(Index j = 0; j < cols(); ++j)
+    for(Index i = j+1; i < rows(); ++i)
+      if(abs(coeff(i, j)) > threshold) return false;
+  return true;
+}
+
+/** \returns true if *this is approximately equal to a lower triangular matrix,
+  *          within the precision given by \a prec.
+  *
+  * \sa isUpperTriangular()
+  */
+template<typename Derived>
+bool MatrixBase<Derived>::isLowerTriangular(const RealScalar& prec) const
+{
+  using std::abs;
+  RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1);
+  for(Index j = 0; j < cols(); ++j)
+    for(Index i = j; i < rows(); ++i)
+    {
+      RealScalar absValue = abs(coeff(i,j));
+      if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue;
+    }
+  RealScalar threshold = maxAbsOnLowerPart * prec;
+  for(Index j = 1; j < cols(); ++j)
+  {
+    Index maxi = (std::min)(j, rows()-1);
+    for(Index i = 0; i < maxi; ++i)
+      if(abs(coeff(i, j)) > threshold) return false;
+  }
+  return true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULARMATRIX_H
diff --git a/Eigen/src/Core/VectorBlock.h b/Eigen/src/Core/VectorBlock.h
new file mode 100644
index 0000000..1a7330f
--- /dev/null
+++ b/Eigen/src/Core/VectorBlock.h
@@ -0,0 +1,95 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_VECTORBLOCK_H
+#define EIGEN_VECTORBLOCK_H
+
+namespace Eigen { 
+
+/** \class VectorBlock
+  * \ingroup Core_Module
+  *
+  * \brief Expression of a fixed-size or dynamic-size sub-vector
+  *
+  * \param VectorType the type of the object in which we are taking a sub-vector
+  * \param Size size of the sub-vector we are taking at compile time (optional)
+  *
+  * This class represents an expression of either a fixed-size or dynamic-size sub-vector.
+  * It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and
+  * most of the time this is the only way it is used.
+  *
+  * However, if you want to directly manipulate sub-vector expressions,
+  * for instance to write a function returning such an expression, you
+  * will need to use this class.
+  *
+  * Here is an example illustrating the dynamic case:
+  * \include class_VectorBlock.cpp
+  * Output: \verbinclude class_VectorBlock.out
+  *
+  * \note Even though this expression has dynamic size, in the case where \a VectorType
+  * has fixed size, this expression inherits a fixed maximal size which means that evaluating
+  * it does not cause a dynamic memory allocation.
+  *
+  * Here is an example illustrating the fixed-size case:
+  * \include class_FixedVectorBlock.cpp
+  * Output: \verbinclude class_FixedVectorBlock.out
+  *
+  * \sa class Block, DenseBase::segment(Index,Index), DenseBase::segment<int>(Index)
+  */
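+
+// A minimal usage sketch (segment() is the usual entry point; assuming `using namespace Eigen`):
+//
+//   VectorXf v(6);
+//   v.segment(2, 3).setZero();  // dynamic-size sub-vector covering coefficients 2..4
+//   v.segment<3>(1) *= 2.0f;    // fixed-size sub-vector of length 3 starting at 1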
+
+namespace internal {
+template<typename VectorType, int Size>
+struct traits<VectorBlock<VectorType, Size> >
+  : public traits<Block<VectorType,
+                     traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
+                     traits<VectorType>::Flags & RowMajorBit ? Size : 1> >
+{
+};
+}
+
+template<typename VectorType, int Size> class VectorBlock
+  : public Block<VectorType,
+                     internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
+                     internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1>
+{
+    typedef Block<VectorType,
+                     internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,
+                     internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1> Base;
+    enum {
+      IsColVector = !(internal::traits<VectorType>::Flags & RowMajorBit)
+    };
+  public:
+    EIGEN_DENSE_PUBLIC_INTERFACE(VectorBlock)
+
+    using Base::operator=;
+
+    /** Dynamic-size constructor
+      */
+    inline VectorBlock(VectorType& vector, Index start, Index size)
+      : Base(vector,
+             IsColVector ? start : 0, IsColVector ? 0 : start,
+             IsColVector ? size  : 1, IsColVector ? 1 : size)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
+    }
+
+    /** Fixed-size constructor
+      */
+    inline VectorBlock(VectorType& vector, Index start)
+      : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);
+    }
+};
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_VECTORBLOCK_H
diff --git a/Eigen/src/Core/VectorwiseOp.h b/Eigen/src/Core/VectorwiseOp.h
new file mode 100644
index 0000000..d5ab036
--- /dev/null
+++ b/Eigen/src/Core/VectorwiseOp.h
@@ -0,0 +1,642 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PARTIAL_REDUX_H
+#define EIGEN_PARTIAL_REDUX_H
+
+namespace Eigen { 
+
+/** \class PartialReduxExpr
+  * \ingroup Core_Module
+  *
+  * \brief Generic expression of a partially reduxed matrix
+  *
+  * \tparam MatrixType the type of the matrix to which the redux operation is applied
+  * \tparam MemberOp type of the member functor
+  * \tparam Direction indicates the direction of the redux (#Vertical or #Horizontal)
+  *
+  * This class represents an expression of a partial redux operator of a matrix.
+  * It is the return type of some VectorwiseOp functions,
+  * and most of the time this is the only way it is used.
+  *
+  * \sa class VectorwiseOp
+  */
+
+template< typename MatrixType, typename MemberOp, int Direction>
+class PartialReduxExpr;
+
+namespace internal {
+template<typename MatrixType, typename MemberOp, int Direction>
+struct traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >
+ : traits<MatrixType>
+{
+  typedef typename MemberOp::result_type Scalar;
+  typedef typename traits<MatrixType>::StorageKind StorageKind;
+  typedef typename traits<MatrixType>::XprKind XprKind;
+  typedef typename MatrixType::Scalar InputScalar;
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+  enum {
+    RowsAtCompileTime = Direction==Vertical   ? 1 : MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = Direction==Vertical   ? 1 : MatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime,
+    Flags0 = (unsigned int)_MatrixTypeNested::Flags & HereditaryBits,
+    Flags = (Flags0 & ~RowMajorBit) | (RowsAtCompileTime == 1 ? RowMajorBit : 0),
+    TraversalSize = Direction==Vertical ? MatrixType::RowsAtCompileTime :  MatrixType::ColsAtCompileTime
+  };
+  #if EIGEN_GNUC_AT_LEAST(3,4)
+  typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
+  #else
+  typedef typename MemberOp::template Cost<InputScalar,TraversalSize> CostOpType;
+  #endif
+  enum {
+    CoeffReadCost = TraversalSize==Dynamic ? Dynamic
+                  : TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value)
+  };
+};
+}
+
+template< typename MatrixType, typename MemberOp, int Direction>
+class PartialReduxExpr : internal::no_assignment_operator,
+  public internal::dense_xpr_base< PartialReduxExpr<MatrixType, MemberOp, Direction> >::type
+{
+  public:
+
+    typedef typename internal::dense_xpr_base<PartialReduxExpr>::type Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(PartialReduxExpr)
+    typedef typename internal::traits<PartialReduxExpr>::MatrixTypeNested MatrixTypeNested;
+    typedef typename internal::traits<PartialReduxExpr>::_MatrixTypeNested _MatrixTypeNested;
+
+    PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp())
+      : m_matrix(mat), m_functor(func) {}
+
+    Index rows() const { return (Direction==Vertical   ? 1 : m_matrix.rows()); }
+    Index cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index i, Index j) const
+    {
+      if (Direction==Vertical)
+        return m_functor(m_matrix.col(j));
+      else
+        return m_functor(m_matrix.row(i));
+    }
+
+    const Scalar coeff(Index index) const
+    {
+      if (Direction==Vertical)
+        return m_functor(m_matrix.col(index));
+      else
+        return m_functor(m_matrix.row(index));
+    }
+
+  protected:
+    MatrixTypeNested m_matrix;
+    const MemberOp m_functor;
+};
+
+#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST)                               \
+  template <typename ResultType>                                        \
+  struct member_##MEMBER {                                              \
+    EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER)                            \
+    typedef ResultType result_type;                                     \
+    template<typename Scalar, int Size> struct Cost                     \
+    { enum { value = COST }; };                                         \
+    template<typename XprType>                                          \
+    EIGEN_STRONG_INLINE ResultType operator()(const XprType& mat) const \
+    { return mat.MEMBER(); } \
+  }
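+
+// For instance, EIGEN_MEMBER_FUNCTOR(sum, ...) below expands to a functor
+// member_sum<ResultType> whose operator() forwards to mat.sum(), together with a
+// Cost<Scalar,Size> estimate that feeds the unrolling heuristics.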
+
+namespace internal {
+
+EIGEN_MEMBER_FUNCTOR(squaredNorm, Size * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(norm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(stableNorm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(blueNorm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(hypotNorm, (Size-1) * functor_traits<scalar_hypot_op<Scalar> >::Cost );
+EIGEN_MEMBER_FUNCTOR(sum, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(mean, (Size-1)*NumTraits<Scalar>::AddCost + NumTraits<Scalar>::MulCost);
+EIGEN_MEMBER_FUNCTOR(minCoeff, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(maxCoeff, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(all, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(any, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(count, (Size-1)*NumTraits<Scalar>::AddCost);
+EIGEN_MEMBER_FUNCTOR(prod, (Size-1)*NumTraits<Scalar>::MulCost);
+
+
+template <typename BinaryOp, typename Scalar>
+struct member_redux {
+  typedef typename result_of<
+                     BinaryOp(Scalar)
+                   >::type  result_type;
+  template<typename _Scalar, int Size> struct Cost
+  { enum { value = (Size-1) * functor_traits<BinaryOp>::Cost }; };
+  member_redux(const BinaryOp func) : m_functor(func) {}
+  template<typename Derived>
+  inline result_type operator()(const DenseBase<Derived>& mat) const
+  { return mat.redux(m_functor); }
+  const BinaryOp m_functor;
+};
+}
+
+/** \class VectorwiseOp
+  * \ingroup Core_Module
+  *
+  * \brief Pseudo expression providing partial reduction operations
+  *
+  * \param ExpressionType the type of the object on which to do partial reductions
+  * \param Direction indicates the direction of the redux (#Vertical or #Horizontal)
+  *
+  * This class represents a pseudo expression with partial reduction features.
+  * It is the return type of DenseBase::colwise() and DenseBase::rowwise()
+  * and most of the time this is the only way it is used.
+  *
+  * Example: \include MatrixBase_colwise.cpp
+  * Output: \verbinclude MatrixBase_colwise.out
+  *
+  * \sa DenseBase::colwise(), DenseBase::rowwise(), class PartialReduxExpr
+  */
+template<typename ExpressionType, int Direction> class VectorwiseOp
+{
+  public:
+
+    typedef typename ExpressionType::Scalar Scalar;
+    typedef typename ExpressionType::RealScalar RealScalar;
+    typedef typename ExpressionType::Index Index;
+    typedef typename internal::conditional<internal::must_nest_by_value<ExpressionType>::ret,
+        ExpressionType, ExpressionType&>::type ExpressionTypeNested;
+    typedef typename internal::remove_all<ExpressionTypeNested>::type ExpressionTypeNestedCleaned;
+
+    template<template<typename _Scalar> class Functor,
+                      typename Scalar=typename internal::traits<ExpressionType>::Scalar> struct ReturnType
+    {
+      typedef PartialReduxExpr<ExpressionType,
+                               Functor<Scalar>,
+                               Direction
+                              > Type;
+    };
+
+    template<typename BinaryOp> struct ReduxReturnType
+    {
+      typedef PartialReduxExpr<ExpressionType,
+                               internal::member_redux<BinaryOp,typename internal::traits<ExpressionType>::Scalar>,
+                               Direction
+                              > Type;
+    };
+
+    enum {
+      IsVertical   = (Direction==Vertical) ? 1 : 0,
+      IsHorizontal = (Direction==Horizontal) ? 1 : 0
+    };
+
+  protected:
+
+    /** \internal
+      * \returns the i-th subvector according to the \c Direction */
+    typedef typename internal::conditional<Direction==Vertical,
+                               typename ExpressionType::ColXpr,
+                               typename ExpressionType::RowXpr>::type SubVector;
+    SubVector subVector(Index i)
+    {
+      return SubVector(m_matrix.derived(),i);
+    }
+
+    /** \internal
+      * \returns the number of subvectors in the direction \c Direction */
+    Index subVectors() const
+    { return Direction==Vertical?m_matrix.cols():m_matrix.rows(); }
+
+    template<typename OtherDerived> struct ExtendedType {
+      typedef Replicate<OtherDerived,
+                        Direction==Vertical   ? 1 : ExpressionType::RowsAtCompileTime,
+                        Direction==Horizontal ? 1 : ExpressionType::ColsAtCompileTime> Type;
+    };
+
+    /** \internal
+      * Replicates a vector to match the size of \c *this */
+    template<typename OtherDerived>
+    typename ExtendedType<OtherDerived>::Type
+    extendedTo(const DenseBase<OtherDerived>& other) const
+    {
+      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Vertical, OtherDerived::MaxColsAtCompileTime==1),
+                          YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED)
+      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Horizontal, OtherDerived::MaxRowsAtCompileTime==1),
+                          YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED)
+      return typename ExtendedType<OtherDerived>::Type
+                      (other.derived(),
+                       Direction==Vertical   ? 1 : m_matrix.rows(),
+                       Direction==Horizontal ? 1 : m_matrix.cols());
+    }
+    
+    template<typename OtherDerived> struct OppositeExtendedType {
+      typedef Replicate<OtherDerived,
+                        Direction==Horizontal ? 1 : ExpressionType::RowsAtCompileTime,
+                        Direction==Vertical   ? 1 : ExpressionType::ColsAtCompileTime> Type;
+    };
+
+    /** \internal
+      * Replicates a vector in the opposite direction to match the size of \c *this */
+    template<typename OtherDerived>
+    typename OppositeExtendedType<OtherDerived>::Type
+    extendedToOpposite(const DenseBase<OtherDerived>& other) const
+    {
+      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Horizontal, OtherDerived::MaxColsAtCompileTime==1),
+                          YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED)
+      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(Direction==Vertical, OtherDerived::MaxRowsAtCompileTime==1),
+                          YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED)
+      return typename OppositeExtendedType<OtherDerived>::Type
+                      (other.derived(),
+                       Direction==Horizontal  ? 1 : m_matrix.rows(),
+                       Direction==Vertical    ? 1 : m_matrix.cols());
+    }
+
+  public:
+
+    inline VectorwiseOp(ExpressionType& matrix) : m_matrix(matrix) {}
+
+    /** \internal */
+    inline const ExpressionType& _expression() const { return m_matrix; }
+
+    /** \returns a row or column vector expression of \c *this reduxed by \a func
+      *
+      * The template parameter \a BinaryOp is the type of the functor
+      * of the custom redux operator. Note that \a func must be an associative operator.
+      *
+      * \sa class VectorwiseOp, DenseBase::colwise(), DenseBase::rowwise()
+      */
+    template<typename BinaryOp>
+    const typename ReduxReturnType<BinaryOp>::Type
+    redux(const BinaryOp& func = BinaryOp()) const
+    { return typename ReduxReturnType<BinaryOp>::Type(_expression(), func); }
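+
+    // A minimal usage sketch (illustrative, not part of the API): summing each
+    // column through redux() with a hand-written associative functor.
+    //   struct SumOp {
+    //     typedef float result_type;
+    //     float operator()(const float& a, const float& b) const { return a + b; }
+    //   };
+    //   Eigen::MatrixXf m(3,4);
+    //   m.setOnes();
+    //   Eigen::RowVectorXf colSums = m.colwise().redux(SumOp());  // [3 3 3 3]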
+
+    /** \returns a row (or column) vector expression of the smallest coefficient
+      * of each column (or row) of the referenced expression.
+      * 
+      * \warning the result is undefined if \c *this contains NaN.
+      *
+      * Example: \include PartialRedux_minCoeff.cpp
+      * Output: \verbinclude PartialRedux_minCoeff.out
+      *
+      * \sa DenseBase::minCoeff() */
+    const typename ReturnType<internal::member_minCoeff>::Type minCoeff() const
+    { return _expression(); }
+
+    /** \returns a row (or column) vector expression of the largest coefficient
+      * of each column (or row) of the referenced expression.
+      * 
+      * \warning the result is undefined if \c *this contains NaN.
+      *
+      * Example: \include PartialRedux_maxCoeff.cpp
+      * Output: \verbinclude PartialRedux_maxCoeff.out
+      *
+      * \sa DenseBase::maxCoeff() */
+    const typename ReturnType<internal::member_maxCoeff>::Type maxCoeff() const
+    { return _expression(); }
+
+    /** \returns a row (or column) vector expression of the squared norm
+      * of each column (or row) of the referenced expression.
+      *
+      * Example: \include PartialRedux_squaredNorm.cpp
+      * Output: \verbinclude PartialRedux_squaredNorm.out
+      *
+      * \sa DenseBase::squaredNorm() */
+    const typename ReturnType<internal::member_squaredNorm,RealScalar>::Type squaredNorm() const
+    { return _expression(); }
+
+    /** \returns a row (or column) vector expression of the norm
+      * of each column (or row) of the referenced expression.
+      *
+      * Example: \include PartialRedux_norm.cpp
+      * Output: \verbinclude PartialRedux_norm.out
+      *
+      * \sa DenseBase::norm() */
+    const typename ReturnType<internal::member_norm,RealScalar>::Type norm() const
+    { return _expression(); }
+
+
+    /** \returns a row (or column) vector expression of the norm
+      * of each column (or row) of the referenced expression, using
+      * Blue's algorithm.
+      *
+      * \sa DenseBase::blueNorm() */
+    const typename ReturnType<internal::member_blueNorm,RealScalar>::Type blueNorm() const
+    { return _expression(); }
+
+
+    /** \returns a row (or column) vector expression of the norm
+      * of each column (or row) of the referenced expression, avoiding
+      * underflow and overflow.
+      *
+      * \sa DenseBase::stableNorm() */
+    const typename ReturnType<internal::member_stableNorm,RealScalar>::Type stableNorm() const
+    { return _expression(); }
+
+
+    /** \returns a row (or column) vector expression of the norm
+      * of each column (or row) of the referenced expression, avoiding
+      * underflow and overflow using a concatenation of hypot() calls.
+      *
+      * \sa DenseBase::hypotNorm() */
+    const typename ReturnType<internal::member_hypotNorm,RealScalar>::Type hypotNorm() const
+    { return _expression(); }
+
+    /** \returns a row (or column) vector expression of the sum
+      * of each column (or row) of the referenced expression.
+      *
+      * Example: \include PartialRedux_sum.cpp
+      * Output: \verbinclude PartialRedux_sum.out
+      *
+      * \sa DenseBase::sum() */
+    const typename ReturnType<internal::member_sum>::Type sum() const
+    { return _expression(); }
+
+    /** \returns a row (or column) vector expression of the mean
+    * of each column (or row) of the referenced expression.
+    *
+    * \sa DenseBase::mean() */
+    const typename ReturnType<internal::member_mean>::Type mean() const
+    { return _expression(); }
+
+    /** \returns a row (or column) vector expression representing
+      * whether \b all coefficients of each respective column (or row) are \c true.
+      *
+      * \sa DenseBase::all() */
+    const typename ReturnType<internal::member_all>::Type all() const
+    { return _expression(); }
+
+    /** \returns a row (or column) vector expression representing
+      * whether \b at \b least one coefficient of each respective column (or row) is \c true.
+      *
+      * \sa DenseBase::any() */
+    const typename ReturnType<internal::member_any>::Type any() const
+    { return _expression(); }
+
+    /** \returns a row (or column) vector expression representing
+      * the number of \c true coefficients of each respective column (or row).
+      *
+      * Example: \include PartialRedux_count.cpp
+      * Output: \verbinclude PartialRedux_count.out
+      *
+      * \sa DenseBase::count() */
+    const PartialReduxExpr<ExpressionType, internal::member_count<Index>, Direction> count() const
+    { return _expression(); }
+
+    /** \returns a row (or column) vector expression of the product
+      * of each column (or row) of the referenced expression.
+      *
+      * Example: \include PartialRedux_prod.cpp
+      * Output: \verbinclude PartialRedux_prod.out
+      *
+      * \sa DenseBase::prod() */
+    const typename ReturnType<internal::member_prod>::Type prod() const
+    { return _expression(); }
+
+
+    /** \returns a matrix expression
+      * where each column (or row) is reversed.
+      *
+      * Example: \include Vectorwise_reverse.cpp
+      * Output: \verbinclude Vectorwise_reverse.out
+      *
+      * \sa DenseBase::reverse() */
+    const Reverse<ExpressionType, Direction> reverse() const
+    { return Reverse<ExpressionType, Direction>( _expression() ); }
+
+    typedef Replicate<ExpressionType,Direction==Vertical?Dynamic:1,Direction==Horizontal?Dynamic:1> ReplicateReturnType;
+    const ReplicateReturnType replicate(Index factor) const;
+
+    /**
+      * \return an expression of the replication of each column (or row) of \c *this
+      *
+      * Example: \include DirectionWise_replicate.cpp
+      * Output: \verbinclude DirectionWise_replicate.out
+      *
+      * \sa VectorwiseOp::replicate(Index), DenseBase::replicate(), class Replicate
+      */
+    // NOTE: implemented here because of Sun Studio's compilation errors
+    template<int Factor> const Replicate<ExpressionType,(IsVertical?Factor:1),(IsHorizontal?Factor:1)>
+    replicate(Index factor = Factor) const
+    {
+      return Replicate<ExpressionType,Direction==Vertical?Factor:1,Direction==Horizontal?Factor:1>
+          (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);
+    }
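+
+    // Sketch (illustrative, not normative): colwise().replicate(n) stacks each
+    // column n times vertically, so a 2x2 matrix becomes 6x2 with a factor of 3.
+    //   Eigen::MatrixXf m(2,2), r;
+    //   r = m.colwise().replicate(3);   // equivalently: m.colwise().replicate<3>()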
+
+/////////// Arithmetic operators ///////////
+
+    /** Copies the vector \a other to each subvector of \c *this */
+    template<typename OtherDerived>
+    ExpressionType& operator=(const DenseBase<OtherDerived>& other)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
+      //eigen_assert((m_matrix.isNull()) == (other.isNull())); FIXME
+      return const_cast<ExpressionType&>(m_matrix = extendedTo(other.derived()));
+    }
+
+    /** Adds the vector \a other to each subvector of \c *this */
+    template<typename OtherDerived>
+    ExpressionType& operator+=(const DenseBase<OtherDerived>& other)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
+      return const_cast<ExpressionType&>(m_matrix += extendedTo(other.derived()));
+    }
+
+    /** Subtracts the vector \a other from each subvector of \c *this */
+    template<typename OtherDerived>
+    ExpressionType& operator-=(const DenseBase<OtherDerived>& other)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
+      return const_cast<ExpressionType&>(m_matrix -= extendedTo(other.derived()));
+    }
+
+    /** Multiplies each subvector of \c *this by the vector \a other */
+    template<typename OtherDerived>
+    ExpressionType& operator*=(const DenseBase<OtherDerived>& other)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+      EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)
+      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
+      m_matrix *= extendedTo(other.derived());
+      return const_cast<ExpressionType&>(m_matrix);
+    }
+
+    /** Divides each subvector of \c *this by the vector \a other */
+    template<typename OtherDerived>
+    ExpressionType& operator/=(const DenseBase<OtherDerived>& other)
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+      EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)
+      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
+      m_matrix /= extendedTo(other.derived());
+      return const_cast<ExpressionType&>(m_matrix);
+    }
+
+    /** Returns the expression of the sum of the vector \a other and each subvector of \c *this */
+    template<typename OtherDerived> EIGEN_STRONG_INLINE
+    CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
+                  const ExpressionTypeNestedCleaned,
+                  const typename ExtendedType<OtherDerived>::Type>
+    operator+(const DenseBase<OtherDerived>& other) const
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
+      return m_matrix + extendedTo(other.derived());
+    }
+
+    /** Returns the expression of the difference between each subvector of \c *this and the vector \a other */
+    template<typename OtherDerived>
+    CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
+                  const ExpressionTypeNestedCleaned,
+                  const typename ExtendedType<OtherDerived>::Type>
+    operator-(const DenseBase<OtherDerived>& other) const
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
+      return m_matrix - extendedTo(other.derived());
+    }
+
+    /** Returns the expression where each subvector is the product of the vector \a other
+      * by the corresponding subvector of \c *this */
+    template<typename OtherDerived> EIGEN_STRONG_INLINE
+    CwiseBinaryOp<internal::scalar_product_op<Scalar>,
+                  const ExpressionTypeNestedCleaned,
+                  const typename ExtendedType<OtherDerived>::Type>
+    operator*(const DenseBase<OtherDerived>& other) const
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+      EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)
+      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
+      return m_matrix * extendedTo(other.derived());
+    }
+
+    /** Returns the expression where each subvector is the quotient of the corresponding
+      * subvector of \c *this by the vector \a other */
+    template<typename OtherDerived>
+    CwiseBinaryOp<internal::scalar_quotient_op<Scalar>,
+                  const ExpressionTypeNestedCleaned,
+                  const typename ExtendedType<OtherDerived>::Type>
+    operator/(const DenseBase<OtherDerived>& other) const
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+      EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)
+      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)
+      return m_matrix / extendedTo(other.derived());
+    }
+    
+    /** \returns an expression where each column (or row) of the referenced matrix is normalized.
+      * The referenced matrix is \b not modified.
+      * \sa MatrixBase::normalized(), normalize()
+      */
+    CwiseBinaryOp<internal::scalar_quotient_op<Scalar>,
+                  const ExpressionTypeNestedCleaned,
+                  const typename OppositeExtendedType<typename ReturnType<internal::member_norm,RealScalar>::Type>::Type>
+    normalized() const { return m_matrix.cwiseQuotient(extendedToOpposite(this->norm())); }
+    
+    
+    /** Normalizes each column (or row) of the referenced matrix in place.
+      * \sa MatrixBase::normalize(), normalized()
+      */
+    void normalize() {
+      m_matrix = this->normalized();
+    }
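+
+    // Sketch: in-place column normalization (illustrative):
+    //   Eigen::MatrixXd m(3,5);
+    //   m.setRandom();
+    //   m.colwise().normalize();                       // every column now has unit norm
+    //   Eigen::MatrixXd n = m.rowwise().normalized();  // per-row, non-destructive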
+
+/////////// Geometry module ///////////
+
+    #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+    Homogeneous<ExpressionType,Direction> homogeneous() const;
+    #endif
+
+    typedef typename ExpressionType::PlainObject CrossReturnType;
+    template<typename OtherDerived>
+    const CrossReturnType cross(const MatrixBase<OtherDerived>& other) const;
+
+    enum {
+      HNormalized_Size = Direction==Vertical ? internal::traits<ExpressionType>::RowsAtCompileTime
+                                             : internal::traits<ExpressionType>::ColsAtCompileTime,
+      HNormalized_SizeMinusOne = HNormalized_Size==Dynamic ? Dynamic : HNormalized_Size-1
+    };
+    typedef Block<const ExpressionType,
+                  Direction==Vertical   ? int(HNormalized_SizeMinusOne)
+                                        : int(internal::traits<ExpressionType>::RowsAtCompileTime),
+                  Direction==Horizontal ? int(HNormalized_SizeMinusOne)
+                                        : int(internal::traits<ExpressionType>::ColsAtCompileTime)>
+            HNormalized_Block;
+    typedef Block<const ExpressionType,
+                  Direction==Vertical   ? 1 : int(internal::traits<ExpressionType>::RowsAtCompileTime),
+                  Direction==Horizontal ? 1 : int(internal::traits<ExpressionType>::ColsAtCompileTime)>
+            HNormalized_Factors;
+    typedef CwiseBinaryOp<internal::scalar_quotient_op<typename internal::traits<ExpressionType>::Scalar>,
+                const HNormalized_Block,
+                const Replicate<HNormalized_Factors,
+                  Direction==Vertical   ? HNormalized_SizeMinusOne : 1,
+                  Direction==Horizontal ? HNormalized_SizeMinusOne : 1> >
+            HNormalizedReturnType;
+
+    const HNormalizedReturnType hnormalized() const;
+
+  protected:
+    ExpressionTypeNested m_matrix;
+};
+
+/** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
+  *
+  * Example: \include MatrixBase_colwise.cpp
+  * Output: \verbinclude MatrixBase_colwise.out
+  *
+  * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
+  */
+template<typename Derived>
+inline const typename DenseBase<Derived>::ConstColwiseReturnType
+DenseBase<Derived>::colwise() const
+{
+  return derived();
+}
+
+/** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations
+  *
+  * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
+  */
+template<typename Derived>
+inline typename DenseBase<Derived>::ColwiseReturnType
+DenseBase<Derived>::colwise()
+{
+  return derived();
+}
+
+/** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
+  *
+  * Example: \include MatrixBase_rowwise.cpp
+  * Output: \verbinclude MatrixBase_rowwise.out
+  *
+  * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
+  */
+template<typename Derived>
+inline const typename DenseBase<Derived>::ConstRowwiseReturnType
+DenseBase<Derived>::rowwise() const
+{
+  return derived();
+}
+
+/** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations
+  *
+  * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
+  */
+template<typename Derived>
+inline typename DenseBase<Derived>::RowwiseReturnType
+DenseBase<Derived>::rowwise()
+{
+  return derived();
+}
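+
+// Usage sketch for the wrappers above (illustrative values):
+//   Eigen::MatrixXf m(2,3);
+//   m << 1, 2, 3,
+//        4, 5, 6;
+//   Eigen::RowVector3f s = m.colwise().sum();       // [5 7 9]
+//   Eigen::Vector2f    x = m.rowwise().maxCoeff();  // [3; 6]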
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARTIAL_REDUX_H
diff --git a/Eigen/src/Core/Visitor.h b/Eigen/src/Core/Visitor.h
new file mode 100644
index 0000000..64867b7
--- /dev/null
+++ b/Eigen/src/Core/Visitor.h
@@ -0,0 +1,237 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_VISITOR_H
+#define EIGEN_VISITOR_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Visitor, typename Derived, int UnrollCount>
+struct visitor_impl
+{
+  enum {
+    col = (UnrollCount-1) / Derived::RowsAtCompileTime,
+    row = (UnrollCount-1) % Derived::RowsAtCompileTime
+  };
+
+  static inline void run(const Derived &mat, Visitor& visitor)
+  {
+    visitor_impl<Visitor, Derived, UnrollCount-1>::run(mat, visitor);
+    visitor(mat.coeff(row, col), row, col);
+  }
+};
+
+template<typename Visitor, typename Derived>
+struct visitor_impl<Visitor, Derived, 1>
+{
+  static inline void run(const Derived &mat, Visitor& visitor)
+  {
+    return visitor.init(mat.coeff(0, 0), 0, 0);
+  }
+};
+
+template<typename Visitor, typename Derived>
+struct visitor_impl<Visitor, Derived, Dynamic>
+{
+  typedef typename Derived::Index Index;
+  static inline void run(const Derived& mat, Visitor& visitor)
+  {
+    visitor.init(mat.coeff(0,0), 0, 0);
+    for(Index i = 1; i < mat.rows(); ++i)
+      visitor(mat.coeff(i, 0), i, 0);
+    for(Index j = 1; j < mat.cols(); ++j)
+      for(Index i = 0; i < mat.rows(); ++i)
+        visitor(mat.coeff(i, j), i, j);
+  }
+};
+
+} // end namespace internal
+
+/** Applies the visitor \a visitor to all the coefficients of the matrix or vector.
+  *
+  * The template parameter \a Visitor is the type of the visitor and provides the following interface:
+  * \code
+  * struct MyVisitor {
+  *   // called for the first coefficient
+  *   void init(const Scalar& value, Index i, Index j);
+  *   // called for all other coefficients
+  *   void operator() (const Scalar& value, Index i, Index j);
+  * };
+  * \endcode
+  *
+  * \note compared to one or two \em for \em loops, visitors offer automatic
+  * unrolling for small fixed-size matrices.
+  *
+  * \sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux()
+  */
+template<typename Derived>
+template<typename Visitor>
+void DenseBase<Derived>::visit(Visitor& visitor) const
+{
+  enum { unroll = SizeAtCompileTime != Dynamic
+                   && CoeffReadCost != Dynamic
+                   && (SizeAtCompileTime == 1 || internal::functor_traits<Visitor>::Cost != Dynamic)
+                   && SizeAtCompileTime * CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits<Visitor>::Cost
+                      <= EIGEN_UNROLLING_LIMIT };
+  return internal::visitor_impl<Visitor, Derived,
+      unroll ? int(SizeAtCompileTime) : Dynamic
+    >::run(derived(), visitor);
+}
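+
+// Sketch of a concrete visitor (illustrative, not part of Eigen): counting the
+// strictly negative coefficients of a MatrixXf.
+//   struct NegativeCounter {
+//     int count;
+//     Eigen::MatrixXf::Index row, col;  // position of the last negative seen
+//     void init(const float& v, Eigen::MatrixXf::Index i, Eigen::MatrixXf::Index j)
+//     { count = 0; (*this)(v, i, j); }
+//     void operator()(const float& v, Eigen::MatrixXf::Index i, Eigen::MatrixXf::Index j)
+//     { if(v < 0.f) { ++count; row = i; col = j; } }
+//   };
+//   Eigen::MatrixXf m(2,2);
+//   m << 1, -2,
+//       -3,  4;
+//   NegativeCounter counter;
+//   m.visit(counter);   // counter.count == 2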
+
+namespace internal {
+
+/** \internal
+  * \brief Base class to implement min and max visitors
+  */
+template <typename Derived>
+struct coeff_visitor
+{
+  typedef typename Derived::Index Index;
+  typedef typename Derived::Scalar Scalar;
+  Index row, col;
+  Scalar res;
+  inline void init(const Scalar& value, Index i, Index j)
+  {
+    res = value;
+    row = i;
+    col = j;
+  }
+};
+
+/** \internal
+  * \brief Visitor computing the min coefficient with its value and coordinates
+  *
+  * \sa DenseBase::minCoeff(Index*, Index*)
+  */
+template <typename Derived>
+struct min_coeff_visitor : coeff_visitor<Derived>
+{
+  typedef typename Derived::Index Index;
+  typedef typename Derived::Scalar Scalar;
+  void operator() (const Scalar& value, Index i, Index j)
+  {
+    if(value < this->res)
+    {
+      this->res = value;
+      this->row = i;
+      this->col = j;
+    }
+  }
+};
+
+template<typename Scalar>
+struct functor_traits<min_coeff_visitor<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost
+  };
+};
+
+/** \internal
+  * \brief Visitor computing the max coefficient with its value and coordinates
+  *
+  * \sa DenseBase::maxCoeff(Index*, Index*)
+  */
+template <typename Derived>
+struct max_coeff_visitor : coeff_visitor<Derived>
+{
+  typedef typename Derived::Index Index;
+  typedef typename Derived::Scalar Scalar;
+  void operator() (const Scalar& value, Index i, Index j)
+  {
+    if(value > this->res)
+    {
+      this->res = value;
+      this->row = i;
+      this->col = j;
+    }
+  }
+};
+
+template<typename Scalar>
+struct functor_traits<max_coeff_visitor<Scalar> > {
+  enum {
+    Cost = NumTraits<Scalar>::AddCost
+  };
+};
+
+} // end namespace internal
+
+/** \returns the minimum of all coefficients of *this and puts in *rowId and *colId its location.
+  * \warning the result is undefined if \c *this contains NaN.
+  *
+  * \sa DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visit(), DenseBase::minCoeff()
+  */
+template<typename Derived>
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const
+{
+  internal::min_coeff_visitor<Derived> minVisitor;
+  this->visit(minVisitor);
+  *rowId = minVisitor.row;
+  if (colId) *colId = minVisitor.col;
+  return minVisitor.res;
+}
+
+/** \returns the minimum of all coefficients of *this and puts in *index its location.
+  * \warning the result is undefined if \c *this contains NaN. 
+  *
+  * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::minCoeff()
+  */
+template<typename Derived>
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::minCoeff(IndexType* index) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  internal::min_coeff_visitor<Derived> minVisitor;
+  this->visit(minVisitor);
+  *index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row;
+  return minVisitor.res;
+}
+
+/** \returns the maximum of all coefficients of *this and puts in *rowPtr and *colPtr its location.
+  * \warning the result is undefined if \c *this contains NaN. 
+  *
+  * \sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff()
+  */
+template<typename Derived>
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const
+{
+  internal::max_coeff_visitor<Derived> maxVisitor;
+  this->visit(maxVisitor);
+  *rowPtr = maxVisitor.row;
+  if (colPtr) *colPtr = maxVisitor.col;
+  return maxVisitor.res;
+}
+
+/** \returns the maximum of all coefficients of *this and puts in *index its location.
+  * \warning the result is undefined if \c *this contains NaN.
+  *
+  * \sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff()
+  */
+template<typename Derived>
+template<typename IndexType>
+typename internal::traits<Derived>::Scalar
+DenseBase<Derived>::maxCoeff(IndexType* index) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  internal::max_coeff_visitor<Derived> maxVisitor;
+  this->visit(maxVisitor);
+  *index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row;
+  return maxVisitor.res;
+}
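+
+// Usage sketch: locating extrema (illustrative values).
+//   Eigen::MatrixXd m(2,2);
+//   m << 3, 1,
+//        4, 2;
+//   Eigen::MatrixXd::Index r, c;
+//   double mn = m.minCoeff(&r, &c);   // mn == 1, (r,c) == (0,1)
+//   double mx = m.maxCoeff(&r, &c);   // mx == 4, (r,c) == (1,0)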
+
+} // end namespace Eigen
+
+#endif // EIGEN_VISITOR_H
diff --git a/Eigen/src/Core/arch/AltiVec/CMakeLists.txt b/Eigen/src/Core/arch/AltiVec/CMakeLists.txt
new file mode 100644
index 0000000..9f8d2e9
--- /dev/null
+++ b/Eigen/src/Core/arch/AltiVec/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_arch_AltiVec_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Core_arch_AltiVec_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/arch/AltiVec COMPONENT Devel
+)
diff --git a/Eigen/src/Core/arch/AltiVec/Complex.h b/Eigen/src/Core/arch/AltiVec/Complex.h
new file mode 100644
index 0000000..68d9a2b
--- /dev/null
+++ b/Eigen/src/Core/arch/AltiVec/Complex.h
@@ -0,0 +1,217 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_ALTIVEC_H
+#define EIGEN_COMPLEX_ALTIVEC_H
+
+namespace Eigen {
+
+namespace internal {
+
+static Packet4ui  p4ui_CONJ_XOR = vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_ZERO_);//{ 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+static Packet16uc p16uc_COMPLEX_RE   = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
+static Packet16uc p16uc_COMPLEX_IM   = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };
+static Packet16uc p16uc_COMPLEX_REV  = vec_sld(p16uc_REVERSE, p16uc_REVERSE, 8);//{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 };
+static Packet16uc p16uc_COMPLEX_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8);//{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
+static Packet16uc p16uc_PSET_HI = (Packet16uc) vec_mergeh((Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 1));//{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 };
+static Packet16uc p16uc_PSET_LO = (Packet16uc) vec_mergeh((Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 2), (Packet4ui) vec_splat((Packet4ui)p16uc_FORWARD, 3));//{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 };
+
+//---------- float ----------
+struct Packet2cf
+{
+  EIGEN_STRONG_INLINE Packet2cf() {}
+  EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+  Packet4f  v;
+};
+
+template<> struct packet_traits<std::complex<float> >  : default_packet_traits
+{
+  typedef Packet2cf type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 2,
+
+    HasAdd    = 1,
+    HasSub    = 1,
+    HasMul    = 1,
+    HasDiv    = 1,
+    HasNegate = 1,
+    HasAbs    = 0,
+    HasAbs2   = 0,
+    HasMin    = 0,
+    HasMax    = 0,
+    HasSetLinear = 0
+  };
+};
+
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2}; };
+
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>&  from)
+{
+  Packet2cf res;
+  /* On AltiVec we cannot load 64-bit registers, so we have to take care of alignment */
+  if((ptrdiff_t(&from) % 16) == 0)
+    res.v = pload<Packet4f>((const float *)&from);
+  else
+    res.v = ploadu<Packet4f>((const float *)&from);
+  res.v = vec_perm(res.v, res.v, p16uc_PSET_HI);
+  return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_add(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_sub(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(a.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf((Packet4f)vec_xor((Packet4ui)a.v, p4ui_CONJ_XOR)); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  Packet4f v1, v2;
+
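+  // Complex product: (ar + i*ai)*(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br).
+  // v1 will hold ar*(br,bi); v2 will hold ai*(br,bi), whose sign flip and re/im
+  // swap below turn it into (-ai*bi, ai*br), so that v1 + v2 is the product.
+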
+  // Permute and multiply the real parts of a and b
+  v1 = vec_perm(a.v, a.v, p16uc_COMPLEX_RE);
+  // Get the imaginary parts of a
+  v2 = vec_perm(a.v, a.v, p16uc_COMPLEX_IM);
+  // multiply a_re * b 
+  v1 = vec_madd(v1, b.v, p4f_ZERO);
+  // multiply a_im * b and get the conjugate result
+  v2 = vec_madd(v2, b.v, p4f_ZERO);
+  v2 = (Packet4f) vec_xor((Packet4ui)v2, p4ui_CONJ_XOR);
+  // permute back to a proper order
+  v2 = vec_perm(v2, v2, p16uc_COMPLEX_REV);
+  
+  return Packet2cf(vec_add(v1, v2));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pand   <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_and(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf por    <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_or(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pxor   <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_xor(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(vec_and(a.v, vec_nor(b.v,b.v))); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>*     from)
+{
+  return pset1<Packet2cf>(*from);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> *   addr) { vec_dstt((float *)addr, DST_CTRL(2,2,32), DST_CHAN); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float>  pfirst<Packet2cf>(const Packet2cf& a)
+{
+  std::complex<float> EIGEN_ALIGN16 res[2];
+  pstore((float *)&res, a.v);
+
+  return res[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+{
+  Packet4f rev_a;
+  rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX_REV2);
+  return Packet2cf(rev_a);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+{
+  Packet4f b;
+  b = (Packet4f) vec_sld(a.v, a.v, 8);
+  b = padd(a.v, b);
+  return pfirst(Packet2cf(b));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
+{
+  Packet4f b1, b2;
+  
+  b1 = (Packet4f) vec_sld(vecs[0].v, vecs[1].v, 8);
+  b2 = (Packet4f) vec_sld(vecs[1].v, vecs[0].v, 8);
+  b2 = (Packet4f) vec_sld(b2, b2, 8);
+  b2 = padd(b1, b2);
+
+  return Packet2cf(b2);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+{
+  Packet4f b;
+  Packet2cf prod;
+  b = (Packet4f) vec_sld(a.v, a.v, 8);
+  prod = pmul(a, Packet2cf(b));
+
+  return pfirst(prod);
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet2cf>
+{
+  static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second)
+  {
+    if (Offset==1)
+    {
+      first.v = vec_sld(first.v, second.v, 8);
+    }
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return internal::pmul(a, pconj(b));
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return internal::pmul(pconj(a), b);
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return pconj(internal::pmul(a, b));
+  }
+};
+
+template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  // TODO optimize it for AltiVec
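+  // a/b = a*conj(b) / |b|^2: 's' holds the squared components of b, and adding
+  // the re/im-swapped permutation of 's' duplicates |b|^2 into both lanes.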
+  Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
+  Packet4f s = vec_madd(b.v, b.v, p4f_ZERO);
+  return Packet2cf(pdiv(res.v, vec_add(s,vec_perm(s, s, p16uc_COMPLEX_REV))));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& x)
+{
+  return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX_REV));
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_ALTIVEC_H
diff --git a/Eigen/src/Core/arch/AltiVec/PacketMath.h b/Eigen/src/Core/arch/AltiVec/PacketMath.h
new file mode 100644
index 0000000..e408996
--- /dev/null
+++ b/Eigen/src/Core/arch/AltiVec/PacketMath.h
@@ -0,0 +1,501 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Konstantinos Margaritis <markos@codex.gr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_ALTIVEC_H
+#define EIGEN_PACKET_MATH_ALTIVEC_H
+
+namespace Eigen {
+
+namespace internal {
+
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4
+#endif
+
+#ifndef EIGEN_HAS_FUSE_CJMADD
+#define EIGEN_HAS_FUSE_CJMADD 1
+#endif
+
+// NOTE AltiVec has 32 registers, but Eigen only accepts a value of 8 or 16
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
+#endif
+
+typedef __vector float          Packet4f;
+typedef __vector int            Packet4i;
+typedef __vector unsigned int   Packet4ui;
+typedef __vector __bool int     Packet4bi;
+typedef __vector short int      Packet8i;
+typedef __vector unsigned char  Packet16uc;
+
+// We don't want to write the same code all the time, but we need to reuse the constants
+// and it doesn't really work to declare them global, so we define macros instead
+
+#define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \
+  Packet4f p4f_##NAME = (Packet4f) vec_splat_s32(X)
+
+#define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \
+  Packet4i p4i_##NAME = vec_splat_s32(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
+  Packet4f p4f_##NAME = pset1<Packet4f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
+  Packet4f p4f_##NAME = (Packet4f) pset1<Packet4i>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
+  Packet4i p4i_##NAME = pset1<Packet4i>(X)
+
+#define DST_CHAN 1
+#define DST_CTRL(size, count, stride) (((size) << 24) | ((count) << 16) | (stride))
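+// e.g. DST_CTRL(2,2,32) encodes a data-stream touch of 2 blocks of 2 vectors
+// each with a 32-byte stride between blocks, issued on channel DST_CHAN.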
+
+// Define global static constants:
+static Packet4f p4f_COUNTDOWN = { 3.0, 2.0, 1.0, 0.0 };
+static Packet4i p4i_COUNTDOWN = { 3, 2, 1, 0 };
+static Packet16uc p16uc_REVERSE = {12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3};
+static Packet16uc p16uc_FORWARD = vec_lvsl(0, (float*)0);
+static Packet16uc p16uc_DUPLICATE = {0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7};
+
+static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0);
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0);
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE,1);
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS16,-16);
+static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1);
+static Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0);
+static Packet4f p4f_ZERO_ = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1);
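+// p4f_ZERO_ is 0x80000000 in every lane (four -0.0f): all-ones shifted left by
+// 31 (the shift count -1 is taken modulo the element width), i.e. the sign mask.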
+
+template<> struct packet_traits<float>  : default_packet_traits
+{
+  typedef Packet4f type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=4,
+
+    // FIXME check the Has*
+    HasSin  = 0,
+    HasCos  = 0,
+    HasLog  = 0,
+    HasExp  = 0,
+    HasSqrt = 0
+  };
+};
+template<> struct packet_traits<int>    : default_packet_traits
+{
+  typedef Packet4i type;
+  enum {
+    // FIXME check the Has*
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=4
+  };
+};
+
+template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
+template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };
+/*
+inline std::ostream & operator <<(std::ostream & s, const Packet4f & v)
+{
+  union {
+    Packet4f   v;
+    float n[4];
+  } vt;
+  vt.v = v;
+  s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
+  return s;
+}
+
+inline std::ostream & operator <<(std::ostream & s, const Packet4i & v)
+{
+  union {
+    Packet4i   v;
+    int n[4];
+  } vt;
+  vt.v = v;
+  s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
+  return s;
+}
+
+inline std::ostream & operator <<(std::ostream & s, const Packet4ui & v)
+{
+  union {
+    Packet4ui   v;
+    unsigned int n[4];
+  } vt;
+  vt.v = v;
+  s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
+  return s;
+}
+
+inline std::ostream & operator <<(std::ostream & s, const Packet4bi & v)
+{
+  union {
+    Packet4bi v;
+    unsigned int n[4];
+  } vt;
+  vt.v = v;
+  s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
+  return s;
+}
+*/
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) {
+  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
+  float EIGEN_ALIGN16 af[4];
+  af[0] = from;
+  Packet4f vc = vec_ld(0, af);
+  vc = vec_splat(vc, 0);
+  return vc;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from)   {
+  int EIGEN_ALIGN16 ai[4];
+  ai[0] = from;
+  Packet4i vc = vec_ld(0, ai);
+  vc = vec_splat(vc, 0);
+  return vc;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return vec_add(pset1<Packet4f>(a), p4f_COUNTDOWN); }
+template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a)     { return vec_add(pset1<Packet4i>(a), p4i_COUNTDOWN); }
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_add(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_add(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_sub(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_sub(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return psub<Packet4f>(p4f_ZERO, a); }
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return psub<Packet4i>(p4i_ZERO, a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_madd(a,b,p4f_ZERO); }
+/* Commented out: it's actually slower than scalar processing
+ *
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+  // Detailed in: http://freevec.org/content/32bit_signed_integer_multiplication_altivec
+  //Set up constants, variables
+  Packet4i a1, b1, bswap, low_prod, high_prod, prod, prod_, v1sel;
+
+  // Get the absolute values
+  a1  = vec_abs(a);
+  b1  = vec_abs(b);
+
+  // Get the signs using xor
+  Packet4bi sgn = (Packet4bi) vec_cmplt(vec_xor(a, b), p4i_ZERO);
+
+  // Do the multiplication for the absolute values.
+  bswap = (Packet4i) vec_rl((Packet4ui) b1, (Packet4ui) p4i_MINUS16 );
+  low_prod = vec_mulo((Packet8i) a1, (Packet8i)b1);
+  high_prod = vec_msum((Packet8i) a1, (Packet8i) bswap, p4i_ZERO);
+  high_prod = (Packet4i) vec_sl((Packet4ui) high_prod, (Packet4ui) p4i_MINUS16);
+  prod = vec_add( low_prod, high_prod );
+
+  // NOR the product and select only the negative elements according to the sign mask
+  prod_ = vec_nor(prod, prod);
+  prod_ = vec_sel(p4i_ZERO, prod_, sgn);
+
+  // Add 1 to the result to get the negative numbers
+  v1sel = vec_sel(p4i_ZERO, p4i_ONE, sgn);
+  prod_ = vec_add(prod_, v1sel);
+
+  // Merge the results back to the final vector.
+  prod = vec_sel(prod, prod_, sgn);
+
+  return prod;
+}
+*/
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  Packet4f t, y_0, y_1, res;
+
+  // AltiVec does not offer a divide instruction, so we have to do a reciprocal approximation
+  y_0 = vec_re(b);
+
+  // Do one Newton-Raphson iteration to get the needed accuracy
+  t   = vec_nmsub(y_0, b, p4f_ONE);
+  y_1 = vec_madd(y_0, t, y_0);
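+  // i.e. y_1 = y_0*(2 - b*y_0), the Newton step for f(y) = 1/y - b, which
+  // roughly doubles the number of correct bits of the initial estimate y_0.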
+
+  res = vec_madd(a, y_1, p4f_ZERO);
+  return res;
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
+{ eigen_assert(false && "packet integer division is not supported by AltiVec");
+  return pset1<Packet4i>(0);
+}
+
+// For some weird reason, this has to be overloaded for packets of integers
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a, b, c); }
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_min(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_max(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }
+
+// Logical operations; unlike on NEON, the AltiVec intrinsics accept float vectors directly
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_or(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); }
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); }
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, vec_nor(b, b)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
+{
+  EIGEN_DEBUG_ALIGNED_LOAD
+  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
+  Packet16uc MSQ, LSQ;
+  Packet16uc mask;
+  MSQ = vec_ld(0, (unsigned char *)from);          // most significant quadword
+  LSQ = vec_ld(15, (unsigned char *)from);         // least significant quadword
+  mask = vec_lvsl(0, from);                        // create the permute mask
+  return (Packet4f) vec_perm(MSQ, LSQ, mask);           // align the data
+
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
+{
+  EIGEN_DEBUG_ALIGNED_LOAD
+  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
+  Packet16uc MSQ, LSQ;
+  Packet16uc mask;
+  MSQ = vec_ld(0, (unsigned char *)from);          // most significant quadword
+  LSQ = vec_ld(15, (unsigned char *)from);         // least significant quadword
+  mask = vec_lvsl(0, from);                        // create the permute mask
+  return (Packet4i) vec_perm(MSQ, LSQ, mask);    // align the data
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
+{
+  Packet4f p;
+  if((ptrdiff_t(from) % 16) == 0)   p = pload<Packet4f>(from);
+  else                              p = ploadu<Packet4f>(from);
+  return vec_perm(p, p, p16uc_DUPLICATE);
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
+{
+  Packet4i p;
+  if((ptrdiff_t(from) % 16) == 0)   p = pload<Packet4i>(from);
+  else                              p = ploadu<Packet4i>(from);
+  return vec_perm(p, p, p16uc_DUPLICATE);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); }
+template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const Packet4f& from)
+{
+  EIGEN_DEBUG_UNALIGNED_STORE
+  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
+  // Warning: not thread safe!
+  Packet16uc MSQ, LSQ, edges;
+  Packet16uc edgeAlign, align;
+
+  MSQ = vec_ld(0, (unsigned char *)to);                     // most significant quadword
+  LSQ = vec_ld(15, (unsigned char *)to);                    // least significant quadword
+  edgeAlign = vec_lvsl(0, to);                              // permute map to extract edges
+  edges=vec_perm(LSQ,MSQ,edgeAlign);                        // extract the edges
+  align = vec_lvsr( 0, to );                                // permute map to misalign data
+  MSQ = vec_perm(edges,(Packet16uc)from,align);             // misalign the data (MSQ)
+  LSQ = vec_perm((Packet16uc)from,edges,align);             // misalign the data (LSQ)
+  vec_st( LSQ, 15, (unsigned char *)to );                   // Store the LSQ part first
+  vec_st( MSQ, 0, (unsigned char *)to );                    // Store the MSQ part
+}
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*      to, const Packet4i& from)
+{
+  EIGEN_DEBUG_UNALIGNED_STORE
+  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
+  // Warning: not thread safe!
+  Packet16uc MSQ, LSQ, edges;
+  Packet16uc edgeAlign, align;
+
+  MSQ = vec_ld(0, (unsigned char *)to);                     // most significant quadword
+  LSQ = vec_ld(15, (unsigned char *)to);                    // least significant quadword
+  edgeAlign = vec_lvsl(0, to);                              // permute map to extract edges
+  edges=vec_perm(LSQ, MSQ, edgeAlign);                      // extract the edges
+  align = vec_lvsr( 0, to );                                // permute map to misalign data
+  MSQ = vec_perm(edges, (Packet16uc) from, align);          // misalign the data (MSQ)
+  LSQ = vec_perm((Packet16uc) from, edges, align);          // misalign the data (LSQ)
+  vec_st( LSQ, 15, (unsigned char *)to );                   // Store the LSQ part first
+  vec_st( MSQ, 0, (unsigned char *)to );                    // Store the MSQ part
+}
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*     addr) { vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); }
+
+template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; }
+template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int   EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; }
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { return (Packet4f)vec_perm((Packet16uc)a,(Packet16uc)a, p16uc_REVERSE); }
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { return (Packet4i)vec_perm((Packet16uc)a,(Packet16uc)a, p16uc_REVERSE); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vec_abs(a); }
+template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vec_abs(a); }
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+  Packet4f b, sum;
+  b   = (Packet4f) vec_sld(a, a, 8);
+  sum = vec_add(a, b);
+  b   = (Packet4f) vec_sld(sum, sum, 4);
+  sum = vec_add(sum, b);
+  return pfirst(sum);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+  Packet4f v[4], sum[4];
+
+  // It's easier and faster to transpose and then add as columns
+  // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
+  // Do the transpose, first set of moves
+  v[0] = vec_mergeh(vecs[0], vecs[2]);
+  v[1] = vec_mergel(vecs[0], vecs[2]);
+  v[2] = vec_mergeh(vecs[1], vecs[3]);
+  v[3] = vec_mergel(vecs[1], vecs[3]);
+  // Get the resulting vectors
+  sum[0] = vec_mergeh(v[0], v[2]);
+  sum[1] = vec_mergel(v[0], v[2]);
+  sum[2] = vec_mergeh(v[1], v[3]);
+  sum[3] = vec_mergel(v[1], v[3]);
+
+  // Now do the summation:
+  // Lines 0+1
+  sum[0] = vec_add(sum[0], sum[1]);
+  // Lines 2+3
+  sum[1] = vec_add(sum[2], sum[3]);
+  // Add the results
+  sum[0] = vec_add(sum[0], sum[1]);
+
+  return sum[0];
+}
+
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+  Packet4i sum;
+  sum = vec_sums(a, p4i_ZERO);
+  sum = vec_sld(sum, p4i_ZERO, 12);
+  return pfirst(sum);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+  Packet4i v[4], sum[4];
+
+  // It's easier and faster to transpose and then add as columns
+  // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
+  // Do the transpose, first set of moves
+  v[0] = vec_mergeh(vecs[0], vecs[2]);
+  v[1] = vec_mergel(vecs[0], vecs[2]);
+  v[2] = vec_mergeh(vecs[1], vecs[3]);
+  v[3] = vec_mergel(vecs[1], vecs[3]);
+  // Get the resulting vectors
+  sum[0] = vec_mergeh(v[0], v[2]);
+  sum[1] = vec_mergel(v[0], v[2]);
+  sum[2] = vec_mergeh(v[1], v[3]);
+  sum[3] = vec_mergel(v[1], v[3]);
+
+  // Now do the summation:
+  // Lines 0+1
+  sum[0] = vec_add(sum[0], sum[1]);
+  // Lines 2+3
+  sum[1] = vec_add(sum[2], sum[3]);
+  // Add the results
+  sum[0] = vec_add(sum[0], sum[1]);
+
+  return sum[0];
+}
+
+// Other reduction functions:
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+  Packet4f prod;
+  prod = pmul(a, (Packet4f)vec_sld(a, a, 8));
+  return pfirst(pmul(prod, (Packet4f)vec_sld(prod, prod, 4)));
+}
+
+template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+{
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  return aux[0] * aux[1] * aux[2] * aux[3];
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+  Packet4f b, res;
+  b = vec_min(a, vec_sld(a, a, 8));
+  res = vec_min(b, vec_sld(b, b, 4));
+  return pfirst(res);
+}
+
+template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+{
+  Packet4i b, res;
+  b = vec_min(a, vec_sld(a, a, 8));
+  res = vec_min(b, vec_sld(b, b, 4));
+  return pfirst(res);
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+  Packet4f b, res;
+  b = vec_max(a, vec_sld(a, a, 8));
+  res = vec_max(b, vec_sld(b, b, 4));
+  return pfirst(res);
+}
+
+template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+{
+  Packet4i b, res;
+  b = vec_max(a, vec_sld(a, a, 8));
+  res = vec_max(b, vec_sld(b, b, 4));
+  return pfirst(res);
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet4f>
+{
+  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
+  {
+    if (Offset!=0)
+      first = vec_sld(first, second, Offset*4);
+  }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet4i>
+{
+  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
+  {
+    if (Offset!=0)
+      first = vec_sld(first, second, Offset*4);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PACKET_MATH_ALTIVEC_H
diff --git a/Eigen/src/Core/arch/CMakeLists.txt b/Eigen/src/Core/arch/CMakeLists.txt
new file mode 100644
index 0000000..8456dec
--- /dev/null
+++ b/Eigen/src/Core/arch/CMakeLists.txt
@@ -0,0 +1,4 @@
+ADD_SUBDIRECTORY(SSE)
+ADD_SUBDIRECTORY(AltiVec)
+ADD_SUBDIRECTORY(NEON)
+ADD_SUBDIRECTORY(Default)
diff --git a/Eigen/src/Core/arch/Default/CMakeLists.txt b/Eigen/src/Core/arch/Default/CMakeLists.txt
new file mode 100644
index 0000000..339c091
--- /dev/null
+++ b/Eigen/src/Core/arch/Default/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_arch_Default_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Core_arch_Default_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/arch/Default COMPONENT Devel
+)
diff --git a/Eigen/src/Core/arch/Default/Settings.h b/Eigen/src/Core/arch/Default/Settings.h
new file mode 100644
index 0000000..097373c
--- /dev/null
+++ b/Eigen/src/Core/arch/Default/Settings.h
@@ -0,0 +1,49 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+/* All the parameters defined in this file can be specialized in the
+ * architecture specific files, and/or by the user.
+ * More to come... */
+
+#ifndef EIGEN_DEFAULT_SETTINGS_H
+#define EIGEN_DEFAULT_SETTINGS_H
+
+/** Defines the maximal loop size to enable meta unrolling of loops.
+  * Note that the value here is expressed in Eigen's own notion of "number of FLOPS";
+  * it does not correspond to the number of iterations or the number of instructions.
+  */
+#ifndef EIGEN_UNROLLING_LIMIT
+#define EIGEN_UNROLLING_LIMIT 100
+#endif
+
+/** Defines the threshold between a "small" and a "large" matrix.
+  * This threshold is mainly used to select the proper product implementation.
+  */
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+/** Defines the maximal width of the blocks used in the triangular product and solver
+  * for vectors (level-2 BLAS xTRMV and xTRSV). The default is 8.
+  */
+#ifndef EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH
+#define EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH 8
+#endif
+
+
+/** Defines the default number of registers available for that architecture.
+  * Currently it must be 8 or 16. Other values will fail.
+  */
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8
+#endif
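+// [Editor's note, illustrative only -- not part of the original source]
+// Because every macro above is guarded by #ifndef, users may override these
+// knobs before including Eigen, e.g.:
+//   #define EIGEN_UNROLLING_LIMIT 50
+//   #include <Eigen/Core>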
+
+#endif // EIGEN_DEFAULT_SETTINGS_H
diff --git a/Eigen/src/Core/arch/NEON/CMakeLists.txt b/Eigen/src/Core/arch/NEON/CMakeLists.txt
new file mode 100644
index 0000000..fd4d4af
--- /dev/null
+++ b/Eigen/src/Core/arch/NEON/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_arch_NEON_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Core_arch_NEON_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/arch/NEON COMPONENT Devel
+)
diff --git a/Eigen/src/Core/arch/NEON/Complex.h b/Eigen/src/Core/arch/NEON/Complex.h
new file mode 100644
index 0000000..8d9255e
--- /dev/null
+++ b/Eigen/src/Core/arch/NEON/Complex.h
@@ -0,0 +1,253 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_NEON_H
+#define EIGEN_COMPLEX_NEON_H
+
+namespace Eigen {
+
+namespace internal {
+
+static uint32x4_t p4ui_CONJ_XOR = EIGEN_INIT_NEON_PACKET4(0x00000000, 0x80000000, 0x00000000, 0x80000000);
+static uint32x2_t p2ui_CONJ_XOR = EIGEN_INIT_NEON_PACKET2(0x00000000, 0x80000000);
+
+//---------- float ----------
+struct Packet2cf
+{
+  EIGEN_STRONG_INLINE Packet2cf() {}
+  EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}
+  Packet4f  v;
+};
+
+template<> struct packet_traits<std::complex<float> >  : default_packet_traits
+{
+  typedef Packet2cf type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 2,
+
+    HasAdd    = 1,
+    HasSub    = 1,
+    HasMul    = 1,
+    HasDiv    = 1,
+    HasNegate = 1,
+    HasAbs    = 0,
+    HasAbs2   = 0,
+    HasMin    = 0,
+    HasMax    = 0,
+    HasSetLinear = 0
+  };
+};
+
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2}; };
+
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>&  from)
+{
+  float32x2_t r64;
+  r64 = vld1_f32((float *)&from);
+
+  return Packet2cf(vcombine_f32(r64, r64));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub<Packet4f>(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate<Packet4f>(a.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
+{
+  Packet4ui b = vreinterpretq_u32_f32(a.v);
+  return Packet2cf(vreinterpretq_f32_u32(veorq_u32(b, p4ui_CONJ_XOR)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  Packet4f v1, v2;
+
+  // Broadcast the real parts of a: | a1_re | a1_re | a2_re | a2_re |
+  v1 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 0), vdup_lane_f32(vget_high_f32(a.v), 0));
+  // Broadcast the imag parts of a: | a1_im | a1_im | a2_im | a2_im |
+  v2 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 1), vdup_lane_f32(vget_high_f32(a.v), 1));
+  // Multiply the real parts of a with b
+  v1 = vmulq_f32(v1, b.v);
+  // Multiply the imag parts of a with b
+  v2 = vmulq_f32(v2, b.v);
+  // Conjugate v2 
+  v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR));
+  // Swap real/imag elements in v2.
+  v2 = vrev64q_f32(v2);
+  // Add and return the result
+  return Packet2cf(vaddq_f32(v1, v2));
+}
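+// [Editor's sketch, illustrative only -- not part of the original source]
+// Per complex lane, the steps above compute (ar + i*ai)*(br + i*bi):
+//   v1 = |ar*br  ar*bi|
+//   v2 = |ai*br  ai*bi| -> conjugate -> |ai*br  -ai*bi| -> rev64 -> |-ai*bi  ai*br|
+//   v1 + v2 = |ar*br - ai*bi   ar*bi + ai*br|, the real and imag parts of the product.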
+
+template<> EIGEN_STRONG_INLINE Packet2cf pand   <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet2cf por    <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet2cf pxor   <Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> *   addr) { EIGEN_ARM_PREFETCH((float *)addr); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float>  pfirst<Packet2cf>(const Packet2cf& a)
+{
+  std::complex<float> EIGEN_ALIGN16 x[2];
+  vst1q_f32((float *)x, a.v);
+  return x[0];
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)
+{
+  float32x2_t a_lo, a_hi;
+  Packet4f a_r128;
+
+  a_lo = vget_low_f32(a.v);
+  a_hi = vget_high_f32(a.v);
+  a_r128 = vcombine_f32(a_hi, a_lo);
+
+  return Packet2cf(a_r128);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& a)
+{
+  return Packet2cf(vrev64q_f32(a.v));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+{
+  float32x2_t a1, a2;
+  std::complex<float> s;
+
+  a1 = vget_low_f32(a.v);
+  a2 = vget_high_f32(a.v);
+  a2 = vadd_f32(a1, a2);
+  vst1_f32((float *)&s, a2);
+
+  return s;
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
+{
+  Packet4f sum1, sum2, sum;
+
+  // Pair the low halves of vecs[0] and vecs[1] against their high halves, then add:
+  // lane pair 0 becomes vecs[0][0]+vecs[0][1], lane pair 1 becomes vecs[1][0]+vecs[1][1]
+  sum1 = vcombine_f32(vget_low_f32(vecs[0].v), vget_low_f32(vecs[1].v));
+  sum2 = vcombine_f32(vget_high_f32(vecs[0].v), vget_high_f32(vecs[1].v));
+  sum = vaddq_f32(sum1, sum2);
+
+  return Packet2cf(sum);
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+{
+  float32x2_t a1, a2, v1, v2, prod;
+  std::complex<float> s;
+
+  a1 = vget_low_f32(a.v);
+  a2 = vget_high_f32(a.v);
+  // Broadcast the real part of the first complex: | a1_re | a1_re |
+  v1 = vdup_lane_f32(a1, 0);
+  // Broadcast the imag part of the first complex: | a1_im | a1_im |
+  v2 = vdup_lane_f32(a1, 1);
+  // Multiply the real part with the second complex
+  v1 = vmul_f32(v1, a2);
+  // Multiply the imag part with the second complex
+  v2 = vmul_f32(v2, a2);
+  // Conjugate v2 
+  v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR));
+  // Swap real/imag elements in v2.
+  v2 = vrev64_f32(v2);
+  // Add v1, v2
+  prod = vadd_f32(v1, v2);
+
+  vst1_f32((float *)&s, prod);
+
+  return s;
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet2cf>
+{
+  EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second)
+  {
+    if (Offset==1)
+    {
+      first.v = vextq_f32(first.v, second.v, 2);
+    }
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return internal::pmul(a, pconj(b));
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return internal::pmul(pconj(a), b);
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    return pconj(internal::pmul(a, b));
+  }
+};
+
+template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  // TODO optimize it for NEON
+  Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
+  Packet4f s, rev_s;
+
+  // this computes the norm
+  s = vmulq_f32(b.v, b.v);
+  rev_s = vrev64q_f32(s);
+
+  return Packet2cf(pdiv(res.v, vaddq_f32(s,rev_s)));
+}
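+// [Editor's note, illustrative only -- not part of the original source]
+// The division uses the identity a/b = a*conj(b) / |b|^2: res holds a*conj(b),
+// and |b|^2 = br^2 + bi^2 is obtained per complex lane as s + rev64(s) with
+// s = b.v * b.v.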
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_NEON_H
diff --git a/Eigen/src/Core/arch/NEON/PacketMath.h b/Eigen/src/Core/arch/NEON/PacketMath.h
new file mode 100644
index 0000000..94dfab3
--- /dev/null
+++ b/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -0,0 +1,419 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Konstantinos Margaritis <markos@codex.gr>
+// Heavily based on Gael's SSE version.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_NEON_H
+#define EIGEN_PACKET_MATH_NEON_H
+
+namespace Eigen {
+
+namespace internal {
+
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+// FIXME NEON has 16 quad registers, but since the current register allocator
+// is so bad, it is much better to reduce it to 8
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8
+#endif
+
+typedef float32x4_t Packet4f;
+typedef int32x4_t   Packet4i;
+typedef uint32x4_t  Packet4ui;
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
+  const Packet4f p4f_##NAME = pset1<Packet4f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
+  const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int>(X))
+
+#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
+  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
+
+#if defined(__llvm__) && !defined(__clang__)
+  // Special treatment for Apple's llvm-gcc: its NEON packet types are unions
+  #define EIGEN_INIT_NEON_PACKET2(X, Y)       {{X, Y}}
+  #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {{X, Y, Z, W}}
+#else
+  //Default initializer for packets
+  #define EIGEN_INIT_NEON_PACKET2(X, Y)       {X, Y}
+  #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {X, Y, Z, W}
+#endif
+
+// Let's trust the __builtin_prefetch built-in function if available (LLVM and GCC at least provide it);
+// otherwise fall back to the "pld" inline-asm instruction, which only exists on 32-bit ARM.
+#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+  #define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
+#elif defined __pld
+  #define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
+#elif !defined(__aarch64__)
+  #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ( "   pld [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
+#else
+  // by default no explicit prefetching
+  #define EIGEN_ARM_PREFETCH(ADDR)
+#endif
+
+template<> struct packet_traits<float>  : default_packet_traits
+{
+  typedef Packet4f type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 4,
+   
+    HasDiv  = 1,
+    // FIXME check the Has*
+    HasSin  = 0,
+    HasCos  = 0,
+    HasLog  = 0,
+    HasExp  = 0,
+    HasSqrt = 0
+  };
+};
+template<> struct packet_traits<int>    : default_packet_traits
+{
+  typedef Packet4i type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=4
+    // FIXME check the Has*
+  };
+};
+
+#if EIGEN_GNUC_AT_MOST(4,4) && !defined(__llvm__)
+// workaround for a gcc 4.2, 4.3 and 4.4 compilation issue
+EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
+EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
+EIGEN_STRONG_INLINE void        vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
+EIGEN_STRONG_INLINE void        vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
+#endif
+
+template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
+template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };
+
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return vdupq_n_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from)   { return vdupq_n_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a)
+{
+  Packet4f countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
+  return vaddq_f32(pset1<Packet4f>(a), countdown);
+}
+template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a)
+{
+  Packet4i countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
+  return vaddq_s32(pset1<Packet4i>(a), countdown);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  Packet4f inv, restep, div;
+
+  // NEON does not offer a divide instruction, so we have to use a reciprocal approximation.
+  // However, in contrast to other SIMD engines (AltiVec/SSE), NEON offers
+  // a reciprocal estimate AND a reciprocal step, which saves a few instructions:
+  // vrecpeq_f32() returns an estimate of 1/b, which we then refine with
+  // Newton-Raphson using vrecpsq_f32().
+  inv = vrecpeq_f32(b);
+
+  // This returns a correction factor, by which we multiply inv to get a better
+  // approximation of 1/b.
+  restep = vrecpsq_f32(b, inv);
+  inv = vmulq_f32(restep, inv);
+
+  // Finally, multiply a by 1/b to get the desired result of the division.
+  div = vmulq_f32(a, inv);
+
+  return div;
+}
+template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
+{ eigen_assert(false && "packet integer division is not supported by NEON");
+  return pset1<Packet4i>(0);
+}
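+// [Editor's sketch, illustrative only -- not part of the original source]
+// vrecpsq_f32(b, x) computes 2 - b*x, so the refinement above is one
+// Newton-Raphson step x' = x*(2 - b*x), which roughly doubles the number of
+// correct bits of x ~ 1/b. In scalar form (recip_estimate is a hypothetical
+// stand-in for vrecpeq_f32):
+//   float x = recip_estimate(b);   // coarse approximation of 1/b
+//   x = x * (2.0f - b * x);        // one Newton-Raphson step
+//   float div = a * x;             // a/b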
+
+// for some weird reason, pmadd has to be overloaded for packets of integers
+template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vmlaq_f32(c,a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }
+
+// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
+{
+  return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
+}
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*   from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)   { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
+{
+  float32x2_t lo, hi;
+  lo = vld1_dup_f32(from);
+  hi = vld1_dup_f32(from+1);
+  return vcombine_f32(lo, hi);
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
+{
+  int32x2_t lo, hi;
+  lo = vld1_dup_s32(from);
+  hi = vld1_dup_s32(from+1);
+  return vcombine_s32(lo, hi);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*      to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ARM_PREFETCH(addr); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*     addr) { EIGEN_ARM_PREFETCH(addr); }
+
+// FIXME only store the 2 first elements ?
+template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
+template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int   EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
+  float32x2_t a_lo, a_hi;
+  Packet4f a_r64;
+
+  a_r64 = vrev64q_f32(a);
+  a_lo = vget_low_f32(a_r64);
+  a_hi = vget_high_f32(a_r64);
+  return vcombine_f32(a_hi, a_lo);
+}
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
+  int32x2_t a_lo, a_hi;
+  Packet4i a_r64;
+
+  a_r64 = vrev64q_s32(a);
+  a_lo = vget_low_s32(a_r64);
+  a_hi = vget_high_s32(a_r64);
+  return vcombine_s32(a_hi, a_lo);
+}
+template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
+template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, sum;
+
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  sum = vpadd_f32(a_lo, a_hi);
+  sum = vpadd_f32(sum, sum);
+  return vget_lane_f32(sum, 0);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+  float32x4x2_t vtrn1, vtrn2, res1, res2;
+  Packet4f sum1, sum2, sum;
+
+  // NEON zip performs interleaving of the supplied vectors.
+  // We perform two rounds of interleaving to obtain the transposed vectors.
+  vtrn1 = vzipq_f32(vecs[0], vecs[2]);
+  vtrn2 = vzipq_f32(vecs[1], vecs[3]);
+  res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
+  res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
+
+  // Do the addition of the resulting vectors
+  sum1 = vaddq_f32(res1.val[0], res1.val[1]);
+  sum2 = vaddq_f32(res2.val[0], res2.val[1]);
+  sum = vaddq_f32(sum1, sum2);
+
+  return sum;
+}
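+// [Editor's sketch, illustrative only -- not part of the original source]
+// Lane trace of the zip-based transpose above, with input rows r0..r3:
+//   vzipq(r0,r2).val = { |r0[0] r2[0] r0[1] r2[1]| , |r0[2] r2[2] r0[3] r2[3]| }
+//   vzipq(r1,r3).val = { |r1[0] r3[0] r1[1] r3[1]| , |r1[2] r3[2] r1[3] r3[3]| }
+// Zipping those results pairwise yields the four columns |r0[k] r1[k] r2[k] r3[k]|,
+// so summing them produces all four per-row reductions in one vector.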
+
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, sum;
+
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  sum = vpadd_s32(a_lo, a_hi);
+  sum = vpadd_s32(sum, sum);
+  return vget_lane_s32(sum, 0);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+  int32x4x2_t vtrn1, vtrn2, res1, res2;
+  Packet4i sum1, sum2, sum;
+
+  // NEON zip performs interleaving of the supplied vectors.
+  // We perform two rounds of interleaving to obtain the transposed vectors.
+  vtrn1 = vzipq_s32(vecs[0], vecs[2]);
+  vtrn2 = vzipq_s32(vecs[1], vecs[3]);
+  res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
+  res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);
+
+  // Do the addition of the resulting vectors
+  sum1 = vaddq_s32(res1.val[0], res1.val[1]);
+  sum2 = vaddq_s32(res2.val[0], res2.val[1]);
+  sum = vaddq_s32(sum1, sum2);
+
+  return sum;
+}
+
+// Other reduction functions:
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, prod;
+
+  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
+  prod = vmul_f32(a_lo, a_hi);
+  // Multiply prod with its swapped value |a2*a4|a1*a3|
+  prod = vmul_f32(prod, vrev64_f32(prod));
+
+  return vget_lane_f32(prod, 0);
+}
+template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, prod;
+
+  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
+  prod = vmul_s32(a_lo, a_hi);
+  // Multiply prod with its swapped value |a2*a4|a1*a3|
+  prod = vmul_s32(prod, vrev64_s32(prod));
+
+  return vget_lane_s32(prod, 0);
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, min;
+
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  min = vpmin_f32(a_lo, a_hi);
+  min = vpmin_f32(min, min);
+
+  return vget_lane_f32(min, 0);
+}
+
+template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, min;
+
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  min = vpmin_s32(a_lo, a_hi);
+  min = vpmin_s32(min, min);
+  
+  return vget_lane_s32(min, 0);
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+  float32x2_t a_lo, a_hi, max;
+
+  a_lo = vget_low_f32(a);
+  a_hi = vget_high_f32(a);
+  max = vpmax_f32(a_lo, a_hi);
+  max = vpmax_f32(max, max);
+
+  return vget_lane_f32(max, 0);
+}
+
+template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+{
+  int32x2_t a_lo, a_hi, max;
+
+  a_lo = vget_low_s32(a);
+  a_hi = vget_high_s32(a);
+  max = vpmax_s32(a_lo, a_hi);
+
+  return vget_lane_s32(max, 0);
+}
+
+// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing spurious compilation errors,
+// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074
+#define PALIGN_NEON(Offset,Type,Command) \
+template<>\
+struct palign_impl<Offset,Type>\
+{\
+    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
+    {\
+        if (Offset!=0)\
+            first = Command(first, second, Offset);\
+    }\
+};\
+
+PALIGN_NEON(0,Packet4f,vextq_f32)
+PALIGN_NEON(1,Packet4f,vextq_f32)
+PALIGN_NEON(2,Packet4f,vextq_f32)
+PALIGN_NEON(3,Packet4f,vextq_f32)
+PALIGN_NEON(0,Packet4i,vextq_s32)
+PALIGN_NEON(1,Packet4i,vextq_s32)
+PALIGN_NEON(2,Packet4i,vextq_s32)
+PALIGN_NEON(3,Packet4i,vextq_s32)
+    
+#undef PALIGN_NEON
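+// [Editor's example, illustrative only -- not part of the original source]
+// vextq_f32(a, b, n) extracts a full vector starting n lanes into the
+// concatenation a:b, e.g. vextq_f32(a, b, 1) = |a1 a2 a3 b0| -- the same lane
+// shift performed by the palign_impl::run specializations above.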
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PACKET_MATH_NEON_H
diff --git a/Eigen/src/Core/arch/SSE/CMakeLists.txt b/Eigen/src/Core/arch/SSE/CMakeLists.txt
new file mode 100644
index 0000000..46ea7cc
--- /dev/null
+++ b/Eigen/src/Core/arch/SSE/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_arch_SSE_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Core_arch_SSE_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/arch/SSE COMPONENT Devel
+)
diff --git a/Eigen/src/Core/arch/SSE/Complex.h b/Eigen/src/Core/arch/SSE/Complex.h
new file mode 100644
index 0000000..91bba5e
--- /dev/null
+++ b/Eigen/src/Core/arch/SSE/Complex.h
@@ -0,0 +1,442 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_SSE_H
+#define EIGEN_COMPLEX_SSE_H
+
+namespace Eigen {
+
+namespace internal {
+
+//---------- float ----------
+struct Packet2cf
+{
+  EIGEN_STRONG_INLINE Packet2cf() {}
+  EIGEN_STRONG_INLINE explicit Packet2cf(const __m128& a) : v(a) {}
+  __m128  v;
+};
+
+template<> struct packet_traits<std::complex<float> >  : default_packet_traits
+{
+  typedef Packet2cf type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size = 2,
+
+    HasAdd    = 1,
+    HasSub    = 1,
+    HasMul    = 1,
+    HasDiv    = 1,
+    HasNegate = 1,
+    HasAbs    = 0,
+    HasAbs2   = 0,
+    HasMin    = 0,
+    HasMax    = 0,
+    HasSetLinear = 0
+  };
+};
+
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2}; };
+
+template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a)
+{
+  const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
+  return Packet2cf(_mm_xor_ps(a.v,mask));
+}
+template<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)
+{
+  const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
+  return Packet2cf(_mm_xor_ps(a.v,mask));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  // TODO optimize it for SSE3 and 4
+  #ifdef EIGEN_VECTORIZE_SSE3
+  return Packet2cf(_mm_addsub_ps(_mm_mul_ps(_mm_moveldup_ps(a.v), b.v),
+                                 _mm_mul_ps(_mm_movehdup_ps(a.v),
+                                            vec4f_swizzle1(b.v, 1, 0, 3, 2))));
+//   return Packet2cf(_mm_addsub_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),
+//                                  _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
+//                                             vec4f_swizzle1(b.v, 1, 0, 3, 2))));
+  #else
+  const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x00000000,0x80000000,0x00000000));
+  return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),
+                              _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
+                                                    vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask)));
+  #endif
+}
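+// [Editor's sketch, illustrative only -- not part of the original source]
+// Lane trace of the SSE3 path above, per complex lane (ar + i*ai)*(br + i*bi):
+//   moveldup(a)         = |ar0 ar0 ar1 ar1|
+//   movehdup(a)         = |ai0 ai0 ai1 ai1|
+//   swizzle(b, 1,0,3,2) = |bi0 br0 bi1 br1|
+//   addsub(moveldup*b, movehdup*swizzle) subtracts in even lanes and adds in
+//   odd lanes, giving |ar*br - ai*bi   ar*bi + ai*br| -- the complex product.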
+
+template<> EIGEN_STRONG_INLINE Packet2cf pand   <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_and_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf por    <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_or_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pxor   <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(a.v,b.v)); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(&numext::real_ref(*from))); }
+template<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(&numext::real_ref(*from))); }
+
+template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>&  from)
+{
+  Packet2cf res;
+#if EIGEN_GNUC_AT_MOST(4,2)
+  // Workaround annoying "may be used uninitialized in this function" warning with gcc 4.2
+  res.v = _mm_loadl_pi(_mm_set1_ps(0.0f), reinterpret_cast<const __m64*>(&from));
+#elif EIGEN_GNUC_AT_LEAST(4,6)
+  // Suppress annoying "may be used uninitialized in this function" warning with gcc >= 4.6
+  #pragma GCC diagnostic push
+  #pragma GCC diagnostic ignored "-Wuninitialized"
+  res.v = _mm_loadl_pi(res.v, (const __m64*)&from);
+  #pragma GCC diagnostic pop
+#else
+  res.v = _mm_loadl_pi(res.v, (const __m64*)&from);
+#endif
+  return Packet2cf(_mm_movelh_ps(res.v,res.v));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }
+
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> *   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float>  pfirst<Packet2cf>(const Packet2cf& a)
+{
+  #if EIGEN_GNUC_AT_MOST(4,3)
+  // Workaround for a gcc 4.2 ICE - this is not ideal performance-wise, but who cares...
+  // This workaround also fixes invalid code generation with gcc 4.3
+  EIGEN_ALIGN16 std::complex<float> res[2];
+  _mm_store_ps((float*)res, a.v);
+  return res[0];
+  #else
+  std::complex<float> res;
+  _mm_storel_pi((__m64*)&res, a.v);
+  return res;
+  #endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) { return Packet2cf(_mm_castpd_ps(preverse(_mm_castps_pd(a.v)))); }
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)
+{
+  return pfirst(Packet2cf(_mm_add_ps(a.v, _mm_movehl_ps(a.v,a.v))));
+}
+
+template<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)
+{
+  return Packet2cf(_mm_add_ps(_mm_movelh_ps(vecs[0].v,vecs[1].v), _mm_movehl_ps(vecs[1].v,vecs[0].v)));
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)
+{
+  return pfirst(pmul(a, Packet2cf(_mm_movehl_ps(a.v,a.v))));
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet2cf>
+{
+  static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second)
+  {
+    if (Offset==1)
+    {
+      first.v = _mm_movehl_ps(first.v, first.v);
+      first.v = _mm_movelh_ps(first.v, second.v);
+    }
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, false,true>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    #ifdef EIGEN_VECTORIZE_SSE3
+    return internal::pmul(a, pconj(b));
+    #else
+    const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
+    return Packet2cf(_mm_add_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask),
+                                _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
+                                           vec4f_swizzle1(b.v, 1, 0, 3, 2))));
+    #endif
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,false>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    #ifdef EIGEN_VECTORIZE_SSE3
+    return internal::pmul(pconj(a), b);
+    #else
+    const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
+    return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),
+                                _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
+                                                      vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask)));
+    #endif
+  }
+};
+
+template<> struct conj_helper<Packet2cf, Packet2cf, true,true>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const
+  {
+    #ifdef EIGEN_VECTORIZE_SSE3
+    return pconj(internal::pmul(a, b));
+    #else
+    const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
+    return Packet2cf(_mm_sub_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask),
+                                _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
+                                           vec4f_swizzle1(b.v, 1, 0, 3, 2))));
+    #endif
+  }
+};
+
+template<> struct conj_helper<Packet4f, Packet2cf, false,false>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet4f& x, const Packet2cf& y, const Packet2cf& c) const
+  { return padd(c, pmul(x,y)); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet4f& x, const Packet2cf& y) const
+  { return Packet2cf(Eigen::internal::pmul(x, y.v)); }
+};
+
+template<> struct conj_helper<Packet2cf, Packet4f, false,false>
+{
+  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet4f& y, const Packet2cf& c) const
+  { return padd(c, pmul(x,y)); }
+
+  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& x, const Packet4f& y) const
+  { return Packet2cf(Eigen::internal::pmul(x.v, y)); }
+};
+
+template<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)
+{
+  // TODO optimize it for SSE3 and 4
+  Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
+  __m128 s = _mm_mul_ps(b.v,b.v);
+  return Packet2cf(_mm_div_ps(res.v,_mm_add_ps(s,_mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(s), 0xb1)))));
+}
+
+EIGEN_STRONG_INLINE Packet2cf pcplxflip/*<Packet2cf>*/(const Packet2cf& x)
+{
+  return Packet2cf(vec4f_swizzle1(x.v, 1, 0, 3, 2));
+}
+
+
+//---------- double ----------
+struct Packet1cd
+{
+  EIGEN_STRONG_INLINE Packet1cd() {}
+  EIGEN_STRONG_INLINE explicit Packet1cd(const __m128d& a) : v(a) {}
+  __m128d  v;
+};
+
+template<> struct packet_traits<std::complex<double> >  : default_packet_traits
+{
+  typedef Packet1cd type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 0,
+    size = 1,
+
+    HasAdd    = 1,
+    HasSub    = 1,
+    HasMul    = 1,
+    HasDiv    = 1,
+    HasNegate = 1,
+    HasAbs    = 0,
+    HasAbs2   = 0,
+    HasMin    = 0,
+    HasMax    = 0,
+    HasSetLinear = 0
+  };
+};
+
+template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1}; };
+
+template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(a.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a)
+{
+  const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
+  return Packet1cd(_mm_xor_pd(a.v,mask));
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+  // TODO optimize it for SSE3 and 4
+  #ifdef EIGEN_VECTORIZE_SSE3
+  return Packet1cd(_mm_addsub_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),
+                                 _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
+                                            vec2d_swizzle1(b.v, 1, 0))));
+  #else
+  const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
+  return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),
+                              _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
+                                                    vec2d_swizzle1(b.v, 1, 0)), mask)));
+  #endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd pand   <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_and_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd por    <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_or_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pxor   <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_xor_pd(a.v,b.v)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(a.v,b.v)); }
+
+// FIXME force unaligned load, this is a temporary fix
+template<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from)
+{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
+template<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>&  from)
+{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
+
+template<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }
+
+// FIXME force unaligned store, this is a temporary fix
+template<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> *   to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
+template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> *   to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
+
+template<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> *   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+
+template<> EIGEN_STRONG_INLINE std::complex<double>  pfirst<Packet1cd>(const Packet1cd& a)
+{
+  EIGEN_ALIGN16 double res[2];
+  _mm_store_pd(res, a.v);
+  return std::complex<double>(res[0],res[1]);
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a)
+{
+  return pfirst(a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs)
+{
+  return vecs[0];
+}
+
+template<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a)
+{
+  return pfirst(a);
+}
+
+template<int Offset>
+struct palign_impl<Offset,Packet1cd>
+{
+  static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)
+  {
+    // FIXME are we sure we never have to align a Packet1cd?
+    // Even though a std::complex<double> is 16 bytes, it is not necessarily aligned on a 16-byte boundary...
+  }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, false,true>
+{
+  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+  {
+    #ifdef EIGEN_VECTORIZE_SSE3
+    return internal::pmul(a, pconj(b));
+    #else
+    const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
+    return Packet1cd(_mm_add_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask),
+                                _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
+                                           vec2d_swizzle1(b.v, 1, 0))));
+    #endif
+  }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, true,false>
+{
+  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+  {
+    #ifdef EIGEN_VECTORIZE_SSE3
+    return internal::pmul(pconj(a), b);
+    #else
+    const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
+    return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),
+                                _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
+                                                      vec2d_swizzle1(b.v, 1, 0)), mask)));
+    #endif
+  }
+};
+
+template<> struct conj_helper<Packet1cd, Packet1cd, true,true>
+{
+  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const
+  { return padd(pmul(x,y),c); }
+
+  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const
+  {
+    #ifdef EIGEN_VECTORIZE_SSE3
+    return pconj(internal::pmul(a, b));
+    #else
+    const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
+    return Packet1cd(_mm_sub_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask),
+                                _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
+                                           vec2d_swizzle1(b.v, 1, 0))));
+    #endif
+  }
+};
+
+template<> struct conj_helper<Packet2d, Packet1cd, false,false>
+{
+  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet2d& x, const Packet1cd& y, const Packet1cd& c) const
+  { return padd(c, pmul(x,y)); }
+
+  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet2d& x, const Packet1cd& y) const
+  { return Packet1cd(Eigen::internal::pmul(x, y.v)); }
+};
+
+template<> struct conj_helper<Packet1cd, Packet2d, false,false>
+{
+  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet2d& y, const Packet1cd& c) const
+  { return padd(c, pmul(x,y)); }
+
+  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& x, const Packet2d& y) const
+  { return Packet1cd(Eigen::internal::pmul(x.v, y)); }
+};
+
+template<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)
+{
+  // TODO optimize it for SSE3 and 4
+  Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);
+  __m128d s = _mm_mul_pd(b.v,b.v);
+  return Packet1cd(_mm_div_pd(res.v, _mm_add_pd(s,_mm_shuffle_pd(s, s, 0x1))));
+}
+
+EIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)
+{
+  return Packet1cd(preverse(x.v));
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_SSE_H
diff --git a/Eigen/src/Core/arch/SSE/MathFunctions.h b/Eigen/src/Core/arch/SSE/MathFunctions.h
new file mode 100644
index 0000000..d16f30b
--- /dev/null
+++ b/Eigen/src/Core/arch/SSE/MathFunctions.h
@@ -0,0 +1,475 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007 Julien Pommier
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* The sin, cos, exp, and log functions of this file come from
+ * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/
+ */
+
+#ifndef EIGEN_MATH_FUNCTIONS_SSE_H
+#define EIGEN_MATH_FUNCTIONS_SSE_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f plog<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+
+  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
+
+  /* the smallest non-denormalized float number */
+  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos,  0x00800000);
+  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf,     0xff800000);//-1.f/0.f);
+  
+  /* natural logarithm computed for 4 floats at once;
+     returns NaN for x <= 0
+  */
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
+
+
+  Packet4i emm0;
+
+  Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // not-greater-or-equal: true for x < 0, and also when x is NaN
+  Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps());
+
+  x = pmax(x, p4f_min_norm_pos);  /* cut off denormalized stuff */
+  emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
+
+  /* keep only the fractional part */
+  x = _mm_and_ps(x, p4f_inv_mant_mask);
+  x = _mm_or_ps(x, p4f_half);
+
+  emm0 = _mm_sub_epi32(emm0, p4i_0x7f);
+  Packet4f e = padd(_mm_cvtepi32_ps(emm0), p4f_1);
+
+  /* part2:
+     if( x < SQRTHF ) {
+       e -= 1;
+       x = x + x - 1.0;
+     } else { x = x - 1.0; }
+  */
+  Packet4f mask = _mm_cmplt_ps(x, p4f_cephes_SQRTHF);
+  Packet4f tmp = _mm_and_ps(x, mask);
+  x = psub(x, p4f_1);
+  e = psub(e, _mm_and_ps(p4f_1, mask));
+  x = padd(x, tmp);
+
+  Packet4f x2 = pmul(x,x);
+  Packet4f x3 = pmul(x2,x);
+
+  Packet4f y, y1, y2;
+  y  = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
+  y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
+  y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
+  y  = pmadd(y , x, p4f_cephes_log_p2);
+  y1 = pmadd(y1, x, p4f_cephes_log_p5);
+  y2 = pmadd(y2, x, p4f_cephes_log_p8);
+  y = pmadd(y, x3, y1);
+  y = pmadd(y, x3, y2);
+  y = pmul(y, x3);
+
+  y1 = pmul(e, p4f_cephes_log_q1);
+  tmp = pmul(x2, p4f_half);
+  y = padd(y, y1);
+  x = psub(x, tmp);
+  y2 = pmul(e, p4f_cephes_log_q2);
+  x = padd(x, y);
+  x = padd(x, y2);
+  // negative arg will be NAN, 0 will be -INF
+  return _mm_or_ps(_mm_andnot_ps(iszero_mask, _mm_or_ps(x, invalid_mask)),
+                   _mm_and_ps(iszero_mask, p4f_minus_inf));
+}
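+// [Editor's outline, illustrative only -- not part of the original source]
+// The kernel above splits x = m * 2^e with m in [0.5, 1), so that
+// log(x) = e*ln(2) + log(m); log(m) is evaluated with the cephes polynomial
+// (p0..p8, after the sqrt(1/2) branch folds m into [sqrt(1/2), sqrt(2))),
+// and the final masks propagate NaN for x < 0 and -inf for x == 0.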
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pexp<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
+
+
+  _EIGEN_DECLARE_CONST_Packet4f(exp_hi,  88.3762626647950f);
+  _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
+
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
+
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
+
+  Packet4f tmp = _mm_setzero_ps(), fx;
+  Packet4i emm0;
+
+  // clamp x
+  x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
+
+  /* express exp(x) as exp(g + n*log(2)) */
+  fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
+
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  fx = _mm_floor_ps(fx);
+#else
+  emm0 = _mm_cvttps_epi32(fx);
+  tmp  = _mm_cvtepi32_ps(emm0);
+  /* if greater, subtract 1 */
+  Packet4f mask = _mm_cmpgt_ps(tmp, fx);
+  mask = _mm_and_ps(mask, p4f_1);
+  fx = psub(tmp, mask);
+#endif
+
+  tmp = pmul(fx, p4f_cephes_exp_C1);
+  Packet4f z = pmul(fx, p4f_cephes_exp_C2);
+  x = psub(x, tmp);
+  x = psub(x, z);
+
+  z = pmul(x,x);
+
+  Packet4f y = p4f_cephes_exp_p0;
+  y = pmadd(y, x, p4f_cephes_exp_p1);
+  y = pmadd(y, x, p4f_cephes_exp_p2);
+  y = pmadd(y, x, p4f_cephes_exp_p3);
+  y = pmadd(y, x, p4f_cephes_exp_p4);
+  y = pmadd(y, x, p4f_cephes_exp_p5);
+  y = pmadd(y, z, x);
+  y = padd(y, p4f_1);
+
+  // build 2^n
+  emm0 = _mm_cvttps_epi32(fx);
+  emm0 = _mm_add_epi32(emm0, p4i_0x7f);
+  emm0 = _mm_slli_epi32(emm0, 23);
+  return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x);
+}
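+// [Editor's outline, illustrative only -- not part of the original source]
+// The kernel above computes exp(x) = 2^n * exp(g) with n = round(x*log2(e))
+// and g = x - n*ln(2), where ln(2) is split into C1 + C2 for extra precision;
+// exp(g) is a degree-5 polynomial approximation, and 2^n is built by writing
+// n + 127 into the float exponent bits (the _mm_slli_epi32(emm0, 23) step).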
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet2d pexp<Packet2d>(const Packet2d& _x)
+{
+  Packet2d x = _x;
+
+  _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
+  _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
+  _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);
+
+  _EIGEN_DECLARE_CONST_Packet2d(exp_hi,  709.437);
+  _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
+
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
+  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
+  static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
+
+  Packet2d tmp = _mm_setzero_pd(), fx;
+  Packet4i emm0;
+
+  // clamp x
+  x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
+  /* express exp(x) as exp(g + n*log(2)) */
+  fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);
+
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  fx = _mm_floor_pd(fx);
+#else
+  emm0 = _mm_cvttpd_epi32(fx);
+  tmp  = _mm_cvtepi32_pd(emm0);
+  /* if greater, subtract 1 */
+  Packet2d mask = _mm_cmpgt_pd(tmp, fx);
+  mask = _mm_and_pd(mask, p2d_1);
+  fx = psub(tmp, mask);
+#endif
+
+  tmp = pmul(fx, p2d_cephes_exp_C1);
+  Packet2d z = pmul(fx, p2d_cephes_exp_C2);
+  x = psub(x, tmp);
+  x = psub(x, z);
+
+  Packet2d x2 = pmul(x,x);
+
+  Packet2d px = p2d_cephes_exp_p0;
+  px = pmadd(px, x2, p2d_cephes_exp_p1);
+  px = pmadd(px, x2, p2d_cephes_exp_p2);
+  px = pmul (px, x);
+
+  Packet2d qx = p2d_cephes_exp_q0;
+  qx = pmadd(qx, x2, p2d_cephes_exp_q1);
+  qx = pmadd(qx, x2, p2d_cephes_exp_q2);
+  qx = pmadd(qx, x2, p2d_cephes_exp_q3);
+
+  x = pdiv(px,psub(qx,px));
+  x = pmadd(p2d_2,x,p2d_1);
+
+  // build 2^n
+  emm0 = _mm_cvttpd_epi32(fx);
+  emm0 = _mm_add_epi32(emm0, p4i_1023_0);
+  emm0 = _mm_slli_epi32(emm0, 20);
+  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
+  return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
+}
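+// [Editor's outline, illustrative only -- not part of the original source]
+// Same range-reduction scheme for doubles: exp(g) is evaluated as the rational
+// 1 + 2*px/(qx - px), and 2^n is formed by placing n + 1023 into the 11-bit
+// double exponent field, hence the 20-bit shift and the _MM_SHUFFLE lane placement.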
+
+/* evaluation of 4 sines at once, using SSE2 intrinsics.
+
+   The code is an exact rewriting of the cephes sinf function.
+   Precision is excellent as long as x < 8192 (I did not bother to
+   take into account the special handling they have for greater values
+   -- it does not return garbage for arguments over 8192, though, but
+   the extra precision is missing).
+
+   Note that it is such that sinf((float)M_PI) = 8.74e-8, which is a
+   surprising but correct result.
+*/
+
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psin<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+
+  _EIGEN_DECLARE_CONST_Packet4i(1, 1);
+  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
+  _EIGEN_DECLARE_CONST_Packet4i(2, 2);
+  _EIGEN_DECLARE_CONST_Packet4i(4, 4);
+
+  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
+
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
+
+  Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
+
+  Packet4i emm0, emm2;
+  sign_bit = x;
+  /* take the absolute value */
+  x = pabs(x);
+
+  /* take the modulo */
+
+  /* extract the sign bit (upper one) */
+  sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask);
+
+  /* scale by 4/Pi */
+  y = pmul(x, p4f_cephes_FOPI);
+
+  /* store the integer part of y in mm0 */
+  emm2 = _mm_cvttps_epi32(y);
+  /* j=(j+1) & (~1) (see the cephes sources) */
+  emm2 = _mm_add_epi32(emm2, p4i_1);
+  emm2 = _mm_and_si128(emm2, p4i_not1);
+  y = _mm_cvtepi32_ps(emm2);
+  /* get the swap sign flag */
+  emm0 = _mm_and_si128(emm2, p4i_4);
+  emm0 = _mm_slli_epi32(emm0, 29);
+  /* get the polynomial selection mask:
+     there is one polynomial for 0 <= x <= Pi/4
+     and another one for Pi/4 < x <= Pi/2.
+
+     Both branches will be computed.
+  */
+  emm2 = _mm_and_si128(emm2, p4i_2);
+  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+  Packet4f swap_sign_bit = _mm_castsi128_ps(emm0);
+  Packet4f poly_mask = _mm_castsi128_ps(emm2);
+  sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
+
+  /* The magic pass: "Extended precision modular arithmetic"
+     x = ((x - y * DP1) - y * DP2) - y * DP3; */
+  xmm1 = pmul(y, p4f_minus_cephes_DP1);
+  xmm2 = pmul(y, p4f_minus_cephes_DP2);
+  xmm3 = pmul(y, p4f_minus_cephes_DP3);
+  x = padd(x, xmm1);
+  x = padd(x, xmm2);
+  x = padd(x, xmm3);
+
+  /* Evaluate the first polynomial  (0 <= x <= Pi/4) */
+  y = p4f_coscof_p0;
+  Packet4f z = _mm_mul_ps(x,x);
+
+  y = pmadd(y, z, p4f_coscof_p1);
+  y = pmadd(y, z, p4f_coscof_p2);
+  y = pmul(y, z);
+  y = pmul(y, z);
+  Packet4f tmp = pmul(z, p4f_half);
+  y = psub(y, tmp);
+  y = padd(y, p4f_1);
+
+  /* Evaluate the second polynomial  (Pi/4 <= x <= Pi/2) */
+
+  Packet4f y2 = p4f_sincof_p0;
+  y2 = pmadd(y2, z, p4f_sincof_p1);
+  y2 = pmadd(y2, z, p4f_sincof_p2);
+  y2 = pmul(y2, z);
+  y2 = pmul(y2, x);
+  y2 = padd(y2, x);
+
+  /* select the correct result from the two polynomials */
+  y2 = _mm_and_ps(poly_mask, y2);
+  y = _mm_andnot_ps(poly_mask, y);
+  y = _mm_or_ps(y,y2);
+  /* update the sign */
+  return _mm_xor_ps(y, sign_bit);
+}
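+// Note on the "magic pass" above (explanatory, not library code): Pi/4 is
+// represented as -(DP1 + DP2 + DP3), three floats of decreasing magnitude,
+// so subtracting j*Pi/4 as three successive multiply-add steps keeps far
+// more bits than a single single-precision subtraction would, which is what
+// preserves accuracy for moderately large arguments.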
+
+/* almost the same as psin */
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f pcos<Packet4f>(const Packet4f& _x)
+{
+  Packet4f x = _x;
+  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
+  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
+
+  _EIGEN_DECLARE_CONST_Packet4i(1, 1);
+  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
+  _EIGEN_DECLARE_CONST_Packet4i(2, 2);
+  _EIGEN_DECLARE_CONST_Packet4i(4, 4);
+
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);
+  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
+  _EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);
+  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
+
+  Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
+  Packet4i emm0, emm2;
+
+  x = pabs(x);
+
+  /* scale by 4/Pi */
+  y = pmul(x, p4f_cephes_FOPI);
+
+  /* get the integer part of y */
+  emm2 = _mm_cvttps_epi32(y);
+  /* j=(j+1) & (~1) (see the cephes sources) */
+  emm2 = _mm_add_epi32(emm2, p4i_1);
+  emm2 = _mm_and_si128(emm2, p4i_not1);
+  y = _mm_cvtepi32_ps(emm2);
+
+  emm2 = _mm_sub_epi32(emm2, p4i_2);
+
+  /* get the swap sign flag */
+  emm0 = _mm_andnot_si128(emm2, p4i_4);
+  emm0 = _mm_slli_epi32(emm0, 29);
+  /* get the polynomial selection mask */
+  emm2 = _mm_and_si128(emm2, p4i_2);
+  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
+
+  Packet4f sign_bit = _mm_castsi128_ps(emm0);
+  Packet4f poly_mask = _mm_castsi128_ps(emm2);
+
+  /* The magic pass: "Extended precision modular arithmetic"
+     x = ((x - y * DP1) - y * DP2) - y * DP3; */
+  xmm1 = pmul(y, p4f_minus_cephes_DP1);
+  xmm2 = pmul(y, p4f_minus_cephes_DP2);
+  xmm3 = pmul(y, p4f_minus_cephes_DP3);
+  x = padd(x, xmm1);
+  x = padd(x, xmm2);
+  x = padd(x, xmm3);
+
+  /* Evaluate the first polynomial  (0 <= x <= Pi/4) */
+  y = p4f_coscof_p0;
+  Packet4f z = pmul(x,x);
+
+  y = pmadd(y,z,p4f_coscof_p1);
+  y = pmadd(y,z,p4f_coscof_p2);
+  y = pmul(y, z);
+  y = pmul(y, z);
+  Packet4f tmp = _mm_mul_ps(z, p4f_half);
+  y = psub(y, tmp);
+  y = padd(y, p4f_1);
+
+  /* Evaluate the second polynomial  (Pi/4 <= x <= Pi/2) */
+  Packet4f y2 = p4f_sincof_p0;
+  y2 = pmadd(y2, z, p4f_sincof_p1);
+  y2 = pmadd(y2, z, p4f_sincof_p2);
+  y2 = pmul(y2, z);
+  y2 = pmadd(y2, x, x);
+
+  /* select the correct result from the two polynomials */
+  y2 = _mm_and_ps(poly_mask, y2);
+  y  = _mm_andnot_ps(poly_mask, y);
+  y  = _mm_or_ps(y,y2);
+
+  /* update the sign */
+  return _mm_xor_ps(y, sign_bit);
+}
+
+#if EIGEN_FAST_MATH
+
+// This is based on Quake3's fast inverse square root.
+// For details see here: http://www.beyond3d.com/content/articles/8/
+// It lacks 1 bit (or 2 bits in some rare cases) of precision, and does not handle negative, +inf, or denormalized numbers correctly.
+template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
+Packet4f psqrt<Packet4f>(const Packet4f& _x)
+{
+  Packet4f half = pmul(_x, pset1<Packet4f>(.5f));
+
+  /* select only the inverse sqrt of non-zero inputs */
+  Packet4f non_zero_mask = _mm_cmpge_ps(_x, pset1<Packet4f>((std::numeric_limits<float>::min)()));
+  Packet4f x = _mm_and_ps(non_zero_mask, _mm_rsqrt_ps(_x));
+
+  x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x))));
+  return pmul(_x,x);
+}
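+// Scalar sketch of the fast path above (illustrative only): _mm_rsqrt_ps
+// returns roughly 12 bits of 1/sqrt(a); the single Newton-Raphson step
+//   x' = x * (1.5 - 0.5 * a * x * x)
+// approximately doubles that accuracy, and sqrt(a) is recovered at the end
+// as a * (1/sqrt(a)). Zero inputs are masked out first because rsqrt(0) = inf.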
+
+#else
+
+template<> EIGEN_STRONG_INLINE Packet4f psqrt<Packet4f>(const Packet4f& x) { return _mm_sqrt_ps(x); }
+
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet2d psqrt<Packet2d>(const Packet2d& x) { return _mm_sqrt_pd(x); }
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATH_FUNCTIONS_SSE_H
diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h
new file mode 100644
index 0000000..fc8ae50
--- /dev/null
+++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -0,0 +1,649 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PACKET_MATH_SSE_H
+#define EIGEN_PACKET_MATH_SSE_H
+
+namespace Eigen {
+
+namespace internal {
+
+#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
+#endif
+
+#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
+#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
+#endif
+
+typedef __m128  Packet4f;
+typedef __m128i Packet4i;
+typedef __m128d Packet2d;
+
+template<> struct is_arithmetic<__m128>  { enum { value = true }; };
+template<> struct is_arithmetic<__m128i> { enum { value = true }; };
+template<> struct is_arithmetic<__m128d> { enum { value = true }; };
+
+#define vec4f_swizzle1(v,p,q,r,s) \
+  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))
+
+#define vec4i_swizzle1(v,p,q,r,s) \
+  (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))
+
+#define vec2d_swizzle1(v,p,q) \
+  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))
+  
+#define vec4f_swizzle2(a,b,p,q,r,s) \
+  (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))
+
+#define vec4i_swizzle2(a,b,p,q,r,s) \
+  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
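+// Usage sketch for the swizzle helpers (illustrative, not library code): the
+// (p,q,r,s) indices pick, for each output lane, which input lane to read, so
+//   vec4f_swizzle1(v,3,2,1,0)  // reverses the four lanes of v
+//   vec4f_swizzle1(v,0,0,0,0)  // broadcasts lane 0 to all lanes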
+
+#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
+  const Packet4f p4f_##NAME = pset1<Packet4f>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
+  const Packet2d p2d_##NAME = pset1<Packet2d>(X)
+
+#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
+  const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))
+
+#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
+  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
+
+
+template<> struct packet_traits<float>  : default_packet_traits
+{
+  typedef Packet4f type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=4,
+
+    HasDiv  = 1,
+    HasSin  = EIGEN_FAST_MATH,
+    HasCos  = EIGEN_FAST_MATH,
+    HasLog  = 1,
+    HasExp  = 1,
+    HasSqrt = 1
+  };
+};
+template<> struct packet_traits<double> : default_packet_traits
+{
+  typedef Packet2d type;
+  enum {
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=2,
+
+    HasDiv  = 1,
+    HasExp  = 1,
+    HasSqrt = 1
+  };
+};
+template<> struct packet_traits<int>    : default_packet_traits
+{
+  typedef Packet4i type;
+  enum {
+    // FIXME check the Has*
+    Vectorizable = 1,
+    AlignedOnScalar = 1,
+    size=4
+  };
+};
+
+template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
+template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2}; };
+template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };
+
+#if defined(_MSC_VER) && (_MSC_VER==1500)
+// Workaround MSVC 9 internal compiler error.
+// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32-bit+SSE mode
+// TODO: check whether a better fix exists, like adding a pset0() function (it crashed on pset1(0)).
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
+template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
+#else
+template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set1_ps(from); }
+template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
+template<> EIGEN_STRONG_INLINE Packet2d plset<double>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
+template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
+
+template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
+{
+  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
+  return _mm_xor_ps(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
+{
+  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
+  return _mm_xor_pd(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
+{
+  return psub(_mm_setr_epi32(0,0,0,0), a);
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
+template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_mullo_epi32(a,b);
+#else
+  // this version is slightly faster than 4 scalar products
+  return vec4i_swizzle1(
+            vec4i_swizzle2(
+              _mm_mul_epu32(a,b),
+              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
+                            vec4i_swizzle1(b,1,0,3,2)),
+              0,2,0,2),
+            0,2,1,3);
+#endif
+}
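+// How the SSE2 branch above works (explanatory note): _mm_mul_epu32 only
+// multiplies the even lanes (0 and 2) of its operands into two 64-bit
+// results; a second _mm_mul_epu32 on lane-swapped inputs covers the odd
+// lanes, and the two swizzles gather the four low 32-bit halves back into
+// order (0,1,2,3). Since only the low 32 bits of each product are kept,
+// the result is the correct wrapped product for signed ints as well.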
+
+template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
+{ eigen_assert(false && "packet integer division is not supported by SSE");
+  return pset1<Packet4i>(0);
+}
+
+// for some weird reasons, pmadd has to be overloaded for packets of integers
+template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_min_epi32(a,b);
+#else
+  // after some benchmarking, this version *is* faster than a scalar implementation
+  Packet4i mask = _mm_cmplt_epi32(a,b);
+  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+#endif
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
+{
+#ifdef EIGEN_VECTORIZE_SSE4_1
+  return _mm_max_epi32(a,b);
+#else
+  // after some benchmarking, this version *is* faster than a scalar implementation
+  Packet4i mask = _mm_cmpgt_epi32(a,b);
+  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
+#endif
+}
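+// The SSE2 fallbacks above use the classic branchless select: with a mask m
+// that is all-ones in the lanes where the comparison holds,
+//   (m & a) | (~m & b)
+// picks a or b per lane; SSE4.1 replaces the whole sequence by a single
+// min/max instruction.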
+
+template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
+template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
+template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }
+
+template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
+template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
+template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const Packet4i*>(from)); }
+
+#if defined(_MSC_VER)
+  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*  from) {
+    EIGEN_DEBUG_UNALIGNED_LOAD
+    #if (_MSC_VER==1600)
+    // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
+    // (i.e., they do not emit an unaligned load!!)
+    // TODO On most architectures this version should also be faster than a single _mm_loadu_ps,
+    // so we could also enable it for MSVC08, but first we have to make sure the latter does not generate crap when doing so...
+    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
+    res = _mm_loadh_pi(res, (const __m64*)(from+2));
+    return res;
+    #else
+    return _mm_loadu_ps(from);
+    #endif
+  }
+  template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
+  template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from)); }
+#else
+// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
+// require pointer casting to incompatible pointer types and leads to invalid code
+// because of the strict aliasing rule. The "dummy" stuff is required to enforce
+// a correct instruction dependency.
+// TODO: do the same for MSVC (ICC is compatible)
+// NOTE: with the code below, MSVC's compiler crashes!
+
+#if defined(__GNUC__) && defined(__i386__)
+  // bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
+  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
+#elif defined(__clang__)
+  // bug 201: Segfaults in __mm_loadh_pd with clang 2.8
+  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
+#else
+  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
+{
+  EIGEN_DEBUG_UNALIGNED_LOAD
+#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
+  return _mm_loadu_ps(from);
+#else
+  __m128d res;
+  res =  _mm_load_sd((const double*)(from)) ;
+  res =  _mm_loadh_pd(res, (const double*)(from+2)) ;
+  return _mm_castpd_ps(res);
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
+{
+  EIGEN_DEBUG_UNALIGNED_LOAD
+#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
+  return _mm_loadu_pd(from);
+#else
+  __m128d res;
+  res = _mm_load_sd(from) ;
+  res = _mm_loadh_pd(res,from+1);
+  return res;
+#endif
+}
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
+{
+  EIGEN_DEBUG_UNALIGNED_LOAD
+#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
+  return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from));
+#else
+  __m128d res;
+  res =  _mm_load_sd((const double*)(from)) ;
+  res =  _mm_loadh_pd(res, (const double*)(from+2)) ;
+  return _mm_castpd_si128(res);
+#endif
+}
+#endif
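+// Sketch of the trick used above (explanatory note): _mm_load_sd performs an
+// 8-byte load into the low half of an xmm register and _mm_loadh_pd fills the
+// high half, so two 8-byte loads emulate a 16-byte unaligned load; on the
+// CPUs of that era this was often faster than a single movups.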
+
+template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
+{
+  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
+}
+template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double*  from)
+{ return pset1<Packet2d>(from[0]); }
+template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
+{
+  Packet4i tmp;
+  tmp = _mm_loadl_epi64(reinterpret_cast<const Packet4i*>(from));
+  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
+}
+
+template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
+template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<Packet4i*>(to), from); }
+
+template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
+  EIGEN_DEBUG_UNALIGNED_STORE
+  _mm_storel_pd((to), from);
+  _mm_storeh_pd((to+1), from);
+}
+template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castps_pd(from)); }
+template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*      to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castsi128_pd(from)); }
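+// Same idea for stores (explanatory note): _mm_storel_pd/_mm_storeh_pd emit
+// two 8-byte stores instead of one unaligned 16-byte movups, and the float
+// and int variants simply reinterpret their packet as a Packet2d to reuse
+// the double path.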
+
+// some compilers might be tempted to perform multiple moves instead of using a vector path.
+template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
+{
+  Packet4f pa = _mm_set_ss(a);
+  pstore(to, vec4f_swizzle1(pa,0,0,0,0));
+}
+// some compilers might be tempted to perform multiple moves instead of using a vector path.
+template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
+{
+  Packet2d pa = _mm_set_sd(a);
+  pstore(to, vec2d_swizzle1(pa,0,0));
+}
+
+template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
+
+#if defined(_MSC_VER) && defined(_WIN64) && !defined(__INTEL_COMPILER)
+// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
+// Direct access to the struct members fixed bug #62.
+template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
+template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
+#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
+template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
+template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
+#else
+template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
+template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
+template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
+#endif
+
+template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
+{ return _mm_shuffle_ps(a,a,0x1B); }
+template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
+{ return _mm_shuffle_pd(a,a,0x1); }
+template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
+{ return _mm_shuffle_epi32(a,0x1B); }
+
+
+template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
+{
+  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
+  return _mm_and_ps(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
+{
+  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
+  return _mm_and_pd(a,mask);
+}
+template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
+{
+  #ifdef EIGEN_VECTORIZE_SSSE3
+  return _mm_abs_epi32(a);
+  #else
+  Packet4i aux = _mm_srai_epi32(a,31);
+  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
+  #endif
+}
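+// The SSE2 branch above is the standard two's-complement abs trick: with
+// s = a >> 31 (arithmetic shift, so each lane of s is 0 or -1),
+// (a ^ s) - s yields a when s == 0 and -a when s == -1.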
+
+EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
+{
+  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
+  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
+  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
+  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
+}
+
+#ifdef EIGEN_VECTORIZE_SSE3
+// TODO implement SSE2 versions as well as integer versions
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
+}
+template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
+{
+  return _mm_hadd_pd(vecs[0], vecs[1]);
+}
+// SSSE3 version:
+// EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs)
+// {
+//   return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
+// }
+
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp0 = _mm_hadd_ps(a,a);
+  return pfirst(_mm_hadd_ps(tmp0, tmp0));
+}
+
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return pfirst(_mm_hadd_pd(a, a)); }
+
+// SSSE3 version:
+// EIGEN_STRONG_INLINE float predux(const Packet4i& a)
+// {
+//   Packet4i tmp0 = _mm_hadd_epi32(a,a);
+//   return pfirst(_mm_hadd_epi32(tmp0, tmp0));
+// }
+#else
+// SSE2 versions
+template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
+{
+  return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
+{
+  Packet4f tmp0, tmp1, tmp2;
+  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
+  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
+  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
+  tmp0 = _mm_add_ps(tmp0, tmp1);
+  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
+  tmp1 = _mm_add_ps(tmp1, tmp2);
+  tmp2 = _mm_movehl_ps(tmp1, tmp0);
+  tmp0 = _mm_movelh_ps(tmp0, tmp1);
+  return _mm_add_ps(tmp0, tmp2);
+}
+
+template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
+{
+  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
+}
+#endif  // SSE3
+
+template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
+{
+  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
+  return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1));
+}
+
+template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
+{
+  Packet4i tmp0, tmp1, tmp2;
+  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
+  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
+  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
+  tmp0 = _mm_add_epi32(tmp0, tmp1);
+  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
+  tmp1 = _mm_add_epi32(tmp1, tmp2);
+  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
+  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
+  return _mm_add_epi32(tmp0, tmp2);
+}
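+// The preduxp implementations above are effectively a 4x4 transpose done
+// with unpacklo/unpackhi interleaves followed by vertical adds: after the
+// transpose, lane i of the result holds the horizontal sum of vecs[i].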
+
+// Other reduction functions:
+
+// mul
+template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
+{
+  return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
+{
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., reusing pmul is very slow!)
+  // TODO try to call _mm_mul_epu32 directly
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  return  (aux[0] * aux[1]) * (aux[2] * aux[3]);
+}
+
+// min
+template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
+{
+  return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
+{
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., it does not like using std::min after the pstore!)
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
+  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
+  return aux0<aux2 ? aux0 : aux2;
+}
+
+// max
+template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
+{
+  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
+  return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
+}
+template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
+{
+  return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
+}
+template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
+{
+  // after some experiments, it seems this is the fastest way to implement it
+  // for GCC (e.g., it does not like using std::max after the pstore!)
+  EIGEN_ALIGN16 int aux[4];
+  pstore(aux, a);
+  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
+  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
+  return aux0>aux2 ? aux0 : aux2;
+}
+
+#if (defined __GNUC__)
+// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f&  a, const Packet4f&  b, const Packet4f&  c)
+// {
+//   Packet4f res = b;
+//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
+//   return res;
+// }
+// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i&  a, const Packet4i&  b, const int i)
+// {
+//   Packet4i res = a;
+//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
+//   return res;
+// }
+#endif
+
+#ifdef EIGEN_VECTORIZE_SSSE3
+// SSSE3 versions
+template<int Offset>
+struct palign_impl<Offset,Packet4f>
+{
+  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
+  {
+    if (Offset!=0)
+      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
+  }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet4i>
+{
+  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
+  {
+    if (Offset!=0)
+      first = _mm_alignr_epi8(second,first, Offset*4);
+  }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet2d>
+{
+  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
+  {
+    if (Offset==1)
+      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
+  }
+};
+#else
+// SSE2 versions
+template<int Offset>
+struct palign_impl<Offset,Packet4f>
+{
+  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
+  {
+    if (Offset==1)
+    {
+      first = _mm_move_ss(first,second);
+      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
+    }
+    else if (Offset==2)
+    {
+      first = _mm_movehl_ps(first,first);
+      first = _mm_movelh_ps(first,second);
+    }
+    else if (Offset==3)
+    {
+      first = _mm_move_ss(first,second);
+      first = _mm_shuffle_ps(first,second,0x93);
+    }
+  }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet4i>
+{
+  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
+  {
+    if (Offset==1)
+    {
+      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
+      first = _mm_shuffle_epi32(first,0x39);
+    }
+    else if (Offset==2)
+    {
+      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
+      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
+    }
+    else if (Offset==3)
+    {
+      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
+      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
+    }
+  }
+};
+
+template<int Offset>
+struct palign_impl<Offset,Packet2d>
+{
+  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
+  {
+    if (Offset==1)
+    {
+      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
+      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
+    }
+  }
+};
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PACKET_MATH_SSE_H
diff --git a/Eigen/src/Core/products/CMakeLists.txt b/Eigen/src/Core/products/CMakeLists.txt
new file mode 100644
index 0000000..21fc94a
--- /dev/null
+++ b/Eigen/src/Core/products/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_Product_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Core_Product_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/products COMPONENT Devel
+  )
diff --git a/Eigen/src/Core/products/CoeffBasedProduct.h b/Eigen/src/Core/products/CoeffBasedProduct.h
new file mode 100644
index 0000000..2a9d65b
--- /dev/null
+++ b/Eigen/src/Core/products/CoeffBasedProduct.h
@@ -0,0 +1,476 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COEFFBASED_PRODUCT_H
+#define EIGEN_COEFFBASED_PRODUCT_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/*********************************************************************************
+*  Coefficient based product implementation.
+*  It is designed for the following use cases:
+*  - small fixed sizes
+*  - lazy products
+*********************************************************************************/
+
+/* Since all the dimensions of the product are small, here we can rely
+ * on the generic Assign mechanism to evaluate the product per coeff (or packet).
+ *
+ * Note that here the inner-loops should always be unrolled.
+ */
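+/* Concretely (an illustrative note, not library code): each destination
+ * coefficient is evaluated as
+ *   res(i,j) = sum_k lhs(i,k) * rhs(k,j)
+ * with the k loop fully unrolled by the product_coeff_impl recursion below;
+ * e.g. for a 2x2 product, res(0,0) = lhs(0,0)*rhs(0,0) + lhs(0,1)*rhs(1,0).
+ */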
+
+template<int Traversal, int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl;
+
+template<int StorageOrder, int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl;
+
+template<typename LhsNested, typename RhsNested, int NestingFlags>
+struct traits<CoeffBasedProduct<LhsNested,RhsNested,NestingFlags> >
+{
+  typedef MatrixXpr XprKind;
+  typedef typename remove_all<LhsNested>::type _LhsNested;
+  typedef typename remove_all<RhsNested>::type _RhsNested;
+  typedef typename scalar_product_traits<typename _LhsNested::Scalar, typename _RhsNested::Scalar>::ReturnType Scalar;
+  typedef typename promote_storage_type<typename traits<_LhsNested>::StorageKind,
+                                           typename traits<_RhsNested>::StorageKind>::ret StorageKind;
+  typedef typename promote_index_type<typename traits<_LhsNested>::Index,
+                                         typename traits<_RhsNested>::Index>::type Index;
+
+  enum {
+      LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+      RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+      LhsFlags = _LhsNested::Flags,
+      RhsFlags = _RhsNested::Flags,
+
+      RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
+      ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
+      InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+
+      MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
+
+      LhsRowMajor = LhsFlags & RowMajorBit,
+      RhsRowMajor = RhsFlags & RowMajorBit,
+
+      SameType = is_same<typename _LhsNested::Scalar,typename _RhsNested::Scalar>::value,
+
+      CanVectorizeRhs = RhsRowMajor && (RhsFlags & PacketAccessBit)
+                      && (ColsAtCompileTime == Dynamic
+                          || ( (ColsAtCompileTime % packet_traits<Scalar>::size) == 0
+                              && (RhsFlags&AlignedBit)
+                             )
+                         ),
+
+      CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit)
+                      && (RowsAtCompileTime == Dynamic
+                          || ( (RowsAtCompileTime % packet_traits<Scalar>::size) == 0
+                              && (LhsFlags&AlignedBit)
+                             )
+                         ),
+
+      EvalToRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
+                     : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
+                     : (RhsRowMajor && !CanVectorizeLhs),
+
+      Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & ~RowMajorBit)
+            | (EvalToRowMajor ? RowMajorBit : 0)
+            | NestingFlags
+            | (LhsFlags & RhsFlags & AlignedBit)
+            // TODO enable vectorization for mixed types
+            | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0),
+
+      CoeffReadCost = InnerSize == Dynamic ? Dynamic
+                    : InnerSize == 0 ? 0
+                    : InnerSize * (NumTraits<Scalar>::MulCost + LhsCoeffReadCost + RhsCoeffReadCost)
+                      + (InnerSize - 1) * NumTraits<Scalar>::AddCost,
+
+      /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside
+      * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner
+      * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect
+      * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI.
+      */
+      CanVectorizeInner =    SameType
+                          && LhsRowMajor
+                          && (!RhsRowMajor)
+                          && (LhsFlags & RhsFlags & ActualPacketAccessBit)
+                          && (LhsFlags & RhsFlags & AlignedBit)
+                          && (InnerSize % packet_traits<Scalar>::size == 0)
+    };
+};
+
+} // end namespace internal
+
+template<typename LhsNested, typename RhsNested, int NestingFlags>
+class CoeffBasedProduct
+  : internal::no_assignment_operator,
+    public MatrixBase<CoeffBasedProduct<LhsNested, RhsNested, NestingFlags> >
+{
+  public:
+
+    typedef MatrixBase<CoeffBasedProduct> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(CoeffBasedProduct)
+    typedef typename Base::PlainObject PlainObject;
+
+  private:
+
+    typedef typename internal::traits<CoeffBasedProduct>::_LhsNested _LhsNested;
+    typedef typename internal::traits<CoeffBasedProduct>::_RhsNested _RhsNested;
+
+    enum {
+      PacketSize = internal::packet_traits<Scalar>::size,
+      InnerSize  = internal::traits<CoeffBasedProduct>::InnerSize,
+      Unroll = CoeffReadCost != Dynamic && CoeffReadCost <= EIGEN_UNROLLING_LIMIT,
+      CanVectorizeInner = internal::traits<CoeffBasedProduct>::CanVectorizeInner
+    };
+
+    typedef internal::product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
+                                   Unroll ? InnerSize : Dynamic,
+                                   _LhsNested, _RhsNested, Scalar> ScalarCoeffImpl;
+
+    typedef CoeffBasedProduct<LhsNested,RhsNested,NestByRefBit> LazyCoeffBasedProductType;
+
+  public:
+
+    inline CoeffBasedProduct(const CoeffBasedProduct& other)
+      : Base(), m_lhs(other.m_lhs), m_rhs(other.m_rhs)
+    {}
+
+    template<typename Lhs, typename Rhs>
+    inline CoeffBasedProduct(const Lhs& lhs, const Rhs& rhs)
+      : m_lhs(lhs), m_rhs(rhs)
+    {
+      // we don't allow taking products of matrices of different real types, as that wouldn't be vectorizable.
+      // We still allow mixing T and complex<T>.
+      EIGEN_STATIC_ASSERT((internal::scalar_product_traits<typename Lhs::RealScalar, typename Rhs::RealScalar>::Defined),
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+      eigen_assert(lhs.cols() == rhs.rows()
+        && "invalid matrix product"
+        && "if you wanted a coeff-wise or a dot product use the respective explicit functions");
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+    EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const
+    {
+      Scalar res;
+      ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
+      return res;
+    }
+
+    /* Allow index-based non-packet access. It is impossible, though, to allow index-based packet access,
+     * which is why we don't set the LinearAccessBit.
+     */
+    EIGEN_STRONG_INLINE const Scalar coeff(Index index) const
+    {
+      Scalar res;
+      const Index row = RowsAtCompileTime == 1 ? 0 : index;
+      const Index col = RowsAtCompileTime == 1 ? index : 0;
+      ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
+      return res;
+    }
+
+    template<int LoadMode>
+    EIGEN_STRONG_INLINE const PacketScalar packet(Index row, Index col) const
+    {
+      PacketScalar res;
+      internal::product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
+                              Unroll ? InnerSize : Dynamic,
+                              _LhsNested, _RhsNested, PacketScalar, LoadMode>
+        ::run(row, col, m_lhs, m_rhs, res);
+      return res;
+    }
+
+    // Implicit conversion to the nested type (trigger the evaluation of the product)
+    EIGEN_STRONG_INLINE operator const PlainObject& () const
+    {
+      m_result.lazyAssign(*this);
+      return m_result;
+    }
+
+    const _LhsNested& lhs() const { return m_lhs; }
+    const _RhsNested& rhs() const { return m_rhs; }
+
+    const Diagonal<const LazyCoeffBasedProductType,0> diagonal() const
+    { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); }
+
+    template<int DiagonalIndex>
+    const Diagonal<const LazyCoeffBasedProductType,DiagonalIndex> diagonal() const
+    { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this); }
+
+    const Diagonal<const LazyCoeffBasedProductType,Dynamic> diagonal(Index index) const
+    { return reinterpret_cast<const LazyCoeffBasedProductType&>(*this).diagonal(index); }
+
+  protected:
+    typename internal::add_const_on_value_type<LhsNested>::type m_lhs;
+    typename internal::add_const_on_value_type<RhsNested>::type m_rhs;
+
+    mutable PlainObject m_result;
+};
+
+namespace internal {
+
+// here we need to overload the nested rule for products
+// such that the nested type is a const reference to a plain matrix
+template<typename Lhs, typename Rhs, int N, typename PlainObject>
+struct nested<CoeffBasedProduct<Lhs,Rhs,EvalBeforeNestingBit|EvalBeforeAssigningBit>, N, PlainObject>
+{
+  typedef PlainObject const& type;
+};
+
+/***************************************************************************
+* Normal product .coeff() implementation (with meta-unrolling)
+***************************************************************************/
+
+/**************************************
+*** Scalar path  - no vectorization ***
+**************************************/
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
+  {
+    product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
+    res += lhs.coeff(row, UnrollingIndex-1) * rhs.coeff(UnrollingIndex-1, col);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<DefaultTraversal, 1, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
+  {
+    res = lhs.coeff(row, 0) * rhs.coeff(0, col);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
+  {
+    res = RetScalar(0);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res)
+  {
+    res = (lhs.row(row).transpose().cwiseProduct( rhs.col(col) )).sum();
+  }
+};
+
+/*******************************************
+*** Scalar path with inner vectorization ***
+*******************************************/
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet>
+struct product_coeff_vectorized_unroller
+{
+  typedef typename Lhs::Index Index;
+  enum { PacketSize = packet_traits<typename Lhs::Scalar>::size };
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
+  {
+    product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
+    pres = padd(pres, pmul( lhs.template packet<Aligned>(row, UnrollingIndex) , rhs.template packet<Aligned>(UnrollingIndex, col) ));
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet>
+struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
+  {
+    pres = pmul(lhs.template packet<Aligned>(row, 0) , rhs.template packet<Aligned>(0, col));
+  }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<InnerVectorizedTraversal, 0, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
+  {
+    res = 0;
+  }
+};
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::PacketScalar Packet;
+  typedef typename Lhs::Index Index;
+  enum { PacketSize = packet_traits<typename Lhs::Scalar>::size };
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
+  {
+    Packet pres;
+    product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
+    res = predux(pres);
+  }
+};
+
+template<typename Lhs, typename Rhs, int LhsRows = Lhs::RowsAtCompileTime, int RhsCols = Rhs::ColsAtCompileTime>
+struct product_coeff_vectorized_dyn_selector
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+  {
+    res = lhs.row(row).transpose().cwiseProduct(rhs.col(col)).sum();
+  }
+};
+
+// NOTE the following 3 specializations exist because taking .col(0) on a vector is a bit slower
+// NOTE maybe they are now useless since we have a specialization for Block<Matrix>
+template<typename Lhs, typename Rhs, int RhsCols>
+struct product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,RhsCols>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+  {
+    res = lhs.transpose().cwiseProduct(rhs.col(col)).sum();
+  }
+};
+
+template<typename Lhs, typename Rhs, int LhsRows>
+struct product_coeff_vectorized_dyn_selector<Lhs,Rhs,LhsRows,1>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+  {
+    res = lhs.row(row).transpose().cwiseProduct(rhs).sum();
+  }
+};
+
+template<typename Lhs, typename Rhs>
+struct product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,1>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+  {
+    res = lhs.transpose().cwiseProduct(rhs).sum();
+  }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<InnerVectorizedTraversal, Dynamic, Lhs, Rhs, RetScalar>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
+  {
+    product_coeff_vectorized_dyn_selector<Lhs,Rhs>::run(row, col, lhs, rhs, res);
+  }
+};
+
+/*******************
+*** Packet path  ***
+*******************/
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
+  {
+    product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
+    res =  pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex-1)), rhs.template packet<LoadMode>(UnrollingIndex-1, col), res);
+  }
+};
+
+template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
+  {
+    product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
+    res =  pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex-1), pset1<Packet>(rhs.coeff(UnrollingIndex-1, col)), res);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
+  {
+    res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
+  {
+    res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
+  {
+    res = pset1<Packet>(0);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
+  {
+    res = pset1<Packet>(0);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
+  {
+    res = pset1<Packet>(0);
+    for(Index i = 0; i < lhs.cols(); ++i)
+      res =  pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
+{
+  typedef typename Lhs::Index Index;
+  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
+  {
+    res = pset1<Packet>(0);
+    for(Index i = 0; i < lhs.cols(); ++i)
+      res =  pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COEFFBASED_PRODUCT_H
diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
new file mode 100644
index 0000000..bcdca5b
--- /dev/null
+++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -0,0 +1,1341 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
+#define EIGEN_GENERAL_BLOCK_PANEL_H
+
+namespace Eigen { 
+  
+namespace internal {
+
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
+class gebp_traits;
+
+
+/** \internal \returns b if a<=0, and returns a otherwise. */
+inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
+{
+  return a<=0 ? b : a;
+}
+
+/** \internal */
+inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdiff_t* l2=0)
+{
+  static std::ptrdiff_t m_l1CacheSize = 0;
+  static std::ptrdiff_t m_l2CacheSize = 0;
+  if(m_l2CacheSize==0)
+  {
+    m_l1CacheSize = manage_caching_sizes_helper(queryL1CacheSize(),8 * 1024);
+    m_l2CacheSize = manage_caching_sizes_helper(queryTopLevelCacheSize(),1*1024*1024);
+  }
+  
+  if(action==SetAction)
+  {
+    // set the CPU cache sizes, and hence all derived block sizes, from the given global cache sizes in bytes
+    eigen_internal_assert(l1!=0 && l2!=0);
+    m_l1CacheSize = *l1;
+    m_l2CacheSize = *l2;
+  }
+  else if(action==GetAction)
+  {
+    eigen_internal_assert(l1!=0 && l2!=0);
+    *l1 = m_l1CacheSize;
+    *l2 = m_l2CacheSize;
+  }
+  else
+  {
+    eigen_internal_assert(false);
+  }
+}
+
+/** \brief Computes the blocking parameters for an m x k times k x n matrix product
+  *
+  * \param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension.
+  * \param[in,out] m Input: the number of rows of the left hand side. Output: the blocking size along the same dimension.
+  * \param[in,out] n Input: the number of columns of the right hand side. Output: the blocking size along the same dimension.
+  *
+  * Given an m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
+  * this function computes the blocking size parameters along the respective dimensions
+  * for matrix products and related algorithms. The blocking sizes depend on various
+  * parameters:
+  * - the L1 and L2 cache sizes,
+  * - the register level blocking sizes defined by gebp_traits,
+  * - the number of scalars that fit into a packet (when vectorization is enabled).
+  *
+  * \sa setCpuCacheSizes */
+template<typename LhsScalar, typename RhsScalar, int KcFactor, typename SizeType>
+void computeProductBlockingSizes(SizeType& k, SizeType& m, SizeType& n)
+{
+  EIGEN_UNUSED_VARIABLE(n);
+  // Explanations:
+  // Let's recall that the product algorithm forms kc x nc horizontal panels B' on the rhs and
+  // mc x kc blocks A' on the lhs. A' has to fit into the L2 cache. Moreover, B' is processed
+  // in kc x nr small vertical panels, where nr is the blocking size along the n dimension
+  // at the register level. For vectorization purposes, these small vertical panels are unpacked,
+  // i.e., each coefficient is replicated to fill a packet. Such a small vertical panel has to
+  // stay in the L1 cache.
+  std::ptrdiff_t l1, l2;
+
+  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+  enum {
+    kdiv = KcFactor * 2 * Traits::nr
+         * Traits::RhsProgress * sizeof(RhsScalar),
+    mr = gebp_traits<LhsScalar,RhsScalar>::mr,
+    mr_mask = (0xffffffff/mr)*mr
+  };
+
+  manage_caching_sizes(GetAction, &l1, &l2);
+  k = std::min<SizeType>(k, l1/kdiv);
+  SizeType _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0;
+  if(_m<m) m = _m & mr_mask;
+}
+
+template<typename LhsScalar, typename RhsScalar, typename SizeType>
+inline void computeProductBlockingSizes(SizeType& k, SizeType& m, SizeType& n)
+{
+  computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n);
+}
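+
+// Illustrative usage sketch (commented out; the concrete sizes are
+// hypothetical inputs, not recommendations):
+//   std::ptrdiff_t k = 1024, m = 1024, n = 1024;
+//   internal::computeProductBlockingSizes<float,float>(k, m, n);
+//   // on return, k is clamped so that an unpacked kc x nr rhs panel fits
+//   // the L1 budget, and m is rounded down to a multiple of mr so that an
+//   // mc x kc lhs block fits the L2 budget.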
+
+#ifdef EIGEN_HAS_FUSE_CJMADD
+  #define MADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
+#else
+
+  // FIXME (a bit overkill maybe ?)
+
+  template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
+    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
+    {
+      c = cj.pmadd(a,b,c);
+    }
+  };
+
+  template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
+    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
+    {
+      t = b; t = cj.pmul(a,t); c = padd(c,t);
+    }
+  };
+
+  template<typename CJ, typename A, typename B, typename C, typename T>
+  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
+  {
+    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
+  }
+
+  #define MADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
+//   #define MADD(CJ,A,B,C,T)  T = B; T = CJ.pmul(A,T); C = padd(C,T);
+#endif
+
+/* Vectorization logic
+ *  real*real: unpack rhs to constant packets, ...
+ * 
+ *  cd*cd : unpack rhs to (b_r,b_r), (b_i,b_i), multiply to get (a_r b_r, a_i b_r) and (a_r b_i, a_i b_i),
+ *          storing each result packet as two packets (2x2);
+ *          at the end combine them: swap the second one and add/sub them
+ *  cf*cf : same but with 2x4 blocks
+ *  cplx*real : unpack rhs to constant packets, ...
+ *  real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
+ */
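+
+// Worked sketch of the cd*cd scheme above (scalar notation, illustrative):
+// a rhs coefficient b = b_r + i*b_i is unpacked into [b_r,b_r] and [b_i,b_i];
+// with a lhs packet a = (a_r, a_i), the two partial products are
+//   first  = a * [b_r,b_r] = (a_r*b_r, a_i*b_r)
+//   second = a * [b_i,b_i] = (a_r*b_i, a_i*b_i)
+// and acc() recombines them: flip second to (a_i*b_i, a_r*b_i), then
+// add/sub to get (a_r*b_r - a_i*b_i, a_i*b_r + a_r*b_i) = a*b.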
+template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
+class gebp_traits
+{
+public:
+  typedef _LhsScalar LhsScalar;
+  typedef _RhsScalar RhsScalar;
+  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+  enum {
+    ConjLhs = _ConjLhs,
+    ConjRhs = _ConjRhs,
+    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
+    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+    
+    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+
+    // register block size along the N direction (must be either 2 or 4)
+    nr = NumberOfRegisters/4,
+
+    // register block size along the M direction (currently, this one cannot be modified)
+    mr = 2 * LhsPacketSize,
+    
+    WorkSpaceFactor = nr * RhsPacketSize,
+
+    LhsProgress = LhsPacketSize,
+    RhsProgress = RhsPacketSize
+  };
+
+  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
+  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
+  typedef typename packet_traits<ResScalar>::type  _ResPacket;
+
+  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+  typedef ResPacket AccPacket;
+  
+  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
+  {
+    p = pset1<ResPacket>(ResScalar(0));
+  }
+
+  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
+  {
+    for(DenseIndex k=0; k<n; k++)
+      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
+  }
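+  // e.g., with RhsPacketSize==4, rhs = [r0, r1, ...] is unpacked into
+  // b = [r0,r0,r0,r0, r1,r1,r1,r1, ...], so that loadRhs() can fetch each
+  // coefficient as a ready-made constant packet.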
+
+  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+  {
+    dest = pload<RhsPacket>(b);
+  }
+
+  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+  {
+    dest = pload<LhsPacket>(a);
+  }
+
+  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, AccPacket& tmp) const
+  {
+    tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);
+  }
+
+  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+  {
+    r = pmadd(c,alpha,r);
+  }
+
+protected:
+//   conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
+//   conj_helper<LhsPacket,RhsPacket,ConjLhs,ConjRhs> pcj;
+};
+
+template<typename RealScalar, bool _ConjLhs>
+class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
+{
+public:
+  typedef std::complex<RealScalar> LhsScalar;
+  typedef RealScalar RhsScalar;
+  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+  enum {
+    ConjLhs = _ConjLhs,
+    ConjRhs = false,
+    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
+    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+    
+    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+    nr = NumberOfRegisters/4,
+    mr = 2 * LhsPacketSize,
+    WorkSpaceFactor = nr*RhsPacketSize,
+
+    LhsProgress = LhsPacketSize,
+    RhsProgress = RhsPacketSize
+  };
+
+  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
+  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
+  typedef typename packet_traits<ResScalar>::type  _ResPacket;
+
+  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+  typedef ResPacket AccPacket;
+
+  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
+  {
+    p = pset1<ResPacket>(ResScalar(0));
+  }
+
+  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
+  {
+    for(DenseIndex k=0; k<n; k++)
+      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
+  }
+
+  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+  {
+    dest = pload<RhsPacket>(b);
+  }
+
+  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+  {
+    dest = pload<LhsPacket>(a);
+  }
+
+  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
+  {
+    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
+  }
+
+  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
+  {
+    tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
+  }
+
+  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
+  {
+    c += a * b;
+  }
+
+  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+  {
+    r = cj.pmadd(c,alpha,r);
+  }
+
+protected:
+  conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
+};
+
+template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
+class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
+{
+public:
+  typedef std::complex<RealScalar>  Scalar;
+  typedef std::complex<RealScalar>  LhsScalar;
+  typedef std::complex<RealScalar>  RhsScalar;
+  typedef std::complex<RealScalar>  ResScalar;
+  
+  enum {
+    ConjLhs = _ConjLhs,
+    ConjRhs = _ConjRhs,
+    Vectorizable = packet_traits<RealScalar>::Vectorizable
+                && packet_traits<Scalar>::Vectorizable,
+    RealPacketSize  = Vectorizable ? packet_traits<RealScalar>::size : 1,
+    ResPacketSize   = Vectorizable ? packet_traits<ResScalar>::size : 1,
+    
+    nr = 2,
+    mr = 2 * ResPacketSize,
+    WorkSpaceFactor = Vectorizable ? 2*nr*RealPacketSize : nr,
+
+    LhsProgress = ResPacketSize,
+    RhsProgress = Vectorizable ? 2*ResPacketSize : 1
+  };
+  
+  typedef typename packet_traits<RealScalar>::type RealPacket;
+  typedef typename packet_traits<Scalar>::type     ScalarPacket;
+  struct DoublePacket
+  {
+    RealPacket first;
+    RealPacket second;
+  };
+
+  typedef typename conditional<Vectorizable,RealPacket,  Scalar>::type LhsPacket;
+  typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type RhsPacket;
+  typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
+  typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type AccPacket;
+  
+  EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }
+
+  EIGEN_STRONG_INLINE void initAcc(DoublePacket& p)
+  {
+    p.first   = pset1<RealPacket>(RealScalar(0));
+    p.second  = pset1<RealPacket>(RealScalar(0));
+  }
+
+  /* Unpack the rhs coefficients such that each complex coefficient is spread into
+   * two packets containing respectively the real and the imaginary part,
+   * duplicated as many times as needed: (x+iy) => [x, ..., x] [y, ..., y]
+   */
+  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const Scalar* rhs, Scalar* b)
+  {
+    for(DenseIndex k=0; k<n; k++)
+    {
+      if(Vectorizable)
+      {
+        pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+0],             real(rhs[k]));
+        pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+ResPacketSize], imag(rhs[k]));
+      }
+      else
+        b[k] = rhs[k];
+    }
+  }
+
+  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const { dest = *b; }
+
+  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacket& dest) const
+  {
+    dest.first  = pload<RealPacket>((const RealScalar*)b);
+    dest.second = pload<RealPacket>((const RealScalar*)(b+ResPacketSize));
+  }
+
+  // nothing special here
+  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+  {
+    dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
+  }
+
+  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacket& c, RhsPacket& /*tmp*/) const
+  {
+    c.first   = padd(pmul(a,b.first), c.first);
+    c.second  = padd(pmul(a,b.second),c.second);
+  }
+
+  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
+  {
+    c = cj.pmadd(a,b,c);
+  }
+  
+  EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }
+  
+  EIGEN_STRONG_INLINE void acc(const DoublePacket& c, const ResPacket& alpha, ResPacket& r) const
+  {
+    // assemble c
+    ResPacket tmp;
+    if((!ConjLhs)&&(!ConjRhs))
+    {
+      tmp = pcplxflip(pconj(ResPacket(c.second)));
+      tmp = padd(ResPacket(c.first),tmp);
+    }
+    else if((!ConjLhs)&&(ConjRhs))
+    {
+      tmp = pconj(pcplxflip(ResPacket(c.second)));
+      tmp = padd(ResPacket(c.first),tmp);
+    }
+    else if((ConjLhs)&&(!ConjRhs))
+    {
+      tmp = pcplxflip(ResPacket(c.second));
+      tmp = padd(pconj(ResPacket(c.first)),tmp);
+    }
+    else if((ConjLhs)&&(ConjRhs))
+    {
+      tmp = pcplxflip(ResPacket(c.second));
+      tmp = psub(pconj(ResPacket(c.first)),tmp);
+    }
+    
+    r = pmadd(tmp,alpha,r);
+  }
+
+protected:
+  conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
+};
+
+template<typename RealScalar, bool _ConjRhs>
+class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
+{
+public:
+  typedef std::complex<RealScalar>  Scalar;
+  typedef RealScalar  LhsScalar;
+  typedef Scalar      RhsScalar;
+  typedef Scalar      ResScalar;
+
+  enum {
+    ConjLhs = false,
+    ConjRhs = _ConjRhs,
+    Vectorizable = packet_traits<RealScalar>::Vectorizable
+                && packet_traits<Scalar>::Vectorizable,
+    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,
+    
+    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+    nr = 4,
+    mr = 2*ResPacketSize,
+    WorkSpaceFactor = nr*RhsPacketSize,
+
+    LhsProgress = ResPacketSize,
+    RhsProgress = ResPacketSize
+  };
+
+  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
+  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
+  typedef typename packet_traits<ResScalar>::type  _ResPacket;
+
+  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+  typedef ResPacket AccPacket;
+
+  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
+  {
+    p = pset1<ResPacket>(ResScalar(0));
+  }
+
+  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
+  {
+    for(DenseIndex k=0; k<n; k++)
+      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
+  }
+
+  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+  {
+    dest = pload<RhsPacket>(b);
+  }
+
+  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+  {
+    dest = ploaddup<LhsPacket>(a);
+  }
+
+  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
+  {
+    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
+  }
+
+  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
+  {
+    tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
+  }
+
+  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
+  {
+    c += a * b;
+  }
+
+  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
+  {
+    r = cj.pmadd(alpha,c,r);
+  }
+
+protected:
+  conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
+};
+
+/* optimized GEneral packed Block * packed Panel product kernel
+ *
+ * Mixing type logic: C += A * B
+ *  |  A  |  B  | comments
+ *  |real |cplx | no vectorization yet, would require packing A with duplication
+ *  |cplx |real | easy vectorization
+ */
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+struct gebp_kernel
+{
+  typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
+  typedef typename Traits::ResScalar ResScalar;
+  typedef typename Traits::LhsPacket LhsPacket;
+  typedef typename Traits::RhsPacket RhsPacket;
+  typedef typename Traits::ResPacket ResPacket;
+  typedef typename Traits::AccPacket AccPacket;
+
+  enum {
+    Vectorizable  = Traits::Vectorizable,
+    LhsProgress   = Traits::LhsProgress,
+    RhsProgress   = Traits::RhsProgress,
+    ResPacketSize = Traits::ResPacketSize
+  };
+
+  EIGEN_DONT_INLINE
+  void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha,
+                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB=0);
+};
+
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+EIGEN_DONT_INLINE
+void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
+  ::operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha,
+               Index strideA, Index strideB, Index offsetA, Index offsetB, RhsScalar* unpackedB)
+  {
+    Traits traits;
+    
+    if(strideA==-1) strideA = depth;
+    if(strideB==-1) strideB = depth;
+    conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
+//     conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
+    Index packet_cols = (cols/nr) * nr;
+    const Index peeled_mc = (rows/mr)*mr;
+    // FIXME:
+    const Index peeled_mc2 = peeled_mc + (rows-peeled_mc >= LhsProgress ? LhsProgress : 0);
+    const Index peeled_kc = (depth/4)*4;
+
+    if(unpackedB==0)
+      unpackedB = const_cast<RhsScalar*>(blockB - strideB * nr * RhsProgress);
+
+    // loops on each micro vertical panel of rhs (depth x nr)
+    for(Index j2=0; j2<packet_cols; j2+=nr)
+    {
+      traits.unpackRhs(depth*nr,&blockB[j2*strideB+offsetB*nr],unpackedB); 
+
+      // loops on each largest micro horizontal panel of lhs (mr x depth)
+      // => we select a mr x nr micro block of res which is entirely
+      //    stored into mr/packet_size x nr registers.
+      for(Index i=0; i<peeled_mc; i+=mr)
+      {
+        const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
+        prefetch(&blA[0]);
+
+        // gets res block as register
+        AccPacket C0, C1, C2, C3, C4, C5, C6, C7;
+                  traits.initAcc(C0);
+                  traits.initAcc(C1);
+        if(nr==4) traits.initAcc(C2);
+        if(nr==4) traits.initAcc(C3);
+                  traits.initAcc(C4);
+                  traits.initAcc(C5);
+        if(nr==4) traits.initAcc(C6);
+        if(nr==4) traits.initAcc(C7);
+
+        ResScalar* r0 = &res[(j2+0)*resStride + i];
+        ResScalar* r1 = r0 + resStride;
+        ResScalar* r2 = r1 + resStride;
+        ResScalar* r3 = r2 + resStride;
+
+        prefetch(r0+16);
+        prefetch(r1+16);
+        prefetch(r2+16);
+        prefetch(r3+16);
+
+        // performs "inner" product
+        // TODO let's check whether the following peeled loop could not be
+        //      optimized via optimal prefetching from one loop to the other
+        const RhsScalar* blB = unpackedB;
+        for(Index k=0; k<peeled_kc; k+=4)
+        {
+          if(nr==2)
+          {
+            LhsPacket A0, A1;
+            RhsPacket B_0;
+            RhsPacket T0;
+            
+EIGEN_ASM_COMMENT("mybegin2");
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadLhs(&blA[1*LhsProgress], A1);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B_0);
+            traits.madd(A0,B_0,C1,T0);
+            traits.madd(A1,B_0,C5,B_0);
+
+            traits.loadLhs(&blA[2*LhsProgress], A0);
+            traits.loadLhs(&blA[3*LhsProgress], A1);
+            traits.loadRhs(&blB[2*RhsProgress], B_0);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[3*RhsProgress], B_0);
+            traits.madd(A0,B_0,C1,T0);
+            traits.madd(A1,B_0,C5,B_0);
+
+            traits.loadLhs(&blA[4*LhsProgress], A0);
+            traits.loadLhs(&blA[5*LhsProgress], A1);
+            traits.loadRhs(&blB[4*RhsProgress], B_0);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[5*RhsProgress], B_0);
+            traits.madd(A0,B_0,C1,T0);
+            traits.madd(A1,B_0,C5,B_0);
+
+            traits.loadLhs(&blA[6*LhsProgress], A0);
+            traits.loadLhs(&blA[7*LhsProgress], A1);
+            traits.loadRhs(&blB[6*RhsProgress], B_0);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[7*RhsProgress], B_0);
+            traits.madd(A0,B_0,C1,T0);
+            traits.madd(A1,B_0,C5,B_0);
+EIGEN_ASM_COMMENT("myend");
+          }
+          else
+          {
+EIGEN_ASM_COMMENT("mybegin4");
+            LhsPacket A0, A1;
+            RhsPacket B_0, B1, B2, B3;
+            RhsPacket T0;
+            
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadLhs(&blA[1*LhsProgress], A1);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+
+            traits.madd(A0,B_0,C0,T0);
+            traits.loadRhs(&blB[2*RhsProgress], B2);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[3*RhsProgress], B3);
+            traits.loadRhs(&blB[4*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,T0);
+            traits.madd(A1,B1,C5,B1);
+            traits.loadRhs(&blB[5*RhsProgress], B1);
+            traits.madd(A0,B2,C2,T0);
+            traits.madd(A1,B2,C6,B2);
+            traits.loadRhs(&blB[6*RhsProgress], B2);
+            traits.madd(A0,B3,C3,T0);
+            traits.loadLhs(&blA[2*LhsProgress], A0);
+            traits.madd(A1,B3,C7,B3);
+            traits.loadLhs(&blA[3*LhsProgress], A1);
+            traits.loadRhs(&blB[7*RhsProgress], B3);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[8*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,T0);
+            traits.madd(A1,B1,C5,B1);
+            traits.loadRhs(&blB[9*RhsProgress], B1);
+            traits.madd(A0,B2,C2,T0);
+            traits.madd(A1,B2,C6,B2);
+            traits.loadRhs(&blB[10*RhsProgress], B2);
+            traits.madd(A0,B3,C3,T0);
+            traits.loadLhs(&blA[4*LhsProgress], A0);
+            traits.madd(A1,B3,C7,B3);
+            traits.loadLhs(&blA[5*LhsProgress], A1);
+            traits.loadRhs(&blB[11*RhsProgress], B3);
+
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[12*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,T0);
+            traits.madd(A1,B1,C5,B1);
+            traits.loadRhs(&blB[13*RhsProgress], B1);
+            traits.madd(A0,B2,C2,T0);
+            traits.madd(A1,B2,C6,B2);
+            traits.loadRhs(&blB[14*RhsProgress], B2);
+            traits.madd(A0,B3,C3,T0);
+            traits.loadLhs(&blA[6*LhsProgress], A0);
+            traits.madd(A1,B3,C7,B3);
+            traits.loadLhs(&blA[7*LhsProgress], A1);
+            traits.loadRhs(&blB[15*RhsProgress], B3);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.madd(A0,B1,C1,T0);
+            traits.madd(A1,B1,C5,B1);
+            traits.madd(A0,B2,C2,T0);
+            traits.madd(A1,B2,C6,B2);
+            traits.madd(A0,B3,C3,T0);
+            traits.madd(A1,B3,C7,B3);
+          }
+
+          blB += 4*nr*RhsProgress;
+          blA += 4*mr;
+        }
+        // process remaining peeled loop
+        for(Index k=peeled_kc; k<depth; k++)
+        {
+          if(nr==2)
+          {
+            LhsPacket A0, A1;
+            RhsPacket B_0;
+            RhsPacket T0;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadLhs(&blA[1*LhsProgress], A1);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.madd(A0,B_0,C0,T0);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B_0);
+            traits.madd(A0,B_0,C1,T0);
+            traits.madd(A1,B_0,C5,B_0);
+          }
+          else
+          {
+            LhsPacket A0, A1;
+            RhsPacket B_0, B1, B2, B3;
+            RhsPacket T0;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadLhs(&blA[1*LhsProgress], A1);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+
+            traits.madd(A0,B_0,C0,T0);
+            traits.loadRhs(&blB[2*RhsProgress], B2);
+            traits.madd(A1,B_0,C4,B_0);
+            traits.loadRhs(&blB[3*RhsProgress], B3);
+            traits.madd(A0,B1,C1,T0);
+            traits.madd(A1,B1,C5,B1);
+            traits.madd(A0,B2,C2,T0);
+            traits.madd(A1,B2,C6,B2);
+            traits.madd(A0,B3,C3,T0);
+            traits.madd(A1,B3,C7,B3);
+          }
+
+          blB += nr*RhsProgress;
+          blA += mr;
+        }
+
+        if(nr==4)
+        {
+          ResPacket R0, R1, R2, R3, R4, R5, R6;
+          ResPacket alphav = pset1<ResPacket>(alpha);
+
+          R0 = ploadu<ResPacket>(r0);
+          R1 = ploadu<ResPacket>(r1);
+          R2 = ploadu<ResPacket>(r2);
+          R3 = ploadu<ResPacket>(r3);
+          R4 = ploadu<ResPacket>(r0 + ResPacketSize);
+          R5 = ploadu<ResPacket>(r1 + ResPacketSize);
+          R6 = ploadu<ResPacket>(r2 + ResPacketSize);
+          traits.acc(C0, alphav, R0);
+          pstoreu(r0, R0);
+          R0 = ploadu<ResPacket>(r3 + ResPacketSize);
+
+          traits.acc(C1, alphav, R1);
+          traits.acc(C2, alphav, R2);
+          traits.acc(C3, alphav, R3);
+          traits.acc(C4, alphav, R4);
+          traits.acc(C5, alphav, R5);
+          traits.acc(C6, alphav, R6);
+          traits.acc(C7, alphav, R0);
+          
+          pstoreu(r1, R1);
+          pstoreu(r2, R2);
+          pstoreu(r3, R3);
+          pstoreu(r0 + ResPacketSize, R4);
+          pstoreu(r1 + ResPacketSize, R5);
+          pstoreu(r2 + ResPacketSize, R6);
+          pstoreu(r3 + ResPacketSize, R0);
+        }
+        else
+        {
+          ResPacket R0, R1, R4;
+          ResPacket alphav = pset1<ResPacket>(alpha);
+
+          R0 = ploadu<ResPacket>(r0);
+          R1 = ploadu<ResPacket>(r1);
+          R4 = ploadu<ResPacket>(r0 + ResPacketSize);
+          traits.acc(C0, alphav, R0);
+          pstoreu(r0, R0);
+          R0 = ploadu<ResPacket>(r1 + ResPacketSize);
+          traits.acc(C1, alphav, R1);
+          traits.acc(C4, alphav, R4);
+          traits.acc(C5, alphav, R0);
+          pstoreu(r1, R1);
+          pstoreu(r0 + ResPacketSize, R4);
+          pstoreu(r1 + ResPacketSize, R0);
+        }
+        
+      }
+      
+      if(rows-peeled_mc>=LhsProgress)
+      {
+        Index i = peeled_mc;
+        const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];
+        prefetch(&blA[0]);
+
+        // gets res block as register
+        AccPacket C0, C1, C2, C3;
+                  traits.initAcc(C0);
+                  traits.initAcc(C1);
+        if(nr==4) traits.initAcc(C2);
+        if(nr==4) traits.initAcc(C3);
+
+        // performs "inner" product
+        const RhsScalar* blB = unpackedB;
+        for(Index k=0; k<peeled_kc; k+=4)
+        {
+          if(nr==2)
+          {
+            LhsPacket A0;
+            RhsPacket B_0, B1;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[2*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadLhs(&blA[1*LhsProgress], A0);
+            traits.loadRhs(&blB[3*RhsProgress], B1);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[4*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadLhs(&blA[2*LhsProgress], A0);
+            traits.loadRhs(&blB[5*RhsProgress], B1);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[6*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadLhs(&blA[3*LhsProgress], A0);
+            traits.loadRhs(&blB[7*RhsProgress], B1);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.madd(A0,B1,C1,B1);
+          }
+          else
+          {
+            LhsPacket A0;
+            RhsPacket B_0, B1, B2, B3;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[2*RhsProgress], B2);
+            traits.loadRhs(&blB[3*RhsProgress], B3);
+            traits.loadRhs(&blB[4*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadRhs(&blB[5*RhsProgress], B1);
+            traits.madd(A0,B2,C2,B2);
+            traits.loadRhs(&blB[6*RhsProgress], B2);
+            traits.madd(A0,B3,C3,B3);
+            traits.loadLhs(&blA[1*LhsProgress], A0);
+            traits.loadRhs(&blB[7*RhsProgress], B3);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[8*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadRhs(&blB[9*RhsProgress], B1);
+            traits.madd(A0,B2,C2,B2);
+            traits.loadRhs(&blB[10*RhsProgress], B2);
+            traits.madd(A0,B3,C3,B3);
+            traits.loadLhs(&blA[2*LhsProgress], A0);
+            traits.loadRhs(&blB[11*RhsProgress], B3);
+
+            traits.madd(A0,B_0,C0,B_0);
+            traits.loadRhs(&blB[12*RhsProgress], B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.loadRhs(&blB[13*RhsProgress], B1);
+            traits.madd(A0,B2,C2,B2);
+            traits.loadRhs(&blB[14*RhsProgress], B2);
+            traits.madd(A0,B3,C3,B3);
+
+            traits.loadLhs(&blA[3*LhsProgress], A0);
+            traits.loadRhs(&blB[15*RhsProgress], B3);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.madd(A0,B2,C2,B2);
+            traits.madd(A0,B3,C3,B3);
+          }
+
+          blB += nr*4*RhsProgress;
+          blA += 4*LhsProgress;
+        }
+        // process remaining peeled loop
+        for(Index k=peeled_kc; k<depth; k++)
+        {
+          if(nr==2)
+          {
+            LhsPacket A0;
+            RhsPacket B_0, B1;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+            traits.madd(A0,B_0,C0,B_0);
+            traits.madd(A0,B1,C1,B1);
+          }
+          else
+          {
+            LhsPacket A0;
+            RhsPacket B_0, B1, B2, B3;
+
+            traits.loadLhs(&blA[0*LhsProgress], A0);
+            traits.loadRhs(&blB[0*RhsProgress], B_0);
+            traits.loadRhs(&blB[1*RhsProgress], B1);
+            traits.loadRhs(&blB[2*RhsProgress], B2);
+            traits.loadRhs(&blB[3*RhsProgress], B3);
+
+            traits.madd(A0,B_0,C0,B_0);
+            traits.madd(A0,B1,C1,B1);
+            traits.madd(A0,B2,C2,B2);
+            traits.madd(A0,B3,C3,B3);
+          }
+
+          blB += nr*RhsProgress;
+          blA += LhsProgress;
+        }
+
+        ResPacket R0, R1, R2, R3;
+        ResPacket alphav = pset1<ResPacket>(alpha);
+
+        ResScalar* r0 = &res[(j2+0)*resStride + i];
+        ResScalar* r1 = r0 + resStride;
+        ResScalar* r2 = r1 + resStride;
+        ResScalar* r3 = r2 + resStride;
+
+                  R0 = ploadu<ResPacket>(r0);
+                  R1 = ploadu<ResPacket>(r1);
+        if(nr==4) R2 = ploadu<ResPacket>(r2);
+        if(nr==4) R3 = ploadu<ResPacket>(r3);
+
+                  traits.acc(C0, alphav, R0);
+                  traits.acc(C1, alphav, R1);
+        if(nr==4) traits.acc(C2, alphav, R2);
+        if(nr==4) traits.acc(C3, alphav, R3);
+
+                  pstoreu(r0, R0);
+                  pstoreu(r1, R1);
+        if(nr==4) pstoreu(r2, R2);
+        if(nr==4) pstoreu(r3, R3);
+      }
+      for(Index i=peeled_mc2; i<rows; i++)
+      {
+        const LhsScalar* blA = &blockA[i*strideA+offsetA];
+        prefetch(&blA[0]);
+
+        // gets a 1 x nr res block as registers
+        ResScalar C0(0), C1(0), C2(0), C3(0);
+        // TODO directly use blockB ???
+        const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
+        for(Index k=0; k<depth; k++)
+        {
+          if(nr==2)
+          {
+            LhsScalar A0;
+            RhsScalar B_0, B1;
+
+            A0 = blA[k];
+            B_0 = blB[0];
+            B1 = blB[1];
+            MADD(cj,A0,B_0,C0,B_0);
+            MADD(cj,A0,B1,C1,B1);
+          }
+          else
+          {
+            LhsScalar A0;
+            RhsScalar B_0, B1, B2, B3;
+
+            A0 = blA[k];
+            B_0 = blB[0];
+            B1 = blB[1];
+            B2 = blB[2];
+            B3 = blB[3];
+
+            MADD(cj,A0,B_0,C0,B_0);
+            MADD(cj,A0,B1,C1,B1);
+            MADD(cj,A0,B2,C2,B2);
+            MADD(cj,A0,B3,C3,B3);
+          }
+
+          blB += nr;
+        }
+                  res[(j2+0)*resStride + i] += alpha*C0;
+                  res[(j2+1)*resStride + i] += alpha*C1;
+        if(nr==4) res[(j2+2)*resStride + i] += alpha*C2;
+        if(nr==4) res[(j2+3)*resStride + i] += alpha*C3;
+      }
+    }
+    // process remaining rhs/res columns one at a time
+    // => do the same but with nr==1
+    for(Index j2=packet_cols; j2<cols; j2++)
+    {
+      // unpack B
+      traits.unpackRhs(depth, &blockB[j2*strideB+offsetB], unpackedB);
+
+      for(Index i=0; i<peeled_mc; i+=mr)
+      {
+        const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
+        prefetch(&blA[0]);
+
+        // TODO move the res loads to the stores
+
+        // get res block as registers
+        AccPacket C0, C4;
+        traits.initAcc(C0);
+        traits.initAcc(C4);
+
+        const RhsScalar* blB = unpackedB;
+        for(Index k=0; k<depth; k++)
+        {
+          LhsPacket A0, A1;
+          RhsPacket B_0;
+          RhsPacket T0;
+
+          traits.loadLhs(&blA[0*LhsProgress], A0);
+          traits.loadLhs(&blA[1*LhsProgress], A1);
+          traits.loadRhs(&blB[0*RhsProgress], B_0);
+          traits.madd(A0,B_0,C0,T0);
+          traits.madd(A1,B_0,C4,B_0);
+
+          blB += RhsProgress;
+          blA += 2*LhsProgress;
+        }
+        ResPacket R0, R4;
+        ResPacket alphav = pset1<ResPacket>(alpha);
+
+        ResScalar* r0 = &res[(j2+0)*resStride + i];
+
+        R0 = ploadu<ResPacket>(r0);
+        R4 = ploadu<ResPacket>(r0+ResPacketSize);
+
+        traits.acc(C0, alphav, R0);
+        traits.acc(C4, alphav, R4);
+
+        pstoreu(r0,               R0);
+        pstoreu(r0+ResPacketSize, R4);
+      }
+      if(rows-peeled_mc>=LhsProgress)
+      {
+        Index i = peeled_mc;
+        const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];
+        prefetch(&blA[0]);
+
+        AccPacket C0;
+        traits.initAcc(C0);
+
+        const RhsScalar* blB = unpackedB;
+        for(Index k=0; k<depth; k++)
+        {
+          LhsPacket A0;
+          RhsPacket B_0;
+          traits.loadLhs(blA, A0);
+          traits.loadRhs(blB, B_0);
+          traits.madd(A0, B_0, C0, B_0);
+          blB += RhsProgress;
+          blA += LhsProgress;
+        }
+
+        ResPacket alphav = pset1<ResPacket>(alpha);
+        ResPacket R0 = ploadu<ResPacket>(&res[(j2+0)*resStride + i]);
+        traits.acc(C0, alphav, R0);
+        pstoreu(&res[(j2+0)*resStride + i], R0);
+      }
+      for(Index i=peeled_mc2; i<rows; i++)
+      {
+        const LhsScalar* blA = &blockA[i*strideA+offsetA];
+        prefetch(&blA[0]);
+
+        // gets a 1 x 1 res block as registers
+        ResScalar C0(0);
+        // FIXME directly use blockB ??
+        const RhsScalar* blB = &blockB[j2*strideB+offsetB];
+        for(Index k=0; k<depth; k++)
+        {
+          LhsScalar A0 = blA[k];
+          RhsScalar B_0 = blB[k];
+          MADD(cj, A0, B_0, C0, B_0);
+        }
+        res[(j2+0)*resStride + i] += alpha*C0;
+      }
+    }
+  }
+
+
+#undef MADD
+
+// pack a block of the lhs
+// The traversal is as follows (mr==4):
+//   0  4  8 12 ...
+//   1  5  9 13 ...
+//   2  6 10 14 ...
+//   3  7 11 15 ...
+//
+//  16 20 24 28 ...
+//  17 21 25 29 ...
+//  18 22 26 30 ...
+//  19 23 27 31 ...
+//
+//  32 33 34 35 ...
+//  36 37 38 39 ...
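+//
+// i.e., full Pack1-high row panels are stored first (column by column),
+// then an optional Pack2-high panel, and finally any remaining rows are
+// appended one at a time, each contiguous along k.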
+template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs
+{
+  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder, Conjugate, PanelMode>
+  ::operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride, Index offset)
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+  enum { PacketSize = packet_traits<Scalar>::size };
+
+  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
+  EIGEN_UNUSED_VARIABLE(stride)
+  EIGEN_UNUSED_VARIABLE(offset)
+  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+  eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) );
+  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+  const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
+  Index count = 0;
+  Index peeled_mc = (rows/Pack1)*Pack1;
+  for(Index i=0; i<peeled_mc; i+=Pack1)
+  {
+    if(PanelMode) count += Pack1 * offset;
+
+    if(StorageOrder==ColMajor)
+    {
+      for(Index k=0; k<depth; k++)
+      {
+        Packet A, B, C, D;
+        if(Pack1>=1*PacketSize) A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
+        if(Pack1>=2*PacketSize) B = ploadu<Packet>(&lhs(i+1*PacketSize, k));
+        if(Pack1>=3*PacketSize) C = ploadu<Packet>(&lhs(i+2*PacketSize, k));
+        if(Pack1>=4*PacketSize) D = ploadu<Packet>(&lhs(i+3*PacketSize, k));
+        if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; }
+        if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; }
+        if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; }
+        if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; }
+      }
+    }
+    else
+    {
+      for(Index k=0; k<depth; k++)
+      {
+        // TODO add a vectorized transpose here
+        Index w=0;
+        for(; w<Pack1-3; w+=4)
+        {
+          Scalar a(cj(lhs(i+w+0, k))),
+                  b(cj(lhs(i+w+1, k))),
+                  c(cj(lhs(i+w+2, k))),
+                  d(cj(lhs(i+w+3, k)));
+          blockA[count++] = a;
+          blockA[count++] = b;
+          blockA[count++] = c;
+          blockA[count++] = d;
+        }
+        if(Pack1%4)
+          for(;w<Pack1;++w)
+            blockA[count++] = cj(lhs(i+w, k));
+      }
+    }
+    if(PanelMode) count += Pack1 * (stride-offset-depth);
+  }
+  if(rows-peeled_mc>=Pack2)
+  {
+    if(PanelMode) count += Pack2*offset;
+    for(Index k=0; k<depth; k++)
+      for(Index w=0; w<Pack2; w++)
+        blockA[count++] = cj(lhs(peeled_mc+w, k));
+    if(PanelMode) count += Pack2 * (stride-offset-depth);
+    peeled_mc += Pack2;
+  }
+  for(Index i=peeled_mc; i<rows; i++)
+  {
+    if(PanelMode) count += offset;
+    for(Index k=0; k<depth; k++)
+      blockA[count++] = cj(lhs(i, k));
+    if(PanelMode) count += (stride-offset-depth);
+  }
+}
+
+// copy a complete panel of the rhs
+// this version is optimized for column major matrices
+// The traversal order is as follows (nr==4):
+//  0  1  2  3   12 13 14 15   24 27
+//  4  5  6  7   16 17 18 19   25 28
+//  8  9 10 11   20 21 22 23   26 29
+//  .  .  .  .    .  .  .  .    .  .
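+//
+// i.e., groups of nr columns are interleaved coefficient-by-coefficient
+// along k, while the leftover columns (24..29 above) are each stored
+// contiguously along k, one column at a time.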
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode>
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+  enum { PacketSize = packet_traits<Scalar>::size };
+  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode>
+  ::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset)
+{
+  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
+  EIGEN_UNUSED_VARIABLE(stride)
+  EIGEN_UNUSED_VARIABLE(offset)
+  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+  Index packet_cols = (cols/nr) * nr;
+  Index count = 0;
+  for(Index j2=0; j2<packet_cols; j2+=nr)
+  {
+    // skip what we have before
+    if(PanelMode) count += nr * offset;
+    const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+    const Scalar* b1 = &rhs[(j2+1)*rhsStride];
+    const Scalar* b2 = &rhs[(j2+2)*rhsStride];
+    const Scalar* b3 = &rhs[(j2+3)*rhsStride];
+    for(Index k=0; k<depth; k++)
+    {
+                blockB[count+0] = cj(b0[k]);
+                blockB[count+1] = cj(b1[k]);
+      if(nr==4) blockB[count+2] = cj(b2[k]);
+      if(nr==4) blockB[count+3] = cj(b3[k]);
+      count += nr;
+    }
+    // skip what we have after
+    if(PanelMode) count += nr * (stride-offset-depth);
+  }
+
+  // copy the remaining columns one at a time (nr==1)
+  for(Index j2=packet_cols; j2<cols; ++j2)
+  {
+    if(PanelMode) count += offset;
+    const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+    for(Index k=0; k<depth; k++)
+    {
+      blockB[count] = cj(b0[k]);
+      count += 1;
+    }
+    if(PanelMode) count += (stride-offset-depth);
+  }
+}
+
+// this version is optimized for row major matrices
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+struct gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode>
+{
+  enum { PacketSize = packet_traits<Scalar>::size };
+  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride=0, Index offset=0);
+};
+
+template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode>
+  ::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset)
+{
+  EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
+  EIGEN_UNUSED_VARIABLE(stride)
+  EIGEN_UNUSED_VARIABLE(offset)
+  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+  Index packet_cols = (cols/nr) * nr;
+  Index count = 0;
+  for(Index j2=0; j2<packet_cols; j2+=nr)
+  {
+    // skip what we have before
+    if(PanelMode) count += nr * offset;
+    for(Index k=0; k<depth; k++)
+    {
+      const Scalar* b0 = &rhs[k*rhsStride + j2];
+                blockB[count+0] = cj(b0[0]);
+                blockB[count+1] = cj(b0[1]);
+      if(nr==4) blockB[count+2] = cj(b0[2]);
+      if(nr==4) blockB[count+3] = cj(b0[3]);
+      count += nr;
+    }
+    // skip what we have after
+    if(PanelMode) count += nr * (stride-offset-depth);
+  }
+  // copy the remaining columns one at a time (nr==1)
+  for(Index j2=packet_cols; j2<cols; ++j2)
+  {
+    if(PanelMode) count += offset;
+    const Scalar* b0 = &rhs[j2];
+    for(Index k=0; k<depth; k++)
+    {
+      blockB[count] = cj(b0[k*rhsStride]);
+      count += 1;
+    }
+    if(PanelMode) count += stride-offset-depth;
+  }
+}
+
+} // end namespace internal
+
+/** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
+  * \sa setCpuCacheSizes */
+inline std::ptrdiff_t l1CacheSize()
+{
+  std::ptrdiff_t l1, l2;
+  internal::manage_caching_sizes(GetAction, &l1, &l2);
+  return l1;
+}
+
+/** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
+  * \sa setCpuCacheSizes */
+inline std::ptrdiff_t l2CacheSize()
+{
+  std::ptrdiff_t l1, l2;
+  internal::manage_caching_sizes(GetAction, &l1, &l2);
+  return l2;
+}
+
+/** Set the cpu L1 and L2 cache sizes (in bytes).
+  * These values are used to adjust the size of the blocks
+  * in the algorithms that work block-wise.
+  *
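+  * A minimal usage sketch (the sizes below are illustrative assumptions,
+  * not tuned recommendations):
+  * \code
+  * Eigen::setCpuCacheSizes(32*1024, 1024*1024); // pretend 32KB L1, 1MB L2
+  * std::ptrdiff_t l1 = Eigen::l1CacheSize();    // now returns 32768
+  * \endcode
+  *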
+  * \sa computeProductBlockingSizes */
+inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2)
+{
+  internal::manage_caching_sizes(SetAction, &l1, &l2);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_BLOCK_PANEL_H
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix.h b/Eigen/src/Core/products/GeneralMatrixMatrix.h
new file mode 100644
index 0000000..3f5ffcf
--- /dev/null
+++ b/Eigen/src/Core/products/GeneralMatrixMatrix.h
@@ -0,0 +1,427 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
+#define EIGEN_GENERAL_MATRIX_MATRIX_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
+
+/* Specialization for a row-major destination matrix => simple transposition of the product */
+template<
+  typename Index,
+  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
+struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
+{
+  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+  static EIGEN_STRONG_INLINE void run(
+    Index rows, Index cols, Index depth,
+    const LhsScalar* lhs, Index lhsStride,
+    const RhsScalar* rhs, Index rhsStride,
+    ResScalar* res, Index resStride,
+    ResScalar alpha,
+    level3_blocking<RhsScalar,LhsScalar>& blocking,
+    GemmParallelInfo<Index>* info = 0)
+  {
+    // transpose the product such that the result is column major
+    general_matrix_matrix_product<Index,
+      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
+      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
+      ColMajor>
+    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
+  }
+};
+
+/*  Specialization for a col-major destination matrix
+ *    => Blocking algorithm following Goto's paper */
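+
+/* Loop-nest sketch of the sequential path implemented below (a summary of
+ * the code, not additional logic; names match the implementation):
+ *
+ *   for k2 in [0, depth) step kc:        // select a kc x cols rhs panel B'
+ *     pack_rhs(blockB, ...)              // copy B' to contiguous memory (L2)
+ *     for i2 in [0, rows) step mc:       // select an mc x kc lhs block A'
+ *       pack_lhs(blockA, ...)            // copy A' to contiguous memory (L1)
+ *       gebp(...)                        // res(i2:i2+mc, :) += alpha * A' * B'
+ */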
+template<
+  typename Index,
+  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
+struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
+{
+
+typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+static void run(Index rows, Index cols, Index depth,
+  const LhsScalar* _lhs, Index lhsStride,
+  const RhsScalar* _rhs, Index rhsStride,
+  ResScalar* res, Index resStride,
+  ResScalar alpha,
+  level3_blocking<LhsScalar,RhsScalar>& blocking,
+  GemmParallelInfo<Index>* info = 0)
+{
+  const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+  const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
+
+  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+
+  Index kc = blocking.kc();                   // cache block size along the K direction
+  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
+  //Index nc = blocking.nc(); // cache block size along the N direction
+
+  gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+  gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
+  gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
+
+#ifdef EIGEN_HAS_OPENMP
+  if(info)
+  {
+    // this is the parallel version!
+    Index tid = omp_get_thread_num();
+    Index threads = omp_get_num_threads();
+    
+    std::size_t sizeA = kc*mc;
+    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
+    ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);
+    
+    RhsScalar* blockB = blocking.blockB();
+    eigen_internal_assert(blockB!=0);
+
+    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
+    for(Index k=0; k<depth; k+=kc)
+    {
+      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B' and cols of A'
+
+      // In order to reduce the chance that a thread has to wait for the others,
+      // let's start by packing A'.
+      pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
+
+      // Pack B_k to B' in a parallel fashion:
+      // each thread packs the sub block B_k,j to B'_j where j is the thread id.
+
+      // However, before copying to B'_j, we have to make sure that no other thread is still using it,
+      // i.e., we test that info[tid].users equals 0.
+      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
+      while(info[tid].users!=0) {}
+      info[tid].users += threads;
+
+      pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);
+
+      // Notify the other threads that the part B'_j is ready to go.
+      info[tid].sync = k;
+
+      // Computes C_i += A' * B' per B'_j
+      for(Index shift=0; shift<threads; ++shift)
+      {
+        Index j = (tid+shift)%threads;
+
+        // At this point we have to make sure that B'_j has been updated by thread j;
+        // we use testAndSetOrdered to mimic a volatile access.
+        // However, no need to wait for the B' part which has been updated by the current thread!
+        if(shift>0)
+          while(info[j].sync!=k) {}
+
+        gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
+      }
+
+      // Then keep going as usual with the remaining A'
+      for(Index i=mc; i<rows; i+=mc)
+      {
+        const Index actual_mc = (std::min)(i+mc,rows)-i;
+
+        // pack A_i,k to A'
+        pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
+
+        // C_i += A' * B'
+        gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
+      }
+
+      // Release all the sub blocks B'_j of B' for the current thread,
+      // i.e., we simply decrement the number of users by 1
+      for(Index j=0; j<threads; ++j)
+        #pragma omp atomic
+        --(info[j].users);
+    }
+  }
+  else
+#endif // EIGEN_HAS_OPENMP
+  {
+    EIGEN_UNUSED_VARIABLE(info);
+
+    // this is the sequential version!
+    std::size_t sizeA = kc*mc;
+    std::size_t sizeB = kc*cols;
+    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+
+    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
+    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
+    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());
+
+    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
+    // (==GEMM_VAR1)
+    for(Index k2=0; k2<depth; k2+=kc)
+    {
+      const Index actual_kc = (std::min)(k2+kc,depth)-k2;
+
+      // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
+      // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
+      // Note that this panel will be read as many times as there are blocks in the lhs's
+      // vertical panel, which is, in practice, a very low number.
+      pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
+
+      // For each mc x kc block of the lhs's vertical panel...
+      // (==GEPP_VAR1)
+      for(Index i2=0; i2<rows; i2+=mc)
+      {
+        const Index actual_mc = (std::min)(i2+mc,rows)-i2;
+
+        // We pack the lhs's block into a sequential chunk of memory (L1 caching)
+        // Note that this block will be read a very high number of times, equal to the number of
+        // micro vertical panels of the large rhs panel (e.g., cols/4 times).
+        pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);
+
+        // Everything is packed, we can now call the block * panel kernel:
+        gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
+      }
+    }
+  }
+}
+
+};
+
+/*********************************************************************************
+*  Specialization of GeneralProduct<> for "large" GEMM, i.e.,
+*  implementation of the high level wrapper to general_matrix_matrix_product
+**********************************************************************************/
+
+template<typename Lhs, typename Rhs>
+struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
+ : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
+{};
+
+template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
+struct gemm_functor
+{
+  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha,
+                  BlockingType& blocking)
+    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
+  {}
+
+  void initParallelSession() const
+  {
+    m_blocking.allocateB();
+  }
+
+  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
+  {
+    if(cols==-1)
+      cols = m_rhs.cols();
+
+    Gemm::run(rows, cols, m_lhs.cols(),
+              /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
+              /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
+              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
+              m_actualAlpha, m_blocking, info);
+  }
+
+  protected:
+    const Lhs& m_lhs;
+    const Rhs& m_rhs;
+    Dest& m_dest;
+    Scalar m_actualAlpha;
+    BlockingType& m_blocking;
+};
+
+template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
+bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
+
+template<typename _LhsScalar, typename _RhsScalar>
+class level3_blocking
+{
+    typedef _LhsScalar LhsScalar;
+    typedef _RhsScalar RhsScalar;
+
+  protected:
+    LhsScalar* m_blockA;
+    RhsScalar* m_blockB;
+    RhsScalar* m_blockW;
+
+    DenseIndex m_mc;
+    DenseIndex m_nc;
+    DenseIndex m_kc;
+
+  public:
+
+    level3_blocking()
+      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
+    {}
+
+    inline DenseIndex mc() const { return m_mc; }
+    inline DenseIndex nc() const { return m_nc; }
+    inline DenseIndex kc() const { return m_kc; }
+
+    inline LhsScalar* blockA() { return m_blockA; }
+    inline RhsScalar* blockB() { return m_blockB; }
+    inline RhsScalar* blockW() { return m_blockW; }
+};
+
+template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
+class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true>
+  : public level3_blocking<
+      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
+      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
+{
+    enum {
+      Transpose = StorageOrder==RowMajor,
+      ActualRows = Transpose ? MaxCols : MaxRows,
+      ActualCols = Transpose ? MaxRows : MaxCols
+    };
+    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
+    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
+    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+    enum {
+      SizeA = ActualRows * MaxDepth,
+      SizeB = ActualCols * MaxDepth,
+      SizeW = MaxDepth * Traits::WorkSpaceFactor
+    };
+
+    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
+    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
+    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];
+
+  public:
+
+    gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
+    {
+      this->m_mc = ActualRows;
+      this->m_nc = ActualCols;
+      this->m_kc = MaxDepth;
+      this->m_blockA = m_staticA;
+      this->m_blockB = m_staticB;
+      this->m_blockW = m_staticW;
+    }
+
+    inline void allocateA() {}
+    inline void allocateB() {}
+    inline void allocateW() {}
+    inline void allocateAll() {}
+};
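
Editor's note: when all three dimensions are fixed at compile time, this specialization sizes its packing buffers as enum constants and stores them as aligned member arrays, so the allocate*() calls are no-ops. A sketch of that sizing arithmetic with hypothetical numbers (WorkSpaceFactor = 8 is illustrative, not the actual gebp_traits value):

```cpp
enum { MaxRows = 4, MaxCols = 4, MaxDepth = 4, WorkSpaceFactor = 8 };
enum {
  SizeA = MaxRows * MaxDepth,         // 16 scalars for the packed lhs block
  SizeB = MaxCols * MaxDepth,         // 16 scalars for the packed rhs block
  SizeW = MaxDepth * WorkSpaceFactor  // 32 scalars of gebp workspace
};
// All sizes are compile-time constants, hence the static member arrays above.
static_assert(SizeA == 16 && SizeB == 16 && SizeW == 32, "constant sizes");
```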
+
+template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
+class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
+  : public level3_blocking<
+      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
+      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
+{
+    enum {
+      Transpose = StorageOrder==RowMajor
+    };
+    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
+    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
+    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+
+    DenseIndex m_sizeA;
+    DenseIndex m_sizeB;
+    DenseIndex m_sizeW;
+
+  public:
+
+    gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
+    {
+      this->m_mc = Transpose ? cols : rows;
+      this->m_nc = Transpose ? rows : cols;
+      this->m_kc = depth;
+
+      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc);
+      m_sizeA = this->m_mc * this->m_kc;
+      m_sizeB = this->m_kc * this->m_nc;
+      m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
+    }
+
+    void allocateA()
+    {
+      if(this->m_blockA==0)
+        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
+    }
+
+    void allocateB()
+    {
+      if(this->m_blockB==0)
+        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
+    }
+
+    void allocateW()
+    {
+      if(this->m_blockW==0)
+        this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
+    }
+
+    void allocateAll()
+    {
+      allocateA();
+      allocateB();
+      allocateW();
+    }
+
+    ~gemm_blocking_space()
+    {
+      aligned_delete(this->m_blockA, m_sizeA);
+      aligned_delete(this->m_blockB, m_sizeB);
+      aligned_delete(this->m_blockW, m_sizeW);
+    }
+};
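
Editor's note: the dynamic specialization computes block sizes at run time and heap-allocates each aligned buffer lazily, releasing it in the destructor. A simplified sketch of that lazy RAII pattern, with std::aligned_alloc standing in for Eigen's internal aligned_new (and assuming the byte count is a multiple of the 16-byte alignment):

```cpp
#include <cstdlib>

template <typename T>
struct LazyBlock {
  T* ptr;
  std::size_t n;
  explicit LazyBlock(std::size_t size) : ptr(nullptr), n(size) {}
  void allocate() { // no-op when already allocated, like allocateA()/allocateB()
    if (!ptr) ptr = static_cast<T*>(std::aligned_alloc(16, n * sizeof(T)));
  }
  ~LazyBlock() { std::free(ptr); } // freeing a null pointer is well defined
};
```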
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class GeneralProduct<Lhs, Rhs, GemmProduct>
+  : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
+{
+    enum {
+      MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
+    };
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
+    
+    typedef typename  Lhs::Scalar LhsScalar;
+    typedef typename  Rhs::Scalar RhsScalar;
+    typedef           Scalar      ResScalar;
+
+    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {
+      typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
+      EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
+    }
+
+    template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
+    {
+      eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
+
+      typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
+      typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
+
+      Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
+                                 * RhsBlasTraits::extractScalarFactor(m_rhs);
+
+      typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
+              Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
+
+      typedef internal::gemm_functor<
+        Scalar, Index,
+        internal::general_matrix_matrix_product<
+          Index,
+          LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
+          RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
+          (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
+        _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
+
+      BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
+
+      internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
+    }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
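
Editor's note: for orientation, this whole file sits behind ordinary Eigen products. A dynamic-size matrix product such as the one below is routed through GeneralProduct<...,GemmProduct>::scaleAndAddTo and ends up in the blocked kernels above; this is plain public API, nothing internal:

```cpp
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(256, 128);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(128, 64);
  Eigen::MatrixXd C = Eigen::MatrixXd::Zero(256, 64);
  C.noalias() += 2.0 * A * B; // scaleAndAddTo(C, alpha) with alpha == 2.0
  return 0;
}
```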
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
new file mode 100644
index 0000000..5c37639
--- /dev/null
+++ b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h
@@ -0,0 +1,278 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H
+#define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H
+
+namespace Eigen { 
+
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjLhs, bool ConjRhs>
+struct selfadjoint_rank1_update;
+
+namespace internal {
+
+/**********************************************************************
+* This file implements a general A * B product while
+* evaluating only one triangular part of the product.
+* This is a more general version of the self-adjoint product (C += A A^T)
+* provided by the level 3 SYRK BLAS routine.
+**********************************************************************/
+
+// forward declarations (defined at the end of this file)
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjLhs, bool ConjRhs, int UpLo>
+struct tribb_kernel;
+  
+/* Optimized matrix-matrix product evaluating only one triangular half */
+template <typename Index,
+          typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+          typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
+                              int ResStorageOrder, int  UpLo, int Version = Specialized>
+struct general_matrix_matrix_triangular_product;
+
+// as usual, if the result is row-major we transpose the product
+template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+                          typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int  UpLo, int Version>
+struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,UpLo,Version>
+{
+  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride,
+                                      const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, const ResScalar& alpha)
+  {
+    general_matrix_matrix_triangular_product<Index,
+        RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
+        LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
+        ColMajor, UpLo==Lower?Upper:Lower>
+      ::run(size,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha);
+  }
+};
+
+template <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+                          typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int  UpLo, int Version>
+struct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,UpLo,Version>
+{
+  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride,
+                                      const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, const ResScalar& alpha)
+  {
+    const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+    const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
+
+    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+
+    Index kc = depth; // cache block size along the K direction
+    Index mc = size;  // cache block size along the M direction
+    Index nc = size;  // cache block size along the N direction
+    computeProductBlockingSizes<LhsScalar,RhsScalar>(kc, mc, nc);
+    // !!! mc must be a multiple of nr:
+    if(mc > Traits::nr)
+      mc = (mc/Traits::nr)*Traits::nr;
+
+    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+    std::size_t sizeB = sizeW + kc*size;
+    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, kc*mc, 0);
+    ei_declare_aligned_stack_constructed_variable(RhsScalar, allocatedBlockB, sizeB, 0);
+    RhsScalar* blockB = allocatedBlockB + sizeW;
+    
+    gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+    gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
+    gebp_kernel <LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
+    tribb_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs, UpLo> sybb;
+
+    for(Index k2=0; k2<depth; k2+=kc)
+    {
+      const Index actual_kc = (std::min)(k2+kc,depth)-k2;
+
+      // note that the actual rhs is the transpose/adjoint of mat
+      pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, size);
+
+      for(Index i2=0; i2<size; i2+=mc)
+      {
+        const Index actual_mc = (std::min)(i2+mc,size)-i2;
+
+        pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
+
+        // the selected actual_mc x size panel of res is split into three different parts:
+        //  1 - before the diagonal => processed with gebp or skipped
+        //  2 - the actual_mc x actual_mc symmetric block => processed with a special kernel
+        //  3 - after the diagonal => processed with gebp or skipped
+        if (UpLo==Lower)
+          gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, (std::min)(size,i2), alpha,
+               -1, -1, 0, 0, allocatedBlockB);
+
+        sybb(res+resStride*i2 + i2, resStride, blockA, blockB + actual_kc*i2, actual_mc, actual_kc, alpha, allocatedBlockB);
+
+        if (UpLo==Upper)
+        {
+          Index j2 = i2+actual_mc;
+          gebp(res+resStride*j2+i2, resStride, blockA, blockB+actual_kc*j2, actual_mc, actual_kc, (std::max)(Index(0), size-j2), alpha,
+               -1, -1, 0, 0, allocatedBlockB);
+        }
+      }
+    }
+  }
+};
+
+// Optimized packed Block * packed Block product kernel evaluating only one given triangular part
+// This kernel is built on top of the gebp kernel:
+// - the current destination block is processed per panel of actual_mc x BlockSize
+//   where BlockSize is set to the minimal value allowing gebp to be as fast as possible
+// - then, as usual, each panel is split into three parts along the diagonal,
+//   the sub blocks above and below the diagonal are processed as usual,
+//   while the triangular block overlapping the diagonal is evaluated into a
+//   small temporary buffer which is then accumulated into the result using a
+//   triangular traversal.
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjLhs, bool ConjRhs, int UpLo>
+struct tribb_kernel
+{
+  typedef gebp_traits<LhsScalar,RhsScalar,ConjLhs,ConjRhs> Traits;
+  typedef typename Traits::ResScalar ResScalar;
+  
+  enum {
+    BlockSize  = EIGEN_PLAIN_ENUM_MAX(mr,nr)
+  };
+  void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, const ResScalar& alpha, RhsScalar* workspace)
+  {
+    gebp_kernel<LhsScalar, RhsScalar, Index, mr, nr, ConjLhs, ConjRhs> gebp_kernel;
+    Matrix<ResScalar,BlockSize,BlockSize,ColMajor> buffer;
+
+    // let's process the block per panel of actual_mc x BlockSize,
+    // again, each is split into three parts, etc.
+    for (Index j=0; j<size; j+=BlockSize)
+    {
+      Index actualBlockSize = std::min<Index>(BlockSize,size - j);
+      const RhsScalar* actual_b = blockB+j*depth;
+
+      if(UpLo==Upper)
+        gebp_kernel(res+j*resStride, resStride, blockA, actual_b, j, depth, actualBlockSize, alpha,
+                    -1, -1, 0, 0, workspace);
+
+      // selfadjoint micro block
+      {
+        Index i = j;
+        buffer.setZero();
+        // 1 - apply the kernel on the temporary buffer
+        gebp_kernel(buffer.data(), BlockSize, blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, alpha,
+                    -1, -1, 0, 0, workspace);
+        // 2 - triangular accumulation
+        for(Index j1=0; j1<actualBlockSize; ++j1)
+        {
+          ResScalar* r = res + (j+j1)*resStride + i;
+          for(Index i1=UpLo==Lower ? j1 : 0;
+              UpLo==Lower ? i1<actualBlockSize : i1<=j1; ++i1)
+            r[i1] += buffer(i1,j1);
+        }
+      }
+
+      if(UpLo==Lower)
+      {
+        Index i = j+actualBlockSize;
+        gebp_kernel(res+j*resStride+i, resStride, blockA+depth*i, actual_b, size-i, depth, actualBlockSize, alpha,
+                    -1, -1, 0, 0, workspace);
+      }
+    }
+  }
+};
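
Editor's note: the only non-obvious step in tribb_kernel is step 2 — the diagonal block is computed into a small dense buffer, then only the wanted triangle is accumulated back into the result. A standalone sketch of that copy-back for the Lower case (raw pointers and strides are illustrative):

```cpp
// Accumulate the lower triangle of a bs x bs column-major buffer into res,
// mirroring the "triangular accumulation" loop of tribb_kernel above.
void accumulate_lower(double* res, long resStride, const double* buf, long bs) {
  for (long j = 0; j < bs; ++j)      // column of the diagonal block
    for (long i = j; i < bs; ++i)    // rows at or below the diagonal only
      res[j * resStride + i] += buf[j * bs + i];
}
```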
+
+} // end namespace internal
+
+// high level API
+
+template<typename MatrixType, typename ProductType, int UpLo, bool IsOuterProduct>
+struct general_product_to_triangular_selector;
+
+
+template<typename MatrixType, typename ProductType, int UpLo>
+struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,true>
+{
+  static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha)
+  {
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+    
+    typedef typename internal::remove_all<typename ProductType::LhsNested>::type Lhs;
+    typedef internal::blas_traits<Lhs> LhsBlasTraits;
+    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;
+    typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;
+    typename internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+    
+    typedef typename internal::remove_all<typename ProductType::RhsNested>::type Rhs;
+    typedef internal::blas_traits<Rhs> RhsBlasTraits;
+    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;
+    typedef typename internal::remove_all<ActualRhs>::type _ActualRhs;
+    typename internal::add_const_on_value_type<ActualRhs>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived());
+
+    enum {
+      StorageOrder = (internal::traits<MatrixType>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+      UseLhsDirectly = _ActualLhs::InnerStrideAtCompileTime==1,
+      UseRhsDirectly = _ActualRhs::InnerStrideAtCompileTime==1
+    };
+    
+    internal::gemv_static_vector_if<Scalar,Lhs::SizeAtCompileTime,Lhs::MaxSizeAtCompileTime,!UseLhsDirectly> static_lhs;
+    ei_declare_aligned_stack_constructed_variable(Scalar, actualLhsPtr, actualLhs.size(),
+      (UseLhsDirectly ? const_cast<Scalar*>(actualLhs.data()) : static_lhs.data()));
+    if(!UseLhsDirectly) Map<typename _ActualLhs::PlainObject>(actualLhsPtr, actualLhs.size()) = actualLhs;
+    
+    internal::gemv_static_vector_if<Scalar,Rhs::SizeAtCompileTime,Rhs::MaxSizeAtCompileTime,!UseRhsDirectly> static_rhs;
+    ei_declare_aligned_stack_constructed_variable(Scalar, actualRhsPtr, actualRhs.size(),
+      (UseRhsDirectly ? const_cast<Scalar*>(actualRhs.data()) : static_rhs.data()));
+    if(!UseRhsDirectly) Map<typename _ActualRhs::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
+    
+    
+    selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,
+                              LhsBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,
+                              RhsBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex>
+          ::run(actualLhs.size(), mat.data(), mat.outerStride(), actualLhsPtr, actualRhsPtr, actualAlpha);
+  }
+};
+
+template<typename MatrixType, typename ProductType, int UpLo>
+struct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,false>
+{
+  static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha)
+  {
+    typedef typename MatrixType::Index Index;
+    
+    typedef typename internal::remove_all<typename ProductType::LhsNested>::type Lhs;
+    typedef internal::blas_traits<Lhs> LhsBlasTraits;
+    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;
+    typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;
+    typename internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+    
+    typedef typename internal::remove_all<typename ProductType::RhsNested>::type Rhs;
+    typedef internal::blas_traits<Rhs> RhsBlasTraits;
+    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;
+    typedef typename internal::remove_all<ActualRhs>::type _ActualRhs;
+    typename internal::add_const_on_value_type<ActualRhs>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+    typename ProductType::Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived());
+
+    internal::general_matrix_matrix_triangular_product<Index,
+      typename Lhs::Scalar, _ActualLhs::Flags&RowMajorBit ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
+      typename Rhs::Scalar, _ActualRhs::Flags&RowMajorBit ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
+      MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor, UpLo>
+      ::run(mat.cols(), actualLhs.cols(),
+            &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &actualRhs.coeffRef(0,0), actualRhs.outerStride(),
+            mat.data(), mat.outerStride(), actualAlpha);
+  }
+};
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename ProductDerived, typename _Lhs, typename _Rhs>
+TriangularView<MatrixType,UpLo>& TriangularView<MatrixType,UpLo>::assignProduct(const ProductBase<ProductDerived, _Lhs,_Rhs>& prod, const Scalar& alpha)
+{
+  general_product_to_triangular_selector<MatrixType, ProductDerived, UpLo, (_Lhs::ColsAtCompileTime==1) || (_Rhs::RowsAtCompileTime==1)>::run(m_matrix.const_cast_derived(), prod.derived(), alpha);
+  
+  return *this;
+}
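
Editor's note: a documented public entry point that reaches this machinery is SelfAdjointView::rankUpdate(), which evaluates only the named triangular half (the SYRK-like C += alpha * A * A^T case):

```cpp
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(64, 32);
  Eigen::MatrixXd C = Eigen::MatrixXd::Zero(64, 64);
  // C += 1.5 * A * A^T, writing only the lower triangle (SYRK-like).
  C.selfadjointView<Eigen::Lower>().rankUpdate(A, 1.5);
  return 0;
}
```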
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h
new file mode 100644
index 0000000..3deed06
--- /dev/null
+++ b/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_MKL.h
@@ -0,0 +1,146 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   Level 3 BLAS SYRK/HERK implementation.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_MKL_H
+#define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_MKL_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template <typename Index, typename Scalar, int AStorageOrder, bool ConjugateA, int ResStorageOrder, int  UpLo>
+struct general_matrix_matrix_rankupdate :
+       general_matrix_matrix_triangular_product<
+         Index,Scalar,AStorageOrder,ConjugateA,Scalar,AStorageOrder,ConjugateA,ResStorageOrder,UpLo,BuiltIn> {};
+
+
+// try to go to BLAS specialization
+#define EIGEN_MKL_RANKUPDATE_SPECIALIZE(Scalar) \
+template <typename Index, int LhsStorageOrder, bool ConjugateLhs, \
+                          int RhsStorageOrder, bool ConjugateRhs, int  UpLo> \
+struct general_matrix_matrix_triangular_product<Index,Scalar,LhsStorageOrder,ConjugateLhs, \
+               Scalar,RhsStorageOrder,ConjugateRhs,ColMajor,UpLo,Specialized> { \
+  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const Scalar* lhs, Index lhsStride, \
+                          const Scalar* rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha) \
+  { \
+    if (lhs==rhs) { \
+      general_matrix_matrix_rankupdate<Index,Scalar,LhsStorageOrder,ConjugateLhs,ColMajor,UpLo> \
+      ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha); \
+    } else { \
+      general_matrix_matrix_triangular_product<Index, \
+        Scalar, LhsStorageOrder, ConjugateLhs, \
+        Scalar, RhsStorageOrder, ConjugateRhs, \
+        ColMajor, UpLo, BuiltIn> \
+      ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha); \
+    } \
+  } \
+};
+
+EIGEN_MKL_RANKUPDATE_SPECIALIZE(double)
+//EIGEN_MKL_RANKUPDATE_SPECIALIZE(dcomplex)
+EIGEN_MKL_RANKUPDATE_SPECIALIZE(float)
+//EIGEN_MKL_RANKUPDATE_SPECIALIZE(scomplex)
+
+// SYRK for float/double
+#define EIGEN_MKL_RANKUPDATE_R(EIGTYPE, MKLTYPE, MKLFUNC) \
+template <typename Index, int AStorageOrder, bool ConjugateA, int  UpLo> \
+struct general_matrix_matrix_rankupdate<Index,EIGTYPE,AStorageOrder,ConjugateA,ColMajor,UpLo> { \
+  enum { \
+    IsLower = (UpLo&Lower) == Lower, \
+    LowUp = IsLower ? Lower : Upper, \
+    conjA = ((AStorageOrder==ColMajor) && ConjugateA) ? 1 : 0 \
+  }; \
+  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const EIGTYPE* lhs, Index lhsStride, \
+                          const EIGTYPE* rhs, Index rhsStride, EIGTYPE* res, Index resStride, EIGTYPE alpha) \
+  { \
+  /* typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs;*/ \
+\
+   MKL_INT lda=lhsStride, ldc=resStride, n=size, k=depth; \
+   char uplo=(IsLower) ? 'L' : 'U', trans=(AStorageOrder==RowMajor) ? 'T':'N'; \
+   MKLTYPE alpha_, beta_; \
+\
+/* Set alpha_ & beta_ */ \
+   assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); \
+   assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(beta_, EIGTYPE(1)); \
+   MKLFUNC(&uplo, &trans, &n, &k, &alpha_, lhs, &lda, &beta_, res, &ldc); \
+  } \
+};
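
Editor's note: expanded for double, the macro above reduces to a single Fortran-style dsyrk call. A hedged sketch using the conventional reference-BLAS binding (MKL's actual prototypes use MKL_INT, and assign_scalar_eig2mkl handles the scalar conversion):

```cpp
extern "C" void dsyrk_(const char* uplo, const char* trans,
                       const int* n, const int* k, const double* alpha,
                       const double* a, const int* lda,
                       const double* beta, double* c, const int* ldc);

// C_lower += alpha * A * A^T for a column-major n x k matrix A.
void lower_rank_update(int n, int k, double alpha,
                       const double* a, int lda, double* c, int ldc) {
  const char uplo = 'L', trans = 'N';
  const double beta = 1.0; // beta == 1 accumulates into c, as in the macro
  dsyrk_(&uplo, &trans, &n, &k, &alpha, a, &lda, &beta, c, &ldc);
}
```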
+
+// HERK for complex data
+#define EIGEN_MKL_RANKUPDATE_C(EIGTYPE, MKLTYPE, RTYPE, MKLFUNC) \
+template <typename Index, int AStorageOrder, bool ConjugateA, int  UpLo> \
+struct general_matrix_matrix_rankupdate<Index,EIGTYPE,AStorageOrder,ConjugateA,ColMajor,UpLo> { \
+  enum { \
+    IsLower = (UpLo&Lower) == Lower, \
+    LowUp = IsLower ? Lower : Upper, \
+    conjA = (((AStorageOrder==ColMajor) && ConjugateA) || ((AStorageOrder==RowMajor) && !ConjugateA)) ? 1 : 0 \
+  }; \
+  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const EIGTYPE* lhs, Index lhsStride, \
+                          const EIGTYPE* rhs, Index rhsStride, EIGTYPE* res, Index resStride, EIGTYPE alpha) \
+  { \
+   typedef Matrix<EIGTYPE, Dynamic, Dynamic, AStorageOrder> MatrixType; \
+\
+   MKL_INT lda=lhsStride, ldc=resStride, n=size, k=depth; \
+   char uplo=(IsLower) ? 'L' : 'U', trans=(AStorageOrder==RowMajor) ? 'C':'N'; \
+   RTYPE alpha_, beta_; \
+   const EIGTYPE* a_ptr; \
+\
+/* Set alpha_ & beta_ */ \
+/*   assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); */\
+/*   assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(beta_, EIGTYPE(1));*/ \
+   alpha_ = alpha.real(); \
+   beta_ = 1.0; \
+/* Copy with conjugation in some cases*/ \
+   MatrixType a; \
+   if (conjA) { \
+     Map<const MatrixType, 0, OuterStride<> > mapA(lhs,n,k,OuterStride<>(lhsStride)); \
+     a = mapA.conjugate(); \
+     lda = a.outerStride(); \
+     a_ptr = a.data(); \
+   } else a_ptr=lhs; \
+   MKLFUNC(&uplo, &trans, &n, &k, &alpha_, (MKLTYPE*)a_ptr, &lda, &beta_, (MKLTYPE*)res, &ldc); \
+  } \
+};
+
+
+EIGEN_MKL_RANKUPDATE_R(double, double, dsyrk)
+EIGEN_MKL_RANKUPDATE_R(float,  float,  ssyrk)
+
+//EIGEN_MKL_RANKUPDATE_C(dcomplex, MKL_Complex16, double, zherk)
+//EIGEN_MKL_RANKUPDATE_C(scomplex, MKL_Complex8,  double, cherk)
+
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_MKL_H
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h b/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h
new file mode 100644
index 0000000..060af32
--- /dev/null
+++ b/Eigen/src/Core/products/GeneralMatrixMatrix_MKL.h
@@ -0,0 +1,118 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   General matrix-matrix product functionality based on ?GEMM.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_GENERAL_MATRIX_MATRIX_MKL_H
+#define EIGEN_GENERAL_MATRIX_MATRIX_MKL_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/**********************************************************************
+* This file implements general matrix-matrix multiplication using the
+* BLAS gemm function, via partial specialization of the
+* general_matrix_matrix_product::run(..) method for float, double,
+* std::complex<float> and std::complex<double> types
+**********************************************************************/
+
+// gemm specialization
+
+#define GEMM_SPECIALIZATION(EIGTYPE, EIGPREFIX, MKLTYPE, MKLPREFIX) \
+template< \
+  typename Index, \
+  int LhsStorageOrder, bool ConjugateLhs, \
+  int RhsStorageOrder, bool ConjugateRhs> \
+struct general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor> \
+{ \
+static void run(Index rows, Index cols, Index depth, \
+  const EIGTYPE* _lhs, Index lhsStride, \
+  const EIGTYPE* _rhs, Index rhsStride, \
+  EIGTYPE* res, Index resStride, \
+  EIGTYPE alpha, \
+  level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/, \
+  GemmParallelInfo<Index>* /*info = 0*/) \
+{ \
+  using std::conj; \
+\
+  char transa, transb; \
+  MKL_INT m, n, k, lda, ldb, ldc; \
+  const EIGTYPE *a, *b; \
+  MKLTYPE alpha_, beta_; \
+  MatrixX##EIGPREFIX a_tmp, b_tmp; \
+  EIGTYPE myone(1);\
+\
+/* Set transpose options */ \
+  transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \
+  transb = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \
+\
+/* Set m, n, k */ \
+  m = (MKL_INT)rows;  \
+  n = (MKL_INT)cols;  \
+  k = (MKL_INT)depth; \
+\
+/* Set alpha_ & beta_ */ \
+  assign_scalar_eig2mkl(alpha_, alpha); \
+  assign_scalar_eig2mkl(beta_, myone); \
+\
+/* Set lda, ldb, ldc */ \
+  lda = (MKL_INT)lhsStride; \
+  ldb = (MKL_INT)rhsStride; \
+  ldc = (MKL_INT)resStride; \
+\
+/* Set a, b, c */ \
+  if ((LhsStorageOrder==ColMajor) && (ConjugateLhs)) { \
+    Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,m,k,OuterStride<>(lhsStride)); \
+    a_tmp = lhs.conjugate(); \
+    a = a_tmp.data(); \
+    lda = a_tmp.outerStride(); \
+  } else a = _lhs; \
+\
+  if ((RhsStorageOrder==ColMajor) && (ConjugateRhs)) { \
+    Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,k,n,OuterStride<>(rhsStride)); \
+    b_tmp = rhs.conjugate(); \
+    b = b_tmp.data(); \
+    ldb = b_tmp.outerStride(); \
+  } else b = _rhs; \
+\
+  MKLPREFIX##gemm(&transa, &transb, &m, &n, &k, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \
+}};
+
+GEMM_SPECIALIZATION(double,   d,  double,        d)
+GEMM_SPECIALIZATION(float,    f,  float,         s)
+GEMM_SPECIALIZATION(dcomplex, cd, MKL_Complex16, z)
+GEMM_SPECIALIZATION(scomplex, cf, MKL_Complex8,  c)
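
Editor's note: when no conjugated copy is needed, the double instantiation boils down to one dgemm call. A sketch using the conventional reference-BLAS binding (again, MKL's real prototypes use MKL_INT and MKL-specific scalar types):

```cpp
extern "C" void dgemm_(const char* transa, const char* transb,
                       const int* m, const int* n, const int* k,
                       const double* alpha, const double* a, const int* lda,
                       const double* b, const int* ldb,
                       const double* beta, double* c, const int* ldc);

// C += alpha * A * B for column-major A (m x k), B (k x n), C (m x n).
void gemm_colmajor(int m, int n, int k, double alpha,
                   const double* a, int lda, const double* b, int ldb,
                   double* c, int ldc) {
  const char no = 'N';
  const double beta = 1.0; // beta == 1, matching "myone" in the macro above
  dgemm_(&no, &no, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c, &ldc);
}
```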
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_MATRIX_MATRIX_MKL_H
diff --git a/Eigen/src/Core/products/GeneralMatrixVector.h b/Eigen/src/Core/products/GeneralMatrixVector.h
new file mode 100644
index 0000000..0938770
--- /dev/null
+++ b/Eigen/src/Core/products/GeneralMatrixVector.h
@@ -0,0 +1,566 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERAL_MATRIX_VECTOR_H
+#define EIGEN_GENERAL_MATRIX_VECTOR_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/* Optimized col-major matrix * vector product:
+ * This algorithm processes 4 columns at once, which both reduces the
+ * number of loads/stores of the result by a factor of 4 and reduces
+ * instruction dependencies. Moreover, we know that all bands have the
+ * same alignment pattern.
+ *
+ * Mixing type logic: C += alpha * A * B
+ *  |  A  |  B  |alpha| comments
+ *  |real |cplx |cplx | no vectorization
+ *  |real |cplx |real | alpha is converted to a cplx when calling the run function, no vectorization
+ *  |cplx |real |cplx | invalid, the caller has to do tmp: = A * B; C += alpha*tmp
+ *  |cplx |real |real | optimal case, vectorization possible via real-cplx mul
+ */
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
+struct general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>
+{
+typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+enum {
+  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
+              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
+  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+  ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
+};
+
+typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
+typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
+typedef typename packet_traits<ResScalar>::type  _ResPacket;
+
+typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+
+EIGEN_DONT_INLINE static void run(
+  Index rows, Index cols,
+  const LhsScalar* lhs, Index lhsStride,
+  const RhsScalar* rhs, Index rhsIncr,
+  ResScalar* res, Index resIncr, RhsScalar alpha);
+};
+
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>::run(
+  Index rows, Index cols,
+  const LhsScalar* lhs, Index lhsStride,
+  const RhsScalar* rhs, Index rhsIncr,
+  ResScalar* res, Index resIncr, RhsScalar alpha)
+{
+  EIGEN_UNUSED_VARIABLE(resIncr)
+  eigen_internal_assert(resIncr==1);
+  #ifdef _EIGEN_ACCUMULATE_PACKETS
+  #error _EIGEN_ACCUMULATE_PACKETS has already been defined
+  #endif
+  #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) \
+    pstore(&res[j], \
+      padd(pload<ResPacket>(&res[j]), \
+        padd( \
+          padd(pcj.pmul(EIGEN_CAT(ploa , A0)<LhsPacket>(&lhs0[j]),    ptmp0), \
+                  pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs1[j]),   ptmp1)), \
+          padd(pcj.pmul(EIGEN_CAT(ploa , A2)<LhsPacket>(&lhs2[j]),    ptmp2), \
+                  pcj.pmul(EIGEN_CAT(ploa , A13)<LhsPacket>(&lhs3[j]),   ptmp3)) )))
+
+  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
+  conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
+  if(ConjugateRhs)
+    alpha = numext::conj(alpha);
+
+  enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
+  const Index columnsAtOnce = 4;
+  const Index peels = 2;
+  const Index LhsPacketAlignedMask = LhsPacketSize-1;
+  const Index ResPacketAlignedMask = ResPacketSize-1;
+//  const Index PeelAlignedMask = ResPacketSize*peels-1;
+  const Index size = rows;
+  
+  // How many coeffs of the result do we have to skip to be aligned.
+  // Here we assume data are at least aligned on the base scalar type.
+  Index alignedStart = internal::first_aligned(res,size);
+  Index alignedSize = ResPacketSize>1 ? alignedStart + ((size-alignedStart) & ~ResPacketAlignedMask) : 0;
+  const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
+
+  const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
+  Index alignmentPattern = alignmentStep==0 ? AllAligned
+                       : alignmentStep==(LhsPacketSize/2) ? EvenAligned
+                       : FirstAligned;
+
+  // we cannot assume the first element is aligned because of sub-matrices
+  const Index lhsAlignmentOffset = internal::first_aligned(lhs,size);
+
+  // find how many columns we have to skip to be aligned with the result (if possible)
+  Index skipColumns = 0;
+  // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
+  if( (size_t(lhs)%sizeof(LhsScalar)) || (size_t(res)%sizeof(ResScalar)) )
+  {
+    alignedSize = 0;
+    alignedStart = 0;
+  }
+  else if (LhsPacketSize>1)
+  {
+    eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0 || size<LhsPacketSize);
+
+    while (skipColumns<LhsPacketSize &&
+          alignedStart != ((lhsAlignmentOffset + alignmentStep*skipColumns)%LhsPacketSize))
+      ++skipColumns;
+    if (skipColumns==LhsPacketSize)
+    {
+      // nothing can be aligned, no need to skip any column
+      alignmentPattern = NoneAligned;
+      skipColumns = 0;
+    }
+    else
+    {
+      skipColumns = (std::min)(skipColumns,cols);
+      // note that the skipped columns are processed later.
+    }
+
+    eigen_internal_assert(  (alignmentPattern==NoneAligned)
+                      || (skipColumns + columnsAtOnce >= cols)
+                      || LhsPacketSize > size
+                      || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(LhsPacket))==0);
+  }
+  else if(Vectorizable)
+  {
+    alignedStart = 0;
+    alignedSize = size;
+    alignmentPattern = AllAligned;
+  }
+
+  Index offset1 = (FirstAligned && alignmentStep==1?3:1);
+  Index offset3 = (FirstAligned && alignmentStep==1?1:3);
+
+  Index columnBound = ((cols-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
+  for (Index i=skipColumns; i<columnBound; i+=columnsAtOnce)
+  {
+    RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[i*rhsIncr]),
+              ptmp1 = pset1<RhsPacket>(alpha*rhs[(i+offset1)*rhsIncr]),
+              ptmp2 = pset1<RhsPacket>(alpha*rhs[(i+2)*rhsIncr]),
+              ptmp3 = pset1<RhsPacket>(alpha*rhs[(i+offset3)*rhsIncr]);
+
+    // this helps a lot generating better binary code
+    const LhsScalar *lhs0 = lhs + i*lhsStride,     *lhs1 = lhs + (i+offset1)*lhsStride,
+                    *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
+
+    if (Vectorizable)
+    {
+      /* explicit vectorization */
+      // process initial unaligned coeffs
+      for (Index j=0; j<alignedStart; ++j)
+      {
+        res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
+        res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
+        res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
+        res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
+      }
+
+      if (alignedSize>alignedStart)
+      {
+        switch(alignmentPattern)
+        {
+          case AllAligned:
+            for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,d,d);
+            break;
+          case EvenAligned:
+            for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,du,d);
+            break;
+          case FirstAligned:
+          {
+            Index j = alignedStart;
+            if(peels>1)
+            {
+              LhsPacket A00, A01, A02, A03, A10, A11, A12, A13;
+              ResPacket T0, T1;
+
+              A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
+              A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
+              A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
+
+              for (; j<peeledSize; j+=peels*ResPacketSize)
+              {
+                A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);
+                A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]);  palign<2>(A02,A12);
+                A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]);  palign<3>(A03,A13);
+
+                A00 = pload<LhsPacket>(&lhs0[j]);
+                A10 = pload<LhsPacket>(&lhs0[j+LhsPacketSize]);
+                T0  = pcj.pmadd(A00, ptmp0, pload<ResPacket>(&res[j]));
+                T1  = pcj.pmadd(A10, ptmp0, pload<ResPacket>(&res[j+ResPacketSize]));
+
+                T0  = pcj.pmadd(A01, ptmp1, T0);
+                A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]);  palign<1>(A11,A01);
+                T0  = pcj.pmadd(A02, ptmp2, T0);
+                A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]);  palign<2>(A12,A02);
+                T0  = pcj.pmadd(A03, ptmp3, T0);
+                pstore(&res[j],T0);
+                A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]);  palign<3>(A13,A03);
+                T1  = pcj.pmadd(A11, ptmp1, T1);
+                T1  = pcj.pmadd(A12, ptmp2, T1);
+                T1  = pcj.pmadd(A13, ptmp3, T1);
+                pstore(&res[j+ResPacketSize],T1);
+              }
+            }
+            for (; j<alignedSize; j+=ResPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,du,du);
+            break;
+          }
+          default:
+            for (Index j = alignedStart; j<alignedSize; j+=ResPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(du,du,du);
+            break;
+        }
+      }
+    } // end explicit vectorization
+
+    /* process remaining coeffs (or all if there is no explicit vectorization) */
+    for (Index j=alignedSize; j<size; ++j)
+    {
+      res[j] = cj.pmadd(lhs0[j], pfirst(ptmp0), res[j]);
+      res[j] = cj.pmadd(lhs1[j], pfirst(ptmp1), res[j]);
+      res[j] = cj.pmadd(lhs2[j], pfirst(ptmp2), res[j]);
+      res[j] = cj.pmadd(lhs3[j], pfirst(ptmp3), res[j]);
+    }
+  }
+
+  // process remaining first and last columns (at most columnsAtOnce-1)
+  Index end = cols;
+  Index start = columnBound;
+  do
+  {
+    for (Index k=start; k<end; ++k)
+    {
+      RhsPacket ptmp0 = pset1<RhsPacket>(alpha*rhs[k*rhsIncr]);
+      const LhsScalar* lhs0 = lhs + k*lhsStride;
+
+      if (Vectorizable)
+      {
+        /* explicit vectorization */
+        // process first unaligned result's coeffs
+        for (Index j=0; j<alignedStart; ++j)
+          res[j] += cj.pmul(lhs0[j], pfirst(ptmp0));
+        // process aligned result's coeffs
+        if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
+          for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
+            pstore(&res[i], pcj.pmadd(pload<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
+        else
+          for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
+            pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
+      }
+
+      // process remaining scalars (or all if no explicit vectorization)
+      for (Index i=alignedSize; i<size; ++i)
+        res[i] += cj.pmul(lhs0[i], pfirst(ptmp0));
+    }
+    if (skipColumns)
+    {
+      start = 0;
+      end = skipColumns;
+      skipColumns = 0;
+    }
+    else
+      break;
+  } while(Vectorizable);
+  #undef _EIGEN_ACCUMULATE_PACKETS
+}
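
Editor's note: stripped of the alignment handling and packet types, the column-major kernel computes the following. This scalar reference (illustrative, double-only, assuming a unit rhs increment) shows why processing four columns per pass pays off — each pass reads and writes res only once:

```cpp
void gemv_colmajor_ref(long rows, long cols,
                       const double* lhs, long lhsStride,
                       const double* rhs, double* res, double alpha) {
  long i = 0;
  for (; i + 4 <= cols; i += 4) {
    // pre-scale the four rhs coefficients, as pset1(alpha*rhs[...]) does above
    const double c0 = alpha * rhs[i],     c1 = alpha * rhs[i + 1],
                 c2 = alpha * rhs[i + 2], c3 = alpha * rhs[i + 3];
    const double *l0 = lhs + i * lhsStride, *l1 = l0 + lhsStride,
                 *l2 = l1 + lhsStride,      *l3 = l2 + lhsStride;
    for (long j = 0; j < rows; ++j) // one load/store of res per four columns
      res[j] += c0 * l0[j] + c1 * l1[j] + c2 * l2[j] + c3 * l3[j];
  }
  for (; i < cols; ++i) // leftover columns, one at a time
    for (long j = 0; j < rows; ++j)
      res[j] += alpha * rhs[i] * lhs[i * lhsStride + j];
}
```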
+
+/* Optimized row-major matrix * vector product:
+ * This algorithm processes 4 rows at once, which both reduces the
+ * number of loads/stores of the result by a factor of 4 and reduces
+ * instruction dependencies. Moreover, we know that all bands have the
+ * same alignment pattern.
+ *
+ * Mixing type logic:
+ *  - alpha is always a complex (or converted to a complex)
+ *  - no vectorization
+ */
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
+struct general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>
+{
+typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+
+enum {
+  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable
+              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),
+  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
+  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
+  ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1
+};
+
+typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
+typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
+typedef typename packet_traits<ResScalar>::type  _ResPacket;
+
+typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
+typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
+typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;
+  
+EIGEN_DONT_INLINE static void run(
+  Index rows, Index cols,
+  const LhsScalar* lhs, Index lhsStride,
+  const RhsScalar* rhs, Index rhsIncr,
+  ResScalar* res, Index resIncr,
+  ResScalar alpha);
+};
+
+template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjugateLhs,RhsScalar,ConjugateRhs,Version>::run(
+  Index rows, Index cols,
+  const LhsScalar* lhs, Index lhsStride,
+  const RhsScalar* rhs, Index rhsIncr,
+  ResScalar* res, Index resIncr,
+  ResScalar alpha)
+{
+  EIGEN_UNUSED_VARIABLE(rhsIncr);
+  eigen_internal_assert(rhsIncr==1);
+  #ifdef _EIGEN_ACCUMULATE_PACKETS
+  #error _EIGEN_ACCUMULATE_PACKETS has already been defined
+  #endif
+
+  #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) {\
+    RhsPacket b = pload<RhsPacket>(&rhs[j]); \
+    ptmp0 = pcj.pmadd(EIGEN_CAT(ploa,A0) <LhsPacket>(&lhs0[j]), b, ptmp0); \
+    ptmp1 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs1[j]), b, ptmp1); \
+    ptmp2 = pcj.pmadd(EIGEN_CAT(ploa,A2) <LhsPacket>(&lhs2[j]), b, ptmp2); \
+    ptmp3 = pcj.pmadd(EIGEN_CAT(ploa,A13)<LhsPacket>(&lhs3[j]), b, ptmp3); }
+
+  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
+  conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;
+
+  enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 };
+  const Index rowsAtOnce = 4;
+  const Index peels = 2;
+  const Index RhsPacketAlignedMask = RhsPacketSize-1;
+  const Index LhsPacketAlignedMask = LhsPacketSize-1;
+//   const Index PeelAlignedMask = RhsPacketSize*peels-1;
+  const Index depth = cols;
+
+  // How many coeffs of the result do we have to skip to be aligned.
+  // Here we assume data are at least aligned on the base scalar type
+  // if that's not the case then vectorization is discarded, see below.
+  Index alignedStart = internal::first_aligned(rhs, depth);
+  Index alignedSize = RhsPacketSize>1 ? alignedStart + ((depth-alignedStart) & ~RhsPacketAlignedMask) : 0;
+  const Index peeledSize = alignedSize - RhsPacketSize*peels - RhsPacketSize + 1;
+
+  const Index alignmentStep = LhsPacketSize>1 ? (LhsPacketSize - lhsStride % LhsPacketSize) & LhsPacketAlignedMask : 0;
+  Index alignmentPattern = alignmentStep==0 ? AllAligned
+                         : alignmentStep==(LhsPacketSize/2) ? EvenAligned
+                         : FirstAligned;
+
+  // we cannot assume the first element is aligned because of sub-matrices
+  const Index lhsAlignmentOffset = internal::first_aligned(lhs,depth);
+
+  // find how many rows we have to skip to be aligned with rhs (if possible)
+  Index skipRows = 0;
+  // if the data cannot be aligned (TODO add some compile time tests when possible, e.g. for floats)
+  if( (sizeof(LhsScalar)!=sizeof(RhsScalar)) || (size_t(lhs)%sizeof(LhsScalar)) || (size_t(rhs)%sizeof(RhsScalar)) )
+  {
+    alignedSize = 0;
+    alignedStart = 0;
+  }
+  else if (LhsPacketSize>1)
+  {
+    eigen_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(LhsPacket)==0  || depth<LhsPacketSize);
+
+    while (skipRows<LhsPacketSize &&
+           alignedStart != ((lhsAlignmentOffset + alignmentStep*skipRows)%LhsPacketSize))
+      ++skipRows;
+    if (skipRows==LhsPacketSize)
+    {
+      // nothing can be aligned, no need to skip any row
+      alignmentPattern = NoneAligned;
+      skipRows = 0;
+    }
+    else
+    {
+      skipRows = (std::min)(skipRows,Index(rows));
+      // note that the skipped rows are processed later.
+    }
+    eigen_internal_assert(  alignmentPattern==NoneAligned
+                      || LhsPacketSize==1
+                      || (skipRows + rowsAtOnce >= rows)
+                      || LhsPacketSize > depth
+                      || (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(LhsPacket))==0);
+  }
+  else if(Vectorizable)
+  {
+    alignedStart = 0;
+    alignedSize = depth;
+    alignmentPattern = AllAligned;
+  }
+
+  Index offset1 = (FirstAligned && alignmentStep==1?3:1);
+  Index offset3 = (FirstAligned && alignmentStep==1?1:3);
+
+  Index rowBound = ((rows-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
+  for (Index i=skipRows; i<rowBound; i+=rowsAtOnce)
+  {
+    EIGEN_ALIGN16 ResScalar tmp0 = ResScalar(0);
+    ResScalar tmp1 = ResScalar(0), tmp2 = ResScalar(0), tmp3 = ResScalar(0);
+
+    // this helps the compiler generating good binary code
+    const LhsScalar *lhs0 = lhs + i*lhsStride,     *lhs1 = lhs + (i+offset1)*lhsStride,
+                    *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
+
+    if (Vectorizable)
+    {
+      /* explicit vectorization */
+      ResPacket ptmp0 = pset1<ResPacket>(ResScalar(0)), ptmp1 = pset1<ResPacket>(ResScalar(0)),
+                ptmp2 = pset1<ResPacket>(ResScalar(0)), ptmp3 = pset1<ResPacket>(ResScalar(0));
+
+      // process initial unaligned coeffs
+      // FIXME this loop gets vectorized by the compiler!
+      for (Index j=0; j<alignedStart; ++j)
+      {
+        RhsScalar b = rhs[j];
+        tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
+        tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
+      }
+
+      if (alignedSize>alignedStart)
+      {
+        switch(alignmentPattern)
+        {
+          case AllAligned:
+            for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,d,d);
+            break;
+          case EvenAligned:
+            for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,du,d);
+            break;
+          case FirstAligned:
+          {
+            Index j = alignedStart;
+            if (peels>1)
+            {
+              /* Here we process 4 rows with two peeled iterations to hide
+               * the overhead of unaligned loads. Moreover, unaligned loads are handled
+               * using special shift/move operations between the two aligned packets
+               * overlapping the desired unaligned packet. This is *much* more efficient
+               * than basic unaligned loads.
+               */
+              LhsPacket A01, A02, A03, A11, A12, A13;
+              A01 = pload<LhsPacket>(&lhs1[alignedStart-1]);
+              A02 = pload<LhsPacket>(&lhs2[alignedStart-2]);
+              A03 = pload<LhsPacket>(&lhs3[alignedStart-3]);
+
+              for (; j<peeledSize; j+=peels*RhsPacketSize)
+              {
+                RhsPacket b = pload<RhsPacket>(&rhs[j]);
+                A11 = pload<LhsPacket>(&lhs1[j-1+LhsPacketSize]);  palign<1>(A01,A11);
+                A12 = pload<LhsPacket>(&lhs2[j-2+LhsPacketSize]);  palign<2>(A02,A12);
+                A13 = pload<LhsPacket>(&lhs3[j-3+LhsPacketSize]);  palign<3>(A03,A13);
+
+                ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), b, ptmp0);
+                ptmp1 = pcj.pmadd(A01, b, ptmp1);
+                A01 = pload<LhsPacket>(&lhs1[j-1+2*LhsPacketSize]);  palign<1>(A11,A01);
+                ptmp2 = pcj.pmadd(A02, b, ptmp2);
+                A02 = pload<LhsPacket>(&lhs2[j-2+2*LhsPacketSize]);  palign<2>(A12,A02);
+                ptmp3 = pcj.pmadd(A03, b, ptmp3);
+                A03 = pload<LhsPacket>(&lhs3[j-3+2*LhsPacketSize]);  palign<3>(A13,A03);
+
+                b = pload<RhsPacket>(&rhs[j+RhsPacketSize]);
+                ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j+LhsPacketSize]), b, ptmp0);
+                ptmp1 = pcj.pmadd(A11, b, ptmp1);
+                ptmp2 = pcj.pmadd(A12, b, ptmp2);
+                ptmp3 = pcj.pmadd(A13, b, ptmp3);
+              }
+            }
+            for (; j<alignedSize; j+=RhsPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(d,du,du);
+            break;
+          }
+          default:
+            for (Index j = alignedStart; j<alignedSize; j+=RhsPacketSize)
+              _EIGEN_ACCUMULATE_PACKETS(du,du,du);
+            break;
+        }
+        tmp0 += predux(ptmp0);
+        tmp1 += predux(ptmp1);
+        tmp2 += predux(ptmp2);
+        tmp3 += predux(ptmp3);
+      }
+    } // end explicit vectorization
+
+    // process remaining coeffs (or all if no explicit vectorization)
+    // FIXME this loop gets vectorized by the compiler!
+    for (Index j=alignedSize; j<depth; ++j)
+    {
+      RhsScalar b = rhs[j];
+      tmp0 += cj.pmul(lhs0[j],b); tmp1 += cj.pmul(lhs1[j],b);
+      tmp2 += cj.pmul(lhs2[j],b); tmp3 += cj.pmul(lhs3[j],b);
+    }
+    res[i*resIncr]            += alpha*tmp0;
+    res[(i+offset1)*resIncr]  += alpha*tmp1;
+    res[(i+2)*resIncr]        += alpha*tmp2;
+    res[(i+offset3)*resIncr]  += alpha*tmp3;
+  }
+
+  // process remaining first and last rows (at most rowsAtOnce-1)
+  Index end = rows;
+  Index start = rowBound;
+  do
+  {
+    for (Index i=start; i<end; ++i)
+    {
+      EIGEN_ALIGN16 ResScalar tmp0 = ResScalar(0);
+      ResPacket ptmp0 = pset1<ResPacket>(tmp0);
+      const LhsScalar* lhs0 = lhs + i*lhsStride;
+      // process first unaligned result's coeffs
+      // FIXME this loop gets vectorized by the compiler!
+      for (Index j=0; j<alignedStart; ++j)
+        tmp0 += cj.pmul(lhs0[j], rhs[j]);
+
+      if (alignedSize>alignedStart)
+      {
+        // process aligned rhs coeffs
+        if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
+          for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
+            ptmp0 = pcj.pmadd(pload<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
+        else
+          for (Index j = alignedStart;j<alignedSize;j+=RhsPacketSize)
+            ptmp0 = pcj.pmadd(ploadu<LhsPacket>(&lhs0[j]), pload<RhsPacket>(&rhs[j]), ptmp0);
+        tmp0 += predux(ptmp0);
+      }
+
+      // process remaining scalars
+      // FIXME this loop gets vectorized by the compiler!
+      for (Index j=alignedSize; j<depth; ++j)
+        tmp0 += cj.pmul(lhs0[j], rhs[j]);
+      res[i*resIncr] += alpha*tmp0;
+    }
+    if (skipRows)
+    {
+      start = 0;
+      end = skipRows;
+      skipRows = 0;
+    }
+    else
+      break;
+  } while(Vectorizable);
+
+  #undef _EIGEN_ACCUMULATE_PACKETS
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_MATRIX_VECTOR_H
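
Editor's note: from the user's side, both kernels in this file are reached through ordinary matrix-vector products via public Eigen API:

```cpp
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 50);
  Eigen::VectorXd x = Eigen::VectorXd::Random(50);
  Eigen::VectorXd y = Eigen::VectorXd::Zero(100);
  y.noalias() += 3.0 * A * x;            // column-major kernel
  Eigen::VectorXd z = A.transpose() * y; // row-major (dot-product) kernel
  return 0;
}
```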
diff --git a/Eigen/src/Core/products/GeneralMatrixVector_MKL.h b/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
new file mode 100644
index 0000000..1cb9fe6
--- /dev/null
+++ b/Eigen/src/Core/products/GeneralMatrixVector_MKL.h
@@ -0,0 +1,131 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   General matrix-vector product functionality based on ?GEMV.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_GENERAL_MATRIX_VECTOR_MKL_H
+#define EIGEN_GENERAL_MATRIX_VECTOR_MKL_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/**********************************************************************
+* This file implements general matrix-vector multiplication using the
+* BLAS gemv routine via partial specialization of the
+* general_matrix_vector_product::run(..) method for float, double,
+* std::complex<float> and std::complex<double> types.
+**********************************************************************/
+
+// gemv specialization
+
+template<typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>
+struct general_matrix_vector_product_gemv :
+  general_matrix_vector_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,ConjugateRhs,BuiltIn> {};
+
+#define EIGEN_MKL_GEMV_SPECIALIZE(Scalar) \
+template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
+struct general_matrix_vector_product<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs,Specialized> { \
+static void run( \
+  Index rows, Index cols, \
+  const Scalar* lhs, Index lhsStride, \
+  const Scalar* rhs, Index rhsIncr, \
+  Scalar* res, Index resIncr, Scalar alpha) \
+{ \
+  if (ConjugateLhs) { \
+    general_matrix_vector_product<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs,BuiltIn>::run( \
+      rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \
+  } else { \
+    general_matrix_vector_product_gemv<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \
+      rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \
+  } \
+} \
+}; \
+template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
+struct general_matrix_vector_product<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs,Specialized> { \
+static void run( \
+  Index rows, Index cols, \
+  const Scalar* lhs, Index lhsStride, \
+  const Scalar* rhs, Index rhsIncr, \
+  Scalar* res, Index resIncr, Scalar alpha) \
+{ \
+    general_matrix_vector_product_gemv<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \
+      rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \
+} \
+}; \
+
+EIGEN_MKL_GEMV_SPECIALIZE(double)
+EIGEN_MKL_GEMV_SPECIALIZE(float)
+EIGEN_MKL_GEMV_SPECIALIZE(dcomplex)
+EIGEN_MKL_GEMV_SPECIALIZE(scomplex)
+
+#define EIGEN_MKL_GEMV_SPECIALIZATION(EIGTYPE,MKLTYPE,MKLPREFIX) \
+template<typename Index, int LhsStorageOrder, bool ConjugateLhs, bool ConjugateRhs> \
+struct general_matrix_vector_product_gemv<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,ConjugateRhs> \
+{ \
+typedef Matrix<EIGTYPE,Dynamic,1,ColMajor> GEMVVector;\
+\
+static void run( \
+  Index rows, Index cols, \
+  const EIGTYPE* lhs, Index lhsStride, \
+  const EIGTYPE* rhs, Index rhsIncr, \
+  EIGTYPE* res, Index resIncr, EIGTYPE alpha) \
+{ \
+  MKL_INT m=rows, n=cols, lda=lhsStride, incx=rhsIncr, incy=resIncr; \
+  MKLTYPE alpha_, beta_; \
+  const EIGTYPE *x_ptr, myone(1); \
+  char trans=(LhsStorageOrder==ColMajor) ? 'N' : (ConjugateLhs) ? 'C' : 'T'; \
+  if (LhsStorageOrder==RowMajor) { \
+    m=cols; \
+    n=rows; \
+  }\
+  assign_scalar_eig2mkl(alpha_, alpha); \
+  assign_scalar_eig2mkl(beta_, myone); \
+  GEMVVector x_tmp; \
+  if (ConjugateRhs) { \
+    Map<const GEMVVector, 0, InnerStride<> > map_x(rhs,cols,1,InnerStride<>(incx)); \
+    x_tmp=map_x.conjugate(); \
+    x_ptr=x_tmp.data(); \
+    incx=1; \
+  } else x_ptr=rhs; \
+  MKLPREFIX##gemv(&trans, &m, &n, &alpha_, (const MKLTYPE*)lhs, &lda, (const MKLTYPE*)x_ptr, &incx, &beta_, (MKLTYPE*)res, &incy); \
+}\
+};
+
+EIGEN_MKL_GEMV_SPECIALIZATION(double,   double,        d)
+EIGEN_MKL_GEMV_SPECIALIZATION(float,    float,         s)
+EIGEN_MKL_GEMV_SPECIALIZATION(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_GEMV_SPECIALIZATION(scomplex, MKL_Complex8,  c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERAL_MATRIX_VECTOR_MKL_H
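The EIGEN_MKL_GEMV_SPECIALIZE macro above relies on a tag-dispatch pattern:
the primary template defaults to Specialized, and explicit specializations
forward either to the BLAS backend or back to the BuiltIn kernel. A
stripped-down sketch of the idiom (simplified names, not Eigen's actual
declarations):

#include <cstdio>

enum Variant { BuiltIn, Specialized };

// Primary template: the generic, pure-C++ kernel.
template <typename Scalar, int V = Specialized>
struct gemv_impl
{
  static void run(bool /*conjugateLhs*/) { std::printf("built-in kernel\n"); }
};

// Explicit specialization: route supported scalar types to BLAS, falling
// back to the built-in kernel in the cases BLAS cannot express (here a
// conjugated lhs, mirroring the ConjugateLhs branch above).
template <>
struct gemv_impl<double, Specialized>
{
  static void run(bool conjugateLhs)
  {
    if (conjugateLhs)
      gemv_impl<double, BuiltIn>::run(conjugateLhs);
    else
      std::printf("dgemv via MKL\n");
  }
};

int main() { gemv_impl<double>::run(false); return 0; }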
diff --git a/Eigen/src/Core/products/Parallelizer.h b/Eigen/src/Core/products/Parallelizer.h
new file mode 100644
index 0000000..6937ee3
--- /dev/null
+++ b/Eigen/src/Core/products/Parallelizer.h
@@ -0,0 +1,162 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PARALLELIZER_H
+#define EIGEN_PARALLELIZER_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \internal */
+inline void manage_multi_threading(Action action, int* v)
+{
+  static EIGEN_UNUSED int m_maxThreads = -1;
+
+  if(action==SetAction)
+  {
+    eigen_internal_assert(v!=0);
+    m_maxThreads = *v;
+  }
+  else if(action==GetAction)
+  {
+    eigen_internal_assert(v!=0);
+    #ifdef EIGEN_HAS_OPENMP
+    if(m_maxThreads>0)
+      *v = m_maxThreads;
+    else
+      *v = omp_get_max_threads();
+    #else
+    *v = 1;
+    #endif
+  }
+  else
+  {
+    eigen_internal_assert(false);
+  }
+}
+
+}
+
+/** Must be called first when calling Eigen from multiple threads */
+inline void initParallel()
+{
+  int nbt;
+  internal::manage_multi_threading(GetAction, &nbt);
+  std::ptrdiff_t l1, l2;
+  internal::manage_caching_sizes(GetAction, &l1, &l2);
+}
+
+/** \returns the max number of threads reserved for Eigen
+  * \sa setNbThreads */
+inline int nbThreads()
+{
+  int ret;
+  internal::manage_multi_threading(GetAction, &ret);
+  return ret;
+}
+
+/** Sets the max number of threads reserved for Eigen
+  * \sa nbThreads */
+inline void setNbThreads(int v)
+{
+  internal::manage_multi_threading(SetAction, &v);
+}
+
+namespace internal {
+
+template<typename Index> struct GemmParallelInfo
+{
+  GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0) {}
+
+  int volatile sync;
+  int volatile users;
+
+  Index rhs_start;
+  Index rhs_length;
+};
+
+template<bool Condition, typename Functor, typename Index>
+void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose)
+{
+  // TODO when EIGEN_USE_BLAS is defined,
+  // we should still enable OMP for other scalar types
+#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
+  // FIXME the transpose variable is only needed to properly split
+  // the matrix product when multithreading is enabled. This is a temporary
+  // fix to support row-major destination matrices. This whole
+  // parallelizer mechanism has to be redesigned anyway.
+  EIGEN_UNUSED_VARIABLE(transpose);
+  func(0,rows, 0,cols);
+#else
+
+  // Dynamically check whether we should enable or disable OpenMP.
+  // The conditions are:
+  // - the max number of threads we can create is greater than 1
+  // - we are not already in a parallel code
+  // - the sizes are large enough
+
+  // 1- are we already in a parallel session?
+  // FIXME omp_get_num_threads()>1 only works for OpenMP; what if the user does not use OpenMP?
+  if((!Condition) || (omp_get_num_threads()>1))
+    return func(0,rows, 0,cols);
+
+  Index size = transpose ? cols : rows;
+
+  // 2- compute the maximal number of threads from the size of the product:
+  // FIXME this has to be fine tuned
+  Index max_threads = std::max<Index>(1,size / 32);
+
+  // 3 - compute the number of threads we are going to use
+  Index threads = std::min<Index>(nbThreads(), max_threads);
+
+  if(threads==1)
+    return func(0,rows, 0,cols);
+
+  Eigen::initParallel();
+  func.initParallelSession();
+
+  if(transpose)
+    std::swap(rows,cols);
+
+  GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];
+
+  #pragma omp parallel num_threads(threads)
+  {
+    Index i = omp_get_thread_num();
+    // Note that the actual number of threads might be lower than the number of requested ones.
+    Index actual_threads = omp_get_num_threads();
+    
+    Index blockCols = (cols / actual_threads) & ~Index(0x3);
+    Index blockRows = (rows / actual_threads) & ~Index(0x7);
+    
+    Index r0 = i*blockRows;
+    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
+
+    Index c0 = i*blockCols;
+    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
+
+    info[i].rhs_start = c0;
+    info[i].rhs_length = actualBlockCols;
+
+    if(transpose)
+      func(0, cols, r0, actualBlockRows, info);
+    else
+      func(r0, actualBlockRows, 0,cols, info);
+  }
+
+  delete[] info;
+#endif
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARALLELIZER_H
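A minimal usage sketch of the public entry points declared above; note that
nbThreads() reports 1 when Eigen is built without OpenMP, and that
parallelize_gemm's masks (& ~Index(0x3), & ~Index(0x7)) round each thread's
block down to a multiple of 4 columns and 8 rows respectively.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::initParallel();   // call before using Eigen from several threads
  Eigen::setNbThreads(4);  // cap the number of threads reserved for Eigen
  std::cout << "Eigen may use up to " << Eigen::nbThreads() << " threads\n";

  Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 512);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(512, 512);
  Eigen::MatrixXd C = A * B;  // large enough for parallelize_gemm to split the work
  return 0;
}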
diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
new file mode 100644
index 0000000..99cf9e0
--- /dev/null
+++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -0,0 +1,436 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_H
+#define EIGEN_SELFADJOINT_MATRIX_MATRIX_H
+
+namespace Eigen { 
+
+namespace internal {
+
+// pack a selfadjoint block diagonal for use with the gebp_kernel
+template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder>
+struct symm_pack_lhs
+{
+  template<int BlockRows> inline
+  void pack(Scalar* blockA, const const_blas_data_mapper<Scalar,Index,StorageOrder>& lhs, Index cols, Index i, Index& count)
+  {
+    // normal copy
+    for(Index k=0; k<i; k++)
+      for(Index w=0; w<BlockRows; w++)
+        blockA[count++] = lhs(i+w,k);           // normal
+    // symmetric copy
+    Index h = 0;
+    for(Index k=i; k<i+BlockRows; k++)
+    {
+      for(Index w=0; w<h; w++)
+        blockA[count++] = numext::conj(lhs(k, i+w)); // transposed
+
+      blockA[count++] = numext::real(lhs(k,k));   // real (diagonal)
+
+      for(Index w=h+1; w<BlockRows; w++)
+        blockA[count++] = lhs(i+w, k);          // normal
+      ++h;
+    }
+    // transposed copy
+    for(Index k=i+BlockRows; k<cols; k++)
+      for(Index w=0; w<BlockRows; w++)
+        blockA[count++] = numext::conj(lhs(k, i+w)); // transposed
+  }
+  void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
+  {
+    const_blas_data_mapper<Scalar,Index,StorageOrder> lhs(_lhs,lhsStride);
+    Index count = 0;
+    Index peeled_mc = (rows/Pack1)*Pack1;
+    for(Index i=0; i<peeled_mc; i+=Pack1)
+    {
+      pack<Pack1>(blockA, lhs, cols, i, count);
+    }
+
+    if(rows-peeled_mc>=Pack2)
+    {
+      pack<Pack2>(blockA, lhs, cols, peeled_mc, count);
+      peeled_mc += Pack2;
+    }
+
+    // do the same with mr==1
+    for(Index i=peeled_mc; i<rows; i++)
+    {
+      for(Index k=0; k<i; k++)
+        blockA[count++] = lhs(i, k);              // normal
+
+      blockA[count++] = numext::real(lhs(i, i));       // real (diagonal)
+
+      for(Index k=i+1; k<cols; k++)
+        blockA[count++] = numext::conj(lhs(k, i));     // transposed
+    }
+  }
+};
+
+template<typename Scalar, typename Index, int nr, int StorageOrder>
+struct symm_pack_rhs
+{
+  enum { PacketSize = packet_traits<Scalar>::size };
+  void operator()(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Index rows, Index cols, Index k2)
+  {
+    Index end_k = k2 + rows;
+    Index count = 0;
+    const_blas_data_mapper<Scalar,Index,StorageOrder> rhs(_rhs,rhsStride);
+    Index packet_cols = (cols/nr)*nr;
+
+    // first part: normal case
+    for(Index j2=0; j2<k2; j2+=nr)
+    {
+      for(Index k=k2; k<end_k; k++)
+      {
+        blockB[count+0] = rhs(k,j2+0);
+        blockB[count+1] = rhs(k,j2+1);
+        if (nr==4)
+        {
+          blockB[count+2] = rhs(k,j2+2);
+          blockB[count+3] = rhs(k,j2+3);
+        }
+        count += nr;
+      }
+    }
+
+    // second part: diagonal block
+    for(Index j2=k2; j2<(std::min)(k2+rows,packet_cols); j2+=nr)
+    {
+      // again we can split vertically in three different parts (transpose, symmetric, normal)
+      // transpose
+      for(Index k=k2; k<j2; k++)
+      {
+        blockB[count+0] = numext::conj(rhs(j2+0,k));
+        blockB[count+1] = numext::conj(rhs(j2+1,k));
+        if (nr==4)
+        {
+          blockB[count+2] = numext::conj(rhs(j2+2,k));
+          blockB[count+3] = numext::conj(rhs(j2+3,k));
+        }
+        count += nr;
+      }
+      // symmetric
+      Index h = 0;
+      for(Index k=j2; k<j2+nr; k++)
+      {
+        // normal
+        for (Index w=0 ; w<h; ++w)
+          blockB[count+w] = rhs(k,j2+w);
+
+        blockB[count+h] = numext::real(rhs(k,k));
+
+        // transpose
+        for (Index w=h+1 ; w<nr; ++w)
+          blockB[count+w] = numext::conj(rhs(j2+w,k));
+        count += nr;
+        ++h;
+      }
+      // normal
+      for(Index k=j2+nr; k<end_k; k++)
+      {
+        blockB[count+0] = rhs(k,j2+0);
+        blockB[count+1] = rhs(k,j2+1);
+        if (nr==4)
+        {
+          blockB[count+2] = rhs(k,j2+2);
+          blockB[count+3] = rhs(k,j2+3);
+        }
+        count += nr;
+      }
+    }
+
+    // third part: transposed
+    for(Index j2=k2+rows; j2<packet_cols; j2+=nr)
+    {
+      for(Index k=k2; k<end_k; k++)
+      {
+        blockB[count+0] = numext::conj(rhs(j2+0,k));
+        blockB[count+1] = numext::conj(rhs(j2+1,k));
+        if (nr==4)
+        {
+          blockB[count+2] = numext::conj(rhs(j2+2,k));
+          blockB[count+3] = numext::conj(rhs(j2+3,k));
+        }
+        count += nr;
+      }
+    }
+
+    // copy the remaining columns one at a time (=> the same with nr==1)
+    for(Index j2=packet_cols; j2<cols; ++j2)
+    {
+      // transpose
+      Index half = (std::min)(end_k,j2);
+      for(Index k=k2; k<half; k++)
+      {
+        blockB[count] = numext::conj(rhs(j2,k));
+        count += 1;
+      }
+
+      if(half==j2 && half<k2+rows)
+      {
+        blockB[count] = numext::real(rhs(j2,j2));
+        count += 1;
+      }
+      else
+        half--;
+
+      // normal
+      for(Index k=half+1; k<k2+rows; k++)
+      {
+        blockB[count] = rhs(k,j2);
+        count += 1;
+      }
+    }
+  }
+};
+
+/* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of
+ * the general matrix matrix product.
+ */
+template <typename Scalar, typename Index,
+          int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
+          int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs,
+          int ResStorageOrder>
+struct product_selfadjoint_matrix;
+
+template <typename Scalar, typename Index,
+          int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,
+          int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs>
+struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs, RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor>
+{
+
+  static EIGEN_STRONG_INLINE void run(
+    Index rows, Index cols,
+    const Scalar* lhs, Index lhsStride,
+    const Scalar* rhs, Index rhsStride,
+    Scalar* res,       Index resStride,
+    const Scalar& alpha)
+  {
+    product_selfadjoint_matrix<Scalar, Index,
+      EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
+      RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs),
+      EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor,
+      LhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsSelfAdjoint,ConjugateLhs),
+      ColMajor>
+      ::run(cols, rows,  rhs, rhsStride,  lhs, lhsStride,  res, resStride,  alpha);
+  }
+};
+
+template <typename Scalar, typename Index,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs>
+struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>
+{
+
+  static EIGEN_DONT_INLINE void run(
+    Index rows, Index cols,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha);
+};
+
+template <typename Scalar, typename Index,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs>
+EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>::run(
+    Index rows, Index cols,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha)
+  {
+    Index size = rows;
+
+    const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+    const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
+
+    typedef gebp_traits<Scalar,Scalar> Traits;
+
+    Index kc = size;  // cache block size along the K direction
+    Index mc = rows;  // cache block size along the M direction
+    Index nc = cols;  // cache block size along the N direction
+    computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
+    // kc must be smaller than mc
+    kc = (std::min)(kc,mc);
+
+    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+    std::size_t sizeB = sizeW + kc*cols;
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
+    ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
+    Scalar* blockB = allocatedBlockB + sizeW;
+
+    gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
+    symm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+    gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
+    gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;
+
+    for(Index k2=0; k2<size; k2+=kc)
+    {
+      const Index actual_kc = (std::min)(k2+kc,size)-k2;
+
+      // we have selected one row panel of rhs and one column panel of lhs
+      // pack rhs's panel into a sequential chunk of memory
+      // and expand each coeff to a constant packet for further reuse
+      pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
+
+      // the selected lhs panel has to be split into three different parts:
+      //  1 - the transposed panel above the diagonal block => transposed packed copy
+      //  2 - the diagonal block => special packed copy
+      //  3 - the panel below the diagonal block => generic packed copy
+      for(Index i2=0; i2<k2; i2+=mc)
+      {
+        const Index actual_mc = (std::min)(i2+mc,k2)-i2;
+        // transposed packed copy
+        pack_lhs_transposed(blockA, &lhs(k2, i2), lhsStride, actual_kc, actual_mc);
+
+        gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
+      }
+      // the block diagonal
+      {
+        const Index actual_mc = (std::min)(k2+kc,size)-k2;
+        // symmetric packed copy
+        pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc);
+
+        gebp_kernel(res+k2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
+      }
+
+      for(Index i2=k2+kc; i2<size; i2+=mc)
+      {
+        const Index actual_mc = (std::min)(i2+mc,size)-i2;
+        gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder,false>()
+          (blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
+
+        gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
+      }
+    }
+  }
+
+// matrix * selfadjoint product
+template <typename Scalar, typename Index,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs>
+struct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>
+{
+
+  static EIGEN_DONT_INLINE void run(
+    Index rows, Index cols,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha);
+};
+
+template <typename Scalar, typename Index,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs>
+EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>::run(
+    Index rows, Index cols,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha)
+  {
+    Index size = cols;
+
+    const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+
+    typedef gebp_traits<Scalar,Scalar> Traits;
+
+    Index kc = size; // cache block size along the K direction
+    Index mc = rows;  // cache block size along the M direction
+    Index nc = cols;  // cache block size along the N direction
+    computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
+    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+    std::size_t sizeB = sizeW + kc*cols;
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
+    ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
+    Scalar* blockB = allocatedBlockB + sizeW;
+
+    gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
+    gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+    symm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
+
+    for(Index k2=0; k2<size; k2+=kc)
+    {
+      const Index actual_kc = (std::min)(k2+kc,size)-k2;
+
+      pack_rhs(blockB, _rhs, rhsStride, actual_kc, cols, k2);
+
+      // => GEPP
+      for(Index i2=0; i2<rows; i2+=mc)
+      {
+        const Index actual_mc = (std::min)(i2+mc,rows)-i2;
+        pack_lhs(blockA, &lhs(i2, k2), lhsStride, actual_kc, actual_mc);
+
+        gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha);
+      }
+    }
+  }
+
+} // end namespace internal
+
+/***************************************************************************
+* Wrapper to product_selfadjoint_matrix
+***************************************************************************/
+
+namespace internal {
+template<typename Lhs, int LhsMode, typename Rhs, int RhsMode>
+struct traits<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false> >
+  : traits<ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, int LhsMode, typename Rhs, int RhsMode>
+struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false>
+  : public ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,RhsMode,false>, Lhs, Rhs >
+{
+  EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)
+
+  SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+  enum {
+    LhsIsUpper = (LhsMode&(Upper|Lower))==Upper,
+    LhsIsSelfAdjoint = (LhsMode&SelfAdjoint)==SelfAdjoint,
+    RhsIsUpper = (RhsMode&(Upper|Lower))==Upper,
+    RhsIsSelfAdjoint = (RhsMode&SelfAdjoint)==SelfAdjoint
+  };
+
+  template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
+  {
+    eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
+
+    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
+    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
+
+    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
+                               * RhsBlasTraits::extractScalarFactor(m_rhs);
+
+    internal::product_selfadjoint_matrix<Scalar, Index,
+      EIGEN_LOGICAL_XOR(LhsIsUpper,
+                        internal::traits<Lhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, LhsIsSelfAdjoint,
+      NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)),
+      EIGEN_LOGICAL_XOR(RhsIsUpper,
+                        internal::traits<Rhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint,
+      NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsIsUpper,bool(RhsBlasTraits::NeedToConjugate)),
+      internal::traits<Dest>::Flags&RowMajorBit  ? RowMajor : ColMajor>
+      ::run(
+        lhs.rows(), rhs.cols(),                 // sizes
+        &lhs.coeffRef(0,0),    lhs.outerStride(),  // lhs info
+        &rhs.coeffRef(0,0),    rhs.outerStride(),  // rhs info
+        &dst.coeffRef(0,0), dst.outerStride(),  // result info
+        actualAlpha                             // alpha
+      );
+  }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_H
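At the user level the kernels above are reached through selfadjointView();
a short sketch using only the public API:

#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  MatrixXd S = MatrixXd::Random(100, 100);
  MatrixXd B = MatrixXd::Random(100, 20);
  MatrixXd C = MatrixXd::Zero(100, 20);
  // Dispatches to internal::product_selfadjoint_matrix with
  // LhsSelfAdjoint=true; the upper triangle of S is never referenced.
  C.noalias() += 2.0 * S.selfadjointView<Lower>() * B;
  return 0;
}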
diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
new file mode 100644
index 0000000..dfa687f
--- /dev/null
+++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix_MKL.h
@@ -0,0 +1,295 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   Selfadjoint matrix * matrix product functionality based on ?SYMM/?HEMM.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_MKL_H
+#define EIGEN_SELFADJOINT_MATRIX_MATRIX_MKL_H
+
+namespace Eigen { 
+
+namespace internal {
+
+
+/* Optimized selfadjoint matrix * matrix (?SYMM/?HEMM) product */
+
+#define EIGEN_MKL_SYMM_L(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, \
+          int LhsStorageOrder, bool ConjugateLhs, \
+          int RhsStorageOrder, bool ConjugateRhs> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
+{\
+\
+  static void run( \
+    Index rows, Index cols, \
+    const EIGTYPE* _lhs, Index lhsStride, \
+    const EIGTYPE* _rhs, Index rhsStride, \
+    EIGTYPE* res,        Index resStride, \
+    EIGTYPE alpha) \
+  { \
+    char side='L', uplo='L'; \
+    MKL_INT m, n, lda, ldb, ldc; \
+    const EIGTYPE *a, *b; \
+    MKLTYPE alpha_, beta_; \
+    MatrixX##EIGPREFIX b_tmp; \
+    EIGTYPE myone(1);\
+\
+/* Set transpose options */ \
+/* Set m, n, k */ \
+    m = (MKL_INT)rows;  \
+    n = (MKL_INT)cols;  \
+\
+/* Set alpha_ & beta_ */ \
+    assign_scalar_eig2mkl(alpha_, alpha); \
+    assign_scalar_eig2mkl(beta_, myone); \
+\
+/* Set lda, ldb, ldc */ \
+    lda = (MKL_INT)lhsStride; \
+    ldb = (MKL_INT)rhsStride; \
+    ldc = (MKL_INT)resStride; \
+\
+/* Set a, b, c */ \
+    if (LhsStorageOrder==RowMajor) uplo='U'; \
+    a = _lhs; \
+\
+    if (RhsStorageOrder==RowMajor) { \
+      Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \
+      b_tmp = rhs.adjoint(); \
+      b = b_tmp.data(); \
+      ldb = b_tmp.outerStride(); \
+    } else b = _rhs; \
+\
+    MKLPREFIX##symm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \
+\
+  } \
+};
+
+
+#define EIGEN_MKL_HEMM_L(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, \
+          int LhsStorageOrder, bool ConjugateLhs, \
+          int RhsStorageOrder, bool ConjugateRhs> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \
+{\
+  static void run( \
+    Index rows, Index cols, \
+    const EIGTYPE* _lhs, Index lhsStride, \
+    const EIGTYPE* _rhs, Index rhsStride, \
+    EIGTYPE* res,        Index resStride, \
+    EIGTYPE alpha) \
+  { \
+    char side='L', uplo='L'; \
+    MKL_INT m, n, lda, ldb, ldc; \
+    const EIGTYPE *a, *b; \
+    MKLTYPE alpha_, beta_; \
+    MatrixX##EIGPREFIX b_tmp; \
+    Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> a_tmp; \
+    EIGTYPE myone(1); \
+\
+/* Set transpose options */ \
+/* Set m, n, k */ \
+    m = (MKL_INT)rows; \
+    n = (MKL_INT)cols; \
+\
+/* Set alpha_ & beta_ */ \
+    assign_scalar_eig2mkl(alpha_, alpha); \
+    assign_scalar_eig2mkl(beta_, myone); \
+\
+/* Set lda, ldb, ldc */ \
+    lda = (MKL_INT)lhsStride; \
+    ldb = (MKL_INT)rhsStride; \
+    ldc = (MKL_INT)resStride; \
+\
+/* Set a, b, c */ \
+    if (((LhsStorageOrder==ColMajor) && ConjugateLhs) || ((LhsStorageOrder==RowMajor) && (!ConjugateLhs))) { \
+      Map<const Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder>, 0, OuterStride<> > lhs(_lhs,m,m,OuterStride<>(lhsStride)); \
+      a_tmp = lhs.conjugate(); \
+      a = a_tmp.data(); \
+      lda = a_tmp.outerStride(); \
+    } else a = _lhs; \
+    if (LhsStorageOrder==RowMajor) uplo='U'; \
+\
+    if (RhsStorageOrder==ColMajor && (!ConjugateRhs)) { \
+       b = _rhs; } \
+    else { \
+      if (RhsStorageOrder==ColMajor && ConjugateRhs) { \
+        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,m,n,OuterStride<>(rhsStride)); \
+        b_tmp = rhs.conjugate(); \
+      } else \
+      if (ConjugateRhs) { \
+        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \
+        b_tmp = rhs.adjoint(); \
+      } else { \
+        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \
+        b_tmp = rhs.transpose(); \
+      } \
+      b = b_tmp.data(); \
+      ldb = b_tmp.outerStride(); \
+    } \
+\
+    MKLPREFIX##hemm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \
+\
+  } \
+};
+
+EIGEN_MKL_SYMM_L(double, double, d, d)
+EIGEN_MKL_SYMM_L(float, float, f, s)
+EIGEN_MKL_HEMM_L(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_HEMM_L(scomplex, MKL_Complex8, cf, c)
+
+
+/* Optimized matrix * selfadjoint matrix (?SYMM/?HEMM) product */
+
+#define EIGEN_MKL_SYMM_R(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, \
+          int LhsStorageOrder, bool ConjugateLhs, \
+          int RhsStorageOrder, bool ConjugateRhs> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
+{\
+\
+  static void run( \
+    Index rows, Index cols, \
+    const EIGTYPE* _lhs, Index lhsStride, \
+    const EIGTYPE* _rhs, Index rhsStride, \
+    EIGTYPE* res,        Index resStride, \
+    EIGTYPE alpha) \
+  { \
+    char side='R', uplo='L'; \
+    MKL_INT m, n, lda, ldb, ldc; \
+    const EIGTYPE *a, *b; \
+    MKLTYPE alpha_, beta_; \
+    MatrixX##EIGPREFIX b_tmp; \
+    EIGTYPE myone(1);\
+\
+/* Set m, n, k */ \
+    m = (MKL_INT)rows;  \
+    n = (MKL_INT)cols;  \
+\
+/* Set alpha_ & beta_ */ \
+    assign_scalar_eig2mkl(alpha_, alpha); \
+    assign_scalar_eig2mkl(beta_, myone); \
+\
+/* Set lda, ldb, ldc */ \
+    lda = (MKL_INT)rhsStride; \
+    ldb = (MKL_INT)lhsStride; \
+    ldc = (MKL_INT)resStride; \
+\
+/* Set a, b, c */ \
+    if (RhsStorageOrder==RowMajor) uplo='U'; \
+    a = _rhs; \
+\
+    if (LhsStorageOrder==RowMajor) { \
+      Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \
+      b_tmp = lhs.adjoint(); \
+      b = b_tmp.data(); \
+      ldb = b_tmp.outerStride(); \
+    } else b = _lhs; \
+\
+    MKLPREFIX##symm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \
+\
+  } \
+};
+
+
+#define EIGEN_MKL_HEMM_R(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, \
+          int LhsStorageOrder, bool ConjugateLhs, \
+          int RhsStorageOrder, bool ConjugateRhs> \
+struct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \
+{\
+  static void run( \
+    Index rows, Index cols, \
+    const EIGTYPE* _lhs, Index lhsStride, \
+    const EIGTYPE* _rhs, Index rhsStride, \
+    EIGTYPE* res,        Index resStride, \
+    EIGTYPE alpha) \
+  { \
+    char side='R', uplo='L'; \
+    MKL_INT m, n, lda, ldb, ldc; \
+    const EIGTYPE *a, *b; \
+    MKLTYPE alpha_, beta_; \
+    MatrixX##EIGPREFIX b_tmp; \
+    Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> a_tmp; \
+    EIGTYPE myone(1); \
+\
+/* Set m, n, k */ \
+    m = (MKL_INT)rows; \
+    n = (MKL_INT)cols; \
+\
+/* Set alpha_ & beta_ */ \
+    assign_scalar_eig2mkl(alpha_, alpha); \
+    assign_scalar_eig2mkl(beta_, myone); \
+\
+/* Set lda, ldb, ldc */ \
+    lda = (MKL_INT)rhsStride; \
+    ldb = (MKL_INT)lhsStride; \
+    ldc = (MKL_INT)resStride; \
+\
+/* Set a, b, c */ \
+    if (((RhsStorageOrder==ColMajor) && ConjugateRhs) || ((RhsStorageOrder==RowMajor) && (!ConjugateRhs))) { \
+      Map<const Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder>, 0, OuterStride<> > rhs(_rhs,n,n,OuterStride<>(rhsStride)); \
+      a_tmp = rhs.conjugate(); \
+      a = a_tmp.data(); \
+      lda = a_tmp.outerStride(); \
+    } else a = _rhs; \
+    if (RhsStorageOrder==RowMajor) uplo='U'; \
+\
+    if (LhsStorageOrder==ColMajor && (!ConjugateLhs)) { \
+       b = _lhs; } \
+    else { \
+      if (LhsStorageOrder==ColMajor && ConjugateLhs) { \
+        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,m,n,OuterStride<>(lhsStride)); \
+        b_tmp = lhs.conjugate(); \
+      } else \
+      if (ConjugateLhs) { \
+        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \
+        b_tmp = lhs.adjoint(); \
+      } else { \
+        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \
+        b_tmp = lhs.transpose(); \
+      } \
+      b = b_tmp.data(); \
+      ldb = b_tmp.outerStride(); \
+    } \
+\
+    MKLPREFIX##hemm(&side, &uplo, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)b, &ldb, &beta_, (MKLTYPE*)res, &ldc); \
+  } \
+};
+
+EIGEN_MKL_SYMM_R(double, double, d, d)
+EIGEN_MKL_SYMM_R(float, float, f, s)
+EIGEN_MKL_HEMM_R(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_HEMM_R(scomplex, MKL_Complex8, cf, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_MKL_H
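The call the SYMM_L macro ultimately emits corresponds to the standard BLAS
symm routine. The sketch below expresses the same call through the CBLAS
interface for readability; the macro itself uses the Fortran-style symbols,
and the header name assumes an MKL/OpenBLAS-style installation.

#include <cblas.h>

// C = alpha * A * B + C, with A selfadjoint and only its lower triangle
// referenced -- side='L', uplo='L', beta=1, matching the macro's defaults.
void symm_left_lower(int m, int n, double alpha,
                     const double* A, int lda,
                     const double* B, int ldb,
                     double* C, int ldc)
{
  cblas_dsymm(CblasColMajor, CblasLeft, CblasLower,
              m, n, alpha, A, lda, B, ldb, 1.0, C, ldc);
}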
diff --git a/Eigen/src/Core/products/SelfadjointMatrixVector.h b/Eigen/src/Core/products/SelfadjointMatrixVector.h
new file mode 100644
index 0000000..f698f67
--- /dev/null
+++ b/Eigen/src/Core/products/SelfadjointMatrixVector.h
@@ -0,0 +1,281 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H
+#define EIGEN_SELFADJOINT_MATRIX_VECTOR_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/* Optimized selfadjoint matrix * vector product:
+ * This algorithm processes 2 columns at once, which allows both reducing
+ * the number of loads/stores of the result by a factor of 2 and reducing
+ * the instruction dependency.
+ */
+
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version=Specialized>
+struct selfadjoint_matrix_vector_product;
+
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
+struct selfadjoint_matrix_vector_product
+
+{
+static EIGEN_DONT_INLINE void run(
+  Index size,
+  const Scalar*  lhs, Index lhsStride,
+  const Scalar* _rhs, Index rhsIncr,
+  Scalar* res,
+  Scalar alpha);
+};
+
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
+  Index size,
+  const Scalar*  lhs, Index lhsStride,
+  const Scalar* _rhs, Index rhsIncr,
+  Scalar* res,
+  Scalar alpha)
+{
+  typedef typename packet_traits<Scalar>::type Packet;
+  const Index PacketSize = sizeof(Packet)/sizeof(Scalar);
+
+  enum {
+    IsRowMajor = StorageOrder==RowMajor ? 1 : 0,
+    IsLower = UpLo == Lower ? 1 : 0,
+    FirstTriangular = IsRowMajor == IsLower
+  };
+
+  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> cj0;
+  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;
+  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex, ConjugateRhs> cjd;
+
+  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> pcj0;
+  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;
+
+  Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;
+
+  // FIXME this copy is now handled outside product_selfadjoint_vector, so it could probably be removed.
+  // if the rhs is not sequentially stored in memory we copy it to a temporary buffer,
+  // this is because we need to extract packets
+  ei_declare_aligned_stack_constructed_variable(Scalar,rhs,size,rhsIncr==1 ? const_cast<Scalar*>(_rhs) : 0);  
+  if (rhsIncr!=1)
+  {
+    const Scalar* it = _rhs;
+    for (Index i=0; i<size; ++i, it+=rhsIncr)
+      rhs[i] = *it;
+  }
+
+  Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
+  if (FirstTriangular)
+    bound = size - bound;
+
+  for (Index j=FirstTriangular ? bound : 0;
+       j<(FirstTriangular ? size : bound);j+=2)
+  {
+    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
+    const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;
+
+    Scalar t0 = cjAlpha * rhs[j];
+    Packet ptmp0 = pset1<Packet>(t0);
+    Scalar t1 = cjAlpha * rhs[j+1];
+    Packet ptmp1 = pset1<Packet>(t1);
+
+    Scalar t2(0);
+    Packet ptmp2 = pset1<Packet>(t2);
+    Scalar t3(0);
+    Packet ptmp3 = pset1<Packet>(t3);
+
+    size_t starti = FirstTriangular ? 0 : j+2;
+    size_t endi   = FirstTriangular ? j : size;
+    size_t alignedStart = (starti) + internal::first_aligned(&res[starti], endi-starti);
+    size_t alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);
+
+    // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
+    res[j]   += cjd.pmul(numext::real(A0[j]), t0);
+    res[j+1] += cjd.pmul(numext::real(A1[j+1]), t1);
+    if(FirstTriangular)
+    {
+      res[j]   += cj0.pmul(A1[j],   t1);
+      t3       += cj1.pmul(A1[j],   rhs[j]);
+    }
+    else
+    {
+      res[j+1] += cj0.pmul(A0[j+1],t0);
+      t2 += cj1.pmul(A0[j+1], rhs[j+1]);
+    }
+
+    for (size_t i=starti; i<alignedStart; ++i)
+    {
+      res[i] += t0 * A0[i] + t1 * A1[i];
+      t2 += numext::conj(A0[i]) * rhs[i];
+      t3 += numext::conj(A1[i]) * rhs[i];
+    }
+    // Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speed up)
+    // gcc 4.2 does this optimization automatically.
+    const Scalar* EIGEN_RESTRICT a0It  = A0  + alignedStart;
+    const Scalar* EIGEN_RESTRICT a1It  = A1  + alignedStart;
+    const Scalar* EIGEN_RESTRICT rhsIt = rhs + alignedStart;
+          Scalar* EIGEN_RESTRICT resIt = res + alignedStart;
+    for (size_t i=alignedStart; i<alignedEnd; i+=PacketSize)
+    {
+      Packet A0i = ploadu<Packet>(a0It);  a0It  += PacketSize;
+      Packet A1i = ploadu<Packet>(a1It);  a1It  += PacketSize;
+      Packet Bi  = ploadu<Packet>(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases
+      Packet Xi  = pload <Packet>(resIt);
+
+      Xi    = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi));
+      ptmp2 = pcj1.pmadd(A0i,  Bi, ptmp2);
+      ptmp3 = pcj1.pmadd(A1i,  Bi, ptmp3);
+      pstore(resIt,Xi); resIt += PacketSize;
+    }
+    for (size_t i=alignedEnd; i<endi; i++)
+    {
+      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
+      t2 += cj1.pmul(A0[i], rhs[i]);
+      t3 += cj1.pmul(A1[i], rhs[i]);
+    }
+
+    res[j]   += alpha * (t2 + predux(ptmp2));
+    res[j+1] += alpha * (t3 + predux(ptmp3));
+  }
+  for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
+  {
+    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
+
+    Scalar t1 = cjAlpha * rhs[j];
+    Scalar t2(0);
+    // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
+    res[j] += cjd.pmul(numext::real(A0[j]), t1);
+    for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
+    {
+      res[i] += cj0.pmul(A0[i], t1);
+      t2 += cj1.pmul(A0[i], rhs[i]);
+    }
+    res[j] += alpha * t2;
+  }
+}
+
+} // end namespace internal 
+
+/***************************************************************************
+* Wrapper to product_selfadjoint_vector
+***************************************************************************/
+
+namespace internal {
+template<typename Lhs, int LhsMode, typename Rhs>
+struct traits<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true> >
+  : traits<ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, int LhsMode, typename Rhs>
+struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>
+  : public ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>, Lhs, Rhs >
+{
+  EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)
+
+  enum {
+    LhsUpLo = LhsMode&(Upper|Lower)
+  };
+
+  SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+  template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
+  {
+    typedef typename Dest::Scalar ResScalar;
+    typedef typename Base::RhsScalar RhsScalar;
+    typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;
+    
+    eigen_assert(dest.rows()==m_lhs.rows() && dest.cols()==m_rhs.cols());
+
+    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
+    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
+
+    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
+                               * RhsBlasTraits::extractScalarFactor(m_rhs);
+
+    enum {
+      EvalToDest = (Dest::InnerStrideAtCompileTime==1),
+      UseRhs = (_ActualRhsType::InnerStrideAtCompileTime==1)
+    };
+    
+    internal::gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,!EvalToDest> static_dest;
+    internal::gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!UseRhs> static_rhs;
+
+    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
+                                                  EvalToDest ? dest.data() : static_dest.data());
+                                                  
+    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(),
+        UseRhs ? const_cast<RhsScalar*>(rhs.data()) : static_rhs.data());
+    
+    if(!EvalToDest)
+    {
+      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      int size = dest.size();
+      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      #endif
+      MappedDest(actualDestPtr, dest.size()) = dest;
+    }
+      
+    if(!UseRhs)
+    {
+      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      int size = rhs.size();
+      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      #endif
+      Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, rhs.size()) = rhs;
+    }
+      
+      
+    internal::selfadjoint_matrix_vector_product<Scalar, Index, (internal::traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>::run
+      (
+        lhs.rows(),                             // size
+        &lhs.coeffRef(0,0),  lhs.outerStride(), // lhs info
+        actualRhsPtr, 1,                        // rhs info
+        actualDestPtr,                          // result info
+        actualAlpha                             // scale factor
+      );
+    
+    if(!EvalToDest)
+      dest = MappedDest(actualDestPtr, dest.size());
+  }
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs, int RhsMode>
+struct traits<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false> >
+  : traits<ProductBase<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, typename Rhs, int RhsMode>
+struct SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>
+  : public ProductBase<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>, Lhs, Rhs >
+{
+  EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)
+
+  enum {
+    RhsUpLo = RhsMode&(Upper|Lower)
+  };
+
+  SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+  template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
+  {
+    // let's simply transpose the product
+    Transpose<Dest> destT(dest);
+    SelfadjointProductMatrix<Transpose<const Rhs>, int(RhsUpLo)==Upper ? Lower : Upper, false,
+                             Transpose<const Lhs>, 0, true>(m_rhs.transpose(), m_lhs.transpose()).scaleAndAddTo(destT, alpha);
+  }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H
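As in the matrix-matrix case, this kernel is reached through
selfadjointView(); for complex scalars the matrix is treated as Hermitian
(a HEMV-style product). A minimal sketch:

#include <Eigen/Dense>
using namespace Eigen;

int main()
{
  MatrixXcd S = MatrixXcd::Random(50, 50);
  VectorXcd x = VectorXcd::Random(50);
  VectorXcd y = VectorXcd::Zero(50);
  // Reads only the upper triangle of S; dispatches to
  // internal::selfadjoint_matrix_vector_product.
  y.noalias() += S.selfadjointView<Upper>() * x;
  return 0;
}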
diff --git a/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h b/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
new file mode 100644
index 0000000..86684b6
--- /dev/null
+++ b/Eigen/src/Core/products/SelfadjointMatrixVector_MKL.h
@@ -0,0 +1,114 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   Selfadjoint matrix-vector product functionality based on ?SYMV/?HEMV.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_MKL_H
+#define EIGEN_SELFADJOINT_MATRIX_VECTOR_MKL_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/**********************************************************************
+* This file implements selfadjoint matrix-vector multiplication using BLAS
+**********************************************************************/
+
+// symv/hemv specialization
+
+template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs>
+struct selfadjoint_matrix_vector_product_symv :
+  selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,BuiltIn> {};
+
+#define EIGEN_MKL_SYMV_SPECIALIZE(Scalar) \
+template<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> \
+struct selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Specialized> { \
+static void run( \
+  Index size, const Scalar*  lhs, Index lhsStride, \
+  const Scalar* _rhs, Index rhsIncr, Scalar* res, Scalar alpha) { \
+    enum {\
+      IsColMajor = StorageOrder==ColMajor \
+    }; \
+    if (IsColMajor == ConjugateLhs) {\
+      selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,BuiltIn>::run( \
+        size, lhs, lhsStride, _rhs, rhsIncr, res, alpha);  \
+    } else {\
+      selfadjoint_matrix_vector_product_symv<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs>::run( \
+        size, lhs, lhsStride, _rhs, rhsIncr, res, alpha);  \
+    }\
+  } \
+}; \
+
+EIGEN_MKL_SYMV_SPECIALIZE(double)
+EIGEN_MKL_SYMV_SPECIALIZE(float)
+EIGEN_MKL_SYMV_SPECIALIZE(dcomplex)
+EIGEN_MKL_SYMV_SPECIALIZE(scomplex)
+
+#define EIGEN_MKL_SYMV_SPECIALIZATION(EIGTYPE,MKLTYPE,MKLFUNC) \
+template<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> \
+struct selfadjoint_matrix_vector_product_symv<EIGTYPE,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs> \
+{ \
+typedef Matrix<EIGTYPE,Dynamic,1,ColMajor> SYMVVector;\
+\
+static void run( \
+Index size, const EIGTYPE*  lhs, Index lhsStride, \
+const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* res, EIGTYPE alpha) \
+{ \
+  enum {\
+    IsRowMajor = StorageOrder==RowMajor ? 1 : 0, \
+    IsLower = UpLo == Lower ? 1 : 0 \
+  }; \
+  MKL_INT n=size, lda=lhsStride, incx=rhsIncr, incy=1; \
+  MKLTYPE alpha_, beta_; \
+  const EIGTYPE *x_ptr, myone(1); \
+  char uplo=(IsRowMajor) ? (IsLower ? 'U' : 'L') : (IsLower ? 'L' : 'U'); \
+  assign_scalar_eig2mkl(alpha_, alpha); \
+  assign_scalar_eig2mkl(beta_, myone); \
+  SYMVVector x_tmp; \
+  if (ConjugateRhs) { \
+    Map<const SYMVVector, 0, InnerStride<> > map_x(_rhs,size,1,InnerStride<>(incx)); \
+    x_tmp=map_x.conjugate(); \
+    x_ptr=x_tmp.data(); \
+    incx=1; \
+  } else x_ptr=_rhs; \
+  MKLFUNC(&uplo, &n, &alpha_, (const MKLTYPE*)lhs, &lda, (const MKLTYPE*)x_ptr, &incx, &beta_, (MKLTYPE*)res, &incy); \
+}\
+};
+
+EIGEN_MKL_SYMV_SPECIALIZATION(double,   double,        dsymv)
+EIGEN_MKL_SYMV_SPECIALIZATION(float,    float,         ssymv)
+EIGEN_MKL_SYMV_SPECIALIZATION(dcomplex, MKL_Complex16, zhemv)
+EIGEN_MKL_SYMV_SPECIALIZATION(scomplex, MKL_Complex8,  chemv)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_MKL_H
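The uplo flip in the macro above is a storage-order identity: the lower
triangle of a row-major matrix occupies the same memory as the upper
triangle of a column-major one. A tiny standalone restatement:

// A row-major (i,j) sits where a column-major matrix stores (j,i), so a
// Lower selection in row-major storage must be passed to a column-major
// BLAS as 'U', and vice versa.
inline char blas_uplo(bool isRowMajor, bool isLower)
{
  return isRowMajor ? (isLower ? 'U' : 'L')
                    : (isLower ? 'L' : 'U');
}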
diff --git a/Eigen/src/Core/products/SelfadjointProduct.h b/Eigen/src/Core/products/SelfadjointProduct.h
new file mode 100644
index 0000000..6ca4ae6
--- /dev/null
+++ b/Eigen/src/Core/products/SelfadjointProduct.h
@@ -0,0 +1,123 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SELFADJOINT_PRODUCT_H
+#define EIGEN_SELFADJOINT_PRODUCT_H
+
+/**********************************************************************
+* This file implements a selfadjoint product: C += A A^T, updating only
+* half of the selfadjoint matrix C.
+* It corresponds to the level 3 SYRK and level 2 SYR BLAS routines.
+**********************************************************************/
+
+namespace Eigen { 
+
+
+template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
+struct selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo,ConjLhs,ConjRhs>
+{
+  static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)
+  {
+    internal::conj_if<ConjRhs> cj;
+    typedef Map<const Matrix<Scalar,Dynamic,1> > OtherMap;
+    typedef typename internal::conditional<ConjLhs,typename OtherMap::ConjugateReturnType,const OtherMap&>::type ConjLhsType;
+    for (Index i=0; i<size; ++i)
+    {
+      Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+(UpLo==Lower ? i : 0), (UpLo==Lower ? size-i : (i+1)))
+          += (alpha * cj(vecY[i])) * ConjLhsType(OtherMap(vecX+(UpLo==Lower ? i : 0),UpLo==Lower ? size-i : (i+1)));
+    }
+  }
+};
+
+template<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>
+struct selfadjoint_rank1_update<Scalar,Index,RowMajor,UpLo,ConjLhs,ConjRhs>
+{
+  static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)
+  {
+    selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo==Lower?Upper:Lower,ConjRhs,ConjLhs>::run(size,mat,stride,vecY,vecX,alpha);
+  }
+};
+
+template<typename MatrixType, typename OtherType, int UpLo, bool OtherIsVector = OtherType::IsVectorAtCompileTime>
+struct selfadjoint_product_selector;
+
+template<typename MatrixType, typename OtherType, int UpLo>
+struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>
+{
+  static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
+  {
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+    typedef internal::blas_traits<OtherType> OtherBlasTraits;
+    typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType;
+    typedef typename internal::remove_all<ActualOtherType>::type _ActualOtherType;
+    typename internal::add_const_on_value_type<ActualOtherType>::type actualOther = OtherBlasTraits::extract(other.derived());
+
+    Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived());
+
+    enum {
+      StorageOrder = (internal::traits<MatrixType>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+      UseOtherDirectly = _ActualOtherType::InnerStrideAtCompileTime==1
+    };
+    internal::gemv_static_vector_if<Scalar,OtherType::SizeAtCompileTime,OtherType::MaxSizeAtCompileTime,!UseOtherDirectly> static_other;
+
+    ei_declare_aligned_stack_constructed_variable(Scalar, actualOtherPtr, other.size(),
+      (UseOtherDirectly ? const_cast<Scalar*>(actualOther.data()) : static_other.data()));
+      
+    if(!UseOtherDirectly)
+      Map<typename _ActualOtherType::PlainObject>(actualOtherPtr, actualOther.size()) = actualOther;
+    
+    selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,
+                              OtherBlasTraits::NeedToConjugate  && NumTraits<Scalar>::IsComplex,
+                            (!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex>
+          ::run(other.size(), mat.data(), mat.outerStride(), actualOtherPtr, actualOtherPtr, actualAlpha);
+  }
+};
+
+template<typename MatrixType, typename OtherType, int UpLo>
+struct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>
+{
+  static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)
+  {
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+    typedef internal::blas_traits<OtherType> OtherBlasTraits;
+    typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType;
+    typedef typename internal::remove_all<ActualOtherType>::type _ActualOtherType;
+    typename internal::add_const_on_value_type<ActualOtherType>::type actualOther = OtherBlasTraits::extract(other.derived());
+
+    Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived());
+
+    enum { IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
+
+    internal::general_matrix_matrix_triangular_product<Index,
+      Scalar, _ActualOtherType::Flags&RowMajorBit ? RowMajor : ColMajor,   OtherBlasTraits::NeedToConjugate  && NumTraits<Scalar>::IsComplex,
+      Scalar, _ActualOtherType::Flags&RowMajorBit ? ColMajor : RowMajor, (!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex,
+      MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor, UpLo>
+      ::run(mat.cols(), actualOther.cols(),
+            &actualOther.coeffRef(0,0), actualOther.outerStride(), &actualOther.coeffRef(0,0), actualOther.outerStride(),
+            mat.data(), mat.outerStride(), actualAlpha);
+  }
+};
+
+// high-level API
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename DerivedU>
+SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
+::rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha)
+{
+  selfadjoint_product_selector<MatrixType,DerivedU,UpLo>::run(_expression().const_cast_derived(), u.derived(), alpha);
+
+  return *this;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELFADJOINT_PRODUCT_H
diff --git a/Eigen/src/Core/products/SelfadjointRank2Update.h b/Eigen/src/Core/products/SelfadjointRank2Update.h
new file mode 100644
index 0000000..8594a97
--- /dev/null
+++ b/Eigen/src/Core/products/SelfadjointRank2Update.h
@@ -0,0 +1,93 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SELFADJOINTRANK2UPDATE_H
+#define EIGEN_SELFADJOINTRANK2UPDATE_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/* Optimized selfadjoint matrix += alpha * uv' + conj(alpha)*vu'
+ * It corresponds to the level 2 SYR2 BLAS routine.
+ */
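+
+// For reference, this kernel backs the two-vector overload of
+// SelfAdjointView::rankUpdate. A small usage sketch (assuming
+// `using namespace Eigen;`):
+//   MatrixXcd C = MatrixXcd::Zero(4,4);
+//   VectorXcd u = VectorXcd::Random(4), v = VectorXcd::Random(4);
+//   C.selfadjointView<Upper>().rankUpdate(u, v); // C += u*v^* + v*u^* (SYR2/HER2)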
+
+template<typename Scalar, typename Index, typename UType, typename VType, int UpLo>
+struct selfadjoint_rank2_update_selector;
+
+template<typename Scalar, typename Index, typename UType, typename VType>
+struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>
+{
+  static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
+  {
+    const Index size = u.size();
+    for (Index i=0; i<size; ++i)
+    {
+      Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+i, size-i) +=
+                        (numext::conj(alpha) * numext::conj(u.coeff(i))) * v.tail(size-i)
+                      + (alpha * numext::conj(v.coeff(i))) * u.tail(size-i);
+    }
+  }
+};
+
+template<typename Scalar, typename Index, typename UType, typename VType>
+struct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Upper>
+{
+  static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)
+  {
+    const Index size = u.size();
+    for (Index i=0; i<size; ++i)
+      Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i, i+1) +=
+                        (numext::conj(alpha)  * numext::conj(u.coeff(i))) * v.head(i+1)
+                      + (alpha * numext::conj(v.coeff(i))) * u.head(i+1);
+  }
+};
+
+template<bool Cond, typename T> struct conj_expr_if
+  : conditional<!Cond, const T&,
+      CwiseUnaryOp<scalar_conjugate_op<typename traits<T>::Scalar>,T> > {};
+
+} // end namespace internal
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename DerivedU, typename DerivedV>
+SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>
+::rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha)
+{
+  typedef internal::blas_traits<DerivedU> UBlasTraits;
+  typedef typename UBlasTraits::DirectLinearAccessType ActualUType;
+  typedef typename internal::remove_all<ActualUType>::type _ActualUType;
+  typename internal::add_const_on_value_type<ActualUType>::type actualU = UBlasTraits::extract(u.derived());
+
+  typedef internal::blas_traits<DerivedV> VBlasTraits;
+  typedef typename VBlasTraits::DirectLinearAccessType ActualVType;
+  typedef typename internal::remove_all<ActualVType>::type _ActualVType;
+  typename internal::add_const_on_value_type<ActualVType>::type actualV = VBlasTraits::extract(v.derived());
+
+  // If MatrixType is row major, then we use the routine for lower triangular in the upper triangular case and
+  // vice versa, and take the complex conjugate of all coefficients and vector entries.
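+  // (Transposing the update alpha*u*v^* + conj(alpha)*v*u^* yields an update of
+  //  the same form with alpha, u, and v replaced by their conjugates, and the
+  //  row-major triangle of a matrix is the col-major triangle of its transpose.)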
+
+  enum { IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };
+  Scalar actualAlpha = alpha * UBlasTraits::extractScalarFactor(u.derived())
+                             * numext::conj(VBlasTraits::extractScalarFactor(v.derived()));
+  if (IsRowMajor)
+    actualAlpha = numext::conj(actualAlpha);
+
+  internal::selfadjoint_rank2_update_selector<Scalar, Index,
+    typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ UBlasTraits::NeedToConjugate,_ActualUType>::type>::type,
+    typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ VBlasTraits::NeedToConjugate,_ActualVType>::type>::type,
+    (IsRowMajor ? int(UpLo==Upper ? Lower : Upper) : UpLo)>
+    ::run(_expression().const_cast_derived().data(),_expression().outerStride(),actualU,actualV,actualAlpha);
+
+  return *this;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELFADJOINTRANK2UPDATE_H
diff --git a/Eigen/src/Core/products/TriangularMatrixMatrix.h b/Eigen/src/Core/products/TriangularMatrixMatrix.h
new file mode 100644
index 0000000..8110507
--- /dev/null
+++ b/Eigen/src/Core/products/TriangularMatrixMatrix.h
@@ -0,0 +1,427 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_H
+#define EIGEN_TRIANGULAR_MATRIX_MATRIX_H
+
+namespace Eigen { 
+
+namespace internal {
+
+// template<typename Scalar, int mr, int StorageOrder, bool Conjugate, int Mode>
+// struct gemm_pack_lhs_triangular
+// {
+//   Matrix<Scalar,mr,mr,
+//   void operator()(Scalar* blockA, const EIGEN_RESTRICT Scalar* _lhs, int lhsStride, int depth, int rows)
+//   {
+//     conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+//     const_blas_data_mapper<Scalar, StorageOrder> lhs(_lhs,lhsStride);
+//     int count = 0;
+//     const int peeled_mc = (rows/mr)*mr;
+//     for(int i=0; i<peeled_mc; i+=mr)
+//     {
+//       for(int k=0; k<depth; k++)
+//         for(int w=0; w<mr; w++)
+//           blockA[count++] = cj(lhs(i+w, k));
+//     }
+//     for(int i=peeled_mc; i<rows; i++)
+//     {
+//       for(int k=0; k<depth; k++)
+//         blockA[count++] = cj(lhs(i, k));
+//     }
+//   }
+// };
+
+/* Optimized triangular matrix * matrix (_TRMM++) product built on top of
+ * the general matrix matrix product.
+ */
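+
+// For reference, this kernel backs products involving a TriangularView. A small
+// usage sketch (assuming `using namespace Eigen;`):
+//   MatrixXd A = MatrixXd::Random(5,5), B = MatrixXd::Random(5,3);
+//   MatrixXd C = A.triangularView<Lower>() * B;             // triangular * general
+//   MatrixXd D = B.transpose() * A.triangularView<Upper>(); // general * triangular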
+template <typename Scalar, typename Index,
+          int Mode, bool LhsIsTriangular,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs,
+          int ResStorageOrder, int Version = Specialized>
+struct product_triangular_matrix_matrix;
+
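+// A row-major result is reduced to the col-major case: writing C = op(A)*op(B)
+// as C^T = op(B)^T * op(A)^T swaps the roles of the lhs and rhs, flips
+// Lower/Upper, and flips both storage orders.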
+template <typename Scalar, typename Index,
+          int Mode, bool LhsIsTriangular,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs, int Version>
+struct product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,
+                                           LhsStorageOrder,ConjugateLhs,
+                                           RhsStorageOrder,ConjugateRhs,RowMajor,Version>
+{
+  static EIGEN_STRONG_INLINE void run(
+    Index rows, Index cols, Index depth,
+    const Scalar* lhs, Index lhsStride,
+    const Scalar* rhs, Index rhsStride,
+    Scalar* res,       Index resStride,
+    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
+  {
+    product_triangular_matrix_matrix<Scalar, Index,
+      (Mode&(UnitDiag|ZeroDiag)) | ((Mode&Upper) ? Lower : Upper),
+      (!LhsIsTriangular),
+      RhsStorageOrder==RowMajor ? ColMajor : RowMajor,
+      ConjugateRhs,
+      LhsStorageOrder==RowMajor ? ColMajor : RowMajor,
+      ConjugateLhs,
+      ColMajor>
+      ::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking);
+  }
+};
+
+// implements col-major += alpha * op(triangular) * op(general)
+template <typename Scalar, typename Index, int Mode,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs, int Version>
+struct product_triangular_matrix_matrix<Scalar,Index,Mode,true,
+                                           LhsStorageOrder,ConjugateLhs,
+                                           RhsStorageOrder,ConjugateRhs,ColMajor,Version>
+{
+  
+  typedef gebp_traits<Scalar,Scalar> Traits;
+  enum {
+    SmallPanelWidth   = 2 * EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+    IsLower = (Mode&Lower) == Lower,
+    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
+  };
+
+  static EIGEN_DONT_INLINE void run(
+    Index _rows, Index _cols, Index _depth,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
+};
+
+template <typename Scalar, typename Index, int Mode,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,
+                                                        LhsStorageOrder,ConjugateLhs,
+                                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
+    Index _rows, Index _cols, Index _depth,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
+  {
+    // strip zeros
+    Index diagSize  = (std::min)(_rows,_depth);
+    Index rows      = IsLower ? _rows : diagSize;
+    Index depth     = IsLower ? diagSize : _depth;
+    Index cols      = _cols;
+    
+    const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+    const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
+
+    Index kc = blocking.kc();                   // cache block size along the K direction
+    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
+
+    std::size_t sizeA = kc*mc;
+    std::size_t sizeB = kc*cols;
+    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockW, sizeW, blocking.blockW());
+
+    Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer;
+    triangularBuffer.setZero();
+    if((Mode&ZeroDiag)==ZeroDiag)
+      triangularBuffer.diagonal().setZero();
+    else
+      triangularBuffer.diagonal().setOnes();
+
+    gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
+    gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+    gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
+
+    for(Index k2=IsLower ? depth : 0;
+        IsLower ? k2>0 : k2<depth;
+        IsLower ? k2-=kc : k2+=kc)
+    {
+      Index actual_kc = (std::min)(IsLower ? k2 : depth-k2, kc);
+      Index actual_k2 = IsLower ? k2-actual_kc : k2;
+
+      // align blocks with the end of the triangular part for trapezoidal lhs
+      if((!IsLower)&&(k2<rows)&&(k2+actual_kc>rows))
+      {
+        actual_kc = rows-k2;
+        k2 = k2+actual_kc-kc;
+      }
+
+      pack_rhs(blockB, &rhs(actual_k2,0), rhsStride, actual_kc, cols);
+
+      // the selected lhs panel has to be split into three different parts (sketched below):
+      //  1 - the part which is zero => skip it
+      //  2 - the diagonal block => special kernel
+      //  3 - the dense panel below (lower case) or above (upper case) the diagonal block => GEPP
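+      //
+      // For a lower triangular lhs, the current panel of columns looks like
+      // (mirrored in the upper case):
+      //        [ 0 ]   <- part 1: zeros above the diagonal block
+      //        [ \ ]   <- part 2: the triangular diagonal block
+      //        [ x ]   <- part 3: the dense panel below it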
+
+      // the block diagonal, if any:
+      if(IsLower || actual_k2<rows)
+      {
+        // for each small vertical panel of the lhs
+        for (Index k1=0; k1<actual_kc; k1+=SmallPanelWidth)
+        {
+          Index actualPanelWidth = std::min<Index>(actual_kc-k1, SmallPanelWidth);
+          Index lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1;
+          Index startBlock   = actual_k2+k1;
+          Index blockBOffset = k1;
+
+          // => GEBP with the micro triangular block
+          // The trick is to pack this micro block while filling the opposite triangular part with zeros.
+          // To this end we do an extra triangular copy to a small temporary buffer
+          for (Index k=0;k<actualPanelWidth;++k)
+          {
+            if (SetDiag)
+              triangularBuffer.coeffRef(k,k) = lhs(startBlock+k,startBlock+k);
+            for (Index i=IsLower ? k+1 : 0; IsLower ? i<actualPanelWidth : i<k; ++i)
+              triangularBuffer.coeffRef(i,k) = lhs(startBlock+i,startBlock+k);
+          }
+          pack_lhs(blockA, triangularBuffer.data(), triangularBuffer.outerStride(), actualPanelWidth, actualPanelWidth);
+
+          gebp_kernel(res+startBlock, resStride, blockA, blockB, actualPanelWidth, actualPanelWidth, cols, alpha,
+                      actualPanelWidth, actual_kc, 0, blockBOffset, blockW);
+
+          // GEBP with remaining micro panel
+          if (lengthTarget>0)
+          {
+            Index startTarget  = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2;
+
+            pack_lhs(blockA, &lhs(startTarget,startBlock), lhsStride, actualPanelWidth, lengthTarget);
+
+            gebp_kernel(res+startTarget, resStride, blockA, blockB, lengthTarget, actualPanelWidth, cols, alpha,
+                        actualPanelWidth, actual_kc, 0, blockBOffset, blockW);
+          }
+        }
+      }
+      // the part below (lower case) or above (upper case) the diagonal => GEPP
+      {
+        Index start = IsLower ? k2 : 0;
+        Index end   = IsLower ? rows : (std::min)(actual_k2,rows);
+        for(Index i2=start; i2<end; i2+=mc)
+        {
+          const Index actual_mc = (std::min)(i2+mc,end)-i2;
+          gemm_pack_lhs<Scalar, Index, Traits::mr,Traits::LhsProgress, LhsStorageOrder,false>()
+            (blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
+
+          gebp_kernel(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
+        }
+      }
+    }
+  }
+
+// implements col-major += alpha * op(general) * op(triangular)
+template <typename Scalar, typename Index, int Mode,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs, int Version>
+struct product_triangular_matrix_matrix<Scalar,Index,Mode,false,
+                                        LhsStorageOrder,ConjugateLhs,
+                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>
+{
+  typedef gebp_traits<Scalar,Scalar> Traits;
+  enum {
+    SmallPanelWidth   = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+    IsLower = (Mode&Lower) == Lower,
+    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1
+  };
+
+  static EIGEN_DONT_INLINE void run(
+    Index _rows, Index _cols, Index _depth,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);
+};
+
+template <typename Scalar, typename Index, int Mode,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs, int Version>
+EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
+                                                        LhsStorageOrder,ConjugateLhs,
+                                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(
+    Index _rows, Index _cols, Index _depth,
+    const Scalar* _lhs, Index lhsStride,
+    const Scalar* _rhs, Index rhsStride,
+    Scalar* res,        Index resStride,
+    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)
+  {
+    // strip zeros
+    Index diagSize  = (std::min)(_cols,_depth);
+    Index rows      = _rows;
+    Index depth     = IsLower ? _depth : diagSize;
+    Index cols      = IsLower ? diagSize : _cols;
+    
+    const_blas_data_mapper<Scalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
+    const_blas_data_mapper<Scalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
+
+    Index kc = blocking.kc();                   // cache block size along the K direction
+    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
+
+    std::size_t sizeA = kc*mc;
+    std::size_t sizeB = kc*cols;
+    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockW, sizeW, blocking.blockW());
+
+    Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer;
+    triangularBuffer.setZero();
+    if((Mode&ZeroDiag)==ZeroDiag)
+      triangularBuffer.diagonal().setZero();
+    else
+      triangularBuffer.diagonal().setOnes();
+
+    gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
+    gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
+    gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
+    gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder,false,true> pack_rhs_panel;
+
+    for(Index k2=IsLower ? 0 : depth;
+        IsLower ? k2<depth  : k2>0;
+        IsLower ? k2+=kc   : k2-=kc)
+    {
+      Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc);
+      Index actual_k2 = IsLower ? k2 : k2-actual_kc;
+
+      // align blocks with the end of the triangular part for trapezoidal rhs
+      if(IsLower && (k2<cols) && (actual_k2+actual_kc>cols))
+      {
+        actual_kc = cols-k2;
+        k2 = actual_k2 + actual_kc - kc;
+      }
+
+      // remaining size
+      Index rs = IsLower ? (std::min)(cols,actual_k2) : cols - k2;
+      // size of the triangular part
+      Index ts = (IsLower && actual_k2>=cols) ? 0 : actual_kc;
+
+      Scalar* geb = blockB+ts*ts;
+
+      pack_rhs(geb, &rhs(actual_k2,IsLower ? 0 : k2), rhsStride, actual_kc, rs);
+
+      // pack the triangular part of the rhs, padding the unrolled blocks with zeros
+      if(ts>0)
+      {
+        for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
+        {
+          Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+          Index actual_j2 = actual_k2 + j2;
+          Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
+          Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
+          // general part
+          pack_rhs_panel(blockB+j2*actual_kc,
+                         &rhs(actual_k2+panelOffset, actual_j2), rhsStride,
+                         panelLength, actualPanelWidth,
+                         actual_kc, panelOffset);
+
+          // append the triangular part via a temporary buffer
+          for (Index j=0;j<actualPanelWidth;++j)
+          {
+            if (SetDiag)
+              triangularBuffer.coeffRef(j,j) = rhs(actual_j2+j,actual_j2+j);
+            for (Index k=IsLower ? j+1 : 0; IsLower ? k<actualPanelWidth : k<j; ++k)
+              triangularBuffer.coeffRef(k,j) = rhs(actual_j2+k,actual_j2+j);
+          }
+
+          pack_rhs_panel(blockB+j2*actual_kc,
+                         triangularBuffer.data(), triangularBuffer.outerStride(),
+                         actualPanelWidth, actualPanelWidth,
+                         actual_kc, j2);
+        }
+      }
+
+      for (Index i2=0; i2<rows; i2+=mc)
+      {
+        const Index actual_mc = (std::min)(mc,rows-i2);
+        pack_lhs(blockA, &lhs(i2, actual_k2), lhsStride, actual_kc, actual_mc);
+
+        // triangular kernel
+        if(ts>0)
+        {
+          for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
+          {
+            Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+            Index panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth;
+            Index blockOffset = IsLower ? j2 : 0;
+
+            gebp_kernel(res+i2+(actual_k2+j2)*resStride, resStride,
+                        blockA, blockB+j2*actual_kc,
+                        actual_mc, panelLength, actualPanelWidth,
+                        alpha,
+                        actual_kc, actual_kc,  // strides
+                        blockOffset, blockOffset,// offsets
+                        blockW); // workspace
+          }
+        }
+        gebp_kernel(res+i2+(IsLower ? 0 : k2)*resStride, resStride,
+                    blockA, geb, actual_mc, actual_kc, rs,
+                    alpha,
+                    -1, -1, 0, 0, blockW);
+      }
+    }
+  }
+
+/***************************************************************************
+* Wrapper to product_triangular_matrix_matrix
+***************************************************************************/
+
+template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
+struct traits<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false> >
+  : traits<ProductBase<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>, Lhs, Rhs> >
+{};
+
+} // end namespace internal
+
+template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
+struct TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>
+  : public ProductBase<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,false>, Lhs, Rhs >
+{
+  EIGEN_PRODUCT_PUBLIC_INTERFACE(TriangularProduct)
+
+  TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+  template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
+  {
+    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
+    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
+
+    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
+                               * RhsBlasTraits::extractScalarFactor(m_rhs);
+
+    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,
+              Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,4> BlockingType;
+
+    enum { IsLower = (Mode&Lower) == Lower };
+    Index stripedRows  = ((!LhsIsTriangular) || (IsLower))  ? lhs.rows() : (std::min)(lhs.rows(),lhs.cols());
+    Index stripedCols  = ((LhsIsTriangular)  || (!IsLower)) ? rhs.cols() : (std::min)(rhs.cols(),rhs.rows());
+    Index stripedDepth = LhsIsTriangular ? ((!IsLower) ? lhs.cols() : (std::min)(lhs.cols(),lhs.rows()))
+                                         : ((IsLower)  ? rhs.rows() : (std::min)(rhs.rows(),rhs.cols()));
+
+    BlockingType blocking(stripedRows, stripedCols, stripedDepth);
+
+    internal::product_triangular_matrix_matrix<Scalar, Index,
+      Mode, LhsIsTriangular,
+      (internal::traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,
+      (internal::traits<_ActualRhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,
+      (internal::traits<Dest          >::Flags&RowMajorBit) ? RowMajor : ColMajor>
+      ::run(
+        stripedRows, stripedCols, stripedDepth,   // sizes
+        &lhs.coeffRef(0,0),    lhs.outerStride(), // lhs info
+        &rhs.coeffRef(0,0),    rhs.outerStride(), // rhs info
+        &dst.coeffRef(0,0), dst.outerStride(),    // result info
+        actualAlpha, blocking
+      );
+  }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_H
diff --git a/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h b/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
new file mode 100644
index 0000000..ba41a1c
--- /dev/null
+++ b/Eigen/src/Core/products/TriangularMatrixMatrix_MKL.h
@@ -0,0 +1,309 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   Triangular matrix * matrix product functionality based on ?TRMM.
+ ********************************************************************************
+*/
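+
+// Note: BLAS ?TRMM computes B := alpha*op(A)*B in place, whereas Eigen needs
+// res += alpha*op(A)*B; the bindings below therefore run ?TRMM on a copy of
+// the general factor and then accumulate that copy into res.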
+
+#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_MKL_H
+#define EIGEN_TRIANGULAR_MATRIX_MATRIX_MKL_H
+
+namespace Eigen { 
+
+namespace internal {
+
+
+template <typename Scalar, typename Index,
+          int Mode, bool LhsIsTriangular,
+          int LhsStorageOrder, bool ConjugateLhs,
+          int RhsStorageOrder, bool ConjugateRhs,
+          int ResStorageOrder>
+struct product_triangular_matrix_matrix_trmm :
+       product_triangular_matrix_matrix<Scalar,Index,Mode,
+          LhsIsTriangular,LhsStorageOrder,ConjugateLhs,
+          RhsStorageOrder, ConjugateRhs, ResStorageOrder, BuiltIn> {};
+
+
+// try to go to BLAS specialization
+#define EIGEN_MKL_TRMM_SPECIALIZE(Scalar, LhsIsTriangular) \
+template <typename Index, int Mode, \
+          int LhsStorageOrder, bool ConjugateLhs, \
+          int RhsStorageOrder, bool ConjugateRhs> \
+struct product_triangular_matrix_matrix<Scalar,Index, Mode, LhsIsTriangular, \
+           LhsStorageOrder,ConjugateLhs, RhsStorageOrder,ConjugateRhs,ColMajor,Specialized> { \
+  static inline void run(Index _rows, Index _cols, Index _depth, const Scalar* _lhs, Index lhsStride,\
+    const Scalar* _rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha, level3_blocking<Scalar,Scalar>& blocking) { \
+      product_triangular_matrix_matrix_trmm<Scalar,Index,Mode, \
+        LhsIsTriangular,LhsStorageOrder,ConjugateLhs, \
+        RhsStorageOrder, ConjugateRhs, ColMajor>::run( \
+        _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
+  } \
+};
+
+EIGEN_MKL_TRMM_SPECIALIZE(double, true)
+EIGEN_MKL_TRMM_SPECIALIZE(double, false)
+EIGEN_MKL_TRMM_SPECIALIZE(dcomplex, true)
+EIGEN_MKL_TRMM_SPECIALIZE(dcomplex, false)
+EIGEN_MKL_TRMM_SPECIALIZE(float, true)
+EIGEN_MKL_TRMM_SPECIALIZE(float, false)
+EIGEN_MKL_TRMM_SPECIALIZE(scomplex, true)
+EIGEN_MKL_TRMM_SPECIALIZE(scomplex, false)
+
+// implements col-major += alpha * op(triangular) * op(general)
+#define EIGEN_MKL_TRMM_L(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, int Mode, \
+          int LhsStorageOrder, bool ConjugateLhs, \
+          int RhsStorageOrder, bool ConjugateRhs> \
+struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
+         LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor> \
+{ \
+  enum { \
+    IsLower = (Mode&Lower) == Lower, \
+    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
+    IsUnitDiag  = (Mode&UnitDiag) ? 1 : 0, \
+    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \
+    LowUp = IsLower ? Lower : Upper, \
+    conjA = ((LhsStorageOrder==ColMajor) && ConjugateLhs) ? 1 : 0 \
+  }; \
+\
+  static void run( \
+    Index _rows, Index _cols, Index _depth, \
+    const EIGTYPE* _lhs, Index lhsStride, \
+    const EIGTYPE* _rhs, Index rhsStride, \
+    EIGTYPE* res,        Index resStride, \
+    EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
+  { \
+   Index diagSize  = (std::min)(_rows,_depth); \
+   Index rows      = IsLower ? _rows : diagSize; \
+   Index depth     = IsLower ? diagSize : _depth; \
+   Index cols      = _cols; \
+\
+   typedef Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> MatrixLhs; \
+   typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs; \
+\
+/* Non-square case: does not fit MKL ?TRMM; fall back to the default triangular product or call MKL ?GEMM */ \
+   if (rows != depth) { \
+\
+     int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
+\
+     if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \
+     /* Most likely no benefit in calling TRMM or GEMM from MKL */ \
+       product_triangular_matrix_matrix<EIGTYPE,Index,Mode,true, \
+       LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
+           _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
+     /*std::cout << "TRMM_L: A is not square! Go to Eigen TRMM implementation!\n";*/ \
+     } else { \
+     /* It makes sense to call GEMM */ \
+       Map<const MatrixLhs, 0, OuterStride<> > lhsMap(_lhs,rows,depth,OuterStride<>(lhsStride)); \
+       MatrixLhs aa_tmp=lhsMap.template triangularView<Mode>(); \
+       MKL_INT aStride = aa_tmp.outerStride(); \
+       gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth); \
+       general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
+       rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, gemm_blocking, 0); \
+\
+     /*std::cout << "TRMM_L: A is not square! Go to MKL GEMM implementation! " << nthr<<" \n";*/ \
+     } \
+     return; \
+   } \
+   char side = 'L', transa, uplo, diag = 'N'; \
+   EIGTYPE *b; \
+   const EIGTYPE *a; \
+   MKL_INT m, n, lda, ldb; \
+   MKLTYPE alpha_; \
+\
+/* Set alpha_*/ \
+   assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); \
+\
+/* Set m, n */ \
+   m = (MKL_INT)diagSize; \
+   n = (MKL_INT)cols; \
+\
+/* Set trans */ \
+   transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \
+\
+/* Set b, ldb */ \
+   Map<const MatrixRhs, 0, OuterStride<> > rhs(_rhs,depth,cols,OuterStride<>(rhsStride)); \
+   MatrixX##EIGPREFIX b_tmp; \
+\
+   if (ConjugateRhs) b_tmp = rhs.conjugate(); else b_tmp = rhs; \
+   b = b_tmp.data(); \
+   ldb = b_tmp.outerStride(); \
+\
+/* Set uplo */ \
+   uplo = IsLower ? 'L' : 'U'; \
+   if (LhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
+/* Set a, lda */ \
+   Map<const MatrixLhs, 0, OuterStride<> > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \
+   MatrixLhs a_tmp; \
+\
+   if ((conjA!=0) || (SetDiag==0)) { \
+     if (conjA) a_tmp = lhs.conjugate(); else a_tmp = lhs; \
+     if (IsZeroDiag) \
+       a_tmp.diagonal().setZero(); \
+     else if (IsUnitDiag) \
+       a_tmp.diagonal().setOnes();\
+     a = a_tmp.data(); \
+     lda = a_tmp.outerStride(); \
+   } else { \
+     a = _lhs; \
+     lda = lhsStride; \
+   } \
+   /*std::cout << "TRMM_L: A is square! Go to MKL TRMM implementation! \n";*/ \
+/* call ?trmm*/ \
+   MKLPREFIX##trmm(&side, &uplo, &transa, &diag, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (MKLTYPE*)b, &ldb); \
+\
+/* Add op(a_triangular)*b into res*/ \
+   Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \
+   res_tmp=res_tmp+b_tmp; \
+  } \
+};
+
+EIGEN_MKL_TRMM_L(double, double, d, d)
+EIGEN_MKL_TRMM_L(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_TRMM_L(float, float, f, s)
+EIGEN_MKL_TRMM_L(scomplex, MKL_Complex8, cf, c)
+
+// implements col-major += alpha * op(general) * op(triangular)
+#define EIGEN_MKL_TRMM_R(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template <typename Index, int Mode, \
+          int LhsStorageOrder, bool ConjugateLhs, \
+          int RhsStorageOrder, bool ConjugateRhs> \
+struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
+         LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor> \
+{ \
+  enum { \
+    IsLower = (Mode&Lower) == Lower, \
+    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
+    IsUnitDiag  = (Mode&UnitDiag) ? 1 : 0, \
+    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \
+    LowUp = IsLower ? Lower : Upper, \
+    conjA = ((RhsStorageOrder==ColMajor) && ConjugateRhs) ? 1 : 0 \
+  }; \
+\
+  static void run( \
+    Index _rows, Index _cols, Index _depth, \
+    const EIGTYPE* _lhs, Index lhsStride, \
+    const EIGTYPE* _rhs, Index rhsStride, \
+    EIGTYPE* res,        Index resStride, \
+    EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \
+  { \
+   Index diagSize  = (std::min)(_cols,_depth); \
+   Index rows      = _rows; \
+   Index depth     = IsLower ? _depth : diagSize; \
+   Index cols      = IsLower ? diagSize : _cols; \
+\
+   typedef Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> MatrixLhs; \
+   typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs; \
+\
+/* Non-square case: does not fit MKL ?TRMM; fall back to the default triangular product or call MKL ?GEMM */ \
+   if (cols != depth) { \
+\
+     int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
+\
+     if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \
+     /* Most likely no benefit in calling TRMM or GEMM from MKL */ \
+       product_triangular_matrix_matrix<EIGTYPE,Index,Mode,false, \
+       LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \
+           _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \
+       /*std::cout << "TRMM_R: A is not square! Go to Eigen TRMM implementation!\n";*/ \
+     } else { \
+     /* It makes sense to call GEMM */ \
+       Map<const MatrixRhs, 0, OuterStride<> > rhsMap(_rhs,depth,cols, OuterStride<>(rhsStride)); \
+       MatrixRhs aa_tmp=rhsMap.template triangularView<Mode>(); \
+       MKL_INT aStride = aa_tmp.outerStride(); \
+       gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth); \
+       general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \
+       rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, gemm_blocking, 0); \
+\
+     /*std::cout << "TRMM_R: A is not square! Go to MKL GEMM implementation! " << nthr<<" \n";*/ \
+     } \
+     return; \
+   } \
+   char side = 'R', transa, uplo, diag = 'N'; \
+   EIGTYPE *b; \
+   const EIGTYPE *a; \
+   MKL_INT m, n, lda, ldb; \
+   MKLTYPE alpha_; \
+\
+/* Set alpha_*/ \
+   assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); \
+\
+/* Set m, n */ \
+   m = (MKL_INT)rows; \
+   n = (MKL_INT)diagSize; \
+\
+/* Set trans */ \
+   transa = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \
+\
+/* Set b, ldb */ \
+   Map<const MatrixLhs, 0, OuterStride<> > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \
+   MatrixX##EIGPREFIX b_tmp; \
+\
+   if (ConjugateLhs) b_tmp = lhs.conjugate(); else b_tmp = lhs; \
+   b = b_tmp.data(); \
+   ldb = b_tmp.outerStride(); \
+\
+/* Set uplo */ \
+   uplo = IsLower ? 'L' : 'U'; \
+   if (RhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
+/* Set a, lda */ \
+   Map<const MatrixRhs, 0, OuterStride<> > rhs(_rhs,depth,cols, OuterStride<>(rhsStride)); \
+   MatrixRhs a_tmp; \
+\
+   if ((conjA!=0) || (SetDiag==0)) { \
+     if (conjA) a_tmp = rhs.conjugate(); else a_tmp = rhs; \
+     if (IsZeroDiag) \
+       a_tmp.diagonal().setZero(); \
+     else if (IsUnitDiag) \
+       a_tmp.diagonal().setOnes();\
+     a = a_tmp.data(); \
+     lda = a_tmp.outerStride(); \
+   } else { \
+     a = _rhs; \
+     lda = rhsStride; \
+   } \
+   /*std::cout << "TRMM_R: A is square! Go to MKL TRMM implementation! \n";*/ \
+/* call ?trmm*/ \
+   MKLPREFIX##trmm(&side, &uplo, &transa, &diag, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (MKLTYPE*)b, &ldb); \
+\
+/* Add op(a_triangular)*b into res*/ \
+   Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \
+   res_tmp=res_tmp+b_tmp; \
+  } \
+};
+
+EIGEN_MKL_TRMM_R(double, double, d, d)
+EIGEN_MKL_TRMM_R(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_TRMM_R(float, float, f, s)
+EIGEN_MKL_TRMM_R(scomplex, MKL_Complex8, cf, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_MKL_H
diff --git a/Eigen/src/Core/products/TriangularMatrixVector.h b/Eigen/src/Core/products/TriangularMatrixVector.h
new file mode 100644
index 0000000..6117d5a
--- /dev/null
+++ b/Eigen/src/Core/products/TriangularMatrixVector.h
@@ -0,0 +1,348 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRIANGULARMATRIXVECTOR_H
+#define EIGEN_TRIANGULARMATRIXVECTOR_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder, int Version=Specialized>
+struct triangular_matrix_vector_product;
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int Version>
+struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor,Version>
+{
+  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+  enum {
+    IsLower = ((Mode&Lower)==Lower),
+    HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
+    HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
+  };
+  static EIGEN_DONT_INLINE  void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
+                                     const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha);
+};
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int Version>
+EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor,Version>
+  ::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
+        const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha)
+  {
+    static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+    Index size = (std::min)(_rows,_cols);
+    Index rows = IsLower ? _rows : (std::min)(_rows,_cols);
+    Index cols = IsLower ? (std::min)(_rows,_cols) : _cols;
+
+    typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > LhsMap;
+    const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride));
+    typename conj_expr_if<ConjLhs,LhsMap>::type cjLhs(lhs);
+    
+    typedef Map<const Matrix<RhsScalar,Dynamic,1>, 0, InnerStride<> > RhsMap;
+    const RhsMap rhs(_rhs,cols,InnerStride<>(rhsIncr));
+    typename conj_expr_if<ConjRhs,RhsMap>::type cjRhs(rhs);
+
+    typedef Map<Matrix<ResScalar,Dynamic,1> > ResMap;
+    ResMap res(_res,rows);
+
+    for (Index pi=0; pi<size; pi+=PanelWidth)
+    {
+      Index actualPanelWidth = (std::min)(PanelWidth, size-pi);
+      for (Index k=0; k<actualPanelWidth; ++k)
+      {
+        Index i = pi + k;
+        Index s = IsLower ? ((HasUnitDiag||HasZeroDiag) ? i+1 : i ) : pi;
+        Index r = IsLower ? actualPanelWidth-k : k+1;
+        if ((!(HasUnitDiag||HasZeroDiag)) || (--r)>0)
+          res.segment(s,r) += (alpha * cjRhs.coeff(i)) * cjLhs.col(i).segment(s,r);
+        if (HasUnitDiag)
+          res.coeffRef(i) += alpha * cjRhs.coeff(i);
+      }
+      Index r = IsLower ? rows - pi - actualPanelWidth : pi;
+      if (r>0)
+      {
+        Index s = IsLower ? pi+actualPanelWidth : 0;
+        general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjLhs,RhsScalar,ConjRhs,BuiltIn>::run(
+            r, actualPanelWidth,
+            &lhs.coeffRef(s,pi), lhsStride,
+            &rhs.coeffRef(pi), rhsIncr,
+            &res.coeffRef(s), resIncr, alpha);
+      }
+    }
+    if((!IsLower) && cols>size)
+    {
+      general_matrix_vector_product<Index,LhsScalar,ColMajor,ConjLhs,RhsScalar,ConjRhs>::run(
+          rows, cols-size,
+          &lhs.coeffRef(0,size), lhsStride,
+          &rhs.coeffRef(size), rhsIncr,
+          _res, resIncr, alpha);
+    }
+  }
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version>
+struct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version>
+{
+  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
+  enum {
+    IsLower = ((Mode&Lower)==Lower),
+    HasUnitDiag = (Mode & UnitDiag)==UnitDiag,
+    HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag
+  };
+  static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
+                                    const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha);
+};
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version>
+EIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version>
+  ::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,
+        const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha)
+  {
+    static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+    Index diagSize = (std::min)(_rows,_cols);
+    Index rows = IsLower ? _rows : diagSize;
+    Index cols = IsLower ? diagSize : _cols;
+
+    typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,RowMajor>, 0, OuterStride<> > LhsMap;
+    const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride));
+    typename conj_expr_if<ConjLhs,LhsMap>::type cjLhs(lhs);
+
+    typedef Map<const Matrix<RhsScalar,Dynamic,1> > RhsMap;
+    const RhsMap rhs(_rhs,cols);
+    typename conj_expr_if<ConjRhs,RhsMap>::type cjRhs(rhs);
+
+    typedef Map<Matrix<ResScalar,Dynamic,1>, 0, InnerStride<> > ResMap;
+    ResMap res(_res,rows,InnerStride<>(resIncr));
+    
+    for (Index pi=0; pi<diagSize; pi+=PanelWidth)
+    {
+      Index actualPanelWidth = (std::min)(PanelWidth, diagSize-pi);
+      for (Index k=0; k<actualPanelWidth; ++k)
+      {
+        Index i = pi + k;
+        Index s = IsLower ? pi  : ((HasUnitDiag||HasZeroDiag) ? i+1 : i);
+        Index r = IsLower ? k+1 : actualPanelWidth-k;
+        if ((!(HasUnitDiag||HasZeroDiag)) || (--r)>0)
+          res.coeffRef(i) += alpha * (cjLhs.row(i).segment(s,r).cwiseProduct(cjRhs.segment(s,r).transpose())).sum();
+        if (HasUnitDiag)
+          res.coeffRef(i) += alpha * cjRhs.coeff(i);
+      }
+      Index r = IsLower ? pi : cols - pi - actualPanelWidth;
+      if (r>0)
+      {
+        Index s = IsLower ? 0 : pi + actualPanelWidth;
+        general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjLhs,RhsScalar,ConjRhs,BuiltIn>::run(
+            actualPanelWidth, r,
+            &lhs.coeffRef(pi,s), lhsStride,
+            &rhs.coeffRef(s), rhsIncr,
+            &res.coeffRef(pi), resIncr, alpha);
+      }
+    }
+    if(IsLower && rows>diagSize)
+    {
+      general_matrix_vector_product<Index,LhsScalar,RowMajor,ConjLhs,RhsScalar,ConjRhs>::run(
+            rows-diagSize, cols,
+            &lhs.coeffRef(diagSize,0), lhsStride,
+            &rhs.coeffRef(0), rhsIncr,
+            &res.coeffRef(diagSize), resIncr, alpha);
+    }
+  }
+
+/***************************************************************************
+* Wrapper to triangular_matrix_vector_product
+***************************************************************************/
+
+template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
+struct traits<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,true> >
+ : traits<ProductBase<TriangularProduct<Mode,LhsIsTriangular,Lhs,false,Rhs,true>, Lhs, Rhs> >
+{};
+
+template<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>
+struct traits<TriangularProduct<Mode,LhsIsTriangular,Lhs,true,Rhs,false> >
+ : traits<ProductBase<TriangularProduct<Mode,LhsIsTriangular,Lhs,true,Rhs,false>, Lhs, Rhs> >
+{};
+
+
+template<int StorageOrder>
+struct trmv_selector;
+
+} // end namespace internal
+
+template<int Mode, typename Lhs, typename Rhs>
+struct TriangularProduct<Mode,true,Lhs,false,Rhs,true>
+  : public ProductBase<TriangularProduct<Mode,true,Lhs,false,Rhs,true>, Lhs, Rhs >
+{
+  EIGEN_PRODUCT_PUBLIC_INTERFACE(TriangularProduct)
+
+  TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+  template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
+  {
+    eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
+  
+    internal::trmv_selector<(int(internal::traits<Lhs>::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(*this, dst, alpha);
+  }
+};
+
+template<int Mode, typename Lhs, typename Rhs>
+struct TriangularProduct<Mode,false,Lhs,true,Rhs,false>
+  : public ProductBase<TriangularProduct<Mode,false,Lhs,true,Rhs,false>, Lhs, Rhs >
+{
+  EIGEN_PRODUCT_PUBLIC_INTERFACE(TriangularProduct)
+
+  TriangularProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
+
+  template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
+  {
+    eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
+
+    typedef TriangularProduct<(Mode & (UnitDiag|ZeroDiag)) | ((Mode & Lower) ? Upper : Lower),true,Transpose<const Rhs>,false,Transpose<const Lhs>,true> TriangularProductTranspose;
+    Transpose<Dest> dstT(dst);
+    internal::trmv_selector<(int(internal::traits<Rhs>::Flags)&RowMajorBit) ? ColMajor : RowMajor>::run(
+      TriangularProductTranspose(m_rhs.transpose(),m_lhs.transpose()), dstT, alpha);
+  }
+};
+
+namespace internal {
+
+// TODO: find a way to factorize this piece of code with gemv_selector since the logic is exactly the same.
+  
+template<> struct trmv_selector<ColMajor>
+{
+  template<int Mode, typename Lhs, typename Rhs, typename Dest>
+  static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, const typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar& alpha)
+  {
+    typedef TriangularProduct<Mode,true,Lhs,false,Rhs,true> ProductType;
+    typedef typename ProductType::Index Index;
+    typedef typename ProductType::LhsScalar   LhsScalar;
+    typedef typename ProductType::RhsScalar   RhsScalar;
+    typedef typename ProductType::Scalar      ResScalar;
+    typedef typename ProductType::RealScalar  RealScalar;
+    typedef typename ProductType::ActualLhsType ActualLhsType;
+    typedef typename ProductType::ActualRhsType ActualRhsType;
+    typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+    typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+    typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;
+
+    typename internal::add_const_on_value_type<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+    typename internal::add_const_on_value_type<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+                                  * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+    enum {
+      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1;
+      // on the other hand, it is good for the cache to pack the vector anyway...
+      EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
+      ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
+      MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
+    };
+
+    gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
+
+    bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
+    bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
+    
+    RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);
+
+    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
+                                                  evalToDest ? dest.data() : static_dest.data());
+
+    if(!evalToDest)
+    {
+      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      Index size = dest.size();
+      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      #endif
+      if(!alphaIsCompatible)
+      {
+        MappedDest(actualDestPtr, dest.size()).setZero();
+        compatibleAlpha = RhsScalar(1);
+      }
+      else
+        MappedDest(actualDestPtr, dest.size()) = dest;
+    }
+    
+    internal::triangular_matrix_vector_product
+      <Index,Mode,
+       LhsScalar, LhsBlasTraits::NeedToConjugate,
+       RhsScalar, RhsBlasTraits::NeedToConjugate,
+       ColMajor>
+      ::run(actualLhs.rows(),actualLhs.cols(),
+            actualLhs.data(),actualLhs.outerStride(),
+            actualRhs.data(),actualRhs.innerStride(),
+            actualDestPtr,1,compatibleAlpha);
+
+    if (!evalToDest)
+    {
+      if(!alphaIsCompatible)
+        dest += actualAlpha * MappedDest(actualDestPtr, dest.size());
+      else
+        dest = MappedDest(actualDestPtr, dest.size());
+    }
+  }
+};
+
+template<> struct trmv_selector<RowMajor>
+{
+  template<int Mode, typename Lhs, typename Rhs, typename Dest>
+  static void run(const TriangularProduct<Mode,true,Lhs,false,Rhs,true>& prod, Dest& dest, const typename TriangularProduct<Mode,true,Lhs,false,Rhs,true>::Scalar& alpha)
+  {
+    typedef TriangularProduct<Mode,true,Lhs,false,Rhs,true> ProductType;
+    typedef typename ProductType::LhsScalar LhsScalar;
+    typedef typename ProductType::RhsScalar RhsScalar;
+    typedef typename ProductType::Scalar    ResScalar;
+    typedef typename ProductType::Index Index;
+    typedef typename ProductType::ActualLhsType ActualLhsType;
+    typedef typename ProductType::ActualRhsType ActualRhsType;
+    typedef typename ProductType::_ActualRhsType _ActualRhsType;
+    typedef typename ProductType::LhsBlasTraits LhsBlasTraits;
+    typedef typename ProductType::RhsBlasTraits RhsBlasTraits;
+
+    typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(prod.lhs());
+    typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(prod.rhs());
+
+    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
+                                  * RhsBlasTraits::extractScalarFactor(prod.rhs());
+
+    enum {
+      DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1
+    };
+
+    gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;
+
+    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),
+        DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());
+
+    if(!DirectlyUseRhs)
+    {
+      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      int size = actualRhs.size();
+      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
+      #endif
+      Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
+    }
+    
+    internal::triangular_matrix_vector_product
+      <Index,Mode,
+       LhsScalar, LhsBlasTraits::NeedToConjugate,
+       RhsScalar, RhsBlasTraits::NeedToConjugate,
+       RowMajor>
+      ::run(actualLhs.rows(),actualLhs.cols(),
+            actualLhs.data(),actualLhs.outerStride(),
+            actualRhsPtr,1,
+            dest.data(),dest.innerStride(),
+            actualAlpha);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULARMATRIXVECTOR_H
diff --git a/Eigen/src/Core/products/TriangularMatrixVector_MKL.h b/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
new file mode 100644
index 0000000..09f110d
--- /dev/null
+++ b/Eigen/src/Core/products/TriangularMatrixVector_MKL.h
@@ -0,0 +1,247 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   Triangular matrix-vector product functionality based on ?TRMV.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_TRIANGULAR_MATRIX_VECTOR_MKL_H
+#define EIGEN_TRIANGULAR_MATRIX_VECTOR_MKL_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/**********************************************************************
+* This file implements triangular matrix-vector multiplication using BLAS
+**********************************************************************/
+
+// trmv/hemv specialization
+
+template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder>
+struct triangular_matrix_vector_product_trmv :
+  triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,StorageOrder,BuiltIn> {};
+
+#define EIGEN_MKL_TRMV_SPECIALIZE(Scalar) \
+template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
+struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor,Specialized> { \
+ static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
+                                     const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \
+      triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor>::run( \
+        _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
+  } \
+}; \
+template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
+struct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor,Specialized> { \
+ static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \
+                                     const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \
+      triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor>::run( \
+        _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
+  } \
+};
+
+EIGEN_MKL_TRMV_SPECIALIZE(double)
+EIGEN_MKL_TRMV_SPECIALIZE(float)
+EIGEN_MKL_TRMV_SPECIALIZE(dcomplex)
+EIGEN_MKL_TRMV_SPECIALIZE(scomplex)
+
+// implements col-major: res += alpha * op(triangular) * vector
+#define EIGEN_MKL_TRMV_CM(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
+struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor> { \
+  enum { \
+    IsLower = (Mode&Lower) == Lower, \
+    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
+    IsUnitDiag  = (Mode&UnitDiag) ? 1 : 0, \
+    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \
+    LowUp = IsLower ? Lower : Upper \
+  }; \
+ static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
+                 const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
+ { \
+   if (ConjLhs || IsZeroDiag) { \
+     triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor,BuiltIn>::run( \
+       _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
+     return; \
+   }\
+   Index size = (std::min)(_rows,_cols); \
+   Index rows = IsLower ? _rows : size; \
+   Index cols = IsLower ? size : _cols; \
+\
+   typedef VectorX##EIGPREFIX VectorRhs; \
+   EIGTYPE *x, *y;\
+\
+/* Set x*/ \
+   Map<const VectorRhs, 0, InnerStride<> > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \
+   VectorRhs x_tmp; \
+   if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
+   x = x_tmp.data(); \
+\
+/* Square part handling */\
+\
+   char trans, uplo, diag; \
+   MKL_INT m, n, lda, incx, incy; \
+   EIGTYPE const *a; \
+   MKLTYPE alpha_, beta_; \
+   assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); \
+   assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(beta_, EIGTYPE(1)); \
+\
+/* Set m, n */ \
+   n = (MKL_INT)size; \
+   lda = lhsStride; \
+   incx = 1; \
+   incy = resIncr; \
+\
+/* Set uplo, trans and diag*/ \
+   trans = 'N'; \
+   uplo = IsLower ? 'L' : 'U'; \
+   diag = IsUnitDiag ? 'U' : 'N'; \
+\
+/* call ?TRMV*/ \
+   MKLPREFIX##trmv(&uplo, &trans, &diag, &n, (const MKLTYPE*)_lhs, &lda, (MKLTYPE*)x, &incx); \
+\
+/* Add op(a_tr)rhs into res*/ \
+   MKLPREFIX##axpy(&n, &alpha_,(const MKLTYPE*)x, &incx, (MKLTYPE*)_res, &incy); \
+/* Non-square case - doesn't map to MKL ?TRMV; fall back to the default triangular product */ \
+   if (size<(std::max)(rows,cols)) { \
+     typedef Matrix<EIGTYPE, Dynamic, Dynamic> MatrixLhs; \
+     if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
+     x = x_tmp.data(); \
+     if (size<rows) { \
+       y = _res + size*resIncr; \
+       a = _lhs + size; \
+       m = rows-size; \
+       n = size; \
+     } \
+     else { \
+       x += size; \
+       y = _res; \
+       a = _lhs + size*lda; \
+       m = size; \
+       n = cols-size; \
+     } \
+     MKLPREFIX##gemv(&trans, &m, &n, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)x, &incx, &beta_, (MKLTYPE*)y, &incy); \
+   } \
+  } \
+};
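+// Summary of the kernel above: ?TRMV overwrites x with op(A)*x on the square
+// diagonal block, ?AXPY then accumulates alpha*x into res, and when the matrix
+// is not square the remaining rectangular panel is folded in with one ?GEMV.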
+
+EIGEN_MKL_TRMV_CM(double, double, d, d)
+EIGEN_MKL_TRMV_CM(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_TRMV_CM(float, float, f, s)
+EIGEN_MKL_TRMV_CM(scomplex, MKL_Complex8, cf, c)
+
+// implements row-major: res += alpha * op(triangular) * vector
+#define EIGEN_MKL_TRMV_RM(EIGTYPE, MKLTYPE, EIGPREFIX, MKLPREFIX) \
+template<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \
+struct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor> { \
+  enum { \
+    IsLower = (Mode&Lower) == Lower, \
+    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \
+    IsUnitDiag  = (Mode&UnitDiag) ? 1 : 0, \
+    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \
+    LowUp = IsLower ? Lower : Upper \
+  }; \
+ static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \
+                 const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \
+ { \
+   if (IsZeroDiag) { \
+     triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor,BuiltIn>::run( \
+       _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \
+     return; \
+   }\
+   Index size = (std::min)(_rows,_cols); \
+   Index rows = IsLower ? _rows : size; \
+   Index cols = IsLower ? size : _cols; \
+\
+   typedef VectorX##EIGPREFIX VectorRhs; \
+   EIGTYPE *x, *y;\
+\
+/* Set x*/ \
+   Map<const VectorRhs, 0, InnerStride<> > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \
+   VectorRhs x_tmp; \
+   if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
+   x = x_tmp.data(); \
+\
+/* Square part handling */\
+\
+   char trans, uplo, diag; \
+   MKL_INT m, n, lda, incx, incy; \
+   EIGTYPE const *a; \
+   MKLTYPE alpha_, beta_; \
+   assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(alpha_, alpha); \
+   assign_scalar_eig2mkl<MKLTYPE, EIGTYPE>(beta_, EIGTYPE(1)); \
+\
+/* Set m, n */ \
+   n = (MKL_INT)size; \
+   lda = lhsStride; \
+   incx = 1; \
+   incy = resIncr; \
+\
+/* Set uplo, trans and diag*/ \
+   trans = ConjLhs ? 'C' : 'T'; \
+   uplo = IsLower ? 'U' : 'L'; \
+   diag = IsUnitDiag ? 'U' : 'N'; \
+\
+/* call ?TRMV*/ \
+   MKLPREFIX##trmv(&uplo, &trans, &diag, &n, (const MKLTYPE*)_lhs, &lda, (MKLTYPE*)x, &incx); \
+\
+/* Add op(a_tr)rhs into res*/ \
+   MKLPREFIX##axpy(&n, &alpha_,(const MKLTYPE*)x, &incx, (MKLTYPE*)_res, &incy); \
+/* Non-square case - doesn't map to MKL ?TRMV; fall back to the default triangular product */ \
+   if (size<(std::max)(rows,cols)) { \
+     typedef Matrix<EIGTYPE, Dynamic, Dynamic> MatrixLhs; \
+     if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \
+     x = x_tmp.data(); \
+     if (size<rows) { \
+       y = _res + size*resIncr; \
+       a = _lhs + size*lda; \
+       m = rows-size; \
+       n = size; \
+     } \
+     else { \
+       x += size; \
+       y = _res; \
+       a = _lhs + size; \
+       m = size; \
+       n = cols-size; \
+     } \
+     MKLPREFIX##gemv(&trans, &n, &m, &alpha_, (const MKLTYPE*)a, &lda, (const MKLTYPE*)x, &incx, &beta_, (MKLTYPE*)y, &incy); \
+   } \
+  } \
+};
+
+EIGEN_MKL_TRMV_RM(double, double, d, d)
+EIGEN_MKL_TRMV_RM(dcomplex, MKL_Complex16, cd, z)
+EIGEN_MKL_TRMV_RM(float, float, f, s)
+EIGEN_MKL_TRMV_RM(scomplex, MKL_Complex8, cf, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULAR_MATRIX_VECTOR_MKL_H
diff --git a/Eigen/src/Core/products/TriangularSolverMatrix.h b/Eigen/src/Core/products/TriangularSolverMatrix.h
new file mode 100644
index 0000000..f103eae
--- /dev/null
+++ b/Eigen/src/Core/products/TriangularSolverMatrix.h
@@ -0,0 +1,329 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_H
+#define EIGEN_TRIANGULAR_SOLVER_MATRIX_H
+
+namespace Eigen { 
+
+namespace internal {
+
+// if the rhs is row major, let's solve the transposed problem instead
+template <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder>
+struct triangular_solve_matrix<Scalar,Index,Side,Mode,Conjugate,TriStorageOrder,RowMajor>
+{
+  static void run(
+    Index size, Index cols,
+    const Scalar*  tri, Index triStride,
+    Scalar* _other, Index otherStride,
+    level3_blocking<Scalar,Scalar>& blocking)
+  {
+    triangular_solve_matrix<
+      Scalar, Index, Side==OnTheLeft?OnTheRight:OnTheLeft,
+      (Mode&UnitDiag) | ((Mode&Upper) ? Lower : Upper),
+      NumTraits<Scalar>::IsComplex && Conjugate,
+      TriStorageOrder==RowMajor ? ColMajor : RowMajor, ColMajor>
+      ::run(size, cols, tri, triStride, _other, otherStride, blocking);
+  }
+};
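+// In other words, solving op(T) * X = B with a row-major X/B is recast as the
+// transposed problem X^T * op(T)^T = B^T: the side is swapped, Lower/Upper is
+// flipped (keeping UnitDiag), and the storage orders of both the triangular
+// factor and the result are reversed, so only the two column-major kernels
+// below need an implementation.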
+
+/* Optimized triangular solver with multiple right hand sides and the triangular matrix on the left
+ */
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+struct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>
+{
+  static EIGEN_DONT_INLINE void run(
+    Index size, Index otherSize,
+    const Scalar* _tri, Index triStride,
+    Scalar* _other, Index otherStride,
+    level3_blocking<Scalar,Scalar>& blocking);
+};
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>::run(
+    Index size, Index otherSize,
+    const Scalar* _tri, Index triStride,
+    Scalar* _other, Index otherStride,
+    level3_blocking<Scalar,Scalar>& blocking)
+  {
+    Index cols = otherSize;
+    const_blas_data_mapper<Scalar, Index, TriStorageOrder> tri(_tri,triStride);
+    blas_data_mapper<Scalar, Index, ColMajor> other(_other,otherStride);
+
+    typedef gebp_traits<Scalar,Scalar> Traits;
+    enum {
+      SmallPanelWidth   = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+      IsLower = (Mode&Lower) == Lower
+    };
+
+    Index kc = blocking.kc();                   // cache block size along the K direction
+    Index mc = (std::min)(size,blocking.mc());  // cache block size along the M direction
+
+    std::size_t sizeA = kc*mc;
+    std::size_t sizeB = kc*cols;
+    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockW, sizeW, blocking.blockW());
+
+    conj_if<Conjugate> conj;
+    gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, Conjugate, false> gebp_kernel;
+    gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, TriStorageOrder> pack_lhs;
+    gemm_pack_rhs<Scalar, Index, Traits::nr, ColMajor, false, true> pack_rhs;
+
+    // the goal here is to subdivide the Rhs panels so that we keep some cache
+    // coherence when accessing the rhs elements
+    std::ptrdiff_t l1, l2;
+    manage_caching_sizes(GetAction, &l1, &l2);
+    Index subcols = cols>0 ? l2/(4 * sizeof(Scalar) * otherStride) : 0;
+    subcols = std::max<Index>((subcols/Traits::nr)*Traits::nr, Traits::nr);
+
+    for(Index k2=IsLower ? 0 : size;
+        IsLower ? k2<size : k2>0;
+        IsLower ? k2+=kc : k2-=kc)
+    {
+      const Index actual_kc = (std::min)(IsLower ? size-k2 : k2, kc);
+
+      // We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel,
+      // and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into
+      // A11 (the triangular part) and A21 the remaining rectangular part.
+      // Then the high level algorithm is:
+      //  - B = R1                    => general block copy (done during the next step)
+      //  - R1 = A11^-1 B             => tricky part
+      //  - update B from the new R1  => actually this has to be performed continuously during the above step
+      //  - R2 -= A21 * B             => GEPP
+
+      // The tricky part: compute R1 = A11^-1 B while updating B from R1
+      // The idea is to split A11 into multiple small vertical panels.
+      // Each panel can be split into a small triangular part T1k which is processed without optimization,
+      // and the remaining small part T2k which is processed using gebp with appropriate block strides
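+      // For instance, assuming SmallPanelWidth==4 and actual_kc==8, A11 is cut
+      // into two panels of width 4: each 4x4 triangle on the diagonal is solved
+      // coefficient by coefficient in the loop below, and the remaining
+      // rectangular part of the panel is then folded in with a single gebp call.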
+      for(Index j2=0; j2<cols; j2+=subcols)
+      {
+        Index actual_cols = (std::min)(cols-j2,subcols);
+        // for each small vertical panels [T1k^T, T2k^T]^T of lhs
+        for (Index k1=0; k1<actual_kc; k1+=SmallPanelWidth)
+        {
+          Index actualPanelWidth = std::min<Index>(actual_kc-k1, SmallPanelWidth);
+          // tr solve
+          for (Index k=0; k<actualPanelWidth; ++k)
+          {
+            // TODO write a small kernel handling this (can be shared with trsv)
+            Index i  = IsLower ? k2+k1+k : k2-k1-k-1;
+            Index s  = IsLower ? k2+k1 : i+1;
+            Index rs = actualPanelWidth - k - 1; // remaining size
+
+            Scalar a = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(tri(i,i));
+            for (Index j=j2; j<j2+actual_cols; ++j)
+            {
+              if (TriStorageOrder==RowMajor)
+              {
+                Scalar b(0);
+                const Scalar* l = &tri(i,s);
+                Scalar* r = &other(s,j);
+                for (Index i3=0; i3<k; ++i3)
+                  b += conj(l[i3]) * r[i3];
+
+                other(i,j) = (other(i,j) - b)*a;
+              }
+              else
+              {
+                Index s = IsLower ? i+1 : i-rs;
+                Scalar b = (other(i,j) *= a);
+                Scalar* r = &other(s,j);
+                const Scalar* l = &tri(s,i);
+                for (Index i3=0;i3<rs;++i3)
+                  r[i3] -= b * conj(l[i3]);
+              }
+            }
+          }
+
+          Index lengthTarget = actual_kc-k1-actualPanelWidth;
+          Index startBlock   = IsLower ? k2+k1 : k2-k1-actualPanelWidth;
+          Index blockBOffset = IsLower ? k1 : lengthTarget;
+
+          // update the respective rows of B from other
+          pack_rhs(blockB+actual_kc*j2, &other(startBlock,j2), otherStride, actualPanelWidth, actual_cols, actual_kc, blockBOffset);
+
+          // GEBP
+          if (lengthTarget>0)
+          {
+            Index startTarget  = IsLower ? k2+k1+actualPanelWidth : k2-actual_kc;
+
+            pack_lhs(blockA, &tri(startTarget,startBlock), triStride, actualPanelWidth, lengthTarget);
+
+            gebp_kernel(&other(startTarget,j2), otherStride, blockA, blockB+actual_kc*j2, lengthTarget, actualPanelWidth, actual_cols, Scalar(-1),
+                        actualPanelWidth, actual_kc, 0, blockBOffset, blockW);
+          }
+        }
+      }
+      
+      // R2 -= A21 * B => GEPP
+      {
+        Index start = IsLower ? k2+kc : 0;
+        Index end   = IsLower ? size : k2-kc;
+        for(Index i2=start; i2<end; i2+=mc)
+        {
+          const Index actual_mc = (std::min)(mc,end-i2);
+          if (actual_mc>0)
+          {
+            pack_lhs(blockA, &tri(i2, IsLower ? k2 : k2-kc), triStride, actual_kc, actual_mc);
+
+            gebp_kernel(_other+i2, otherStride, blockA, blockB, actual_mc, actual_kc, cols, Scalar(-1), -1, -1, 0, 0, blockW);
+          }
+        }
+      }
+    }
+  }
+
+/* Optimized triangular solver with multiple left hand sides and the triangular matrix on the right
+ */
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+struct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>
+{
+  static EIGEN_DONT_INLINE void run(
+    Index size, Index otherSize,
+    const Scalar* _tri, Index triStride,
+    Scalar* _other, Index otherStride,
+    level3_blocking<Scalar,Scalar>& blocking);
+};
+template <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>
+EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>::run(
+    Index size, Index otherSize,
+    const Scalar* _tri, Index triStride,
+    Scalar* _other, Index otherStride,
+    level3_blocking<Scalar,Scalar>& blocking)
+  {
+    Index rows = otherSize;
+    const_blas_data_mapper<Scalar, Index, TriStorageOrder> rhs(_tri,triStride);
+    blas_data_mapper<Scalar, Index, ColMajor> lhs(_other,otherStride);
+
+    typedef gebp_traits<Scalar,Scalar> Traits;
+    enum {
+      RhsStorageOrder   = TriStorageOrder,
+      SmallPanelWidth   = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),
+      IsLower = (Mode&Lower) == Lower
+    };
+
+    Index kc = blocking.kc();                   // cache block size along the K direction
+    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
+
+    std::size_t sizeA = kc*mc;
+    std::size_t sizeB = kc*size;
+    std::size_t sizeW = kc*Traits::WorkSpaceFactor;
+
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());
+    ei_declare_aligned_stack_constructed_variable(Scalar, blockW, sizeW, blocking.blockW());
+
+    conj_if<Conjugate> conj;
+    gebp_kernel<Scalar,Scalar, Index, Traits::mr, Traits::nr, false, Conjugate> gebp_kernel;
+    gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;
+    gemm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder,false,true> pack_rhs_panel;
+    gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, ColMajor, false, true> pack_lhs_panel;
+
+    for(Index k2=IsLower ? size : 0;
+        IsLower ? k2>0 : k2<size;
+        IsLower ? k2-=kc : k2+=kc)
+    {
+      const Index actual_kc = (std::min)(IsLower ? k2 : size-k2, kc);
+      Index actual_k2 = IsLower ? k2-actual_kc : k2 ;
+
+      Index startPanel = IsLower ? 0 : k2+actual_kc;
+      Index rs = IsLower ? actual_k2 : size - actual_k2 - actual_kc;
+      Scalar* geb = blockB+actual_kc*actual_kc;
+
+      if (rs>0) pack_rhs(geb, &rhs(actual_k2,startPanel), triStride, actual_kc, rs);
+
+      // triangular packing (we only pack the panels off the diagonal,
+      // neglecting the blocks overlapping the diagonal)
+      {
+        for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)
+        {
+          Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+          Index actual_j2 = actual_k2 + j2;
+          Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
+          Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;
+
+          if (panelLength>0)
+          pack_rhs_panel(blockB+j2*actual_kc,
+                         &rhs(actual_k2+panelOffset, actual_j2), triStride,
+                         panelLength, actualPanelWidth,
+                         actual_kc, panelOffset);
+        }
+      }
+
+      for(Index i2=0; i2<rows; i2+=mc)
+      {
+        const Index actual_mc = (std::min)(mc,rows-i2);
+
+        // triangular solver kernel
+        {
+          // for each small block of the diagonal (=> vertical panels of rhs)
+          for (Index j2 = IsLower
+                      ? (actual_kc - ((actual_kc%SmallPanelWidth) ? Index(actual_kc%SmallPanelWidth)
+                                                                  : Index(SmallPanelWidth)))
+                      : 0;
+               IsLower ? j2>=0 : j2<actual_kc;
+               IsLower ? j2-=SmallPanelWidth : j2+=SmallPanelWidth)
+          {
+            Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);
+            Index absolute_j2 = actual_k2 + j2;
+            Index panelOffset = IsLower ? j2+actualPanelWidth : 0;
+            Index panelLength = IsLower ? actual_kc - j2 - actualPanelWidth : j2;
+
+            // GEBP
+            if(panelLength>0)
+            {
+              gebp_kernel(&lhs(i2,absolute_j2), otherStride,
+                          blockA, blockB+j2*actual_kc,
+                          actual_mc, panelLength, actualPanelWidth,
+                          Scalar(-1),
+                          actual_kc, actual_kc, // strides
+                          panelOffset, panelOffset, // offsets
+                          blockW);  // workspace
+            }
+
+            // unblocked triangular solve
+            for (Index k=0; k<actualPanelWidth; ++k)
+            {
+              Index j = IsLower ? absolute_j2+actualPanelWidth-k-1 : absolute_j2+k;
+
+              Scalar* r = &lhs(i2,j);
+              for (Index k3=0; k3<k; ++k3)
+              {
+                Scalar b = conj(rhs(IsLower ? j+1+k3 : absolute_j2+k3,j));
+                Scalar* a = &lhs(i2,IsLower ? j+1+k3 : absolute_j2+k3);
+                for (Index i=0; i<actual_mc; ++i)
+                  r[i] -= a[i] * b;
+              }
+              Scalar b = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(rhs(j,j));
+              for (Index i=0; i<actual_mc; ++i)
+                r[i] *= b;
+            }
+
+            // pack the just computed part of lhs to A
+            pack_lhs_panel(blockA, _other+absolute_j2*otherStride+i2, otherStride,
+                           actualPanelWidth, actual_mc,
+                           actual_kc, j2);
+          }
+        }
+
+        if (rs>0)
+          gebp_kernel(_other+i2+startPanel*otherStride, otherStride, blockA, geb,
+                      actual_mc, actual_kc, rs, Scalar(-1),
+                      -1, -1, 0, 0, blockW);
+      }
+    }
+  }
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_H
diff --git a/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h b/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
new file mode 100644
index 0000000..6a0bb83
--- /dev/null
+++ b/Eigen/src/Core/products/TriangularSolverMatrix_MKL.h
@@ -0,0 +1,155 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   Triangular matrix-matrix solve functionality based on ?TRSM.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_MKL_H
+#define EIGEN_TRIANGULAR_SOLVER_MATRIX_MKL_H
+
+namespace Eigen {
+
+namespace internal {
+
+// implements LeftSide op(triangular)^-1 * general
+#define EIGEN_MKL_TRSM_L(EIGTYPE, MKLTYPE, MKLPREFIX) \
+template <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \
+struct triangular_solve_matrix<EIGTYPE,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor> \
+{ \
+  enum { \
+    IsLower = (Mode&Lower) == Lower, \
+    IsUnitDiag  = (Mode&UnitDiag) ? 1 : 0, \
+    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \
+    conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \
+  }; \
+  static void run( \
+      Index size, Index otherSize, \
+      const EIGTYPE* _tri, Index triStride, \
+      EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
+  { \
+   MKL_INT m = size, n = otherSize, lda, ldb; \
+   char side = 'L', uplo, diag='N', transa; \
+   /* Set alpha_ */ \
+   MKLTYPE alpha; \
+   EIGTYPE myone(1); \
+   assign_scalar_eig2mkl(alpha, myone); \
+   ldb = otherStride;\
+\
+   const EIGTYPE *a; \
+/* Set trans */ \
+   transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \
+/* Set uplo */ \
+   uplo = IsLower ? 'L' : 'U'; \
+   if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
+/* Set a, lda */ \
+   typedef Matrix<EIGTYPE, Dynamic, Dynamic, TriStorageOrder> MatrixTri; \
+   Map<const MatrixTri, 0, OuterStride<> > tri(_tri,size,size,OuterStride<>(triStride)); \
+   MatrixTri a_tmp; \
+\
+   if (conjA) { \
+     a_tmp = tri.conjugate(); \
+     a = a_tmp.data(); \
+     lda = a_tmp.outerStride(); \
+   } else { \
+     a = _tri; \
+     lda = triStride; \
+   } \
+   if (IsUnitDiag) diag='U'; \
+/* call ?trsm*/ \
+   MKLPREFIX##trsm(&side, &uplo, &transa, &diag, &m, &n, &alpha, (const MKLTYPE*)a, &lda, (MKLTYPE*)_other, &ldb); \
+ } \
+};
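+// The macro above bypasses the blocked solver entirely: a single ?trsm call
+// overwrites _other with op(tri)^-1 * _other. A row-major triangular factor is
+// handled by viewing it as the transpose of a column-major one, i.e. by
+// flipping uplo and requesting op(.) through transa.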
+
+EIGEN_MKL_TRSM_L(double, double, d)
+EIGEN_MKL_TRSM_L(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_TRSM_L(float, float, s)
+EIGEN_MKL_TRSM_L(scomplex, MKL_Complex8, c)
+
+
+// implements RightSide general * op(triangular)^-1
+#define EIGEN_MKL_TRSM_R(EIGTYPE, MKLTYPE, MKLPREFIX) \
+template <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \
+struct triangular_solve_matrix<EIGTYPE,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor> \
+{ \
+  enum { \
+    IsLower = (Mode&Lower) == Lower, \
+    IsUnitDiag  = (Mode&UnitDiag) ? 1 : 0, \
+    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \
+    conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \
+  }; \
+  static void run( \
+      Index size, Index otherSize, \
+      const EIGTYPE* _tri, Index triStride, \
+      EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \
+  { \
+   MKL_INT m = otherSize, n = size, lda, ldb; \
+   char side = 'R', uplo, diag='N', transa; \
+   /* Set alpha_ */ \
+   MKLTYPE alpha; \
+   EIGTYPE myone(1); \
+   assign_scalar_eig2mkl(alpha, myone); \
+   ldb = otherStride;\
+\
+   const EIGTYPE *a; \
+/* Set trans */ \
+   transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \
+/* Set uplo */ \
+   uplo = IsLower ? 'L' : 'U'; \
+   if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \
+/* Set a, lda */ \
+   typedef Matrix<EIGTYPE, Dynamic, Dynamic, TriStorageOrder> MatrixTri; \
+   Map<const MatrixTri, 0, OuterStride<> > tri(_tri,size,size,OuterStride<>(triStride)); \
+   MatrixTri a_tmp; \
+\
+   if (conjA) { \
+     a_tmp = tri.conjugate(); \
+     a = a_tmp.data(); \
+     lda = a_tmp.outerStride(); \
+   } else { \
+     a = _tri; \
+     lda = triStride; \
+   } \
+   if (IsUnitDiag) diag='U'; \
+/* call ?trsm*/ \
+   MKLPREFIX##trsm(&side, &uplo, &transa, &diag, &m, &n, &alpha, (const MKLTYPE*)a, &lda, (MKLTYPE*)_other, &ldb); \
+ } \
+};
+
+EIGEN_MKL_TRSM_R(double, double, d)
+EIGEN_MKL_TRSM_R(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_TRSM_R(float, float, s)
+EIGEN_MKL_TRSM_R(scomplex, MKL_Complex8, c)
+
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_MKL_H
diff --git a/Eigen/src/Core/products/TriangularSolverVector.h b/Eigen/src/Core/products/TriangularSolverVector.h
new file mode 100644
index 0000000..ce4d100
--- /dev/null
+++ b/Eigen/src/Core/products/TriangularSolverVector.h
@@ -0,0 +1,139 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRIANGULAR_SOLVER_VECTOR_H
+#define EIGEN_TRIANGULAR_SOLVER_VECTOR_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate, int StorageOrder>
+struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheRight, Mode, Conjugate, StorageOrder>
+{
+  static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)
+  {
+    triangular_solve_vector<LhsScalar,RhsScalar,Index,OnTheLeft,
+        ((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag),
+        Conjugate,StorageOrder==RowMajor?ColMajor:RowMajor
+      >::run(size, _lhs, lhsStride, rhs);
+  }
+};
+    
+// forward and backward substitution, row-major, rhs is a vector
+template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate>
+struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, RowMajor>
+{
+  enum {
+    IsLower = ((Mode&Lower)==Lower)
+  };
+  static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)
+  {
+    typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,RowMajor>, 0, OuterStride<> > LhsMap;
+    const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride));
+    typename internal::conditional<
+                          Conjugate,
+                          const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>,
+                          const LhsMap&>
+                        ::type cjLhs(lhs);
+    static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+    for(Index pi=IsLower ? 0 : size;
+        IsLower ? pi<size : pi>0;
+        IsLower ? pi+=PanelWidth : pi-=PanelWidth)
+    {
+      Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);
+
+      Index r = IsLower ? pi : size - pi; // remaining size
+      if (r > 0)
+      {
+        // let's directly call the low level product function because:
+        // 1 - it is faster to compile
+        // 2 - it is slightly faster at runtime
+        Index startRow = IsLower ? pi : pi-actualPanelWidth;
+        Index startCol = IsLower ? 0 : pi;
+
+        general_matrix_vector_product<Index,LhsScalar,RowMajor,Conjugate,RhsScalar,false>::run(
+          actualPanelWidth, r,
+          &lhs.coeffRef(startRow,startCol), lhsStride,
+          rhs + startCol, 1,
+          rhs + startRow, 1,
+          RhsScalar(-1));
+      }
+
+      for(Index k=0; k<actualPanelWidth; ++k)
+      {
+        Index i = IsLower ? pi+k : pi-k-1;
+        Index s = IsLower ? pi   : i+1;
+        if (k>0)
+          rhs[i] -= (cjLhs.row(i).segment(s,k).transpose().cwiseProduct(Map<const Matrix<RhsScalar,Dynamic,1> >(rhs+s,k))).sum();
+        
+        if(!(Mode & UnitDiag))
+          rhs[i] /= cjLhs(i,i);
+      }
+    }
+  }
+};
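+// The loop above is blocked forward/backward substitution: for a lower
+// triangular L, each coefficient is obtained as
+//   x_i = (b_i - sum_{j<i} L(i,j)*x_j) / L(i,i),
+// where the sum over already-solved panels is delegated to the general
+// matrix-vector product kernel.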
+
+// forward and backward substitution, column-major, rhs is a vector
+template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate>
+struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, ColMajor>
+{
+  enum {
+    IsLower = ((Mode&Lower)==Lower)
+  };
+  static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)
+  {
+    typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > LhsMap;
+    const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride));
+    typename internal::conditional<Conjugate,
+                                   const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>,
+                                   const LhsMap&
+                                  >::type cjLhs(lhs);
+    static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;
+
+    for(Index pi=IsLower ? 0 : size;
+        IsLower ? pi<size : pi>0;
+        IsLower ? pi+=PanelWidth : pi-=PanelWidth)
+    {
+      Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);
+      Index startBlock = IsLower ? pi : pi-actualPanelWidth;
+      Index endBlock = IsLower ? pi + actualPanelWidth : 0;
+
+      for(Index k=0; k<actualPanelWidth; ++k)
+      {
+        Index i = IsLower ? pi+k : pi-k-1;
+        if(!(Mode & UnitDiag))
+          rhs[i] /= cjLhs.coeff(i,i);
+
+        Index r = actualPanelWidth - k - 1; // remaining size
+        Index s = IsLower ? i+1 : i-r;
+        if (r>0)
+          Map<Matrix<RhsScalar,Dynamic,1> >(rhs+s,r) -= rhs[i] * cjLhs.col(i).segment(s,r);
+      }
+      Index r = IsLower ? size - endBlock : startBlock; // remaining size
+      if (r > 0)
+      {
+        // let's directly call the low level product function because:
+        // 1 - it is faster to compile
+        // 2 - it is slightly faster at runtime
+        general_matrix_vector_product<Index,LhsScalar,ColMajor,Conjugate,RhsScalar,false>::run(
+            r, actualPanelWidth,
+            &lhs.coeffRef(endBlock,startBlock), lhsStride,
+            rhs+startBlock, 1,
+            rhs+endBlock, 1, RhsScalar(-1));
+      }
+    }
+  }
+};
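+// Note the two formulations: the row-major solver above accumulates dot
+// products over the already-solved coefficients (an inner-product, "left
+// looking" substitution), whereas this column-major solver eagerly updates the
+// not-yet-solved tail of the rhs after each coefficient (an axpy-based,
+// "right looking" substitution).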
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULAR_SOLVER_VECTOR_H
diff --git a/Eigen/src/Core/util/BlasUtil.h b/Eigen/src/Core/util/BlasUtil.h
new file mode 100644
index 0000000..a28f16f
--- /dev/null
+++ b/Eigen/src/Core/util/BlasUtil.h
@@ -0,0 +1,264 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BLASUTIL_H
+#define EIGEN_BLASUTIL_H
+
+// This file contains many lightweight helper classes used to
+// implement and control fast level 2 and level 3 BLAS-like routines.
+
+namespace Eigen {
+
+namespace internal {
+
+// forward declarations
+template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs=false, bool ConjugateRhs=false>
+struct gebp_kernel;
+
+template<typename Scalar, typename Index, int nr, int StorageOrder, bool Conjugate = false, bool PanelMode=false>
+struct gemm_pack_rhs;
+
+template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
+struct gemm_pack_lhs;
+
+template<
+  typename Index,
+  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
+  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
+  int ResStorageOrder>
+struct general_matrix_matrix_product;
+
+template<typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version=Specialized>
+struct general_matrix_vector_product;
+
+
+template<bool Conjugate> struct conj_if;
+
+template<> struct conj_if<true> {
+  template<typename T>
+  inline T operator()(const T& x) { return numext::conj(x); }
+  template<typename T>
+  inline T pconj(const T& x) { return internal::pconj(x); }
+};
+
+template<> struct conj_if<false> {
+  template<typename T>
+  inline const T& operator()(const T& x) { return x; }
+  template<typename T>
+  inline const T& pconj(const T& x) { return x; }
+};
+
+template<typename Scalar> struct conj_helper<Scalar,Scalar,false,false>
+{
+  EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return internal::pmadd(x,y,c); }
+  EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const { return internal::pmul(x,y); }
+};
+
+template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, false,true>
+{
+  typedef std::complex<RealScalar> Scalar;
+  EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
+  { return c + pmul(x,y); }
+
+  EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
+  { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::imag(x)*numext::real(y) - numext::real(x)*numext::imag(y)); }
+};
+
+template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,false>
+{
+  typedef std::complex<RealScalar> Scalar;
+  EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
+  { return c + pmul(x,y); }
+
+  EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
+  { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
+};
+
+template<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,true>
+{
+  typedef std::complex<RealScalar> Scalar;
+  EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const
+  { return c + pmul(x,y); }
+
+  EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const
+  { return Scalar(numext::real(x)*numext::real(y) - numext::imag(x)*numext::imag(y), - numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }
+};
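+// The specializations above expand the complex products by hand, e.g. for the
+// doubly conjugated case conj(x)*conj(y) = (Re(x)Re(y) - Im(x)Im(y),
+// -(Re(x)Im(y) + Im(x)Re(y))), which is exactly what pmul computes.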
+
+template<typename RealScalar,bool Conj> struct conj_helper<std::complex<RealScalar>, RealScalar, Conj,false>
+{
+  typedef std::complex<RealScalar> Scalar;
+  EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const RealScalar& y, const Scalar& c) const
+  { return padd(c, pmul(x,y)); }
+  EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const RealScalar& y) const
+  { return conj_if<Conj>()(x)*y; }
+};
+
+template<typename RealScalar,bool Conj> struct conj_helper<RealScalar, std::complex<RealScalar>, false,Conj>
+{
+  typedef std::complex<RealScalar> Scalar;
+  EIGEN_STRONG_INLINE Scalar pmadd(const RealScalar& x, const Scalar& y, const Scalar& c) const
+  { return padd(c, pmul(x,y)); }
+  EIGEN_STRONG_INLINE Scalar pmul(const RealScalar& x, const Scalar& y) const
+  { return x*conj_if<Conj>()(y); }
+};
+
+template<typename From,typename To> struct get_factor {
+  static EIGEN_STRONG_INLINE To run(const From& x) { return x; }
+};
+
+template<typename Scalar> struct get_factor<Scalar,typename NumTraits<Scalar>::Real> {
+  static EIGEN_STRONG_INLINE typename NumTraits<Scalar>::Real run(const Scalar& x) { return numext::real(x); }
+};
+
+// Lightweight helper class to access matrix coefficients.
+// Yes, this is somewhat redundant with Map<>, but this version is much lighter,
+// and should thus give better compilation performance (time and code quality).
+template<typename Scalar, typename Index, int StorageOrder>
+class blas_data_mapper
+{
+  public:
+    blas_data_mapper(Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
+    EIGEN_STRONG_INLINE Scalar& operator()(Index i, Index j)
+    { return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; }
+  protected:
+    Scalar* EIGEN_RESTRICT m_data;
+    Index m_stride;
+};
+
+// lightweight helper class to access matrix coefficients (const version)
+template<typename Scalar, typename Index, int StorageOrder>
+class const_blas_data_mapper
+{
+  public:
+    const_blas_data_mapper(const Scalar* data, Index stride) : m_data(data), m_stride(stride) {}
+    EIGEN_STRONG_INLINE const Scalar& operator()(Index i, Index j) const
+    { return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride]; }
+  protected:
+    const Scalar* EIGEN_RESTRICT m_data;
+    Index m_stride;
+};
+
+
+/* Helper class to analyze the factors of a Product expression.
+ * In particular it allows one to pop out unary minus (operator-), scalar
+ * multiples, and conjugation */
+template<typename XprType> struct blas_traits
+{
+  typedef typename traits<XprType>::Scalar Scalar;
+  typedef const XprType& ExtractType;
+  typedef XprType _ExtractType;
+  enum {
+    IsComplex = NumTraits<Scalar>::IsComplex,
+    IsTransposed = false,
+    NeedToConjugate = false,
+    HasUsableDirectAccess = (    (int(XprType::Flags)&DirectAccessBit)
+                              && (   bool(XprType::IsVectorAtCompileTime)
+                                  || int(inner_stride_at_compile_time<XprType>::ret) == 1)
+                             ) ?  1 : 0
+  };
+  typedef typename conditional<bool(HasUsableDirectAccess),
+    ExtractType,
+    typename _ExtractType::PlainObject
+    >::type DirectLinearAccessType;
+  static inline ExtractType extract(const XprType& x) { return x; }
+  static inline const Scalar extractScalarFactor(const XprType&) { return Scalar(1); }
+};
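+// For an expression such as s * m.conjugate().transpose(), the specializations
+// below peel the factors off one by one: extract() strips down to the
+// underlying plain matrix (marked as transposed) while extractScalarFactor()
+// returns the accumulated scalar, conjugated when needed.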
+
+// pop conjugate
+template<typename Scalar, typename NestedXpr>
+struct blas_traits<CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+  typedef blas_traits<NestedXpr> Base;
+  typedef CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> XprType;
+  typedef typename Base::ExtractType ExtractType;
+
+  enum {
+    IsComplex = NumTraits<Scalar>::IsComplex,
+    NeedToConjugate = Base::NeedToConjugate ? 0 : IsComplex
+  };
+  static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+  static inline Scalar extractScalarFactor(const XprType& x) { return conj(Base::extractScalarFactor(x.nestedExpression())); }
+};
+
+// pop scalar multiple
+template<typename Scalar, typename NestedXpr>
+struct blas_traits<CwiseUnaryOp<scalar_multiple_op<Scalar>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+  typedef blas_traits<NestedXpr> Base;
+  typedef CwiseUnaryOp<scalar_multiple_op<Scalar>, NestedXpr> XprType;
+  typedef typename Base::ExtractType ExtractType;
+  static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+  static inline Scalar extractScalarFactor(const XprType& x)
+  { return x.functor().m_other * Base::extractScalarFactor(x.nestedExpression()); }
+};
+
+// pop opposite
+template<typename Scalar, typename NestedXpr>
+struct blas_traits<CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+  typedef blas_traits<NestedXpr> Base;
+  typedef CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> XprType;
+  typedef typename Base::ExtractType ExtractType;
+  static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+  static inline Scalar extractScalarFactor(const XprType& x)
+  { return - Base::extractScalarFactor(x.nestedExpression()); }
+};
+
+// pop/push transpose
+template<typename NestedXpr>
+struct blas_traits<Transpose<NestedXpr> >
+ : blas_traits<NestedXpr>
+{
+  typedef typename NestedXpr::Scalar Scalar;
+  typedef blas_traits<NestedXpr> Base;
+  typedef Transpose<NestedXpr> XprType;
+  typedef Transpose<const typename Base::_ExtractType>  ExtractType; // const to get rid of a compile error; anyway blas traits are only used on the RHS
+  typedef Transpose<const typename Base::_ExtractType> _ExtractType;
+  typedef typename conditional<bool(Base::HasUsableDirectAccess),
+    ExtractType,
+    typename ExtractType::PlainObject
+    >::type DirectLinearAccessType;
+  enum {
+    IsTransposed = Base::IsTransposed ? 0 : 1
+  };
+  static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }
+  static inline Scalar extractScalarFactor(const XprType& x) { return Base::extractScalarFactor(x.nestedExpression()); }
+};
+
+template<typename T>
+struct blas_traits<const T>
+     : blas_traits<T>
+{};
+
+template<typename T, bool HasUsableDirectAccess=blas_traits<T>::HasUsableDirectAccess>
+struct extract_data_selector {
+  static const typename T::Scalar* run(const T& m)
+  {
+    return blas_traits<T>::extract(m).data();
+  }
+};
+
+template<typename T>
+struct extract_data_selector<T,false> {
+  static typename T::Scalar* run(const T&) { return 0; }
+};
+
+template<typename T> const typename T::Scalar* extract_data(const T& m)
+{
+  return extract_data_selector<T>::run(m);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BLASUTIL_H
diff --git a/Eigen/src/Core/util/CMakeLists.txt b/Eigen/src/Core/util/CMakeLists.txt
new file mode 100644
index 0000000..a1e2e52
--- /dev/null
+++ b/Eigen/src/Core/util/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Core_util_SRCS "*.h")
+
+INSTALL(FILES 
+  ${Eigen_Core_util_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Core/util COMPONENT Devel
+  )
diff --git a/Eigen/src/Core/util/Constants.h b/Eigen/src/Core/util/Constants.h
new file mode 100644
index 0000000..1e6277c
--- /dev/null
+++ b/Eigen/src/Core/util/Constants.h
@@ -0,0 +1,451 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CONSTANTS_H
+#define EIGEN_CONSTANTS_H
+
+namespace Eigen {
+
+/** This value means that a positive quantity (e.g., a size) is not known at compile-time, and that instead the value is
+  * stored in some runtime variable.
+  *
+  * Changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix.
+  */
+const int Dynamic = -1;
+
+/** This value means that a signed quantity (e.g., a signed index) is not known at compile-time, and that instead its value
+  * has to be specified at runtime.
+  */
+const int DynamicIndex = 0xffffff;
+
+/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm<int>().
+  * The value Infinity there means the L-infinity norm.
+  */
+const int Infinity = -1;
+
+/** \defgroup flags Flags
+  * \ingroup Core_Module
+  *
+  * These are the possible bits which can be OR'ed to constitute the flags of a matrix or
+  * expression.
+  *
+  * It is important to note that these flags are a purely compile-time notion. They are a compile-time property of
+  * an expression type, implemented as enums. They are not stored in memory at runtime, and they do not incur any
+  * runtime overhead.
+  *
+  * \sa MatrixBase::Flags
+  */
+
+/** \ingroup flags
+  *
+  * for a matrix, this means that the storage order is row-major.
+  * If this bit is not set, the storage order is column-major.
+  * For an expression, this determines the storage order of
+  * the matrix created by evaluation of that expression. 
+  * \sa \ref TopicStorageOrders */
+const unsigned int RowMajorBit = 0x1;
+
+/** \ingroup flags
+  *
+  * means the expression should be evaluated by the calling expression */
+const unsigned int EvalBeforeNestingBit = 0x2;
+
+/** \ingroup flags
+  *
+  * means the expression should be evaluated before any assignment */
+const unsigned int EvalBeforeAssigningBit = 0x4;
+
+/** \ingroup flags
+  *
+  * Short version: means the expression might be vectorized
+  *
+  * Long version: means that the coefficients can be handled by packets
+  * and start at a memory location whose alignment meets the requirements
+  * of the present CPU architecture for optimized packet access. In the fixed-size
+  * case, there is the additional condition that it be possible to access all the
+  * coefficients by packets (this implies the requirement that the size be a multiple of 16 bytes,
+  * and that any nontrivial strides don't break the alignment). In the dynamic-size case,
+  * there is no such condition on the total size and strides, so it might not be possible to access
+  * all coeffs by packets.
+  *
+  * \note This bit can be set regardless of whether vectorization is actually enabled.
+  *       To check for actual vectorizability, see \a ActualPacketAccessBit.
+  */
+const unsigned int PacketAccessBit = 0x8;
+
+#ifdef EIGEN_VECTORIZE
+/** \ingroup flags
+  *
+  * If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant
+  * is set to the value \a PacketAccessBit.
+  *
+  * If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant
+  * is set to the value 0.
+  */
+const unsigned int ActualPacketAccessBit = PacketAccessBit;
+#else
+const unsigned int ActualPacketAccessBit = 0x0;
+#endif
+
+/** \ingroup flags
+  *
+  * Short version: means the expression can be seen as 1D vector.
+  *
+  * Long version: means that one can access the coefficients
+  * of this expression by coeff(int), and coeffRef(int) in the case of an lvalue expression. These
+  * index-based access methods are guaranteed
+  * to not have to do any runtime computation of a (row, col)-pair from the index, so that it
+  * is guaranteed that whenever it is available, index-based access is at least as fast as
+  * (row,col)-based access. Expressions for which that isn't possible don't have the LinearAccessBit.
+  *
+  * If both PacketAccessBit and LinearAccessBit are set, then the
+  * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of an
+  * lvalue expression.
+  *
+  * Typically, all vector expressions have the LinearAccessBit, but there is one exception:
+  * Product expressions don't have it, because it would be troublesome for vectorization, even when the
+  * Product is a vector expression. Thus, vector Product expressions allow index-based coefficient access but
+  * not index-based packet access, so they don't have the LinearAccessBit.
+  */
+const unsigned int LinearAccessBit = 0x10;
+
+/** \ingroup flags
+  *
+  * Means the expression has a coeffRef() method, i.e. is writable as its individual coefficients are directly addressable.
+  * This rules out read-only expressions.
+  *
+  * Note that DirectAccessBit and LvalueBit are mutually orthogonal, as there are examples of expressions
+  * having one but not the other:
+  *   \li writable expressions that don't have a very simple memory layout as a strided array, have LvalueBit but not DirectAccessBit
+  *   \li Map-to-const expressions, for example Map<const Matrix>, have DirectAccessBit but not LvalueBit
+  *
+  * Expressions having LvalueBit also have their coeff() method returning a const reference instead of returning a new value.
+  */
+const unsigned int LvalueBit = 0x20;
+
+/** \ingroup flags
+  *
+  * Means that the underlying array of coefficients can be directly accessed as a plain strided array. The memory layout
+  * of the array of coefficients must be exactly the natural one suggested by rows(), cols(),
+  * outerStride(), innerStride(), and the RowMajorBit. This rules out expressions such as Diagonal, whose coefficients,
+  * though referencable, do not have such a regular memory layout.
+  *
+  * See the comment on LvalueBit for an explanation of how LvalueBit and DirectAccessBit are mutually orthogonal.
+  */
+const unsigned int DirectAccessBit = 0x40;
+
+/** \ingroup flags
+  *
+  * means the first coefficient packet is guaranteed to be aligned */
+const unsigned int AlignedBit = 0x80;
+
+const unsigned int NestByRefBit = 0x100;
+
+// list of flags that are inherited by default
+const unsigned int HereditaryBits = RowMajorBit
+                                  | EvalBeforeNestingBit
+                                  | EvalBeforeAssigningBit;
+
+/** \defgroup enums Enumerations
+  * \ingroup Core_Module
+  *
+  * Various enumerations used in %Eigen. Many of these are used as template parameters.
+  */
+
+/** \ingroup enums
+  * Enum containing possible values for the \p Mode parameter of 
+  * MatrixBase::selfadjointView() and MatrixBase::triangularView(). */
+enum {
+  /** View matrix as a lower triangular matrix. */
+  Lower=0x1,                      
+  /** View matrix as an upper triangular matrix. */
+  Upper=0x2,                      
+  /** %Matrix has ones on the diagonal; to be used in combination with #Lower or #Upper. */
+  UnitDiag=0x4, 
+  /** %Matrix has zeros on the diagonal; to be used in combination with #Lower or #Upper. */
+  ZeroDiag=0x8,
+  /** View matrix as a lower triangular matrix with ones on the diagonal. */
+  UnitLower=UnitDiag|Lower, 
+  /** View matrix as an upper triangular matrix with ones on the diagonal. */
+  UnitUpper=UnitDiag|Upper,
+  /** View matrix as a lower triangular matrix with zeros on the diagonal. */
+  StrictlyLower=ZeroDiag|Lower, 
+  /** View matrix as an upper triangular matrix with zeros on the diagonal. */
+  StrictlyUpper=ZeroDiag|Upper,
+  /** Used in BandMatrix and SelfAdjointView to indicate that the matrix is self-adjoint. */
+  SelfAdjoint=0x10,
+  /** Used to support symmetric, non-selfadjoint, complex matrices. */
+  Symmetric=0x20
+};
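+// For instance, m.triangularView<UnitLower>() interprets m as lower triangular
+// with an implicit unit diagonal, and m.selfadjointView<Upper>() reads only the
+// upper triangle of a self-adjoint matrix.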
+
+/** \ingroup enums
+  * Enum for indicating whether an object is aligned or not. */
+enum { 
+  /** Object is not correctly aligned for vectorization. */
+  Unaligned=0, 
+  /** Object is aligned for vectorization. */
+  Aligned=1 
+};
+
+/** \ingroup enums
+ * Enum used by DenseBase::corner() in Eigen2 compatibility mode. */
+// FIXME after the corner() API change, this is no longer needed, except by AlignedBox
+// TODO: find out what to do with that. Adapt the AlignedBox API ?
+enum CornerType { TopLeft, TopRight, BottomLeft, BottomRight };
+
+/** \ingroup enums
+  * Enum containing possible values for the \p Direction parameter of
+  * Reverse, PartialReduxExpr and VectorwiseOp. */
+enum DirectionType { 
+  /** For Reverse, all columns are reversed; 
+    * for PartialReduxExpr and VectorwiseOp, act on columns. */
+  Vertical, 
+  /** For Reverse, all rows are reversed; 
+    * for PartialReduxExpr and VectorwiseOp, act on rows. */
+  Horizontal, 
+  /** For Reverse, both rows and columns are reversed; 
+    * not used for PartialReduxExpr and VectorwiseOp. */
+  BothDirections 
+};
+
+/** \internal \ingroup enums
+  * Enum to specify how to traverse the entries of a matrix. */
+enum {
+  /** \internal Default traversal, no vectorization, no index-based access */
+  DefaultTraversal,
+  /** \internal No vectorization, use index-based access to have only one for loop instead of 2 nested loops */
+  LinearTraversal,
+  /** \internal Equivalent to a slice vectorization for fixed-size matrices having good alignment
+    * and good size */
+  InnerVectorizedTraversal,
+  /** \internal Vectorization path using a single loop plus scalar loops for the
+    * unaligned boundaries */
+  LinearVectorizedTraversal,
+  /** \internal Generic vectorization path using one vectorized loop per row/column with some
+    * scalar loops to handle the unaligned boundaries */
+  SliceVectorizedTraversal,
+  /** \internal Special case to properly handle incompatible scalar types or other defective cases */
+  InvalidTraversal,
+  /** \internal Evaluate all entries at once */
+  AllAtOnceTraversal
+};
+
+/** \internal \ingroup enums
+  * Enum to specify whether to unroll loops when traversing over the entries of a matrix. */
+enum {
+  /** \internal Do not unroll loops. */
+  NoUnrolling,
+  /** \internal Unroll only the inner loop, but not the outer loop. */
+  InnerUnrolling,
+  /** \internal Unroll both the inner and the outer loop. If there is only one loop, 
+    * because linear traversal is used, then unroll that loop. */
+  CompleteUnrolling
+};
+
+/** \internal \ingroup enums
+  * Enum to specify whether to use the default (built-in) implementation or the specialization. */
+enum {
+  Specialized,
+  BuiltIn
+};
+
+/** \ingroup enums
+  * Enum containing possible values for the \p _Options template parameter of
+  * Matrix, Array and BandMatrix. */
+enum {
+  /** Storage order is column major (see \ref TopicStorageOrders). */
+  ColMajor = 0,
+  /** Storage order is row major (see \ref TopicStorageOrders). */
+  RowMajor = 0x1,  // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that
+  /** Align the matrix itself if it is vectorizable fixed-size */
+  AutoAlign = 0,
+  /** Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be requested to be aligned) */ // FIXME --- clarify the situation
+  DontAlign = 0x2
+};
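+
+// Illustrative sketch: choosing the storage order through the _Options parameter:
+//   Matrix<float, 3, 3, RowMajor> a;              // row-major 3x3 matrix
+//   Matrix<float, Dynamic, Dynamic, ColMajor> b;  // column-major, same as MatrixXf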
+
+/** \ingroup enums
+  * Enum for specifying whether to apply or solve on the left or right. */
+enum {
+  /** Apply transformation on the left. */
+  OnTheLeft = 1,  
+  /** Apply transformation on the right. */
+  OnTheRight = 2  
+};
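+
+// Illustrative sketch, e.g. through MatrixBase::applyOnTheLeft/applyOnTheRight:
+//   m.applyOnTheLeft(p);  // m = p * m
+//   m.applyOnTheRight(p); // m = m * p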
+
+/* the following used to be written as:
+ *
+ *   struct NoChange_t {};
+ *   namespace {
+ *     EIGEN_UNUSED NoChange_t NoChange;
+ *   }
+ *
+ * on the grounds that it feels dangerous to disambiguate overloaded functions on enum/integer types.
+ * However, this leads to "variable declared but never referenced" warnings on Intel Composer XE,
+ * and we do not know how to get rid of them (bug 450).
+ */
+
+enum NoChange_t   { NoChange };
+enum Sequential_t { Sequential };
+enum Default_t    { Default };
+
+/** \internal \ingroup enums
+  * Used in AmbiVector. */
+enum {
+  IsDense         = 0,
+  IsSparse
+};
+
+/** \ingroup enums
+  * Used as template parameter in DenseCoeffBase and MapBase to indicate 
+  * which accessors should be provided. */
+enum AccessorLevels {
+  /** Read-only access via a member function. */
+  ReadOnlyAccessors, 
+  /** Read/write access via member functions. */
+  WriteAccessors, 
+  /** Direct read-only access to the coefficients. */
+  DirectAccessors, 
+  /** Direct read/write access to the coefficients. */
+  DirectWriteAccessors
+};
+
+/** \ingroup enums
+  * Enum with options to give to various decompositions. */
+enum DecompositionOptions {
+  /** \internal Not used (meant for LDLT?). */
+  Pivoting            = 0x01, 
+  /** \internal Not used (meant for LDLT?). */
+  NoPivoting          = 0x02, 
+  /** Used in JacobiSVD to indicate that the square matrix U is to be computed. */
+  ComputeFullU        = 0x04,
+  /** Used in JacobiSVD to indicate that the thin matrix U is to be computed. */
+  ComputeThinU        = 0x08,
+  /** Used in JacobiSVD to indicate that the square matrix V is to be computed. */
+  ComputeFullV        = 0x10,
+  /** Used in JacobiSVD to indicate that the thin matrix V is to be computed. */
+  ComputeThinV        = 0x20,
+  /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
+    * that only the eigenvalues are to be computed and not the eigenvectors. */
+  EigenvaluesOnly     = 0x40,
+  /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify
+    * that both the eigenvalues and the eigenvectors are to be computed. */
+  ComputeEigenvectors = 0x80,
+  /** \internal */
+  EigVecMask = EigenvaluesOnly | ComputeEigenvectors,
+  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+    * solve the generalized eigenproblem \f$ Ax = \lambda B x \f$. */
+  Ax_lBx              = 0x100,
+  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+    * solve the generalized eigenproblem \f$ ABx = \lambda x \f$. */
+  ABx_lx              = 0x200,
+  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should
+    * solve the generalized eigenproblem \f$ BAx = \lambda x \f$. */
+  BAx_lx              = 0x400,
+  /** \internal */
+  GenEigMask = Ax_lBx | ABx_lx | BAx_lx
+};
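+
+// Illustrative sketch of combining these options:
+//   JacobiSVD<MatrixXf> svd(m, ComputeThinU | ComputeThinV);
+//   SelfAdjointEigenSolver<MatrixXd> es(a, EigenvaluesOnly);
+//   GeneralizedSelfAdjointEigenSolver<MatrixXd> ges(a, b, ComputeEigenvectors | Ax_lBx);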
+
+/** \ingroup enums
+  * Possible values for the \p QRPreconditioner template parameter of JacobiSVD. */
+enum QRPreconditioners {
+  /** Do not specify what is to be done if the SVD of a non-square matrix is asked for. */
+  NoQRPreconditioner,
+  /** Use a QR decomposition without pivoting as the first step. */
+  HouseholderQRPreconditioner,
+  /** Use a QR decomposition with column pivoting as the first step. */
+  ColPivHouseholderQRPreconditioner,
+  /** Use a QR decomposition with full pivoting as the first step. */
+  FullPivHouseholderQRPreconditioner
+};
+
+#ifdef Success
+#error The preprocessor symbol 'Success' is defined, possibly by the X11 header file X.h
+#endif
+
+/** \ingroup enums
+  * Enum for reporting the status of a computation. */
+enum ComputationInfo {
+  /** Computation was successful. */
+  Success = 0,        
+  /** The provided data did not satisfy the prerequisites. */
+  NumericalIssue = 1, 
+  /** Iterative procedure did not converge. */
+  NoConvergence = 2,
+  /** The inputs are invalid, or the algorithm has been improperly called.
+    * When assertions are enabled, such errors trigger an assert. */
+  InvalidInput = 3
+};
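+
+// Illustrative sketch: checking the status after a decomposition:
+//   LLT<MatrixXd> llt(a);
+//   if(llt.info() != Success) { /* a was not positive definite */ }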
+
+/** \ingroup enums
+  * Enum used to specify how a particular transformation is stored in a matrix.
+  * \sa Transform, Hyperplane::transform(). */
+enum TransformTraits {
+  /** Transformation is an isometry. */
+  Isometry      = 0x1,
+  /** Transformation is an affine transformation stored as a (Dim+1)^2 matrix whose last row is 
+    * assumed to be [0 ... 0 1]. */
+  Affine        = 0x2,
+  /** Transformation is an affine transformation stored as a (Dim) x (Dim+1) matrix. */
+  AffineCompact = 0x10 | Affine,
+  /** Transformation is a general projective transformation stored as a (Dim+1)^2 matrix. */
+  Projective    = 0x20
+};
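+
+// Illustrative sketch for Dim == 3:
+//   Transform<float, 3, Affine>        t1; // stored as a 4x4 matrix
+//   Transform<float, 3, AffineCompact> t2; // stored as a 3x4 matrix
+//   Transform<float, 3, Projective>    t3; // general 4x4 projective transform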
+
+/** \internal \ingroup enums
+  * Enum used to choose between implementation depending on the computer architecture. */
+namespace Architecture
+{
+  enum Type {
+    Generic = 0x0,
+    SSE = 0x1,
+    AltiVec = 0x2,
+#if defined EIGEN_VECTORIZE_SSE
+    Target = SSE
+#elif defined EIGEN_VECTORIZE_ALTIVEC
+    Target = AltiVec
+#else
+    Target = Generic
+#endif
+  };
+}
+
+/** \internal \ingroup enums
+  * Enum used as template parameter in GeneralProduct. */
+enum { CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };
+
+/** \internal \ingroup enums
+  * Enum used in experimental parallel implementation. */
+enum Action {GetAction, SetAction};
+
+/** The type used to identify a dense storage. */
+struct Dense {};
+
+/** The type used to identify a matrix expression */
+struct MatrixXpr {};
+
+/** The type used to identify an array expression */
+struct ArrayXpr {};
+
+namespace internal {
+  /** \internal
+  * Constants for comparison functors
+  */
+  enum ComparisonName {
+    cmp_EQ = 0,
+    cmp_LT = 1,
+    cmp_LE = 2,
+    cmp_UNORD = 3,
+    cmp_NEQ = 4
+  };
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_CONSTANTS_H
diff --git a/Eigen/src/Core/util/DisableStupidWarnings.h b/Eigen/src/Core/util/DisableStupidWarnings.h
new file mode 100644
index 0000000..6a0bf06
--- /dev/null
+++ b/Eigen/src/Core/util/DisableStupidWarnings.h
@@ -0,0 +1,40 @@
+#ifndef EIGEN_WARNINGS_DISABLED
+#define EIGEN_WARNINGS_DISABLED
+
+#ifdef _MSC_VER
+  // 4100 - unreferenced formal parameter (occurred e.g. in aligned_allocator::destroy(pointer p))
+  // 4101 - unreferenced local variable
+  // 4127 - conditional expression is constant
+  // 4181 - qualifier applied to reference type ignored
+  // 4211 - nonstandard extension used : redefined extern to static
+  // 4244 - 'argument' : conversion from 'type1' to 'type2', possible loss of data
+  // 4273 - QtAlignedMalloc, inconsistent DLL linkage
+  // 4324 - structure was padded due to declspec(align())
+  // 4512 - assignment operator could not be generated
+  // 4522 - 'class' : multiple assignment operators specified
+  // 4700 - uninitialized local variable 'xyz' used
+  // 4717 - 'function' : recursive on all control paths, function will cause runtime stack overflow
+  #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+    #pragma warning( push )
+  #endif
+  #pragma warning( disable : 4100 4101 4127 4181 4211 4244 4273 4324 4512 4522 4700 4717 )
+#elif defined __INTEL_COMPILER
+  // 2196 - routine is both "inline" and "noinline" ("noinline" assumed)
+  //        ICC 12 generates this warning even without any inline keyword, when defining class methods 'inline' i.e. inside of class body
+  //        typedef that may be a reference type.
+  // 279  - controlling expression is constant
+  //        ICC 12 generates this warning on assert(constant_expression_depending_on_template_params) and frankly this is a legitimate use case.
+  #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+    #pragma warning push
+  #endif
+  #pragma warning disable 2196 279
+#elif defined __clang__
+  // -Wconstant-logical-operand - warning: use of logical && with constant operand; switch to bitwise & or remove constant
+  //     this is really a stupid warning as it warns on compile-time expressions involving enums
+  #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+    #pragma clang diagnostic push
+  #endif
+  #pragma clang diagnostic ignored "-Wconstant-logical-operand"
+#endif
+
+#endif // not EIGEN_WARNINGS_DISABLED
diff --git a/Eigen/src/Core/util/ForwardDeclarations.h b/Eigen/src/Core/util/ForwardDeclarations.h
new file mode 100644
index 0000000..d6a8145
--- /dev/null
+++ b/Eigen/src/Core/util/ForwardDeclarations.h
@@ -0,0 +1,299 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FORWARDDECLARATIONS_H
+#define EIGEN_FORWARDDECLARATIONS_H
+
+namespace Eigen {
+namespace internal {
+
+template<typename T> struct traits;
+
+// here we say once and for all that traits<const T> == traits<T>
+// When constness must affect traits, it has to be constness on template parameters on which T itself depends.
+// For example, traits<Map<const T> > != traits<Map<T> >, but
+//              traits<const Map<T> > == traits<Map<T> >
+template<typename T> struct traits<const T> : traits<T> {};
+
+template<typename Derived> struct has_direct_access
+{
+  enum { ret = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0 };
+};
+
+template<typename Derived> struct accessors_level
+{
+  enum { has_direct_access = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0,
+         has_write_access = (traits<Derived>::Flags & LvalueBit) ? 1 : 0,
+         value = has_direct_access ? (has_write_access ? DirectWriteAccessors : DirectAccessors)
+                                   : (has_write_access ? WriteAccessors       : ReadOnlyAccessors)
+  };
+};
+
+} // end namespace internal
+
+template<typename T> struct NumTraits;
+
+template<typename Derived> struct EigenBase;
+template<typename Derived> class DenseBase;
+template<typename Derived> class PlainObjectBase;
+
+
+template<typename Derived,
+         int Level = internal::accessors_level<Derived>::value >
+class DenseCoeffsBase;
+
+template<typename _Scalar, int _Rows, int _Cols,
+         int _Options = AutoAlign |
+#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4
+    // work around a bug in at least gcc 3.4.6:
+    // the innermost ?: ternary operator is misparsed. We write it slightly
+    // differently and this makes gcc 3.4.6 happy, but it's ugly.
+    // The error would only show up when EIGEN_DEFAULT_TO_ROW_MAJOR is defined
+    // (i.e. when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor).
+                          ( (_Rows==1 && _Cols!=1) ? RowMajor
+                          : !(_Cols==1 && _Rows!=1) ?  EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
+                          : ColMajor ),
+#else
+                          ( (_Rows==1 && _Cols!=1) ? RowMajor
+                          : (_Cols==1 && _Rows!=1) ? ColMajor
+                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+#endif
+         int _MaxRows = _Rows,
+         int _MaxCols = _Cols
+> class Matrix;
+
+template<typename Derived> class MatrixBase;
+template<typename Derived> class ArrayBase;
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged;
+template<typename ExpressionType, template <typename> class StorageBase > class NoAlias;
+template<typename ExpressionType> class NestByValue;
+template<typename ExpressionType> class ForceAlignedAccess;
+template<typename ExpressionType> class SwapWrapper;
+
+template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false> class Block;
+
+template<typename MatrixType, int Size=Dynamic> class VectorBlock;
+template<typename MatrixType> class Transpose;
+template<typename MatrixType> class Conjugate;
+template<typename NullaryOp, typename MatrixType>         class CwiseNullaryOp;
+template<typename UnaryOp,   typename MatrixType>         class CwiseUnaryOp;
+template<typename ViewOp,    typename MatrixType>         class CwiseUnaryView;
+template<typename BinaryOp,  typename Lhs, typename Rhs>  class CwiseBinaryOp;
+template<typename BinOp,     typename Lhs, typename Rhs>  class SelfCwiseBinaryOp;
+template<typename Derived,   typename Lhs, typename Rhs>  class ProductBase;
+template<typename Lhs, typename Rhs, int Mode>            class GeneralProduct;
+template<typename Lhs, typename Rhs, int NestingFlags>    class CoeffBasedProduct;
+
+template<typename Derived> class DiagonalBase;
+template<typename _DiagonalVectorType> class DiagonalWrapper;
+template<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime=SizeAtCompileTime> class DiagonalMatrix;
+template<typename MatrixType, typename DiagonalType, int ProductOrder> class DiagonalProduct;
+template<typename MatrixType, int Index = 0> class Diagonal;
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class PermutationMatrix;
+template<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class Transpositions;
+template<typename Derived> class PermutationBase;
+template<typename Derived> class TranspositionsBase;
+template<typename _IndicesType> class PermutationWrapper;
+template<typename _IndicesType> class TranspositionsWrapper;
+
+template<typename Derived,
+         int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
+> class MapBase;
+template<int InnerStrideAtCompileTime, int OuterStrideAtCompileTime> class Stride;
+template<typename MatrixType, int MapOptions=Unaligned, typename StrideType = Stride<0,0> > class Map;
+
+template<typename Derived> class TriangularBase;
+template<typename MatrixType, unsigned int Mode> class TriangularView;
+template<typename MatrixType, unsigned int Mode> class SelfAdjointView;
+template<typename MatrixType> class SparseView;
+template<typename ExpressionType> class WithFormat;
+template<typename MatrixType> struct CommaInitializer;
+template<typename Derived> class ReturnByValue;
+template<typename ExpressionType> class ArrayWrapper;
+template<typename ExpressionType> class MatrixWrapper;
+
+namespace internal {
+template<typename DecompositionType, typename Rhs> struct solve_retval_base;
+template<typename DecompositionType, typename Rhs> struct solve_retval;
+template<typename DecompositionType> struct kernel_retval_base;
+template<typename DecompositionType> struct kernel_retval;
+template<typename DecompositionType> struct image_retval_base;
+template<typename DecompositionType> struct image_retval;
+} // end namespace internal
+
+namespace internal {
+template<typename _Scalar, int Rows=Dynamic, int Cols=Dynamic, int Supers=Dynamic, int Subs=Dynamic, int Options=0> class BandMatrix;
+}
+
+namespace internal {
+template<typename Lhs, typename Rhs> struct product_type;
+}
+
+template<typename Lhs, typename Rhs,
+         int ProductType = internal::product_type<Lhs,Rhs>::value>
+struct ProductReturnType;
+
+// this is a workaround for sun CC
+template<typename Lhs, typename Rhs> struct LazyProductReturnType;
+
+namespace internal {
+
+// Provides scalar/packet-wise product and product with accumulation
+// with optional conjugation of the arguments.
+template<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRhs=false> struct conj_helper;
+
+template<typename Scalar> struct scalar_sum_op;
+template<typename Scalar> struct scalar_difference_op;
+template<typename LhsScalar,typename RhsScalar> struct scalar_conj_product_op;
+template<typename Scalar> struct scalar_opposite_op;
+template<typename Scalar> struct scalar_conjugate_op;
+template<typename Scalar> struct scalar_real_op;
+template<typename Scalar> struct scalar_imag_op;
+template<typename Scalar> struct scalar_abs_op;
+template<typename Scalar> struct scalar_abs2_op;
+template<typename Scalar> struct scalar_sqrt_op;
+template<typename Scalar> struct scalar_exp_op;
+template<typename Scalar> struct scalar_log_op;
+template<typename Scalar> struct scalar_cos_op;
+template<typename Scalar> struct scalar_sin_op;
+template<typename Scalar> struct scalar_acos_op;
+template<typename Scalar> struct scalar_asin_op;
+template<typename Scalar> struct scalar_tan_op;
+template<typename Scalar> struct scalar_pow_op;
+template<typename Scalar> struct scalar_inverse_op;
+template<typename Scalar> struct scalar_square_op;
+template<typename Scalar> struct scalar_cube_op;
+template<typename Scalar, typename NewType> struct scalar_cast_op;
+template<typename Scalar> struct scalar_multiple_op;
+template<typename Scalar> struct scalar_quotient1_op;
+template<typename Scalar> struct scalar_min_op;
+template<typename Scalar> struct scalar_max_op;
+template<typename Scalar> struct scalar_random_op;
+template<typename Scalar> struct scalar_add_op;
+template<typename Scalar> struct scalar_constant_op;
+template<typename Scalar> struct scalar_identity_op;
+
+template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_product_op;
+template<typename LhsScalar,typename RhsScalar> struct scalar_multiple2_op;
+template<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_quotient_op;
+
+} // end namespace internal
+
+struct IOFormat;
+
+// Array module
+template<typename _Scalar, int _Rows, int _Cols,
+         int _Options = AutoAlign |
+#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4
+    // work around a bug in at least gcc 3.4.6:
+    // the innermost ?: ternary operator is misparsed. We write it slightly
+    // differently and this makes gcc 3.4.6 happy, but it's ugly.
+    // The error would only show up when EIGEN_DEFAULT_TO_ROW_MAJOR is defined
+    // (i.e. when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor).
+                          ( (_Rows==1 && _Cols!=1) ? RowMajor
+                          : !(_Cols==1 && _Rows!=1) ?  EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
+                          : ColMajor ),
+#else
+                          ( (_Rows==1 && _Cols!=1) ? RowMajor
+                          : (_Cols==1 && _Rows!=1) ? ColMajor
+                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+#endif
+         int _MaxRows = _Rows, int _MaxCols = _Cols> class Array;
+template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> class Select;
+template<typename MatrixType, typename BinaryOp, int Direction> class PartialReduxExpr;
+template<typename ExpressionType, int Direction> class VectorwiseOp;
+template<typename MatrixType,int RowFactor,int ColFactor> class Replicate;
+template<typename MatrixType, int Direction = BothDirections> class Reverse;
+
+template<typename MatrixType> class FullPivLU;
+template<typename MatrixType> class PartialPivLU;
+namespace internal {
+template<typename MatrixType> struct inverse_impl;
+}
+template<typename MatrixType> class HouseholderQR;
+template<typename MatrixType> class ColPivHouseholderQR;
+template<typename MatrixType> class FullPivHouseholderQR;
+template<typename MatrixType, int QRPreconditioner = ColPivHouseholderQRPreconditioner> class JacobiSVD;
+template<typename MatrixType, int UpLo = Lower> class LLT;
+template<typename MatrixType, int UpLo = Lower> class LDLT;
+template<typename VectorsType, typename CoeffsType, int Side=OnTheLeft> class HouseholderSequence;
+template<typename Scalar>     class JacobiRotation;
+
+// Geometry module:
+template<typename Derived, int _Dim> class RotationBase;
+template<typename Lhs, typename Rhs> class Cross;
+template<typename Derived> class QuaternionBase;
+template<typename Scalar> class Rotation2D;
+template<typename Scalar> class AngleAxis;
+template<typename Scalar,int Dim> class Translation;
+
+#ifdef EIGEN2_SUPPORT
+template<typename Derived, int _Dim> class eigen2_RotationBase;
+template<typename Lhs, typename Rhs> class eigen2_Cross;
+template<typename Scalar> class eigen2_Quaternion;
+template<typename Scalar> class eigen2_Rotation2D;
+template<typename Scalar> class eigen2_AngleAxis;
+template<typename Scalar,int Dim> class eigen2_Transform;
+template <typename _Scalar, int _AmbientDim> class eigen2_ParametrizedLine;
+template <typename _Scalar, int _AmbientDim> class eigen2_Hyperplane;
+template<typename Scalar,int Dim> class eigen2_Translation;
+template<typename Scalar,int Dim> class eigen2_Scaling;
+#endif
+
+#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
+template<typename Scalar> class Quaternion;
+template<typename Scalar,int Dim> class Transform;
+template <typename _Scalar, int _AmbientDim> class ParametrizedLine;
+template <typename _Scalar, int _AmbientDim> class Hyperplane;
+template<typename Scalar,int Dim> class Scaling;
+#endif
+
+#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+template<typename Scalar, int Options = AutoAlign> class Quaternion;
+template<typename Scalar,int Dim,int Mode,int _Options=AutoAlign> class Transform;
+template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class ParametrizedLine;
+template <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class Hyperplane;
+template<typename Scalar> class UniformScaling;
+template<typename MatrixType,int Direction> class Homogeneous;
+#endif
+
+// MatrixFunctions module
+template<typename Derived> struct MatrixExponentialReturnValue;
+template<typename Derived> class MatrixFunctionReturnValue;
+template<typename Derived> class MatrixSquareRootReturnValue;
+template<typename Derived> class MatrixLogarithmReturnValue;
+template<typename Derived> class MatrixPowerReturnValue;
+template<typename Derived, typename Lhs, typename Rhs> class MatrixPowerProduct;
+
+namespace internal {
+template <typename Scalar>
+struct stem_function
+{
+  typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+  typedef ComplexScalar type(ComplexScalar, int);
+};
+}
+
+
+#ifdef EIGEN2_SUPPORT
+template<typename ExpressionType> class Cwise;
+template<typename MatrixType> class Minor;
+template<typename MatrixType> class LU;
+template<typename MatrixType> class QR;
+template<typename MatrixType> class SVD;
+namespace internal {
+template<typename MatrixType, unsigned int Mode> struct eigen2_part_return_type;
+}
+#endif
+
+} // end namespace Eigen
+
+#endif // EIGEN_FORWARDDECLARATIONS_H
diff --git a/Eigen/src/Core/util/MKL_support.h b/Eigen/src/Core/util/MKL_support.h
new file mode 100644
index 0000000..8acca9c
--- /dev/null
+++ b/Eigen/src/Core/util/MKL_support.h
@@ -0,0 +1,126 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *   Include file with common MKL declarations
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_MKL_SUPPORT_H
+#define EIGEN_MKL_SUPPORT_H
+
+#ifdef EIGEN_USE_MKL_ALL
+  #ifndef EIGEN_USE_BLAS
+    #define EIGEN_USE_BLAS
+  #endif
+  #ifndef EIGEN_USE_LAPACKE
+    #define EIGEN_USE_LAPACKE
+  #endif
+  #ifndef EIGEN_USE_MKL_VML
+    #define EIGEN_USE_MKL_VML
+  #endif
+#endif
+
+#ifdef EIGEN_USE_LAPACKE_STRICT
+  #define EIGEN_USE_LAPACKE
+#endif
+
+#if defined(EIGEN_USE_BLAS) || defined(EIGEN_USE_LAPACKE) || defined(EIGEN_USE_MKL_VML)
+  #define EIGEN_USE_MKL
+#endif
+
+#if defined EIGEN_USE_MKL
+#   include <mkl.h> 
+/* Check the Intel MKL version for compatibility: versions < 10.3 are not usable with Eigen */
+#   ifndef INTEL_MKL_VERSION
+#       undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */
+#   elif INTEL_MKL_VERSION < 100305    /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/
+#       undef EIGEN_USE_MKL
+#   endif
+#   ifndef EIGEN_USE_MKL
+    /*If the MKL version is too old, undef everything*/
+#       undef   EIGEN_USE_MKL_ALL
+#       undef   EIGEN_USE_BLAS
+#       undef   EIGEN_USE_LAPACKE
+#       undef   EIGEN_USE_MKL_VML
+#       undef   EIGEN_USE_LAPACKE_STRICT
+#       undef   EIGEN_USE_LAPACKE
+#   endif
+#endif
+
+#if defined EIGEN_USE_MKL
+#include <mkl_lapacke.h>
+#define EIGEN_MKL_VML_THRESHOLD 128
+
+namespace Eigen {
+
+typedef std::complex<double> dcomplex;
+typedef std::complex<float>  scomplex;
+
+namespace internal {
+
+template<typename MKLType, typename EigenType>
+static inline void assign_scalar_eig2mkl(MKLType& mklScalar, const EigenType& eigenScalar) {
+  mklScalar=eigenScalar;
+}
+
+template<typename MKLType, typename EigenType>
+static inline void assign_conj_scalar_eig2mkl(MKLType& mklScalar, const EigenType& eigenScalar) {
+  mklScalar=eigenScalar;
+}
+
+template <>
+inline void assign_scalar_eig2mkl<MKL_Complex16,dcomplex>(MKL_Complex16& mklScalar, const dcomplex& eigenScalar) {
+  mklScalar.real=eigenScalar.real();
+  mklScalar.imag=eigenScalar.imag();
+}
+
+template <>
+inline void assign_scalar_eig2mkl<MKL_Complex8,scomplex>(MKL_Complex8& mklScalar, const scomplex& eigenScalar) {
+  mklScalar.real=eigenScalar.real();
+  mklScalar.imag=eigenScalar.imag();
+}
+
+template <>
+inline void assign_conj_scalar_eig2mkl<MKL_Complex16,dcomplex>(MKL_Complex16& mklScalar, const dcomplex& eigenScalar) {
+  mklScalar.real=eigenScalar.real();
+  mklScalar.imag=-eigenScalar.imag();
+}
+
+template <>
+inline void assign_conj_scalar_eig2mkl<MKL_Complex8,scomplex>(MKL_Complex8& mklScalar, const scomplex& eigenScalar) {
+  mklScalar.real=eigenScalar.real();
+  mklScalar.imag=-eigenScalar.imag();
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif
+
+#endif // EIGEN_MKL_SUPPORT_H
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
new file mode 100644
index 0000000..f69970f
--- /dev/null
+++ b/Eigen/src/Core/util/Macros.h
@@ -0,0 +1,435 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MACROS_H
+#define EIGEN_MACROS_H
+
+#define EIGEN_WORLD_VERSION 3
+#define EIGEN_MAJOR_VERSION 2
+#define EIGEN_MINOR_VERSION 5
+
+#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
+                                      (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
+                                                                 EIGEN_MINOR_VERSION>=z))))
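+
+// Illustrative sketch: client code can guard version-dependent features with
+//   #if EIGEN_VERSION_AT_LEAST(3,2,0)
+//     ... code relying on features introduced in Eigen 3.2 ...
+//   #endif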
+#ifdef __GNUC__
+  #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)
+#else
+  #define EIGEN_GNUC_AT_LEAST(x,y) 0
+#endif
+ 
+#ifdef __GNUC__
+  #define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)
+#else
+  #define EIGEN_GNUC_AT_MOST(x,y) 0
+#endif
+
+#if EIGEN_GNUC_AT_MOST(4,3) && !defined(__clang__)
+  // see bug 89
+  #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
+#else
+  #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
+#endif
+
+#if defined(__GNUC__) && (__GNUC__ <= 3)
+#define EIGEN_GCC3_OR_OLDER 1
+#else
+#define EIGEN_GCC3_OR_OLDER 0
+#endif
+
+// 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
+// 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
+// enable alignment, but it can be a cause of problems on some platforms, so we just disable it on
+// certain common platforms (compiler+architecture combinations) to avoid these problems.
+// Only static alignment is really problematic (it relies on nonstandard compiler extensions that
+// don't work everywhere, for example on GCC/ARM), so we try to keep heap alignment even
+// when we have to disable static alignment.
+#if defined(__GNUC__) && !(defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || defined(__ppc__) || defined(__ia64__))
+#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1
+#else
+#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0
+#endif
+
+// static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX
+#if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \
+ && !EIGEN_GCC3_OR_OLDER \
+ && !defined(__SUNPRO_CC) \
+ && !defined(__QNXNTO__)
+  #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1
+#else
+  #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0
+#endif
+
+#ifdef EIGEN_DONT_ALIGN
+  #ifndef EIGEN_DONT_ALIGN_STATICALLY
+    #define EIGEN_DONT_ALIGN_STATICALLY
+  #endif
+  #define EIGEN_ALIGN 0
+#else
+  #define EIGEN_ALIGN 1
+#endif
+
+// EIGEN_ALIGN_STATICALLY is the true test of whether we want to align arrays on the stack or not. It takes into account both the user's explicit
+// choice to disable alignment (EIGEN_DONT_ALIGN_STATICALLY) and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT). From here on, only EIGEN_ALIGN_STATICALLY should be used.
+#if EIGEN_ARCH_WANTS_STACK_ALIGNMENT && !defined(EIGEN_DONT_ALIGN_STATICALLY)
+  #define EIGEN_ALIGN_STATICALLY 1
+#else
+  #define EIGEN_ALIGN_STATICALLY 0
+  #ifndef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+    #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT
+  #endif
+#endif
+
+#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION RowMajor
+#else
+#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ColMajor
+#endif
+
+#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE
+#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
+#endif
+
+// Cross compiler wrapper around LLVM's __has_builtin
+#ifdef __has_builtin
+#  define EIGEN_HAS_BUILTIN(x) __has_builtin(x)
+#else
+#  define EIGEN_HAS_BUILTIN(x) 0
+#endif
+
+/** Allows one to disable some optimizations which might affect the accuracy of the result.
+  * Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them.
+  * They currently include:
+  *   - single precision Cwise::sin() and Cwise::cos() when SSE vectorization is enabled.
+  */
+#ifndef EIGEN_FAST_MATH
+#define EIGEN_FAST_MATH 1
+#endif
+
+#define EIGEN_DEBUG_VAR(x) std::cerr << #x << " = " << x << std::endl;
+
+// concatenate two tokens
+#define EIGEN_CAT2(a,b) a ## b
+#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b)
+
+// convert a token to a string
+#define EIGEN_MAKESTRING2(a) #a
+#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)
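+
+// The two-level indirection ensures that macro arguments are expanded before
+// being pasted or stringized. For instance, EIGEN_MAKESTRING(EIGEN_WORLD_VERSION)
+// yields "3", whereas EIGEN_MAKESTRING2(EIGEN_WORLD_VERSION) would yield the
+// literal "EIGEN_WORLD_VERSION".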
+
+// EIGEN_STRONG_INLINE is a stronger version of the inline keyword, using __forceinline on MSVC,
+// but it still doesn't use GCC's always_inline. This is useful in (common) situations where MSVC needs __forceinline
+// but GCC does fine with plain inline.
+#if (defined _MSC_VER) || (defined __INTEL_COMPILER)
+#define EIGEN_STRONG_INLINE __forceinline
+#else
+#define EIGEN_STRONG_INLINE inline
+#endif
+
+// EIGEN_ALWAYS_INLINE is the strongest: it has the effect of making the function inline and adding every possible
+// attribute to maximize inlining. This should only be used when really necessary: in particular,
+// it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times.
+// FIXME with the always_inline attribute,
+// gcc 3.4.x reports the following compilation error:
+//   Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
+//    : function body not available
+#if EIGEN_GNUC_AT_LEAST(4,0)
+#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
+#else
+#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE
+#endif
+
+#if (defined __GNUC__)
+#define EIGEN_DONT_INLINE __attribute__((noinline))
+#elif (defined _MSC_VER)
+#define EIGEN_DONT_INLINE __declspec(noinline)
+#else
+#define EIGEN_DONT_INLINE
+#endif
+
+#if (defined __GNUC__)
+#define EIGEN_PERMISSIVE_EXPR __extension__
+#else
+#define EIGEN_PERMISSIVE_EXPR
+#endif
+
+// this macro allows one to get rid of linking errors about multiply defined functions.
+//  - static is not very good because it prevents definitions from different object files from being merged;
+//           static thus bloats the resulting linked executable with multiple copies of the same function.
+//  - inline is not perfect either, as it gives the compiler an unwanted hint toward inlining the function.
+#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
+#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS inline
+
+#ifdef NDEBUG
+# ifndef EIGEN_NO_DEBUG
+#  define EIGEN_NO_DEBUG
+# endif
+#endif
+
+// eigen_plain_assert is where we implement the workaround for the assert() bug in GCC <= 4.3, see bug 89
+#ifdef EIGEN_NO_DEBUG
+  #define eigen_plain_assert(x)
+#else
+  #if EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO
+    namespace Eigen {
+    namespace internal {
+    inline bool copy_bool(bool b) { return b; }
+    }
+    }
+    #define eigen_plain_assert(x) assert(x)
+  #else
+    // work around bug 89
+    #include <cstdlib>   // for abort
+    #include <iostream>  // for std::cerr
+
+    namespace Eigen {
+    namespace internal {
+    // trivial function copying a bool. Must be EIGEN_DONT_INLINE, so we implement it after including Eigen headers.
+    // see bug 89.
+    namespace {
+    EIGEN_DONT_INLINE bool copy_bool(bool b) { return b; }
+    }
+    inline void assert_fail(const char *condition, const char *function, const char *file, int line)
+    {
+      std::cerr << "assertion failed: " << condition << " in function " << function << " at " << file << ":" << line << std::endl;
+      abort();
+    }
+    }
+    }
+    #define eigen_plain_assert(x) \
+      do { \
+        if(!Eigen::internal::copy_bool(x)) \
+          Eigen::internal::assert_fail(EIGEN_MAKESTRING(x), __PRETTY_FUNCTION__, __FILE__, __LINE__); \
+      } while(false)
+  #endif
+#endif
+
+// eigen_assert can be overridden
+#ifndef eigen_assert
+#define eigen_assert(x) eigen_plain_assert(x)
+#endif
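+
+// Illustrative sketch: a client may define eigen_assert before including any
+// Eigen header, e.g. to throw instead of aborting:
+//   #include <stdexcept>
+//   #define eigen_assert(x) if(!(x)) { throw std::runtime_error(#x); }
+//   #include <Eigen/Dense>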
+
+#ifdef EIGEN_INTERNAL_DEBUGGING
+#define eigen_internal_assert(x) eigen_assert(x)
+#else
+#define eigen_internal_assert(x)
+#endif
+
+#ifdef EIGEN_NO_DEBUG
+#define EIGEN_ONLY_USED_FOR_DEBUG(x) (void)x
+#else
+#define EIGEN_ONLY_USED_FOR_DEBUG(x)
+#endif
+
+#ifndef EIGEN_NO_DEPRECATED_WARNING
+  #if (defined __GNUC__)
+    #define EIGEN_DEPRECATED __attribute__((deprecated))
+  #elif (defined _MSC_VER)
+    #define EIGEN_DEPRECATED __declspec(deprecated)
+  #else
+    #define EIGEN_DEPRECATED
+  #endif
+#else
+  #define EIGEN_DEPRECATED
+#endif
+
+#if (defined __GNUC__)
+#define EIGEN_UNUSED __attribute__((unused))
+#else
+#define EIGEN_UNUSED
+#endif
+
+// Suppresses 'unused variable' warnings.
+namespace Eigen {
+  namespace internal {
+    template<typename T> void ignore_unused_variable(const T&) {}
+  }
+}
+#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var);
+
+#if !defined(EIGEN_ASM_COMMENT)
+  #if (defined __GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
+    #define EIGEN_ASM_COMMENT(X)  __asm__("#" X)
+  #else
+    #define EIGEN_ASM_COMMENT(X)
+  #endif
+#endif
+
+/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. This is used to satisfy SIMD requirements.
+ * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,
+ * so that vectorization doesn't affect binary compatibility.
+ *
+ * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link
+ * vectorized and non-vectorized code.
+ */
+#if (defined __GNUC__) || (defined __PGI) || (defined __IBMCPP__) || (defined __ARMCC_VERSION)
+  #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+#elif (defined _MSC_VER)
+  #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))
+#elif (defined __SUNPRO_CC)
+  // FIXME not sure about this one:
+  #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))
+#else
+  #error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
+#endif
+
+#define EIGEN_ALIGN8  EIGEN_ALIGN_TO_BOUNDARY(8)
+#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
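+
+// Illustrative sketch: forcing 16-byte alignment of a member for SIMD use
+// ('Foo' is an arbitrary example name):
+//   struct Foo { EIGEN_ALIGN16 float coeffs[4]; };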
+
+#if EIGEN_ALIGN_STATICALLY
+#define EIGEN_USER_ALIGN_TO_BOUNDARY(n) EIGEN_ALIGN_TO_BOUNDARY(n)
+#define EIGEN_USER_ALIGN16 EIGEN_ALIGN16
+#else
+#define EIGEN_USER_ALIGN_TO_BOUNDARY(n)
+#define EIGEN_USER_ALIGN16
+#endif
+
+#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD
+  #define EIGEN_RESTRICT
+#endif
+#ifndef EIGEN_RESTRICT
+  #define EIGEN_RESTRICT __restrict
+#endif
+
+#ifndef EIGEN_STACK_ALLOCATION_LIMIT
+// 131072 == 128 KB
+#define EIGEN_STACK_ALLOCATION_LIMIT 131072
+#endif
+
+#ifndef EIGEN_DEFAULT_IO_FORMAT
+#ifdef EIGEN_MAKING_DOCS
+// format used in Eigen's documentation
+// needed to define it here as escaping characters in CMake add_definition's argument seems very problematic.
+#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(3, 0, " ", "\n", "", "")
+#else
+#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat()
+#endif
+#endif
+
+// just an empty macro!
+#define EIGEN_EMPTY
+
+#if defined(_MSC_VER) && (_MSC_VER < 1800) && (!defined(__INTEL_COMPILER))
+#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+  using Base::operator =;
+#elif defined(__clang__) // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
+#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+  using Base::operator =; \
+  EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \
+  template <typename OtherDerived> \
+  EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other) { Base::operator=(other.derived()); return *this; }
+#else
+#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
+  using Base::operator =; \
+  EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \
+  { \
+    Base::operator=(other); \
+    return *this; \
+  }
+#endif
+
+/** \internal
+ * \brief Macro to manually inherit assignment operators.
+ * This is necessary because the implicitly defined assignment operator gets deleted when a custom operator= is defined.
+ */
+#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
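+
+// Illustrative sketch: a class deriving from an Eigen type can regain the
+// base assignment operators like so (the macro expansion requires a 'Base'
+// typedef to be visible):
+//   class MyVectorType : public Matrix<float,3,1> {
+//     typedef Matrix<float,3,1> Base;
+//   public:
+//     EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MyVectorType)
+//   };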
+
+/**
+* A side note: within #defines, Doxygen comments only work when placed after
+* the object (via '!<'). Such comments cannot span multiple lines, hence the
+* extra-long lines below. What confuses Doxygen here is that we use '\' line
+* continuations and thus effectively have a bunch of typedefs, each with its
+* documentation, on a single line.
+**/
+
+#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
+  typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T corresponds to RealScalar. */ \
+  typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
+  typedef typename Eigen::internal::nested<Derived>::type Nested; \
+  typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+  typedef typename Eigen::internal::traits<Derived>::Index Index; \
+  enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+        ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+        Flags = Eigen::internal::traits<Derived>::Flags, \
+        CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
+        SizeAtCompileTime = Base::SizeAtCompileTime, \
+        MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
+        IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
+
+
+#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \
+  typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \brief Numeric type, e.g. float, double, int or std::complex<float>. */ \
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \brief The underlying numeric type for composed scalar types. \details In cases where Scalar is e.g. std::complex<T>, T corresponds to RealScalar. */ \
+  typedef typename Base::PacketScalar PacketScalar; \
+  typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \brief The return type for coefficient access. \details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. */ \
+  typedef typename Eigen::internal::nested<Derived>::type Nested; \
+  typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+  typedef typename Eigen::internal::traits<Derived>::Index Index; \
+  enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+        ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+        MaxRowsAtCompileTime = Eigen::internal::traits<Derived>::MaxRowsAtCompileTime, \
+        MaxColsAtCompileTime = Eigen::internal::traits<Derived>::MaxColsAtCompileTime, \
+        Flags = Eigen::internal::traits<Derived>::Flags, \
+        CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
+        SizeAtCompileTime = Base::SizeAtCompileTime, \
+        MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
+        IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+  using Base::derived; \
+  using Base::const_cast_derived;
+
+
+#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
+#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
+
+// EIGEN_SIZE_MIN_PREFER_DYNAMIC gives the min between compile-time sizes. 0 has absolute priority, followed by 1,
+// followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over
+// finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3.
+#define EIGEN_SIZE_MIN_PREFER_DYNAMIC(a,b) (((int)a == 0 || (int)b == 0) ? 0 \
+                           : ((int)a == 1 || (int)b == 1) ? 1 \
+                           : ((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
+                           : ((int)a <= (int)b) ? (int)a : (int)b)
+
+// EIGEN_SIZE_MIN_PREFER_FIXED is a variant of EIGEN_SIZE_MIN_PREFER_DYNAMIC comparing MaxSizes. The difference is that finite values
+// now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is
+// (between 0 and 3), it is not more than 3.
+#define EIGEN_SIZE_MIN_PREFER_FIXED(a,b)  (((int)a == 0 || (int)b == 0) ? 0 \
+                           : ((int)a == 1 || (int)b == 1) ? 1 \
+                           : ((int)a == Dynamic && (int)b == Dynamic) ? Dynamic \
+                           : ((int)a == Dynamic) ? (int)b \
+                           : ((int)b == Dynamic) ? (int)a \
+                           : ((int)a <= (int)b) ? (int)a : (int)b)
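+
+// Worked examples: EIGEN_SIZE_MIN_PREFER_DYNAMIC(3, Dynamic) yields Dynamic,
+// while EIGEN_SIZE_MIN_PREFER_FIXED(3, Dynamic) yields 3; both yield 0 as
+// soon as either argument is 0.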
+
+// see EIGEN_SIZE_MIN_PREFER_DYNAMIC. No need for a separate variant for MaxSizes here.
+#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
+                           : ((int)a >= (int)b) ? (int)a : (int)b)
+
+#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))
+
+#define EIGEN_IMPLIES(a,b) (!(a) || (b))
+
+#define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR) \
+  template<typename OtherDerived> \
+  EIGEN_STRONG_INLINE const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> \
+  (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+  { \
+    return CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived>(derived(), other.derived()); \
+  }
+
+// the expression type of a cwise product
+#define EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS) \
+    CwiseBinaryOp< \
+      internal::scalar_product_op< \
+          typename internal::traits<LHS>::Scalar, \
+          typename internal::traits<RHS>::Scalar \
+      >, \
+      const LHS, \
+      const RHS \
+    >
+
+#endif // EIGEN_MACROS_H
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
new file mode 100644
index 0000000..b9af5cf
--- /dev/null
+++ b/Eigen/src/Core/util/Memory.h
@@ -0,0 +1,977 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Kenneth Riddile <kfriddile@yahoo.com>
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+// Copyright (C) 2010 Thomas Capricelli <orzel@freehackers.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+/*****************************************************************************
+*** Platform checks for aligned malloc functions                           ***
+*****************************************************************************/
+
+#ifndef EIGEN_MEMORY_H
+#define EIGEN_MEMORY_H
+
+#ifndef EIGEN_MALLOC_ALREADY_ALIGNED
+
+// Try to determine automatically if malloc is already aligned.
+
+// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:
+//   http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html
+// This is true at least since glibc 2.8.
+// This leaves the question how to detect 64-bit. According to this document,
+//   http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf
+// page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports" so it's indeed
+// quite safe, at least within the context of glibc, to equate 64-bit with LP64.
+#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \
+ && defined(__LP64__) && ! defined( __SANITIZE_ADDRESS__ )
+  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
+#else
+  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
+#endif
+
+// FreeBSD 6 seems to have 16-byte aligned malloc
+//   See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup
+// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures
+//   See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup
+#if defined(__FreeBSD__) && !defined(__arm__) && !defined(__mips__)
+  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1
+#else
+  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0
+#endif
+
+#if defined(__APPLE__) \
+ || defined(_WIN64) \
+ || EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED \
+ || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED
+  #define EIGEN_MALLOC_ALREADY_ALIGNED 1
+#else
+  #define EIGEN_MALLOC_ALREADY_ALIGNED 0
+#endif
+
+#endif
+
+// See bug 554 (http://eigen.tuxfamily.org/bz/show_bug.cgi?id=554)
+// It seems to be unsafe to check _POSIX_ADVISORY_INFO without including unistd.h first.
+// Currently, let's include it only on unix systems:
+#if defined(__unix__) || defined(__unix)
+  #include <unistd.h>
+  #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || (defined __PGI) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
+    #define EIGEN_HAS_POSIX_MEMALIGN 1
+  #endif
+#endif
+
+#ifndef EIGEN_HAS_POSIX_MEMALIGN
+  #define EIGEN_HAS_POSIX_MEMALIGN 0
+#endif
+
+#ifdef EIGEN_VECTORIZE_SSE
+  #define EIGEN_HAS_MM_MALLOC 1
+#else
+  #define EIGEN_HAS_MM_MALLOC 0
+#endif
+
+namespace Eigen {
+
+namespace internal {
+
+inline void throw_std_bad_alloc()
+{
+  #ifdef EIGEN_EXCEPTIONS
+    throw std::bad_alloc();
+  #else
+    std::size_t huge = -1;
+    new int[huge];
+  #endif
+}
+
+/*****************************************************************************
+*** Implementation of handmade aligned functions                           ***
+*****************************************************************************/
+
+/* ----- Hand made implementations of aligned malloc/free and realloc ----- */
+
+/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned.
+  * Fast, but wastes 16 additional bytes of memory. Does not throw any exception.
+  */
+inline void* handmade_aligned_malloc(std::size_t size)
+{
+  void *original = std::malloc(size+16);
+  if (original == 0) return 0;
+  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(15))) + 16);
+  *(reinterpret_cast<void**>(aligned) - 1) = original;
+  return aligned;
+}
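+
+// Worked example (illustrative): if std::malloc returns original == 0x1003,
+// then aligned == (0x1003 & ~15) + 16 == 0x1010; the bytes just below 0x1010
+// store the original pointer so that handmade_aligned_free can recover it.
+// In the worst case (original already 16-byte aligned) the full 16 extra
+// bytes are consumed, matching the "wastes 16 additional bytes" note above.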
+
+/** \internal Frees memory allocated with handmade_aligned_malloc */
+inline void handmade_aligned_free(void *ptr)
+{
+  if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
+}
+
+/** \internal
+  * \brief Reallocates aligned memory.
+  * Since our handmade version is based on std::malloc, we can rely on
+  * std::realloc to implement efficient reallocation.
+  */
+inline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0)
+{
+  if (ptr == 0) return handmade_aligned_malloc(size);
+  void *original = *(reinterpret_cast<void**>(ptr) - 1);
+  std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original);
+  original = std::realloc(original,size+16);
+  if (original == 0) return 0;
+  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(15))) + 16);
+  void *previous_aligned = static_cast<char *>(original)+previous_offset;
+  if(aligned!=previous_aligned)
+    std::memmove(aligned, previous_aligned, size);
+  
+  *(reinterpret_cast<void**>(aligned) - 1) = original;
+  return aligned;
+}
+
+/*****************************************************************************
+*** Implementation of generic aligned realloc (when no realloc can be used)***
+*****************************************************************************/
+
+void* aligned_malloc(std::size_t size);
+void  aligned_free(void *ptr);
+
+/** \internal
+  * \brief Reallocates aligned memory.
+  * Allows reallocation with aligned ptr types. This implementation will
+  * always create a new memory chunk and copy the old data.
+  */
+inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size)
+{
+  if (ptr==0)
+    return aligned_malloc(size);
+
+  if (size==0)
+  {
+    aligned_free(ptr);
+    return 0;
+  }
+
+  void* newptr = aligned_malloc(size);
+  if (newptr == 0)
+  {
+    #ifdef EIGEN_HAS_ERRNO
+    errno = ENOMEM; // according to the standard
+    #endif
+    return 0;
+  }
+
+  if (ptr != 0)
+  {
+    std::memcpy(newptr, ptr, (std::min)(size,old_size));
+    aligned_free(ptr);
+  }
+
+  return newptr;
+}
+
+/*****************************************************************************
+*** Implementation of portable aligned versions of malloc/free/realloc     ***
+*****************************************************************************/
+
+#ifdef EIGEN_NO_MALLOC
+inline void check_that_malloc_is_allowed()
+{
+  eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
+}
+#elif defined EIGEN_RUNTIME_NO_MALLOC
+inline bool is_malloc_allowed_impl(bool update, bool new_value = false)
+{
+  static bool value = true;
+  if (update)
+    value = new_value;
+  return value;
+}
+inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }
+inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }
+inline void check_that_malloc_is_allowed()
+{
+  eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)");
+}
+#else 
+inline void check_that_malloc_is_allowed()
+{}
+#endif
+
+/** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16 bytes alignment.
+  * On allocation error, the returned pointer is null, and std::bad_alloc is thrown.
+  */
+inline void* aligned_malloc(size_t size)
+{
+  check_that_malloc_is_allowed();
+
+  void *result;
+  #if !EIGEN_ALIGN
+    result = std::malloc(size);
+  #elif EIGEN_MALLOC_ALREADY_ALIGNED
+    result = std::malloc(size);
+  #elif EIGEN_HAS_POSIX_MEMALIGN
+    if(posix_memalign(&result, 16, size)) result = 0;
+  #elif EIGEN_HAS_MM_MALLOC
+    result = _mm_malloc(size, 16);
+  #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
+    result = _aligned_malloc(size, 16);
+  #else
+    result = handmade_aligned_malloc(size);
+  #endif
+
+  if(!result && size)
+    throw_std_bad_alloc();
+
+  return result;
+}
+
+/** \internal Frees memory allocated with aligned_malloc. */
+inline void aligned_free(void *ptr)
+{
+  #if !EIGEN_ALIGN
+    std::free(ptr);
+  #elif EIGEN_MALLOC_ALREADY_ALIGNED
+    std::free(ptr);
+  #elif EIGEN_HAS_POSIX_MEMALIGN
+    std::free(ptr);
+  #elif EIGEN_HAS_MM_MALLOC
+    _mm_free(ptr);
+  #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
+    _aligned_free(ptr);
+  #else
+    handmade_aligned_free(ptr);
+  #endif
+}
+
+/**
+* \internal
+* \brief Reallocates an aligned block of memory.
+* \throws std::bad_alloc on allocation failure
+**/
+inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
+{
+  EIGEN_UNUSED_VARIABLE(old_size);
+
+  void *result;
+#if !EIGEN_ALIGN
+  result = std::realloc(ptr,new_size);
+#elif EIGEN_MALLOC_ALREADY_ALIGNED
+  result = std::realloc(ptr,new_size);
+#elif EIGEN_HAS_POSIX_MEMALIGN
+  result = generic_aligned_realloc(ptr,new_size,old_size);
+#elif EIGEN_HAS_MM_MALLOC
+  // The defined(_mm_free) is just here to verify that this MSVC version
+  // implements _mm_malloc/_mm_free based on the corresponding _aligned_
+  // functions. This may not always be the case and we just try to be safe.
+  #if defined(_MSC_VER) && (!defined(_WIN32_WCE)) && defined(_mm_free)
+    result = _aligned_realloc(ptr,new_size,16);
+  #else
+    result = generic_aligned_realloc(ptr,new_size,old_size);
+  #endif
+#elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
+  result = _aligned_realloc(ptr,new_size,16);
+#else
+  result = handmade_aligned_realloc(ptr,new_size,old_size);
+#endif
+
+  if (!result && new_size)
+    throw_std_bad_alloc();
+
+  return result;
+}
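+
+// Sketch: growing an aligned buffer from 64 to 128 bytes (sizes and the name p
+// are illustrative); whichever backend is selected above, the first
+// min(new_size, old_size) bytes are preserved:
+//
+//   p = Eigen::internal::aligned_realloc(p, 128, 64);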
+
+/*****************************************************************************
+*** Implementation of conditionally aligned functions                      ***
+*****************************************************************************/
+
+/** \internal Allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned.
+  * On allocation failure, std::bad_alloc is thrown.
+  */
+template<bool Align> inline void* conditional_aligned_malloc(size_t size)
+{
+  return aligned_malloc(size);
+}
+
+template<> inline void* conditional_aligned_malloc<false>(size_t size)
+{
+  check_that_malloc_is_allowed();
+
+  void *result = std::malloc(size);
+  if(!result && size)
+    throw_std_bad_alloc();
+  return result;
+}
+
+/** \internal Frees memory allocated with conditional_aligned_malloc */
+template<bool Align> inline void conditional_aligned_free(void *ptr)
+{
+  aligned_free(ptr);
+}
+
+template<> inline void conditional_aligned_free<false>(void *ptr)
+{
+  std::free(ptr);
+}
+
+template<bool Align> inline void* conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size)
+{
+  return aligned_realloc(ptr, new_size, old_size);
+}
+
+template<> inline void* conditional_aligned_realloc<false>(void* ptr, size_t new_size, size_t)
+{
+  return std::realloc(ptr, new_size);
+}
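+
+// Usage sketch for the conditional family: Align is a compile-time flag, so the
+// alignment policy is chosen with zero runtime cost (sizes and the name p are
+// illustrative):
+//
+//   void* p = Eigen::internal::conditional_aligned_malloc<true>(256);
+//   p = Eigen::internal::conditional_aligned_realloc<true>(p, 512, 256);
+//   Eigen::internal::conditional_aligned_free<true>(p);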
+
+/*****************************************************************************
+*** Construction/destruction of array elements                             ***
+*****************************************************************************/
+
+/** \internal Constructs the elements of an array.
+  * The \a size parameter specifies on how many objects the default constructor of T is called.
+  */
+template<typename T> inline T* construct_elements_of_array(T *ptr, size_t size)
+{
+  for (size_t i=0; i < size; ++i) ::new (ptr + i) T;
+  return ptr;
+}
+
+/** \internal Destructs the elements of an array.
+  * The \a size parameter specifies on how many objects the destructor of T is called.
+  */
+template<typename T> inline void destruct_elements_of_array(T *ptr, size_t size)
+{
+  // always destruct an array starting from the end.
+  if(ptr)
+    while(size) ptr[--size].~T();
+}
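+
+// Sketch: turning raw storage into an array of default-constructed T's and back
+// again; this is essentially what aligned_new/aligned_delete below do for you:
+//
+//   T* raw = static_cast<T*>(Eigen::internal::aligned_malloc(3*sizeof(T)));
+//   T* arr = Eigen::internal::construct_elements_of_array(raw, 3);
+//   Eigen::internal::destruct_elements_of_array(arr, 3);
+//   Eigen::internal::aligned_free(arr);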
+
+/*****************************************************************************
+*** Implementation of aligned new/delete-like functions                    ***
+*****************************************************************************/
+
+template<typename T>
+EIGEN_ALWAYS_INLINE void check_size_for_overflow(size_t size)
+{
+  if(size > size_t(-1) / sizeof(T))
+    throw_std_bad_alloc();
+}
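+
+// e.g. with sizeof(T)==8 on a 64-bit target, any size above 2^61-1 would make
+// sizeof(T)*size wrap around; the guard above throws before the multiplication
+// in the callers below can overflow.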
+
+/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16-byte alignment.
+  * On allocation failure, std::bad_alloc is thrown.
+  * The default constructor of T is called on every element.
+  */
+template<typename T> inline T* aligned_new(size_t size)
+{
+  check_size_for_overflow<T>(size);
+  T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
+  return construct_elements_of_array(result, size);
+}
+
+template<typename T, bool Align> inline T* conditional_aligned_new(size_t size)
+{
+  check_size_for_overflow<T>(size);
+  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
+  return construct_elements_of_array(result, size);
+}
+
+/** \internal Deletes objects constructed with aligned_new
+  * The \a size parameter specifies on how many objects the destructor of T is called.
+  */
+template<typename T> inline void aligned_delete(T *ptr, size_t size)
+{
+  destruct_elements_of_array<T>(ptr, size);
+  aligned_free(ptr);
+}
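+
+// Usage sketch (n is an illustrative runtime count; aligned_new also
+// default-constructs non-trivial types, and aligned_delete destructs them in
+// reverse order before freeing):
+//
+//   double* v = Eigen::internal::aligned_new<double>(n);
+//   Eigen::internal::aligned_delete(v, n);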
+
+/** \internal Deletes objects constructed with conditional_aligned_new
+  * The \a size parameter specifies on how many objects the destructor of T is called.
+  */
+template<typename T, bool Align> inline void conditional_aligned_delete(T *ptr, size_t size)
+{
+  destruct_elements_of_array<T>(ptr, size);
+  conditional_aligned_free<Align>(ptr);
+}
+
+template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pts, size_t new_size, size_t old_size)
+{
+  check_size_for_overflow<T>(new_size);
+  check_size_for_overflow<T>(old_size);
+  if(new_size < old_size)
+    destruct_elements_of_array(pts+new_size, old_size-new_size);
+  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
+  if(new_size > old_size)
+    construct_elements_of_array(result+old_size, new_size-old_size);
+  return result;
+}
+
+
+template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t size)
+{
+  if(size==0)
+    return 0; // short-cut. Also fixes Bug 884
+  check_size_for_overflow<T>(size);
+  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
+  if(NumTraits<T>::RequireInitialization)
+    construct_elements_of_array(result, size);
+  return result;
+}
+
+template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, size_t new_size, size_t old_size)
+{
+  check_size_for_overflow<T>(new_size);
+  check_size_for_overflow<T>(old_size);
+  if(NumTraits<T>::RequireInitialization && (new_size < old_size))
+    destruct_elements_of_array(pts+new_size, old_size-new_size);
+  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
+  if(NumTraits<T>::RequireInitialization && (new_size > old_size))
+    construct_elements_of_array(result+old_size, new_size-old_size);
+  return result;
+}
+
+template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *ptr, size_t size)
+{
+  if(NumTraits<T>::RequireInitialization)
+    destruct_elements_of_array<T>(ptr, size);
+  conditional_aligned_free<Align>(ptr);
+}
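+
+// The *_auto variants above skip the per-element ctor/dtor calls whenever
+// NumTraits<T>::RequireInitialization is 0; e.g. for T=float they reduce to a
+// plain (conditionally aligned) malloc/free, while for a scalar type with a
+// nontrivial constructor the elements are constructed and destructed as usual.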
+
+/****************************************************************************/
+
+/** \internal Returns the index of the first element of the array that is well aligned for vectorization.
+  *
+  * \param array the address of the start of the array
+  * \param size the size of the array
+  *
+  * \note If no element of the array is well aligned, the size of the array is returned. Typically,
+  * for example with SSE, "well aligned" means 16-byte-aligned. If vectorization is disabled or if the
+  * packet size for the given scalar type is 1, then everything is considered well-aligned.
+  *
+  * \note If the scalar type is vectorizable, we rely on the following assumptions: sizeof(Scalar) is a
+  * power of 2, the packet size in bytes is also a power of 2, and is a multiple of sizeof(Scalar). On the
+  * other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
+  * example with Scalar=double on certain 32-bit platforms, see bug #79.
+  *
+  * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
+  */
+template<typename Scalar, typename Index>
+static inline Index first_aligned(const Scalar* array, Index size)
+{
+  static const Index PacketSize = packet_traits<Scalar>::size;
+  static const Index PacketAlignedMask = PacketSize-1;
+
+  if(PacketSize==1)
+  {
+    // Either there is no vectorization, or a packet consists of exactly 1 scalar so that all elements
+    // of the array have the same alignment.
+    return 0;
+  }
+  else if(size_t(array) & (sizeof(Scalar)-1))
+  {
+    // There is vectorization for this scalar type, but the array is not aligned to the size of a single scalar.
+    // Consequently, no element of the array is well aligned.
+    return size;
+  }
+  else
+  {
+    return std::min<Index>( (PacketSize - (Index((size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
+                           & PacketAlignedMask, size);
+  }
+}
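+
+// Worked example, assuming Scalar=float and 4-float (16-byte) SSE packets: for
+// an array whose address is 8 (mod 16), the first 16-byte-aligned element sits
+// at index 2, so first_aligned(array, size) returns min(2, size).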
+
+/** \internal Returns the smallest integer multiple of \a base that is greater than or equal to \a size.
+  */ 
+template<typename Index> 
+inline static Index first_multiple(Index size, Index base)
+{
+  return ((size+base-1)/base)*base;
+}
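+
+// e.g. first_multiple(13, 4) == 16 and first_multiple(16, 4) == 16.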
+
+// std::copy can be much slower than memcpy, so let's introduce a smart_copy which
+// uses memcpy on trivial types, i.e., on types that do not require an initialization ctor.
+template<typename T, bool UseMemcpy> struct smart_copy_helper;
+
+template<typename T> void smart_copy(const T* start, const T* end, T* target)
+{
+  smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
+}
+
+template<typename T> struct smart_copy_helper<T,true> {
+  static inline void run(const T* start, const T* end, T* target)
+  { memcpy(target, start, size_t(end - start) * sizeof(T)); }
+};
+
+template<typename T> struct smart_copy_helper<T,false> {
+  static inline void run(const T* start, const T* end, T* target)
+  { std::copy(start, end, target); }
+};
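+
+// Usage sketch: copies the range [src, src+n) into dst, dispatching at compile
+// time to memcpy when NumTraits<T>::RequireInitialization is 0 and to std::copy
+// otherwise (src, dst, and n are illustrative names):
+//
+//   Eigen::internal::smart_copy(src, src + n, dst);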
+
+
+/*****************************************************************************
+*** Implementation of runtime stack allocation (falling back to malloc)    ***
+*****************************************************************************/
+
+// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
+// to the appropriate stack allocation function
+#ifndef EIGEN_ALLOCA
+  #if (defined __linux__) || (defined __APPLE__) || (defined alloca)
+    #define EIGEN_ALLOCA alloca
+  #elif defined(_MSC_VER)
+    #define EIGEN_ALLOCA _alloca
+  #endif
+#endif
+
+// This helper class constructs the elements in the allocated memory, and takes care of destructing and freeing
+// the handled data at destruction time. In practice this helper class is mainly useful to avoid memory leaks in case of exceptions.
+template<typename T> class aligned_stack_memory_handler
+{
+  public:
+    /* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size.
+     * Note that \a ptr can be 0 regardless of the other parameters.
+     * This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits<T>::RequireInitialization).
+     * In this case, the buffer elements will also be destructed when this handler is destructed.
+     * Finally, if \a dealloc is true, then the pointer \a ptr is freed.
+     **/
+    aligned_stack_memory_handler(T* ptr, size_t size, bool dealloc)
+      : m_ptr(ptr), m_size(size), m_deallocate(dealloc)
+    {
+      if(NumTraits<T>::RequireInitialization && m_ptr)
+        Eigen::internal::construct_elements_of_array(m_ptr, size);
+    }
+    ~aligned_stack_memory_handler()
+    {
+      if(NumTraits<T>::RequireInitialization && m_ptr)
+        Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);
+      if(m_deallocate)
+        Eigen::internal::aligned_free(m_ptr);
+    }
+  protected:
+    T* m_ptr;
+    size_t m_size;
+    bool m_deallocate;
+};
+
+} // end namespace internal
+
+/** \internal
+  * Declares, allocates, and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack
+  * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
+  * (currently Linux, Mac OS, and Visual Studio; see the EIGEN_ALLOCA detection above). Otherwise the memory is allocated on the heap.
+  * The allocated buffer is automatically deleted when exiting the scope of this declaration.
+  * If BUFFER is non-null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.
+  * Here is an example:
+  * \code
+  * {
+  *   ei_declare_aligned_stack_constructed_variable(float,data,size,0);
+  *   // use data[0] to data[size-1]
+  * }
+  * \endcode
+  * The underlying stack allocation function can be controlled with the EIGEN_ALLOCA preprocessor token.
+  */
+#ifdef EIGEN_ALLOCA
+
+  #if defined(__arm__) || defined(_WIN32)
+    #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+16)) & ~(size_t(15))) + 16)
+  #else
+    #define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
+  #endif
+
+  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
+    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
+    TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \
+               : reinterpret_cast<TYPE*>( \
+                      (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \
+                    : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) );  \
+    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)
+
+#else
+
+  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
+    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
+    TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE));    \
+    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)
+    
+#endif
+
+
+/*****************************************************************************
+*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF]                ***
+*****************************************************************************/
+
+#if EIGEN_ALIGN
+  #ifdef EIGEN_EXCEPTIONS
+    #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+      void* operator new(size_t size, const std::nothrow_t&) throw() { \
+        try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
+        catch (...) { return 0; } \
+      }
+  #else
+    #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+      void* operator new(size_t size, const std::nothrow_t&) throw() { \
+        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+      }
+  #endif
+
+  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
+      void *operator new(size_t size) { \
+        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+      } \
+      void *operator new[](size_t size) { \
+        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
+      } \
+      void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+      void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
+      /* in-place new and delete. since (at least afaik) there is no actual   */ \
+      /* memory allocated we can safely let the default implementation handle */ \
+      /* this particular case. */ \
+      static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \
+      static void *operator new[](size_t size, void* ptr) { return ::operator new[](size,ptr); } \
+      void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \
+      void operator delete[](void * memory, void *ptr) throw() { return ::operator delete[](memory,ptr); } \
+      /* nothrow-new (returns zero instead of std::bad_alloc) */ \
+      EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
+      void operator delete(void *ptr, const std::nothrow_t&) throw() { \
+        Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \
+      } \
+      typedef void eigen_aligned_operator_new_marker_type;
+#else
+  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+#endif
+
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
+#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0)))
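+
+// Usage sketch: a class with a fixed-size vectorizable Eigen member should pull
+// in the aligned operator new so that heap instances stay 16-byte aligned
+// (MyBody and b are illustrative names):
+//
+//   struct MyBody {
+//     Eigen::Matrix4f pose;
+//     EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+//   };
+//   MyBody* b = new MyBody; // allocated via conditional_aligned_malloc<true>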
+
+/****************************************************************************/
+
+/** \class aligned_allocator
+* \ingroup Core_Module
+*
+* \brief STL compatible allocator to use with 16-byte-aligned types
+*
+* Example:
+* \code
+* // Matrix4f requires 16-byte alignment:
+* std::map< int, Matrix4f, std::less<int>,
+*           aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
+* // Vector3f does not require 16-byte alignment, so there is no need to use Eigen's allocator:
+* std::map< int, Vector3f > my_map_vec3;
+* \endcode
+*
+* \sa \ref TopicStlContainers.
+*/
+template<class T>
+class aligned_allocator
+{
+public:
+    typedef size_t    size_type;
+    typedef std::ptrdiff_t difference_type;
+    typedef T*        pointer;
+    typedef const T*  const_pointer;
+    typedef T&        reference;
+    typedef const T&  const_reference;
+    typedef T         value_type;
+
+    template<class U>
+    struct rebind
+    {
+        typedef aligned_allocator<U> other;
+    };
+
+    pointer address( reference value ) const
+    {
+        return &value;
+    }
+
+    const_pointer address( const_reference value ) const
+    {
+        return &value;
+    }
+
+    aligned_allocator()
+    {
+    }
+
+    aligned_allocator( const aligned_allocator& )
+    {
+    }
+
+    template<class U>
+    aligned_allocator( const aligned_allocator<U>& )
+    {
+    }
+
+    ~aligned_allocator()
+    {
+    }
+
+    size_type max_size() const
+    {
+        return (std::numeric_limits<size_type>::max)();
+    }
+
+    pointer allocate( size_type num, const void* hint = 0 )
+    {
+        EIGEN_UNUSED_VARIABLE(hint);
+        internal::check_size_for_overflow<T>(num);
+        return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) );
+    }
+
+    void construct( pointer p, const T& value )
+    {
+        ::new( p ) T( value );
+    }
+
+    void destroy( pointer p )
+    {
+        p->~T();
+    }
+
+    void deallocate( pointer p, size_type /*num*/ )
+    {
+        internal::aligned_free( p );
+    }
+
+    bool operator!=(const aligned_allocator<T>& ) const
+    { return false; }
+
+    bool operator==(const aligned_allocator<T>& ) const
+    { return true; }
+};
+
+//---------- Cache sizes ----------
+
+#if !defined(EIGEN_NO_CPUID)
+#  if defined(__GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
+#    if defined(__PIC__) && defined(__i386__)
+       // Case for x86 with PIC
+#      define EIGEN_CPUID(abcd,func,id) \
+         __asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
+#    elif defined(__PIC__) && defined(__x86_64__)
+       // Case for x64 with PIC. In theory this is only a problem with recent gcc and with medium or large code model, not with the default small code model.
+       // However, we cannot detect which code model is used, and the xchg overhead is negligible anyway.
+#      define EIGEN_CPUID(abcd,func,id) \
+        __asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id));
+#    else
+       // Case for x86_64 or x86 w/o PIC
+#      define EIGEN_CPUID(abcd,func,id) \
+         __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) );
+#    endif
+#  elif defined(_MSC_VER)
+#    if (_MSC_VER > 1500) && ( defined(_M_IX86) || defined(_M_X64) )
+#      define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
+#    endif
+#  endif
+#endif
+
+namespace internal {
+
+#ifdef EIGEN_CPUID
+
+inline bool cpuid_is_vendor(int abcd[4], const int vendor[3])
+{
+  return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2];
+}
+
+inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
+{
+  int abcd[4];
+  l1 = l2 = l3 = 0;
+  int cache_id = 0;
+  int cache_type = 0;
+  do {
+    abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+    EIGEN_CPUID(abcd,0x4,cache_id);
+    cache_type  = (abcd[0] & 0x0F) >> 0;
+    if(cache_type==1||cache_type==3) // data or unified cache
+    {
+      int cache_level = (abcd[0] & 0xE0) >> 5;  // A[7:5]
+      int ways        = (abcd[1] & 0xFFC00000) >> 22; // B[31:22]
+      int partitions  = (abcd[1] & 0x003FF000) >> 12; // B[21:12]
+      int line_size   = (abcd[1] & 0x00000FFF) >>  0; // B[11:0]
+      int sets        = (abcd[2]);                    // C[31:0]
+
+      int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1);
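+      // e.g. an 8-way cache with 64-byte lines and 64 sets reports ways=7,
+      // partitions=0, line_size=63, sets=63, giving 8*1*64*64 = 32 KB.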
+
+      switch(cache_level)
+      {
+        case 1: l1 = cache_size; break;
+        case 2: l2 = cache_size; break;
+        case 3: l3 = cache_size; break;
+        default: break;
+      }
+    }
+    cache_id++;
+  } while(cache_type>0 && cache_id<16);
+}
+
+inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3)
+{
+  int abcd[4];
+  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+  l1 = l2 = l3 = 0;
+  EIGEN_CPUID(abcd,0x00000002,0);
+  unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2;
+  bool check_for_p2_core2 = false;
+  for(int i=0; i<14; ++i)
+  {
+    switch(bytes[i])
+    {
+      case 0x0A: l1 = 8; break;   // 0Ah   data L1 cache, 8 KB, 2 ways, 32 byte lines
+      case 0x0C: l1 = 16; break;  // 0Ch   data L1 cache, 16 KB, 4 ways, 32 byte lines
+      case 0x0E: l1 = 24; break;  // 0Eh   data L1 cache, 24 KB, 6 ways, 64 byte lines
+      case 0x10: l1 = 16; break;  // 10h   data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
+      case 0x15: l1 = 16; break;  // 15h   code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
+      case 0x2C: l1 = 32; break;  // 2Ch   data L1 cache, 32 KB, 8 ways, 64 byte lines
+      case 0x30: l1 = 32; break;  // 30h   code L1 cache, 32 KB, 8 ways, 64 byte lines
+      case 0x60: l1 = 16; break;  // 60h   data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored
+      case 0x66: l1 = 8; break;   // 66h   data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored
+      case 0x67: l1 = 16; break;  // 67h   data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored
+      case 0x68: l1 = 32; break;  // 68h   data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored
+      case 0x1A: l2 = 96; break;   // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64)
+      case 0x22: l3 = 512; break;   // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored
+      case 0x23: l3 = 1024; break;   // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x25: l3 = 2048; break;   // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x29: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x39: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored
+      case 0x3A: l2 = 192; break;   // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored
+      case 0x3B: l2 = 128; break;   // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored
+      case 0x3C: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored
+      case 0x3D: l2 = 384; break;   // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored
+      case 0x3E: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored
+      case 0x40: l2 = 0; break;   // no integrated L2 cache (P6 core) or L3 cache (P4 core)
+      case 0x41: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 32 byte lines
+      case 0x42: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 32 byte lines
+      case 0x43: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 32 byte lines
+      case 0x44: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines
+      case 0x45: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines
+      case 0x46: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines
+      case 0x47: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines
+      case 0x48: l2 = 3072; break;   // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines
+      case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break;// code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2
+      case 0x4A: l3 = 6144; break;   // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines
+      case 0x4B: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines
+      case 0x4C: l3 = 12288; break;   // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines
+      case 0x4D: l3 = 16384; break;   // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines
+      case 0x4E: l2 = 6144; break;   // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines
+      case 0x78: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines
+      case 0x79: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x7A: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x7B: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x7C: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
+      case 0x7D: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines
+      case 0x7E: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64)
+      case 0x7F: l2 = 512; break;   // code and data L2 cache, 512 KB, 2 ways, 64 byte lines
+      case 0x80: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines
+      case 0x81: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 32 byte lines
+      case 0x82: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 32 byte lines
+      case 0x83: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 32 byte lines
+      case 0x84: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines
+      case 0x85: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines
+      case 0x86: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines
+      case 0x87: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines
+      case 0x88: l3 = 2048; break;   // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64)
+      case 0x89: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64)
+      case 0x8A: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64)
+      case 0x8D: l3 = 3072; break;   // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64)
+
+      default: break;
+    }
+  }
+  if(check_for_p2_core2 && l2 == l3)
+    l3 = 0;
+  l1 *= 1024;
+  l2 *= 1024;
+  l3 *= 1024;
+}
+
+inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)
+{
+  if(max_std_funcs>=4)
+    queryCacheSizes_intel_direct(l1,l2,l3);
+  else
+    queryCacheSizes_intel_codes(l1,l2,l3);
+}
+
+inline void queryCacheSizes_amd(int& l1, int& l2, int& l3)
+{
+  int abcd[4];
+  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+  EIGEN_CPUID(abcd,0x80000005,0);
+  l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
+  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
+  EIGEN_CPUID(abcd,0x80000006,0);
+  l2 = (abcd[2] >> 16) * 1024; // C[31:16] = L2 cache size in KB
+  l3 = ((abcd[3] & 0xFFFC0000) >> 18) * 512 * 1024; // D[31:18] = L3 cache size in units of 512 KB
+}
+#endif
+
+/** \internal
+ * Queries and returns the sizes in bytes of the L1, L2, and L3 data caches, respectively. */
+inline void queryCacheSizes(int& l1, int& l2, int& l3)
+{
+  #ifdef EIGEN_CPUID
+  int abcd[4];
+  const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
+  const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
+  const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"
+
+  // identify the CPU vendor
+  EIGEN_CPUID(abcd,0x0,0);
+  int max_std_funcs = abcd[1];
+  if(cpuid_is_vendor(abcd,GenuineIntel))
+    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
+  else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
+    queryCacheSizes_amd(l1,l2,l3);
+  else
+    // by default let's use Intel's API
+    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
+
+  // here is the list of other vendors:
+//   ||cpuid_is_vendor(abcd,"VIA VIA VIA ")
+//   ||cpuid_is_vendor(abcd,"CyrixInstead")
+//   ||cpuid_is_vendor(abcd,"CentaurHauls")
+//   ||cpuid_is_vendor(abcd,"GenuineTMx86")
+//   ||cpuid_is_vendor(abcd,"TransmetaCPU")
+//   ||cpuid_is_vendor(abcd,"RiseRiseRise")
+//   ||cpuid_is_vendor(abcd,"Geode by NSC")
+//   ||cpuid_is_vendor(abcd,"SiS SiS SiS ")
+//   ||cpuid_is_vendor(abcd,"UMC UMC UMC ")
+//   ||cpuid_is_vendor(abcd,"NexGenDriven")
+  #else
+  l1 = l2 = l3 = -1;
+  #endif
+}
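+
+// Usage sketch: all three sizes come back in bytes, or -1 when EIGEN_CPUID is
+// not available for the current compiler/architecture:
+//
+//   int l1, l2, l3;
+//   Eigen::internal::queryCacheSizes(l1, l2, l3);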
+
+/** \internal
+ * \returns the size in bytes of the L1 data cache */
+inline int queryL1CacheSize()
+{
+  int l1(-1), l2, l3;
+  queryCacheSizes(l1,l2,l3);
+  return l1;
+}
+
+/** \internal
+ * \returns the size in bytes of the L2 cache, or of the L3 cache if the latter is present */
+inline int queryTopLevelCacheSize()
+{
+  int l1, l2(-1), l3(-1);
+  queryCacheSizes(l1,l2,l3);
+  return (std::max)(l2,l3);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MEMORY_H
diff --git a/Eigen/src/Core/util/Meta.h b/Eigen/src/Core/util/Meta.h
new file mode 100644
index 0000000..71d5871
--- /dev/null
+++ b/Eigen/src/Core/util/Meta.h
@@ -0,0 +1,243 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_META_H
+#define EIGEN_META_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal
+  * \file Meta.h
+  * This file contains generic metaprogramming classes which are not specifically related to Eigen.
+  * \note In case you wonder, yes we're aware that Boost already provides all these features;
+  * however, we don't want to add a dependency on Boost.
+  */
+
+struct true_type {  enum { value = 1 }; };
+struct false_type { enum { value = 0 }; };
+
+template<bool Condition, typename Then, typename Else>
+struct conditional { typedef Then type; };
+
+template<typename Then, typename Else>
+struct conditional <false, Then, Else> { typedef Else type; };
+
+template<typename T, typename U> struct is_same { enum { value = 0 }; };
+template<typename T> struct is_same<T,T> { enum { value = 1 }; };
+
+template<typename T> struct remove_reference { typedef T type; };
+template<typename T> struct remove_reference<T&> { typedef T type; };
+
+template<typename T> struct remove_pointer { typedef T type; };
+template<typename T> struct remove_pointer<T*> { typedef T type; };
+template<typename T> struct remove_pointer<T*const> { typedef T type; };
+
+template <class T> struct remove_const { typedef T type; };
+template <class T> struct remove_const<const T> { typedef T type; };
+template <class T> struct remove_const<const T[]> { typedef T type[]; };
+template <class T, unsigned int Size> struct remove_const<const T[Size]> { typedef T type[Size]; };
+
+template<typename T> struct remove_all { typedef T type; };
+template<typename T> struct remove_all<const T>   { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T const&>  { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T&>        { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T const*>  { typedef typename remove_all<T>::type type; };
+template<typename T> struct remove_all<T*>        { typedef typename remove_all<T>::type type; };
+
+template<typename T> struct is_arithmetic      { enum { value = false }; };
+template<> struct is_arithmetic<float>         { enum { value = true }; };
+template<> struct is_arithmetic<double>        { enum { value = true }; };
+template<> struct is_arithmetic<long double>   { enum { value = true }; };
+template<> struct is_arithmetic<bool>          { enum { value = true }; };
+template<> struct is_arithmetic<char>          { enum { value = true }; };
+template<> struct is_arithmetic<signed char>   { enum { value = true }; };
+template<> struct is_arithmetic<unsigned char> { enum { value = true }; };
+template<> struct is_arithmetic<signed short>  { enum { value = true }; };
+template<> struct is_arithmetic<unsigned short>{ enum { value = true }; };
+template<> struct is_arithmetic<signed int>    { enum { value = true }; };
+template<> struct is_arithmetic<unsigned int>  { enum { value = true }; };
+template<> struct is_arithmetic<signed long>   { enum { value = true }; };
+template<> struct is_arithmetic<unsigned long> { enum { value = true }; };
+
+template <typename T> struct add_const { typedef const T type; };
+template <typename T> struct add_const<T&> { typedef T& type; };
+
+template <typename T> struct is_const { enum { value = 0 }; };
+template <typename T> struct is_const<T const> { enum { value = 1 }; };
+
+template<typename T> struct add_const_on_value_type            { typedef const T type;  };
+template<typename T> struct add_const_on_value_type<T&>        { typedef T const& type; };
+template<typename T> struct add_const_on_value_type<T*>        { typedef T const* type; };
+template<typename T> struct add_const_on_value_type<T* const>  { typedef T const* const type; };
+template<typename T> struct add_const_on_value_type<T const* const>  { typedef T const* const type; };
+
+/** \internal Allows to enable/disable an overload
+  * according to a compile time condition.
+  */
+template<bool Condition, typename T> struct enable_if;
+
+template<typename T> struct enable_if<true,T>
+{ typedef T type; };
+
+
+
+/** \internal
+  * A base class to disable the default copy ctor and copy assignment operator.
+  */
+class noncopyable
+{
+  noncopyable(const noncopyable&);
+  const noncopyable& operator=(const noncopyable&);
+protected:
+  noncopyable() {}
+  ~noncopyable() {}
+};
+
+
+/** \internal
+  * Convenient struct to get the result type of a unary or binary functor.
+  *
+  * It supports both the current STL mechanism (using the result_type member) and the
+  * upcoming STL generation (using a templated result member).
+  * If neither of these members is provided, then the type of the first argument is returned. FIXME: that behavior is a pretty bad hack.
+  */
+template<typename T> struct result_of {};
+
+struct has_none {int a[1];};
+struct has_std_result_type {int a[2];};
+struct has_tr1_result {int a[3];};
+
+template<typename Func, typename ArgType, int SizeOf=sizeof(has_none)>
+struct unary_result_of_select {typedef ArgType type;};
+
+template<typename Func, typename ArgType>
+struct unary_result_of_select<Func, ArgType, sizeof(has_std_result_type)> {typedef typename Func::result_type type;};
+
+template<typename Func, typename ArgType>
+struct unary_result_of_select<Func, ArgType, sizeof(has_tr1_result)> {typedef typename Func::template result<Func(ArgType)>::type type;};
+
+template<typename Func, typename ArgType>
+struct result_of<Func(ArgType)> {
+    template<typename T>
+    static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
+    template<typename T>
+    static has_tr1_result      testFunctor(T const *, typename T::template result<T(ArgType)>::type const * = 0);
+    static has_none            testFunctor(...);
+
+    // note that the following indirection is needed for gcc-3.3
+    enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
+    typedef typename unary_result_of_select<Func, ArgType, FunctorType>::type type;
+};
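+
+// Sketch of the sizeof-based dispatch above: a functor exposing result_type is
+// routed through the has_std_result_type overload (negate_f is an illustrative
+// name, not part of Eigen):
+//
+//   struct negate_f {
+//     typedef double result_type;
+//     double operator()(double x) const { return -x; }
+//   };
+//   typedef Eigen::internal::result_of<negate_f(double)>::type t; // double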
+
+template<typename Func, typename ArgType0, typename ArgType1, int SizeOf=sizeof(has_none)>
+struct binary_result_of_select {typedef ArgType0 type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_std_result_type)>
+{typedef typename Func::result_type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_tr1_result)>
+{typedef typename Func::template result<Func(ArgType0,ArgType1)>::type type;};
+
+template<typename Func, typename ArgType0, typename ArgType1>
+struct result_of<Func(ArgType0,ArgType1)> {
+    template<typename T>
+    static has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
+    template<typename T>
+    static has_tr1_result      testFunctor(T const *, typename T::template result<T(ArgType0,ArgType1)>::type const * = 0);
+    static has_none            testFunctor(...);
+
+    // note that the following indirection is needed for gcc-3.3
+    enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
+    typedef typename binary_result_of_select<Func, ArgType0, ArgType1, FunctorType>::type type;
+};
+
+/** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer.
+  * Usage example: \code meta_sqrt<1023>::ret \endcode
+  */
+template<int Y,
+         int InfX = 0,
+         int SupX = ((Y==1) ? 1 : Y/2),
+         bool Done = ((SupX-InfX)<=1 ? true : ((SupX*SupX <= Y) && ((SupX+1)*(SupX+1) > Y))) >
+                                // use ?: instead of || just to shut up a stupid gcc 4.3 warning
+class meta_sqrt
+{
+    enum {
+      MidX = (InfX+SupX)/2,
+      TakeInf = MidX*MidX > Y ? 1 : 0,
+      NewInf = int(TakeInf) ? InfX : int(MidX),
+      NewSup = int(TakeInf) ? int(MidX) : SupX
+    };
+  public:
+    enum { ret = meta_sqrt<Y,NewInf,NewSup>::ret };
+};
+
+template<int Y, int InfX, int SupX>
+class meta_sqrt<Y, InfX, SupX, true> { public:  enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; };
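+
+// e.g. meta_sqrt<1023>::ret == 31 and meta_sqrt<1024>::ret == 32, both computed
+// entirely at compile time by the binary search above.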
+
+/** \internal determines whether the product of two numeric types is allowed and what the return type is */
+template<typename T, typename U> struct scalar_product_traits
+{
+  enum { Defined = 0 };
+};
+
+template<typename T> struct scalar_product_traits<T,T>
+{
+  enum {
+    // Cost = NumTraits<T>::MulCost,
+    Defined = 1
+  };
+  typedef T ReturnType;
+};
+
+template<typename T> struct scalar_product_traits<T,std::complex<T> >
+{
+  enum {
+    // Cost = 2*NumTraits<T>::MulCost,
+    Defined = 1
+  };
+  typedef std::complex<T> ReturnType;
+};
+
+template<typename T> struct scalar_product_traits<std::complex<T>, T>
+{
+  enum {
+    // Cost = 2*NumTraits<T>::MulCost,
+    Defined = 1
+  };
+  typedef std::complex<T> ReturnType;
+};
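+
+// e.g. scalar_product_traits<double, std::complex<double> >::ReturnType is
+// std::complex<double>, while a mixed double*float product is rejected at
+// compile time because the primary template leaves Defined==0 and provides no
+// ReturnType.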
+
+// FIXME quick workaround around current limitation of result_of
+// template<typename Scalar, typename ArgType0, typename ArgType1>
+// struct result_of<scalar_product_op<Scalar>(ArgType0,ArgType1)> {
+// typedef typename scalar_product_traits<typename remove_all<ArgType0>::type, typename remove_all<ArgType1>::type>::ReturnType type;
+// };
+
+template<typename T> struct is_diagonal
+{ enum { ret = false }; };
+
+template<typename T> struct is_diagonal<DiagonalBase<T> >
+{ enum { ret = true }; };
+
+template<typename T> struct is_diagonal<DiagonalWrapper<T> >
+{ enum { ret = true }; };
+
+template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
+{ enum { ret = true }; };
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_META_H
diff --git a/Eigen/src/Core/util/NonMPL2.h b/Eigen/src/Core/util/NonMPL2.h
new file mode 100644
index 0000000..1af67cf
--- /dev/null
+++ b/Eigen/src/Core/util/NonMPL2.h
@@ -0,0 +1,3 @@
+#ifdef EIGEN_MPL2_ONLY
+#error Including non-MPL2 code in EIGEN_MPL2_ONLY mode
+#endif
diff --git a/Eigen/src/Core/util/ReenableStupidWarnings.h b/Eigen/src/Core/util/ReenableStupidWarnings.h
new file mode 100644
index 0000000..5ddfbd4
--- /dev/null
+++ b/Eigen/src/Core/util/ReenableStupidWarnings.h
@@ -0,0 +1,14 @@
+#ifdef EIGEN_WARNINGS_DISABLED
+#undef EIGEN_WARNINGS_DISABLED
+
+#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
+  #ifdef _MSC_VER
+    #pragma warning( pop )
+  #elif defined __INTEL_COMPILER
+    #pragma warning pop
+  #elif defined __clang__
+    #pragma clang diagnostic pop
+  #endif
+#endif
+
+#endif // EIGEN_WARNINGS_DISABLED
diff --git a/Eigen/src/Core/util/StaticAssert.h b/Eigen/src/Core/util/StaticAssert.h
new file mode 100644
index 0000000..bac5d9f
--- /dev/null
+++ b/Eigen/src/Core/util/StaticAssert.h
@@ -0,0 +1,208 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STATIC_ASSERT_H
+#define EIGEN_STATIC_ASSERT_H
+
+/* Some notes on Eigen's static assertion mechanism:
+ *
+ *  - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean
+ *    expression, and MSG an enum listed in struct internal::static_assertion<true>
+ *
+ *  - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time);
+ *    in that case, the static assertion is converted to the following runtime assert:
+ *      eigen_assert(CONDITION && "MSG")
+ *
+ *  - currently EIGEN_STATIC_ASSERT can only be used in function scope
+ *
+ */
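+
+// Usage sketch (function scope only; MSG must be one of the enums defined below
+// in internal::static_assertion<true>, and the condition is illustrative):
+//
+//   EIGEN_STATIC_ASSERT(sizeof(void*)>=4, YOU_MADE_A_PROGRAMMING_MISTAKE)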
+
+#ifndef EIGEN_NO_STATIC_ASSERT
+
+  #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(_MSC_VER) && (_MSC_VER >= 1600))
+
+    // if native static_assert is enabled, let's use it
+    #define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG);
+
+  #else // not CXX0X
+
+    namespace Eigen {
+
+    namespace internal {
+
+    template<bool condition>
+    struct static_assertion {};
+
+    template<>
+    struct static_assertion<true>
+    {
+      enum {
+        YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX,
+        YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES,
+        YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES,
+        THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE,
+        THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE,
+        THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE,
+        YOU_MADE_A_PROGRAMMING_MISTAKE,
+        EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT,
+        EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE,
+        YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR,
+        YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR,
+        UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC,
+        THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES,
+        FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED,
+        NUMERIC_TYPE_MUST_BE_REAL,
+        COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED,
+        WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED,
+        THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE,
+        INVALID_MATRIX_PRODUCT,
+        INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS,
+        INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION,
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY,
+        THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES,
+        THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES,
+        INVALID_MATRIX_TEMPLATE_PARAMETERS,
+        INVALID_MATRIXBASE_TEMPLATE_PARAMETERS,
+        BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER,
+        THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX,
+        THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE,
+        THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES,
+        YOU_ALREADY_SPECIFIED_THIS_STRIDE,
+        INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION,
+        THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD,
+        PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1,
+        THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS,
+        YOU_CANNOT_MIX_ARRAYS_AND_MATRICES,
+        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION,
+        THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY,
+        YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT,
+        THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS,
+        THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL,
+        THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES,
+        YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED,
+        YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,
+        THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,
+        THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH,
+        OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG,
+        IMPLICIT_CONVERSION_TO_SCALAR_IS_FOR_INNER_PRODUCT_ONLY,
+        STORAGE_LAYOUT_DOES_NOT_MATCH
+      };
+    };
+
+    } // end namespace internal
+
+    } // end namespace Eigen
+
+    // Specialized implementation for MSVC to avoid "conditional
+    // expression is constant" warnings.  This implementation doesn't
+    // appear to work under GCC, hence the multiple implementations.
+    #ifdef _MSC_VER
+
+      #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
+        {Eigen::internal::static_assertion<bool(CONDITION)>::MSG;}
+
+    #else
+
+      #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
+        if (Eigen::internal::static_assertion<bool(CONDITION)>::MSG) {}
+
+    #endif
+
+  #endif // not CXX0X
+
+#else // EIGEN_NO_STATIC_ASSERT
+
+  #define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);
+
+#endif // EIGEN_NO_STATIC_ASSERT
+
+
+// static assertion failing if the type \a TYPE is not a vector type
+#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \
+  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime, \
+                      YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX)
+
+// static assertion failing if the type \a TYPE is not fixed-size
+#define EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) \
+  EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime!=Eigen::Dynamic, \
+                      YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR)
+
+// static assertion failing if the type \a TYPE is not dynamic-size
+#define EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) \
+  EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime==Eigen::Dynamic, \
+                      YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR)
+
+// static assertion failing if the type \a TYPE is not a vector type of the given size
+#define EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE) \
+  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime && TYPE::SizeAtCompileTime==SIZE, \
+                      THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE)
+
+// static assertion failing if the type \a TYPE is not a matrix type with the given numbers of rows and columns
+#define EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS) \
+  EIGEN_STATIC_ASSERT(TYPE::RowsAtCompileTime==ROWS && TYPE::ColsAtCompileTime==COLS, \
+                      THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE)
+
+// static assertion failing if the two vector expression types are not compatible (same fixed-size or dynamic size)
+#define EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) \
+  EIGEN_STATIC_ASSERT( \
+      (int(TYPE0::SizeAtCompileTime)==Eigen::Dynamic \
+    || int(TYPE1::SizeAtCompileTime)==Eigen::Dynamic \
+    || int(TYPE0::SizeAtCompileTime)==int(TYPE1::SizeAtCompileTime)),\
+    YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES)
+
+#define EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
+     ( \
+        (int(TYPE0::SizeAtCompileTime)==0 && int(TYPE1::SizeAtCompileTime)==0) \
+    || (\
+          (int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \
+      &&  (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \
+        || int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime))\
+       ) \
+     )
+
+#ifdef EIGEN2_SUPPORT
+  #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
+    eigen_assert(!NumTraits<Scalar>::IsInteger);
+#else
+  #define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \
+    EIGEN_STATIC_ASSERT(!NumTraits<TYPE>::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
+#endif
+
+
+// static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes
+#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
+  EIGEN_STATIC_ASSERT( \
+     EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1),\
+    YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)
+
+#define EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) \
+      EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Dynamic) && \
+                          (TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Dynamic), \
+                          THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS)
+
+#define EIGEN_STATIC_ASSERT_LVALUE(Derived) \
+      EIGEN_STATIC_ASSERT(internal::is_lvalue<Derived>::value, \
+                          THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY)
+
+#define EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) \
+      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Derived>::XprKind, ArrayXpr>::value), \
+                          THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES)
+
+#define EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2) \
+      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Derived1>::XprKind, \
+                                             typename internal::traits<Derived2>::XprKind \
+                                            >::value), \
+                          YOU_CANNOT_MIX_ARRAYS_AND_MATRICES)
+
+
+#endif // EIGEN_STATIC_ASSERT_H
diff --git a/Eigen/src/Core/util/XprHelper.h b/Eigen/src/Core/util/XprHelper.h
new file mode 100644
index 0000000..781965d
--- /dev/null
+++ b/Eigen/src/Core/util/XprHelper.h
@@ -0,0 +1,469 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_XPRHELPER_H
+#define EIGEN_XPRHELPER_H
+
+// just a workaround because GCC seems to not really like empty structs
+// FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
+// so currently we simply disable this optimization for gcc 4.3
+#if (defined __GNUG__) && !((__GNUC__==4) && (__GNUC_MINOR__==3))
+  #define EIGEN_EMPTY_STRUCT_CTOR(X) \
+    EIGEN_STRONG_INLINE X() {} \
+    EIGEN_STRONG_INLINE X(const X& ) {}
+#else
+  #define EIGEN_EMPTY_STRUCT_CTOR(X)
+#endif
+
+namespace Eigen {
+
+typedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex;
+
+namespace internal {
+
+// classes inheriting no_assignment_operator don't generate a default operator=.
+class no_assignment_operator
+{
+  private:
+    no_assignment_operator& operator=(const no_assignment_operator&);
+};
+
+/** \internal return the index type with the largest number of bits */
+template<typename I1, typename I2>
+struct promote_index_type
+{
+  typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
+};
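+
+// e.g. on an LP64 platform, promote_index_type<int, std::ptrdiff_t>::type is
+// std::ptrdiff_t (8 bytes beats 4).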
+
+/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
+  * can be accessed using value() and setValue().
+  * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
+  */
+template<typename T, int Value> class variable_if_dynamic
+{
+  public:
+    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic)
+    explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); assert(v == T(Value)); }
+    static T value() { return T(Value); }
+    void setValue(T) {}
+};
+
+template<typename T> class variable_if_dynamic<T, Dynamic>
+{
+    T m_value;
+    variable_if_dynamic() { assert(false); }
+  public:
+    explicit variable_if_dynamic(T value) : m_value(value) {}
+    T value() const { return m_value; }
+    void setValue(T value) { m_value = value; }
+};
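+
+// e.g. variable_if_dynamic<DenseIndex,3> occupies no storage and value() yields
+// the compile-time constant 3, while variable_if_dynamic<DenseIndex,Dynamic>
+// carries the value in a runtime member.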
+
+/** \internal like variable_if_dynamic but for DynamicIndex
+  */
+template<typename T, int Value> class variable_if_dynamicindex
+{
+  public:
+    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
+    explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); assert(v == T(Value)); }
+    static T value() { return T(Value); }
+    void setValue(T) {}
+};
+
+template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
+{
+    T m_value;
+    variable_if_dynamicindex() { assert(false); }
+  public:
+    explicit variable_if_dynamicindex(T value) : m_value(value) {}
+    T value() const { return m_value; }
+    void setValue(T value) { m_value = value; }
+};
+
+template<typename T> struct functor_traits
+{
+  enum
+  {
+    Cost = 10,
+    PacketAccess = false,
+    IsRepeatable = false
+  };
+};
+
+template<typename T> struct packet_traits;
+
+template<typename T> struct unpacket_traits
+{
+  typedef T type;
+  enum {size=1};
+};
+
+template<typename _Scalar, int _Rows, int _Cols,
+         int _Options = AutoAlign |
+                          ( (_Rows==1 && _Cols!=1) ? RowMajor
+                          : (_Cols==1 && _Rows!=1) ? ColMajor
+                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
+         int _MaxRows = _Rows,
+         int _MaxCols = _Cols
+> class make_proper_matrix_type
+{
+    enum {
+      IsColVector = _Cols==1 && _Rows!=1,
+      IsRowVector = _Rows==1 && _Cols!=1,
+      Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
+              : IsRowVector ? (_Options | RowMajor) & ~ColMajor
+              : _Options
+    };
+  public:
+    typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+class compute_matrix_flags
+{
+    enum {
+      row_major_bit = Options&RowMajor ? RowMajorBit : 0,
+      is_dynamic_size_storage = MaxRows==Dynamic || MaxCols==Dynamic,
+
+      aligned_bit =
+      (
+            ((Options&DontAlign)==0)
+        && (
+#if EIGEN_ALIGN_STATICALLY
+             ((!is_dynamic_size_storage) && (((MaxCols*MaxRows*int(sizeof(Scalar))) % 16) == 0))
+#else
+             0
+#endif
+
+          ||
+
+#if EIGEN_ALIGN
+             is_dynamic_size_storage
+#else
+             0
+#endif
+
+          )
+      ) ? AlignedBit : 0,
+      packet_access_bit = packet_traits<Scalar>::Vectorizable && aligned_bit ? PacketAccessBit : 0
+    };
+
+  public:
+    enum { ret = LinearAccessBit | LvalueBit | DirectAccessBit | NestByRefBit | packet_access_bit | row_major_bit | aligned_bit };
+};
+
+template<int _Rows, int _Cols> struct size_at_compile_time
+{
+  enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
+};
+
+/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
+ * whereas eval is a const reference in the case of a matrix
+ */
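+
+// e.g. for a sum expression A+B with A,B of type MatrixXf, both
+// plain_matrix_type<...>::type and eval<...>::type are MatrixXf; but for
+// MatrixXf itself, eval<MatrixXf>::type is const MatrixXf& (no copy is made)
+// while plain_matrix_type<MatrixXf>::type remains MatrixXf.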
+
+template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
+template<typename T, typename BaseClassType> struct plain_matrix_type_dense;
+template<typename T> struct plain_matrix_type<T,Dense>
+{
+  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind>::type type;
+};
+
+template<typename T> struct plain_matrix_type_dense<T,MatrixXpr>
+{
+  typedef Matrix<typename traits<T>::Scalar,
+                traits<T>::RowsAtCompileTime,
+                traits<T>::ColsAtCompileTime,
+                AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+                traits<T>::MaxRowsAtCompileTime,
+                traits<T>::MaxColsAtCompileTime
+          > type;
+};
+
+template<typename T> struct plain_matrix_type_dense<T,ArrayXpr>
+{
+  typedef Array<typename traits<T>::Scalar,
+                traits<T>::RowsAtCompileTime,
+                traits<T>::ColsAtCompileTime,
+                AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+                traits<T>::MaxRowsAtCompileTime,
+                traits<T>::MaxColsAtCompileTime
+          > type;
+};
+
+/* eval : the return type of eval(). For matrices, this is just a const reference
+ * in order to avoid a useless copy
+ */
+
+template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;
+
+template<typename T> struct eval<T,Dense>
+{
+  typedef typename plain_matrix_type<T>::type type;
+//   typedef typename T::PlainObject type;
+//   typedef T::Matrix<typename traits<T>::Scalar,
+//                 traits<T>::RowsAtCompileTime,
+//                 traits<T>::ColsAtCompileTime,
+//                 AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
+//                 traits<T>::MaxRowsAtCompileTime,
+//                 traits<T>::MaxColsAtCompileTime
+//           > type;
+};
+
+// for matrices, no need to evaluate, just use a const reference to avoid a useless copy
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
+{
+  typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
+};
+
+template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
+struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
+{
+  typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
+};
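+
+// Resulting types, sketched (illustrative only):
+//
+//   typedef CwiseUnaryOp<scalar_opposite_op<float>, const Matrix2f> Neg;  // e.g. -m
+//   eval<Neg>::type       // plain Matrix<float,2,2,...>: expressions get evaluated
+//   eval<Matrix2f>::type  // const Matrix2f&: plain objects are taken by reference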
+
+
+
+/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
+ */
+template<typename T> struct plain_matrix_type_column_major
+{
+  enum { Rows = traits<T>::RowsAtCompileTime,
+         Cols = traits<T>::ColsAtCompileTime,
+         MaxRows = traits<T>::MaxRowsAtCompileTime,
+         MaxCols = traits<T>::MaxColsAtCompileTime
+  };
+  typedef Matrix<typename traits<T>::Scalar,
+                Rows,
+                Cols,
+                (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
+                MaxRows,
+                MaxCols
+          > type;
+};
+
+/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
+ */
+template<typename T> struct plain_matrix_type_row_major
+{
+  enum { Rows = traits<T>::RowsAtCompileTime,
+         Cols = traits<T>::ColsAtCompileTime,
+         MaxRows = traits<T>::MaxRowsAtCompileTime,
+         MaxCols = traits<T>::MaxColsAtCompileTime
+  };
+  typedef Matrix<typename traits<T>::Scalar,
+                Rows,
+                Cols,
+                (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
+                MaxRows,
+                MaxCols
+          > type;
+};
+
+// we should be able to get rid of this one too
+template<typename T> struct must_nest_by_value { enum { ret = false }; };
+
+/** \internal The reference selector for template expressions. The idea is that we don't
+  * need to use references for expressions since they are lightweight proxy
+  * objects which should generate no copying overhead. */
+template <typename T>
+struct ref_selector
+{
+  typedef typename conditional<
+    bool(traits<T>::Flags & NestByRefBit),
+    T const&,
+    const T
+  >::type type;
+};
+
+/** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */
+template<typename T1, typename T2>
+struct transfer_constness
+{
+  typedef typename conditional<
+    bool(internal::is_const<T1>::value),
+    typename internal::add_const_on_value_type<T2>::type,
+    T2
+  >::type type;
+};
+
+/** \internal Determines how a given expression should be nested into another one.
+  * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
+  * nested into the bigger product expression. The choice is between nesting the expression b+c as-is, or
+  * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is
+  * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
+  * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
+  *
+  * \param T the type of the expression being nested
+  * \param n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
+  *
+  * Note that if no evaluation occurs, then the constness of T is preserved.
+  *
+  * Example. Suppose that a, b, and c are of type Matrix3d. The user forms the expression a*(b+c).
+  * b+c is an expression "sum of matrices", which we will denote by S. In order to determine how to nest it,
+  * the Product expression uses: nested<S, 3>::ret, which turns out to be Matrix3d because the internal logic of
+  * nested determined that in this case it was better to evaluate the expression b+c into a temporary. On the other hand,
+  * since a is of type Matrix3d, the Product expression nests it as nested<Matrix3d, 3>::ret, which turns out to be
+  * const Matrix3d&, because the internal logic of nested determined that since a was already a matrix, there was no point
+  * in copying it into another matrix.
+  */
+template<typename T, int n=1, typename PlainObject = typename eval<T>::type> struct nested
+{
+  enum {
+    // for the purpose of this test, to keep it reasonably simple, we arbitrarily pick a
+    // fixed integer value to stand in for Dynamic. The choice of 10000 makes it larger
+    // than any practical fixed value and even most dynamic values.
+    // in extreme cases where these assumptions would be wrong, we would still at worst suffer performance issues
+    // (poor choice of temporaries).
+    // it's important that this value can still be squared without integer overflowing.
+    DynamicAsInteger = 10000,
+    ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
+    ScalarReadCostAsInteger = ScalarReadCost == Dynamic ? int(DynamicAsInteger) : int(ScalarReadCost),
+    CoeffReadCost = traits<T>::CoeffReadCost,
+    CoeffReadCostAsInteger = CoeffReadCost == Dynamic ? int(DynamicAsInteger) : int(CoeffReadCost),
+    NAsInteger = n == Dynamic ? int(DynamicAsInteger) : n,
+    CostEvalAsInteger   = (NAsInteger+1) * ScalarReadCostAsInteger + CoeffReadCostAsInteger,
+    CostNoEvalAsInteger = NAsInteger * CoeffReadCostAsInteger
+  };
+
+  typedef typename conditional<
+      ( (int(traits<T>::Flags) & EvalBeforeNestingBit) ||
+        int(CostEvalAsInteger) < int(CostNoEvalAsInteger)
+      ),
+      PlainObject,
+      typename ref_selector<T>::type
+  >::type type;
+};
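+
+// Worked instance of the cost model above (illustrative), for the a*(b+c) example
+// from the comment, with a,b,c of type Matrix3d and n = 3:
+//  - for S = b+c: CoeffReadCost = 3 (two double reads plus one add) and
+//    ScalarReadCost = 1, so CostEval = (3+1)*1 + 3 = 7 < CostNoEval = 3*3 = 9,
+//    hence nested<S,3>::type is the temporary Matrix3d;
+//  - for a itself: CoeffReadCost = 1, so CostNoEval = 3*1 = 3 < CostEval = (3+1)*1 + 1 = 5,
+//    hence nested<Matrix3d,3>::type is const Matrix3d&.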
+
+template<typename T>
+inline T* const_cast_ptr(const T* ptr)
+{
+  return const_cast<T*>(ptr);
+}
+
+template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
+struct dense_xpr_base
+{
+  /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
+};
+
+template<typename Derived>
+struct dense_xpr_base<Derived, MatrixXpr>
+{
+  typedef MatrixBase<Derived> type;
+};
+
+template<typename Derived>
+struct dense_xpr_base<Derived, ArrayXpr>
+{
+  typedef ArrayBase<Derived> type;
+};
+
+/** \internal Helper base class to add a scalar multiple operator
+  * overloads for complex types */
+template<typename Derived,typename Scalar,typename OtherScalar,
+         bool EnableIt = !is_same<Scalar,OtherScalar>::value >
+struct special_scalar_op_base : public DenseCoeffsBase<Derived>
+{
+  // dummy operator* so that the
+  // "using special_scalar_op_base::operator*" compiles
+  void operator*() const;
+};
+
+template<typename Derived,typename Scalar,typename OtherScalar>
+struct special_scalar_op_base<Derived,Scalar,OtherScalar,true>  : public DenseCoeffsBase<Derived>
+{
+  const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+  operator*(const OtherScalar& scalar) const
+  {
+    return CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+      (*static_cast<const Derived*>(this), scalar_multiple2_op<Scalar,OtherScalar>(scalar));
+  }
+
+  inline friend const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
+  operator*(const OtherScalar& scalar, const Derived& matrix)
+  { return static_cast<const special_scalar_op_base&>(matrix).operator*(scalar); }
+};
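+
+// What this enables, sketched (illustrative): scaling a complex matrix by a real
+// scalar without promoting the scalar first, e.g.
+//
+//   MatrixXcf m(2,2);
+//   m * 2.0f;   // member operator* above, via scalar_multiple2_op<complex<float>,float>
+//   2.0f * m;   // friend overload above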
+
+template<typename XprType, typename CastType> struct cast_return_type
+{
+  typedef typename XprType::Scalar CurrentScalarType;
+  typedef typename remove_all<CastType>::type _CastType;
+  typedef typename _CastType::Scalar NewScalarType;
+  typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
+                              const XprType&,CastType>::type type;
+};
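+
+// Illustrative effect: casting to the scalar type an expression already has is a
+// no-op that returns const XprType&, while a genuine conversion yields CastType:
+//
+//   MatrixXf m;
+//   m.cast<float>();   // const MatrixXf&, no copy
+//   m.cast<double>();  // an actual cast expression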
+
+template <typename A, typename B> struct promote_storage_type;
+
+template <typename A> struct promote_storage_type<A,A>
+{
+  typedef A ret;
+};
+
+/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
+  * \param Scalar optional parameter allowing one to pass a different scalar type than that of the MatrixType.
+  */
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_row_type
+{
+  typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
+                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
+  typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
+                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;
+
+  typedef typename conditional<
+    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+    MatrixRowType,
+    ArrayRowType 
+  >::type type;
+};
+
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_col_type
+{
+  typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
+                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
+  typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
+                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;
+
+  typedef typename conditional<
+    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+    MatrixColType,
+    ArrayColType 
+  >::type type;
+};
+
+template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
+struct plain_diag_type
+{
+  enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
+         max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
+  };
+  typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
+  typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;
+
+  typedef typename conditional<
+    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
+    MatrixDiagType,
+    ArrayDiagType 
+  >::type type;
+};
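+
+// Resulting plain types, sketched (illustrative):
+//   plain_row_type<Matrix3f>::type   // Matrix<float,1,3,RowMajor,1,3>
+//   plain_col_type<Matrix3f>::type   // Matrix<float,3,1,ColMajor,3,1>
+//   plain_diag_type<Matrix3f>::type  // Matrix<float,3,1,ColMajor,3,1>
+// For an Array expression the same shapes come out as Array types instead.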
+
+template<typename ExpressionType>
+struct is_lvalue
+{
+  enum { value = !bool(is_const<ExpressionType>::value) &&
+                 bool(traits<ExpressionType>::Flags & LvalueBit) };
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_XPRHELPER_H
diff --git a/Eigen/src/Eigen2Support/Block.h b/Eigen/src/Eigen2Support/Block.h
new file mode 100644
index 0000000..604456f
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Block.h
@@ -0,0 +1,126 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BLOCK2_H
+#define EIGEN_BLOCK2_H
+
+namespace Eigen { 
+
+/** \returns a dynamic-size expression of a corner of *this.
+  *
+  * \param type the type of corner. Can be \a Eigen::TopLeft, \a Eigen::TopRight,
+  * \a Eigen::BottomLeft, \a Eigen::BottomRight.
+  * \param cRows the number of rows in the corner
+  * \param cCols the number of columns in the corner
+  *
+  * Example: \include MatrixBase_corner_enum_int_int.cpp
+  * Output: \verbinclude MatrixBase_corner_enum_int_int.out
+  *
+  * \note Even though the returned expression has dynamic size, in the case
+  * when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
+  * which means that evaluating it does not cause a dynamic memory allocation.
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<typename Derived>
+inline Block<Derived> DenseBase<Derived>
+  ::corner(CornerType type, Index cRows, Index cCols)
+{
+  switch(type)
+  {
+    default:
+      eigen_assert(false && "Bad corner type.");
+    case TopLeft:
+      return Block<Derived>(derived(), 0, 0, cRows, cCols);
+    case TopRight:
+      return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols);
+    case BottomLeft:
+      return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols);
+    case BottomRight:
+      return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+  }
+}
+
+/** This is the const version of corner(CornerType, Index, Index).*/
+template<typename Derived>
+inline const Block<Derived>
+DenseBase<Derived>::corner(CornerType type, Index cRows, Index cCols) const
+{
+  switch(type)
+  {
+    default:
+      eigen_assert(false && "Bad corner type.");
+    case TopLeft:
+      return Block<Derived>(derived(), 0, 0, cRows, cCols);
+    case TopRight:
+      return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols);
+    case BottomLeft:
+      return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols);
+    case BottomRight:
+      return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+  }
+}
+
+/** \returns a fixed-size expression of a corner of *this.
+  *
+  * \param type the type of corner. Can be \a Eigen::TopLeft, \a Eigen::TopRight,
+  * \a Eigen::BottomLeft, \a Eigen::BottomRight.
+  *
+  * The template parameters CRows and CCols are the number of rows and columns in the corner.
+  *
+  * Example: \include MatrixBase_template_int_int_corner_enum.cpp
+  * Output: \verbinclude MatrixBase_template_int_int_corner_enum.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<typename Derived>
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols>
+DenseBase<Derived>::corner(CornerType type)
+{
+  switch(type)
+  {
+    default:
+      eigen_assert(false && "Bad corner type.");
+    case TopLeft:
+      return Block<Derived, CRows, CCols>(derived(), 0, 0);
+    case TopRight:
+      return Block<Derived, CRows, CCols>(derived(), 0, cols() - CCols);
+    case BottomLeft:
+      return Block<Derived, CRows, CCols>(derived(), rows() - CRows, 0);
+    case BottomRight:
+      return Block<Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
+  }
+}
+
+/** This is the const version of corner<int, int>(CornerType).*/
+template<typename Derived>
+template<int CRows, int CCols>
+inline const Block<Derived, CRows, CCols>
+DenseBase<Derived>::corner(CornerType type) const
+{
+  switch(type)
+  {
+    default:
+      eigen_assert(false && "Bad corner type.");
+    case TopLeft:
+      return Block<Derived, CRows, CCols>(derived(), 0, 0);
+    case TopRight:
+      return Block<Derived, CRows, CCols>(derived(), 0, cols() - CCols);
+    case BottomLeft:
+      return Block<Derived, CRows, CCols>(derived(), rows() - CRows, 0);
+    case BottomRight:
+      return Block<Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
+  }
+}
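+
+// Usage sketch for this deprecated API (illustrative; Eigen3 spells these
+// topLeftCorner(), bottomRightCorner(), and so on):
+//
+//   MatrixXf m(4,4);
+//   m.corner(TopRight, 2, 3);    // dynamic-size 2x3 corner block
+//   m.corner<2,2>(BottomLeft);   // fixed-size 2x2 corner block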
+
+} // end namespace Eigen
+
+#endif // EIGEN_BLOCK2_H
diff --git a/Eigen/src/Eigen2Support/CMakeLists.txt b/Eigen/src/Eigen2Support/CMakeLists.txt
new file mode 100644
index 0000000..7ae41b3
--- /dev/null
+++ b/Eigen/src/Eigen2Support/CMakeLists.txt
@@ -0,0 +1,8 @@
+FILE(GLOB Eigen_Eigen2Support_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Eigen2Support_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Eigen2Support COMPONENT Devel
+  )
+
+ADD_SUBDIRECTORY(Geometry)
\ No newline at end of file
diff --git a/Eigen/src/Eigen2Support/Cwise.h b/Eigen/src/Eigen2Support/Cwise.h
new file mode 100644
index 0000000..d95009b
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Cwise.h
@@ -0,0 +1,192 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CWISE_H
+#define EIGEN_CWISE_H
+
+namespace Eigen { 
+
+/** \internal
+  * convenient macro to define the return type of a cwise binary operation */
+#define EIGEN_CWISE_BINOP_RETURN_TYPE(OP) \
+    CwiseBinaryOp<OP<typename internal::traits<ExpressionType>::Scalar>, ExpressionType, OtherDerived>
+
+/** \internal
+  * convenient macro to define the return type of a cwise unary operation */
+#define EIGEN_CWISE_UNOP_RETURN_TYPE(OP) \
+    CwiseUnaryOp<OP<typename internal::traits<ExpressionType>::Scalar>, ExpressionType>
+
+/** \internal
+  * convenient macro to define the return type of a cwise comparison to a scalar */
+#define EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(OP) \
+    CwiseBinaryOp<OP<typename internal::traits<ExpressionType>::Scalar>, ExpressionType, \
+        typename ExpressionType::ConstantReturnType >
+
+/** \class Cwise
+  *
+  * \brief Pseudo expression providing additional coefficient-wise operations
+  *
+  * \param ExpressionType the type of the object on which to do coefficient-wise operations
+  *
+  * This class represents an expression with additional coefficient-wise features.
+  * It is the return type of MatrixBase::cwise()
+  * and most of the time this is the only way it is used.
+  *
+  * Example: \include MatrixBase_cwise_const.cpp
+  * Output: \verbinclude MatrixBase_cwise_const.out
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_CWISE_PLUGIN.
+  *
+  * \sa MatrixBase::cwise() const, MatrixBase::cwise()
+  */
+template<typename ExpressionType> class Cwise
+{
+  public:
+
+    typedef typename internal::traits<ExpressionType>::Scalar Scalar;
+    typedef typename internal::conditional<internal::must_nest_by_value<ExpressionType>::ret,
+        ExpressionType, const ExpressionType&>::type ExpressionTypeNested;
+    typedef CwiseUnaryOp<internal::scalar_add_op<Scalar>, ExpressionType> ScalarAddReturnType;
+
+    inline Cwise(const ExpressionType& matrix) : m_matrix(matrix) {}
+
+    /** \internal */
+    inline const ExpressionType& _expression() const { return m_matrix; }
+
+    template<typename OtherDerived>
+    const EIGEN_CWISE_PRODUCT_RETURN_TYPE(ExpressionType,OtherDerived)
+    operator*(const MatrixBase<OtherDerived> &other) const;
+
+    template<typename OtherDerived>
+    const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
+    operator/(const MatrixBase<OtherDerived> &other) const;
+
+    /** \deprecated ArrayBase::min() */
+    template<typename OtherDerived>
+    const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)
+    (min)(const MatrixBase<OtherDerived> &other) const
+    { return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_min_op)(_expression(), other.derived()); }
+
+    /** \deprecated ArrayBase::max() */
+    template<typename OtherDerived>
+    const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)
+    (max)(const MatrixBase<OtherDerived> &other) const
+    { return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_max_op)(_expression(), other.derived()); }
+
+    const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs_op)      abs() const;
+    const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs2_op)     abs2() const;
+    const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_square_op)   square() const;
+    const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cube_op)     cube() const;
+    const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_inverse_op)  inverse() const;
+    const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sqrt_op)     sqrt() const;
+    const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_exp_op)      exp() const;
+    const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_log_op)      log() const;
+    const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cos_op)      cos() const;
+    const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sin_op)      sin() const;
+    const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_pow_op)      pow(const Scalar& exponent) const;
+
+    const ScalarAddReturnType
+    operator+(const Scalar& scalar) const;
+
+    /** \relates Cwise */
+    friend const ScalarAddReturnType
+    operator+(const Scalar& scalar, const Cwise& mat)
+    { return mat + scalar; }
+
+    ExpressionType& operator+=(const Scalar& scalar);
+
+    const ScalarAddReturnType
+    operator-(const Scalar& scalar) const;
+
+    ExpressionType& operator-=(const Scalar& scalar);
+
+    template<typename OtherDerived>
+    inline ExpressionType& operator*=(const MatrixBase<OtherDerived> &other);
+
+    template<typename OtherDerived>
+    inline ExpressionType& operator/=(const MatrixBase<OtherDerived> &other);
+
+    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)
+    operator<(const MatrixBase<OtherDerived>& other) const;
+
+    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)
+    operator<=(const MatrixBase<OtherDerived>& other) const;
+
+    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)
+    operator>(const MatrixBase<OtherDerived>& other) const;
+
+    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)
+    operator>=(const MatrixBase<OtherDerived>& other) const;
+
+    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)
+    operator==(const MatrixBase<OtherDerived>& other) const;
+
+    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)
+    operator!=(const MatrixBase<OtherDerived>& other) const;
+
+    // comparisons to a scalar value
+    const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less)
+    operator<(Scalar s) const;
+
+    const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal)
+    operator<=(Scalar s) const;
+
+    const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater)
+    operator>(Scalar s) const;
+
+    const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal)
+    operator>=(Scalar s) const;
+
+    const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to)
+    operator==(Scalar s) const;
+
+    const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to)
+    operator!=(Scalar s) const;
+
+    // allows extending Cwise from outside Eigen
+    #ifdef EIGEN_CWISE_PLUGIN
+    #include EIGEN_CWISE_PLUGIN
+    #endif
+
+  protected:
+    ExpressionTypeNested m_matrix;
+};
+
+
+/** \returns a Cwise wrapper of *this providing additional coefficient-wise operations
+  *
+  * Example: \include MatrixBase_cwise_const.cpp
+  * Output: \verbinclude MatrixBase_cwise_const.out
+  *
+  * \sa class Cwise, cwise()
+  */
+template<typename Derived>
+inline const Cwise<Derived> MatrixBase<Derived>::cwise() const
+{
+  return derived();
+}
+
+/** \returns a Cwise wrapper of *this providing additional coefficient-wise operations
+  *
+  * Example: \include MatrixBase_cwise.cpp
+  * Output: \verbinclude MatrixBase_cwise.out
+  *
+  * \sa class Cwise, cwise() const
+  */
+template<typename Derived>
+inline Cwise<Derived> MatrixBase<Derived>::cwise()
+{
+  return derived();
+}
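+
+// Usage sketch (illustrative): the cwise() proxy is the eigen2 counterpart of
+// Eigen3's .array(), e.g.
+//
+//   Matrix3f a, b;
+//   a.cwise() * b;     // coefficient-wise product
+//   a.cwise() + 1.0f;  // add a scalar to every coefficient
+//   a.cwise() < b;     // coefficient-wise comparison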
+
+} // end namespace Eigen
+
+#endif // EIGEN_CWISE_H
diff --git a/Eigen/src/Eigen2Support/CwiseOperators.h b/Eigen/src/Eigen2Support/CwiseOperators.h
new file mode 100644
index 0000000..482f306
--- /dev/null
+++ b/Eigen/src/Eigen2Support/CwiseOperators.h
@@ -0,0 +1,298 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ARRAY_CWISE_OPERATORS_H
+#define EIGEN_ARRAY_CWISE_OPERATORS_H
+
+namespace Eigen { 
+
+/***************************************************************************
+* The following functions were defined in Core
+***************************************************************************/
+
+
+/** \deprecated ArrayBase::abs() */
+template<typename ExpressionType>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs_op)
+Cwise<ExpressionType>::abs() const
+{
+  return _expression();
+}
+
+/** \deprecated ArrayBase::abs2() */
+template<typename ExpressionType>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_abs2_op)
+Cwise<ExpressionType>::abs2() const
+{
+  return _expression();
+}
+
+/** \deprecated ArrayBase::exp() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_exp_op)
+Cwise<ExpressionType>::exp() const
+{
+  return _expression();
+}
+
+/** \deprecated ArrayBase::log() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_log_op)
+Cwise<ExpressionType>::log() const
+{
+  return _expression();
+}
+
+/** \deprecated ArrayBase::operator*() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(ExpressionType,OtherDerived)
+Cwise<ExpressionType>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_PRODUCT_RETURN_TYPE(ExpressionType,OtherDerived)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator/() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
+Cwise<ExpressionType>::operator/(const MatrixBase<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator*=() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline ExpressionType& Cwise<ExpressionType>::operator*=(const MatrixBase<OtherDerived> &other)
+{
+  return m_matrix.const_cast_derived() = *this * other;
+}
+
+/** \deprecated ArrayBase::operator/=() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline ExpressionType& Cwise<ExpressionType>::operator/=(const MatrixBase<OtherDerived> &other)
+{
+  return m_matrix.const_cast_derived() = *this / other;
+}
+
+/***************************************************************************
+* The following functions were defined in Array
+***************************************************************************/
+
+// -- unary operators --
+
+/** \deprecated ArrayBase::sqrt() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sqrt_op)
+Cwise<ExpressionType>::sqrt() const
+{
+  return _expression();
+}
+
+/** \deprecated ArrayBase::cos() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cos_op)
+Cwise<ExpressionType>::cos() const
+{
+  return _expression();
+}
+
+
+/** \deprecated ArrayBase::sin() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_sin_op)
+Cwise<ExpressionType>::sin() const
+{
+  return _expression();
+}
+
+
+/** \deprecated ArrayBase::pow() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_pow_op)
+Cwise<ExpressionType>::pow(const Scalar& exponent) const
+{
+  return EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_pow_op)(_expression(), internal::scalar_pow_op<Scalar>(exponent));
+}
+
+
+/** \deprecated ArrayBase::inverse() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_inverse_op)
+Cwise<ExpressionType>::inverse() const
+{
+  return _expression();
+}
+
+/** \deprecated ArrayBase::square() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_square_op)
+Cwise<ExpressionType>::square() const
+{
+  return _expression();
+}
+
+/** \deprecated ArrayBase::cube() */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_UNOP_RETURN_TYPE(internal::scalar_cube_op)
+Cwise<ExpressionType>::cube() const
+{
+  return _expression();
+}
+
+
+// -- binary operators --
+
+/** \deprecated ArrayBase::operator<() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)
+Cwise<ExpressionType>::operator<(const MatrixBase<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator<=() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)
+Cwise<ExpressionType>::operator<=(const MatrixBase<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator>() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)
+Cwise<ExpressionType>::operator>(const MatrixBase<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator>=() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)
+Cwise<ExpressionType>::operator>=(const MatrixBase<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator==() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)
+Cwise<ExpressionType>::operator==(const MatrixBase<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)(_expression(), other.derived());
+}
+
+/** \deprecated ArrayBase::operator!=() */
+template<typename ExpressionType>
+template<typename OtherDerived>
+inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)
+Cwise<ExpressionType>::operator!=(const MatrixBase<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)(_expression(), other.derived());
+}
+
+// comparisons to a scalar value
+
+/** \deprecated ArrayBase::operator<(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less)
+Cwise<ExpressionType>::operator<(Scalar s) const
+{
+  return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less)(_expression(),
+            typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+/** \deprecated ArrayBase::operator<=(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal)
+Cwise<ExpressionType>::operator<=(Scalar s) const
+{
+  return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal)(_expression(),
+            typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+/** \deprecated ArrayBase::operator>(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater)
+Cwise<ExpressionType>::operator>(Scalar s) const
+{
+  return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater)(_expression(),
+            typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+/** \deprecated ArrayBase::operator>=(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal)
+Cwise<ExpressionType>::operator>=(Scalar s) const
+{
+  return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal)(_expression(),
+            typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+/** \deprecated ArrayBase::operator==(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to)
+Cwise<ExpressionType>::operator==(Scalar s) const
+{
+  return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to)(_expression(),
+            typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+/** \deprecated ArrayBase::operator!=(Scalar) */
+template<typename ExpressionType>
+inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to)
+Cwise<ExpressionType>::operator!=(Scalar s) const
+{
+  return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to)(_expression(),
+            typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s));
+}
+
+// scalar addition
+
+/** \deprecated ArrayBase::operator+(Scalar) */
+template<typename ExpressionType>
+inline const typename Cwise<ExpressionType>::ScalarAddReturnType
+Cwise<ExpressionType>::operator+(const Scalar& scalar) const
+{
+  return typename Cwise<ExpressionType>::ScalarAddReturnType(m_matrix, internal::scalar_add_op<Scalar>(scalar));
+}
+
+/** \deprecated ArrayBase::operator+=(Scalar) */
+template<typename ExpressionType>
+inline ExpressionType& Cwise<ExpressionType>::operator+=(const Scalar& scalar)
+{
+  return m_matrix.const_cast_derived() = *this + scalar;
+}
+
+/** \deprecated ArrayBase::operator-(Scalar) */
+template<typename ExpressionType>
+inline const typename Cwise<ExpressionType>::ScalarAddReturnType
+Cwise<ExpressionType>::operator-(const Scalar& scalar) const
+{
+  return *this + (-scalar);
+}
+
+/** \deprecated ArrayBase::operator-=(Scalar) */
+template<typename ExpressionType>
+inline ExpressionType& Cwise<ExpressionType>::operator-=(const Scalar& scalar)
+{
+  return m_matrix.const_cast_derived() = *this - scalar;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_ARRAY_CWISE_OPERATORS_H
diff --git a/Eigen/src/Eigen2Support/Geometry/AlignedBox.h b/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
new file mode 100644
index 0000000..2e4309d
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/AlignedBox.h
@@ -0,0 +1,159 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  * \nonstableyet
+  *
+  * \class AlignedBox
+  *
+  * \brief An axis-aligned box
+  *
+  * \param _Scalar the type of the scalar coefficients
+  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  *
+  * This class represents an axis aligned box as a pair of the minimal and maximal corners.
+  */
+template <typename _Scalar, int _AmbientDim>
+class AlignedBox
+{
+public:
+EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
+  enum { AmbientDimAtCompileTime = _AmbientDim };
+  typedef _Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+
+  /** Default constructor initializing a null box. */
+  inline AlignedBox()
+  { if (AmbientDimAtCompileTime!=Dynamic) setNull(); }
+
+  /** Constructs a null box with \a _dim the dimension of the ambient space. */
+  inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim)
+  { setNull(); }
+
+  /** Constructs a box with extremities \a _min and \a _max. */
+  inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_min(_min), m_max(_max) {}
+
+  /** Constructs a box containing a single point \a p. */
+  inline explicit AlignedBox(const VectorType& p) : m_min(p), m_max(p) {}
+
+  ~AlignedBox() {}
+
+  /** \returns the dimension of the ambient space in which the box lives */
+  inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : AmbientDimAtCompileTime; }
+
+  /** \returns true if the box is null, i.e., empty. */
+  inline bool isNull() const { return (m_min.cwise() > m_max).any(); }
+
+  /** Makes \c *this a null/empty box. */
+  inline void setNull()
+  {
+    m_min.setConstant( (std::numeric_limits<Scalar>::max)());
+    m_max.setConstant(-(std::numeric_limits<Scalar>::max)());
+  }
+
+  /** \returns the minimal corner */
+  inline const VectorType& (min)() const { return m_min; }
+  /** \returns a non const reference to the minimal corner */
+  inline VectorType& (min)() { return m_min; }
+  /** \returns the maximal corner */
+  inline const VectorType& (max)() const { return m_max; }
+  /** \returns a non const reference to the maximal corner */
+  inline VectorType& (max)() { return m_max; }
+
+  /** \returns true if the point \a p is inside the box \c *this. */
+  inline bool contains(const VectorType& p) const
+  { return (m_min.cwise()<=p).all() && (p.cwise()<=m_max).all(); }
+
+  /** \returns true if the box \a b is entirely inside the box \c *this. */
+  inline bool contains(const AlignedBox& b) const
+  { return (m_min.cwise()<=(b.min)()).all() && ((b.max)().cwise()<=m_max).all(); }
+
+  /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
+  inline AlignedBox& extend(const VectorType& p)
+  { m_min = (m_min.cwise().min)(p); m_max = (m_max.cwise().max)(p); return *this; }
+
+  /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
+  inline AlignedBox& extend(const AlignedBox& b)
+  { m_min = (m_min.cwise().min)(b.m_min); m_max = (m_max.cwise().max)(b.m_max); return *this; }
+
+  /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
+  inline AlignedBox& clamp(const AlignedBox& b)
+  { m_min = (m_min.cwise().max)(b.m_min); m_max = (m_max.cwise().min)(b.m_max); return *this; }
+
+  /** Translates \c *this by the vector \a t and returns a reference to \c *this. */
+  inline AlignedBox& translate(const VectorType& t)
+  { m_min += t; m_max += t; return *this; }
+
+  /** \returns the squared distance between the point \a p and the box \c *this,
+    * and zero if \a p is inside the box.
+    * \sa exteriorDistance()
+    */
+  inline Scalar squaredExteriorDistance(const VectorType& p) const;
+
+  /** \returns the distance between the point \a p and the box \c *this,
+    * and zero if \a p is inside the box.
+    * \sa squaredExteriorDistance()
+    */
+  inline Scalar exteriorDistance(const VectorType& p) const
+  { return ei_sqrt(squaredExteriorDistance(p)); }
+
+  /** \returns \c *this with scalar type cast to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<AlignedBox,
+           AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
+  {
+    return typename internal::cast_return_type<AlignedBox,
+                    AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
+  {
+    m_min = (other.min)().template cast<Scalar>();
+    m_max = (other.max)().template cast<Scalar>();
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const AlignedBox& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }
+
+protected:
+
+  VectorType m_min, m_max;
+};
+
+template<typename Scalar,int AmbientDim>
+inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const VectorType& p) const
+{
+  Scalar dist2(0);
+  Scalar aux;
+  for (int k=0; k<dim(); ++k)
+  {
+    if ((aux = (p[k]-m_min[k]))<Scalar(0))
+      dist2 += aux*aux;
+    else if ( (aux = (m_max[k]-p[k]))<Scalar(0))
+      dist2 += aux*aux;
+  }
+  return dist2;
+}
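+
+// Usage sketch (illustrative):
+//
+//   AlignedBox<float,3> box;                  // starts null/empty
+//   box.extend(Vector3f(0,0,0));
+//   box.extend(Vector3f(1,2,3));              // box is now [0,1]x[0,2]x[0,3]
+//   box.contains(Vector3f(0.5f,1.0f,1.5f));   // true
+//   box.exteriorDistance(Vector3f(2,2,3));    // 1: one unit beyond the x extent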
+
+} // end namespace Eigen
diff --git a/Eigen/src/Eigen2Support/Geometry/All.h b/Eigen/src/Eigen2Support/Geometry/All.h
new file mode 100644
index 0000000..e0b00fc
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/All.h
@@ -0,0 +1,115 @@
+#ifndef EIGEN2_GEOMETRY_MODULE_H
+#define EIGEN2_GEOMETRY_MODULE_H
+
+#include <limits>
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+
+#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
+#include "RotationBase.h"
+#include "Rotation2D.h"
+#include "Quaternion.h"
+#include "AngleAxis.h"
+#include "Transform.h"
+#include "Translation.h"
+#include "Scaling.h"
+#include "AlignedBox.h"
+#include "Hyperplane.h"
+#include "ParametrizedLine.h"
+#endif
+
+
+#define RotationBase eigen2_RotationBase
+#define Rotation2D eigen2_Rotation2D
+#define Rotation2Df eigen2_Rotation2Df
+#define Rotation2Dd eigen2_Rotation2Dd
+
+#define Quaternion  eigen2_Quaternion
+#define Quaternionf eigen2_Quaternionf
+#define Quaterniond eigen2_Quaterniond
+
+#define AngleAxis eigen2_AngleAxis
+#define AngleAxisf eigen2_AngleAxisf
+#define AngleAxisd eigen2_AngleAxisd
+
+#define Transform   eigen2_Transform
+#define Transform2f eigen2_Transform2f
+#define Transform2d eigen2_Transform2d
+#define Transform3f eigen2_Transform3f
+#define Transform3d eigen2_Transform3d
+
+#define Translation eigen2_Translation
+#define Translation2f eigen2_Translation2f
+#define Translation2d eigen2_Translation2d
+#define Translation3f eigen2_Translation3f
+#define Translation3d eigen2_Translation3d
+
+#define Scaling eigen2_Scaling
+#define Scaling2f eigen2_Scaling2f
+#define Scaling2d eigen2_Scaling2d
+#define Scaling3f eigen2_Scaling3f
+#define Scaling3d eigen2_Scaling3d
+
+#define AlignedBox eigen2_AlignedBox
+
+#define Hyperplane eigen2_Hyperplane
+#define ParametrizedLine eigen2_ParametrizedLine
+
+#define ei_toRotationMatrix eigen2_ei_toRotationMatrix
+#define ei_quaternion_assign_impl eigen2_ei_quaternion_assign_impl
+#define ei_transform_product_impl eigen2_ei_transform_product_impl
+
+#include "RotationBase.h"
+#include "Rotation2D.h"
+#include "Quaternion.h"
+#include "AngleAxis.h"
+#include "Transform.h"
+#include "Translation.h"
+#include "Scaling.h"
+#include "AlignedBox.h"
+#include "Hyperplane.h"
+#include "ParametrizedLine.h"
+
+#undef ei_toRotationMatrix
+#undef ei_quaternion_assign_impl
+#undef ei_transform_product_impl
+
+#undef RotationBase
+#undef Rotation2D
+#undef Rotation2Df
+#undef Rotation2Dd
+
+#undef Quaternion
+#undef Quaternionf
+#undef Quaterniond
+
+#undef AngleAxis
+#undef AngleAxisf
+#undef AngleAxisd
+
+#undef Transform
+#undef Transform2f
+#undef Transform2d
+#undef Transform3f
+#undef Transform3d
+
+#undef Translation
+#undef Translation2f
+#undef Translation2d
+#undef Translation3f
+#undef Translation3d
+
+#undef Scaling
+#undef Scaling2f
+#undef Scaling2d
+#undef Scaling3f
+#undef Scaling3d
+
+#undef AlignedBox
+
+#undef Hyperplane
+#undef ParametrizedLine
+
+#endif // EIGEN2_GEOMETRY_MODULE_H
diff --git a/Eigen/src/Eigen2Support/Geometry/AngleAxis.h b/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
new file mode 100644
index 0000000..af598a4
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/AngleAxis.h
@@ -0,0 +1,214 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class AngleAxis
+  *
+  * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  *
+  * The following two typedefs are provided for convenience:
+  * \li \c AngleAxisf for \c float
+  * \li \c AngleAxisd for \c double
+  *
+  * \addexample AngleAxisForEuler \label How to define a rotation from Euler angles
+  *
+  * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily
+  * mimic Euler angles. Here is an example:
+  * \include AngleAxis_mimic_euler.cpp
+  * Output: \verbinclude AngleAxis_mimic_euler.out
+  *
+  * \note This class is not meant to be used to store a rotation transformation,
+  * but rather to ease the creation of other rotation objects
+  * (Quaternion, rotation Matrix) and transformation objects.
+  *
+  * \sa class Quaternion, class Transform, MatrixBase::UnitX()
+  */
+
+template<typename _Scalar> struct ei_traits<AngleAxis<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+
+template<typename _Scalar>
+class AngleAxis : public RotationBase<AngleAxis<_Scalar>,3>
+{
+  typedef RotationBase<AngleAxis<_Scalar>,3> Base;
+
+public:
+
+  using Base::operator*;
+
+  enum { Dim = 3 };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef Matrix<Scalar,3,3> Matrix3;
+  typedef Matrix<Scalar,3,1> Vector3;
+  typedef Quaternion<Scalar> QuaternionType;
+
+protected:
+
+  Vector3 m_axis;
+  Scalar m_angle;
+
+public:
+
+  /** Default constructor without initialization. */
+  AngleAxis() {}
+  /** Constructs and initializes the angle-axis rotation from an \a angle in radians
+    * and an \a axis which must be normalized. */
+  template<typename Derived>
+  inline AngleAxis(Scalar angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
+  /** Constructs and initializes the angle-axis rotation from a quaternion \a q. */
+  inline AngleAxis(const QuaternionType& q) { *this = q; }
+  /** Constructs and initializes the angle-axis rotation from a 3x3 rotation matrix. */
+  template<typename Derived>
+  inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }
+
+  Scalar angle() const { return m_angle; }
+  Scalar& angle() { return m_angle; }
+
+  const Vector3& axis() const { return m_axis; }
+  Vector3& axis() { return m_axis; }
+
+  /** Concatenates two rotations */
+  inline QuaternionType operator* (const AngleAxis& other) const
+  { return QuaternionType(*this) * QuaternionType(other); }
+
+  /** Concatenates two rotations */
+  inline QuaternionType operator* (const QuaternionType& other) const
+  { return QuaternionType(*this) * other; }
+
+  /** Concatenates two rotations */
+  friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b)
+  { return a * QuaternionType(b); }
+
+  /** Concatenates two rotations */
+  inline Matrix3 operator* (const Matrix3& other) const
+  { return toRotationMatrix() * other; }
+
+  /** Concatenates two rotations */
+  inline friend Matrix3 operator* (const Matrix3& a, const AngleAxis& b)
+  { return a * b.toRotationMatrix(); }
+
+  /** Applies rotation to vector */
+  inline Vector3 operator* (const Vector3& other) const
+  { return toRotationMatrix() * other; }
+
+  /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */
+  AngleAxis inverse() const
+  { return AngleAxis(-m_angle, m_axis); }
+
+  AngleAxis& operator=(const QuaternionType& q);
+  template<typename Derived>
+  AngleAxis& operator=(const MatrixBase<Derived>& m);
+
+  template<typename Derived>
+  AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m);
+  Matrix3 toRotationMatrix(void) const;
+
+  /** \returns \c *this with scalar type cast to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit AngleAxis(const AngleAxis<OtherScalarType>& other)
+  {
+    m_axis = other.axis().template cast<Scalar>();
+    m_angle = Scalar(other.angle());
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const AngleAxis& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_axis.isApprox(other.m_axis, prec) && ei_isApprox(m_angle,other.m_angle, prec); }
+};
+
+/** \ingroup Geometry_Module
+  * single precision angle-axis type */
+typedef AngleAxis<float> AngleAxisf;
+/** \ingroup Geometry_Module
+  * double precision angle-axis type */
+typedef AngleAxis<double> AngleAxisd;
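+
+// Sketch along the lines of the Euler-angle example referenced above
+// (illustrative; angle1..angle3 are hypothetical angles in radians):
+//
+//   Matrix3f m;
+//   m = AngleAxisf(angle1, Vector3f::UnitZ())
+//     * AngleAxisf(angle2, Vector3f::UnitY())
+//     * AngleAxisf(angle3, Vector3f::UnitZ());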
+
+/** Set \c *this from a quaternion.
+  * The axis is normalized.
+  */
+template<typename Scalar>
+AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionType& q)
+{
+  Scalar n2 = q.vec().squaredNorm();
+  if (n2 < precision<Scalar>()*precision<Scalar>())
+  {
+    m_angle = 0;
+    m_axis << 1, 0, 0;
+  }
+  else
+  {
+    m_angle = 2*std::acos(q.w());
+    m_axis = q.vec() / ei_sqrt(n2);
+  }
+  return *this;
+}
+
+/** Set \c *this from a 3x3 rotation matrix \a mat.
+  */
+template<typename Scalar>
+template<typename Derived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)
+{
+  // Since a direct conversion would not really be faster,
+  // let's use the robust Quaternion implementation:
+  return *this = QuaternionType(mat);
+}
+
+/** Constructs and \returns an equivalent 3x3 rotation matrix.
+  */
+template<typename Scalar>
+typename AngleAxis<Scalar>::Matrix3
+AngleAxis<Scalar>::toRotationMatrix(void) const
+{
+  Matrix3 res;
+  Vector3 sin_axis  = ei_sin(m_angle) * m_axis;
+  Scalar c = ei_cos(m_angle);
+  Vector3 cos1_axis = (Scalar(1)-c) * m_axis;
+
+  Scalar tmp;
+  tmp = cos1_axis.x() * m_axis.y();
+  res.coeffRef(0,1) = tmp - sin_axis.z();
+  res.coeffRef(1,0) = tmp + sin_axis.z();
+
+  tmp = cos1_axis.x() * m_axis.z();
+  res.coeffRef(0,2) = tmp + sin_axis.y();
+  res.coeffRef(2,0) = tmp - sin_axis.y();
+
+  tmp = cos1_axis.y() * m_axis.z();
+  res.coeffRef(1,2) = tmp - sin_axis.x();
+  res.coeffRef(2,1) = tmp + sin_axis.x();
+
+  res.diagonal() = (cos1_axis.cwise() * m_axis).cwise() + c;
+
+  return res;
+}
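+
+// The coefficient-wise construction above is Rodrigues' rotation formula written
+// out explicitly:  R = c*I + (1-c)*a*a^T + sin(angle)*[a]_x,
+// where a is the unit axis, c = cos(angle), and [a]_x is the cross-product matrix of a.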
+
+} // end namespace Eigen
diff --git a/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt b/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt
new file mode 100644
index 0000000..c347a8f
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Eigen2Support_Geometry_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Eigen2Support_Geometry_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Eigen2Support/Geometry
+  )
diff --git a/Eigen/src/Eigen2Support/Geometry/Hyperplane.h b/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
new file mode 100644
index 0000000..b95bf00
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/Hyperplane.h
@@ -0,0 +1,254 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Hyperplane
+  *
+  * \brief A hyperplane
+  *
+  * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n.
+  * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  *             Notice that the dimension of the hyperplane is _AmbientDim-1.
+  *
+  * This class represents a hyperplane as the zero set of the implicit equation
+  * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part)
+  * and \f$ d \f$ is the distance (offset) to the origin.
+  */
+template <typename _Scalar, int _AmbientDim>
+class Hyperplane
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
+  enum { AmbientDimAtCompileTime = _AmbientDim };
+  typedef _Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+  typedef Matrix<Scalar,int(AmbientDimAtCompileTime)==Dynamic
+                        ? Dynamic
+                        : int(AmbientDimAtCompileTime)+1,1> Coefficients;
+  typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
+
+  /** Default constructor without initialization */
+  inline Hyperplane() {}
+
+  /** Constructs a dynamic-size hyperplane with \a _dim the dimension
+    * of the ambient space */
+  inline explicit Hyperplane(int _dim) : m_coeffs(_dim+1) {}
+
+  /** Constructs a plane from its normal \a n and a point \a e on the plane.
+    * \warning the normal vector is assumed to be normalized.
+    */
+  inline Hyperplane(const VectorType& n, const VectorType& e)
+    : m_coeffs(n.size()+1)
+  {
+    normal() = n;
+    offset() = -e.eigen2_dot(n);
+  }
+
+  /** Constructs a plane from its normal \a n and distance to the origin \a d
+    * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$.
+    * \warning the normal vector is assumed to be normalized.
+    */
+  inline Hyperplane(const VectorType& n, Scalar d)
+    : m_coeffs(n.size()+1)
+  {
+    normal() = n;
+    offset() = d;
+  }
+
+  /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space
+    * is greater than 2, the hyperplane is not uniquely determined, so an arbitrary choice is made.
+    */
+  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1)
+  {
+    Hyperplane result(p0.size());
+    result.normal() = (p1 - p0).unitOrthogonal();
+    result.offset() = -result.normal().eigen2_dot(p0);
+    return result;
+  }
+
+  /** Constructs a hyperplane passing through the three points. The dimension of the ambient space
+    * is required to be exactly 3.
+    */
+  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2)
+  {
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
+    Hyperplane result(p0.size());
+    result.normal() = (p2 - p0).cross(p1 - p0).normalized();
+    result.offset() = -result.normal().eigen2_dot(p0);
+    return result;
+  }
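+
+  // Usage sketch (illustrative): a 2D line through two points,
+  //
+  //   typedef Hyperplane<float,2> Line2;
+  //   Line2 l = Line2::Through(Vector2f(0,0), Vector2f(1,1));
+  //   l.absDistance(Vector2f(1,0));   // ~0.7071f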
+
+  /** Constructs a hyperplane passing through the parametrized line \a parametrized.
+    * If the dimension of the ambient space is greater than 2, the hyperplane is not uniquely
+    * determined, so an arbitrary choice is made.
+    */
+  // FIXME to be consistent with the rest this could be implemented as a static Through function ??
+  explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
+  {
+    normal() = parametrized.direction().unitOrthogonal();
+    offset() = -normal().eigen2_dot(parametrized.origin());
+  }
+
+  ~Hyperplane() {}
+
+  /** \returns the dimension of the ambient space in which the plane lives */
+  inline int dim() const { return int(AmbientDimAtCompileTime)==Dynamic ? m_coeffs.size()-1 : int(AmbientDimAtCompileTime); }
+
+  /** normalizes \c *this */
+  void normalize(void)
+  {
+    m_coeffs /= normal().norm();
+  }
+
+  /** \returns the signed distance between the plane \c *this and a point \a p.
+    * \sa absDistance()
+    */
+  inline Scalar signedDistance(const VectorType& p) const { return p.eigen2_dot(normal()) + offset(); }
+
+  /** \returns the absolute distance between the plane \c *this and a point \a p.
+    * \sa signedDistance()
+    */
+  inline Scalar absDistance(const VectorType& p) const { return ei_abs(signedDistance(p)); }
+
+  /** \returns the projection of a point \a p onto the plane \c *this.
+    */
+  inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); }
+
+  /** \returns a constant reference to the unit normal vector of the plane, which corresponds
+    * to the linear part of the implicit equation.
+    */
+  inline const NormalReturnType normal() const { return NormalReturnType(*const_cast<Coefficients*>(&m_coeffs),0,0,dim(),1); }
+
+  /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds
+    * to the linear part of the implicit equation.
+    */
+  inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); }
+
+  /** \returns the distance to the origin, which is also the "constant term" of the implicit equation
+    * \warning the normal vector is assumed to be normalized.
+    */
+  inline const Scalar& offset() const { return m_coeffs.coeff(dim()); }
+
+  /** \returns a non-constant reference to the distance to the origin, which is also the constant part
+    * of the implicit equation */
+  inline Scalar& offset() { return m_coeffs(dim()); }
+
+  /** \returns a constant reference to the coefficients c_i of the plane equation:
+    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+    */
+  inline const Coefficients& coeffs() const { return m_coeffs; }
+
+  /** \returns a non-constant reference to the coefficients c_i of the plane equation:
+    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+    */
+  inline Coefficients& coeffs() { return m_coeffs; }
+
+  /** \returns the intersection of *this with \a other.
+    *
+    * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines.
+    *
+    * \note If \a other is approximately parallel to *this, this method will return any point on *this.
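+    *
+    * A small sketch of the 2D case (for illustration only):
+    * \code
+    * typedef Hyperplane<float,2> Line2;
+    * Line2 l1 = Line2::Through(Vector2f(0,0), Vector2f(1,0)); // the x axis
+    * Line2 l2 = Line2::Through(Vector2f(0,0), Vector2f(0,1)); // the y axis
+    * Vector2f p = l1.intersection(l2);                        // p == (0,0)
+    * \endcode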
+    */
+  VectorType intersection(const Hyperplane& other)
+  {
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
+    Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
+    // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests
+    // whether the two lines are approximately parallel.
+    if(ei_isMuchSmallerThan(det, Scalar(1)))
+    {   // special case where the two lines are approximately parallel. Pick any point on the first line.
+        if(ei_abs(coeffs().coeff(1))>ei_abs(coeffs().coeff(0)))
+            return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));
+        else
+            return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));
+    }
+    else
+    {   // general case
+        Scalar invdet = Scalar(1) / det;
+        return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)),
+                          invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2)));
+    }
+  }
+
+  /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this.
+    *
+    * \param mat the Dim x Dim transformation matrix
+    * \param traits specifies whether the matrix \a mat represents an Isometry
+    *               or a more generic Affine transformation. The default is Affine.
+    */
+  template<typename XprType>
+  inline Hyperplane& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine)
+  {
+    if (traits==Affine)
+      normal() = mat.inverse().transpose() * normal();
+    else if (traits==Isometry)
+      normal() = mat * normal();
+    else
+    {
+      ei_assert("invalid traits value in Hyperplane::transform()");
+    }
+    return *this;
+  }
+
+  /** Applies the transformation \a t to \c *this and returns a reference to \c *this.
+    *
+    * \param t the transformation of dimension Dim
+    * \param traits specifies whether the transformation \a t represents an Isometry
+    *               or a more generic Affine transformation. The default is Affine.
+    *               Other kinds of transformations are not supported.
+    */
+  inline Hyperplane& transform(const Transform<Scalar,AmbientDimAtCompileTime>& t,
+                                TransformTraits traits = Affine)
+  {
+    transform(t.linear(), traits);
+    offset() -= t.translation().eigen2_dot(normal());
+    return *this;
+  }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Hyperplane,
+           Hyperplane<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
+  {
+    return typename internal::cast_return_type<Hyperplane,
+                    Hyperplane<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Hyperplane(const Hyperplane<OtherScalarType,AmbientDimAtCompileTime>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Hyperplane& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+protected:
+
+  Coefficients m_coeffs;
+};
+
+} // end namespace Eigen
diff --git a/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h b/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
new file mode 100644
index 0000000..9b57b7e
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/ParametrizedLine.h
@@ -0,0 +1,141 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class ParametrizedLine
+  *
+  * \brief A parametrized line
+  *
+  * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit
+  * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to
+  * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
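+  *
+  * A minimal usage sketch (for illustration only; assumes 3D \c float types):
+  * \code
+  * ParametrizedLine<float,3> line(Vector3f::Zero(), Vector3f::UnitZ()); // the z axis
+  * float d    = line.distance(Vector3f(3,4,7));    // d == 5
+  * Vector3f q = line.projection(Vector3f(3,4,7));  // q == (0,0,7)
+  * \endcode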
+  */
+template <typename _Scalar, int _AmbientDim>
+class ParametrizedLine
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
+  enum { AmbientDimAtCompileTime = _AmbientDim };
+  typedef _Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+
+  /** Default constructor without initialization */
+  inline ParametrizedLine() {}
+
+  /** Constructs a dynamic-size line with \a _dim the dimension
+    * of the ambient space */
+  inline explicit ParametrizedLine(int _dim) : m_origin(_dim), m_direction(_dim) {}
+
+  /** Initializes a parametrized line of direction \a direction and origin \a origin.
+    * \warning the vector direction is assumed to be normalized.
+    */
+  ParametrizedLine(const VectorType& origin, const VectorType& direction)
+    : m_origin(origin), m_direction(direction) {}
+
+  explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane);
+
+  /** Constructs a parametrized line going from \a p0 to \a p1. */
+  static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1)
+  { return ParametrizedLine(p0, (p1-p0).normalized()); }
+
+  ~ParametrizedLine() {}
+
+  /** \returns the dimension of the ambient space */
+  inline int dim() const { return m_direction.size(); }
+
+  const VectorType& origin() const { return m_origin; }
+  VectorType& origin() { return m_origin; }
+
+  const VectorType& direction() const { return m_direction; }
+  VectorType& direction() { return m_direction; }
+
+  /** \returns the squared distance of a point \a p to its projection onto the line \c *this.
+    * \sa distance()
+    */
+  RealScalar squaredDistance(const VectorType& p) const
+  {
+    VectorType diff = p-origin();
+    return (diff - diff.eigen2_dot(direction())* direction()).squaredNorm();
+  }
+  /** \returns the distance of a point \a p to its projection onto the line \c *this.
+    * \sa squaredDistance()
+    */
+  RealScalar distance(const VectorType& p) const { return ei_sqrt(squaredDistance(p)); }
+
+  /** \returns the projection of a point \a p onto the line \c *this. */
+  VectorType projection(const VectorType& p) const
+  { return origin() + (p-origin()).eigen2_dot(direction()) * direction(); }
+
+  Scalar intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane);
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<ParametrizedLine,
+           ParametrizedLine<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
+  {
+    return typename internal::cast_return_type<ParametrizedLine,
+                    ParametrizedLine<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit ParametrizedLine(const ParametrizedLine<OtherScalarType,AmbientDimAtCompileTime>& other)
+  {
+    m_origin = other.origin().template cast<Scalar>();
+    m_direction = other.direction().template cast<Scalar>();
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const ParametrizedLine& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }
+
+protected:
+
+  VectorType m_origin, m_direction;
+};
+
+/** Constructs a parametrized line from a 2D hyperplane
+  *
+  * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line
+  */
+template <typename _Scalar, int _AmbientDim>
+inline ParametrizedLine<_Scalar, _AmbientDim>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
+  direction() = hyperplane.normal().unitOrthogonal();
+  origin() = -hyperplane.normal()*hyperplane.offset();
+}
+
+/** \returns the parameter value of the intersection between \c *this and the given hyperplane
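+  *
+  * For instance (an illustrative sketch):
+  * \code
+  * ParametrizedLine<float,3> line(Vector3f::Zero(), Vector3f::UnitZ());
+  * Hyperplane<float,3> plane(Vector3f::UnitZ(), -2.0f); // the plane z = 2
+  * float t = line.intersection(plane);                  // t == 2, i.e., origin() + 2*direction()
+  * \endcode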
+  */
+template <typename _Scalar, int _AmbientDim>
+inline _Scalar ParametrizedLine<_Scalar, _AmbientDim>::intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane)
+{
+  return -(hyperplane.offset()+origin().eigen2_dot(hyperplane.normal()))
+          /(direction().eigen2_dot(hyperplane.normal()));
+}
+
+} // end namespace Eigen
diff --git a/Eigen/src/Eigen2Support/Geometry/Quaternion.h b/Eigen/src/Eigen2Support/Geometry/Quaternion.h
new file mode 100644
index 0000000..4b6390c
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/Quaternion.h
@@ -0,0 +1,495 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+template<typename Other,
+         int OtherRows=Other::RowsAtCompileTime,
+         int OtherCols=Other::ColsAtCompileTime>
+struct ei_quaternion_assign_impl;
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Quaternion
+  *
+  * \brief The quaternion class used to represent 3D orientations and rotations
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  *
+  * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
+  * orientations and rotations of objects in three dimensions. Compared to other representations
+  * like Euler angles or 3x3 matrices, quaternions offer the following advantages:
+  * \li \b compact storage (4 scalars)
+  * \li \b efficient to compose (28 flops),
+  * \li \b stable spherical interpolation
+  *
+  * The following two typedefs are provided for convenience:
+  * \li \c Quaternionf for \c float
+  * \li \c Quaterniond for \c double
+  *
+  * \sa  class AngleAxis, class Transform
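+  *
+  * A minimal usage sketch (for illustration only; uses the \c AngleAxisf and \c Vector3f typedefs):
+  * \code
+  * Quaternionf q(AngleAxisf(1.57079633f, Vector3f::UnitZ())); // ~90 degrees about z
+  * Vector3f v = q * Vector3f::UnitX();                        // v is approximately UnitY()
+  * \endcode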
+  */
+
+template<typename _Scalar> struct ei_traits<Quaternion<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+
+template<typename _Scalar>
+class Quaternion : public RotationBase<Quaternion<_Scalar>,3>
+{
+  typedef RotationBase<Quaternion<_Scalar>,3> Base;
+
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,4)
+
+  using Base::operator*;
+
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+
+  /** the type of the Coefficients 4-vector */
+  typedef Matrix<Scalar, 4, 1> Coefficients;
+  /** the type of a 3D vector */
+  typedef Matrix<Scalar,3,1> Vector3;
+  /** the equivalent rotation matrix type */
+  typedef Matrix<Scalar,3,3> Matrix3;
+  /** the equivalent angle-axis type */
+  typedef AngleAxis<Scalar> AngleAxisType;
+
+  /** \returns the \c x coefficient */
+  inline Scalar x() const { return m_coeffs.coeff(0); }
+  /** \returns the \c y coefficient */
+  inline Scalar y() const { return m_coeffs.coeff(1); }
+  /** \returns the \c z coefficient */
+  inline Scalar z() const { return m_coeffs.coeff(2); }
+  /** \returns the \c w coefficient */
+  inline Scalar w() const { return m_coeffs.coeff(3); }
+
+  /** \returns a reference to the \c x coefficient */
+  inline Scalar& x() { return m_coeffs.coeffRef(0); }
+  /** \returns a reference to the \c y coefficient */
+  inline Scalar& y() { return m_coeffs.coeffRef(1); }
+  /** \returns a reference to the \c z coefficient */
+  inline Scalar& z() { return m_coeffs.coeffRef(2); }
+  /** \returns a reference to the \c w coefficient */
+  inline Scalar& w() { return m_coeffs.coeffRef(3); }
+
+  /** \returns a read-only vector expression of the imaginary part (x,y,z) */
+  inline const Block<const Coefficients,3,1> vec() const { return m_coeffs.template start<3>(); }
+
+  /** \returns a vector expression of the imaginary part (x,y,z) */
+  inline Block<Coefficients,3,1> vec() { return m_coeffs.template start<3>(); }
+
+  /** \returns a read-only vector expression of the coefficients (x,y,z,w) */
+  inline const Coefficients& coeffs() const { return m_coeffs; }
+
+  /** \returns a vector expression of the coefficients (x,y,z,w) */
+  inline Coefficients& coeffs() { return m_coeffs; }
+
+  /** Default constructor leaving the quaternion uninitialized. */
+  inline Quaternion() {}
+
+  /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from
+    * its four coefficients \a w, \a x, \a y and \a z.
+    *
+    * \warning Note the order of the arguments: the real \a w coefficient first,
+    * while internally the coefficients are stored in the following order:
+    * [\c x, \c y, \c z, \c w]
+    */
+  inline Quaternion(Scalar w, Scalar x, Scalar y, Scalar z)
+  { m_coeffs << x, y, z, w; }
+
+  /** Copy constructor */
+  inline Quaternion(const Quaternion& other) { m_coeffs = other.m_coeffs; }
+
+  /** Constructs and initializes a quaternion from the angle-axis \a aa */
+  explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; }
+
+  /** Constructs and initializes a quaternion from either:
+    *  - a rotation matrix expression,
+    *  - a 4D vector expression representing quaternion coefficients.
+    * \sa operator=(MatrixBase<Derived>)
+    */
+  template<typename Derived>
+  explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; }
+
+  Quaternion& operator=(const Quaternion& other);
+  Quaternion& operator=(const AngleAxisType& aa);
+  template<typename Derived>
+  Quaternion& operator=(const MatrixBase<Derived>& m);
+
+  /** \returns a quaternion representing an identity rotation
+    * \sa MatrixBase::Identity()
+    */
+  static inline Quaternion Identity() { return Quaternion(1, 0, 0, 0); }
+
+  /** \sa Quaternion::Identity(), MatrixBase::setIdentity()
+    */
+  inline Quaternion& setIdentity() { m_coeffs << 0, 0, 0, 1; return *this; }
+
+  /** \returns the squared norm of the quaternion's coefficients
+    * \sa Quaternion::norm(), MatrixBase::squaredNorm()
+    */
+  inline Scalar squaredNorm() const { return m_coeffs.squaredNorm(); }
+
+  /** \returns the norm of the quaternion's coefficients
+    * \sa Quaternion::squaredNorm(), MatrixBase::norm()
+    */
+  inline Scalar norm() const { return m_coeffs.norm(); }
+
+  /** Normalizes the quaternion \c *this
+    * \sa normalized(), MatrixBase::normalize() */
+  inline void normalize() { m_coeffs.normalize(); }
+  /** \returns a normalized version of \c *this
+    * \sa normalize(), MatrixBase::normalized() */
+  inline Quaternion normalized() const { return Quaternion(m_coeffs.normalized()); }
+
+  /** \returns the dot product of \c *this and \a other
+    * Geometrically speaking, the dot product of two unit quaternions
+    * corresponds to the cosine of half the angle between the two rotations.
+    * \sa angularDistance()
+    */
+  inline Scalar eigen2_dot(const Quaternion& other) const { return m_coeffs.eigen2_dot(other.m_coeffs); }
+
+  inline Scalar angularDistance(const Quaternion& other) const;
+
+  Matrix3 toRotationMatrix(void) const;
+
+  template<typename Derived1, typename Derived2>
+  Quaternion& setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
+
+  inline Quaternion operator* (const Quaternion& q) const;
+  inline Quaternion& operator*= (const Quaternion& q);
+
+  Quaternion inverse(void) const;
+  Quaternion conjugate(void) const;
+
+  Quaternion slerp(Scalar t, const Quaternion& other) const;
+
+  template<typename Derived>
+  Vector3 operator* (const MatrixBase<Derived>& vec) const;
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Quaternion,Quaternion<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<Quaternion,Quaternion<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Quaternion(const Quaternion<OtherScalarType>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Quaternion& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+protected:
+  Coefficients m_coeffs;
+};
+
+/** \ingroup Geometry_Module
+  * single precision quaternion type */
+typedef Quaternion<float> Quaternionf;
+/** \ingroup Geometry_Module
+  * double precision quaternion type */
+typedef Quaternion<double> Quaterniond;
+
+// Generic Quaternion * Quaternion product
+template<typename Scalar> inline Quaternion<Scalar>
+ei_quaternion_product(const Quaternion<Scalar>& a, const Quaternion<Scalar>& b)
+{
+  return Quaternion<Scalar>
+  (
+    a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),
+    a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),
+    a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),
+    a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x()
+  );
+}
+
+/** \returns the concatenation of two rotations as a quaternion-quaternion product */
+template <typename Scalar>
+inline Quaternion<Scalar> Quaternion<Scalar>::operator* (const Quaternion& other) const
+{
+  return ei_quaternion_product(*this,other);
+}
+
+/** \sa operator*(Quaternion) */
+template <typename Scalar>
+inline Quaternion<Scalar>& Quaternion<Scalar>::operator*= (const Quaternion& other)
+{
+  return (*this = *this * other);
+}
+
+/** Rotation of a vector by a quaternion.
+  * \remarks If the quaternion is used to rotate several points (>1)
+  * then it is much more efficient to first convert it to a 3x3 Matrix.
+  * Comparison of the operation cost for n transformations:
+  *   - Quaternion:    30n
+  *   - Via a Matrix3: 24 + 15n
+  */
+template <typename Scalar>
+template<typename Derived>
+inline typename Quaternion<Scalar>::Vector3
+Quaternion<Scalar>::operator* (const MatrixBase<Derived>& v) const
+{
+    // Note that this algorithm comes from the optimization by hand
+    // of the conversion to a Matrix followed by a Matrix/Vector product.
+    // It appears to be much faster than the common algorithm found
+    // in the literature (30 versus 39 flops). It also requires two
+    // Vector3 as temporaries.
+    Vector3 uv;
+    uv = 2 * this->vec().cross(v);
+    return v + this->w() * uv + this->vec().cross(uv);
+}
+
+template<typename Scalar>
+inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const Quaternion& other)
+{
+  m_coeffs = other.m_coeffs;
+  return *this;
+}
+
+/** Set \c *this from an angle-axis \a aa and return a reference to \c *this
+  */
+template<typename Scalar>
+inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const AngleAxisType& aa)
+{
+  Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings
+  this->w() = ei_cos(ha);
+  this->vec() = ei_sin(ha) * aa.axis();
+  return *this;
+}
+
+/** Set \c *this from the expression \a xpr:
+  *   - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion
+  *   - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be a rotation matrix
+  *     and \a xpr is converted to a quaternion
+  */
+template<typename Scalar>
+template<typename Derived>
+inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const MatrixBase<Derived>& xpr)
+{
+  ei_quaternion_assign_impl<Derived>::run(*this, xpr.derived());
+  return *this;
+}
+
+/** Convert the quaternion to a 3x3 rotation matrix */
+template<typename Scalar>
+inline typename Quaternion<Scalar>::Matrix3
+Quaternion<Scalar>::toRotationMatrix(void) const
+{
+  // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!)
+  // if not inlined then the cost of the return by value is huge ~ +35%,
+  // however, not inlining this function is an order of magnitude slower, so
+  // it has to be inlined, and so the return by value is not an issue
+  Matrix3 res;
+
+  const Scalar tx  = Scalar(2)*this->x();
+  const Scalar ty  = Scalar(2)*this->y();
+  const Scalar tz  = Scalar(2)*this->z();
+  const Scalar twx = tx*this->w();
+  const Scalar twy = ty*this->w();
+  const Scalar twz = tz*this->w();
+  const Scalar txx = tx*this->x();
+  const Scalar txy = ty*this->x();
+  const Scalar txz = tz*this->x();
+  const Scalar tyy = ty*this->y();
+  const Scalar tyz = tz*this->y();
+  const Scalar tzz = tz*this->z();
+
+  res.coeffRef(0,0) = Scalar(1)-(tyy+tzz);
+  res.coeffRef(0,1) = txy-twz;
+  res.coeffRef(0,2) = txz+twy;
+  res.coeffRef(1,0) = txy+twz;
+  res.coeffRef(1,1) = Scalar(1)-(txx+tzz);
+  res.coeffRef(1,2) = tyz-twx;
+  res.coeffRef(2,0) = txz-twy;
+  res.coeffRef(2,1) = tyz+twx;
+  res.coeffRef(2,2) = Scalar(1)-(txx+tyy);
+
+  return res;
+}
+
+/** Sets *this to be a quaternion representing a rotation sending the vector \a a to the vector \a b.
+  *
+  * \returns a reference to *this.
+  *
+  * Note that the two input vectors do \b not have to be normalized.
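+  *
+  * For instance (an illustrative sketch):
+  * \code
+  * Quaternionf q;
+  * q.setFromTwoVectors(Vector3f(2,0,0), Vector3f(0,3,0)); // inputs need not be unit vectors
+  * Vector3f v = q * Vector3f::UnitX();                    // v is approximately UnitY()
+  * \endcode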
+  */
+template<typename Scalar>
+template<typename Derived1, typename Derived2>
+inline Quaternion<Scalar>& Quaternion<Scalar>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
+{
+  Vector3 v0 = a.normalized();
+  Vector3 v1 = b.normalized();
+  Scalar c = v0.eigen2_dot(v1);
+
+  // if dot == 1, vectors are the same
+  if (ei_isApprox(c,Scalar(1)))
+  {
+    // set to identity
+    this->w() = 1; this->vec().setZero();
+    return *this;
+  }
+  // if dot == -1, vectors are opposites
+  if (ei_isApprox(c,Scalar(-1)))
+  {
+    this->vec() = v0.unitOrthogonal();
+    this->w() = 0;
+    return *this;
+  }
+
+  Vector3 axis = v0.cross(v1);
+  Scalar s = ei_sqrt((Scalar(1)+c)*Scalar(2));
+  Scalar invs = Scalar(1)/s;
+  this->vec() = axis * invs;
+  this->w() = s * Scalar(0.5);
+
+  return *this;
+}
+
+/** \returns the multiplicative inverse of \c *this
+  * Note that in most cases, i.e., if you simply want the opposite rotation,
+  * and/or the quaternion is normalized, then it is enough to use the conjugate.
+  *
+  * \sa Quaternion::conjugate()
+  */
+template <typename Scalar>
+inline Quaternion<Scalar> Quaternion<Scalar>::inverse() const
+{
+  // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite()  ??
+  Scalar n2 = this->squaredNorm();
+  if (n2 > 0)
+    return Quaternion(conjugate().coeffs() / n2);
+  else
+  {
+    // return an invalid result to flag the error
+    return Quaternion(Coefficients::Zero());
+  }
+}
+
+/** \returns the conjugate of \c *this, which is equal to the multiplicative inverse
+  * if the quaternion is normalized.
+  * The conjugate of a quaternion represents the opposite rotation.
+  *
+  * \sa Quaternion::inverse()
+  */
+template <typename Scalar>
+inline Quaternion<Scalar> Quaternion<Scalar>::conjugate() const
+{
+  return Quaternion(this->w(),-this->x(),-this->y(),-this->z());
+}
+
+/** \returns the angle (in radians) between two rotations
+  * \sa eigen2_dot()
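+  *
+  * For instance (an illustrative sketch):
+  * \code
+  * Quaternionf a = Quaternionf::Identity();
+  * Quaternionf b(AngleAxisf(1.0f, Vector3f::UnitX()));
+  * float d = a.angularDistance(b); // d == 1 radian
+  * \endcode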
+  */
+template <typename Scalar>
+inline Scalar Quaternion<Scalar>::angularDistance(const Quaternion& other) const
+{
+  double d = ei_abs(this->eigen2_dot(other));
+  if (d>=1.0)
+    return 0;
+  return Scalar(2) * std::acos(d);
+}
+
+/** \returns the spherical linear interpolation between the two quaternions
+  * \c *this and \a other at the parameter \a t
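+  *
+  * For instance (an illustrative sketch), the halfway interpolation between the identity
+  * and a quarter turn is an eighth of a turn:
+  * \code
+  * Quaternionf a = Quaternionf::Identity();
+  * Quaternionf b(AngleAxisf(1.57079633f, Vector3f::UnitZ())); // ~90 degrees about z
+  * Quaternionf h = a.slerp(0.5f, b);                          // ~45 degrees about z
+  * \endcode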
+  */
+template <typename Scalar>
+Quaternion<Scalar> Quaternion<Scalar>::slerp(Scalar t, const Quaternion& other) const
+{
+  static const Scalar one = Scalar(1) - machine_epsilon<Scalar>();
+  Scalar d = this->eigen2_dot(other);
+  Scalar absD = ei_abs(d);
+
+  Scalar scale0;
+  Scalar scale1;
+
+  if (absD>=one)
+  {
+    scale0 = Scalar(1) - t;
+    scale1 = t;
+  }
+  else
+  {
+    // theta is the angle between the 2 quaternions
+    Scalar theta = std::acos(absD);
+    Scalar sinTheta = ei_sin(theta);
+
+    scale0 = ei_sin( ( Scalar(1) - t ) * theta) / sinTheta;
+    scale1 = ei_sin( ( t * theta) ) / sinTheta;
+    if (d<0)
+      scale1 = -scale1;
+  }
+
+  return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
+}
+
+// set from a rotation matrix
+template<typename Other>
+struct ei_quaternion_assign_impl<Other,3,3>
+{
+  typedef typename Other::Scalar Scalar;
+  static inline void run(Quaternion<Scalar>& q, const Other& mat)
+  {
+    // This algorithm comes from  "Quaternion Calculus and Fast Animation",
+    // Ken Shoemake, 1987 SIGGRAPH course notes
+    Scalar t = mat.trace();
+    if (t > 0)
+    {
+      t = ei_sqrt(t + Scalar(1.0));
+      q.w() = Scalar(0.5)*t;
+      t = Scalar(0.5)/t;
+      q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t;
+      q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t;
+      q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t;
+    }
+    else
+    {
+      int i = 0;
+      if (mat.coeff(1,1) > mat.coeff(0,0))
+        i = 1;
+      if (mat.coeff(2,2) > mat.coeff(i,i))
+        i = 2;
+      int j = (i+1)%3;
+      int k = (j+1)%3;
+
+      t = ei_sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
+      q.coeffs().coeffRef(i) = Scalar(0.5) * t;
+      t = Scalar(0.5)/t;
+      q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t;
+      q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t;
+      q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t;
+    }
+  }
+};
+
+// set from a vector of coefficients assumed to be a quaternion
+template<typename Other>
+struct ei_quaternion_assign_impl<Other,4,1>
+{
+  typedef typename Other::Scalar Scalar;
+  static inline void run(Quaternion<Scalar>& q, const Other& vec)
+  {
+    q.coeffs() = vec;
+  }
+};
+
+} // end namespace Eigen
diff --git a/Eigen/src/Eigen2Support/Geometry/Rotation2D.h b/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
new file mode 100644
index 0000000..19b8582
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/Rotation2D.h
@@ -0,0 +1,145 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Rotation2D
+  *
+  * \brief Represents a rotation/orientation in a 2 dimensional space.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  *
+  * This class is equivalent to a single scalar representing a counter-clockwise rotation
+  * as a single angle in radians. It provides some additional features such as the automatic
+  * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar
+  * interface to Quaternion in order to facilitate the writing of generic algorithms
+  * dealing with rotations.
+  *
+  * \sa class Quaternion, class Transform
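+  *
+  * A minimal usage sketch (for illustration only):
+  * \code
+  * Rotation2Df r(1.57079633f);      // ~90 degree counter-clockwise rotation
+  * Vector2f v = r * Vector2f(1,0);  // v is approximately (0,1)
+  * \endcode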
+  */
+template<typename _Scalar> struct ei_traits<Rotation2D<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+
+template<typename _Scalar>
+class Rotation2D : public RotationBase<Rotation2D<_Scalar>,2>
+{
+  typedef RotationBase<Rotation2D<_Scalar>,2> Base;
+
+public:
+
+  using Base::operator*;
+
+  enum { Dim = 2 };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef Matrix<Scalar,2,1> Vector2;
+  typedef Matrix<Scalar,2,2> Matrix2;
+
+protected:
+
+  Scalar m_angle;
+
+public:
+
+  /** Constructs a 2D counter-clockwise rotation from the angle \a a in radians. */
+  inline Rotation2D(Scalar a) : m_angle(a) {}
+
+  /** \returns the rotation angle */
+  inline Scalar angle() const { return m_angle; }
+
+  /** \returns a read-write reference to the rotation angle */
+  inline Scalar& angle() { return m_angle; }
+
+  /** \returns the inverse rotation */
+  inline Rotation2D inverse() const { return -m_angle; }
+
+  /** Concatenates two rotations */
+  inline Rotation2D operator*(const Rotation2D& other) const
+  { return m_angle + other.m_angle; }
+
+  /** Concatenates two rotations */
+  inline Rotation2D& operator*=(const Rotation2D& other)
+  { m_angle += other.m_angle; return *this; }
+
+  /** Applies the rotation to a 2D vector */
+  Vector2 operator* (const Vector2& vec) const
+  { return toRotationMatrix() * vec; }
+
+  template<typename Derived>
+  Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
+  Matrix2 toRotationMatrix(void) const;
+
+  /** \returns the spherical interpolation between \c *this and \a other using
+    * parameter \a t. It is in fact equivalent to a linear interpolation.
+    */
+  inline Rotation2D slerp(Scalar t, const Rotation2D& other) const
+  { return m_angle * (1-t) + other.angle() * t; }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Rotation2D(const Rotation2D<OtherScalarType>& other)
+  {
+    m_angle = Scalar(other.angle());
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Rotation2D& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return ei_isApprox(m_angle,other.m_angle, prec); }
+};
+
+/** \ingroup Geometry_Module
+  * single precision 2D rotation type */
+typedef Rotation2D<float> Rotation2Df;
+/** \ingroup Geometry_Module
+  * double precision 2D rotation type */
+typedef Rotation2D<double> Rotation2Dd;
+
+/** Set \c *this from a 2x2 rotation matrix \a mat.
+  * In other words, this function extracts the rotation angle
+  * from the rotation matrix.
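+  *
+  * For instance (an illustrative sketch):
+  * \code
+  * Matrix2f m;
+  * m << 0, -1,
+  *      1,  0;               // a 90 degree counter-clockwise rotation
+  * Rotation2Df r(0);
+  * r.fromRotationMatrix(m);  // r.angle() == pi/2
+  * \endcode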
+  */
+template<typename Scalar>
+template<typename Derived>
+Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
+{
+  EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_angle = ei_atan2(mat.coeff(1,0), mat.coeff(0,0));
+  return *this;
+}
+
+/** Constructs and \returns an equivalent 2x2 rotation matrix.
+  */
+template<typename Scalar>
+typename Rotation2D<Scalar>::Matrix2
+Rotation2D<Scalar>::toRotationMatrix(void) const
+{
+  Scalar sinA = ei_sin(m_angle);
+  Scalar cosA = ei_cos(m_angle);
+  return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
+}
+
+} // end namespace Eigen
diff --git a/Eigen/src/Eigen2Support/Geometry/RotationBase.h b/Eigen/src/Eigen2Support/Geometry/RotationBase.h
new file mode 100644
index 0000000..b1c8f38
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/RotationBase.h
@@ -0,0 +1,123 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+// this file aims to contain the various representations of rotation/orientation
+// in 2D and 3D space, except Matrix and Quaternion.
+
+/** \class RotationBase
+  *
+  * \brief Common base class for compact rotation representations
+  *
+  * \param Derived is the derived type, i.e., a rotation type
+  * \param _Dim the dimension of the space
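+  *
+  * As a sketch of the generic code this base class enables (the helper
+  * \c asRotationMatrix below is hypothetical, not part of the library):
+  * \code
+  * template<typename Derived, int Dim>
+  * Matrix<typename ei_traits<Derived>::Scalar,Dim,Dim>
+  * asRotationMatrix(const RotationBase<Derived,Dim>& r) { return r.toRotationMatrix(); }
+  * \endcode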
+  */
+template<typename Derived, int _Dim>
+class RotationBase
+{
+  public:
+    enum { Dim = _Dim };
+    /** the scalar type of the coefficients */
+    typedef typename ei_traits<Derived>::Scalar Scalar;
+    
+    /** corresponding linear transformation matrix type */
+    typedef Matrix<Scalar,Dim,Dim> RotationMatrixType;
+
+    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    inline Derived& derived() { return *static_cast<Derived*>(this); }
+
+    /** \returns an equivalent rotation matrix */
+    inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); }
+
+    /** \returns the inverse rotation */
+    inline Derived inverse() const { return derived().inverse(); }
+
+    /** \returns the concatenation of the rotation \c *this with a translation \a t */
+    inline Transform<Scalar,Dim> operator*(const Translation<Scalar,Dim>& t) const
+    { return toRotationMatrix() * t; }
+
+    /** \returns the concatenation of the rotation \c *this with a scaling \a s */
+    inline RotationMatrixType operator*(const Scaling<Scalar,Dim>& s) const
+    { return toRotationMatrix() * s; }
+
+    /** \returns the concatenation of the rotation \c *this with an affine transformation \a t */
+    inline Transform<Scalar,Dim> operator*(const Transform<Scalar,Dim>& t) const
+    { return toRotationMatrix() * t; }
+};
+
+/** \geometry_module
+  *
+  * Constructs a Dim x Dim rotation matrix from the rotation \a r
+  */
+template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
+template<typename OtherDerived>
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
+::Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
+{
+  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
+  *this = r.toRotationMatrix();
+}
+
+/** \geometry_module
+  *
+  * Set a Dim x Dim rotation matrix from the rotation \a r
+  */
+template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
+template<typename OtherDerived>
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>&
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
+::operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
+{
+  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
+  return *this = r.toRotationMatrix();
+}
+
+/** \internal
+  *
+  * Helper function to convert an arbitrary rotation object to a rotation matrix.
+  *
+  * \param Scalar the numeric type of the matrix coefficients
+  * \param Dim the dimension of the current space
+  *
+  * It returns a Dim x Dim fixed size matrix.
+  *
+  * Default specializations are provided for:
+  *   - any scalar type (2D),
+  *   - any matrix expression,
+  *   - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D)
+  *
+  * Currently ei_toRotationMatrix is only used by Transform.
+  *
+  * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis
+  */
+template<typename Scalar, int Dim>
+static inline Matrix<Scalar,2,2> ei_toRotationMatrix(const Scalar& s)
+{
+  EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return Rotation2D<Scalar>(s).toRotationMatrix();
+}
+
+template<typename Scalar, int Dim, typename OtherDerived>
+static inline Matrix<Scalar,Dim,Dim> ei_toRotationMatrix(const RotationBase<OtherDerived,Dim>& r)
+{
+  return r.toRotationMatrix();
+}
+
+template<typename Scalar, int Dim, typename OtherDerived>
+static inline const MatrixBase<OtherDerived>& ei_toRotationMatrix(const MatrixBase<OtherDerived>& mat)
+{
+  EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim,
+    YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return mat;
+}
+
+} // end namespace Eigen
diff --git a/Eigen/src/Eigen2Support/Geometry/Scaling.h b/Eigen/src/Eigen2Support/Geometry/Scaling.h
new file mode 100644
index 0000000..b8fa6cd
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/Scaling.h
@@ -0,0 +1,167 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Scaling
+  *
+  * \brief Represents a possibly non-uniform scaling transformation
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  * \param _Dim the dimension of the space, can be a compile time value or Dynamic
+  *
+  * \note This class is not meant to be used to store a scaling transformation,
+  * but rather to ease the construction and update of Transform objects.
+  *
+  * \sa class Translation, class Transform
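+  *
+  * A minimal usage sketch (for illustration only):
+  * \code
+  * Scaling3f s(2.0f);                 // uniform scaling by a factor 2
+  * Vector3f v = s * Vector3f(1,2,3);  // v == (2,4,6)
+  * \endcode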
+  */
+template<typename _Scalar, int _Dim>
+class Scaling
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)
+  /** dimension of the space */
+  enum { Dim = _Dim };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  /** corresponding vector type */
+  typedef Matrix<Scalar,Dim,1> VectorType;
+  /** corresponding linear transformation matrix type */
+  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
+  /** corresponding translation type */
+  typedef Translation<Scalar,Dim> TranslationType;
+  /** corresponding affine transformation type */
+  typedef Transform<Scalar,Dim> TransformType;
+
+protected:
+
+  VectorType m_coeffs;
+
+public:
+
+  /** Default constructor without initialization. */
+  Scaling() {}
+  /** Constructs and initializes a uniform scaling transformation */
+  explicit inline Scaling(const Scalar& s) { m_coeffs.setConstant(s); }
+  /** 2D only */
+  inline Scaling(const Scalar& sx, const Scalar& sy)
+  {
+    ei_assert(Dim==2);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+  }
+  /** 3D only */
+  inline Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz)
+  {
+    ei_assert(Dim==3);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+    m_coeffs.z() = sz;
+  }
+  /** Constructs and initializes the scaling transformation from a vector of scaling coefficients */
+  explicit inline Scaling(const VectorType& coeffs) : m_coeffs(coeffs) {}
+
+  const VectorType& coeffs() const { return m_coeffs; }
+  VectorType& coeffs() { return m_coeffs; }
+
+  /** Concatenates two scalings */
+  inline Scaling operator* (const Scaling& other) const
+  { return Scaling(coeffs().cwise() * other.coeffs()); }
+
+  /** Concatenates a scaling and a translation */
+  inline TransformType operator* (const TranslationType& t) const;
+
+  /** Concatenates a scaling and an affine transformation */
+  inline TransformType operator* (const TransformType& t) const;
+
+  /** Concatenates a scaling and a linear transformation matrix */
+  // TODO returns an expression
+  inline LinearMatrixType operator* (const LinearMatrixType& other) const
+  { return coeffs().asDiagonal() * other; }
+
+  /** Concatenates a linear transformation matrix and a scaling */
+  // TODO returns an expression
+  friend inline LinearMatrixType operator* (const LinearMatrixType& other, const Scaling& s)
+  { return other * s.coeffs().asDiagonal(); }
+
+  template<typename Derived>
+  inline LinearMatrixType operator*(const RotationBase<Derived,Dim>& r) const
+  { return *this * r.toRotationMatrix(); }
+
+  /** Applies scaling to vector */
+  inline VectorType operator* (const VectorType& other) const
+  { return coeffs().asDiagonal() * other; }
+
+  /** \returns the inverse scaling */
+  inline Scaling inverse() const
+  { return Scaling(coeffs().cwise().inverse()); }
+
+  inline Scaling& operator=(const Scaling& other)
+  {
+    m_coeffs = other.m_coeffs;
+    return *this;
+  }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Scaling,Scaling<NewScalarType,Dim> >::type cast() const
+  { return typename internal::cast_return_type<Scaling,Scaling<NewScalarType,Dim> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Scaling(const Scaling<OtherScalarType,Dim>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Scaling& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+};
+
+/** \addtogroup Geometry_Module */
+//@{
+typedef Scaling<float, 2> Scaling2f;
+typedef Scaling<double,2> Scaling2d;
+typedef Scaling<float, 3> Scaling3f;
+typedef Scaling<double,3> Scaling3d;
+//@}
+
+template<typename Scalar, int Dim>
+inline typename Scaling<Scalar,Dim>::TransformType
+Scaling<Scalar,Dim>::operator* (const TranslationType& t) const
+{
+  TransformType res;
+  res.matrix().setZero();
+  res.linear().diagonal() = coeffs();
+  res.translation() = m_coeffs.cwise() * t.vector();
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+template<typename Scalar, int Dim>
+inline typename Scaling<Scalar,Dim>::TransformType
+Scaling<Scalar,Dim>::operator* (const TransformType& t) const
+{
+  TransformType res = t;
+  res.prescale(m_coeffs);
+  return res;
+}
+
+} // end namespace Eigen
diff --git a/Eigen/src/Eigen2Support/Geometry/Transform.h b/Eigen/src/Eigen2Support/Geometry/Transform.h
new file mode 100644
index 0000000..fab60b2
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/Transform.h
@@ -0,0 +1,786 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+// Note that we have to pass Dim and HDim because it is not allowed to use a template
+// parameter to define a template specialization. To be more precise, in the following
+// specializations, it is not allowed to use Dim+1 instead of HDim.
+template< typename Other,
+          int Dim,
+          int HDim,
+          int OtherRows=Other::RowsAtCompileTime,
+          int OtherCols=Other::ColsAtCompileTime>
+struct ei_transform_product_impl;
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Transform
+  *
+  * \brief Represents a homogeneous transformation in an N-dimensional space
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  * \param _Dim the dimension of the space
+  *
+  * The homography is internally represented and stored as a (Dim+1)^2 matrix which
+  * is available through the matrix() method.
+  *
+  * Conversion methods from/to Qt's QMatrix and QTransform are available if the
+  * preprocessor token EIGEN_QT_SUPPORT is defined.
+  *
+  * \sa class Matrix, class Quaternion
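+  *
+  * A minimal usage sketch (for illustration only; uses the \c Transform3f typedef):
+  * \code
+  * Transform3f t;
+  * t.setIdentity();
+  * t.translate(Vector3f(1,0,0));
+  * t.rotate(AngleAxisf(1.57079633f, Vector3f::UnitZ())); // now t == Translation * Rotation
+  * Vector3f p = t * Vector3f::Zero();                    // p == (1,0,0)
+  * \endcode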
+  */
+template<typename _Scalar, int _Dim>
+class Transform
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))
+  enum {
+    Dim = _Dim,     ///< dimension of the space in which the transformation acts
+    HDim = _Dim+1   ///< size of the corresponding homogeneous vector
+  };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  /** type of the matrix used to represent the transformation */
+  typedef Matrix<Scalar,HDim,HDim> MatrixType;
+  /** type of the matrix used to represent the linear part of the transformation */
+  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
+  /** type of read/write reference to the linear part of the transformation */
+  typedef Block<MatrixType,Dim,Dim> LinearPart;
+  /** type of read-only reference to the linear part of the transformation */
+  typedef const Block<const MatrixType,Dim,Dim> ConstLinearPart;
+  /** type of a vector */
+  typedef Matrix<Scalar,Dim,1> VectorType;
+  /** type of a read/write reference to the translation part of the transformation */
+  typedef Block<MatrixType,Dim,1> TranslationPart;
+  /** type of a read-only reference to the translation part of the transformation */
+  typedef const Block<const MatrixType,Dim,1> ConstTranslationPart;
+  /** corresponding translation type */
+  typedef Translation<Scalar,Dim> TranslationType;
+  /** corresponding scaling transformation type */
+  typedef Scaling<Scalar,Dim> ScalingType;
+
+protected:
+
+  MatrixType m_matrix;
+
+public:
+
+  /** Default constructor without initialization of the coefficients. */
+  inline Transform() { }
+
+  inline Transform(const Transform& other)
+  {
+    m_matrix = other.m_matrix;
+  }
+
+  inline explicit Transform(const TranslationType& t) { *this = t; }
+  inline explicit Transform(const ScalingType& s) { *this = s; }
+  template<typename Derived>
+  inline explicit Transform(const RotationBase<Derived, Dim>& r) { *this = r; }
+
+  inline Transform& operator=(const Transform& other)
+  { m_matrix = other.m_matrix; return *this; }
+
+  template<typename OtherDerived, bool BigMatrix> // MSVC 2005 will commit suicide if BigMatrix has a default value
+  struct construct_from_matrix
+  {
+    static inline void run(Transform *transform, const MatrixBase<OtherDerived>& other)
+    {
+      transform->matrix() = other;
+    }
+  };
+
+  template<typename OtherDerived> struct construct_from_matrix<OtherDerived, true>
+  {
+    static inline void run(Transform *transform, const MatrixBase<OtherDerived>& other)
+    {
+      transform->linear() = other;
+      transform->translation().setZero();
+      transform->matrix()(Dim,Dim) = Scalar(1);
+      transform->matrix().template block<1,Dim>(Dim,0).setZero();
+    }
+  };
+
+  /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */
+  template<typename OtherDerived>
+  inline explicit Transform(const MatrixBase<OtherDerived>& other)
+  {
+    construct_from_matrix<OtherDerived, int(OtherDerived::RowsAtCompileTime) == Dim>::run(this, other);
+  }
+
+  /** Set \c *this from a (Dim+1)^2 matrix. */
+  template<typename OtherDerived>
+  inline Transform& operator=(const MatrixBase<OtherDerived>& other)
+  { m_matrix = other; return *this; }
+
+  #ifdef EIGEN_QT_SUPPORT
+  inline Transform(const QMatrix& other);
+  inline Transform& operator=(const QMatrix& other);
+  inline QMatrix toQMatrix(void) const;
+  inline Transform(const QTransform& other);
+  inline Transform& operator=(const QTransform& other);
+  inline QTransform toQTransform(void) const;
+  #endif
+
+  /** shortcut for m_matrix(row,col);
+    * \sa MatrixBase::operator()(int,int) const */
+  inline Scalar operator() (int row, int col) const { return m_matrix(row,col); }
+  /** shortcut for m_matrix(row,col);
+    * \sa MatrixBase::operator()(int,int) */
+  inline Scalar& operator() (int row, int col) { return m_matrix(row,col); }
+
+  /** \returns a read-only expression of the transformation matrix */
+  inline const MatrixType& matrix() const { return m_matrix; }
+  /** \returns a writable expression of the transformation matrix */
+  inline MatrixType& matrix() { return m_matrix; }
+
+  /** \returns a read-only expression of the linear part of the transformation */
+  inline ConstLinearPart linear() const { return m_matrix.template block<Dim,Dim>(0,0); }
+  /** \returns a writable expression of the linear part of the transformation */
+  inline LinearPart linear() { return m_matrix.template block<Dim,Dim>(0,0); }
+
+  /** \returns a read-only expression of the translation vector of the transformation */
+  inline ConstTranslationPart translation() const { return m_matrix.template block<Dim,1>(0,Dim); }
+  /** \returns a writable expression of the translation vector of the transformation */
+  inline TranslationPart translation() { return m_matrix.template block<Dim,1>(0,Dim); }
+
+  /** \returns an expression of the product between the transform \c *this and a matrix expression \a other
+  *
+  * The right hand side \a other might be either:
+  * \li a vector of size Dim,
+  * \li a homogeneous vector of size Dim+1,
+  * \li a transformation matrix of size Dim+1 x Dim+1.
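+  *
+  * For instance (an illustrative sketch of the first two cases):
+  * \code
+  * Transform3f t;
+  * t.setIdentity();
+  * t.translate(Vector3f(1,2,3));
+  * Vector3f p = t * Vector3f::Zero();    // a Dim vector: p == (1,2,3)
+  * Vector4f h = t * Vector4f(0,0,0,1);   // a homogeneous vector: h == (1,2,3,1)
+  * \endcode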
+  */
+  // note: this function is defined here because some compilers cannot find the respective declaration
+  template<typename OtherDerived>
+  inline const typename ei_transform_product_impl<OtherDerived,_Dim,_Dim+1>::ResultType
+  operator * (const MatrixBase<OtherDerived> &other) const
+  { return ei_transform_product_impl<OtherDerived,Dim,HDim>::run(*this,other.derived()); }
+
+  /** \returns the product expression of a transformation matrix \a a times a transform \a b
+    * The transformation matrix \a a must be of size (Dim+1) x (Dim+1). */
+  template<typename OtherDerived>
+  friend inline const typename ProductReturnType<OtherDerived,MatrixType>::Type
+  operator * (const MatrixBase<OtherDerived> &a, const Transform &b)
+  { return a.derived() * b.matrix(); }
+
+  /** Concatenates two transformations */
+  inline const Transform
+  operator * (const Transform& other) const
+  { return Transform(m_matrix * other.matrix()); }
+
+  /** \sa MatrixBase::setIdentity() */
+  void setIdentity() { m_matrix.setIdentity(); }
+  static const typename MatrixType::IdentityReturnType Identity()
+  {
+    return MatrixType::Identity();
+  }
+
+  template<typename OtherDerived>
+  inline Transform& scale(const MatrixBase<OtherDerived> &other);
+
+  template<typename OtherDerived>
+  inline Transform& prescale(const MatrixBase<OtherDerived> &other);
+
+  inline Transform& scale(Scalar s);
+  inline Transform& prescale(Scalar s);
+
+  template<typename OtherDerived>
+  inline Transform& translate(const MatrixBase<OtherDerived> &other);
+
+  template<typename OtherDerived>
+  inline Transform& pretranslate(const MatrixBase<OtherDerived> &other);
+
+  template<typename RotationType>
+  inline Transform& rotate(const RotationType& rotation);
+
+  template<typename RotationType>
+  inline Transform& prerotate(const RotationType& rotation);
+
+  Transform& shear(Scalar sx, Scalar sy);
+  Transform& preshear(Scalar sx, Scalar sy);
+
+  inline Transform& operator=(const TranslationType& t);
+  inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }
+  inline Transform operator*(const TranslationType& t) const;
+
+  inline Transform& operator=(const ScalingType& t);
+  inline Transform& operator*=(const ScalingType& s) { return scale(s.coeffs()); }
+  inline Transform operator*(const ScalingType& s) const;
+  friend inline Transform operator*(const LinearMatrixType& mat, const Transform& t)
+  {
+    Transform res = t;
+    res.matrix().row(Dim) = t.matrix().row(Dim);
+    res.matrix().template block<Dim,HDim>(0,0) = (mat * t.matrix().template block<Dim,HDim>(0,0)).lazy();
+    return res;
+  }
+
+  template<typename Derived>
+  inline Transform& operator=(const RotationBase<Derived,Dim>& r);
+  template<typename Derived>
+  inline Transform& operator*=(const RotationBase<Derived,Dim>& r) { return rotate(r.toRotationMatrix()); }
+  template<typename Derived>
+  inline Transform operator*(const RotationBase<Derived,Dim>& r) const;
+
+  LinearMatrixType rotation() const;
+  template<typename RotationMatrixType, typename ScalingMatrixType>
+  void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const;
+  template<typename ScalingMatrixType, typename RotationMatrixType>
+  void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const;
+
+  template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
+  Transform& fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
+    const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale);
+
+  inline const MatrixType inverse(TransformTraits traits = Affine) const;
+
+  /** \returns a const pointer to the column major internal matrix */
+  const Scalar* data() const { return m_matrix.data(); }
+  /** \returns a non-const pointer to the column major internal matrix */
+  Scalar* data() { return m_matrix.data(); }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim> >::type cast() const
+  { return typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Transform(const Transform<OtherScalarType,Dim>& other)
+  { m_matrix = other.matrix().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Transform& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_matrix.isApprox(other.m_matrix, prec); }
+
+  #ifdef EIGEN_TRANSFORM_PLUGIN
+  #include EIGEN_TRANSFORM_PLUGIN
+  #endif
+
+protected:
+
+};
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2> Transform2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3> Transform3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2> Transform2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3> Transform3d;
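+
+// Example: a minimal usage sketch of the typedefs above (all values illustrative):
+//   Transform3f t;
+//   t.setIdentity();
+//   t.translate(Vector3f(1,2,3)).rotate(AngleAxisf(0.5f, Vector3f::UnitZ()));
+//   Vector3f p = t * Vector3f::UnitX();   // apply the transform to a point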
+
+/**************************
+*** Optional QT support ***
+**************************/
+
+#ifdef EIGEN_QT_SUPPORT
+/** Initialises \c *this from a QMatrix assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>::Transform(const QMatrix& other)
+{
+  *this = other;
+}
+
+/** Set \c *this from a QMatrix assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const QMatrix& other)
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_matrix << other.m11(), other.m21(), other.dx(),
+              other.m12(), other.m22(), other.dy(),
+              0, 0, 1;
+  return *this;
+}
+
+/** \returns a QMatrix from \c *this assuming the dimension is 2.
+  *
+  * \warning this conversion might lose data if \c *this is not affine
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+QMatrix Transform<Scalar,Dim>::toQMatrix(void) const
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
+                 m_matrix.coeff(0,1), m_matrix.coeff(1,1),
+                 m_matrix.coeff(0,2), m_matrix.coeff(1,2));
+}
+
+/** Initialises \c *this from a QTransform assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>::Transform(const QTransform& other)
+{
+  *this = other;
+}
+
+/** Set \c *this from a QTransform assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const QTransform& other)
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_matrix << other.m11(), other.m21(), other.dx(),
+              other.m12(), other.m22(), other.dy(),
+              other.m13(), other.m23(), other.m33();
+  return *this;
+}
+
+/** \returns a QTransform from \c *this assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim>
+QTransform Transform<Scalar,Dim>::toQTransform(void) const
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0),
+                    m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1),
+                    m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2));
+}
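+
+// Example: a round-trip sketch, assuming EIGEN_QT_SUPPORT is defined:
+//   Transform2d t;
+//   t.setIdentity();
+//   QTransform q = t.toQTransform();   // to Qt
+//   t = q;                             // and back to Eigen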
+#endif
+
+/*********************
+*** Procedural API ***
+*********************/
+
+/** Applies on the right the non-uniform scale transformation represented
+  * by the vector \a other to \c *this and returns a reference to \c *this.
+  * \sa prescale()
+  */
+template<typename Scalar, int Dim>
+template<typename OtherDerived>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::scale(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  linear() = (linear() * other.asDiagonal()).lazy();
+  return *this;
+}
+
+/** Applies on the right a uniform scale of a factor \a s to \c *this
+  * and returns a reference to \c *this.
+  * \sa prescale(Scalar)
+  */
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::scale(Scalar s)
+{
+  linear() *= s;
+  return *this;
+}
+
+/** Applies on the left the non-uniform scale transformation represented
+  * by the vector \a other to \c *this and returns a reference to \c *this.
+  * \sa scale()
+  */
+template<typename Scalar, int Dim>
+template<typename OtherDerived>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::prescale(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  m_matrix.template block<Dim,HDim>(0,0) = (other.asDiagonal() * m_matrix.template block<Dim,HDim>(0,0)).lazy();
+  return *this;
+}
+
+/** Applies on the left a uniform scale of a factor \a s to \c *this
+  * and returns a reference to \c *this.
+  * \sa scale(Scalar)
+  */
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::prescale(Scalar s)
+{
+  m_matrix.template corner<Dim,HDim>(TopLeft) *= s;
+  return *this;
+}
+
+/** Applies on the right the translation matrix represented by the vector \a other
+  * to \c *this and returns a reference to \c *this.
+  * \sa pretranslate()
+  */
+template<typename Scalar, int Dim>
+template<typename OtherDerived>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::translate(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  translation() += linear() * other;
+  return *this;
+}
+
+/** Applies on the left the translation matrix represented by the vector \a other
+  * to \c *this and returns a reference to \c *this.
+  * \sa translate()
+  */
+template<typename Scalar, int Dim>
+template<typename OtherDerived>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::pretranslate(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  translation() += other;
+  return *this;
+}
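+
+// Example: a sketch contrasting the two updates, for an initialized transform t
+// and vector v:
+//   t.translate(v);      // t becomes t * Translation(v): v is applied first
+//   t.pretranslate(v);   // t becomes Translation(v) * t: v is applied last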
+
+/** Applies on the right the rotation represented by \a rotation
+  * to \c *this and returns a reference to \c *this.
+  *
+  * The template parameter \a RotationType is the type of the rotation which
+  * must be known by ei_toRotationMatrix<>.
+  *
+  * Natively supported types include:
+  *   - any scalar (2D),
+  *   - a Dim x Dim matrix expression,
+  *   - a Quaternion (3D),
+  *   - an AngleAxis (3D)
+  *
+  * This mechanism is easily extendable to support user types such as Euler angles,
+  * or a pair of Quaternions for 4D rotations.
+  *
+  * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType)
+  */
+template<typename Scalar, int Dim>
+template<typename RotationType>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::rotate(const RotationType& rotation)
+{
+  linear() *= ei_toRotationMatrix<Scalar,Dim>(rotation);
+  return *this;
+}
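+
+// Example: a sketch of natively supported rotation types in 3D (angle value
+// illustrative):
+//   t.rotate(AngleAxisf(0.5f, Vector3f::UnitZ()));   // angle-axis
+//   t.rotate(Quaternionf(1.0f, 0.0f, 0.0f, 0.0f));   // quaternion (w,x,y,z)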
+
+/** Applies on the left the rotation represented by \a rotation
+  * to \c *this and returns a reference to \c *this.
+  *
+  * See rotate() for further details.
+  *
+  * \sa rotate()
+  */
+template<typename Scalar, int Dim>
+template<typename RotationType>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::prerotate(const RotationType& rotation)
+{
+  m_matrix.template block<Dim,HDim>(0,0) = ei_toRotationMatrix<Scalar,Dim>(rotation)
+                                         * m_matrix.template block<Dim,HDim>(0,0);
+  return *this;
+}
+
+/** Applies on the right the shear transformation represented
+  * by the shear factors \a sx and \a sy to \c *this and returns a reference to \c *this.
+  * \warning 2D only.
+  * \sa preshear()
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::shear(Scalar sx, Scalar sy)
+{
+  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  VectorType tmp = linear().col(0)*sy + linear().col(1);
+  linear() << linear().col(0) + linear().col(1)*sx, tmp;
+  return *this;
+}
+
+/** Applies on the left the shear transformation represented
+  * by the shear factors \a sx and \a sy to \c *this and returns a reference to \c *this.
+  * \warning 2D only.
+  * \sa shear()
+  */
+template<typename Scalar, int Dim>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::preshear(Scalar sx, Scalar sy)
+{
+  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_matrix.template block<Dim,HDim>(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block<Dim,HDim>(0,0);
+  return *this;
+}
+
+/******************************************************
+*** Scaling, Translation and Rotation compatibility ***
+******************************************************/
+
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const TranslationType& t)
+{
+  linear().setIdentity();
+  translation() = t.vector();
+  m_matrix.template block<1,Dim>(Dim,0).setZero();
+  m_matrix(Dim,Dim) = Scalar(1);
+  return *this;
+}
+
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim> Transform<Scalar,Dim>::operator*(const TranslationType& t) const
+{
+  Transform res = *this;
+  res.translate(t.vector());
+  return res;
+}
+
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const ScalingType& s)
+{
+  m_matrix.setZero();
+  linear().diagonal() = s.coeffs();
+  m_matrix.coeffRef(Dim,Dim) = Scalar(1);
+  return *this;
+}
+
+template<typename Scalar, int Dim>
+inline Transform<Scalar,Dim> Transform<Scalar,Dim>::operator*(const ScalingType& s) const
+{
+  Transform res = *this;
+  res.scale(s.coeffs());
+  return res;
+}
+
+template<typename Scalar, int Dim>
+template<typename Derived>
+inline Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const RotationBase<Derived,Dim>& r)
+{
+  linear() = ei_toRotationMatrix<Scalar,Dim>(r);
+  translation().setZero();
+  m_matrix.template block<1,Dim>(Dim,0).setZero();
+  m_matrix.coeffRef(Dim,Dim) = Scalar(1);
+  return *this;
+}
+
+template<typename Scalar, int Dim>
+template<typename Derived>
+inline Transform<Scalar,Dim> Transform<Scalar,Dim>::operator*(const RotationBase<Derived,Dim>& r) const
+{
+  Transform res = *this;
+  res.rotate(r.derived());
+  return res;
+}
+
+/************************
+*** Special functions ***
+************************/
+
+/** \returns the rotation part of the transformation
+  * \nonstableyet
+  *
+  * \svd_module
+  *
+  * \sa computeRotationScaling(), computeScalingRotation(), class SVD
+  */
+template<typename Scalar, int Dim>
+typename Transform<Scalar,Dim>::LinearMatrixType
+Transform<Scalar,Dim>::rotation() const
+{
+  LinearMatrixType result;
+  computeRotationScaling(&result, (LinearMatrixType*)0);
+  return result;
+}
+
+
+/** decomposes the linear part of the transformation as a product rotation x scaling;
+  * the scaling is not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * \nonstableyet
+  *
+  * \svd_module
+  *
+  * \sa computeScalingRotation(), rotation(), class SVD
+  */
+template<typename Scalar, int Dim>
+template<typename RotationMatrixType, typename ScalingMatrixType>
+void Transform<Scalar,Dim>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const
+{
+  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU|ComputeFullV);
+  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+  Matrix<Scalar, Dim, 1> sv(svd.singularValues());
+  sv.coeffRef(0) *= x;
+  if(scaling)
+  {
+    scaling->noalias() = svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint();
+  }
+  if(rotation)
+  {
+    LinearMatrixType m(svd.matrixU());
+    m.col(0) /= x;
+    rotation->noalias() = m * svd.matrixV().adjoint();
+  }
+}
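+
+// Example: a sketch recovering both factors from an affine transform t:
+//   Matrix3f rot, scal;
+//   t.computeRotationScaling(&rot, &scal);   // t.linear() ~ rot * scal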
+
+/** decomposes the linear part of the transformation as a product scaling x rotation;
+  * the scaling is not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * \nonstableyet
+  *
+  * \svd_module
+  *
+  * \sa computeRotationScaling(), rotation(), class SVD
+  */
+template<typename Scalar, int Dim>
+template<typename ScalingMatrixType, typename RotationMatrixType>
+void Transform<Scalar,Dim>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const
+{
+  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU|ComputeFullV);
+  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+  Matrix<Scalar, Dim, 1> sv(svd.singularValues());
+  sv.coeffRef(0) *= x;
+  if(scaling)
+  {
+    scaling->noalias() = svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint();
+  }
+  if(rotation)
+  {
+    LinearMatrixType m(svd.matrixU());
+    m.col(0) /= x;
+    rotation->noalias() = m * svd.matrixV().adjoint();
+  }
+}
+
+/** Convenient method to set \c *this from a position, orientation and scale
+  * of a 3D object.
+  */
+template<typename Scalar, int Dim>
+template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
+Transform<Scalar,Dim>&
+Transform<Scalar,Dim>::fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
+  const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale)
+{
+  linear() = ei_toRotationMatrix<Scalar,Dim>(orientation);
+  linear() *= scale.asDiagonal();
+  translation() = position;
+  m_matrix.template block<1,Dim>(Dim,0).setZero();
+  m_matrix(Dim,Dim) = Scalar(1);
+  return *this;
+}
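+
+// Example: a sketch placing a 3D object (all values illustrative):
+//   t.fromPositionOrientationScale(Vector3f(1,2,3),
+//                                  AngleAxisf(0.5f, Vector3f::UnitY()),
+//                                  Vector3f::Ones());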
+
+/** \nonstableyet
+  *
+  * \returns the inverse transformation matrix according to some given knowledge
+  * on \c *this.
+  *
+  * \param traits allows optimizing the inversion process when the transformation
+  * is known not to be a general one. The possible values are:
+  *  - Projective if the transformation is not necessarily affine, i.e., if the
+  *    last row is not guaranteed to be [0 ... 0 1]
+  *  - Affine is the default; the last row is assumed to be [0 ... 0 1]
+  *  - Isometry if the transformation is only a concatenation of translations
+  *    and rotations.
+  *
+  * \warning unless \a traits is set to Isometry, this function requires the
+  * generic inverse method of MatrixBase defined in the LU module. If you
+  * forget to include this module, then you will get hard-to-debug linking errors.
+  *
+  * \sa MatrixBase::inverse()
+  */
+template<typename Scalar, int Dim>
+inline const typename Transform<Scalar,Dim>::MatrixType
+Transform<Scalar,Dim>::inverse(TransformTraits traits) const
+{
+  if (traits == Projective)
+  {
+    return m_matrix.inverse();
+  }
+  else
+  {
+    MatrixType res;
+    if (traits == Affine)
+    {
+      res.template corner<Dim,Dim>(TopLeft) = linear().inverse();
+    }
+    else if (traits == Isometry)
+    {
+      res.template corner<Dim,Dim>(TopLeft) = linear().transpose();
+    }
+    else
+    {
+      ei_assert(false && "invalid traits value in Transform::inverse()");
+    }
+    // translation and remaining parts
+    res.template corner<Dim,1>(TopRight) = - res.template corner<Dim,Dim>(TopLeft) * translation();
+    res.template corner<1,Dim>(BottomLeft).setZero();
+    res.coeffRef(Dim,Dim) = Scalar(1);
+    return res;
+  }
+}
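+
+// Example: a sketch of the three inversion paths, assuming t matches the hint:
+//   Matrix4f gen = t.inverse(Projective);   // general matrix inverse
+//   Matrix4f aff = t.inverse(Affine);       // inverts the linear part only
+//   Matrix4f iso = t.inverse(Isometry);     // transposes the linear part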
+
+/*****************************************************
+*** Specializations of operator* with a MatrixBase ***
+*****************************************************/
+
+template<typename Other, int Dim, int HDim>
+struct ei_transform_product_impl<Other,Dim,HDim, HDim,HDim>
+{
+  typedef Transform<typename Other::Scalar,Dim> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef typename ProductReturnType<MatrixType,Other>::Type ResultType;
+  static ResultType run(const TransformType& tr, const Other& other)
+  { return tr.matrix() * other; }
+};
+
+template<typename Other, int Dim, int HDim>
+struct ei_transform_product_impl<Other,Dim,HDim, Dim,Dim>
+{
+  typedef Transform<typename Other::Scalar,Dim> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef TransformType ResultType;
+  static ResultType run(const TransformType& tr, const Other& other)
+  {
+    TransformType res;
+    res.translation() = tr.translation();
+    res.matrix().row(Dim) = tr.matrix().row(Dim);
+    res.linear() = (tr.linear() * other).lazy();
+    return res;
+  }
+};
+
+template<typename Other, int Dim, int HDim>
+struct ei_transform_product_impl<Other,Dim,HDim, HDim,1>
+{
+  typedef Transform<typename Other::Scalar,Dim> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef typename ProductReturnType<MatrixType,Other>::Type ResultType;
+  static ResultType run(const TransformType& tr, const Other& other)
+  { return tr.matrix() * other; }
+};
+
+template<typename Other, int Dim, int HDim>
+struct ei_transform_product_impl<Other,Dim,HDim, Dim,1>
+{
+  typedef typename Other::Scalar Scalar;
+  typedef Transform<Scalar,Dim> TransformType;
+  typedef Matrix<Scalar,Dim,1> ResultType;
+  static ResultType run(const TransformType& tr, const Other& other)
+  { return ((tr.linear() * other) + tr.translation())
+          * (Scalar(1) / ( (tr.matrix().template block<1,Dim>(Dim,0) * other).coeff(0) + tr.matrix().coeff(Dim,Dim))); }
+};
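+
+// Note: for a Dim-vector right-hand side, the specialization above computes the
+// full projective action (linear * x + translation) divided by the last
+// homogeneous coordinate (last row times [x;1]); for affine transforms that
+// divisor is 1 and the division is a no-op.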
+
+} // end namespace Eigen
diff --git a/Eigen/src/Eigen2Support/Geometry/Translation.h b/Eigen/src/Eigen2Support/Geometry/Translation.h
new file mode 100644
index 0000000..2b9859f
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Geometry/Translation.h
@@ -0,0 +1,184 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// no include guard, we'll include this twice from All.h from Eigen2Support, and it's internal anyway
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Translation
+  *
+  * \brief Represents a translation transformation
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  * \param _Dim the dimension of the space, can be a compile time value or Dynamic
+  *
+  * \note This class is not meant to store a translation transformation,
+  * but rather to ease the construction and update of Transform objects.
+  *
+  * \sa class Scaling, class Transform
+  */
+template<typename _Scalar, int _Dim>
+class Translation
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)
+  /** dimension of the space */
+  enum { Dim = _Dim };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  /** corresponding vector type */
+  typedef Matrix<Scalar,Dim,1> VectorType;
+  /** corresponding linear transformation matrix type */
+  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
+  /** corresponding scaling transformation type */
+  typedef Scaling<Scalar,Dim> ScalingType;
+  /** corresponding affine transformation type */
+  typedef Transform<Scalar,Dim> TransformType;
+
+protected:
+
+  VectorType m_coeffs;
+
+public:
+
+  /** Default constructor without initialization. */
+  Translation() {}
+  /** Constructs and initializes a 2D translation from its coordinates \a sx and \a sy */
+  inline Translation(const Scalar& sx, const Scalar& sy)
+  {
+    ei_assert(Dim==2);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+  }
+  /** Constructs and initializes a 3D translation from its coordinates \a sx, \a sy and \a sz */
+  inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz)
+  {
+    ei_assert(Dim==3);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+    m_coeffs.z() = sz;
+  }
+  /** Constructs and initializes the translation transformation from a vector of translation coefficients */
+  explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {}
+
+  const VectorType& vector() const { return m_coeffs; }
+  VectorType& vector() { return m_coeffs; }
+
+  /** Concatenates two translations */
+  inline Translation operator* (const Translation& other) const
+  { return Translation(m_coeffs + other.m_coeffs); }
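+
+  // Example: a minimal sketch; translations compose by adding their vectors:
+  //   Translation3f t1(1,0,0), t2(0,2,0);
+  //   Translation3f t12 = t1 * t2;   // translation by (1,2,0)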
+
+  /** Concatenates a translation and a scaling */
+  inline TransformType operator* (const ScalingType& other) const;
+
+  /** Concatenates a translation and a linear transformation */
+  inline TransformType operator* (const LinearMatrixType& linear) const;
+
+  template<typename Derived>
+  inline TransformType operator*(const RotationBase<Derived,Dim>& r) const
+  { return *this * r.toRotationMatrix(); }
+
+  /** Concatenates a linear transformation and a translation */
+  // it's a nightmare to define a templated friend function outside its declaration
+  friend inline TransformType operator* (const LinearMatrixType& linear, const Translation& t)
+  {
+    TransformType res;
+    res.matrix().setZero();
+    res.linear() = linear;
+    res.translation() = linear * t.m_coeffs;
+    res.matrix().row(Dim).setZero();
+    res(Dim,Dim) = Scalar(1);
+    return res;
+  }
+
+  /** Concatenates a translation and an affine transformation */
+  inline TransformType operator* (const TransformType& t) const;
+
+  /** Applies translation to vector */
+  inline VectorType operator* (const VectorType& other) const
+  { return m_coeffs + other; }
+
+  /** \returns the inverse translation (opposite) */
+  Translation inverse() const { return Translation(-m_coeffs); }
+
+  Translation& operator=(const Translation& other)
+  {
+    m_coeffs = other.m_coeffs;
+    return *this;
+  }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const
+  { return typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Translation(const Translation<OtherScalarType,Dim>& other)
+  { m_coeffs = other.vector().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Translation& other, typename NumTraits<Scalar>::Real prec = precision<Scalar>()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+};
+
+/** \addtogroup Geometry_Module */
+//@{
+typedef Translation<float, 2> Translation2f;
+typedef Translation<double,2> Translation2d;
+typedef Translation<float, 3> Translation3f;
+typedef Translation<double,3> Translation3d;
+//@}
+
+
+template<typename Scalar, int Dim>
+inline typename Translation<Scalar,Dim>::TransformType
+Translation<Scalar,Dim>::operator* (const ScalingType& other) const
+{
+  TransformType res;
+  res.matrix().setZero();
+  res.linear().diagonal() = other.coeffs();
+  res.translation() = m_coeffs;
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+template<typename Scalar, int Dim>
+inline typename Translation<Scalar,Dim>::TransformType
+Translation<Scalar,Dim>::operator* (const LinearMatrixType& linear) const
+{
+  TransformType res;
+  res.matrix().setZero();
+  res.linear() = linear;
+  res.translation() = m_coeffs;
+  res.matrix().row(Dim).setZero();
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+template<typename Scalar, int Dim>
+inline typename Translation<Scalar,Dim>::TransformType
+Translation<Scalar,Dim>::operator* (const TransformType& t) const
+{
+  TransformType res = t;
+  res.pretranslate(m_coeffs);
+  return res;
+}
+
+} // end namespace Eigen
diff --git a/Eigen/src/Eigen2Support/LU.h b/Eigen/src/Eigen2Support/LU.h
new file mode 100644
index 0000000..49f19ad
--- /dev/null
+++ b/Eigen/src/Eigen2Support/LU.h
@@ -0,0 +1,120 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN2_LU_H
+#define EIGEN2_LU_H
+
+namespace Eigen { 
+
+template<typename MatrixType>
+class LU : public FullPivLU<MatrixType>
+{
+  public:
+
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    typedef Matrix<int, 1, MatrixType::ColsAtCompileTime, MatrixType::Options, 1, MatrixType::MaxColsAtCompileTime> IntRowVectorType;
+    typedef Matrix<int, MatrixType::RowsAtCompileTime, 1, MatrixType::Options, MatrixType::MaxRowsAtCompileTime, 1> IntColVectorType;
+    typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime, MatrixType::Options, 1, MatrixType::MaxColsAtCompileTime> RowVectorType;
+    typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1, MatrixType::Options, MatrixType::MaxRowsAtCompileTime, 1> ColVectorType;
+
+    typedef Matrix<typename MatrixType::Scalar,
+                  MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix" is the number of cols of the original matrix
+                                                 // so that the product "matrix * kernel = zero" makes sense
+                  Dynamic,                       // we don't know at compile-time the dimension of the kernel
+                  MatrixType::Options,
+                  MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter
+                  MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space, whose dimension is the number
+                                                   // of columns of the original matrix
+    > KernelResultType;
+
+    typedef Matrix<typename MatrixType::Scalar,
+                   MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose dimension is the number
+                                                  // of rows of the original matrix
+                   Dynamic,                       // we don't know at compile time the dimension of the image (the rank)
+                   MatrixType::Options,
+                   MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix,
+                   MatrixType::MaxColsAtCompileTime  // so it has the same number of rows and at most as many columns.
+    > ImageResultType;
+
+    typedef FullPivLU<MatrixType> Base;
+
+    template<typename T>
+    explicit LU(const T& t) : Base(t), m_originalMatrix(t) {}
+
+    template<typename OtherDerived, typename ResultType>
+    bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
+    {
+      *result = static_cast<const Base*>(this)->solve(b);
+      return true;
+    }
+
+    template<typename ResultType>
+    inline void computeInverse(ResultType *result) const
+    {
+      solve(MatrixType::Identity(this->rows(), this->cols()), result);
+    }
+    
+    template<typename KernelMatrixType>
+    void computeKernel(KernelMatrixType *result) const
+    {
+      *result = static_cast<const Base*>(this)->kernel();
+    }
+    
+    template<typename ImageMatrixType>
+    void computeImage(ImageMatrixType *result) const
+    {
+      *result = static_cast<const Base*>(this)->image(m_originalMatrix);
+    }
+    
+    const ImageResultType image() const
+    {
+      return static_cast<const Base*>(this)->image(m_originalMatrix);
+    }
+    
+    const MatrixType& m_originalMatrix;
+};
+
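+// Example: a minimal sketch of this Eigen2-style API, assuming A and b are
+// initialized:
+//   Matrix3d A; Vector3d b;
+//   LU<Matrix3d> lu(A);
+//   Vector3d x;
+//   lu.solve(b, &x);   // forwards to FullPivLU::solve
+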
+#if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS
+/** \lu_module
+  *
+  * Synonym of partialPivLu().
+  *
+  * \return the partial-pivoting LU decomposition of \c *this.
+  *
+  * \sa class PartialPivLU
+  */
+template<typename Derived>
+inline const LU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::lu() const
+{
+  return LU<PlainObject>(eval());
+}
+#endif
+
+#ifdef EIGEN2_SUPPORT
+/** \lu_module
+  *
+  * Synonym of partialPivLu().
+  *
+  * \return the partial-pivoting LU decomposition of \c *this.
+  *
+  * \sa class PartialPivLU
+  */
+template<typename Derived>
+inline const LU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::eigen2_lu() const
+{
+  return LU<PlainObject>(eval());
+}
+#endif
+
+} // end namespace Eigen
+
+#endif // EIGEN2_LU_H
diff --git a/Eigen/src/Eigen2Support/Lazy.h b/Eigen/src/Eigen2Support/Lazy.h
new file mode 100644
index 0000000..593fc78
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Lazy.h
@@ -0,0 +1,71 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LAZY_H
+#define EIGEN_LAZY_H
+
+namespace Eigen { 
+
+/** \deprecated it is only used by lazy() which is deprecated
+  *
+  * \returns an expression of *this with added flags
+  *
+  * Example: \include MatrixBase_marked.cpp
+  * Output: \verbinclude MatrixBase_marked.out
+  *
+  * \sa class Flagged, extract(), part()
+  */
+template<typename Derived>
+template<unsigned int Added>
+inline const Flagged<Derived, Added, 0>
+MatrixBase<Derived>::marked() const
+{
+  return derived();
+}
+
+/** \deprecated use MatrixBase::noalias()
+  *
+  * \returns an expression of *this with the EvalBeforeAssigningBit flag removed.
+  *
+  * Example: \include MatrixBase_lazy.cpp
+  * Output: \verbinclude MatrixBase_lazy.out
+  *
+  * \sa class Flagged, marked()
+  */
+template<typename Derived>
+inline const Flagged<Derived, 0, EvalBeforeAssigningBit>
+MatrixBase<Derived>::lazy() const
+{
+  return derived();
+}
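+
+// Example: a sketch of the deprecated idiom and its Eigen3 replacement:
+//   C += (A * B).lazy();      // Eigen2 style: evaluate the product in place
+//   C.noalias() += A * B;     // equivalent Eigen3 spelling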
+
+
+/** \internal
+  * Overloaded to perform an efficient C += (A*B).lazy() */
+template<typename Derived>
+template<typename ProductDerived, typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::operator+=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+                                                       EvalBeforeAssigningBit>& other)
+{
+  other._expression().derived().addTo(derived()); return derived();
+}
+
+/** \internal
+  * Overloaded to perform an efficient C -= (A*B).lazy() */
+template<typename Derived>
+template<typename ProductDerived, typename Lhs, typename Rhs>
+Derived& MatrixBase<Derived>::operator-=(const Flagged<ProductBase<ProductDerived, Lhs,Rhs>, 0,
+                                                       EvalBeforeAssigningBit>& other)
+{
+  other._expression().derived().subTo(derived()); return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_LAZY_H
diff --git a/Eigen/src/Eigen2Support/LeastSquares.h b/Eigen/src/Eigen2Support/LeastSquares.h
new file mode 100644
index 0000000..7992d49
--- /dev/null
+++ b/Eigen/src/Eigen2Support/LeastSquares.h
@@ -0,0 +1,169 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN2_LEASTSQUARES_H
+#define EIGEN2_LEASTSQUARES_H
+
+namespace Eigen { 
+
+/** \ingroup LeastSquares_Module
+  *
+  * \leastsquares_module
+  *
+  * For a set of points, this function tries to express
+  * one of the coords as a linear (affine) function of the other coords.
+  *
+  * This is best explained by an example. This function works in full
+  * generality, for points in a space of arbitrary dimension, and also over
+  * the complex numbers, but for this example we will work in dimension 3
+  * over the real numbers (doubles).
+  *
+  * So let us work with the following set of 5 points given by their
+  * \f$(x,y,z)\f$ coordinates:
+  * @code
+    Vector3d points[5];
+    points[0] = Vector3d( 3.02, 6.89, -4.32 );
+    points[1] = Vector3d( 2.01, 5.39, -3.79 );
+    points[2] = Vector3d( 2.41, 6.01, -4.01 );
+    points[3] = Vector3d( 2.09, 5.55, -3.86 );
+    points[4] = Vector3d( 2.58, 6.32, -4.10 );
+  * @endcode
+  * Suppose that we want to express the second coordinate (\f$y\f$) as a linear
+  * expression in \f$x\f$ and \f$z\f$, that is,
+  * \f[ y=ax+bz+c \f]
+  * for some constants \f$a,b,c\f$. Thus, we want to find the best possible
+  * constants \f$a,b,c\f$ so that the plane of equation \f$y=ax+bz+c\f$ fits
+  * best the five above points. To do that, call this function as follows:
+  * @code
+    Vector3d* ptrs[5];
+    for(int k = 0; k < 5; ++k) ptrs[k] = &points[k];
+    Vector3d coeffs; // will store the coefficients a, b, c
+    linearRegression(
+      5,
+      ptrs,    // an array of pointers to the points
+      &coeffs,
+      1 // the coord to express as a function of
+        // the other ones. 0 means x, 1 means y, 2 means z.
+    );
+  * @endcode
+  * Now the vector \a coeffs is approximately
+  * \f$( 0.495 ,  -1.927 ,  -2.906 )\f$.
+  * Thus, we get \f$a=0.495, b = -1.927, c = -2.906\f$. Let us check for
+  * instance how near points[0] is from the plane of equation \f$y=ax+bz+c\f$.
+  * Looking at the coords of points[0], we see that:
+  * \f[ax+bz+c = 0.495 * 3.02 + (-1.927) * (-4.32) + (-2.906) = 6.91.\f]
+  * On the other hand, we have \f$y=6.89\f$. We see that the values
+  * \f$6.91\f$ and \f$6.89\f$
+  * are near, so points[0] is very near the plane of equation \f$y=ax+bz+c\f$.
+  *
+  * Let's now describe precisely the parameters:
+  * @param numPoints the number of points
+  * @param points the array of pointers to the points on which to perform the linear regression
+  * @param result pointer to the vector in which to store the result.
+                  This vector must be of the same type and size as the
+                  data points. The meaning of its coords is as follows.
+                  For brevity, let \f$n=Size\f$,
+                  \f$r_i=result[i]\f$,
+                  and \f$f=funcOfOthers\f$. Denote by
+                  \f$x_0,\ldots,x_{n-1}\f$
+                  the n coordinates in the n-dimensional space.
+                  Then the resulting equation is:
+                  \f[ x_f = r_0 x_0 + \cdots + r_{f-1}x_{f-1}
+                   + r_{f+1}x_{f+1} + \cdots + r_{n-1}x_{n-1} + r_n. \f]
+  * @param funcOfOthers Determines which coord to express as a function of the
+                        others. Coords are numbered starting from 0, so that a
+                        value of 0 means \f$x\f$, 1 means \f$y\f$,
+                        2 means \f$z\f$, ...
+  *
+  * \sa fitHyperplane()
+  */
+template<typename VectorType>
+void linearRegression(int numPoints,
+                      VectorType **points,
+                      VectorType *result,
+                      int funcOfOthers )
+{
+  typedef typename VectorType::Scalar Scalar;
+  typedef Hyperplane<Scalar, VectorType::SizeAtCompileTime> HyperplaneType;
+  const int size = points[0]->size();
+  result->resize(size);
+  HyperplaneType h(size);
+  fitHyperplane(numPoints, points, &h);
+  for(int i = 0; i < funcOfOthers; i++)
+    result->coeffRef(i) = - h.coeffs()[i] / h.coeffs()[funcOfOthers];
+  for(int i = funcOfOthers; i < size; i++)
+    result->coeffRef(i) = - h.coeffs()[i+1] / h.coeffs()[funcOfOthers];
+}
+
+/** \ingroup LeastSquares_Module
+  *
+  * \leastsquares_module
+  *
+  * This function is quite similar to linearRegression(), so we refer to the
+  * documentation of that function and only list here the differences.
+  *
+  * The main difference from linearRegression() is that this function doesn't
+  * take a \a funcOfOthers argument. Instead, it finds a general equation
+  * of the form
+  * \f[ r_0 x_0 + \cdots + r_{n-1}x_{n-1} + r_n = 0, \f]
+  * where \f$n=Size\f$, \f$r_i=retCoefficients[i]\f$, and we denote by
+  * \f$x_0,\ldots,x_{n-1}\f$ the n coordinates in the n-dimensional space.
+  *
+  * Thus, the vector \a retCoefficients has size \f$n+1\f$, which is another
+  * difference from linearRegression().
+  *
+  * In practice, this function performs a hyperplane fit in a total least-squares sense
+  * via the following steps:
+  *  1 - center the data at its mean
+  *  2 - compute the covariance matrix
+  *  3 - pick the eigenvector corresponding to the smallest eigenvalue of the covariance matrix
+  * The ratio of the smallest eigenvalue to the second smallest gives a hint about the relevance
+  * of the solution. This value is optionally returned in \a soundness.
+  *
+  * \sa linearRegression()
+  */
+template<typename VectorType, typename HyperplaneType>
+void fitHyperplane(int numPoints,
+                   VectorType **points,
+                   HyperplaneType *result,
+                   typename NumTraits<typename VectorType::Scalar>::Real* soundness = 0)
+{
+  typedef typename VectorType::Scalar Scalar;
+  typedef Matrix<Scalar,VectorType::SizeAtCompileTime,VectorType::SizeAtCompileTime> CovMatrixType;
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType)
+  ei_assert(numPoints >= 1);
+  int size = points[0]->size();
+  ei_assert(size+1 == result->coeffs().size());
+
+  // compute the mean of the data
+  VectorType mean = VectorType::Zero(size);
+  for(int i = 0; i < numPoints; ++i)
+    mean += *(points[i]);
+  mean /= numPoints;
+
+  // compute the covariance matrix
+  CovMatrixType covMat = CovMatrixType::Zero(size, size);
+  for(int i = 0; i < numPoints; ++i)
+  {
+    VectorType diff = (*(points[i]) - mean).conjugate();
+    covMat += diff * diff.adjoint();
+  }
+
+  // now we just have to pick the eigenvector with the smallest eigenvalue
+  SelfAdjointEigenSolver<CovMatrixType> eig(covMat);
+  result->normal() = eig.eigenvectors().col(0);
+  if (soundness)
+    *soundness = eig.eigenvalues().coeff(0)/eig.eigenvalues().coeff(1);
+
+  // let's compute the constant coefficient such that the
+  // plane passes through the mean point:
+  result->offset() = - (result->normal().cwise()* mean).sum();
+}
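+
+// Example: a minimal sketch, assuming p0..p3 are initialized Vector3d points
+// (the Hyperplane size constructor is used here):
+//   Vector3d* pts[4] = { &p0, &p1, &p2, &p3 };
+//   Hyperplane<double,3> h(3);
+//   fitHyperplane(4, pts, &h);   // h.normal() and h.offset() describe the plane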
+
+} // end namespace Eigen
+
+#endif // EIGEN2_LEASTSQUARES_H
diff --git a/Eigen/src/Eigen2Support/Macros.h b/Eigen/src/Eigen2Support/Macros.h
new file mode 100644
index 0000000..351c32a
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Macros.h
@@ -0,0 +1,20 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN2_MACROS_H
+#define EIGEN2_MACROS_H
+
+#define ei_assert eigen_assert
+#define ei_internal_assert eigen_internal_assert
+
+#define EIGEN_ALIGN_128 EIGEN_ALIGN16
+
+#define EIGEN_ARCH_WANTS_ALIGNMENT EIGEN_ALIGN_STATICALLY
+
+#endif // EIGEN2_MACROS_H
diff --git a/Eigen/src/Eigen2Support/MathFunctions.h b/Eigen/src/Eigen2Support/MathFunctions.h
new file mode 100644
index 0000000..3544af2
--- /dev/null
+++ b/Eigen/src/Eigen2Support/MathFunctions.h
@@ -0,0 +1,57 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN2_MATH_FUNCTIONS_H
+#define EIGEN2_MATH_FUNCTIONS_H
+
+namespace Eigen { 
+
+template<typename T> inline typename NumTraits<T>::Real ei_real(const T& x) { return numext::real(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_imag(const T& x) { return numext::imag(x); }
+template<typename T> inline T ei_conj(const T& x) { return numext::conj(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_abs (const T& x) { using std::abs; return abs(x); }
+template<typename T> inline typename NumTraits<T>::Real ei_abs2(const T& x) { return numext::abs2(x); }
+template<typename T> inline T ei_sqrt(const T& x) { using std::sqrt; return sqrt(x); }
+template<typename T> inline T ei_exp (const T& x) { using std::exp;  return exp(x); }
+template<typename T> inline T ei_log (const T& x) { using std::log;  return log(x); }
+template<typename T> inline T ei_sin (const T& x) { using std::sin;  return sin(x); }
+template<typename T> inline T ei_cos (const T& x) { using std::cos;  return cos(x); }
+template<typename T> inline T ei_atan2(const T& x,const T& y) { using std::atan2; return atan2(x,y); }
+template<typename T> inline T ei_pow (const T& x,const T& y) { return numext::pow(x,y); }
+template<typename T> inline T ei_random () { return internal::random<T>(); }
+template<typename T> inline T ei_random (const T& x, const T& y) { return internal::random(x, y); }
+
+template<typename T> inline T precision () { return NumTraits<T>::dummy_precision(); }
+template<typename T> inline T machine_epsilon () { return NumTraits<T>::epsilon(); }
+
+
+template<typename Scalar, typename OtherScalar>
+inline bool ei_isMuchSmallerThan(const Scalar& x, const OtherScalar& y,
+                                   typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+  return internal::isMuchSmallerThan(x, y, precision);
+}
+
+template<typename Scalar>
+inline bool ei_isApprox(const Scalar& x, const Scalar& y,
+                          typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+  return internal::isApprox(x, y, precision);
+}
+
+template<typename Scalar>
+inline bool ei_isApproxOrLessThan(const Scalar& x, const Scalar& y,
+                                    typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
+{
+  return internal::isApproxOrLessThan(x, y, precision);
+}
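+
+// Example: a sketch of the fuzzy comparison wrappers at default precision:
+//   bool a = ei_isApprox(1.0, 1.0 + 1e-13);      // true for double
+//   bool b = ei_isMuchSmallerThan(1e-13, 1.0);   // true for double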
+
+} // end namespace Eigen
+
+#endif // EIGEN2_MATH_FUNCTIONS_H
diff --git a/Eigen/src/Eigen2Support/Memory.h b/Eigen/src/Eigen2Support/Memory.h
new file mode 100644
index 0000000..f86372b
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Memory.h
@@ -0,0 +1,45 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN2_MEMORY_H
+#define EIGEN2_MEMORY_H
+
+namespace Eigen { 
+
+inline void* ei_aligned_malloc(size_t size) { return internal::aligned_malloc(size); }
+inline void  ei_aligned_free(void *ptr) { internal::aligned_free(ptr); }
+inline void* ei_aligned_realloc(void *ptr, size_t new_size, size_t old_size) { return internal::aligned_realloc(ptr, new_size, old_size); }
+inline void* ei_handmade_aligned_malloc(size_t size) { return internal::handmade_aligned_malloc(size); }
+inline void  ei_handmade_aligned_free(void *ptr) { internal::handmade_aligned_free(ptr); }
+
+template<bool Align> inline void* ei_conditional_aligned_malloc(size_t size)
+{
+  return internal::conditional_aligned_malloc<Align>(size);
+}
+template<bool Align> inline void ei_conditional_aligned_free(void *ptr)
+{
+  internal::conditional_aligned_free<Align>(ptr);
+}
+template<bool Align> inline void* ei_conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size)
+{
+  return internal::conditional_aligned_realloc<Align>(ptr, new_size, old_size);
+}
+
+template<typename T> inline T* ei_aligned_new(size_t size)
+{
+  return internal::aligned_new<T>(size);
+}
+template<typename T> inline void ei_aligned_delete(T *ptr, size_t size)
+{
+  return internal::aligned_delete(ptr, size);
+}
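+
+// Example: a minimal sketch pairing allocation with release:
+//   float* p = ei_aligned_new<float>(8);   // 8 default-constructed floats
+//   ei_aligned_delete(p, 8);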
+
+} // end namespace Eigen
+
+#endif // EIGEN2_MEMORY_H
diff --git a/Eigen/src/Eigen2Support/Meta.h b/Eigen/src/Eigen2Support/Meta.h
new file mode 100644
index 0000000..fa37cfc
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Meta.h
@@ -0,0 +1,75 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN2_META_H
+#define EIGEN2_META_H
+
+namespace Eigen { 
+
+template<typename T>
+struct ei_traits : internal::traits<T>
+{};
+
+struct ei_meta_true {  enum { ret = 1 }; };
+struct ei_meta_false { enum { ret = 0 }; };
+
+template<bool Condition, typename Then, typename Else>
+struct ei_meta_if { typedef Then ret; };
+
+template<typename Then, typename Else>
+struct ei_meta_if <false, Then, Else> { typedef Else ret; };
+
+template<typename T, typename U> struct ei_is_same_type { enum { ret = 0 }; };
+template<typename T> struct ei_is_same_type<T,T> { enum { ret = 1 }; };
+
+template<typename T> struct ei_unref { typedef T type; };
+template<typename T> struct ei_unref<T&> { typedef T type; };
+
+template<typename T> struct ei_unpointer { typedef T type; };
+template<typename T> struct ei_unpointer<T*> { typedef T type; };
+template<typename T> struct ei_unpointer<T*const> { typedef T type; };
+
+template<typename T> struct ei_unconst { typedef T type; };
+template<typename T> struct ei_unconst<const T> { typedef T type; };
+template<typename T> struct ei_unconst<T const &> { typedef T & type; };
+template<typename T> struct ei_unconst<T const *> { typedef T * type; };
+
+template<typename T> struct ei_cleantype { typedef T type; };
+template<typename T> struct ei_cleantype<const T>   { typedef typename ei_cleantype<T>::type type; };
+template<typename T> struct ei_cleantype<const T&>  { typedef typename ei_cleantype<T>::type type; };
+template<typename T> struct ei_cleantype<T&>        { typedef typename ei_cleantype<T>::type type; };
+template<typename T> struct ei_cleantype<const T*>  { typedef typename ei_cleantype<T>::type type; };
+template<typename T> struct ei_cleantype<T*>        { typedef typename ei_cleantype<T>::type type; };
+
+/** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer.
+  * Usage example: \code ei_meta_sqrt<1023>::ret \endcode
+  */
+template<int Y,
+         int InfX = 0,
+         int SupX = ((Y==1) ? 1 : Y/2),
+         bool Done = ((SupX-InfX)<=1 ? true : ((SupX*SupX <= Y) && ((SupX+1)*(SupX+1) > Y))) >
+                                // use ?: instead of || just to shut up a stupid gcc 4.3 warning
+class ei_meta_sqrt
+{
+    enum {
+      MidX = (InfX+SupX)/2,
+      TakeInf = MidX*MidX > Y ? 1 : 0,
+      NewInf = int(TakeInf) ? InfX : int(MidX),
+      NewSup = int(TakeInf) ? int(MidX) : SupX
+    };
+  public:
+    enum { ret = ei_meta_sqrt<Y,NewInf,NewSup>::ret };
+};
+
+template<int Y, int InfX, int SupX>
+class ei_meta_sqrt<Y, InfX, SupX, true> { public:  enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; };
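+
+// Example: a compile-time sketch:
+//   enum { S = ei_meta_sqrt<1023>::ret };   // S == 31, since 31*31 <= 1023 < 32*32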
+
+} // end namespace Eigen
+
+#endif // EIGEN2_META_H
diff --git a/Eigen/src/Eigen2Support/Minor.h b/Eigen/src/Eigen2Support/Minor.h
new file mode 100644
index 0000000..4cded57
--- /dev/null
+++ b/Eigen/src/Eigen2Support/Minor.h
@@ -0,0 +1,117 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MINOR_H
+#define EIGEN_MINOR_H
+
+namespace Eigen { 
+
+/**
+  * \class Minor
+  *
+  * \brief Expression of a minor
+  *
+  * \param MatrixType the type of the object in which we are taking a minor
+  *
+  * This class represents an expression of a minor. It is the return
+  * type of MatrixBase::minor() and most of the time this is the only way it
+  * is used.
+  *
+  * \sa MatrixBase::minor()
+  */
+
+namespace internal {
+template<typename MatrixType>
+struct traits<Minor<MatrixType> >
+ : traits<MatrixType>
+{
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+  typedef typename MatrixType::StorageKind StorageKind;
+  enum {
+    RowsAtCompileTime = (MatrixType::RowsAtCompileTime != Dynamic) ?
+                          int(MatrixType::RowsAtCompileTime) - 1 : Dynamic,
+    ColsAtCompileTime = (MatrixType::ColsAtCompileTime != Dynamic) ?
+                          int(MatrixType::ColsAtCompileTime) - 1 : Dynamic,
+    MaxRowsAtCompileTime = (MatrixType::MaxRowsAtCompileTime != Dynamic) ?
+                             int(MatrixType::MaxRowsAtCompileTime) - 1 : Dynamic,
+    MaxColsAtCompileTime = (MatrixType::MaxColsAtCompileTime != Dynamic) ?
+                             int(MatrixType::MaxColsAtCompileTime) - 1 : Dynamic,
+    Flags = _MatrixTypeNested::Flags & (HereditaryBits | LvalueBit),
+    CoeffReadCost = _MatrixTypeNested::CoeffReadCost // minor is used typically on tiny matrices,
+      // where loops are unrolled and the 'if' evaluates at compile time
+  };
+};
+}
+
+template<typename MatrixType> class Minor
+  : public MatrixBase<Minor<MatrixType> >
+{
+  public:
+
+    typedef MatrixBase<Minor> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Minor)
+
+    inline Minor(const MatrixType& matrix,
+                       Index row, Index col)
+      : m_matrix(matrix), m_row(row), m_col(col)
+    {
+      eigen_assert(row >= 0 && row < matrix.rows()
+          && col >= 0 && col < matrix.cols());
+    }
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Minor)
+
+    inline Index rows() const { return m_matrix.rows() - 1; }
+    inline Index cols() const { return m_matrix.cols() - 1; }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      return m_matrix.const_cast_derived().coeffRef(row + (row >= m_row), col + (col >= m_col));
+    }
+
+    inline const Scalar coeff(Index row, Index col) const
+    {
+      return m_matrix.coeff(row + (row >= m_row), col + (col >= m_col));
+    }
+
+  protected:
+    const typename MatrixType::Nested m_matrix;
+    const Index m_row, m_col;
+};
+
+/**
+  * \return an expression of the (\a row, \a col)-minor of *this,
+  * i.e. an expression constructed from *this by removing the specified
+  * row and column.
+  *
+  * Example: \include MatrixBase_minor.cpp
+  * Output: \verbinclude MatrixBase_minor.out
+  *
+  * \sa class Minor
+  */
+template<typename Derived>
+inline Minor<Derived>
+MatrixBase<Derived>::minor(Index row, Index col)
+{
+  return Minor<Derived>(derived(), row, col);
+}
+
+/**
+  * This is the const version of minor(). */
+template<typename Derived>
+inline const Minor<Derived>
+MatrixBase<Derived>::minor(Index row, Index col) const
+{
+  return Minor<Derived>(derived(), row, col);
+}
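+
+// Example: a minimal sketch removing row 0 and column 1 of an initialized m:
+//   Matrix3d m;
+//   Matrix2d sub = m.minor(0, 1);   // 2x2 expression, evaluated on assignment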
+
+} // end namespace Eigen
+
+#endif // EIGEN_MINOR_H
diff --git a/Eigen/src/Eigen2Support/QR.h b/Eigen/src/Eigen2Support/QR.h
new file mode 100644
index 0000000..2042c98
--- /dev/null
+++ b/Eigen/src/Eigen2Support/QR.h
@@ -0,0 +1,67 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN2_QR_H
+#define EIGEN2_QR_H
+
+namespace Eigen { 
+
+template<typename MatrixType>
+class QR : public HouseholderQR<MatrixType>
+{
+  public:
+
+    typedef HouseholderQR<MatrixType> Base;
+    typedef Block<const MatrixType, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> MatrixRBlockType;
+
+    QR() : Base() {}
+
+    template<typename T>
+    explicit QR(const T& t) : Base(t) {}
+
+    template<typename OtherDerived, typename ResultType>
+    bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
+    {
+      *result = static_cast<const Base*>(this)->solve(b);
+      return true;
+    }
+
+    MatrixType matrixQ(void) const {
+      MatrixType ret = MatrixType::Identity(this->rows(), this->cols());
+      ret = this->householderQ() * ret;
+      return ret;
+    }
+
+    bool isFullRank() const {
+      return true;
+    }
+    
+    const TriangularView<MatrixRBlockType, UpperTriangular>
+    matrixR(void) const
+    {
+      int cols = this->cols();
+      return MatrixRBlockType(this->matrixQR(), 0, 0, cols, cols).template triangularView<UpperTriangular>();
+    }
+};
+
+/** \return the QR decomposition of \c *this.
+  *
+  * \sa class QR
+  */
+template<typename Derived>
+const QR<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::qr() const
+{
+  return QR<PlainObject>(eval());
+}
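+
+// Example: a minimal sketch of the Eigen2-style solve, assuming A and b are
+// initialized and A has full rank:
+//   MatrixXd A; VectorXd b;
+//   VectorXd x;
+//   A.qr().solve(b, &x);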
+
+} // end namespace Eigen
+
+#endif // EIGEN2_QR_H
diff --git a/Eigen/src/Eigen2Support/SVD.h b/Eigen/src/Eigen2Support/SVD.h
new file mode 100644
index 0000000..3d03d22
--- /dev/null
+++ b/Eigen/src/Eigen2Support/SVD.h
@@ -0,0 +1,637 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN2_SVD_H
+#define EIGEN2_SVD_H
+
+namespace Eigen {
+
+/** \ingroup SVD_Module
+  * \nonstableyet
+  *
+  * \class SVD
+  *
+  * \brief Standard SVD decomposition of a matrix and associated features
+  *
+  * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+  *
+  * This class performs a standard SVD decomposition of a real matrix A of size \c M x \c N
+  * with \c M \>= \c N.
+  *
+  *
+  * \sa MatrixBase::svd()
+  */
+template<typename MatrixType> class SVD
+{
+  private:
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+
+    enum {
+      PacketSize = internal::packet_traits<Scalar>::size,
+      AlignmentMask = int(PacketSize)-1,
+      MinSize = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime)
+    };
+
+    typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVector;
+    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> RowVector;
+
+    typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MinSize> MatrixUType;
+    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> MatrixVType;
+    typedef Matrix<Scalar, MinSize, 1> SingularValuesType;
+
+  public:
+
+    SVD() {} // a user who relied on the compiler-generated default constructor reported problems with MSVC in 2.0.7
+    
+    SVD(const MatrixType& matrix)
+      : m_matU(matrix.rows(), (std::min)(matrix.rows(), matrix.cols())),
+        m_matV(matrix.cols(),matrix.cols()),
+        m_sigma((std::min)(matrix.rows(),matrix.cols()))
+    {
+      compute(matrix);
+    }
+
+    template<typename OtherDerived, typename ResultType>
+    bool solve(const MatrixBase<OtherDerived> &b, ResultType* result) const;
+
+    const MatrixUType& matrixU() const { return m_matU; }
+    const SingularValuesType& singularValues() const { return m_sigma; }
+    const MatrixVType& matrixV() const { return m_matV; }
+
+    void compute(const MatrixType& matrix);
+    SVD& sort();
+
+    template<typename UnitaryType, typename PositiveType>
+    void computeUnitaryPositive(UnitaryType *unitary, PositiveType *positive) const;
+    template<typename PositiveType, typename UnitaryType>
+    void computePositiveUnitary(PositiveType *positive, UnitaryType *unitary) const;
+    template<typename RotationType, typename ScalingType>
+    void computeRotationScaling(RotationType *unitary, ScalingType *positive) const;
+    template<typename ScalingType, typename RotationType>
+    void computeScalingRotation(ScalingType *positive, RotationType *unitary) const;
+
+  protected:
+    /** \internal */
+    MatrixUType m_matU;
+    /** \internal */
+    MatrixVType m_matV;
+    /** \internal */
+    SingularValuesType m_sigma;
+};
+
+/** Computes / recomputes the SVD decomposition A = U S V^* of \a matrix
+  *
+  * \note this code has been adapted from JAMA (public domain)
+  */
+template<typename MatrixType>
+void SVD<MatrixType>::compute(const MatrixType& matrix)
+{
+  const int m = matrix.rows();
+  const int n = matrix.cols();
+  const int nu = (std::min)(m,n);
+  ei_assert(m>=n && "In Eigen 2.0, SVD only works for MxN matrices with M>=N. Sorry!");
+  ei_assert(m>1 && "In Eigen 2.0, SVD doesn't work on 1x1 matrices");
+
+  m_matU.resize(m, nu);
+  m_matU.setZero();
+  m_sigma.resize((std::min)(m,n));
+  m_matV.resize(n,n);
+
+  RowVector e(n);
+  ColVector work(m);
+  MatrixType matA(matrix);
+  const bool wantu = true;
+  const bool wantv = true;
+  int i=0, j=0, k=0;
+
+  // Reduce A to bidiagonal form, storing the diagonal elements
+  // in s and the super-diagonal elements in e.
+  int nct = (std::min)(m-1,n);
+  int nrt = (std::max)(0,(std::min)(n-2,m));
+  for (k = 0; k < (std::max)(nct,nrt); ++k)
+  {
+    if (k < nct)
+    {
+      // Compute the transformation for the k-th column and
+      // place the k-th diagonal in m_sigma[k].
+      m_sigma[k] = matA.col(k).end(m-k).norm();
+      if (m_sigma[k] != 0.0) // FIXME
+      {
+        if (matA(k,k) < 0.0)
+          m_sigma[k] = -m_sigma[k];
+        matA.col(k).end(m-k) /= m_sigma[k];
+        matA(k,k) += 1.0;
+      }
+      m_sigma[k] = -m_sigma[k];
+    }
+
+    for (j = k+1; j < n; ++j)
+    {
+      if ((k < nct) && (m_sigma[k] != 0.0))
+      {
+        // Apply the transformation.
+        Scalar t = matA.col(k).end(m-k).eigen2_dot(matA.col(j).end(m-k)); // FIXME dot product or cwise prod + .sum() ??
+        t = -t/matA(k,k);
+        matA.col(j).end(m-k) += t * matA.col(k).end(m-k);
+      }
+
+      // Place the k-th row of A into e for the
+      // subsequent calculation of the row transformation.
+      e[j] = matA(k,j);
+    }
+
+    // Place the transformation in U for subsequent back multiplication.
+    if (wantu && (k < nct))
+      m_matU.col(k).end(m-k) = matA.col(k).end(m-k);
+
+    if (k < nrt)
+    {
+      // Compute the k-th row transformation and place the
+      // k-th super-diagonal in e[k].
+      e[k] = e.end(n-k-1).norm();
+      if (e[k] != 0.0)
+      {
+          if (e[k+1] < 0.0)
+            e[k] = -e[k];
+          e.end(n-k-1) /= e[k];
+          e[k+1] += 1.0;
+      }
+      e[k] = -e[k];
+      if ((k+1 < m) && (e[k] != 0.0))
+      {
+        // Apply the transformation.
+        work.end(m-k-1) = matA.corner(BottomRight,m-k-1,n-k-1) * e.end(n-k-1);
+        for (j = k+1; j < n; ++j)
+          matA.col(j).end(m-k-1) += (-e[j]/e[k+1]) * work.end(m-k-1);
+      }
+
+      // Place the transformation in V for subsequent back multiplication.
+      if (wantv)
+        m_matV.col(k).end(n-k-1) = e.end(n-k-1);
+    }
+  }
+
+
+  // Set up the final bidiagonal matrix of order p.
+  int p = (std::min)(n,m+1);
+  if (nct < n)
+    m_sigma[nct] = matA(nct,nct);
+  if (m < p)
+    m_sigma[p-1] = 0.0;
+  if (nrt+1 < p)
+    e[nrt] = matA(nrt,p-1);
+  e[p-1] = 0.0;
+
+  // If required, generate U.
+  if (wantu)
+  {
+    for (j = nct; j < nu; ++j)
+    {
+      m_matU.col(j).setZero();
+      m_matU(j,j) = 1.0;
+    }
+    for (k = nct-1; k >= 0; k--)
+    {
+      if (m_sigma[k] != 0.0)
+      {
+        for (j = k+1; j < nu; ++j)
+        {
+          Scalar t = m_matU.col(k).end(m-k).eigen2_dot(m_matU.col(j).end(m-k)); // FIXME is it really a dot product we want ?
+          t = -t/m_matU(k,k);
+          m_matU.col(j).end(m-k) += t * m_matU.col(k).end(m-k);
+        }
+        m_matU.col(k).end(m-k) = - m_matU.col(k).end(m-k);
+        m_matU(k,k) = Scalar(1) + m_matU(k,k);
+        if (k-1>0)
+          m_matU.col(k).start(k-1).setZero();
+      }
+      else
+      {
+        m_matU.col(k).setZero();
+        m_matU(k,k) = 1.0;
+      }
+    }
+  }
+
+  // If required, generate V.
+  if (wantv)
+  {
+    for (k = n-1; k >= 0; k--)
+    {
+      if ((k < nrt) && (e[k] != 0.0))
+      {
+        for (j = k+1; j < nu; ++j)
+        {
+          Scalar t = m_matV.col(k).end(n-k-1).eigen2_dot(m_matV.col(j).end(n-k-1)); // FIXME is it really a dot product we want ?
+          t = -t/m_matV(k+1,k);
+          m_matV.col(j).end(n-k-1) += t * m_matV.col(k).end(n-k-1);
+        }
+      }
+      m_matV.col(k).setZero();
+      m_matV(k,k) = 1.0;
+    }
+  }
+
+  // Main iteration loop for the singular values.
+  int pp = p-1;
+  int iter = 0;
+  Scalar eps = ei_pow(Scalar(2),ei_is_same_type<Scalar,float>::ret ? Scalar(-23) : Scalar(-52));
+  while (p > 0)
+  {
+    int k=0;
+    int kase=0;
+
+    // Here is where a test for too many iterations would go.
+
+    // This section of the program inspects for
+    // negligible elements in the s and e arrays.  On
+    // completion the variables kase and k are set as follows.
+
+    // kase = 1     if s(p) and e[k-1] are negligible and k<p
+    // kase = 2     if s(k) is negligible and k<p
+    // kase = 3     if e[k-1] is negligible, k<p, and
+    //              s(k), ..., s(p) are not negligible (qr step).
+    // kase = 4     if e(p-1) is negligible (convergence).
+
+    for (k = p-2; k >= -1; --k)
+    {
+      if (k == -1)
+          break;
+      if (ei_abs(e[k]) <= eps*(ei_abs(m_sigma[k]) + ei_abs(m_sigma[k+1])))
+      {
+          e[k] = 0.0;
+          break;
+      }
+    }
+    if (k == p-2)
+    {
+      kase = 4;
+    }
+    else
+    {
+      int ks;
+      for (ks = p-1; ks >= k; --ks)
+      {
+        if (ks == k)
+          break;
+        Scalar t = (ks != p ? ei_abs(e[ks]) : Scalar(0)) + (ks != k+1 ? ei_abs(e[ks-1]) : Scalar(0));
+        if (ei_abs(m_sigma[ks]) <= eps*t)
+        {
+          m_sigma[ks] = 0.0;
+          break;
+        }
+      }
+      if (ks == k)
+      {
+        kase = 3;
+      }
+      else if (ks == p-1)
+      {
+        kase = 1;
+      }
+      else
+      {
+        kase = 2;
+        k = ks;
+      }
+    }
+    ++k;
+
+    // Perform the task indicated by kase.
+    switch (kase)
+    {
+
+      // Deflate negligible s(p).
+      case 1:
+      {
+        Scalar f(e[p-2]);
+        e[p-2] = 0.0;
+        for (j = p-2; j >= k; --j)
+        {
+          Scalar t(numext::hypot(m_sigma[j],f));
+          Scalar cs(m_sigma[j]/t);
+          Scalar sn(f/t);
+          m_sigma[j] = t;
+          if (j != k)
+          {
+            f = -sn*e[j-1];
+            e[j-1] = cs*e[j-1];
+          }
+          if (wantv)
+          {
+            for (i = 0; i < n; ++i)
+            {
+              t = cs*m_matV(i,j) + sn*m_matV(i,p-1);
+              m_matV(i,p-1) = -sn*m_matV(i,j) + cs*m_matV(i,p-1);
+              m_matV(i,j) = t;
+            }
+          }
+        }
+      }
+      break;
+
+      // Split at negligible s(k).
+      case 2:
+      {
+        Scalar f(e[k-1]);
+        e[k-1] = 0.0;
+        for (j = k; j < p; ++j)
+        {
+          Scalar t(numext::hypot(m_sigma[j],f));
+          Scalar cs(m_sigma[j]/t);
+          Scalar sn(f/t);
+          m_sigma[j] = t;
+          f = -sn*e[j];
+          e[j] = cs*e[j];
+          if (wantu)
+          {
+            for (i = 0; i < m; ++i)
+            {
+              t = cs*m_matU(i,j) + sn*m_matU(i,k-1);
+              m_matU(i,k-1) = -sn*m_matU(i,j) + cs*m_matU(i,k-1);
+              m_matU(i,j) = t;
+            }
+          }
+        }
+      }
+      break;
+
+      // Perform one qr step.
+      case 3:
+      {
+        // Calculate the shift.
+        Scalar scale = (std::max)((std::max)((std::max)((std::max)(
+                        ei_abs(m_sigma[p-1]),ei_abs(m_sigma[p-2])),ei_abs(e[p-2])),
+                        ei_abs(m_sigma[k])),ei_abs(e[k]));
+        Scalar sp = m_sigma[p-1]/scale;
+        Scalar spm1 = m_sigma[p-2]/scale;
+        Scalar epm1 = e[p-2]/scale;
+        Scalar sk = m_sigma[k]/scale;
+        Scalar ek = e[k]/scale;
+        Scalar b = ((spm1 + sp)*(spm1 - sp) + epm1*epm1)/Scalar(2);
+        Scalar c = (sp*epm1)*(sp*epm1);
+        Scalar shift(0);
+        if ((b != 0.0) || (c != 0.0))
+        {
+          shift = ei_sqrt(b*b + c);
+          if (b < 0.0)
+            shift = -shift;
+          shift = c/(b + shift);
+        }
+        Scalar f = (sk + sp)*(sk - sp) + shift;
+        Scalar g = sk*ek;
+
+        // Chase zeros.
+
+        for (j = k; j < p-1; ++j)
+        {
+          Scalar t = numext::hypot(f,g);
+          Scalar cs = f/t;
+          Scalar sn = g/t;
+          if (j != k)
+            e[j-1] = t;
+          f = cs*m_sigma[j] + sn*e[j];
+          e[j] = cs*e[j] - sn*m_sigma[j];
+          g = sn*m_sigma[j+1];
+          m_sigma[j+1] = cs*m_sigma[j+1];
+          if (wantv)
+          {
+            for (i = 0; i < n; ++i)
+            {
+              t = cs*m_matV(i,j) + sn*m_matV(i,j+1);
+              m_matV(i,j+1) = -sn*m_matV(i,j) + cs*m_matV(i,j+1);
+              m_matV(i,j) = t;
+            }
+          }
+          t = numext::hypot(f,g);
+          cs = f/t;
+          sn = g/t;
+          m_sigma[j] = t;
+          f = cs*e[j] + sn*m_sigma[j+1];
+          m_sigma[j+1] = -sn*e[j] + cs*m_sigma[j+1];
+          g = sn*e[j+1];
+          e[j+1] = cs*e[j+1];
+          if (wantu && (j < m-1))
+          {
+            for (i = 0; i < m; ++i)
+            {
+              t = cs*m_matU(i,j) + sn*m_matU(i,j+1);
+              m_matU(i,j+1) = -sn*m_matU(i,j) + cs*m_matU(i,j+1);
+              m_matU(i,j) = t;
+            }
+          }
+        }
+        e[p-2] = f;
+        iter = iter + 1;
+      }
+      break;
+
+      // Convergence.
+      case 4:
+      {
+        // Make the singular values positive.
+        if (m_sigma[k] <= 0.0)
+        {
+          m_sigma[k] = m_sigma[k] < Scalar(0) ? -m_sigma[k] : Scalar(0);
+          if (wantv)
+            m_matV.col(k).start(pp+1) = -m_matV.col(k).start(pp+1);
+        }
+
+        // Order the singular values.
+        while (k < pp)
+        {
+          if (m_sigma[k] >= m_sigma[k+1])
+            break;
+          Scalar t = m_sigma[k];
+          m_sigma[k] = m_sigma[k+1];
+          m_sigma[k+1] = t;
+          if (wantv && (k < n-1))
+            m_matV.col(k).swap(m_matV.col(k+1));
+          if (wantu && (k < m-1))
+            m_matU.col(k).swap(m_matU.col(k+1));
+          ++k;
+        }
+        iter = 0;
+        p--;
+      }
+      break;
+    } // end big switch
+  } // end iterations
+}
+
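+/** Sorts the singular values in decreasing order and applies the same
+  * permutation to the columns of U and V (simple selection sort). */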
+template<typename MatrixType>
+SVD<MatrixType>& SVD<MatrixType>::sort()
+{
+  int mu = m_matU.rows();
+  int mv = m_matV.rows();
+  int n  = m_matU.cols();
+
+  for (int i=0; i<n; ++i)
+  {
+    int  k = i;
+    Scalar p = m_sigma.coeff(i);
+
+    for (int j=i+1; j<n; ++j)
+    {
+      if (m_sigma.coeff(j) > p)
+      {
+        k = j;
+        p = m_sigma.coeff(j);
+      }
+    }
+    if (k != i)
+    {
+      m_sigma.coeffRef(k) = m_sigma.coeff(i);  // i.e.
+      m_sigma.coeffRef(i) = p;                 // swaps the i-th and the k-th elements
+
+      int j = mu;
+      for(int s=0; j!=0; ++s, --j)
+        std::swap(m_matU.coeffRef(s,i), m_matU.coeffRef(s,k));
+
+      j = mv;
+      for (int s=0; j!=0; ++s, --j)
+        std::swap(m_matV.coeffRef(s,i), m_matV.coeffRef(s,k));
+    }
+  }
+  return *this;
+}
+
+/** \returns the solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+  * The parts of the solution corresponding to zero singular values are ignored.
+  *
+  * \sa MatrixBase::svd(), LU::solve(), LLT::solve()
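+  *
+  * For example (illustrative sketch; \c A and \c b are assumed to be defined
+  * elsewhere, with \c b.rows() == \c A.rows()):
+  * \code
+  * SVD<MatrixXd> svd(A);
+  * VectorXd x(A.cols());
+  * bool ok = svd.solve(b, &x);   // x receives the solution; near-zero singular values are ignored
+  * \endcode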
+  */
+template<typename MatrixType>
+template<typename OtherDerived, typename ResultType>
+bool SVD<MatrixType>::solve(const MatrixBase<OtherDerived> &b, ResultType* result) const
+{
+  ei_assert(b.rows() == m_matU.rows());
+
+  Scalar maxVal = m_sigma.cwise().abs().maxCoeff();
+  for (int j=0; j<b.cols(); ++j)
+  {
+    Matrix<Scalar,MatrixUType::RowsAtCompileTime,1> aux = m_matU.transpose() * b.col(j);
+
+    for (int i = 0; i < m_matU.cols(); ++i)
+    {
+      Scalar si = m_sigma.coeff(i);
+      if (ei_isMuchSmallerThan(ei_abs(si),maxVal))
+        aux.coeffRef(i) = 0;
+      else
+        aux.coeffRef(i) /= si;
+    }
+
+    result->col(j) = m_matV * aux;
+  }
+  return true;
+}
+
+/** Computes the polar decomposition of the matrix, as a product unitary x positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * Only for square matrices.
+  *
+  * \sa computePositiveUnitary(), computeRotationScaling()
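+  *
+  * Illustrative sketch (\c A is assumed to be a square dynamic-size matrix
+  * defined elsewhere):
+  * \code
+  * SVD<MatrixXd> svd(A);
+  * MatrixXd Q, P;
+  * svd.computeUnitaryPositive(&Q, &P);   // A ~ Q * P, with P positive semi-definite
+  * \endcode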
+  */
+template<typename MatrixType>
+template<typename UnitaryType, typename PositiveType>
+void SVD<MatrixType>::computeUnitaryPositive(UnitaryType *unitary,
+                                             PositiveType *positive) const
+{
+  ei_assert(m_matU.cols() == m_matV.cols() && "Polar decomposition is only for square matrices");
+  if(unitary) *unitary = m_matU * m_matV.adjoint();
+  if(positive) *positive = m_matV * m_sigma.asDiagonal() * m_matV.adjoint();
+}
+
+/** Computes the polar decomposition of the matrix, as a product positive x unitary.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * Only for square matrices.
+  *
+  * \sa computeUnitaryPositive(), computeRotationScaling()
+  */
+template<typename MatrixType>
+template<typename PositiveType, typename UnitaryType>
+void SVD<MatrixType>::computePositiveUnitary(PositiveType *positive,
+                                             UnitaryType *unitary) const
+{
+  ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices");
+  if(unitary) *unitary = m_matU * m_matV.adjoint();
+  if(positive) *positive = m_matU * m_sigma.asDiagonal() * m_matU.adjoint();
+}
+
+/** decomposes the matrix as a product rotation x scaling; the scaling is
+  * not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * This method requires the Geometry module.
+  *
+  * \sa computeScalingRotation(), computeUnitaryPositive()
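+  *
+  * Illustrative sketch (\c A is assumed to be a square 3x3 transform defined
+  * elsewhere; fixed-size matrices suit the lazyAssign() used internally):
+  * \code
+  * SVD<Matrix3d> svd(A);
+  * Matrix3d R, S;
+  * svd.computeRotationScaling(&R, &S);   // A ~ R * S, with det(R) = +1
+  * \endcode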
+  */
+template<typename MatrixType>
+template<typename RotationType, typename ScalingType>
+void SVD<MatrixType>::computeRotationScaling(RotationType *rotation, ScalingType *scaling) const
+{
+  ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices");
+  Scalar x = (m_matU * m_matV.adjoint()).determinant(); // so x has absolute value 1
+  Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> sv(m_sigma);
+  sv.coeffRef(0) *= x;
+  if(scaling) scaling->lazyAssign(m_matV * sv.asDiagonal() * m_matV.adjoint());
+  if(rotation)
+  {
+    MatrixType m(m_matU);
+    m.col(0) /= x;
+    rotation->lazyAssign(m * m_matV.adjoint());
+  }
+}
+
+/** decomposes the matrix as a product scaling x rotation; the scaling is
+  * not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * This method requires the Geometry module.
+  *
+  * \sa computeRotationScaling(), computeUnitaryPositive()
+  */
+template<typename MatrixType>
+template<typename ScalingType, typename RotationType>
+void SVD<MatrixType>::computeScalingRotation(ScalingType *scaling, RotationType *rotation) const
+{
+  ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices");
+  Scalar x = (m_matU * m_matV.adjoint()).determinant(); // so x has absolute value 1
+  Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> sv(m_sigma);
+  sv.coeffRef(0) *= x;
+  if(scaling) scaling->lazyAssign(m_matU * sv.asDiagonal() * m_matU.adjoint());
+  if(rotation)
+  {
+    MatrixType m(m_matU);
+    m.col(0) /= x;
+    rotation->lazyAssign(m * m_matV.adjoint());
+  }
+}
+
+
+/** \svd_module
+  * \returns the SVD decomposition of \c *this
+  */
+template<typename Derived>
+inline SVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::svd() const
+{
+  return SVD<PlainObject>(derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN2_SVD_H
diff --git a/Eigen/src/Eigen2Support/TriangularSolver.h b/Eigen/src/Eigen2Support/TriangularSolver.h
new file mode 100644
index 0000000..ebbeb3b
--- /dev/null
+++ b/Eigen/src/Eigen2Support/TriangularSolver.h
@@ -0,0 +1,42 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRIANGULAR_SOLVER2_H
+#define EIGEN_TRIANGULAR_SOLVER2_H
+
+namespace Eigen { 
+
+const unsigned int UnitDiagBit = UnitDiag;
+const unsigned int SelfAdjointBit = SelfAdjoint;
+const unsigned int UpperTriangularBit = Upper;
+const unsigned int LowerTriangularBit = Lower;
+
+const unsigned int UpperTriangular = Upper;
+const unsigned int LowerTriangular = Lower;
+const unsigned int UnitUpperTriangular = UnitUpper;
+const unsigned int UnitLowerTriangular = UnitLower;
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed>
+template<typename OtherDerived>
+typename ExpressionType::PlainObject
+Flagged<ExpressionType,Added,Removed>::solveTriangular(const MatrixBase<OtherDerived>& other) const
+{
+  return m_matrix.template triangularView<Added>().solve(other.derived());
+}
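+
+// For illustration (hypothetical call, assuming the Eigen2 marked() API):
+// a legacy expression such as
+//   x = A.marked<UpperTriangular>().solveTriangular(b);
+// forwards to the Eigen3 equivalent
+//   x = A.triangularView<Upper>().solve(b);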
+
+template<typename ExpressionType, unsigned int Added, unsigned int Removed>
+template<typename OtherDerived>
+void Flagged<ExpressionType,Added,Removed>::solveTriangularInPlace(const MatrixBase<OtherDerived>& other) const
+{
+  m_matrix.template triangularView<Added>().solveInPlace(other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIANGULAR_SOLVER2_H
diff --git a/Eigen/src/Eigen2Support/VectorBlock.h b/Eigen/src/Eigen2Support/VectorBlock.h
new file mode 100644
index 0000000..71a8080
--- /dev/null
+++ b/Eigen/src/Eigen2Support/VectorBlock.h
@@ -0,0 +1,94 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN2_VECTORBLOCK_H
+#define EIGEN2_VECTORBLOCK_H
+
+namespace Eigen { 
+
+/** \deprecated use DenseBase::head(Index) */
+template<typename Derived>
+inline VectorBlock<Derived>
+MatrixBase<Derived>::start(Index size)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return VectorBlock<Derived>(derived(), 0, size);
+}
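+
+// For illustration (hypothetical snippet): these wrappers make the old names
+// behave exactly like the new ones, e.g. for VectorXd v(10):
+//   v.start(3);  // same as v.head(3): coefficients 0..2
+//   v.end(3);    // same as v.tail(3): coefficients 7..9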
+
+/** \deprecated use DenseBase::head(Index) */
+template<typename Derived>
+inline const VectorBlock<const Derived>
+MatrixBase<Derived>::start(Index size) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return VectorBlock<const Derived>(derived(), 0, size);
+}
+
+/** \deprecated use DenseBase::tail(Index) */
+template<typename Derived>
+inline VectorBlock<Derived>
+MatrixBase<Derived>::end(Index size)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return VectorBlock<Derived>(derived(), this->size() - size, size);
+}
+
+/** \deprecated use DenseBase::tail(Index) */
+template<typename Derived>
+inline const VectorBlock<const Derived>
+MatrixBase<Derived>::end(Index size) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return VectorBlock<const Derived>(derived(), this->size() - size, size);
+}
+
+/** \deprecated use DenseBase::head() */
+template<typename Derived>
+template<int Size>
+inline VectorBlock<Derived,Size>
+MatrixBase<Derived>::start()
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return VectorBlock<Derived,Size>(derived(), 0);
+}
+
+/** \deprecated use DenseBase::head() */
+template<typename Derived>
+template<int Size>
+inline const VectorBlock<const Derived,Size>
+MatrixBase<Derived>::start() const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return VectorBlock<const Derived,Size>(derived(), 0);
+}
+
+/** \deprecated use DenseBase::tail() */
+template<typename Derived>
+template<int Size>
+inline VectorBlock<Derived,Size>
+MatrixBase<Derived>::end()
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return VectorBlock<Derived, Size>(derived(), size() - Size);
+}
+
+/** \deprecated use DenseBase::tail() */
+template<typename Derived>
+template<int Size>
+inline const VectorBlock<const Derived,Size>
+MatrixBase<Derived>::end() const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return VectorBlock<const Derived, Size>(derived(), size() - Size);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN2_VECTORBLOCK_H
diff --git a/Eigen/src/Eigenvalues/CMakeLists.txt b/Eigen/src/Eigenvalues/CMakeLists.txt
new file mode 100644
index 0000000..193e026
--- /dev/null
+++ b/Eigen/src/Eigenvalues/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_EIGENVALUES_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_EIGENVALUES_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Eigenvalues COMPONENT Devel
+  )
diff --git a/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
new file mode 100644
index 0000000..417c729
--- /dev/null
+++ b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -0,0 +1,341 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Claire Maurice
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_EIGEN_SOLVER_H
+#define EIGEN_COMPLEX_EIGEN_SOLVER_H
+
+#include "./ComplexSchur.h"
+
+namespace Eigen { 
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class ComplexEigenSolver
+  *
+  * \brief Computes eigenvalues and eigenvectors of general complex matrices
+  *
+  * \tparam _MatrixType the type of the matrix of which we are
+  * computing the eigendecomposition; this is expected to be an
+  * instantiation of the Matrix class template.
+  *
+  * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
+  * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v
+  * \f$.  If \f$ D \f$ is a diagonal matrix with the eigenvalues on
+  * the diagonal, and \f$ V \f$ is a matrix with the eigenvectors as
+  * its columns, then \f$ A V = V D \f$. The matrix \f$ V \f$ is
+  * almost always invertible, in which case we have \f$ A = V D V^{-1}
+  * \f$. This is called the eigendecomposition.
+  *
+  * The main function in this class is compute(), which computes the
+  * eigenvalues and eigenvectors of a given matrix. The
+  * documentation for that function contains an example showing the
+  * main features of the class.
+  *
+  * \sa class EigenSolver, class SelfAdjointEigenSolver
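+  *
+  * A minimal usage sketch (illustrative only; the matrix contents are arbitrary):
+  * \code
+  * MatrixXcd A = MatrixXcd::Random(4,4);
+  * ComplexEigenSolver<MatrixXcd> ces(A);
+  * if (ces.info() == Success) {
+  *   VectorXcd lambda = ces.eigenvalues();
+  *   MatrixXcd V = ces.eigenvectors();
+  *   // Columns of V satisfy A * V.col(k) = lambda(k) * V.col(k)
+  * }
+  * \endcode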
+  */
+template<typename _MatrixType> class ComplexEigenSolver
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type #MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    /** \brief Complex scalar type for #MatrixType.
+      *
+      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for vector of eigenvalues as returned by eigenvalues().
+      *
+      * This is a column vector with entries of type #ComplexScalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options&(~RowMajor), MaxColsAtCompileTime, 1> EigenvalueType;
+
+    /** \brief Type for matrix of eigenvectors as returned by eigenvectors().
+      *
+      * This is a square matrix with entries of type #ComplexScalar.
+      * The size is the same as the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorType;
+
+    /** \brief Default constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via compute().
+      */
+    ComplexEigenSolver()
+            : m_eivec(),
+              m_eivalues(),
+              m_schur(),
+              m_isInitialized(false),
+              m_eigenvectorsOk(false),
+              m_matX()
+    {}
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa ComplexEigenSolver()
+      */
+    ComplexEigenSolver(Index size)
+            : m_eivec(size, size),
+              m_eivalues(size),
+              m_schur(size),
+              m_isInitialized(false),
+              m_eigenvectorsOk(false),
+              m_matX(size, size)
+    {}
+
+    /** \brief Constructor; computes eigendecomposition of given matrix.
+      *
+      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed.
+      *
+      * This constructor calls compute() to compute the eigendecomposition.
+      */
+    ComplexEigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
+            : m_eivec(matrix.rows(),matrix.cols()),
+              m_eivalues(matrix.cols()),
+              m_schur(matrix.rows()),
+              m_isInitialized(false),
+              m_eigenvectorsOk(false),
+              m_matX(matrix.rows(),matrix.cols())
+    {
+      compute(matrix, computeEigenvectors);
+    }
+
+    /** \brief Returns the eigenvectors of given matrix.
+      *
+      * \returns  A const reference to the matrix whose columns are the eigenvectors.
+      *
+      * \pre Either the constructor
+      * ComplexEigenSolver(const MatrixType& matrix, bool) or the member
+      * function compute(const MatrixType& matrix, bool) has been called before
+      * to compute the eigendecomposition of a matrix, and
+      * \p computeEigenvectors was set to true (the default).
+      *
+      * This function returns a matrix whose columns are the eigenvectors. Column
+      * \f$ k \f$ is an eigenvector corresponding to eigenvalue number \f$ k
+      * \f$ as returned by eigenvalues().  The eigenvectors are normalized to
+      * have (Euclidean) norm equal to one. The matrix returned by this
+      * function is the matrix \f$ V \f$ in the eigendecomposition \f$ A = V D
+      * V^{-1} \f$, if it exists.
+      *
+      * Example: \include ComplexEigenSolver_eigenvectors.cpp
+      * Output: \verbinclude ComplexEigenSolver_eigenvectors.out
+      */
+    const EigenvectorType& eigenvectors() const
+    {
+      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+      return m_eivec;
+    }
+
+    /** \brief Returns the eigenvalues of given matrix.
+      *
+      * \returns A const reference to the column vector containing the eigenvalues.
+      *
+      * \pre Either the constructor
+      * ComplexEigenSolver(const MatrixType& matrix, bool) or the member
+      * function compute(const MatrixType& matrix, bool) has been called before
+      * to compute the eigendecomposition of a matrix.
+      *
+      * This function returns a column vector containing the
+      * eigenvalues. Eigenvalues are repeated according to their
+      * algebraic multiplicity, so there are as many eigenvalues as
+      * rows in the matrix. The eigenvalues are not sorted in any particular
+      * order.
+      *
+      * Example: \include ComplexEigenSolver_eigenvalues.cpp
+      * Output: \verbinclude ComplexEigenSolver_eigenvalues.out
+      */
+    const EigenvalueType& eigenvalues() const
+    {
+      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+      return m_eivalues;
+    }
+
+    /** \brief Computes eigendecomposition of given matrix.
+      *
+      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed.
+      * \returns    Reference to \c *this
+      *
+      * This function computes the eigenvalues of the complex matrix \p matrix.
+      * The eigenvalues() function can be used to retrieve them.  If
+      * \p computeEigenvectors is true, then the eigenvectors are also computed
+      * and can be retrieved by calling eigenvectors().
+      *
+      * The matrix is first reduced to Schur form using the
+      * ComplexSchur class. The Schur decomposition is then used to
+      * compute the eigenvalues and eigenvectors.
+      *
+      * The cost of the computation is dominated by the cost of the
+      * Schur decomposition, which is \f$ O(n^3) \f$ where \f$ n \f$
+      * is the size of the matrix.
+      *
+      * Example: \include ComplexEigenSolver_compute.cpp
+      * Output: \verbinclude ComplexEigenSolver_compute.out
+      */
+    ComplexEigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "ComplexEigenSolver is not initialized.");
+      return m_schur.info();
+    }
+
+    /** \brief Sets the maximum number of iterations allowed. */
+    ComplexEigenSolver& setMaxIterations(Index maxIters)
+    {
+      m_schur.setMaxIterations(maxIters);
+      return *this;
+    }
+
+    /** \brief Returns the maximum number of iterations. */
+    Index getMaxIterations()
+    {
+      return m_schur.getMaxIterations();
+    }
+
+  protected:
+    
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+    
+    EigenvectorType m_eivec;
+    EigenvalueType m_eivalues;
+    ComplexSchur<MatrixType> m_schur;
+    bool m_isInitialized;
+    bool m_eigenvectorsOk;
+    EigenvectorType m_matX;
+
+  private:
+    void doComputeEigenvectors(const RealScalar& matrixnorm);
+    void sortEigenvalues(bool computeEigenvectors);
+};
+
+
+template<typename MatrixType>
+ComplexEigenSolver<MatrixType>& 
+ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
+{
+  check_template_parameters();
+  
+  // this code is inspired from Jampack
+  eigen_assert(matrix.cols() == matrix.rows());
+
+  // Do a complex Schur decomposition, A = U T U^*
+  // The eigenvalues are on the diagonal of T.
+  m_schur.compute(matrix, computeEigenvectors);
+
+  if(m_schur.info() == Success)
+  {
+    m_eivalues = m_schur.matrixT().diagonal();
+    if(computeEigenvectors)
+      doComputeEigenvectors(matrix.norm());
+    sortEigenvalues(computeEigenvectors);
+  }
+
+  m_isInitialized = true;
+  m_eigenvectorsOk = computeEigenvectors;
+  return *this;
+}
+
+
+template<typename MatrixType>
+void ComplexEigenSolver<MatrixType>::doComputeEigenvectors(const RealScalar& matrixnorm)
+{
+  const Index n = m_eivalues.size();
+
+  // Compute X such that T = X D X^(-1), where D is the diagonal of T.
+  // The matrix X is unit triangular.
+  m_matX = EigenvectorType::Zero(n, n);
+  for(Index k=n-1 ; k>=0 ; k--)
+  {
+    m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0);
+    // Compute X(i,k) using the (i,k) entry of the equation X T = D X
+    for(Index i=k-1 ; i>=0 ; i--)
+    {
+      m_matX.coeffRef(i,k) = -m_schur.matrixT().coeff(i,k);
+      if(k-i-1>0)
+        m_matX.coeffRef(i,k) -= (m_schur.matrixT().row(i).segment(i+1,k-i-1) * m_matX.col(k).segment(i+1,k-i-1)).value();
+      ComplexScalar z = m_schur.matrixT().coeff(i,i) - m_schur.matrixT().coeff(k,k);
+      if(z==ComplexScalar(0))
+      {
+        // If the i-th and k-th eigenvalue are equal, then z equals 0.
+        // Use a small value instead, to prevent division by zero.
+        numext::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;
+      }
+      m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z;
+    }
+  }
+
+  // Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)
+  m_eivec.noalias() = m_schur.matrixU() * m_matX;
+  // .. and normalize the eigenvectors
+  for(Index k=0 ; k<n ; k++)
+  {
+    m_eivec.col(k).normalize();
+  }
+}
+
+
+template<typename MatrixType>
+void ComplexEigenSolver<MatrixType>::sortEigenvalues(bool computeEigenvectors)
+{
+  const Index n = m_eivalues.size();
+  for (Index i=0; i<n; i++)
+  {
+    Index k;
+    m_eivalues.cwiseAbs().tail(n-i).minCoeff(&k);
+    if (k != 0)
+    {
+      k += i;
+      std::swap(m_eivalues[k],m_eivalues[i]);
+      if(computeEigenvectors)
+        m_eivec.col(i).swap(m_eivec.col(k));
+    }
+  }
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_EIGEN_SOLVER_H
diff --git a/Eigen/src/Eigenvalues/ComplexSchur.h b/Eigen/src/Eigenvalues/ComplexSchur.h
new file mode 100644
index 0000000..89e6cad
--- /dev/null
+++ b/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -0,0 +1,456 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Claire Maurice
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPLEX_SCHUR_H
+#define EIGEN_COMPLEX_SCHUR_H
+
+#include "./HessenbergDecomposition.h"
+
+namespace Eigen { 
+
+namespace internal {
+template<typename MatrixType, bool IsComplex> struct complex_schur_reduce_to_hessenberg;
+}
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class ComplexSchur
+  *
+  * \brief Performs a complex Schur decomposition of a real or complex square matrix
+  *
+  * \tparam _MatrixType the type of the matrix of which we are
+  * computing the Schur decomposition; this is expected to be an
+  * instantiation of the Matrix class template.
+  *
+  * Given a real or complex square matrix A, this class computes the
+  * Schur decomposition: \f$ A = U T U^*\f$ where U is a unitary
+  * complex matrix, and T is a complex upper triangular matrix.  The
+  * diagonal of the matrix T corresponds to the eigenvalues of the
+  * matrix A.
+  *
+  * Call the function compute() to compute the Schur decomposition of
+  * a given matrix. Alternatively, you can use the 
+  * ComplexSchur(const MatrixType&, bool) constructor which computes
+  * the Schur decomposition at construction time. Once the
+  * decomposition is computed, you can use the matrixU() and matrixT()
+  * functions to retrieve the matrices U and T in the decomposition.
+  *
+  * \note This code is inspired from Jampack
+  *
+  * \sa class RealSchur, class EigenSolver, class ComplexEigenSolver
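+  *
+  * A minimal usage sketch (illustrative only):
+  * \code
+  * MatrixXcf A = MatrixXcf::Random(4,4);
+  * ComplexSchur<MatrixXcf> schur(A);
+  * if (schur.info() == Success) {
+  *   MatrixXcf U = schur.matrixU();
+  *   MatrixXcf T = schur.matrixT();
+  *   // A is (approximately) U * T * U.adjoint()
+  * }
+  * \endcode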
+  */
+template<typename _MatrixType> class ComplexSchur
+{
+  public:
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type \p _MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    /** \brief Complex scalar type for \p _MatrixType. 
+      *
+      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for the matrices in the Schur decomposition.
+      *
+      * This is a square matrix with entries of type #ComplexScalar. 
+      * The size is the same as the size of \p _MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrixType;
+
+    /** \brief Default constructor.
+      *
+      * \param [in] size  Positive integer, size of the matrix whose Schur decomposition will be computed.
+      *
+      * The default constructor is useful in cases in which the user
+      * intends to perform decompositions via compute().  The \p size
+      * parameter is only used as a hint. It is not an error to give a
+      * wrong \p size, but it may impair performance.
+      *
+      * \sa compute() for an example.
+      */
+    ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
+      : m_matT(size,size),
+        m_matU(size,size),
+        m_hess(size),
+        m_isInitialized(false),
+        m_matUisUptodate(false),
+        m_maxIters(-1)
+    {}
+
+    /** \brief Constructor; computes Schur decomposition of given matrix. 
+      * 
+      * \param[in]  matrix    Square matrix whose Schur decomposition is to be computed.
+      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
+      *
+      * This constructor calls compute() to compute the Schur decomposition.
+      *
+      * \sa matrixT() and matrixU() for examples.
+      */
+    ComplexSchur(const MatrixType& matrix, bool computeU = true)
+      : m_matT(matrix.rows(),matrix.cols()),
+        m_matU(matrix.rows(),matrix.cols()),
+        m_hess(matrix.rows()),
+        m_isInitialized(false),
+        m_matUisUptodate(false),
+        m_maxIters(-1)
+    {
+      compute(matrix, computeU);
+    }
+
+    /** \brief Returns the unitary matrix in the Schur decomposition. 
+      *
+      * \returns A const reference to the matrix U.
+      *
+      * It is assumed that either the constructor
+      * ComplexSchur(const MatrixType& matrix, bool computeU) or the
+      * member function compute(const MatrixType& matrix, bool computeU)
+      * has been called before to compute the Schur decomposition of a
+      * matrix, and that \p computeU was set to true (the default
+      * value).
+      *
+      * Example: \include ComplexSchur_matrixU.cpp
+      * Output: \verbinclude ComplexSchur_matrixU.out
+      */
+    const ComplexMatrixType& matrixU() const
+    {
+      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
+      eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the ComplexSchur decomposition.");
+      return m_matU;
+    }
+
+    /** \brief Returns the triangular matrix in the Schur decomposition. 
+      *
+      * \returns A const reference to the matrix T.
+      *
+      * It is assumed that either the constructor
+      * ComplexSchur(const MatrixType& matrix, bool computeU) or the
+      * member function compute(const MatrixType& matrix, bool computeU)
+      * has been called before to compute the Schur decomposition of a
+      * matrix.
+      *
+      * Note that this function returns a plain square matrix. If you want to reference
+      * only the upper triangular part, use:
+      * \code schur.matrixT().triangularView<Upper>() \endcode 
+      *
+      * Example: \include ComplexSchur_matrixT.cpp
+      * Output: \verbinclude ComplexSchur_matrixT.out
+      */
+    const ComplexMatrixType& matrixT() const
+    {
+      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
+      return m_matT;
+    }
+
+    /** \brief Computes Schur decomposition of given matrix. 
+      * 
+      * \param[in]  matrix  Square matrix whose Schur decomposition is to be computed.
+      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
+      *
+      * \returns    Reference to \c *this
+      *
+      * The Schur decomposition is computed by first reducing the
+      * matrix to Hessenberg form using the class
+      * HessenbergDecomposition. The Hessenberg matrix is then reduced
+      * to triangular form by performing QR iterations with a single
+      * shift. The cost of computing the Schur decomposition depends
+      * on the number of iterations; as a rough guide, it may be taken
+      * to be \f$25n^3\f$ complex flops, or \f$10n^3\f$ complex flops
+      * if \a computeU is false.
+      *
+      * Example: \include ComplexSchur_compute.cpp
+      * Output: \verbinclude ComplexSchur_compute.out
+      *
+      * \sa compute(const MatrixType&, bool, Index)
+      */
+    ComplexSchur& compute(const MatrixType& matrix, bool computeU = true);
+    
+    /** \brief Compute Schur decomposition from a given Hessenberg matrix
+     *  \param[in] matrixH Matrix in Hessenberg form H
+     *  \param[in] matrixQ unitary matrix Q that transforms a matrix A to H : A = Q H Q^*
+     *  \param computeU Computes the matrix U of the Schur vectors
+     * \return Reference to \c *this
+     *
+     *  This routine assumes that the matrix is already reduced to Hessenberg form, given in matrixH,
+     *  using either the class HessenbergDecomposition or some other means.
+     *  It computes the upper triangular matrix T of the Schur decomposition of H.
+     *  When computeU is true, this routine computes the matrix U such that
+     *  A = U T U^* = (QZ) T (QZ)^* = Q H Q^*, where A is the initial matrix
+     * 
+     * NOTE Q is referenced if computeU is true; so, if the initial orthogonal matrix
+     * is not available, the user should give an identity matrix (Q.setIdentity())
+     * 
+     * \sa compute(const MatrixType&, bool)
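+     *
+     * Illustrative sketch (assumes a complex matrix; explicitly evaluating
+     * Q into a dense matrix is one way to provide the orthogonal factor):
+     * \code
+     * MatrixXcd A = MatrixXcd::Random(4,4);
+     * HessenbergDecomposition<MatrixXcd> hd(A);
+     * MatrixXcd Q = hd.matrixQ();   // evaluate the Householder sequence
+     * ComplexSchur<MatrixXcd> schur(A.rows());
+     * schur.computeFromHessenberg(hd.matrixH(), Q, true);
+     * \endcode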
+     */
+    template<typename HessMatrixType, typename OrthMatrixType>
+    ComplexSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ,  bool computeU=true);
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "ComplexSchur is not initialized.");
+      return m_info;
+    }
+
+    /** \brief Sets the maximum number of iterations allowed. 
+      *
+      * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
+      * of the matrix.
+      */
+    ComplexSchur& setMaxIterations(Index maxIters)
+    {
+      m_maxIters = maxIters;
+      return *this;
+    }
+
+    /** \brief Returns the maximum number of iterations. */
+    Index getMaxIterations()
+    {
+      return m_maxIters;
+    }
+
+    /** \brief Maximum number of iterations per row.
+      *
+      * If not otherwise specified, the maximum number of iterations is this number times the size of the
+      * matrix. It is currently set to 30.
+      */
+    static const int m_maxIterationsPerRow = 30;
+
+  protected:
+    ComplexMatrixType m_matT, m_matU;
+    HessenbergDecomposition<MatrixType> m_hess;
+    ComputationInfo m_info;
+    bool m_isInitialized;
+    bool m_matUisUptodate;
+    Index m_maxIters;
+
+  private:  
+    bool subdiagonalEntryIsNeglegible(Index i);
+    ComplexScalar computeShift(Index iu, Index iter);
+    void reduceToTriangularForm(bool computeU);
+    friend struct internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>;
+};
+
+/** If m_matT(i+1,i) is negligible in floating point arithmetic
+  * compared to m_matT(i,i) and m_matT(j,j), then set it to zero and
+  * return true, else return false. */
+template<typename MatrixType>
+inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)
+{
+  RealScalar d = numext::norm1(m_matT.coeff(i,i)) + numext::norm1(m_matT.coeff(i+1,i+1));
+  RealScalar sd = numext::norm1(m_matT.coeff(i+1,i));
+  if (internal::isMuchSmallerThan(sd, d, NumTraits<RealScalar>::epsilon()))
+  {
+    m_matT.coeffRef(i+1,i) = ComplexScalar(0);
+    return true;
+  }
+  return false;
+}
+
+
+/** Compute the shift in the current QR iteration. */
+template<typename MatrixType>
+typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter)
+{
+  using std::abs;
+  if (iter == 10 || iter == 20) 
+  {
+    // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f
+    return abs(numext::real(m_matT.coeff(iu,iu-1))) + abs(numext::real(m_matT.coeff(iu-1,iu-2)));
+  }
+
+  // compute the shift as one of the eigenvalues of t, the 2x2
+  // diagonal block on the bottom of the active submatrix
+  Matrix<ComplexScalar,2,2> t = m_matT.template block<2,2>(iu-1,iu-1);
+  RealScalar normt = t.cwiseAbs().sum();
+  t /= normt;     // the normalization by normt is to avoid under/overflow
+
+  ComplexScalar b = t.coeff(0,1) * t.coeff(1,0);
+  ComplexScalar c = t.coeff(0,0) - t.coeff(1,1);
+  ComplexScalar disc = sqrt(c*c + RealScalar(4)*b);
+  ComplexScalar det = t.coeff(0,0) * t.coeff(1,1) - b;
+  ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1);
+  ComplexScalar eival1 = (trace + disc) / RealScalar(2);
+  ComplexScalar eival2 = (trace - disc) / RealScalar(2);
+
+  if(numext::norm1(eival1) > numext::norm1(eival2))
+    eival2 = det / eival1;
+  else
+    eival1 = det / eival2;
+
+  // choose the eigenvalue closest to the bottom entry of the diagonal
+  if(numext::norm1(eival1-t.coeff(1,1)) < numext::norm1(eival2-t.coeff(1,1)))
+    return normt * eival1;
+  else
+    return normt * eival2;
+}
+
+
+template<typename MatrixType>
+ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
+{
+  m_matUisUptodate = false;
+  eigen_assert(matrix.cols() == matrix.rows());
+
+  if(matrix.cols() == 1)
+  {
+    m_matT = matrix.template cast<ComplexScalar>();
+    if(computeU)  m_matU = ComplexMatrixType::Identity(1,1);
+    m_info = Success;
+    m_isInitialized = true;
+    m_matUisUptodate = computeU;
+    return *this;
+  }
+
+  internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>::run(*this, matrix, computeU);
+  computeFromHessenberg(m_matT, m_matU, computeU);
+  return *this;
+}
+
+template<typename MatrixType>
+template<typename HessMatrixType, typename OrthMatrixType>
+ComplexSchur<MatrixType>& ComplexSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU)
+{
+  m_matT = matrixH;
+  if(computeU)
+    m_matU = matrixQ;
+  reduceToTriangularForm(computeU);
+  return *this;
+}
+namespace internal {
+
+/* Reduce given matrix to Hessenberg form */
+template<typename MatrixType, bool IsComplex>
+struct complex_schur_reduce_to_hessenberg
+{
+  // this is the implementation for the case IsComplex = true
+  static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
+  {
+    _this.m_hess.compute(matrix);
+    _this.m_matT = _this.m_hess.matrixH();
+    if(computeU)  _this.m_matU = _this.m_hess.matrixQ();
+  }
+};
+
+template<typename MatrixType>
+struct complex_schur_reduce_to_hessenberg<MatrixType, false>
+{
+  static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)
+  {
+    typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;
+
+    // Note: m_hess is over RealScalar; m_matT and m_matU are over ComplexScalar
+    _this.m_hess.compute(matrix);
+    _this.m_matT = _this.m_hess.matrixH().template cast<ComplexScalar>();
+    if(computeU)  
+    {
+      // This may cause an allocation which seems to be avoidable
+      MatrixType Q = _this.m_hess.matrixQ(); 
+      _this.m_matU = Q.template cast<ComplexScalar>();
+    }
+  }
+};
+
+} // end namespace internal
+
+// Reduce the Hessenberg matrix m_matT to triangular form by QR iteration.
+template<typename MatrixType>
+void ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)
+{  
+  Index maxIters = m_maxIters;
+  if (maxIters == -1)
+    maxIters = m_maxIterationsPerRow * m_matT.rows();
+
+  // The matrix m_matT is divided in three parts. 
+  // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. 
+  // Rows il,...,iu is the part we are working on (the active submatrix).
+  // Rows iu+1,...,end are already brought in triangular form.
+  Index iu = m_matT.cols() - 1;
+  Index il;
+  Index iter = 0; // number of iterations we are working on the (iu,iu) element
+  Index totalIter = 0; // number of iterations for whole matrix
+
+  while(true)
+  {
+    // find iu, the bottom row of the active submatrix
+    while(iu > 0)
+    {
+      if(!subdiagonalEntryIsNeglegible(iu-1)) break;
+      iter = 0;
+      --iu;
+    }
+
+    // if iu is zero then we are done; the whole matrix is triangularized
+    if(iu==0) break;
+
+    // if we spent too many iterations, we give up
+    iter++;
+    totalIter++;
+    if(totalIter > maxIters) break;
+
+    // find il, the top row of the active submatrix
+    il = iu-1;
+    while(il > 0 && !subdiagonalEntryIsNeglegible(il-1))
+    {
+      --il;
+    }
+
+    /* perform the QR step using Givens rotations. The first rotation
+       creates a bulge; the (il+2,il) element becomes nonzero. This
+       bulge is chased down to the bottom of the active submatrix. */
+
+    ComplexScalar shift = computeShift(iu, iter);
+    JacobiRotation<ComplexScalar> rot;
+    rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il));
+    m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint());
+    m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot);
+    if(computeU) m_matU.applyOnTheRight(il, il+1, rot);
+
+    for(Index i=il+1 ; i<iu ; i++)
+    {
+      rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));
+      m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);
+      m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint());
+      m_matT.topRows((std::min)(i+2,iu)+1).applyOnTheRight(i, i+1, rot);
+      if(computeU) m_matU.applyOnTheRight(i, i+1, rot);
+    }
+  }
+
+  if(totalIter <= maxIters)
+    m_info = Success;
+  else
+    m_info = NoConvergence;
+
+  m_isInitialized = true;
+  m_matUisUptodate = computeU;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_SCHUR_H
diff --git a/Eigen/src/Eigenvalues/ComplexSchur_MKL.h b/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
new file mode 100644
index 0000000..91496ae
--- /dev/null
+++ b/Eigen/src/Eigenvalues/ComplexSchur_MKL.h
@@ -0,0 +1,94 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *    Complex Schur decomposition, needed for complex non-symmetric eigenvalue/eigenvector problems.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_COMPLEX_SCHUR_MKL_H
+#define EIGEN_COMPLEX_SCHUR_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen { 
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_SCHUR_COMPLEX(EIGTYPE, MKLTYPE, MKLPREFIX, MKLPREFIX_U, EIGCOLROW, MKLCOLROW) \
+template<> inline \
+ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
+ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, bool computeU) \
+{ \
+  typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> MatrixType; \
+  typedef MatrixType::Scalar Scalar; \
+  typedef MatrixType::RealScalar RealScalar; \
+  typedef std::complex<RealScalar> ComplexScalar; \
+\
+  eigen_assert(matrix.cols() == matrix.rows()); \
+\
+  m_matUisUptodate = false; \
+  if(matrix.cols() == 1) \
+  { \
+    m_matT = matrix.cast<ComplexScalar>(); \
+    if(computeU)  m_matU = ComplexMatrixType::Identity(1,1); \
+    m_info = Success; \
+    m_isInitialized = true; \
+    m_matUisUptodate = computeU; \
+    return *this; \
+  } \
+  lapack_int n = matrix.cols(), sdim, info; \
+  lapack_int lda = matrix.outerStride(); \
+  lapack_int matrix_order = MKLCOLROW; \
+  char jobvs, sort='N'; \
+  LAPACK_##MKLPREFIX_U##_SELECT1 select = 0; \
+  jobvs = (computeU) ? 'V' : 'N'; \
+  m_matU.resize(n, n); \
+  lapack_int ldvs  = m_matU.outerStride(); \
+  m_matT = matrix; \
+  Matrix<EIGTYPE, Dynamic, Dynamic> w; \
+  w.resize(n, 1);\
+  info = LAPACKE_##MKLPREFIX##gees( matrix_order, jobvs, sort, select, n, (MKLTYPE*)m_matT.data(), lda, &sdim, (MKLTYPE*)w.data(), (MKLTYPE*)m_matU.data(), ldvs ); \
+  if(info == 0) \
+    m_info = Success; \
+  else \
+    m_info = NoConvergence; \
+\
+  m_isInitialized = true; \
+  m_matUisUptodate = computeU; \
+  return *this; \
+\
+}
+
+EIGEN_MKL_SCHUR_COMPLEX(dcomplex, MKL_Complex16, z, Z, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SCHUR_COMPLEX(scomplex, MKL_Complex8,  c, C, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SCHUR_COMPLEX(dcomplex, MKL_Complex16, z, Z, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SCHUR_COMPLEX(scomplex, MKL_Complex8,  c, C, RowMajor, LAPACK_ROW_MAJOR)
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPLEX_SCHUR_MKL_H
diff --git a/Eigen/src/Eigenvalues/EigenSolver.h b/Eigen/src/Eigenvalues/EigenSolver.h
new file mode 100644
index 0000000..20c59a7
--- /dev/null
+++ b/Eigen/src/Eigenvalues/EigenSolver.h
@@ -0,0 +1,607 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EIGENSOLVER_H
+#define EIGEN_EIGENSOLVER_H
+
+#include "./RealSchur.h"
+
+namespace Eigen { 
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class EigenSolver
+  *
+  * \brief Computes eigenvalues and eigenvectors of general matrices
+  *
+  * \tparam _MatrixType the type of the matrix of which we are computing the
+  * eigendecomposition; this is expected to be an instantiation of the Matrix
+  * class template. Currently, only real matrices are supported.
+  *
+  * The eigenvalues and eigenvectors of a matrix \f$ A \f$ are scalars
+  * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda v \f$.  If
+  * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
+  * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
+  * V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
+  * have \f$ A = V D V^{-1} \f$. This is called the eigendecomposition.
+  *
+  * The eigenvalues and eigenvectors of a matrix may be complex, even when the
+  * matrix is real. However, we can choose real matrices \f$ V \f$ and \f$ D
+  * \f$ satisfying \f$ A V = V D \f$, just like the eigendecomposition, if the
+  * matrix \f$ D \f$ is not required to be diagonal, but if it is allowed to
+  * have blocks of the form
+  * \f[ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f]
+  * (where \f$ u \f$ and \f$ v \f$ are real numbers) on the diagonal.  These
+  * blocks correspond to complex eigenvalue pairs \f$ u \pm iv \f$. We call
+  * this variant of the eigendecomposition the pseudo-eigendecomposition.
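+  *
+  * A minimal illustrative sketch (the \c MatrixXd input here is an
+  * assumption, not part of the interface):
+  * \code
+  * MatrixXd A = MatrixXd::Random(6,6);
+  * EigenSolver<MatrixXd> es(A);
+  * MatrixXd D = es.pseudoEigenvalueMatrix();
+  * MatrixXd V = es.pseudoEigenvectors();
+  * // Up to round-off, A*V equals V*D.
+  * \endcode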
+  *
+  * Call the function compute() to compute the eigenvalues and eigenvectors of
+  * a given matrix. Alternatively, you can use the 
+  * EigenSolver(const MatrixType&, bool) constructor which computes the
+  * eigenvalues and eigenvectors at construction time. Once the eigenvalues and
+  * eigenvectors are computed, they can be retrieved with the eigenvalues() and
+  * eigenvectors() functions. The pseudoEigenvalueMatrix() and
+  * pseudoEigenvectors() methods allow the construction of the
+  * pseudo-eigendecomposition.
+  *
+  * The documentation for EigenSolver(const MatrixType&, bool) contains an
+  * example of the typical use of this class.
+  *
+  * \note The implementation is adapted from
+  * <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
+  * Their code is based on EISPACK.
+  *
+  * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
+  */
+template<typename _MatrixType> class EigenSolver
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type #MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    /** \brief Complex scalar type for #MatrixType. 
+      *
+      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for vector of eigenvalues as returned by eigenvalues(). 
+      *
+      * This is a column vector with entries of type #ComplexScalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
+
+    /** \brief Type for matrix of eigenvectors as returned by eigenvectors(). 
+      *
+      * This is a square matrix with entries of type #ComplexScalar. 
+      * The size is the same as the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
+
+    /** \brief Default constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via EigenSolver::compute(const MatrixType&, bool).
+      *
+      * \sa compute() for an example.
+      */
+    EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_eigenvectorsOk(false), m_realSchur(), m_matT(), m_tmp() {}
+
+    /** \brief Default constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa EigenSolver()
+      */
+    EigenSolver(Index size)
+      : m_eivec(size, size),
+        m_eivalues(size),
+        m_isInitialized(false),
+        m_eigenvectorsOk(false),
+        m_realSchur(size),
+        m_matT(size, size), 
+        m_tmp(size)
+    {}
+
+    /** \brief Constructor; computes eigendecomposition of given matrix. 
+      * 
+      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed. 
+      *
+      * This constructor calls compute() to compute the eigenvalues
+      * and eigenvectors.
+      *
+      * Example: \include EigenSolver_EigenSolver_MatrixType.cpp
+      * Output: \verbinclude EigenSolver_EigenSolver_MatrixType.out
+      *
+      * \sa compute()
+      */
+    EigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
+      : m_eivec(matrix.rows(), matrix.cols()),
+        m_eivalues(matrix.cols()),
+        m_isInitialized(false),
+        m_eigenvectorsOk(false),
+        m_realSchur(matrix.cols()),
+        m_matT(matrix.rows(), matrix.cols()), 
+        m_tmp(matrix.cols())
+    {
+      compute(matrix, computeEigenvectors);
+    }
+
+    /** \brief Returns the eigenvectors of given matrix. 
+      *
+      * \returns  %Matrix whose columns are the (possibly complex) eigenvectors.
+      *
+      * \pre Either the constructor 
+      * EigenSolver(const MatrixType&,bool) or the member function
+      * compute(const MatrixType&, bool) has been called before, and
+      * \p computeEigenvectors was set to true (the default).
+      *
+      * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
+      * to eigenvalue number \f$ k \f$ as returned by eigenvalues().  The
+      * eigenvectors are normalized to have (Euclidean) norm equal to one. The
+      * matrix returned by this function is the matrix \f$ V \f$ in the
+      * eigendecomposition \f$ A = V D V^{-1} \f$, if it exists.
+      *
+      * Example: \include EigenSolver_eigenvectors.cpp
+      * Output: \verbinclude EigenSolver_eigenvectors.out
+      *
+      * \sa eigenvalues(), pseudoEigenvectors()
+      */
+    EigenvectorsType eigenvectors() const;
+
+    /** \brief Returns the pseudo-eigenvectors of given matrix. 
+      *
+      * \returns  Const reference to matrix whose columns are the pseudo-eigenvectors.
+      *
+      * \pre Either the constructor 
+      * EigenSolver(const MatrixType&,bool) or the member function
+      * compute(const MatrixType&, bool) has been called before, and
+      * \p computeEigenvectors was set to true (the default).
+      *
+      * The real matrix \f$ V \f$ returned by this function and the
+      * block-diagonal matrix \f$ D \f$ returned by pseudoEigenvalueMatrix()
+      * satisfy \f$ AV = VD \f$.
+      *
+      * Example: \include EigenSolver_pseudoEigenvectors.cpp
+      * Output: \verbinclude EigenSolver_pseudoEigenvectors.out
+      *
+      * \sa pseudoEigenvalueMatrix(), eigenvectors()
+      */
+    const MatrixType& pseudoEigenvectors() const
+    {
+      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+      return m_eivec;
+    }
+
+    /** \brief Returns the block-diagonal matrix in the pseudo-eigendecomposition.
+      *
+      * \returns  A block-diagonal matrix.
+      *
+      * \pre Either the constructor 
+      * EigenSolver(const MatrixType&,bool) or the member function
+      * compute(const MatrixType&, bool) has been called before.
+      *
+      * The matrix \f$ D \f$ returned by this function is real and
+      * block-diagonal. The blocks on the diagonal are either 1-by-1 or 2-by-2
+      * blocks of the form
+      * \f$ \begin{bmatrix} u & v \\ -v & u \end{bmatrix} \f$.
+      * These blocks are not sorted in any particular order.
+      * The matrix \f$ D \f$ and the matrix \f$ V \f$ returned by
+      * pseudoEigenvectors() satisfy \f$ AV = VD \f$.
+      *
+      * \sa pseudoEigenvectors() for an example, eigenvalues()
+      */
+    MatrixType pseudoEigenvalueMatrix() const;
+
+    /** \brief Returns the eigenvalues of given matrix. 
+      *
+      * \returns A const reference to the column vector containing the eigenvalues.
+      *
+      * \pre Either the constructor 
+      * EigenSolver(const MatrixType&,bool) or the member function
+      * compute(const MatrixType&, bool) has been called before.
+      *
+      * The eigenvalues are repeated according to their algebraic multiplicity,
+      * so there are as many eigenvalues as rows in the matrix. The eigenvalues 
+      * are not sorted in any particular order.
+      *
+      * Example: \include EigenSolver_eigenvalues.cpp
+      * Output: \verbinclude EigenSolver_eigenvalues.out
+      *
+      * \sa eigenvectors(), pseudoEigenvalueMatrix(),
+      *     MatrixBase::eigenvalues()
+      */
+    const EigenvalueType& eigenvalues() const
+    {
+      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+      return m_eivalues;
+    }
+
+    /** \brief Computes eigendecomposition of given matrix. 
+      * 
+      * \param[in]  matrix  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed. 
+      * \returns    Reference to \c *this
+      *
+      * This function computes the eigenvalues of the real matrix \p matrix.
+      * The eigenvalues() function can be used to retrieve them.  If 
+      * \p computeEigenvectors is true, then the eigenvectors are also computed
+      * and can be retrieved by calling eigenvectors().
+      *
+      * The matrix is first reduced to real Schur form using the RealSchur
+      * class. The Schur decomposition is then used to compute the eigenvalues
+      * and eigenvectors.
+      *
+      * The cost of the computation is dominated by the cost of the
+      * Schur decomposition, which is very approximately \f$ 25n^3 \f$
+      * (where \f$ n \f$ is the size of the matrix) if \p computeEigenvectors 
+      * is true, and \f$ 10n^3 \f$ if \p computeEigenvectors is false.
+      *
+      * This method reuses the data already allocated in the EigenSolver object.
+      *
+      * Example: \include EigenSolver_compute.cpp
+      * Output: \verbinclude EigenSolver_compute.out
+      */
+    EigenSolver& compute(const MatrixType& matrix, bool computeEigenvectors = true);
+
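+    /** \brief Reports whether previous computation was successful. */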
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+      return m_realSchur.info();
+    }
+
+    /** \brief Sets the maximum number of iterations allowed. */
+    EigenSolver& setMaxIterations(Index maxIters)
+    {
+      m_realSchur.setMaxIterations(maxIters);
+      return *this;
+    }
+
+    /** \brief Returns the maximum number of iterations. */
+    Index getMaxIterations()
+    {
+      return m_realSchur.getMaxIterations();
+    }
+
+  private:
+    void doComputeEigenvectors();
+
+  protected:
+    
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+      EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
+    }
+    
+    MatrixType m_eivec;
+    EigenvalueType m_eivalues;
+    bool m_isInitialized;
+    bool m_eigenvectorsOk;
+    RealSchur<MatrixType> m_realSchur;
+    MatrixType m_matT;
+
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+    ColumnVectorType m_tmp;
+};
+
+template<typename MatrixType>
+MatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const
+{
+  eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+  Index n = m_eivalues.rows();
+  MatrixType matD = MatrixType::Zero(n,n);
+  for (Index i=0; i<n; ++i)
+  {
+    if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i))))
+      matD.coeffRef(i,i) = numext::real(m_eivalues.coeff(i));
+    else
+    {
+      matD.template block<2,2>(i,i) <<  numext::real(m_eivalues.coeff(i)), numext::imag(m_eivalues.coeff(i)),
+                                       -numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i));
+      ++i;
+    }
+  }
+  return matD;
+}
+
+template<typename MatrixType>
+typename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const
+{
+  eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+  eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+  Index n = m_eivec.cols();
+  EigenvectorsType matV(n,n);
+  for (Index j=0; j<n; ++j)
+  {
+    if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(j)), numext::real(m_eivalues.coeff(j))) || j+1==n)
+    {
+      // we have a real eigenvalue
+      matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();
+      matV.col(j).normalize();
+    }
+    else
+    {
+      // we have a pair of complex eigenvalues
+      for (Index i=0; i<n; ++i)
+      {
+        matV.coeffRef(i,j)   = ComplexScalar(m_eivec.coeff(i,j),  m_eivec.coeff(i,j+1));
+        matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));
+      }
+      matV.col(j).normalize();
+      matV.col(j+1).normalize();
+      ++j;
+    }
+  }
+  return matV;
+}
+
+template<typename MatrixType>
+EigenSolver<MatrixType>& 
+EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
+{
+  check_template_parameters();
+  
+  using std::sqrt;
+  using std::abs;
+  eigen_assert(matrix.cols() == matrix.rows());
+
+  // Reduce to real Schur form.
+  m_realSchur.compute(matrix, computeEigenvectors);
+
+  if (m_realSchur.info() == Success)
+  {
+    m_matT = m_realSchur.matrixT();
+    if (computeEigenvectors)
+      m_eivec = m_realSchur.matrixU();
+  
+    // Compute eigenvalues from matT
+    m_eivalues.resize(matrix.cols());
+    Index i = 0;
+    while (i < matrix.cols()) 
+    {
+      if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0)) 
+      {
+        m_eivalues.coeffRef(i) = m_matT.coeff(i, i);
+        ++i;
+      }
+      else
+      {
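+        // A standardized 2x2 diagonal block [a b; c d] of the real Schur form
+        // encodes a complex conjugate pair: with p = (a-d)/2 the eigenvalues
+        // are (d+p) +/- i*sqrt(-(p*p+b*c)). The abs() below only absorbs
+        // round-off in the (theoretically negative) discriminant.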
+        Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));
+        Scalar z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));
+        m_eivalues.coeffRef(i)   = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);
+        m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);
+        i += 2;
+      }
+    }
+    
+    // Compute eigenvectors.
+    if (computeEigenvectors)
+      doComputeEigenvectors();
+  }
+
+  m_isInitialized = true;
+  m_eigenvectorsOk = computeEigenvectors;
+
+  return *this;
+}
+
+// Complex scalar division.
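+// It follows Smith's algorithm: dividing through by the larger of |yr| and
+// |yi| keeps the intermediate products from overflowing or underflowing.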
+template<typename Scalar>
+std::complex<Scalar> cdiv(const Scalar& xr, const Scalar& xi, const Scalar& yr, const Scalar& yi)
+{
+  using std::abs;
+  Scalar r,d;
+  if (abs(yr) > abs(yi))
+  {
+      r = yi/yr;
+      d = yr + r*yi;
+      return std::complex<Scalar>((xr + r*xi)/d, (xi - r*xr)/d);
+  }
+  else
+  {
+      r = yr/yi;
+      d = yi + r*yr;
+      return std::complex<Scalar>((r*xr + xi)/d, (r*xi - xr)/d);
+  }
+}
+
+
+template<typename MatrixType>
+void EigenSolver<MatrixType>::doComputeEigenvectors()
+{
+  using std::abs;
+  const Index size = m_eivec.cols();
+  const Scalar eps = NumTraits<Scalar>::epsilon();
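+  // Back-substitution on the quasi-triangular matrix T yields the eigenvectors
+  // of T (real and complex-pair columns are handled separately below); they
+  // are mapped back through the Schur basis at the very end. This follows
+  // EISPACK's hqr2 routine, via JAMA.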
+
+  // inefficient! this is already computed in RealSchur
+  Scalar norm(0);
+  for (Index j = 0; j < size; ++j)
+  {
+    norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();
+  }
+  
+  // Backsubstitute to find vectors of upper triangular form
+  if (norm == 0.0)
+  {
+    return;
+  }
+
+  for (Index n = size-1; n >= 0; n--)
+  {
+    Scalar p = m_eivalues.coeff(n).real();
+    Scalar q = m_eivalues.coeff(n).imag();
+
+    // Scalar vector
+    if (q == Scalar(0))
+    {
+      Scalar lastr(0), lastw(0);
+      Index l = n;
+
+      m_matT.coeffRef(n,n) = 1.0;
+      for (Index i = n-1; i >= 0; i--)
+      {
+        Scalar w = m_matT.coeff(i,i) - p;
+        Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
+
+        if (m_eivalues.coeff(i).imag() < 0.0)
+        {
+          lastw = w;
+          lastr = r;
+        }
+        else
+        {
+          l = i;
+          if (m_eivalues.coeff(i).imag() == 0.0)
+          {
+            if (w != 0.0)
+              m_matT.coeffRef(i,n) = -r / w;
+            else
+              m_matT.coeffRef(i,n) = -r / (eps * norm);
+          }
+          else // Solve real equations
+          {
+            Scalar x = m_matT.coeff(i,i+1);
+            Scalar y = m_matT.coeff(i+1,i);
+            Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
+            Scalar t = (x * lastr - lastw * r) / denom;
+            m_matT.coeffRef(i,n) = t;
+            if (abs(x) > abs(lastw))
+              m_matT.coeffRef(i+1,n) = (-r - w * t) / x;
+            else
+              m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw;
+          }
+
+          // Overflow control
+          Scalar t = abs(m_matT.coeff(i,n));
+          if ((eps * t) * t > Scalar(1))
+            m_matT.col(n).tail(size-i) /= t;
+        }
+      }
+    }
+    else if (q < Scalar(0) && n > 0) // Complex vector
+    {
+      Scalar lastra(0), lastsa(0), lastw(0);
+      Index l = n-1;
+
+      // Last vector component imaginary so matrix is triangular
+      if (abs(m_matT.coeff(n,n-1)) > abs(m_matT.coeff(n-1,n)))
+      {
+        m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1);
+        m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1);
+      }
+      else
+      {
+        std::complex<Scalar> cc = cdiv<Scalar>(0.0,-m_matT.coeff(n-1,n),m_matT.coeff(n-1,n-1)-p,q);
+        m_matT.coeffRef(n-1,n-1) = numext::real(cc);
+        m_matT.coeffRef(n-1,n) = numext::imag(cc);
+      }
+      m_matT.coeffRef(n,n-1) = 0.0;
+      m_matT.coeffRef(n,n) = 1.0;
+      for (Index i = n-2; i >= 0; i--)
+      {
+        Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1));
+        Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1));
+        Scalar w = m_matT.coeff(i,i) - p;
+
+        if (m_eivalues.coeff(i).imag() < 0.0)
+        {
+          lastw = w;
+          lastra = ra;
+          lastsa = sa;
+        }
+        else
+        {
+          l = i;
+          if (m_eivalues.coeff(i).imag() == RealScalar(0))
+          {
+            std::complex<Scalar> cc = cdiv(-ra,-sa,w,q);
+            m_matT.coeffRef(i,n-1) = numext::real(cc);
+            m_matT.coeffRef(i,n) = numext::imag(cc);
+          }
+          else
+          {
+            // Solve complex equations
+            Scalar x = m_matT.coeff(i,i+1);
+            Scalar y = m_matT.coeff(i+1,i);
+            Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
+            Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;
+            if ((vr == 0.0) && (vi == 0.0))
+              vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw));
+
+            std::complex<Scalar> cc = cdiv(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra,vr,vi);
+            m_matT.coeffRef(i,n-1) = numext::real(cc);
+            m_matT.coeffRef(i,n) = numext::imag(cc);
+            if (abs(x) > (abs(lastw) + abs(q)))
+            {
+              m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x;
+              m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x;
+            }
+            else
+            {
+              cc = cdiv(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n),lastw,q);
+              m_matT.coeffRef(i+1,n-1) = numext::real(cc);
+              m_matT.coeffRef(i+1,n) = numext::imag(cc);
+            }
+          }
+
+          // Overflow control
+          using std::max;
+          Scalar t = (max)(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n)));
+          if ((eps * t) * t > Scalar(1))
+            m_matT.block(i, n-1, size-i, 2) /= t;
+
+        }
+      }
+      
+      // We handled a pair of complex conjugate eigenvalues, so need to skip them both
+      n--;
+    }
+    else
+    {
+      eigen_assert(0 && "Internal bug in EigenSolver"); // this should not happen
+    }
+  }
+
+  // Back transformation to get eigenvectors of original matrix
+  for (Index j = size-1; j >= 0; j--)
+  {
+    m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1);
+    m_eivec.col(j) = m_tmp;
+  }
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_EIGENSOLVER_H
diff --git a/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
new file mode 100644
index 0000000..956e80d
--- /dev/null
+++ b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
@@ -0,0 +1,350 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERALIZEDEIGENSOLVER_H
+#define EIGEN_GENERALIZEDEIGENSOLVER_H
+
+#include "./RealQZ.h"
+
+namespace Eigen { 
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class GeneralizedEigenSolver
+  *
+  * \brief Computes the generalized eigenvalues and eigenvectors of a pair of general matrices
+  *
+  * \tparam _MatrixType the type of the matrices of which we are computing the
+  * eigen-decomposition; this is expected to be an instantiation of the Matrix
+  * class template. Currently, only real matrices are supported.
+  *
+  * The generalized eigenvalues and eigenvectors of a matrix pair \f$ A \f$ and \f$ B \f$ are scalars
+  * \f$ \lambda \f$ and vectors \f$ v \f$ such that \f$ Av = \lambda Bv \f$.  If
+  * \f$ D \f$ is a diagonal matrix with the eigenvalues on the diagonal, and
+  * \f$ V \f$ is a matrix with the eigenvectors as its columns, then \f$ A V =
+  * B V D \f$. The matrix \f$ V \f$ is almost always invertible, in which case we
+  * have \f$ A = B V D V^{-1} \f$. This is called the generalized eigen-decomposition.
+  *
+  * The generalized eigenvalues and eigenvectors of a matrix pair may be complex, even when the
+  * matrices are real. Moreover, a generalized eigenvalue might be infinite if the matrix B is
+  * singular. To work around this difficulty, the eigenvalues are provided as a pair of complex \f$ \alpha \f$
+  * and real \f$ \beta \f$ such that \f$ \lambda_i = \alpha_i / \beta_i \f$. If \f$ \beta_i \f$ is (nearly) zero,
+  * then one can consider the well-defined left eigenvalue \f$ \mu = \beta_i / \alpha_i \f$ such that
+  * \f$ \mu_i A v_i = B v_i \f$, or even \f$ \mu_i u_i^T A = u_i^T B \f$ where \f$ u_i \f$ is
+  * called the left eigenvector.
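+  *
+  * A minimal sketch of recovering the finite eigenvalues (assuming two square
+  * \c MatrixXd inputs \c A and \c B; all names are illustrative):
+  * \code
+  * GeneralizedEigenSolver<MatrixXd> ges(A, B);
+  * for(int k = 0; k < ges.betas().size(); ++k)
+  * {
+  *   if(ges.betas()(k) != 0) // otherwise the k-th eigenvalue is infinite
+  *   {
+  *     std::complex<double> lambda_k = ges.alphas()(k) / ges.betas()(k);
+  *   }
+  * }
+  * \endcode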
+  *
+  * Call the function compute() to compute the generalized eigenvalues and eigenvectors of
+  * a given matrix pair. Alternatively, you can use the
+  * GeneralizedEigenSolver(const MatrixType&, const MatrixType&, bool) constructor which computes the
+  * eigenvalues and eigenvectors at construction time. Once the eigenvalues and
+  * eigenvectors are computed, they can be retrieved with the eigenvalues() and
+  * eigenvectors() functions.
+  *
+  * Here is a usage example of this class:
+  * Example: \include GeneralizedEigenSolver.cpp
+  * Output: \verbinclude GeneralizedEigenSolver.out
+  *
+  * \sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver
+  */
+template<typename _MatrixType> class GeneralizedEigenSolver
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+
+    /** \brief Scalar type for matrices of type #MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    /** \brief Complex scalar type for #MatrixType. 
+      *
+      * This is \c std::complex<Scalar> if #Scalar is real (e.g.,
+      * \c float or \c double) and just \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef std::complex<RealScalar> ComplexScalar;
+
+    /** \brief Type for the vector of real scalar values (the betas) as returned by betas().
+      *
+      * This is a column vector with entries of type #Scalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> VectorType;
+
+    /** \brief Type for the vector of complex scalar values (the alphas) as returned by alphas().
+      *
+      * This is a column vector with entries of type #ComplexScalar.
+      * The length of the vector is the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ComplexVectorType;
+
+    /** \brief Expression type for the eigenvalues as returned by eigenvalues().
+      */
+    typedef CwiseBinaryOp<internal::scalar_quotient_op<ComplexScalar,Scalar>,ComplexVectorType,VectorType> EigenvalueType;
+
+    /** \brief Type for matrix of eigenvectors as returned by eigenvectors(). 
+      *
+      * This is a square matrix with entries of type #ComplexScalar. 
+      * The size is the same as the size of #MatrixType.
+      */
+    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;
+
+    /** \brief Default constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via GeneralizedEigenSolver::compute(const MatrixType&, const MatrixType&, bool).
+      *
+      * \sa compute() for an example.
+      */
+    GeneralizedEigenSolver() : m_eivec(), m_alphas(), m_betas(), m_isInitialized(false), m_realQZ(), m_matS(), m_tmp() {}
+
+    /** \brief Default constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa GeneralizedEigenSolver()
+      */
+    GeneralizedEigenSolver(Index size)
+      : m_eivec(size, size),
+        m_alphas(size),
+        m_betas(size),
+        m_isInitialized(false),
+        m_eigenvectorsOk(false),
+        m_realQZ(size),
+        m_matS(size, size),
+        m_tmp(size)
+    {}
+
+    /** \brief Constructor; computes the generalized eigendecomposition of given matrix pair.
+      * 
+      * \param[in]  A  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  B  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are computed.
+      *
+      * This constructor calls compute() to compute the generalized eigenvalues
+      * and eigenvectors.
+      *
+      * \sa compute()
+      */
+    GeneralizedEigenSolver(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true)
+      : m_eivec(A.rows(), A.cols()),
+        m_alphas(A.cols()),
+        m_betas(A.cols()),
+        m_isInitialized(false),
+        m_eigenvectorsOk(false),
+        m_realQZ(A.cols()),
+        m_matS(A.rows(), A.cols()),
+        m_tmp(A.cols())
+    {
+      compute(A, B, computeEigenvectors);
+    }
+
+    /* \brief Returns the computed generalized eigenvectors.
+      *
+      * \returns  %Matrix whose columns are the (possibly complex) eigenvectors.
+      *
+      * \pre Either the constructor 
+      * GeneralizedEigenSolver(const MatrixType&,const MatrixType&, bool) or the member function
+      * compute(const MatrixType&, const MatrixType&, bool) has been called before, and
+      * \p computeEigenvectors was set to true (the default).
+      *
+      * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
+      * to eigenvalue number \f$ k \f$ as returned by eigenvalues().  The
+      * eigenvectors are normalized to have (Euclidean) norm equal to one. The
+      * matrix returned by this function is the matrix \f$ V \f$ in the
+      * generalized eigendecomposition \f$ A = B V D V^{-1} \f$, if it exists.
+      *
+      * \sa eigenvalues()
+      */
+//    EigenvectorsType eigenvectors() const;
+
+    /** \brief Returns an expression of the computed generalized eigenvalues.
+      *
+      * \returns An expression of the column vector containing the eigenvalues.
+      *
+      * It is a shortcut for \code this->alphas().cwiseQuotient(this->betas()); \endcode
+      * Note that betas might contain zeros. It is therefore not recommended to use this function,
+      * but rather directly deal with the alphas and betas vectors.
+      *
+      * \pre Either the constructor 
+      * GeneralizedEigenSolver(const MatrixType&,const MatrixType&,bool) or the member function
+      * compute(const MatrixType&,const MatrixType&,bool) has been called before.
+      *
+      * The eigenvalues are repeated according to their algebraic multiplicity,
+      * so there are as many eigenvalues as rows in the matrix. The eigenvalues 
+      * are not sorted in any particular order.
+      *
+      * \sa alphas(), betas(), eigenvectors()
+      */
+    EigenvalueType eigenvalues() const
+    {
+      eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+      return EigenvalueType(m_alphas,m_betas);
+    }
+
+    /** \returns The vector containing the alpha values
+      *
+      * This vector allows the j-th eigenvalue to be reconstructed as alphas(j)/betas(j).
+      *
+      * \sa betas(), eigenvalues() */
+    ComplexVectorType alphas() const
+    {
+      eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+      return m_alphas;
+    }
+
+    /** \returns The vector containing the beta values
+      *
+      * This vector allows the j-th eigenvalue to be reconstructed as alphas(j)/betas(j).
+      *
+      * \sa alphas(), eigenvalues() */
+    VectorType betas() const
+    {
+      eigen_assert(m_isInitialized && "GeneralizedEigenSolver is not initialized.");
+      return m_betas;
+    }
+
+    /** \brief Computes generalized eigendecomposition of given matrix.
+      * 
+      * \param[in]  A  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  B  Square matrix whose eigendecomposition is to be computed.
+      * \param[in]  computeEigenvectors  If true, both the eigenvectors and the
+      *    eigenvalues are computed; if false, only the eigenvalues are
+      *    computed. 
+      * \returns    Reference to \c *this
+      *
+      * This function computes the generalized eigenvalues of the real matrix pair \p A and \p B.
+      * The eigenvalues() function can be used to retrieve them.  If 
+      * \p computeEigenvectors is true, then the eigenvectors are also computed
+      * and can be retrieved by calling eigenvectors().
+      *
+      * The matrix pair is first reduced to real generalized Schur form using the RealQZ
+      * class. The generalized Schur decomposition is then used to compute the eigenvalues
+      * and eigenvectors.
+      *
+      * The cost of the computation is dominated by the cost of the
+      * generalized Schur decomposition.
+      *
+      * This method reuses the data already allocated in the GeneralizedEigenSolver object.
+      */
+    GeneralizedEigenSolver& compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true);
+
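+    /** \brief Reports whether previous computation was successful. */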
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+      return m_realQZ.info();
+    }
+
+    /** Sets the maximal number of iterations allowed.
+    */
+    GeneralizedEigenSolver& setMaxIterations(Index maxIters)
+    {
+      m_realQZ.setMaxIterations(maxIters);
+      return *this;
+    }
+
+  protected:
+    
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+      EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
+    }
+    
+    MatrixType m_eivec;
+    ComplexVectorType m_alphas;
+    VectorType m_betas;
+    bool m_isInitialized;
+    bool m_eigenvectorsOk;
+    RealQZ<MatrixType> m_realQZ;
+    MatrixType m_matS;
+
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+    ColumnVectorType m_tmp;
+};
+
+//template<typename MatrixType>
+//typename GeneralizedEigenSolver<MatrixType>::EigenvectorsType GeneralizedEigenSolver<MatrixType>::eigenvectors() const
+//{
+//  eigen_assert(m_isInitialized && "EigenSolver is not initialized.");
+//  eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+//  Index n = m_eivec.cols();
+//  EigenvectorsType matV(n,n);
+//  // TODO
+//  return matV;
+//}
+
+template<typename MatrixType>
+GeneralizedEigenSolver<MatrixType>&
+GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
+{
+  check_template_parameters();
+  
+  using std::sqrt;
+  using std::abs;
+  eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());
+
+  // Reduce to generalized real Schur form:
+  // A = Q S Z and B = Q T Z
+  m_realQZ.compute(A, B, computeEigenvectors);
+
+  if (m_realQZ.info() == Success)
+  {
+    m_matS = m_realQZ.matrixS();
+    if (computeEigenvectors)
+      m_eivec = m_realQZ.matrixZ().transpose();
+  
+    // Compute eigenvalues from matS
+    m_alphas.resize(A.cols());
+    m_betas.resize(A.cols());
+    Index i = 0;
+    while (i < A.cols())
+    {
+      if (i == A.cols() - 1 || m_matS.coeff(i+1, i) == Scalar(0))
+      {
+        m_alphas.coeffRef(i) = m_matS.coeff(i, i);
+        m_betas.coeffRef(i)  = m_realQZ.matrixT().coeff(i,i);
+        ++i;
+      }
+      else
+      {
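+        // Standardized 2x2 diagonal block of S: the complex conjugate pair of
+        // alphas is computed exactly as in EigenSolver, while the matching
+        // betas are read off the diagonal of the upper triangular matrix T.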
+        Scalar p = Scalar(0.5) * (m_matS.coeff(i, i) - m_matS.coeff(i+1, i+1));
+        Scalar z = sqrt(abs(p * p + m_matS.coeff(i+1, i) * m_matS.coeff(i, i+1)));
+        m_alphas.coeffRef(i)   = ComplexScalar(m_matS.coeff(i+1, i+1) + p, z);
+        m_alphas.coeffRef(i+1) = ComplexScalar(m_matS.coeff(i+1, i+1) + p, -z);
+
+        m_betas.coeffRef(i)   = m_realQZ.matrixT().coeff(i,i);
+        m_betas.coeffRef(i+1) = m_realQZ.matrixT().coeff(i,i);
+        i += 2;
+      }
+    }
+  }
+
+  m_isInitialized = true;
+  m_eigenvectorsOk = false; // eigenvector computation is not implemented yet
+
+  return *this;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERALIZEDEIGENSOLVER_H
diff --git a/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
new file mode 100644
index 0000000..07bf1ea
--- /dev/null
+++ b/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
@@ -0,0 +1,227 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H
+#define EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H
+
+#include "./Tridiagonalization.h"
+
+namespace Eigen { 
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class GeneralizedSelfAdjointEigenSolver
+  *
+  * \brief Computes eigenvalues and eigenvectors of the generalized selfadjoint eigen problem
+  *
+  * \tparam _MatrixType the type of the matrix of which we are computing the
+  * eigendecomposition; this is expected to be an instantiation of the Matrix
+  * class template.
+  *
+  * This class solves the generalized eigenvalue problem
+  * \f$ Av = \lambda Bv \f$. In this case, the matrix \f$ A \f$ should be
+  * selfadjoint and the matrix \f$ B \f$ should be positive definite.
+  *
+  * Only the \b lower \b triangular \b part of the input matrix is referenced.
+  *
+  * Call the function compute() to compute the eigenvalues and eigenvectors of
+  * a given matrix. Alternatively, you can use the
+  * GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)
+  * constructor which computes the eigenvalues and eigenvectors at construction time.
+  * Once the eigenvalues and eigenvectors are computed, they can be retrieved with the eigenvalues()
+  * and eigenvectors() functions.
+  *
+  * The documentation for GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)
+  * contains an example of the typical use of this class.
+  *
+  * \sa class SelfAdjointEigenSolver, class EigenSolver, class ComplexEigenSolver
+  */
+template<typename _MatrixType>
+class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixType>
+{
+    typedef SelfAdjointEigenSolver<_MatrixType> Base;
+  public:
+
+    typedef typename Base::Index Index;
+    typedef _MatrixType MatrixType;
+
+    /** \brief Default constructor for fixed-size matrices.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via compute(). This constructor
+      * can only be used if \p _MatrixType is a fixed-size matrix; use
+      * GeneralizedSelfAdjointEigenSolver(Index) for dynamic-size matrices.
+      */
+    GeneralizedSelfAdjointEigenSolver() : Base() {}
+
+    /** \brief Constructor, pre-allocates memory for dynamic-size matrices.
+      *
+      * \param [in]  size  Positive integer, size of the matrix whose
+      * eigenvalues and eigenvectors will be computed.
+      *
+      * This constructor is useful for dynamic-size matrices, when the user
+      * intends to perform decompositions via compute(). The \p size
+      * parameter is only used as a hint. It is not an error to give a wrong
+      * \p size, but it may impair performance.
+      *
+      * \sa compute() for an example
+      */
+    GeneralizedSelfAdjointEigenSolver(Index size)
+        : Base(size)
+    {}
+
+    /** \brief Constructor; computes generalized eigendecomposition of given matrix pencil.
+      *
+      * \param[in]  matA  Selfadjoint matrix in matrix pencil.
+      *                   Only the lower triangular part of the matrix is referenced.
+      * \param[in]  matB  Positive-definite matrix in matrix pencil.
+      *                   Only the lower triangular part of the matrix is referenced.
+      * \param[in]  options An or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}.
+      *                     Default is #ComputeEigenvectors|#Ax_lBx.
+      *
+      * This constructor calls compute(const MatrixType&, const MatrixType&, int)
+      * to compute the eigenvalues and (if requested) the eigenvectors of the
+      * generalized eigenproblem \f$ Ax = \lambda B x \f$ with \a matA the
+      * selfadjoint matrix \f$ A \f$ and \a matB the positive definite matrix
+      * \f$ B \f$. Each eigenvector \f$ x \f$ satisfies the property
+      * \f$ x^* B x = 1 \f$. The eigenvectors are computed if
+      * \a options contains ComputeEigenvectors.
+      *
+      * In addition, the two following variants can be solved via \p options:
+      * - \c ABx_lx: \f$ ABx = \lambda x \f$
+      * - \c BAx_lx: \f$ BAx = \lambda x \f$
+      *
+      * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.out
+      *
+      * \sa compute(const MatrixType&, const MatrixType&, int)
+      */
+    GeneralizedSelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB,
+                                      int options = ComputeEigenvectors|Ax_lBx)
+      : Base(matA.cols())
+    {
+      compute(matA, matB, options);
+    }
+
+    /** \brief Computes generalized eigendecomposition of given matrix pencil.
+      *
+      * \param[in]  matA  Selfadjoint matrix in matrix pencil.
+      *                   Only the lower triangular part of the matrix is referenced.
+      * \param[in]  matB  Positive-definite matrix in matrix pencil.
+      *                   Only the lower triangular part of the matrix is referenced.
+      * \param[in]  options An or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}.
+      *                     Default is #ComputeEigenvectors|#Ax_lBx.
+      *
+      * \returns    Reference to \c *this
+      *
+      * According to \p options, this function computes eigenvalues and (if requested)
+      * the eigenvectors of one of the following three generalized eigenproblems:
+      * - \c Ax_lBx: \f$ Ax = \lambda B x \f$
+      * - \c ABx_lx: \f$ ABx = \lambda x \f$
+      * - \c BAx_lx: \f$ BAx = \lambda x \f$
+      * with \a matA the selfadjoint matrix \f$ A \f$ and \a matB the positive definite
+      * matrix \f$ B \f$.
+      * In addition, each eigenvector \f$ x \f$ satisfies the property \f$ x^* B x = 1 \f$.
+      *
+      * The eigenvalues() function can be used to retrieve
+      * the eigenvalues. If \p options contains ComputeEigenvectors, then the
+      * eigenvectors are also computed and can be retrieved by calling
+      * eigenvectors().
+      *
+      * The implementation uses LLT to compute the Cholesky decomposition
+      * \f$ B = LL^* \f$ and computes the classical eigendecomposition
+      * of the selfadjoint matrix \f$ L^{-1} A (L^*)^{-1} \f$ if \p options contains Ax_lBx
+      * and of \f$ L^{*} A L \f$ otherwise. This solves the
+      * generalized eigenproblem, because any solution of the generalized
+      * eigenproblem \f$ Ax = \lambda B x \f$ corresponds to a solution
+      * \f$ L^{-1} A (L^*)^{-1} (L^* x) = \lambda (L^* x) \f$ of the
+      * eigenproblem for \f$ L^{-1} A (L^*)^{-1} \f$. Similar statements
+      * can be made for the two other variants.
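+      *
+      * In equations, for \c Ax_lBx the reduction sketched above reads
+      * \f[ Ax = \lambda LL^*x \quad\Longleftrightarrow\quad
+      *     L^{-1}A(L^*)^{-1}(L^*x) = \lambda (L^*x). \f]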
+      *
+      * Example: \include SelfAdjointEigenSolver_compute_MatrixType2.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType2.out
+      *
+      * \sa GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)
+      */
+    GeneralizedSelfAdjointEigenSolver& compute(const MatrixType& matA, const MatrixType& matB,
+                                               int options = ComputeEigenvectors|Ax_lBx);
+
+  protected:
+
+};
+
+
+template<typename MatrixType>
+GeneralizedSelfAdjointEigenSolver<MatrixType>& GeneralizedSelfAdjointEigenSolver<MatrixType>::
+compute(const MatrixType& matA, const MatrixType& matB, int options)
+{
+  eigen_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows());
+  eigen_assert((options&~(EigVecMask|GenEigMask))==0
+          && (options&EigVecMask)!=EigVecMask
+          && ((options&GenEigMask)==0 || (options&GenEigMask)==Ax_lBx
+           || (options&GenEigMask)==ABx_lx || (options&GenEigMask)==BAx_lx)
+          && "invalid option parameter");
+
+  bool computeEigVecs = ((options&EigVecMask)==0) || ((options&EigVecMask)==ComputeEigenvectors);
+
+  // Compute the Cholesky decomposition of matB = L L' = U'U
+  LLT<MatrixType> cholB(matB);
+
+  int type = (options&GenEigMask);
+  if(type==0)
+    type = Ax_lBx;
+
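+  // With B = L L^*, all three variants reduce to a standard selfadjoint
+  // problem: Ax_lBx uses C = inv(L) A inv(L^*), while ABx_lx and BAx_lx both
+  // use C = L^* A L and differ only in the back-transformation of the
+  // eigenvectors (x = inv(L^*) z and x = L z, respectively).
+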
+  if(type==Ax_lBx)
+  {
+    // compute C = inv(L) A inv(L')
+    MatrixType matC = matA.template selfadjointView<Lower>();
+    cholB.matrixL().template solveInPlace<OnTheLeft>(matC);
+    cholB.matrixU().template solveInPlace<OnTheRight>(matC);
+
+    Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly );
+
+    // transform back the eigen vectors: evecs = inv(U) * evecs
+    if(computeEigVecs)
+      cholB.matrixU().solveInPlace(Base::m_eivec);
+  }
+  else if(type==ABx_lx)
+  {
+    // compute C = L' A L
+    MatrixType matC = matA.template selfadjointView<Lower>();
+    matC = matC * cholB.matrixL();
+    matC = cholB.matrixU() * matC;
+
+    Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly);
+
+    // transform back the eigen vectors: evecs = inv(U) * evecs
+    if(computeEigVecs)
+      cholB.matrixU().solveInPlace(Base::m_eivec);
+  }
+  else if(type==BAx_lx)
+  {
+    // compute C = L' A L
+    MatrixType matC = matA.template selfadjointView<Lower>();
+    matC = matC * cholB.matrixL();
+    matC = cholB.matrixU() * matC;
+
+    Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly);
+
+    // transform back the eigen vectors: evecs = L * evecs
+    if(computeEigVecs)
+      Base::m_eivec = cholB.matrixL() * Base::m_eivec;
+  }
+
+  return *this;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H
diff --git a/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/Eigen/src/Eigenvalues/HessenbergDecomposition.h
new file mode 100644
index 0000000..3db0c01
--- /dev/null
+++ b/Eigen/src/Eigenvalues/HessenbergDecomposition.h
@@ -0,0 +1,373 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_HESSENBERGDECOMPOSITION_H
+#define EIGEN_HESSENBERGDECOMPOSITION_H
+
+namespace Eigen { 
+
+namespace internal {
+  
+template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType;
+template<typename MatrixType>
+struct traits<HessenbergDecompositionMatrixHReturnType<MatrixType> >
+{
+  typedef MatrixType ReturnType;
+};
+
+}
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class HessenbergDecomposition
+  *
+  * \brief Reduces a square matrix to Hessenberg form by an orthogonal similarity transformation
+  *
+  * \tparam _MatrixType the type of the matrix of which we are computing the Hessenberg decomposition
+  *
+  * This class performs a Hessenberg decomposition of a matrix \f$ A \f$. In
+  * the real case, the Hessenberg decomposition consists of an orthogonal
+  * matrix \f$ Q \f$ and a Hessenberg matrix \f$ H \f$ such that \f$ A = Q H
+  * Q^T \f$. An orthogonal matrix is a matrix whose inverse equals its
+  * transpose (\f$ Q^{-1} = Q^T \f$). A Hessenberg matrix has zeros below the
+  * subdiagonal, so it is almost upper triangular. The Hessenberg decomposition
+  * of a complex matrix is \f$ A = Q H Q^* \f$ with \f$ Q \f$ unitary (that is,
+  * \f$ Q^{-1} = Q^* \f$).
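+  *
+  * A minimal sketch (the \c MatrixXd input is illustrative):
+  * \code
+  * MatrixXd A = MatrixXd::Random(5,5);
+  * HessenbergDecomposition<MatrixXd> hd(A);
+  * MatrixXd H = hd.matrixH();
+  * MatrixXd Q = hd.matrixQ();
+  * // Up to round-off, A equals Q * H * Q.transpose().
+  * \endcode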
+  *
+  * Call the function compute() to compute the Hessenberg decomposition of a
+  * given matrix. Alternatively, you can use the
+  * HessenbergDecomposition(const MatrixType&) constructor which computes the
+  * Hessenberg decomposition at construction time. Once the decomposition is
+  * computed, you can use the matrixH() and matrixQ() functions to construct
+  * the matrices H and Q in the decomposition.
+  *
+  * The documentation for matrixH() contains an example of the typical use of
+  * this class.
+  *
+  * \sa class ComplexSchur, class Tridiagonalization, \ref QR_Module "QR Module"
+  */
+template<typename _MatrixType> class HessenbergDecomposition
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    enum {
+      Size = MatrixType::RowsAtCompileTime,
+      SizeMinusOne = Size == Dynamic ? Dynamic : Size - 1,
+      Options = MatrixType::Options,
+      MaxSize = MatrixType::MaxRowsAtCompileTime,
+      MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : MaxSize - 1
+    };
+
+    /** \brief Scalar type for matrices of type #MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+
+    /** \brief Type for vector of Householder coefficients.
+      *
+      * This is a column vector with entries of type #Scalar. The length of the
+      * vector is one less than the size of #MatrixType, if it is a fixed-size
+      * type.
+      */
+    typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
+
+    /** \brief Return type of matrixQ() */
+    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;
+    
+    typedef internal::HessenbergDecompositionMatrixHReturnType<MatrixType> MatrixHReturnType;
+
+    /** \brief Default constructor; the decomposition will be computed later.
+      *
+      * \param [in] size  The size of the matrix whose Hessenberg decomposition will be computed.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via compute().  The \p size parameter is only
+      * used as a hint. It is not an error to give a wrong \p size, but it may
+      * impair performance.
+      *
+      * \sa compute() for an example.
+      */
+    HessenbergDecomposition(Index size = Size==Dynamic ? 2 : Size)
+      : m_matrix(size,size),
+        m_temp(size),
+        m_isInitialized(false)
+    {
+      if(size>1)
+        m_hCoeffs.resize(size-1);
+    }
+
+    /** \brief Constructor; computes Hessenberg decomposition of given matrix.
+      *
+      * \param[in]  matrix  Square matrix whose Hessenberg decomposition is to be computed.
+      *
+      * This constructor calls compute() to compute the Hessenberg
+      * decomposition.
+      *
+      * \sa matrixH() for an example.
+      */
+    HessenbergDecomposition(const MatrixType& matrix)
+      : m_matrix(matrix),
+        m_temp(matrix.rows()),
+        m_isInitialized(false)
+    {
+      if(matrix.rows()<2)
+      {
+        m_isInitialized = true;
+        return;
+      }
+      m_hCoeffs.resize(matrix.rows()-1,1);
+      _compute(m_matrix, m_hCoeffs, m_temp);
+      m_isInitialized = true;
+    }
+
+    /** \brief Computes Hessenberg decomposition of given matrix.
+      *
+      * \param[in]  matrix  Square matrix whose Hessenberg decomposition is to be computed.
+      * \returns    Reference to \c *this
+      *
+      * The Hessenberg decomposition is computed by bringing the columns of the
+      * matrix successively in the required form using Householder reflections
+      * (see, e.g., Algorithm 7.4.2 in Golub \& Van Loan, <i>%Matrix
+      * Computations</i>). The cost is \f$ 10n^3/3 \f$ flops, where \f$ n \f$
+      * denotes the size of the given matrix.
+      *
+      * This method reuses of the allocated data in the HessenbergDecomposition
+      * object.
+      *
+      * Example: \include HessenbergDecomposition_compute.cpp
+      * Output: \verbinclude HessenbergDecomposition_compute.out
+      */
+    HessenbergDecomposition& compute(const MatrixType& matrix)
+    {
+      m_matrix = matrix;
+      if(matrix.rows()<2)
+      {
+        m_isInitialized = true;
+        return *this;
+      }
+      m_hCoeffs.resize(matrix.rows()-1,1);
+      _compute(m_matrix, m_hCoeffs, m_temp);
+      m_isInitialized = true;
+      return *this;
+    }
+
+    /** \brief Returns the Householder coefficients.
+      *
+      * \returns a const reference to the vector of Householder coefficients
+      *
+      * \pre Either the constructor HessenbergDecomposition(const MatrixType&)
+      * or the member function compute(const MatrixType&) has been called
+      * before to compute the Hessenberg decomposition of a matrix.
+      *
+      * The Householder coefficients allow the reconstruction of the matrix
+      * \f$ Q \f$ in the Hessenberg decomposition from the packed data.
+      *
+      * \sa packedMatrix(), \ref Householder_Module "Householder module"
+      */
+    const CoeffVectorType& householderCoefficients() const
+    {
+      eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
+      return m_hCoeffs;
+    }
+
+    /** \brief Returns the internal representation of the decomposition
+      *
+      * \returns a const reference to a matrix with the internal representation
+      *          of the decomposition.
+      *
+      * \pre Either the constructor HessenbergDecomposition(const MatrixType&)
+      * or the member function compute(const MatrixType&) has been called
+      * before to compute the Hessenberg decomposition of a matrix.
+      *
+      * The returned matrix contains the following information:
+      *  - the upper part and lower sub-diagonal represent the Hessenberg matrix H
+      *  - the rest of the lower part contains the Householder vectors that, combined with
+      *    Householder coefficients returned by householderCoefficients(),
+      *    allow the matrix Q to be reconstructed as
+      *       \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
+      *    Here, the matrices \f$ H_i \f$ are the Householder transformations
+      *       \f$ H_i = (I - h_i v_i v_i^T) \f$
+      *    where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
+      *    \f$ v_i \f$ is the Householder vector defined by
+      *       \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
+      *    with M the matrix returned by this function.
+      *
+      * See LAPACK for further details on this packed storage.
+      *
+      * Example: \include HessenbergDecomposition_packedMatrix.cpp
+      * Output: \verbinclude HessenbergDecomposition_packedMatrix.out
+      *
+      * \sa householderCoefficients()
+      */
+    const MatrixType& packedMatrix() const
+    {
+      eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
+      return m_matrix;
+    }
+
+    /** \brief Reconstructs the orthogonal matrix Q in the decomposition
+      *
+      * \returns object representing the matrix Q
+      *
+      * \pre Either the constructor HessenbergDecomposition(const MatrixType&)
+      * or the member function compute(const MatrixType&) has been called
+      * before to compute the Hessenberg decomposition of a matrix.
+      *
+      * This function returns a light-weight object of template class
+      * HouseholderSequence. You can either apply it directly to a matrix or
+      * you can convert it to a matrix of type #MatrixType.
+      *
+      * \sa matrixH() for an example, class HouseholderSequence
+      */
+    HouseholderSequenceType matrixQ() const
+    {
+      eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
+      return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate())
+             .setLength(m_matrix.rows() - 1)
+             .setShift(1);
+    }
+
+    /** \brief Constructs the Hessenberg matrix H in the decomposition
+      *
+      * \returns expression object representing the matrix H
+      *
+      * \pre Either the constructor HessenbergDecomposition(const MatrixType&)
+      * or the member function compute(const MatrixType&) has been called
+      * before to compute the Hessenberg decomposition of a matrix.
+      *
+      * The object returned by this function constructs the Hessenberg matrix H
+      * when it is assigned to a matrix or otherwise evaluated. The matrix H is
+      * constructed from the packed matrix as returned by packedMatrix(): The
+      * upper part (including the subdiagonal) of the packed matrix contains
+      * the matrix H. It may sometimes be better to directly use the packed
+      * matrix instead of constructing the matrix H.
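+      *
+      * For instance (a sketch, assuming \c hess is a computed decomposition of
+      * a MatrixXd):
+      * \code
+      * MatrixXd H = hess.matrixH(); // assignment evaluates the expression
+      * \endcode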
+      *
+      * Example: \include HessenbergDecomposition_matrixH.cpp
+      * Output: \verbinclude HessenbergDecomposition_matrixH.out
+      *
+      * \sa matrixQ(), packedMatrix()
+      */
+    MatrixHReturnType matrixH() const
+    {
+      eigen_assert(m_isInitialized && "HessenbergDecomposition is not initialized.");
+      return MatrixHReturnType(*this);
+    }
+
+  private:
+
+    typedef Matrix<Scalar, 1, Size, Options | RowMajor, 1, MaxSize> VectorType;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp);
+
+  protected:
+    MatrixType m_matrix;
+    CoeffVectorType m_hCoeffs;
+    VectorType m_temp;
+    bool m_isInitialized;
+};
+
+/** \internal
+  * Performs a Hessenberg decomposition of \a matA in place.
+  *
+  * \param matA the input matrix
+  * \param hCoeffs returned Householder coefficients
+  * \param temp workspace vector
+  *
+  * The result is written into \a matA in the packed form described in
+  * packedMatrix(): the Hessenberg matrix in the upper part (including the
+  * subdiagonal), with the Householder vectors stored below it.
+  *
+  * Implemented from Golub's "%Matrix Computations" (Householder reduction
+  * to Hessenberg form).
+  *
+  * \sa packedMatrix()
+  */
+template<typename MatrixType>
+void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp)
+{
+  eigen_assert(matA.rows()==matA.cols());
+  Index n = matA.rows();
+  temp.resize(n);
+  for (Index i = 0; i<n-1; ++i)
+  {
+    // let's consider the vector v = i-th column starting at position i+1
+    Index remainingSize = n-i-1;
+    RealScalar beta;
+    Scalar h;
+    matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
+    matA.col(i).coeffRef(i+1) = beta;
+    hCoeffs.coeffRef(i) = h;
+
+    // Apply similarity transformation to remaining columns,
+    // i.e., compute A = H A H'
+
+    // A = H A
+    matA.bottomRightCorner(remainingSize, remainingSize)
+        .applyHouseholderOnTheLeft(matA.col(i).tail(remainingSize-1), h, &temp.coeffRef(0));
+
+    // A = A H'
+    matA.rightCols(remainingSize)
+        .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), numext::conj(h), &temp.coeffRef(0));
+  }
+}
+
+namespace internal {
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \brief Expression type for return value of HessenbergDecomposition::matrixH()
+  *
+  * \tparam MatrixType type of matrix in the Hessenberg decomposition
+  *
+  * Objects of this type represent the Hessenberg matrix in the Hessenberg
+  * decomposition of some matrix. The object holds a reference to the
+  * HessenbergDecomposition class until it is assigned or evaluated for
+  * some other reason (the reference should remain valid during the lifetime
+  * of this object). This class is the return type of
+  * HessenbergDecomposition::matrixH(); there is probably no other use for this
+  * class.
+  */
+template<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType
+: public ReturnByValue<HessenbergDecompositionMatrixHReturnType<MatrixType> >
+{
+    typedef typename MatrixType::Index Index;
+  public:
+    /** \brief Constructor.
+      *
+      * \param[in] hess  Hessenberg decomposition
+      */
+    HessenbergDecompositionMatrixHReturnType(const HessenbergDecomposition<MatrixType>& hess) : m_hess(hess) { }
+
+    /** \brief Hessenberg matrix in decomposition.
+      *
+      * \param[out] result  Hessenberg matrix in decomposition \p hess which
+      *                     was passed to the constructor
+      */
+    template <typename ResultType>
+    inline void evalTo(ResultType& result) const
+    {
+      result = m_hess.packedMatrix();
+      Index n = result.rows();
+      if (n>2)
+        result.bottomLeftCorner(n-2, n-2).template triangularView<Lower>().setZero();
+    }
+
+    Index rows() const { return m_hess.packedMatrix().rows(); }
+    Index cols() const { return m_hess.packedMatrix().cols(); }
+
+  protected:
+    const HessenbergDecomposition<MatrixType>& m_hess;
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_HESSENBERGDECOMPOSITION_H
diff --git a/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
new file mode 100644
index 0000000..4fec8af
--- /dev/null
+++ b/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
@@ -0,0 +1,160 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIXBASEEIGENVALUES_H
+#define EIGEN_MATRIXBASEEIGENVALUES_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Derived, bool IsComplex>
+struct eigenvalues_selector
+{
+  // this is the implementation for the case IsComplex = true
+  static inline typename MatrixBase<Derived>::EigenvaluesReturnType const
+  run(const MatrixBase<Derived>& m)
+  {
+    typedef typename Derived::PlainObject PlainObject;
+    PlainObject m_eval(m);
+    return ComplexEigenSolver<PlainObject>(m_eval, false).eigenvalues();
+  }
+};
+
+template<typename Derived>
+struct eigenvalues_selector<Derived, false>
+{
+  static inline typename MatrixBase<Derived>::EigenvaluesReturnType const
+  run(const MatrixBase<Derived>& m)
+  {
+    typedef typename Derived::PlainObject PlainObject;
+    PlainObject m_eval(m);
+    return EigenSolver<PlainObject>(m_eval, false).eigenvalues();
+  }
+};
+
+} // end namespace internal
+
+/** \brief Computes the eigenvalues of a matrix 
+  * \returns Column vector containing the eigenvalues.
+  *
+  * \eigenvalues_module
+  * This function computes the eigenvalues with the help of the EigenSolver
+  * class (for real matrices) or the ComplexEigenSolver class (for complex
+  * matrices). 
+  *
+  * The eigenvalues are repeated according to their algebraic multiplicity,
+  * so there are as many eigenvalues as rows in the matrix.
+  *
+  * The SelfAdjointView class provides a better algorithm for selfadjoint
+  * matrices.
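+  *
+  * A minimal sketch, assuming the Eigen namespace is in scope:
+  * \code
+  * Matrix3d m = Matrix3d::Random();
+  * Vector3cd eivals = m.eigenvalues(); // complex even for a real matrix
+  * \endcode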
+  *
+  * Example: \include MatrixBase_eigenvalues.cpp
+  * Output: \verbinclude MatrixBase_eigenvalues.out
+  *
+  * \sa EigenSolver::eigenvalues(), ComplexEigenSolver::eigenvalues(),
+  *     SelfAdjointView::eigenvalues()
+  */
+template<typename Derived>
+inline typename MatrixBase<Derived>::EigenvaluesReturnType
+MatrixBase<Derived>::eigenvalues() const
+{
+  typedef typename internal::traits<Derived>::Scalar Scalar;
+  return internal::eigenvalues_selector<Derived, NumTraits<Scalar>::IsComplex>::run(derived());
+}
+
+/** \brief Computes the eigenvalues of a matrix
+  * \returns Column vector containing the eigenvalues.
+  *
+  * \eigenvalues_module
+  * This function computes the eigenvalues with the help of the
+  * SelfAdjointEigenSolver class.  The eigenvalues are repeated according to
+  * their algebraic multiplicity, so there are as many eigenvalues as rows in
+  * the matrix.
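+  *
+  * A minimal sketch, assuming the Eigen namespace is in scope:
+  * \code
+  * Matrix3d m = Matrix3d::Random();
+  * // only the lower triangle of m is read here
+  * Vector3d eivals = m.selfadjointView<Lower>().eigenvalues();
+  * \endcode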
+  *
+  * Example: \include SelfAdjointView_eigenvalues.cpp
+  * Output: \verbinclude SelfAdjointView_eigenvalues.out
+  *
+  * \sa SelfAdjointEigenSolver::eigenvalues(), MatrixBase::eigenvalues()
+  */
+template<typename MatrixType, unsigned int UpLo> 
+inline typename SelfAdjointView<MatrixType, UpLo>::EigenvaluesReturnType
+SelfAdjointView<MatrixType, UpLo>::eigenvalues() const
+{
+  typedef typename SelfAdjointView<MatrixType, UpLo>::PlainObject PlainObject;
+  PlainObject thisAsMatrix(*this);
+  return SelfAdjointEigenSolver<PlainObject>(thisAsMatrix, false).eigenvalues();
+}
+
+
+
+/** \brief Computes the L2 operator norm
+  * \returns Operator norm of the matrix.
+  *
+  * \eigenvalues_module
+  * This function computes the L2 operator norm of a matrix, which is also
+  * known as the spectral norm. The norm of a matrix \f$ A \f$ is defined to be
+  * \f[ \|A\|_2 = \max_x \frac{\|Ax\|_2}{\|x\|_2} \f]
+  * where the maximum is over all vectors and the norm on the right is the
+  * Euclidean vector norm. The norm equals the largest singular value, which is
+  * the square root of the largest eigenvalue of the positive semi-definite
+  * matrix \f$ A^*A \f$.
+  *
+  * The current implementation uses the eigenvalues of \f$ A^*A \f$, as computed
+  * by SelfAdjointView::eigenvalues(), to compute the operator norm of a
+  * matrix.  The SelfAdjointView class provides a better algorithm for
+  * selfadjoint matrices.
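+  *
+  * A minimal sketch, assuming the Eigen namespace is in scope:
+  * \code
+  * MatrixXd A = MatrixXd::Random(4,4);
+  * double n = A.operatorNorm(); // equals the largest singular value of A
+  * \endcode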
+  *
+  * Example: \include MatrixBase_operatorNorm.cpp
+  * Output: \verbinclude MatrixBase_operatorNorm.out
+  *
+  * \sa SelfAdjointView::eigenvalues(), SelfAdjointView::operatorNorm()
+  */
+template<typename Derived>
+inline typename MatrixBase<Derived>::RealScalar
+MatrixBase<Derived>::operatorNorm() const
+{
+  using std::sqrt;
+  typename Derived::PlainObject m_eval(derived());
+  // FIXME if it is really guaranteed that the eigenvalues are already sorted,
+  // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough.
+  return sqrt((m_eval*m_eval.adjoint())
+                 .eval()
+                 .template selfadjointView<Lower>()
+                 .eigenvalues()
+                 .maxCoeff());
+}
+
+/** \brief Computes the L2 operator norm
+  * \returns Operator norm of the matrix.
+  *
+  * \eigenvalues_module
+  * This function computes the L2 operator norm of a self-adjoint matrix. For a
+  * self-adjoint matrix, the operator norm is the largest eigenvalue.
+  *
+  * The current implementation uses the eigenvalues of the matrix, as computed
+  * by eigenvalues(), to compute the operator norm of the matrix.
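+  *
+  * A minimal sketch, assuming the Eigen namespace is in scope:
+  * \code
+  * Matrix3d m = Matrix3d::Random();
+  * double n = m.selfadjointView<Lower>().operatorNorm(); // largest |eigenvalue|
+  * \endcode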
+  *
+  * Example: \include SelfAdjointView_operatorNorm.cpp
+  * Output: \verbinclude SelfAdjointView_operatorNorm.out
+  *
+  * \sa eigenvalues(), MatrixBase::operatorNorm()
+  */
+template<typename MatrixType, unsigned int UpLo>
+inline typename SelfAdjointView<MatrixType, UpLo>::RealScalar
+SelfAdjointView<MatrixType, UpLo>::operatorNorm() const
+{
+  return eigenvalues().cwiseAbs().maxCoeff();
+}
+
+} // end namespace Eigen
+
+#endif
diff --git a/Eigen/src/Eigenvalues/RealQZ.h b/Eigen/src/Eigenvalues/RealQZ.h
new file mode 100644
index 0000000..aa3833e
--- /dev/null
+++ b/Eigen/src/Eigenvalues/RealQZ.h
@@ -0,0 +1,624 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Alexey Korepanov <kaikaikai@yandex.ru>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REAL_QZ_H
+#define EIGEN_REAL_QZ_H
+
+namespace Eigen {
+
+  /** \eigenvalues_module \ingroup Eigenvalues_Module
+   *
+   *
+   * \class RealQZ
+   *
+   * \brief Performs a real QZ decomposition of a pair of square matrices
+   *
+   * \tparam _MatrixType the type of the matrix of which we are computing the
+   * real QZ decomposition; this is expected to be an instantiation of the
+   * Matrix class template.
+   *
+   * Given real square matrices A and B, this class computes the real QZ
+   * decomposition: \f$ A = Q S Z \f$, \f$ B = Q T Z \f$ where Q and Z are
+   * real orthogonal matrices, T is an upper triangular matrix, and S is an
+   * upper quasi-triangular matrix. An orthogonal matrix is a matrix whose
+   * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
+   * matrix is a block-triangular matrix whose diagonal consists of 1-by-1
+   * blocks and 2-by-2 blocks where further reduction is impossible due to
+   * complex eigenvalues. 
+   *
+   * The eigenvalues of the pencil \f$ A - z B \f$ can be obtained from
+   * 1x1 and 2x2 blocks on the diagonals of S and T.
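+   *
+   * For instance, when \f$ S(i+1,i) = 0 \f$ row i carries a 1-by-1 block and,
+   * provided \f$ T(i,i) \neq 0 \f$, yields the eigenvalue shown in this
+   * sketch (hypothetical \c qz and index \c i):
+   * \code
+   * double lambda = qz.matrixS()(i,i) / qz.matrixT()(i,i);
+   * \endcode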
+   *
+   * Call the function compute() to compute the real QZ decomposition of a
+   * given pair of matrices. Alternatively, you can use the 
+   * RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ)
+   * constructor which computes the real QZ decomposition at construction
+   * time. Once the decomposition is computed, you can use the matrixS(),
+   * matrixT(), matrixQ() and matrixZ() functions to retrieve the matrices
+   * S, T, Q and Z in the decomposition. If computeQZ==false, some time
+   * is saved by not computing matrices Q and Z.
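+   *
+   * A minimal usage sketch with hypothetical square MatrixXd matrices \c A and \c B:
+   * \code
+   * RealQZ<MatrixXd> qz(A, B);  // computes S, T, Q and Z
+   * bool ok = A.isApprox(qz.matrixQ() * qz.matrixS() * qz.matrixZ());  // A = Q S Z
+   * \endcode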
+   *
+   * Example: \include RealQZ_compute.cpp
+   * Output: \verbinclude RealQZ_compute.out
+   *
+   * \note The implementation is based on the algorithm in "Matrix Computations"
+   * by Gene H. Golub and Charles F. Van Loan, and the paper "An Algorithm for
+   * Generalized Matrix Eigenvalue Problems" by C. B. Moler and G. W. Stewart.
+   *
+   * \sa class RealSchur, class ComplexSchur, class EigenSolver, class ComplexEigenSolver
+   */
+
+  template<typename _MatrixType> class RealQZ
+  {
+    public:
+      typedef _MatrixType MatrixType;
+      enum {
+        RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+        ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+        Options = MatrixType::Options,
+        MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+        MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+      };
+      typedef typename MatrixType::Scalar Scalar;
+      typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+      typedef typename MatrixType::Index Index;
+
+      typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
+      typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+
+      /** \brief Default constructor.
+       *
+       * \param [in] size  Positive integer, size of the matrices whose QZ decomposition will be computed.
+       *
+       * The default constructor is useful in cases in which the user intends to
+       * perform decompositions via compute().  The \p size parameter is only
+       * used as a hint. It is not an error to give a wrong \p size, but it may
+       * impair performance.
+       *
+       * \sa compute() for an example.
+       */
+      RealQZ(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) : 
+        m_S(size, size),
+        m_T(size, size),
+        m_Q(size, size),
+        m_Z(size, size),
+        m_workspace(size*2),
+        m_maxIters(400),
+        m_isInitialized(false)
+        { }
+
+      /** \brief Constructor; computes real QZ decomposition of given matrices
+       * 
+       * \param[in]  A          Matrix A.
+       * \param[in]  B          Matrix B.
+       * \param[in]  computeQZ  If false, Q and Z are not computed.
+       *
+       * This constructor calls compute() to compute the QZ decomposition.
+       */
+      RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ = true) :
+        m_S(A.rows(),A.cols()),
+        m_T(A.rows(),A.cols()),
+        m_Q(A.rows(),A.cols()),
+        m_Z(A.rows(),A.cols()),
+        m_workspace(A.rows()*2),
+        m_maxIters(400),
+        m_isInitialized(false) {
+          compute(A, B, computeQZ);
+        }
+
+      /** \brief Returns matrix Q in the QZ decomposition. 
+       *
+       * \returns A const reference to the matrix Q.
+       */
+      const MatrixType& matrixQ() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
+        return m_Q;
+      }
+
+      /** \brief Returns matrix Z in the QZ decomposition. 
+       *
+       * \returns A const reference to the matrix Z.
+       */
+      const MatrixType& matrixZ() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition.");
+        return m_Z;
+      }
+
+      /** \brief Returns matrix S in the QZ decomposition. 
+       *
+       * \returns A const reference to the matrix S.
+       */
+      const MatrixType& matrixS() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_S;
+      }
+
+      /** \brief Returns matrix T in the QZ decomposition.
+       *
+       * \returns A const reference to the matrix T.
+       */
+      const MatrixType& matrixT() const {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_T;
+      }
+
+      /** \brief Computes the QZ decomposition of given matrices.
+       * 
+       * \param[in]  A          Matrix A.
+       * \param[in]  B          Matrix B.
+       * \param[in]  computeQZ  If false, Q and Z are not computed.
+       * \returns    Reference to \c *this
+       */
+      RealQZ& compute(const MatrixType& A, const MatrixType& B, bool computeQZ = true);
+
+      /** \brief Reports whether previous computation was successful.
+       *
+       * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+       */
+      ComputationInfo info() const
+      {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_info;
+      }
+
+      /** \brief Returns the number of performed QR-like iterations.
+       */
+      Index iterations() const
+      {
+        eigen_assert(m_isInitialized && "RealQZ is not initialized.");
+        return m_global_iter;
+      }
+
+      /** Sets the maximum number of iterations allowed to converge to one eigenvalue
+       * or to decouple the problem.
+       */
+      RealQZ& setMaxIterations(Index maxIters)
+      {
+        m_maxIters = maxIters;
+        return *this;
+      }
+
+    private:
+
+      MatrixType m_S, m_T, m_Q, m_Z;
+      Matrix<Scalar,Dynamic,1> m_workspace;
+      ComputationInfo m_info;
+      Index m_maxIters;
+      bool m_isInitialized;
+      bool m_computeQZ;
+      Scalar m_normOfT, m_normOfS;
+      Index m_global_iter;
+
+      typedef Matrix<Scalar,3,1> Vector3s;
+      typedef Matrix<Scalar,2,1> Vector2s;
+      typedef Matrix<Scalar,2,2> Matrix2s;
+      typedef JacobiRotation<Scalar> JRs;
+
+      void hessenbergTriangular();
+      void computeNorms();
+      Index findSmallSubdiagEntry(Index iu);
+      Index findSmallDiagEntry(Index f, Index l);
+      void splitOffTwoRows(Index i);
+      void pushDownZero(Index z, Index f, Index l);
+      void step(Index f, Index l, Index iter);
+
+  }; // RealQZ
+
+  /** \internal Reduces S and T to upper Hessenberg-triangular form */
+  template<typename MatrixType>
+    void RealQZ<MatrixType>::hessenbergTriangular()
+    {
+
+      const Index dim = m_S.cols();
+
+      // perform QR decomposition of T, overwrite T with R, save Q
+      HouseholderQR<MatrixType> qrT(m_T);
+      m_T = qrT.matrixQR();
+      m_T.template triangularView<StrictlyLower>().setZero();
+      m_Q = qrT.householderQ();
+      // overwrite S with Q* S
+      m_S.applyOnTheLeft(m_Q.adjoint());
+      // init Z as Identity
+      if (m_computeQZ)
+        m_Z = MatrixType::Identity(dim,dim);
+      // reduce S to upper Hessenberg with Givens rotations
+      for (Index j=0; j<=dim-3; j++) {
+        for (Index i=dim-1; i>=j+2; i--) {
+          JRs G;
+          // kill S(i,j)
+          if(m_S.coeff(i,j) != 0)
+          {
+            G.makeGivens(m_S.coeff(i-1,j), m_S.coeff(i,j), &m_S.coeffRef(i-1, j));
+            m_S.coeffRef(i,j) = Scalar(0.0);
+            m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
+            m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
+            // update Q
+            if (m_computeQZ)
+              m_Q.applyOnTheRight(i-1,i,G);
+          }
+          // kill T(i,i-1)
+          if(m_T.coeff(i,i-1)!=Scalar(0))
+          {
+            G.makeGivens(m_T.coeff(i,i), m_T.coeff(i,i-1), &m_T.coeffRef(i,i));
+            m_T.coeffRef(i,i-1) = Scalar(0.0);
+            m_S.applyOnTheRight(i,i-1,G);
+            m_T.topRows(i).applyOnTheRight(i,i-1,G);
+            // update Z
+            if (m_computeQZ)
+              m_Z.applyOnTheLeft(i,i-1,G.adjoint());
+          }
+        }
+      }
+    }
+
+  /** \internal Computes vector L1 norms of S and T when already in Hessenberg-triangular form */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::computeNorms()
+    {
+      const Index size = m_S.cols();
+      m_normOfS = Scalar(0.0);
+      m_normOfT = Scalar(0.0);
+      for (Index j = 0; j < size; ++j)
+      {
+        m_normOfS += m_S.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
+        m_normOfT += m_T.row(j).segment(j, size - j).cwiseAbs().sum();
+      }
+    }
+
+
+  /** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */
+  template<typename MatrixType>
+    inline typename MatrixType::Index RealQZ<MatrixType>::findSmallSubdiagEntry(Index iu)
+    {
+      using std::abs;
+      Index res = iu;
+      while (res > 0)
+      {
+        Scalar s = abs(m_S.coeff(res-1,res-1)) + abs(m_S.coeff(res,res));
+        if (s == Scalar(0.0))
+          s = m_normOfS;
+        if (abs(m_S.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
+          break;
+        res--;
+      }
+      return res;
+    }
+
+  /** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1)  */
+  template<typename MatrixType>
+    inline typename MatrixType::Index RealQZ<MatrixType>::findSmallDiagEntry(Index f, Index l)
+    {
+      using std::abs;
+      Index res = l;
+      while (res >= f) {
+        if (abs(m_T.coeff(res,res)) <= NumTraits<Scalar>::epsilon() * m_normOfT)
+          break;
+        res--;
+      }
+      return res;
+    }
+
+  /** \internal decouple 2x2 diagonal block in rows i, i+1 if eigenvalues are real */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::splitOffTwoRows(Index i)
+    {
+      using std::abs;
+      using std::sqrt;
+      const Index dim=m_S.cols();
+      if (abs(m_S.coeff(i+1,i))==Scalar(0))
+        return;
+      Index z = findSmallDiagEntry(i,i+1);
+      if (z==i-1)
+      {
+        // block of (S T^{-1})
+        Matrix2s STi = m_T.template block<2,2>(i,i).template triangularView<Upper>().
+          template solve<OnTheRight>(m_S.template block<2,2>(i,i));
+        Scalar p = Scalar(0.5)*(STi(0,0)-STi(1,1));
+        Scalar q = p*p + STi(1,0)*STi(0,1);
+        if (q>=0) {
+          Scalar z = sqrt(q);
+          // one QR-like iteration for ABi - lambda I
+          // is enough - when we know exact eigenvalue in advance,
+          // convergence is immediate
+          JRs G;
+          if (p>=0)
+            G.makeGivens(p + z, STi(1,0));
+          else
+            G.makeGivens(p - z, STi(1,0));
+          m_S.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
+          m_T.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());
+          // update Q
+          if (m_computeQZ)
+            m_Q.applyOnTheRight(i,i+1,G);
+
+          G.makeGivens(m_T.coeff(i+1,i+1), m_T.coeff(i+1,i));
+          m_S.topRows(i+2).applyOnTheRight(i+1,i,G);
+          m_T.topRows(i+2).applyOnTheRight(i+1,i,G);
+          // update Z
+          if (m_computeQZ)
+            m_Z.applyOnTheLeft(i+1,i,G.adjoint());
+
+          m_S.coeffRef(i+1,i) = Scalar(0.0);
+          m_T.coeffRef(i+1,i) = Scalar(0.0);
+        }
+      }
+      else
+      {
+        pushDownZero(z,i,i+1);
+      }
+    }
+
+  /** \internal use zero in T(z,z) to zero S(l,l-1), working in block f..l */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::pushDownZero(Index z, Index f, Index l)
+    {
+      JRs G;
+      const Index dim = m_S.cols();
+      for (Index zz=z; zz<l; zz++)
+      {
+        // push 0 down
+        Index firstColS = zz>f ? (zz-1) : zz;
+        G.makeGivens(m_T.coeff(zz, zz+1), m_T.coeff(zz+1, zz+1));
+        m_S.rightCols(dim-firstColS).applyOnTheLeft(zz,zz+1,G.adjoint());
+        m_T.rightCols(dim-zz).applyOnTheLeft(zz,zz+1,G.adjoint());
+        m_T.coeffRef(zz+1,zz+1) = Scalar(0.0);
+        // update Q
+        if (m_computeQZ)
+          m_Q.applyOnTheRight(zz,zz+1,G);
+        // kill S(zz+1, zz-1)
+        if (zz>f)
+        {
+          G.makeGivens(m_S.coeff(zz+1, zz), m_S.coeff(zz+1,zz-1));
+          m_S.topRows(zz+2).applyOnTheRight(zz, zz-1,G);
+          m_T.topRows(zz+1).applyOnTheRight(zz, zz-1,G);
+          m_S.coeffRef(zz+1,zz-1) = Scalar(0.0);
+          // update Z
+          if (m_computeQZ)
+            m_Z.applyOnTheLeft(zz,zz-1,G.adjoint());
+        }
+      }
+      // finally kill S(l,l-1)
+      G.makeGivens(m_S.coeff(l,l), m_S.coeff(l,l-1));
+      m_S.applyOnTheRight(l,l-1,G);
+      m_T.applyOnTheRight(l,l-1,G);
+      m_S.coeffRef(l,l-1)=Scalar(0.0);
+      // update Z
+      if (m_computeQZ)
+        m_Z.applyOnTheLeft(l,l-1,G.adjoint());
+    }
+
+  /** \internal QR-like iterative step for block f..l */
+  template<typename MatrixType>
+    inline void RealQZ<MatrixType>::step(Index f, Index l, Index iter)
+    {
+      using std::abs;
+      const Index dim = m_S.cols();
+
+      // x, y, z
+      Scalar x, y, z;
+      if (iter==10)
+      {
+        // Wilkinson ad hoc shift
+        const Scalar
+          a11=m_S.coeff(f+0,f+0), a12=m_S.coeff(f+0,f+1),
+          a21=m_S.coeff(f+1,f+0), a22=m_S.coeff(f+1,f+1), a32=m_S.coeff(f+2,f+1),
+          b12=m_T.coeff(f+0,f+1),
+          b11i=Scalar(1.0)/m_T.coeff(f+0,f+0),
+          b22i=Scalar(1.0)/m_T.coeff(f+1,f+1),
+          a87=m_S.coeff(l-1,l-2),
+          a98=m_S.coeff(l-0,l-1),
+          b77i=Scalar(1.0)/m_T.coeff(l-2,l-2),
+          b88i=Scalar(1.0)/m_T.coeff(l-1,l-1);
+        Scalar ss = abs(a87*b77i) + abs(a98*b88i),
+               lpl = Scalar(1.5)*ss,
+               ll = ss*ss;
+        x = ll + a11*a11*b11i*b11i - lpl*a11*b11i + a12*a21*b11i*b22i
+          - a11*a21*b12*b11i*b11i*b22i;
+        y = a11*a21*b11i*b11i - lpl*a21*b11i + a21*a22*b11i*b22i 
+          - a21*a21*b12*b11i*b11i*b22i;
+        z = a21*a32*b11i*b22i;
+      }
+      else if (iter==16)
+      {
+        // another exceptional shift
+        x = m_S.coeff(f,f)/m_T.coeff(f,f)-m_S.coeff(l,l)/m_T.coeff(l,l) + m_S.coeff(l,l-1)*m_T.coeff(l-1,l) /
+          (m_T.coeff(l-1,l-1)*m_T.coeff(l,l));
+        y = m_S.coeff(f+1,f)/m_T.coeff(f,f);
+        z = 0;
+      }
+      else if (iter>23 && !(iter%8))
+      {
+        // extremely exceptional shift
+        x = internal::random<Scalar>(-1.0,1.0);
+        y = internal::random<Scalar>(-1.0,1.0);
+        z = internal::random<Scalar>(-1.0,1.0);
+      }
+      else
+      {
+        // Compute the shifts: (x,y,z,0...) = (AB^-1 - l1 I) (AB^-1 - l2 I) e1
+        // where l1 and l2 are the eigenvalues of the 2x2 matrix M = U V^-1, with
+        // U and V the 2x2 bottom-right submatrices of A and B. Thus:
+        //  = AB^-1AB^-1 + l1 l2 I - (l1+l2)(AB^-1)
+        //  = AB^-1AB^-1 + det(M) I - tr(M)(AB^-1)
+        // Since we are only interested in having x, y, z with a correct ratio, we have:
+        const Scalar
+          a11 = m_S.coeff(f,f),     a12 = m_S.coeff(f,f+1),
+          a21 = m_S.coeff(f+1,f),   a22 = m_S.coeff(f+1,f+1),
+                                    a32 = m_S.coeff(f+2,f+1),
+
+          a88 = m_S.coeff(l-1,l-1), a89 = m_S.coeff(l-1,l),
+          a98 = m_S.coeff(l,l-1),   a99 = m_S.coeff(l,l),
+
+          b11 = m_T.coeff(f,f),     b12 = m_T.coeff(f,f+1),
+                                    b22 = m_T.coeff(f+1,f+1),
+
+          b88 = m_T.coeff(l-1,l-1), b89 = m_T.coeff(l-1,l),
+                                    b99 = m_T.coeff(l,l);
+
+        x = ( (a88/b88 - a11/b11)*(a99/b99 - a11/b11) - (a89/b99)*(a98/b88) + (a98/b88)*(b89/b99)*(a11/b11) ) * (b11/a21)
+          + a12/b22 - (a11/b11)*(b12/b22);
+        y = (a22/b22-a11/b11) - (a21/b11)*(b12/b22) - (a88/b88-a11/b11) - (a99/b99-a11/b11) + (a98/b88)*(b89/b99);
+        z = a32/b22;
+      }
+
+      JRs G;
+
+      for (Index k=f; k<=l-2; k++)
+      {
+        // variables for Householder reflections
+        Vector2s essential2;
+        Scalar tau, beta;
+
+        Vector3s hr(x,y,z);
+
+        // Q_k to annihilate S(k+1,k-1) and S(k+2,k-1)
+        hr.makeHouseholderInPlace(tau, beta);
+        essential2 = hr.template bottomRows<2>();
+        Index fc=(std::max)(k-1,Index(0));  // first col to update
+        m_S.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
+        m_T.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());
+        if (m_computeQZ)
+          m_Q.template middleCols<3>(k).applyHouseholderOnTheRight(essential2, tau, m_workspace.data());
+        if (k>f)
+          m_S.coeffRef(k+2,k-1) = m_S.coeffRef(k+1,k-1) = Scalar(0.0);
+
+        // Z_{k1} to annihilate T(k+2,k+1) and T(k+2,k)
+        hr << m_T.coeff(k+2,k+2),m_T.coeff(k+2,k),m_T.coeff(k+2,k+1);
+        hr.makeHouseholderInPlace(tau, beta);
+        essential2 = hr.template bottomRows<2>();
+        {
+          Index lr = (std::min)(k+4,dim); // last row to update
+          Map<Matrix<Scalar,Dynamic,1> > tmp(m_workspace.data(),lr);
+          // S
+          tmp = m_S.template middleCols<2>(k).topRows(lr) * essential2;
+          tmp += m_S.col(k+2).head(lr);
+          m_S.col(k+2).head(lr) -= tau*tmp;
+          m_S.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
+          // T
+          tmp = m_T.template middleCols<2>(k).topRows(lr) * essential2;
+          tmp += m_T.col(k+2).head(lr);
+          m_T.col(k+2).head(lr) -= tau*tmp;
+          m_T.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();
+        }
+        if (m_computeQZ)
+        {
+          // Z
+          Map<Matrix<Scalar,1,Dynamic> > tmp(m_workspace.data(),dim);
+          tmp = essential2.adjoint()*(m_Z.template middleRows<2>(k));
+          tmp += m_Z.row(k+2);
+          m_Z.row(k+2) -= tau*tmp;
+          m_Z.template middleRows<2>(k) -= essential2 * (tau*tmp);
+        }
+        m_T.coeffRef(k+2,k) = m_T.coeffRef(k+2,k+1) = Scalar(0.0);
+
+        // Z_{k2} to annihilate T(k+1,k)
+        G.makeGivens(m_T.coeff(k+1,k+1), m_T.coeff(k+1,k));
+        m_S.applyOnTheRight(k+1,k,G);
+        m_T.applyOnTheRight(k+1,k,G);
+        // update Z
+        if (m_computeQZ)
+          m_Z.applyOnTheLeft(k+1,k,G.adjoint());
+        m_T.coeffRef(k+1,k) = Scalar(0.0);
+
+        // update x,y,z
+        x = m_S.coeff(k+1,k);
+        y = m_S.coeff(k+2,k);
+        if (k < l-2)
+          z = m_S.coeff(k+3,k);
+      } // loop over k
+
+      // Q_{n-1} to annihilate y = S(l,l-2)
+      G.makeGivens(x,y);
+      m_S.applyOnTheLeft(l-1,l,G.adjoint());
+      m_T.applyOnTheLeft(l-1,l,G.adjoint());
+      if (m_computeQZ)
+        m_Q.applyOnTheRight(l-1,l,G);
+      m_S.coeffRef(l,l-2) = Scalar(0.0);
+
+      // Z_{n-1} to annihilate T(l,l-1)
+      G.makeGivens(m_T.coeff(l,l),m_T.coeff(l,l-1));
+      m_S.applyOnTheRight(l,l-1,G);
+      m_T.applyOnTheRight(l,l-1,G);
+      if (m_computeQZ)
+        m_Z.applyOnTheLeft(l,l-1,G.adjoint());
+      m_T.coeffRef(l,l-1) = Scalar(0.0);
+    }
+
+
+  template<typename MatrixType>
+    RealQZ<MatrixType>& RealQZ<MatrixType>::compute(const MatrixType& A_in, const MatrixType& B_in, bool computeQZ)
+    {
+
+      const Index dim = A_in.cols();
+
+      eigen_assert (A_in.rows()==dim && A_in.cols()==dim 
+          && B_in.rows()==dim && B_in.cols()==dim 
+          && "Need square matrices of the same dimension");
+
+      m_isInitialized = true;
+      m_computeQZ = computeQZ;
+      m_S = A_in; m_T = B_in;
+      m_workspace.resize(dim*2);
+      m_global_iter = 0;
+
+      // entry point: Hessenberg-triangular decomposition
+      hessenbergTriangular();
+      // compute L1 vector norms of T, S into m_normOfS, m_normOfT
+      computeNorms();
+
+      Index l = dim-1, 
+            f, 
+            local_iter = 0;
+
+      while (l>0 && local_iter<m_maxIters)
+      {
+        f = findSmallSubdiagEntry(l);
+        // now rows and columns f..l (inclusive) decouple from the rest of the problem
+        if (f>0) m_S.coeffRef(f,f-1) = Scalar(0.0);
+        if (f == l) // One root found
+        {
+          l--;
+          local_iter = 0;
+        }
+        else if (f == l-1) // Two roots found
+        {
+          splitOffTwoRows(f);
+          l -= 2;
+          local_iter = 0;
+        }
+        else // No convergence yet
+        {
+          // if there's a zero on the diagonal of T, we can isolate an eigenvalue with Givens rotations
+          Index z = findSmallDiagEntry(f,l);
+          if (z>=f)
+          {
+            // zero found
+            pushDownZero(z,f,l);
+          }
+          else
+          {
+            // We are now sure that S.block(f,f, l-f+1,l-f+1) is unreduced upper-Hessenberg
+            // and T.block(f,f, l-f+1,l-f+1) is invertible upper-triangular, which allows us
+            // to apply a QR-like iteration to rows and columns f..l.
+            step(f,l, local_iter);
+            local_iter++;
+            m_global_iter++;
+          }
+        }
+      }
+      // check if we converged before reaching iterations limit
+      m_info = (local_iter<m_maxIters) ? Success : NoConvergence;
+      return *this;
+    } // end compute
+
+} // end namespace Eigen
+
+#endif // EIGEN_REAL_QZ_H
diff --git a/Eigen/src/Eigenvalues/RealSchur.h b/Eigen/src/Eigenvalues/RealSchur.h
new file mode 100644
index 0000000..16d3875
--- /dev/null
+++ b/Eigen/src/Eigenvalues/RealSchur.h
@@ -0,0 +1,525 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_REAL_SCHUR_H
+#define EIGEN_REAL_SCHUR_H
+
+#include "./HessenbergDecomposition.h"
+
+namespace Eigen { 
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class RealSchur
+  *
+  * \brief Performs a real Schur decomposition of a square matrix
+  *
+  * \tparam _MatrixType the type of the matrix of which we are computing the
+  * real Schur decomposition; this is expected to be an instantiation of the
+  * Matrix class template.
+  *
+  * Given a real square matrix A, this class computes the real Schur
+  * decomposition: \f$ A = U T U^T \f$ where U is a real orthogonal matrix and
+  * T is a real quasi-triangular matrix. An orthogonal matrix is a matrix whose
+  * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular
+  * matrix is a block-triangular matrix whose diagonal consists of 1-by-1
+  * blocks and 2-by-2 blocks with complex eigenvalues. The eigenvalues of the
+  * blocks on the diagonal of T are the same as the eigenvalues of the matrix
+  * A, and thus the real Schur decomposition is used in EigenSolver to compute
+  * the eigendecomposition of a matrix.
+  *
+  * Call the function compute() to compute the real Schur decomposition of a
+  * given matrix. Alternatively, you can use the RealSchur(const MatrixType&, bool)
+  * constructor which computes the real Schur decomposition at construction
+  * time. Once the decomposition is computed, you can use the matrixU() and
+  * matrixT() functions to retrieve the matrices U and T in the decomposition.
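+  *
+  * A minimal usage sketch with a hypothetical square MatrixXd \c A:
+  * \code
+  * RealSchur<MatrixXd> schur(A);
+  * MatrixXd U = schur.matrixU();
+  * MatrixXd T = schur.matrixT();
+  * // up to round-off, A == U * T * U.transpose()
+  * \endcode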
+  *
+  * The documentation of RealSchur(const MatrixType&, bool) contains an example
+  * of the typical use of this class.
+  *
+  * \note The implementation is adapted from
+  * <a href="http://math.nist.gov/javanumerics/jama/">JAMA</a> (public domain).
+  * Their code is based on EISPACK.
+  *
+  * \sa class ComplexSchur, class EigenSolver, class ComplexEigenSolver
+  */
+template<typename _MatrixType> class RealSchur
+{
+  public:
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+    typedef typename MatrixType::Index Index;
+
+    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;
+    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;
+
+    /** \brief Default constructor.
+      *
+      * \param [in] size  Positive integer, size of the matrix whose Schur decomposition will be computed.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via compute().  The \p size parameter is only
+      * used as a hint. It is not an error to give a wrong \p size, but it may
+      * impair performance.
+      *
+      * \sa compute() for an example.
+      */
+    RealSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)
+            : m_matT(size, size),
+              m_matU(size, size),
+              m_workspaceVector(size),
+              m_hess(size),
+              m_isInitialized(false),
+              m_matUisUptodate(false),
+              m_maxIters(-1)
+    { }
+
+    /** \brief Constructor; computes real Schur decomposition of given matrix. 
+      * 
+      * \param[in]  matrix    Square matrix whose Schur decomposition is to be computed.
+      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
+      *
+      * This constructor calls compute() to compute the Schur decomposition.
+      *
+      * Example: \include RealSchur_RealSchur_MatrixType.cpp
+      * Output: \verbinclude RealSchur_RealSchur_MatrixType.out
+      */
+    RealSchur(const MatrixType& matrix, bool computeU = true)
+            : m_matT(matrix.rows(),matrix.cols()),
+              m_matU(matrix.rows(),matrix.cols()),
+              m_workspaceVector(matrix.rows()),
+              m_hess(matrix.rows()),
+              m_isInitialized(false),
+              m_matUisUptodate(false),
+              m_maxIters(-1)
+    {
+      compute(matrix, computeU);
+    }
+
+    /** \brief Returns the orthogonal matrix in the Schur decomposition. 
+      *
+      * \returns A const reference to the matrix U.
+      *
+      * \pre Either the constructor RealSchur(const MatrixType&, bool) or the
+      * member function compute(const MatrixType&, bool) has been called before
+      * to compute the Schur decomposition of a matrix, and \p computeU was set
+      * to true (the default value).
+      *
+      * \sa RealSchur(const MatrixType&, bool) for an example
+      */
+    const MatrixType& matrixU() const
+    {
+      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+      eigen_assert(m_matUisUptodate && "The matrix U has not been computed during the RealSchur decomposition.");
+      return m_matU;
+    }
+
+    /** \brief Returns the quasi-triangular matrix in the Schur decomposition. 
+      *
+      * \returns A const reference to the matrix T.
+      *
+      * \pre Either the constructor RealSchur(const MatrixType&, bool) or the
+      * member function compute(const MatrixType&, bool) has been called before
+      * to compute the Schur decomposition of a matrix.
+      *
+      * \sa RealSchur(const MatrixType&, bool) for an example
+      */
+    const MatrixType& matrixT() const
+    {
+      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+      return m_matT;
+    }
+  
+    /** \brief Computes Schur decomposition of given matrix. 
+      * 
+      * \param[in]  matrix    Square matrix whose Schur decomposition is to be computed.
+      * \param[in]  computeU  If true, both T and U are computed; if false, only T is computed.
+      * \returns    Reference to \c *this
+      *
+      * The Schur decomposition is computed by first reducing the matrix to
+      * Hessenberg form using the class HessenbergDecomposition. The Hessenberg
+      * matrix is then reduced to triangular form by performing Francis QR
+      * iterations with implicit double shift. The cost of computing the Schur
+      * decomposition depends on the number of iterations; as a rough guide, it
+      * may be taken to be \f$25n^3\f$ flops if \a computeU is true and
+      * \f$10n^3\f$ flops if \a computeU is false.
+      *
+      * Example: \include RealSchur_compute.cpp
+      * Output: \verbinclude RealSchur_compute.out
+      *
+      * \sa compute(const MatrixType&, bool, Index)
+      */
+    RealSchur& compute(const MatrixType& matrix, bool computeU = true);
+
+    /** \brief Computes Schur decomposition of a Hessenberg matrix H = Z T Z^T
+     *  \param[in] matrixH Matrix in Hessenberg form H
+     *  \param[in] matrixQ Orthogonal matrix Q that transforms a matrix A to H: A = Q H Q^T
+     *  \param computeU Computes the matrix U of the Schur vectors
+     * \return Reference to \c *this
+     * 
+     *  This routine assumes that the matrix has already been reduced to Hessenberg form matrixH,
+     *  using either the class HessenbergDecomposition or some other means.
+     *  It computes the upper quasi-triangular matrix T of the Schur decomposition of H.
+     *  When computeU is true, this routine also computes the matrix U such that
+     *  A = U T U^T = (QZ) T (QZ)^T = Q H Q^T, where A is the initial matrix.
+     * 
+     * NOTE Q is referenced if computeU is true; so, if the initial orthogonal matrix
+     * is not available, the user should pass an identity matrix (Q.setIdentity()).
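+     * 
+     * A sketch mirroring what compute() does internally, with a hypothetical
+     * square MatrixXd \c A:
+     * \code
+     * HessenbergDecomposition<MatrixXd> hd(A);
+     * RealSchur<MatrixXd> schur(A.rows());
+     * schur.computeFromHessenberg(hd.matrixH(), hd.matrixQ(), true);
+     * \endcode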
+     * 
+     * \sa compute(const MatrixType&, bool)
+     */
+    template<typename HessMatrixType, typename OrthMatrixType>
+    RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ,  bool computeU);
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "RealSchur is not initialized.");
+      return m_info;
+    }
+
+    /** \brief Sets the maximum number of iterations allowed. 
+      *
+      * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size
+      * of the matrix.
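+      *
+      * A minimal sketch with a hypothetical square MatrixXd \c A:
+      * \code
+      * RealSchur<MatrixXd> schur;
+      * schur.setMaxIterations(30 * A.rows()).compute(A);
+      * bool converged = (schur.info() == Success);
+      * \endcode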
+      */
+    RealSchur& setMaxIterations(Index maxIters)
+    {
+      m_maxIters = maxIters;
+      return *this;
+    }
+
+    /** \brief Returns the maximum number of iterations. */
+    Index getMaxIterations()
+    {
+      return m_maxIters;
+    }
+
+    /** \brief Maximum number of iterations per row.
+      *
+      * If not otherwise specified, the maximum number of iterations is this number times the size of the
+      * matrix. It is currently set to 40.
+      */
+    static const int m_maxIterationsPerRow = 40;
+
+  private:
+    
+    MatrixType m_matT;
+    MatrixType m_matU;
+    ColumnVectorType m_workspaceVector;
+    HessenbergDecomposition<MatrixType> m_hess;
+    ComputationInfo m_info;
+    bool m_isInitialized;
+    bool m_matUisUptodate;
+    Index m_maxIters;
+
+    typedef Matrix<Scalar,3,1> Vector3s;
+
+    Scalar computeNormOfT();
+    Index findSmallSubdiagEntry(Index iu);
+    void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
+    void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
+    void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
+    void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);
+};
+
+
+template<typename MatrixType>
+RealSchur<MatrixType>& RealSchur<MatrixType>::compute(const MatrixType& matrix, bool computeU)
+{
+  eigen_assert(matrix.cols() == matrix.rows());
+  Index maxIters = m_maxIters;
+  if (maxIters == -1)
+    maxIters = m_maxIterationsPerRow * matrix.rows();
+
+  // Step 1. Reduce to Hessenberg form
+  m_hess.compute(matrix);
+
+  // Step 2. Reduce to real Schur form  
+  computeFromHessenberg(m_hess.matrixH(), m_hess.matrixQ(), computeU);
+  
+  return *this;
+}
+template<typename MatrixType>
+template<typename HessMatrixType, typename OrthMatrixType>
+RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ,  bool computeU)
+{  
+  m_matT = matrixH; 
+  if(computeU)
+    m_matU = matrixQ;
+  
+  Index maxIters = m_maxIters;
+  if (maxIters == -1)
+    maxIters = m_maxIterationsPerRow * matrixH.rows();
+  m_workspaceVector.resize(m_matT.cols());
+  Scalar* workspace = &m_workspaceVector.coeffRef(0);
+
+  // The matrix m_matT is divided into three parts.
+  // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero.
+  // Rows il,...,iu form the part we are working on (the active window).
+  // Rows iu+1,...,end have already been brought into triangular form.
+  Index iu = m_matT.cols() - 1;
+  Index iter = 0;      // iteration count for current eigenvalue
+  Index totalIter = 0; // iteration count for whole matrix
+  Scalar exshift(0);   // sum of exceptional shifts
+  Scalar norm = computeNormOfT();
+
+  if(norm!=0)
+  {
+    while (iu >= 0)
+    {
+      Index il = findSmallSubdiagEntry(iu);
+
+      // Check for convergence
+      if (il == iu) // One root found
+      {
+        m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift;
+        if (iu > 0)
+          m_matT.coeffRef(iu, iu-1) = Scalar(0);
+        iu--;
+        iter = 0;
+      }
+      else if (il == iu-1) // Two roots found
+      {
+        splitOffTwoRows(iu, computeU, exshift);
+        iu -= 2;
+        iter = 0;
+      }
+      else // No convergence yet
+      {
+        // The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG )
+        Vector3s firstHouseholderVector(0,0,0), shiftInfo;
+        computeShift(iu, iter, exshift, shiftInfo);
+        iter = iter + 1;
+        totalIter = totalIter + 1;
+        if (totalIter > maxIters) break;
+        Index im;
+        initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);
+        performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace);
+      }
+    }
+  }
+  if(totalIter <= maxIters)
+    m_info = Success;
+  else
+    m_info = NoConvergence;
+
+  m_isInitialized = true;
+  m_matUisUptodate = computeU;
+  return *this;
+}
+
+/** \internal Computes and returns vector L1 norm of T */
+template<typename MatrixType>
+inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
+{
+  const Index size = m_matT.cols();
+  // FIXME to be efficient the following would require a triangular reduction code
+  // Scalar norm = m_matT.upper().cwiseAbs().sum() 
+  //               + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();
+  Scalar norm(0);
+  for (Index j = 0; j < size; ++j)
+    norm += m_matT.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();
+  return norm;
+}
+
+/** \internal Look for single small sub-diagonal element and returns its index */
+template<typename MatrixType>
+inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
+{
+  using std::abs;
+  Index res = iu;
+  while (res > 0)
+  {
+    Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
+    if (abs(m_matT.coeff(res,res-1)) <= NumTraits<Scalar>::epsilon() * s)
+      break;
+    res--;
+  }
+  return res;
+}
+
+/** \internal Update T given that rows iu-1 and iu decouple from the rest. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift)
+{
+  using std::sqrt;
+  using std::abs;
+  const Index size = m_matT.cols();
+
+  // The eigenvalues of the 2x2 matrix [a b; c d] are
+  // tr/2 +/- sqrt(discr/4), where discr = tr^2 - 4*det, tr = a + d, det = ad - bc
+  Scalar p = Scalar(0.5) * (m_matT.coeff(iu-1,iu-1) - m_matT.coeff(iu,iu));
+  Scalar q = p * p + m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);   // q = tr^2 / 4 - det = discr/4
+  m_matT.coeffRef(iu,iu) += exshift;
+  m_matT.coeffRef(iu-1,iu-1) += exshift;
+
+  if (q >= Scalar(0)) // Two real eigenvalues
+  {
+    Scalar z = sqrt(abs(q));
+    JacobiRotation<Scalar> rot;
+    if (p >= Scalar(0))
+      rot.makeGivens(p + z, m_matT.coeff(iu, iu-1));
+    else
+      rot.makeGivens(p - z, m_matT.coeff(iu, iu-1));
+
+    m_matT.rightCols(size-iu+1).applyOnTheLeft(iu-1, iu, rot.adjoint());
+    m_matT.topRows(iu+1).applyOnTheRight(iu-1, iu, rot);
+    m_matT.coeffRef(iu, iu-1) = Scalar(0); 
+    if (computeU)
+      m_matU.applyOnTheRight(iu-1, iu, rot);
+  }
+
+  if (iu > 1) 
+    m_matT.coeffRef(iu-1, iu-2) = Scalar(0);
+}
+
+/** \internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo)
+{
+  using std::sqrt;
+  using std::abs;
+  shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu);
+  shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1);
+  shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);
+
+  // Wilkinson's original ad hoc shift
+  if (iter == 10)
+  {
+    exshift += shiftInfo.coeff(0);
+    for (Index i = 0; i <= iu; ++i)
+      m_matT.coeffRef(i,i) -= shiftInfo.coeff(0);
+    Scalar s = abs(m_matT.coeff(iu,iu-1)) + abs(m_matT.coeff(iu-1,iu-2));
+    shiftInfo.coeffRef(0) = Scalar(0.75) * s;
+    shiftInfo.coeffRef(1) = Scalar(0.75) * s;
+    shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s;
+  }
+
+  // MATLAB's new ad hoc shift
+  if (iter == 30)
+  {
+    Scalar s = (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
+    s = s * s + shiftInfo.coeff(2);
+    if (s > Scalar(0))
+    {
+      s = sqrt(s);
+      if (shiftInfo.coeff(1) < shiftInfo.coeff(0))
+        s = -s;
+      s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);
+      s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s;
+      exshift += s;
+      for (Index i = 0; i <= iu; ++i)
+        m_matT.coeffRef(i,i) -= s;
+      shiftInfo.setConstant(Scalar(0.964));
+    }
+  }
+}
+
+/** \internal Compute index im at which Francis QR step starts and the first Householder vector. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector)
+{
+  using std::abs;
+  Vector3s& v = firstHouseholderVector; // alias to save typing
+
+  for (im = iu-2; im >= il; --im)
+  {
+    const Scalar Tmm = m_matT.coeff(im,im);
+    const Scalar r = shiftInfo.coeff(0) - Tmm;
+    const Scalar s = shiftInfo.coeff(1) - Tmm;
+    v.coeffRef(0) = (r * s - shiftInfo.coeff(2)) / m_matT.coeff(im+1,im) + m_matT.coeff(im,im+1);
+    v.coeffRef(1) = m_matT.coeff(im+1,im+1) - Tmm - r - s;
+    v.coeffRef(2) = m_matT.coeff(im+2,im+1);
+    if (im == il) {
+      break;
+    }
+    const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
+    const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));
+    if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
+      break;
+  }
+}
+
+/** \internal Perform a Francis QR step involving rows il:iu and columns im:iu. */
+template<typename MatrixType>
+inline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)
+{
+  eigen_assert(im >= il);
+  eigen_assert(im <= iu-2);
+
+  const Index size = m_matT.cols();
+
+  for (Index k = im; k <= iu-2; ++k)
+  {
+    bool firstIteration = (k == im);
+
+    Vector3s v;
+    if (firstIteration)
+      v = firstHouseholderVector;
+    else
+      v = m_matT.template block<3,1>(k,k-1);
+
+    Scalar tau, beta;
+    Matrix<Scalar, 2, 1> ess;
+    v.makeHouseholder(ess, tau, beta);
+    
+    if (beta != Scalar(0)) // if v is not zero
+    {
+      if (firstIteration && k > il)
+        m_matT.coeffRef(k,k-1) = -m_matT.coeff(k,k-1);
+      else if (!firstIteration)
+        m_matT.coeffRef(k,k-1) = beta;
+
+      // These Householder transformations form the O(n^3) part of the algorithm
+      m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace);
+      m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);
+      if (computeU)
+        m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);
+    }
+  }
+
+  Matrix<Scalar, 2, 1> v = m_matT.template block<2,1>(iu-1, iu-2);
+  Scalar tau, beta;
+  Matrix<Scalar, 1, 1> ess;
+  v.makeHouseholder(ess, tau, beta);
+
+  if (beta != Scalar(0)) // if v is not zero
+  {
+    m_matT.coeffRef(iu-1, iu-2) = beta;
+    m_matT.block(iu-1, iu-1, 2, size-iu+1).applyHouseholderOnTheLeft(ess, tau, workspace);
+    m_matT.block(0, iu-1, iu+1, 2).applyHouseholderOnTheRight(ess, tau, workspace);
+    if (computeU)
+      m_matU.block(0, iu-1, size, 2).applyHouseholderOnTheRight(ess, tau, workspace);
+  }
+
+  // clean up pollution due to round-off errors
+  for (Index i = im+2; i <= iu; ++i)
+  {
+    m_matT.coeffRef(i,i-2) = Scalar(0);
+    if (i > im+2)
+      m_matT.coeffRef(i,i-3) = Scalar(0);
+  }
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_REAL_SCHUR_H
diff --git a/Eigen/src/Eigenvalues/RealSchur_MKL.h b/Eigen/src/Eigenvalues/RealSchur_MKL.h
new file mode 100644
index 0000000..ad97364
--- /dev/null
+++ b/Eigen/src/Eigenvalues/RealSchur_MKL.h
@@ -0,0 +1,83 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *    Real Schur decomposition needed for real nonsymmetric eigenvalues/eigenvectors.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_REAL_SCHUR_MKL_H
+#define EIGEN_REAL_SCHUR_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen { 
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_SCHUR_REAL(EIGTYPE, MKLTYPE, MKLPREFIX, MKLPREFIX_U, EIGCOLROW, MKLCOLROW) \
+template<> inline \
+RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
+RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, bool computeU) \
+{ \
+  typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> MatrixType; \
+  typedef MatrixType::Scalar Scalar; \
+  typedef MatrixType::RealScalar RealScalar; \
+\
+  eigen_assert(matrix.cols() == matrix.rows()); \
+\
+  lapack_int n = matrix.cols(), sdim, info; \
+  lapack_int lda = matrix.outerStride(); \
+  lapack_int matrix_order = MKLCOLROW; \
+  char jobvs, sort='N'; \
+  LAPACK_##MKLPREFIX_U##_SELECT2 select = 0; \
+  jobvs = (computeU) ? 'V' : 'N'; \
+  m_matU.resize(n, n); \
+  lapack_int ldvs  = m_matU.outerStride(); \
+  m_matT = matrix; \
+  Matrix<EIGTYPE, Dynamic, Dynamic> wr, wi; \
+  wr.resize(n, 1); wi.resize(n, 1); \
+  info = LAPACKE_##MKLPREFIX##gees( matrix_order, jobvs, sort, select, n, (MKLTYPE*)m_matT.data(), lda, &sdim, (MKLTYPE*)wr.data(), (MKLTYPE*)wi.data(), (MKLTYPE*)m_matU.data(), ldvs ); \
+  if(info == 0) \
+    m_info = Success; \
+  else \
+    m_info = NoConvergence; \
+\
+  m_isInitialized = true; \
+  m_matUisUptodate = computeU; \
+  return *this; \
+\
+}
+
+EIGEN_MKL_SCHUR_REAL(double,   double, d, D, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SCHUR_REAL(float,    float,  s, S, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SCHUR_REAL(double,   double, d, D, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SCHUR_REAL(float,    float,  s, S, RowMajor, LAPACK_ROW_MAJOR)
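+
+// The four instantiations above dispatch RealSchur<MatrixXf/MatrixXd>::compute()
+// (both storage orders) to LAPACKE_sgees/LAPACKE_dgees, which return the real
+// Schur factorization A = U T U^T with T quasi upper triangular, i.e., block
+// upper triangular with 1x1 and 2x2 blocks on the diagonal.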
+
+} // end namespace Eigen
+
+#endif // EIGEN_REAL_SCHUR_MKL_H
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
new file mode 100644
index 0000000..c2e76c8
--- /dev/null
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -0,0 +1,798 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SELFADJOINTEIGENSOLVER_H
+#define EIGEN_SELFADJOINTEIGENSOLVER_H
+
+#include "./Tridiagonalization.h"
+
+namespace Eigen { 
+
+template<typename _MatrixType>
+class GeneralizedSelfAdjointEigenSolver;
+
+namespace internal {
+template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues;
+}
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class SelfAdjointEigenSolver
+  *
+  * \brief Computes eigenvalues and eigenvectors of selfadjoint matrices
+  *
+  * \tparam _MatrixType the type of the matrix of which we are computing the
+  * eigendecomposition; this is expected to be an instantiation of the Matrix
+  * class template.
+  *
+  * A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real
+  * matrices, this means that the matrix is symmetric: it equals its
+  * transpose. This class computes the eigenvalues and eigenvectors of a
+  * selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors
+  * \f$ v \f$ such that \f$ Av = \lambda v \f$.  The eigenvalues of a
+  * selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with
+  * the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the
+  * eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint
+  * matrices, the matrix \f$ V \f$ is always invertible). This is called the
+  * eigendecomposition.
+  *
+  * The algorithm exploits the fact that the matrix is selfadjoint, making it
+  * faster and more accurate than the general purpose eigenvalue algorithms
+  * implemented in EigenSolver and ComplexEigenSolver.
+  *
+  * Only the \b lower \b triangular \b part of the input matrix is referenced.
+  *
+  * Call the function compute() to compute the eigenvalues and eigenvectors of
+  * a given matrix. Alternatively, you can use the
+  * SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes
+  * the eigenvalues and eigenvectors at construction time. Once the eigenvalues
+  * and eigenvectors are computed, they can be retrieved with the eigenvalues()
+  * and eigenvectors() functions.
+  *
+  * The documentation for SelfAdjointEigenSolver(const MatrixType&, int)
+  * contains an example of the typical use of this class.
+  *
+  * To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and
+  * the likes, see the class GeneralizedSelfAdjointEigenSolver.
+  *
+  * \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver
+  */
+template<typename _MatrixType> class SelfAdjointEigenSolver
+{
+  public:
+
+    typedef _MatrixType MatrixType;
+    enum {
+      Size = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+    
+    /** \brief Scalar type for matrices of type \p _MatrixType. */
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+
+    /** \brief Real scalar type for \p _MatrixType.
+      *
+      * This is just \c Scalar if #Scalar is real (e.g., \c float or
+      * \c double), and the type of the real part of \c Scalar if #Scalar is
+      * complex.
+      */
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    
+    friend struct internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>;
+
+    /** \brief Type for vector of eigenvalues as returned by eigenvalues().
+      *
+      * This is a column vector with entries of type #RealScalar.
+      * The length of the vector is the size of \p _MatrixType.
+      */
+    typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType;
+    typedef Tridiagonalization<MatrixType> TridiagonalizationType;
+
+    /** \brief Default constructor for fixed-size matrices.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via compute(). This constructor
+      * can only be used if \p _MatrixType is a fixed-size matrix; use
+      * SelfAdjointEigenSolver(Index) for dynamic-size matrices.
+      *
+      * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out
+      */
+    SelfAdjointEigenSolver()
+        : m_eivec(),
+          m_eivalues(),
+          m_subdiag(),
+          m_isInitialized(false)
+    { }
+
+    /** \brief Constructor, pre-allocates memory for dynamic-size matrices.
+      *
+      * \param [in]  size  Positive integer, size of the matrix whose
+      * eigenvalues and eigenvectors will be computed.
+      *
+      * This constructor is useful for dynamic-size matrices, when the user
+      * intends to perform decompositions via compute(). The \p size
+      * parameter is only used as a hint. It is not an error to give a wrong
+      * \p size, but it may impair performance.
+      *
+      * \sa compute() for an example
+      */
+    SelfAdjointEigenSolver(Index size)
+        : m_eivec(size, size),
+          m_eivalues(size),
+          m_subdiag(size > 1 ? size - 1 : 1),
+          m_isInitialized(false)
+    {}
+
+    /** \brief Constructor; computes eigendecomposition of given matrix.
+      *
+      * \param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to
+      *    be computed. Only the lower triangular part of the matrix is referenced.
+      * \param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+      *
+      * This constructor calls compute(const MatrixType&, int) to compute the
+      * eigenvalues of the matrix \p matrix. The eigenvectors are computed if
+      * \p options equals #ComputeEigenvectors.
+      *
+      * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out
+      *
+      * \sa compute(const MatrixType&, int)
+      */
+    SelfAdjointEigenSolver(const MatrixType& matrix, int options = ComputeEigenvectors)
+      : m_eivec(matrix.rows(), matrix.cols()),
+        m_eivalues(matrix.cols()),
+        m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
+        m_isInitialized(false)
+    {
+      compute(matrix, options);
+    }
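+
+    // A minimal usage sketch (assuming a selfadjoint, dynamic-size MatrixXd A):
+    //
+    //   Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);
+    //   if (es.info() == Eigen::Success) {
+    //     Eigen::VectorXd lambda = es.eigenvalues();  // sorted in increasing order
+    //     Eigen::MatrixXd V = es.eigenvectors();      // columns are unit eigenvectors
+    //     // A is recovered, up to round-off, as V * lambda.asDiagonal() * V.transpose().
+    //   }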
+
+    /** \brief Computes eigendecomposition of given matrix.
+      *
+      * \param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to
+      *    be computed. Only the lower triangular part of the matrix is referenced.
+      * \param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+      * \returns    Reference to \c *this
+      *
+      * This function computes the eigenvalues of \p matrix.  The eigenvalues()
+      * function can be used to retrieve them.  If \p options equals #ComputeEigenvectors,
+      * then the eigenvectors are also computed and can be retrieved by
+      * calling eigenvectors().
+      *
+      * This implementation uses a symmetric QR algorithm. The matrix is first
+      * reduced to tridiagonal form using the Tridiagonalization class. The
+      * tridiagonal matrix is then brought to diagonal form with implicit
+      * symmetric QR steps with Wilkinson shift. Details can be found in
+      * Section 8.3 of Golub \& Van Loan, <i>%Matrix Computations</i>.
+      *
+      * The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors
+      * are required and \f$ 4n^3/3 \f$ if they are not required.
+      *
+      * This method reuses the memory in the SelfAdjointEigenSolver object that
+      * was allocated when the object was constructed, if the size of the
+      * matrix does not change.
+      *
+      * Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out
+      *
+      * \sa SelfAdjointEigenSolver(const MatrixType&, int)
+      */
+    SelfAdjointEigenSolver& compute(const MatrixType& matrix, int options = ComputeEigenvectors);
+    
+    /** \brief Computes eigendecomposition of given matrix using a direct algorithm
+      *
+      * This is a variant of compute(const MatrixType&, int options) which
+      * directly solves the underlying polynomial equation.
+      * 
+      * Currently only fixed-size 2x2 and 3x3 matrices are supported (e.g., Matrix2d, Matrix3d).
+      * 
+      * This method is usually significantly faster than the QR algorithm
+      * but it might also be less accurate. It is also worth noting that
+      * for 3x3 matrices it involves trigonometric operations which are
+      * not necessarily available for all scalar types.
+      *
+      * \sa compute(const MatrixType&, int options)
+      */
+    SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors);
+
+    /** \brief Returns the eigenvectors of given matrix.
+      *
+      * \returns  A const reference to the matrix whose columns are the eigenvectors.
+      *
+      * \pre The eigenvectors have been computed before.
+      *
+      * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
+      * to eigenvalue number \f$ k \f$ as returned by eigenvalues().  The
+      * eigenvectors are normalized to have (Euclidean) norm equal to one. If
+      * this object was used to solve the eigenproblem for the selfadjoint
+      * matrix \f$ A \f$, then the matrix returned by this function is the
+      * matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$.
+      *
+      * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
+      *
+      * \sa eigenvalues()
+      */
+    const MatrixType& eigenvectors() const
+    {
+      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+      return m_eivec;
+    }
+
+    /** \brief Returns the eigenvalues of given matrix.
+      *
+      * \returns A const reference to the column vector containing the eigenvalues.
+      *
+      * \pre The eigenvalues have been computed before.
+      *
+      * The eigenvalues are repeated according to their algebraic multiplicity,
+      * so there are as many eigenvalues as rows in the matrix. The eigenvalues
+      * are sorted in increasing order.
+      *
+      * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
+      *
+      * \sa eigenvectors(), MatrixBase::eigenvalues()
+      */
+    const RealVectorType& eigenvalues() const
+    {
+      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+      return m_eivalues;
+    }
+
+    /** \brief Computes the positive-definite square root of the matrix.
+      *
+      * \returns the positive-definite square root of the matrix
+      *
+      * \pre The eigenvalues and eigenvectors of a positive-definite matrix
+      * have been computed before.
+      *
+      * The square root of a positive-definite matrix \f$ A \f$ is the
+      * positive-definite matrix whose square equals \f$ A \f$. This function
+      * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
+      * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
+      *
+      * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
+      *
+      * \sa operatorInverseSqrt(),
+      *     \ref MatrixFunctions_Module "MatrixFunctions Module"
+      */
+    MatrixType operatorSqrt() const
+    {
+      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+      return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
+    }
+
+    /** \brief Computes the inverse square root of the matrix.
+      *
+      * \returns the inverse positive-definite square root of the matrix
+      *
+      * \pre The eigenvalues and eigenvectors of a positive-definite matrix
+      * have been computed before.
+      *
+      * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
+      * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
+      * cheaper than first computing the square root with operatorSqrt() and
+      * then its inverse with MatrixBase::inverse().
+      *
+      * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
+      * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
+      *
+      * \sa operatorSqrt(), MatrixBase::inverse(),
+      *     \ref MatrixFunctions_Module "MatrixFunctions Module"
+      */
+    MatrixType operatorInverseSqrt() const
+    {
+      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+      return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
+    }
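+
+    // Sketch of the identities the two members above satisfy for a
+    // positive-definite input A (assuming the decomposition succeeded):
+    //
+    //   Eigen::MatrixXd S = es.operatorSqrt();         // S * S     ~ A
+    //   Eigen::MatrixXd R = es.operatorInverseSqrt();  // R * S     ~ identity
+    //                                                  // R * A * R ~ identity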
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+      return m_info;
+    }
+
+    /** \brief Maximum number of iterations.
+      *
+      * The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n
+      * denotes the size of the matrix. This value is currently set to 30 (copied from LAPACK).
+      */
+    static const int m_maxIterations = 30;
+
+    #ifdef EIGEN2_SUPPORT
+    SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors)
+      : m_eivec(matrix.rows(), matrix.cols()),
+        m_eivalues(matrix.cols()),
+        m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
+        m_isInitialized(false)
+    {
+      compute(matrix, computeEigenvectors);
+    }
+    
+    SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
+        : m_eivec(matA.cols(), matA.cols()),
+          m_eivalues(matA.cols()),
+          m_subdiag(matA.cols() > 1 ? matA.cols() - 1 : 1),
+          m_isInitialized(false)
+    {
+      static_cast<GeneralizedSelfAdjointEigenSolver<MatrixType>*>(this)->compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
+    }
+    
+    void compute(const MatrixType& matrix, bool computeEigenvectors)
+    {
+      compute(matrix, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
+    }
+
+    void compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
+    {
+      compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
+    }
+    #endif // EIGEN2_SUPPORT
+
+  protected:
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+    
+    MatrixType m_eivec;
+    RealVectorType m_eivalues;
+    typename TridiagonalizationType::SubDiagonalType m_subdiag;
+    ComputationInfo m_info;
+    bool m_isInitialized;
+    bool m_eigenvectorsOk;
+};
+
+/** \internal
+  *
+  * \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  * Performs a QR step on a tridiagonal symmetric matrix represented as a
+  * pair of two vectors \a diag and \a subdiag.
+  *
+  * \param diag the diagonal of the tridiagonal matrix
+  * \param subdiag the subdiagonal of the tridiagonal matrix
+  * \param start index of the first element of the active (unreduced) block
+  * \param end index of the last element of the active block
+  * \param matrixQ pointer to the matrix in which the rotations are accumulated, or null
+  * \param n size of the full matrix
+  *
+  * For compilation efficiency reasons, this procedure does not use Eigen
+  * expressions for its arguments.
+  *
+  * Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
+  * "implicit symmetric QR step with Wilkinson shift"
+  */
+namespace internal {
+template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
+static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
+}
+
+template<typename MatrixType>
+SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
+::compute(const MatrixType& matrix, int options)
+{
+  check_template_parameters();
+  
+  using std::abs;
+  eigen_assert(matrix.cols() == matrix.rows());
+  eigen_assert((options&~(EigVecMask|GenEigMask))==0
+          && (options&EigVecMask)!=EigVecMask
+          && "invalid option parameter");
+  bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
+  Index n = matrix.cols();
+  m_eivalues.resize(n,1);
+
+  if(n==1)
+  {
+    m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0));
+    if(computeEigenvectors)
+      m_eivec.setOnes(n,n);
+    m_info = Success;
+    m_isInitialized = true;
+    m_eigenvectorsOk = computeEigenvectors;
+    return *this;
+  }
+
+  // declare some aliases
+  RealVectorType& diag = m_eivalues;
+  MatrixType& mat = m_eivec;
+
+  // map the matrix coefficients to [-1:1] to avoid over- and underflow.
+  mat = matrix.template triangularView<Lower>();
+  RealScalar scale = mat.cwiseAbs().maxCoeff();
+  if(scale==RealScalar(0)) scale = RealScalar(1);
+  mat.template triangularView<Lower>() /= scale;
+  m_subdiag.resize(n-1);
+  internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);
+  
+  Index end = n-1;
+  Index start = 0;
+  Index iter = 0; // total number of iterations
+
+  while (end>0)
+  {
+    for (Index i = start; i<end; ++i)
+      if (internal::isMuchSmallerThan(abs(m_subdiag[i]),(abs(diag[i])+abs(diag[i+1]))))
+        m_subdiag[i] = 0;
+
+    // find the largest unreduced block
+    while (end>0 && m_subdiag[end-1]==0)
+    {
+      end--;
+    }
+    if (end<=0)
+      break;
+
+    // if we spent too many iterations, we give up
+    iter++;
+    if(iter > m_maxIterations * n) break;
+
+    start = end - 1;
+    while (start>0 && m_subdiag[start-1]!=0)
+      start--;
+
+    internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
+  }
+
+  if (iter <= m_maxIterations * n)
+    m_info = Success;
+  else
+    m_info = NoConvergence;
+
+  // Sort eigenvalues and corresponding vectors.
+  // TODO make the sort optional ?
+  // TODO use a better sort algorithm !!
+  if (m_info == Success)
+  {
+    for (Index i = 0; i < n-1; ++i)
+    {
+      Index k;
+      m_eivalues.segment(i,n-i).minCoeff(&k);
+      if (k > 0)
+      {
+        std::swap(m_eivalues[i], m_eivalues[k+i]);
+        if(computeEigenvectors)
+          m_eivec.col(i).swap(m_eivec.col(k+i));
+      }
+    }
+  }
+  
+  // scale back the eigen values
+  m_eivalues *= scale;
+
+  m_isInitialized = true;
+  m_eigenvectorsOk = computeEigenvectors;
+  return *this;
+}
+
+
+namespace internal {
+  
+template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues
+{
+  static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options)
+  { eig.compute(A,options); }
+};
+
+template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3,false>
+{
+  typedef typename SolverType::MatrixType MatrixType;
+  typedef typename SolverType::RealVectorType VectorType;
+  typedef typename SolverType::Scalar Scalar;
+  typedef typename MatrixType::Index Index;
+  
+  /** \internal
+   * Computes the roots of the characteristic polynomial of \a m.
+   * For numerical stability, m.trace() should be near zero and, to avoid over- or underflow, m should be normalized.
+   */
+  static inline void computeRoots(const MatrixType& m, VectorType& roots)
+  {
+    using std::sqrt;
+    using std::atan2;
+    using std::cos;
+    using std::sin;
+    const Scalar s_inv3 = Scalar(1.0)/Scalar(3.0);
+    const Scalar s_sqrt3 = sqrt(Scalar(3.0));
+
+    // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0.  The
+    // eigenvalues are the roots to this equation, all guaranteed to be
+    // real-valued, because the matrix is symmetric.
+    Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0);
+    Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1);
+    Scalar c2 = m(0,0) + m(1,1) + m(2,2);
+
+    // Construct the parameters used in classifying the roots of the equation
+    // and in solving the equation for the roots in closed form.
+    Scalar c2_over_3 = c2*s_inv3;
+    Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
+    if(a_over_3<Scalar(0))
+      a_over_3 = Scalar(0);
+
+    Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));
+
+    Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
+    if(q<Scalar(0))
+      q = Scalar(0);
+
+    // Compute the eigenvalues by solving for the roots of the polynomial.
+    Scalar rho = sqrt(a_over_3);
+    Scalar theta = atan2(sqrt(q),half_b)*s_inv3;  // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
+    Scalar cos_theta = cos(theta);
+    Scalar sin_theta = sin(theta);
+    // roots are already sorted, since cos is monotonically decreasing on [0, pi]
+    roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
+    roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
+    roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
+  }
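+
+  // Worked check (illustrative only, not used by the solver): for the zero-trace
+  // matrix m = diag(-1, 0, 1), the code computes c2 = 0, c1 = -1, c0 = 0, so the
+  // characteristic equation reduces to x^3 - x = 0 and the sorted roots
+  // (-1, 0, 1) are returned.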
+
+  static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
+  {
+    using std::abs;
+    Index i0;
+    // Find non-zero column i0 (by construction, there must exist a non-zero coefficient on the diagonal):
+    mat.diagonal().cwiseAbs().maxCoeff(&i0);
+    // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
+    // so let's save it:
+    representative = mat.col(i0);
+    Scalar n0, n1;
+    VectorType c0, c1;
+    n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
+    n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
+    if(n0>n1) res = c0/std::sqrt(n0);
+    else      res = c1/std::sqrt(n1);
+
+    return true;
+  }
+
+  static inline void run(SolverType& solver, const MatrixType& mat, int options)
+  {
+    eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
+    eigen_assert((options&~(EigVecMask|GenEigMask))==0
+            && (options&EigVecMask)!=EigVecMask
+            && "invalid option parameter");
+    bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
+    
+    MatrixType& eivecs = solver.m_eivec;
+    VectorType& eivals = solver.m_eivalues;
+  
+    // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
+    Scalar shift = mat.trace() / Scalar(3);
+    // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
+    MatrixType scaledMat = mat.template selfadjointView<Lower>();
+    scaledMat.diagonal().array() -= shift;
+    Scalar scale = scaledMat.cwiseAbs().maxCoeff();
+    if(scale > 0) scaledMat /= scale;   // TODO for scale==0 we could save the remaining operations
+
+    // compute the eigenvalues
+    computeRoots(scaledMat,eivals);
+
+    // compute the eigenvectors
+    if(computeEigenvectors)
+    {
+      if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
+      {
+        // All three eigenvalues are numerically the same
+        eivecs.setIdentity();
+      }
+      else
+      {
+        MatrixType tmp;
+        tmp = scaledMat;
+
+        // Compute the eigenvector of the most distinct eigenvalue
+        Scalar d0 = eivals(2) - eivals(1);
+        Scalar d1 = eivals(1) - eivals(0);
+        Index k(0), l(2);
+        if(d0 > d1)
+        {
+          std::swap(k,l);
+          d0 = d1;
+        }
+
+        // Compute the eigenvector of index k
+        {
+          tmp.diagonal().array () -= eivals(k);
+          // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
+          extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
+        }
+
+        // Compute eigenvector of index l
+        if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
+        {
+          // If d0 is too small, then the two other eigenvalues are numerically the same,
+          // and thus we only have to ortho-normalize the near orthogonal vector we saved above.
+          eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(k);
+          eivecs.col(l).normalize();
+        }
+        else
+        {
+          tmp = scaledMat;
+          tmp.diagonal().array () -= eivals(l);
+
+          VectorType dummy;
+          extract_kernel(tmp, eivecs.col(l), dummy);
+        }
+
+        // Compute last eigenvector from the other two
+        eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
+      }
+    }
+
+    // Rescale back to the original size.
+    eivals *= scale;
+    eivals.array() += shift;
+    
+    solver.m_info = Success;
+    solver.m_isInitialized = true;
+    solver.m_eigenvectorsOk = computeEigenvectors;
+  }
+};
+
+// 2x2 direct eigenvalues decomposition, code from Hauke Heibel
+template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2,false>
+{
+  typedef typename SolverType::MatrixType MatrixType;
+  typedef typename SolverType::RealVectorType VectorType;
+  typedef typename SolverType::Scalar Scalar;
+  
+  static inline void computeRoots(const MatrixType& m, VectorType& roots)
+  {
+    using std::sqrt;
+    const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
+    const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
+    roots(0) = t1 - t0;
+    roots(1) = t1 + t0;
+  }
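+
+  // Worked check (illustrative only): for m = [2 1; 1 2], t1 = 2 and
+  // t0 = 0.5 * sqrt(0 + 4) = 1, so the roots are 1 and 3, which are the exact
+  // eigenvalues of that matrix.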
+  
+  static inline void run(SolverType& solver, const MatrixType& mat, int options)
+  {
+    using std::sqrt;
+    using std::abs;
+
+    eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
+    eigen_assert((options&~(EigVecMask|GenEigMask))==0
+            && (options&EigVecMask)!=EigVecMask
+            && "invalid option parameter");
+    bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
+    
+    MatrixType& eivecs = solver.m_eivec;
+    VectorType& eivals = solver.m_eivalues;
+  
+    // map the matrix coefficients to [-1:1] to avoid over- and underflow.
+    Scalar scale = mat.cwiseAbs().maxCoeff();
+    scale = (std::max)(scale,Scalar(1));
+    MatrixType scaledMat = mat / scale;
+    
+    // Compute the eigenvalues
+    computeRoots(scaledMat,eivals);
+    
+    // compute the eigen vectors
+    if(computeEigenvectors)
+    {
+      if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
+      {
+        eivecs.setIdentity();
+      }
+      else
+      {
+        scaledMat.diagonal().array () -= eivals(1);
+        Scalar a2 = numext::abs2(scaledMat(0,0));
+        Scalar c2 = numext::abs2(scaledMat(1,1));
+        Scalar b2 = numext::abs2(scaledMat(1,0));
+        if(a2>c2)
+        {
+          eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
+          eivecs.col(1) /= sqrt(a2+b2);
+        }
+        else
+        {
+          eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
+          eivecs.col(1) /= sqrt(c2+b2);
+        }
+
+        eivecs.col(0) << eivecs.col(1).unitOrthogonal();
+      }
+    }
+    
+    // Rescale back to the original size.
+    eivals *= scale;
+    
+    solver.m_info = Success;
+    solver.m_isInitialized = true;
+    solver.m_eigenvectorsOk = computeEigenvectors;
+  }
+};
+
+} // end namespace internal
+
+template<typename MatrixType>
+SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
+::computeDirect(const MatrixType& matrix, int options)
+{
+  internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>::run(*this,matrix,options);
+  return *this;
+}
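+
+// A minimal computeDirect() sketch (assuming a selfadjoint, fixed-size
+// Matrix3d A):
+//
+//   Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es;
+//   es.computeDirect(A);  // closed-form path instead of the iterative QR algorithm
+//   Eigen::Vector3d lambda = es.eigenvalues();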
+
+namespace internal {
+template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
+static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
+{
+  using std::abs;
+  RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
+  RealScalar e = subdiag[end-1];
+  // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
+  // underflow thus leading to inf/NaN values when using the following commented code:
+//   RealScalar e2 = numext::abs2(subdiag[end-1]);
+//   RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
+  // This explains the following, somewhat more complicated, version:
+  RealScalar mu = diag[end];
+  if(td==0)
+    mu -= abs(e);
+  else
+  {
+    RealScalar e2 = numext::abs2(subdiag[end-1]);
+    RealScalar h = numext::hypot(td,e);
+    if(e2==0)  mu -= (e / (td + (td>0 ? 1 : -1))) * (e / h);
+    else       mu -= e2 / (td + (td>0 ? h : -h));
+  }
+  
+  RealScalar x = diag[start] - mu;
+  RealScalar z = subdiag[start];
+  for (Index k = start; k < end; ++k)
+  {
+    JacobiRotation<RealScalar> rot;
+    rot.makeGivens(x, z);
+
+    // do T = G' T G
+    RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k];
+    RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];
+
+    diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);
+    diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
+    subdiag[k] = rot.c() * sdk - rot.s() * dkp1;
+
+    if (k > start)
+      subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;
+
+    x = subdiag[k];
+
+    if (k < end - 1)
+    {
+      z = -rot.s() * subdiag[k+1];
+      subdiag[k + 1] = rot.c() * subdiag[k+1];
+    }
+    
+    // apply the givens rotation to the unit matrix Q = Q * G
+    if (matrixQ)
+    {
+      // FIXME if StorageOrder == RowMajor this operation is not very efficient
+      Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
+      q.applyOnTheRight(k,k+1,rot);
+    }
+  }
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SELFADJOINTEIGENSOLVER_H
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
new file mode 100644
index 0000000..17c0dad
--- /dev/null
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_MKL.h
@@ -0,0 +1,92 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *    Self-adjoint eigenvalues/eigenvectors.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_SAEIGENSOLVER_MKL_H
+#define EIGEN_SAEIGENSOLVER_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen { 
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_EIG_SELFADJ(EIGTYPE, MKLTYPE, MKLRTYPE, MKLNAME, EIGCOLROW, MKLCOLROW ) \
+template<> inline \
+SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
+SelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, int options) \
+{ \
+  eigen_assert(matrix.cols() == matrix.rows()); \
+  eigen_assert((options&~(EigVecMask|GenEigMask))==0 \
+          && (options&EigVecMask)!=EigVecMask \
+          && "invalid option parameter"); \
+  bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; \
+  lapack_int n = matrix.cols(), lda, matrix_order, info; \
+  m_eivalues.resize(n,1); \
+  m_subdiag.resize(n-1); \
+  m_eivec = matrix; \
+\
+  if(n==1) \
+  { \
+    m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0)); \
+    if(computeEigenvectors) m_eivec.setOnes(n,n); \
+    m_info = Success; \
+    m_isInitialized = true; \
+    m_eigenvectorsOk = computeEigenvectors; \
+    return *this; \
+  } \
+\
+  lda = matrix.outerStride(); \
+  matrix_order=MKLCOLROW; \
+  char jobz, uplo='L'/*, range='A'*/; \
+  jobz = computeEigenvectors ? 'V' : 'N'; \
+\
+  info = LAPACKE_##MKLNAME( matrix_order, jobz, uplo, n, (MKLTYPE*)m_eivec.data(), lda, (MKLRTYPE*)m_eivalues.data() ); \
+  m_info = (info==0) ? Success : NoConvergence; \
+  m_isInitialized = true; \
+  m_eigenvectorsOk = computeEigenvectors; \
+  return *this; \
+}
+
+
+EIGEN_MKL_EIG_SELFADJ(double,   double,        double, dsyev, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(float,    float,         float,  ssyev, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(dcomplex, MKL_Complex16, double, zheev, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(scomplex, MKL_Complex8,  float,  cheev, ColMajor, LAPACK_COL_MAJOR)
+
+EIGEN_MKL_EIG_SELFADJ(double,   double,        double, dsyev, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(float,    float,         float,  ssyev, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(dcomplex, MKL_Complex16, double, zheev, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_EIG_SELFADJ(scomplex, MKL_Complex8,  float,  cheev, RowMajor, LAPACK_ROW_MAJOR)
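+
+// The instantiations above route SelfAdjointEigenSolver::compute() for dynamic
+// matrices to LAPACKE_{s,d}syev (real symmetric) and LAPACKE_{c,z}heev (complex
+// Hermitian). With jobz=='V' these routines overwrite m_eivec with orthonormal
+// eigenvectors and return the eigenvalues in ascending order in m_eivalues.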
+
+} // end namespace Eigen
+
+#endif // EIGEN_SAEIGENSOLVER_MKL_H
diff --git a/Eigen/src/Eigenvalues/Tridiagonalization.h b/Eigen/src/Eigenvalues/Tridiagonalization.h
new file mode 100644
index 0000000..192278d
--- /dev/null
+++ b/Eigen/src/Eigenvalues/Tridiagonalization.h
@@ -0,0 +1,557 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRIDIAGONALIZATION_H
+#define EIGEN_TRIDIAGONALIZATION_H
+
+namespace Eigen { 
+
+namespace internal {
+  
+template<typename MatrixType> struct TridiagonalizationMatrixTReturnType;
+template<typename MatrixType>
+struct traits<TridiagonalizationMatrixTReturnType<MatrixType> >
+{
+  typedef typename MatrixType::PlainObject ReturnType;
+};
+
+template<typename MatrixType, typename CoeffVectorType>
+void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs);
+}
+
+/** \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  *
+  * \class Tridiagonalization
+  *
+  * \brief Tridiagonal decomposition of a selfadjoint matrix
+  *
+  * \tparam _MatrixType the type of the matrix of which we are computing the
+  * tridiagonal decomposition; this is expected to be an instantiation of the
+  * Matrix class template.
+  *
+  * This class performs a tridiagonal decomposition of a selfadjoint matrix \f$ A \f$ such that:
+  * \f$ A = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real symmetric tridiagonal matrix.
+  *
+  * A tridiagonal matrix is a matrix which has nonzero elements only on the
+  * main diagonal and the first diagonal below and above it. The Hessenberg
+  * decomposition of a selfadjoint matrix is in fact a tridiagonal
+  * decomposition. This class is used in SelfAdjointEigenSolver to compute the
+  * eigenvalues and eigenvectors of a selfadjoint matrix.
+  *
+  * Call the function compute() to compute the tridiagonal decomposition of a
+  * given matrix. Alternatively, you can use the Tridiagonalization(const MatrixType&)
+  * constructor which computes the tridiagonal decomposition at
+  * construction time. Once the decomposition is computed, you can use the
+  * matrixQ() and matrixT() functions to retrieve the matrices Q and T in the
+  * decomposition.
+  *
+  * The documentation of Tridiagonalization(const MatrixType&) contains an
+  * example of the typical use of this class.
+  *
+  * \sa class HessenbergDecomposition, class SelfAdjointEigenSolver
+  */
+template<typename _MatrixType> class Tridiagonalization
+{
+  public:
+
+    /** \brief Synonym for the template parameter \p _MatrixType. */
+    typedef _MatrixType MatrixType;
+
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+
+    enum {
+      Size = MatrixType::RowsAtCompileTime,
+      SizeMinusOne = Size == Dynamic ? Dynamic : (Size > 1 ? Size - 1 : 1),
+      Options = MatrixType::Options,
+      MaxSize = MatrixType::MaxRowsAtCompileTime,
+      MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : (MaxSize > 1 ? MaxSize - 1 : 1)
+    };
+
+    typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;
+    typedef typename internal::plain_col_type<MatrixType, RealScalar>::type DiagonalType;
+    typedef Matrix<RealScalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> SubDiagonalType;
+    typedef typename internal::remove_all<typename MatrixType::RealReturnType>::type MatrixTypeRealView;
+    typedef internal::TridiagonalizationMatrixTReturnType<MatrixTypeRealView> MatrixTReturnType;
+
+    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+              typename internal::add_const_on_value_type<typename Diagonal<const MatrixType>::RealReturnType>::type,
+              const Diagonal<const MatrixType>
+            >::type DiagonalReturnType;
+
+    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+              typename internal::add_const_on_value_type<typename Diagonal<
+                Block<const MatrixType,SizeMinusOne,SizeMinusOne> >::RealReturnType>::type,
+              const Diagonal<
+                Block<const MatrixType,SizeMinusOne,SizeMinusOne> >
+            >::type SubDiagonalReturnType;
+
+    /** \brief Return type of matrixQ() */
+    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;
+
+    /** \brief Default constructor.
+      *
+      * \param [in]  size  Positive integer, size of the matrix whose tridiagonal
+      * decomposition will be computed.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via compute().  The \p size parameter is only
+      * used as a hint. It is not an error to give a wrong \p size, but it may
+      * impair performance.
+      *
+      * \sa compute() for an example.
+      */
+    Tridiagonalization(Index size = Size==Dynamic ? 2 : Size)
+      : m_matrix(size,size),
+        m_hCoeffs(size > 1 ? size-1 : 1),
+        m_isInitialized(false)
+    {}
+
+    /** \brief Constructor; computes tridiagonal decomposition of given matrix.
+      *
+      * \param[in]  matrix  Selfadjoint matrix whose tridiagonal decomposition
+      * is to be computed.
+      *
+      * This constructor calls compute() to compute the tridiagonal decomposition.
+      *
+      * Example: \include Tridiagonalization_Tridiagonalization_MatrixType.cpp
+      * Output: \verbinclude Tridiagonalization_Tridiagonalization_MatrixType.out
+      */
+    Tridiagonalization(const MatrixType& matrix)
+      : m_matrix(matrix),
+        m_hCoeffs(matrix.cols() > 1 ? matrix.cols()-1 : 1),
+        m_isInitialized(false)
+    {
+      internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);
+      m_isInitialized = true;
+    }
+
+    /** \brief Computes tridiagonal decomposition of given matrix.
+      *
+      * \param[in]  matrix  Selfadjoint matrix whose tridiagonal decomposition
+      * is to be computed.
+      * \returns    Reference to \c *this
+      *
+      * The tridiagonal decomposition is computed by bringing the columns of
+      * the matrix successively in the required form using Householder
+      * reflections. The cost is \f$ 4n^3/3 \f$ flops, where \f$ n \f$ denotes
+      * the size of the given matrix.
+      *
+      * This method reuses the allocated data in the Tridiagonalization
+      * object, if the size of the matrix does not change.
+      *
+      * Example: \include Tridiagonalization_compute.cpp
+      * Output: \verbinclude Tridiagonalization_compute.out
+      */
+    Tridiagonalization& compute(const MatrixType& matrix)
+    {
+      m_matrix = matrix;
+      m_hCoeffs.resize(matrix.rows()-1, 1);
+      internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);
+      m_isInitialized = true;
+      return *this;
+    }
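+
+    // A minimal usage sketch (assuming a selfadjoint MatrixXd A):
+    //
+    //   Eigen::Tridiagonalization<Eigen::MatrixXd> tri(A);
+    //   Eigen::MatrixXd Q = tri.matrixQ();  // unitary factor
+    //   Eigen::MatrixXd T = tri.matrixT();  // real symmetric tridiagonal factor
+    //   // Q * T * Q.adjoint() reconstructs A up to round-off.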
+
+    /** \brief Returns the Householder coefficients.
+      *
+      * \returns a const reference to the vector of Householder coefficients
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * The Householder coefficients allow the reconstruction of the matrix
+      * \f$ Q \f$ in the tridiagonal decomposition from the packed data.
+      *
+      * Example: \include Tridiagonalization_householderCoefficients.cpp
+      * Output: \verbinclude Tridiagonalization_householderCoefficients.out
+      *
+      * \sa packedMatrix(), \ref Householder_Module "Householder module"
+      */
+    inline CoeffVectorType householderCoefficients() const
+    {
+      eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+      return m_hCoeffs;
+    }
+
+    /** \brief Returns the internal representation of the decomposition
+      *
+      * \returns a const reference to a matrix with the internal representation
+      *          of the decomposition.
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * The returned matrix contains the following information:
+      *  - the strict upper triangular part is equal to the input matrix A.
+      *  - the diagonal and lower sub-diagonal represent the real tridiagonal
+      *    symmetric matrix T.
+      *  - the rest of the lower part contains the Householder vectors that,
+      *    combined with Householder coefficients returned by
+      *    householderCoefficients(), allow the matrix Q to be reconstructed as
+      *       \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
+      *    Here, the matrices \f$ H_i \f$ are the Householder transformations
+      *       \f$ H_i = (I - h_i v_i v_i^T) \f$
+      *    where \f$ h_i \f$ is the \f$ i \f$th Householder coefficient and
+      *    \f$ v_i \f$ is the Householder vector defined by
+      *       \f$ v_i = [ 0, \ldots, 0, 1, M(i+2,i), \ldots, M(N-1,i) ]^T \f$
+      *    with M the matrix returned by this function.
+      *
+      * See LAPACK for further details on this packed storage.
+      *
+      * Example: \include Tridiagonalization_packedMatrix.cpp
+      * Output: \verbinclude Tridiagonalization_packedMatrix.out
+      *
+      * \sa householderCoefficients()
+      */
+    inline const MatrixType& packedMatrix() const
+    {
+      eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+      return m_matrix;
+    }
+
+    /** \brief Returns the unitary matrix Q in the decomposition
+      *
+      * \returns object representing the matrix Q
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * This function returns a light-weight object of template class
+      * HouseholderSequence. You can either apply it directly to a matrix or
+      * you can convert it to a matrix of type #MatrixType.
+      *
+      * \sa Tridiagonalization(const MatrixType&) for an example,
+      *     matrixT(), class HouseholderSequence
+      */
+    HouseholderSequenceType matrixQ() const
+    {
+      eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+      return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate())
+             .setLength(m_matrix.rows() - 1)
+             .setShift(1);
+    }
+
+    /** \brief Returns an expression of the tridiagonal matrix T in the decomposition
+      *
+      * \returns expression object representing the matrix T
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * Currently, this function can be used to extract the matrix T from internal
+      * data and copy it to a dense matrix object. In most cases, it may be
+      * sufficient to directly use the packed matrix or the vector expressions
+      * returned by diagonal() and subDiagonal() instead of creating a new
+      * dense copy of the matrix with this function.
+      *
+      * \sa Tridiagonalization(const MatrixType&) for an example,
+      * matrixQ(), packedMatrix(), diagonal(), subDiagonal()
+      */
+    MatrixTReturnType matrixT() const
+    {
+      eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+      return MatrixTReturnType(m_matrix.real());
+    }
+
+    /** \brief Returns the diagonal of the tridiagonal matrix T in the decomposition.
+      *
+      * \returns expression representing the diagonal of T
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * Example: \include Tridiagonalization_diagonal.cpp
+      * Output: \verbinclude Tridiagonalization_diagonal.out
+      *
+      * \sa matrixT(), subDiagonal()
+      */
+    DiagonalReturnType diagonal() const;
+
+    /** \brief Returns the subdiagonal of the tridiagonal matrix T in the decomposition.
+      *
+      * \returns expression representing the subdiagonal of T
+      *
+      * \pre Either the constructor Tridiagonalization(const MatrixType&) or
+      * the member function compute(const MatrixType&) has been called before
+      * to compute the tridiagonal decomposition of a matrix.
+      *
+      * \sa diagonal() for an example, matrixT()
+      */
+    SubDiagonalReturnType subDiagonal() const;
+
+  protected:
+
+    MatrixType m_matrix;
+    CoeffVectorType m_hCoeffs;
+    bool m_isInitialized;
+};
+
+template<typename MatrixType>
+typename Tridiagonalization<MatrixType>::DiagonalReturnType
+Tridiagonalization<MatrixType>::diagonal() const
+{
+  eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+  return m_matrix.diagonal();
+}
+
+template<typename MatrixType>
+typename Tridiagonalization<MatrixType>::SubDiagonalReturnType
+Tridiagonalization<MatrixType>::subDiagonal() const
+{
+  eigen_assert(m_isInitialized && "Tridiagonalization is not initialized.");
+  Index n = m_matrix.rows();
+  return Block<const MatrixType,SizeMinusOne,SizeMinusOne>(m_matrix, 1, 0, n-1,n-1).diagonal();
+}
+
+namespace internal {
+
+/** \internal
+  * Performs a tridiagonal decomposition of the selfadjoint matrix \a matA in-place.
+  *
+  * \param[in,out] matA On input the selfadjoint matrix. Only the \b lower triangular part is referenced.
+  *                     On output, the strict upper part is left unchanged, and the lower triangular part
+  *                     represents the T and Q matrices in packed format, as detailed below.
+  * \param[out]    hCoeffs returned Householder coefficients (see below)
+  *
+  * On output, the tridiagonal selfadjoint matrix T is stored in the diagonal
+  * and lower sub-diagonal of the matrix \a matA.
+  * The unitary matrix Q is represented in a compact way as a product of
+  * Householder reflectors \f$ H_i \f$ such that:
+  *       \f$ Q = H_{N-1} \ldots H_1 H_0 \f$.
+  * The Householder reflectors are defined as
+  *       \f$ H_i = (I - h_i v_i v_i^T) \f$
+  * where \f$ h_i = hCoeffs[i]\f$ is the \f$ i \f$th Householder coefficient and
+  * \f$ v_i \f$ is the Householder vector defined by
+  *       \f$ v_i = [ 0, \ldots, 0, 1, matA(i+2,i), \ldots, matA(N-1,i) ]^T \f$.
+  *
+  * Implemented from Golub's "Matrix Computations", algorithm 8.3.1.
+  *
+  * \sa Tridiagonalization::packedMatrix()
+  */
+template<typename MatrixType, typename CoeffVectorType>
+void tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)
+{
+  using numext::conj;
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  Index n = matA.rows();
+  eigen_assert(n==matA.cols());
+  eigen_assert(n==hCoeffs.size()+1 || n==1);
+  
+  for (Index i = 0; i<n-1; ++i)
+  {
+    Index remainingSize = n-i-1;
+    RealScalar beta;
+    Scalar h;
+    matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
+
+    // Apply similarity transformation to remaining columns,
+    // i.e., A = H A H' where H = I - h v v' and v = matA.col(i).tail(n-i-1)
+    matA.col(i).coeffRef(i+1) = 1;
+
+    hCoeffs.tail(n-i-1).noalias() = (matA.bottomRightCorner(remainingSize,remainingSize).template selfadjointView<Lower>()
+                                  * (conj(h) * matA.col(i).tail(remainingSize)));
+
+    hCoeffs.tail(n-i-1) += (conj(h)*Scalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1);
+
+    matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView<Lower>()
+      .rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), -1);
+
+    matA.col(i).coeffRef(i+1) = beta;
+    hCoeffs.coeffRef(i) = h;
+  }
+}
+
+// forward declaration, implementation at the end of this file
+template<typename MatrixType,
+         int Size=MatrixType::ColsAtCompileTime,
+         bool IsComplex=NumTraits<typename MatrixType::Scalar>::IsComplex>
+struct tridiagonalization_inplace_selector;
+
+/** \brief Performs a full tridiagonalization in place
+  *
+  * \param[in,out]  mat  On input, the selfadjoint matrix whose tridiagonal
+  *    decomposition is to be computed. Only the lower triangular part is referenced.
+  *    The rest is left unchanged. On output, the orthogonal matrix Q
+  *    in the decomposition if \p extractQ is true.
+  * \param[out]  diag  The diagonal of the tridiagonal matrix T in the
+  *    decomposition.
+  * \param[out]  subdiag  The subdiagonal of the tridiagonal matrix T in
+  *    the decomposition.
+  * \param[in]  extractQ  If true, the orthogonal matrix Q in the
+  *    decomposition is computed and stored in \p mat.
+  *
+  * Computes the tridiagonal decomposition of the selfadjoint matrix \p mat in place
+  * such that \f$ mat = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real
+  * symmetric tridiagonal matrix.
+  *
+  * The tridiagonal matrix T is passed to the output parameters \p diag and \p subdiag. If
+  * \p extractQ is true, then the orthogonal matrix Q is passed to \p mat. Otherwise the lower
+  * part of the matrix \p mat is destroyed.
+  *
+  * The vectors \p diag and \p subdiag are not resized. The function
+  * assumes that they are already of the correct size. The length of the
+  * vector \p diag should equal the number of rows in \p mat, and the
+  * length of the vector \p subdiag should be one less.
+  *
+  * This implementation contains an optimized path for 3-by-3 matrices
+  * which is especially useful for plane fitting.
+  *
+  * \note Currently, it requires two temporary vectors to hold the intermediate
+  * Householder coefficients, and to reconstruct the matrix Q from the Householder
+  * reflectors.
+  *
+  * Example (this uses the same matrix as the example in
+  *    Tridiagonalization::Tridiagonalization(const MatrixType&)):
+  *    \include Tridiagonalization_decomposeInPlace.cpp
+  * Output: \verbinclude Tridiagonalization_decomposeInPlace.out
+  *
+  * \sa class Tridiagonalization
+  */
+template<typename MatrixType, typename DiagonalType, typename SubDiagonalType>
+void tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
+{
+  eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1);
+  tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, extractQ);
+}
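+
+// A minimal usage sketch of the wrapper above (an illustrative addition, not
+// shipped Eigen documentation; note this lives in namespace internal and is
+// not part of the stable public API):
+// \code
+// Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);
+// A = (A + A.transpose()).eval();        // make A selfadjoint
+// Eigen::MatrixXd Q = A;                 // decomposed in place
+// Eigen::VectorXd diag(5), subdiag(4);
+// Eigen::internal::tridiagonalization_inplace(Q, diag, subdiag, /*extractQ=*/true);
+// Eigen::MatrixXd T = Eigen::MatrixXd::Zero(5,5);
+// T.diagonal()    = diag;
+// T.diagonal<1>() = subdiag;
+// T.diagonal<-1>() = subdiag;
+// // now A.isApprox(Q * T * Q.adjoint()) holds
+// \endcode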
+
+/** \internal
+  * General full tridiagonalization
+  */
+template<typename MatrixType, int Size, bool IsComplex>
+struct tridiagonalization_inplace_selector
+{
+  typedef typename Tridiagonalization<MatrixType>::CoeffVectorType CoeffVectorType;
+  typedef typename Tridiagonalization<MatrixType>::HouseholderSequenceType HouseholderSequenceType;
+  typedef typename MatrixType::Index Index;
+  template<typename DiagonalType, typename SubDiagonalType>
+  static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
+  {
+    CoeffVectorType hCoeffs(mat.cols()-1);
+    tridiagonalization_inplace(mat,hCoeffs);
+    diag = mat.diagonal().real();
+    subdiag = mat.template diagonal<-1>().real();
+    if(extractQ)
+      mat = HouseholderSequenceType(mat, hCoeffs.conjugate())
+            .setLength(mat.rows() - 1)
+            .setShift(1);
+  }
+};
+
+/** \internal
+  * Specialization for 3x3 real matrices.
+  * Especially useful for plane fitting.
+  */
+template<typename MatrixType>
+struct tridiagonalization_inplace_selector<MatrixType,3,false>
+{
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+
+  template<typename DiagonalType, typename SubDiagonalType>
+  static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
+  {
+    using std::sqrt;
+    diag[0] = mat(0,0);
+    RealScalar v1norm2 = numext::abs2(mat(2,0));
+    if(v1norm2 == RealScalar(0))
+    {
+      diag[1] = mat(1,1);
+      diag[2] = mat(2,2);
+      subdiag[0] = mat(1,0);
+      subdiag[1] = mat(2,1);
+      if (extractQ)
+        mat.setIdentity();
+    }
+    else
+    {
+      RealScalar beta = sqrt(numext::abs2(mat(1,0)) + v1norm2);
+      RealScalar invBeta = RealScalar(1)/beta;
+      Scalar m01 = mat(1,0) * invBeta;
+      Scalar m02 = mat(2,0) * invBeta;
+      Scalar q = RealScalar(2)*m01*mat(2,1) + m02*(mat(2,2) - mat(1,1));
+      diag[1] = mat(1,1) + m02*q;
+      diag[2] = mat(2,2) - m02*q;
+      subdiag[0] = beta;
+      subdiag[1] = mat(2,1) - m01 * q;
+      if (extractQ)
+      {
+        mat << 1,   0,    0,
+               0, m01,  m02,
+               0, m02, -m01;
+      }
+    }
+  }
+};
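+
+// The plane-fitting use case mentioned above, sketched through the public
+// SelfAdjointEigenSolver (which relies on this tridiagonalization internally).
+// fitPlaneNormal is a hypothetical helper, illustrative only; it assumes the
+// sample points are already centered around their mean:
+// \code
+// Eigen::Vector3d fitPlaneNormal(const Eigen::Matrix3Xd& centeredPoints)
+// {
+//   // Covariance of the centered samples; the eigenvector associated with
+//   // the smallest eigenvalue is the fitted plane normal.
+//   Eigen::Matrix3d cov = centeredPoints * centeredPoints.transpose();
+//   Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> eig(cov);
+//   return eig.eigenvectors().col(0);    // eigenvalues are sorted increasingly
+// }
+// \endcode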
+
+/** \internal
+  * Trivial specialization for 1x1 matrices
+  */
+template<typename MatrixType, bool IsComplex>
+struct tridiagonalization_inplace_selector<MatrixType,1,IsComplex>
+{
+  typedef typename MatrixType::Scalar Scalar;
+
+  template<typename DiagonalType, typename SubDiagonalType>
+  static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)
+  {
+    diag(0,0) = numext::real(mat(0,0));
+    if(extractQ)
+      mat(0,0) = Scalar(1);
+  }
+};
+
+/** \internal
+  * \eigenvalues_module \ingroup Eigenvalues_Module
+  *
+  * \brief Expression type for return value of Tridiagonalization::matrixT()
+  *
+  * \tparam MatrixType type of underlying dense matrix
+  */
+template<typename MatrixType> struct TridiagonalizationMatrixTReturnType
+: public ReturnByValue<TridiagonalizationMatrixTReturnType<MatrixType> >
+{
+    typedef typename MatrixType::Index Index;
+  public:
+    /** \brief Constructor.
+      *
+      * \param[in] mat The underlying dense matrix
+      */
+    TridiagonalizationMatrixTReturnType(const MatrixType& mat) : m_matrix(mat) { }
+
+    template <typename ResultType>
+    inline void evalTo(ResultType& result) const
+    {
+      result.setZero();
+      result.template diagonal<1>() = m_matrix.template diagonal<-1>().conjugate();
+      result.diagonal() = m_matrix.diagonal();
+      result.template diagonal<-1>() = m_matrix.template diagonal<-1>();
+    }
+
+    Index rows() const { return m_matrix.rows(); }
+    Index cols() const { return m_matrix.cols(); }
+
+  protected:
+    typename MatrixType::Nested m_matrix;
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRIDIAGONALIZATION_H
diff --git a/Eigen/src/Geometry/AlignedBox.h b/Eigen/src/Geometry/AlignedBox.h
new file mode 100644
index 0000000..b226336
--- /dev/null
+++ b/Eigen/src/Geometry/AlignedBox.h
@@ -0,0 +1,392 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ALIGNEDBOX_H
+#define EIGEN_ALIGNEDBOX_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  *
+  * \class AlignedBox
+  *
+  * \brief An axis aligned box
+  *
+  * \tparam _Scalar the type of the scalar coefficients
+  * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  *
+  * This class represents an axis aligned box as a pair of the minimal and maximal corners.
+  * \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty().
+  * \sa alignedboxtypedefs
+  */
+template <typename _Scalar, int _AmbientDim>
+class AlignedBox
+{
+public:
+EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
+  enum { AmbientDimAtCompileTime = _AmbientDim };
+  typedef _Scalar                                   Scalar;
+  typedef NumTraits<Scalar>                         ScalarTraits;
+  typedef DenseIndex                                Index;
+  typedef typename ScalarTraits::Real               RealScalar;
+  typedef typename ScalarTraits::NonInteger         NonInteger;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1>  VectorType;
+
+  /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
+  enum CornerType
+  {
+    /** 1D names @{ */
+    Min=0, Max=1,
+    /** @} */
+
+    /** Identifier for 2D corner @{ */
+    BottomLeft=0, BottomRight=1,
+    TopLeft=2, TopRight=3,
+    /** @} */
+
+    /** Identifier for 3D corner  @{ */
+    BottomLeftFloor=0, BottomRightFloor=1,
+    TopLeftFloor=2, TopRightFloor=3,
+    BottomLeftCeil=4, BottomRightCeil=5,
+    TopLeftCeil=6, TopRightCeil=7
+    /** @} */
+  };
+
+
+  /** Default constructor initializing a null box. */
+  inline AlignedBox()
+  { if (AmbientDimAtCompileTime!=Dynamic) setEmpty(); }
+
+  /** Constructs a null box with \a _dim the dimension of the ambient space. */
+  inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
+  { setEmpty(); }
+
+  /** Constructs a box with extremities \a _min and \a _max.
+   * \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */
+  template<typename OtherVectorType1, typename OtherVectorType2>
+  inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
+
+  /** Constructs a box containing a single point \a p. */
+  template<typename Derived>
+  inline explicit AlignedBox(const MatrixBase<Derived>& p) : m_min(p), m_max(m_min)
+  { }
+
+  ~AlignedBox() {}
+
+  /** \returns the dimension in which the box holds */
+  inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
+
+  /** \deprecated use isEmpty() */
+  inline bool isNull() const { return isEmpty(); }
+
+  /** \deprecated use setEmpty() */
+  inline void setNull() { setEmpty(); }
+
+  /** \returns true if the box is empty.
+   * \sa setEmpty */
+  inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
+
+  /** Makes \c *this an empty box.
+   * \sa isEmpty */
+  inline void setEmpty()
+  {
+    m_min.setConstant( ScalarTraits::highest() );
+    m_max.setConstant( ScalarTraits::lowest() );
+  }
+
+  /** \returns the minimal corner */
+  inline const VectorType& (min)() const { return m_min; }
+  /** \returns a non const reference to the minimal corner */
+  inline VectorType& (min)() { return m_min; }
+  /** \returns the maximal corner */
+  inline const VectorType& (max)() const { return m_max; }
+  /** \returns a non const reference to the maximal corner */
+  inline VectorType& (max)() { return m_max; }
+
+  /** \returns the center of the box */
+  inline const CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>,
+                            const CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const VectorType, const VectorType> >
+  center() const
+  { return (m_min+m_max)/2; }
+
+  /** \returns the lengths of the sides of the bounding box.
+    * Note that this function does not return the same result for
+    * integral and floating-point scalar types.
+    */
+  inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> sizes() const
+  { return m_max - m_min; }
+
+  /** \returns the volume of the bounding box */
+  inline Scalar volume() const
+  { return sizes().prod(); }
+
+  /** \returns an expression for the bounding box diagonal vector.
+    * If the length of the diagonal is needed, diagonal().norm()
+    * will provide it.
+    */
+  inline CwiseBinaryOp< internal::scalar_difference_op<Scalar>, const VectorType, const VectorType> diagonal() const
+  { return sizes(); }
+
+  /** \returns the vertex of the bounding box at the corner defined by
+    * the corner id \a corner. It works only for a 1D, 2D or 3D bounding box.
+    * For 1D bounding boxes, corners are named by 2 enum constants:
+    * Min and Max.
+    * For 2D bounding boxes, corners are named by 4 enum constants:
+    * BottomLeft, BottomRight, TopLeft, TopRight.
+    * For 3D bounding boxes, corners are named by 8 enum constants:
+    * BottomLeftFloor, BottomRightFloor, TopLeftFloor, TopRightFloor,
+    * BottomLeftCeil, BottomRightCeil, TopLeftCeil, TopRightCeil.
+    */
+  inline VectorType corner(CornerType corner) const
+  {
+    EIGEN_STATIC_ASSERT(_AmbientDim <= 3, THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE);
+
+    VectorType res;
+
+    Index mult = 1;
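+    // The corner id is a bitfield: bit d selects the max (1) or min (0)
+    // corner along dimension d.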
+    for(Index d=0; d<dim(); ++d)
+    {
+      if( mult & corner ) res[d] = m_max[d];
+      else                res[d] = m_min[d];
+      mult *= 2;
+    }
+    return res;
+  }
+
+  /** \returns a random point inside the bounding box sampled with
+   * a uniform distribution */
+  inline VectorType sample() const
+  {
+    VectorType r;
+    for(Index d=0; d<dim(); ++d)
+    {
+      if(!ScalarTraits::IsInteger)
+      {
+        r[d] = m_min[d] + (m_max[d]-m_min[d])
+             * internal::random<Scalar>(Scalar(0), Scalar(1));
+      }
+      else
+        r[d] = internal::random(m_min[d], m_max[d]);
+    }
+    return r;
+  }
+
+  /** \returns true if the point \a p is inside the box \c *this. */
+  template<typename Derived>
+  inline bool contains(const MatrixBase<Derived>& p) const
+  {
+    typename internal::nested<Derived,2>::type p_n(p.derived());
+    return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all();
+  }
+
+  /** \returns true if the box \a b is entirely inside the box \c *this. */
+  inline bool contains(const AlignedBox& b) const
+  { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
+
+  /** \returns true if the box \a b is intersecting the box \c *this.
+   * \sa intersection, clamp */
+  inline bool intersects(const AlignedBox& b) const
+  { return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); }
+
+  /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this.
+   * \sa extend(const AlignedBox&) */
+  template<typename Derived>
+  inline AlignedBox& extend(const MatrixBase<Derived>& p)
+  {
+    typename internal::nested<Derived,2>::type p_n(p.derived());
+    m_min = m_min.cwiseMin(p_n);
+    m_max = m_max.cwiseMax(p_n);
+    return *this;
+  }
+
+  /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this.
+   * \sa merged, extend(const MatrixBase&) */
+  inline AlignedBox& extend(const AlignedBox& b)
+  {
+    m_min = m_min.cwiseMin(b.m_min);
+    m_max = m_max.cwiseMax(b.m_max);
+    return *this;
+  }
+
+  /** Clamps \c *this by the box \a b and returns a reference to \c *this.
+   * \note If the boxes don't intersect, the resulting box is empty.
+   * \sa intersection(), intersects() */
+  inline AlignedBox& clamp(const AlignedBox& b)
+  {
+    m_min = m_min.cwiseMax(b.m_min);
+    m_max = m_max.cwiseMin(b.m_max);
+    return *this;
+  }
+
+  /** Returns an AlignedBox that is the intersection of \a b and \c *this
+   * \note If the boxes don't intersect, the resulting box is empty.
+   * \sa intersects(), clamp, contains()  */
+  inline AlignedBox intersection(const AlignedBox& b) const
+  {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
+
+  /** Returns an AlignedBox that is the union of \a b and \c *this.
+   * \note Merging with an empty box may result in a box bigger than \c *this. 
+   * \sa extend(const AlignedBox&) */
+  inline AlignedBox merged(const AlignedBox& b) const
+  { return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
+
+  /** Translates \c *this by the vector \a t and returns a reference to \c *this. */
+  template<typename Derived>
+  inline AlignedBox& translate(const MatrixBase<Derived>& a_t)
+  {
+    const typename internal::nested<Derived,2>::type t(a_t.derived());
+    m_min += t;
+    m_max += t;
+    return *this;
+  }
+
+  /** \returns the squared distance between the point \a p and the box \c *this,
+    * and zero if \a p is inside the box.
+    * \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&)
+    */
+  template<typename Derived>
+  inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& p) const;
+
+  /** \returns the squared distance between the boxes \a b and \c *this,
+    * and zero if the boxes intersect.
+    * \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&)
+    */
+  inline Scalar squaredExteriorDistance(const AlignedBox& b) const;
+
+  /** \returns the distance between the point \a p and the box \c *this,
+    * and zero if \a p is inside the box.
+    * \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&)
+    */
+  template<typename Derived>
+  inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
+  { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(p))); }
+
+  /** \returns the distance between the boxes \a b and \c *this,
+    * and zero if the boxes intersect.
+    * \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&)
+    */
+  inline NonInteger exteriorDistance(const AlignedBox& b) const
+  { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); }
+
+  /** \returns \c *this with scalar type cast to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<AlignedBox,
+           AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const
+  {
+    return typename internal::cast_return_type<AlignedBox,
+                    AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)
+  {
+    m_min = (other.min)().template cast<Scalar>();
+    m_max = (other.max)().template cast<Scalar>();
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const AlignedBox& other, const RealScalar& prec = ScalarTraits::dummy_precision()) const
+  { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }
+
+protected:
+
+  VectorType m_min, m_max;
+};
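+
+// A small usage sketch (an illustrative addition, not shipped Eigen
+// documentation):
+// \code
+// Eigen::AlignedBox3f box(Eigen::Vector3f(0,0,0), Eigen::Vector3f(1,1,1));
+// box.extend(Eigen::Vector3f(2.f, -1.f, 0.5f));          // grow to contain a point
+// bool inside = box.contains(Eigen::Vector3f::Constant(0.5f));    // true
+// Eigen::AlignedBox3f other(Eigen::Vector3f::Constant(0.5f),
+//                           Eigen::Vector3f::Constant(3.f));
+// Eigen::AlignedBox3f overlap = box.intersection(other); // non-empty here
+// float dist = box.exteriorDistance(Eigen::Vector3f(5.f, 0.f, 0.f)); // == 3
+// \endcode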
+
+
+
+template<typename Scalar,int AmbientDim>
+template<typename Derived>
+inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const MatrixBase<Derived>& a_p) const
+{
+  typename internal::nested<Derived,2*AmbientDim>::type p(a_p.derived());
+  Scalar dist2(0);
+  Scalar aux;
+  for (Index k=0; k<dim(); ++k)
+  {
+    if( m_min[k] > p[k] )
+    {
+      aux = m_min[k] - p[k];
+      dist2 += aux*aux;
+    }
+    else if( p[k] > m_max[k] )
+    {
+      aux = p[k] - m_max[k];
+      dist2 += aux*aux;
+    }
+  }
+  return dist2;
+}
+
+template<typename Scalar,int AmbientDim>
+inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const AlignedBox& b) const
+{
+  Scalar dist2(0);
+  Scalar aux;
+  for (Index k=0; k<dim(); ++k)
+  {
+    if( m_min[k] > b.m_max[k] )
+    {
+      aux = m_min[k] - b.m_max[k];
+      dist2 += aux*aux;
+    }
+    else if( b.m_min[k] > m_max[k] )
+    {
+      aux = b.m_min[k] - m_max[k];
+      dist2 += aux*aux;
+    }
+  }
+  return dist2;
+}
+
+/** \defgroup alignedboxtypedefs Global aligned box typedefs
+  *
+  * \ingroup Geometry_Module
+  *
+  * Eigen defines several typedef shortcuts for most common aligned box types.
+  *
+  * The general patterns are the following:
+  *
+  * \c AlignedBoxSizeType where \c Size can be \c 1, \c 2, \c 3, \c 4 for fixed size boxes or \c X for dynamic size,
+  * and where \c Type can be \c i for integer, \c f for float, \c d for double.
+  *
+  * For example, \c AlignedBox3d is a fixed-size 3-dimensional aligned box of doubles, and \c AlignedBoxXf is a dynamic-size aligned box of floats.
+  *
+  * \sa class AlignedBox
+  */
+
+#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)    \
+/** \ingroup alignedboxtypedefs */                                 \
+typedef AlignedBox<Type, Size>   AlignedBox##SizeSuffix##TypeSuffix;
+
+#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 1, 1) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \
+EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X)
+
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int,                  i)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float,                f)
+EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double,               d)
+
+#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES
+#undef EIGEN_MAKE_TYPEDEFS
+
+} // end namespace Eigen
+
+#endif // EIGEN_ALIGNEDBOX_H
diff --git a/Eigen/src/Geometry/AngleAxis.h b/Eigen/src/Geometry/AngleAxis.h
new file mode 100644
index 0000000..553d38c
--- /dev/null
+++ b/Eigen/src/Geometry/AngleAxis.h
@@ -0,0 +1,233 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ANGLEAXIS_H
+#define EIGEN_ANGLEAXIS_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class AngleAxis
+  *
+  * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  *
+  * \warning When setting up an AngleAxis object, the axis vector \b must \b be \b normalized.
+  *
+  * The following two typedefs are provided for convenience:
+  * \li \c AngleAxisf for \c float
+  * \li \c AngleAxisd for \c double
+  *
+  * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily
+  * mimic Euler-angles. Here is an example:
+  * \include AngleAxis_mimic_euler.cpp
+  * Output: \verbinclude AngleAxis_mimic_euler.out
+  *
+  * \note This class is not aimed to be used to store a rotation transformation,
+  * but rather to ease the creation of other rotation (Quaternion, rotation Matrix)
+  * and transformation objects.
+  *
+  * \sa class Quaternion, class Transform, MatrixBase::UnitX()
+  */
+
+namespace internal {
+template<typename _Scalar> struct traits<AngleAxis<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+}
+
+template<typename _Scalar>
+class AngleAxis : public RotationBase<AngleAxis<_Scalar>,3>
+{
+  typedef RotationBase<AngleAxis<_Scalar>,3> Base;
+
+public:
+
+  using Base::operator*;
+
+  enum { Dim = 3 };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef Matrix<Scalar,3,3> Matrix3;
+  typedef Matrix<Scalar,3,1> Vector3;
+  typedef Quaternion<Scalar> QuaternionType;
+
+protected:
+
+  Vector3 m_axis;
+  Scalar m_angle;
+
+public:
+
+  /** Default constructor without initialization. */
+  AngleAxis() {}
+  /** Constructs and initializes the angle-axis rotation from an \a angle in radians
+    * and an \a axis which \b must \b be \b normalized.
+    *
+    * \warning If the \a axis vector is not normalized, then the angle-axis object
+    *          represents an invalid rotation. */
+  template<typename Derived>
+  inline AngleAxis(const Scalar& angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
+  /** Constructs and initializes the angle-axis rotation from a quaternion \a q. */
+  template<typename QuatDerived> inline explicit AngleAxis(const QuaternionBase<QuatDerived>& q) { *this = q; }
+  /** Constructs and initializes the angle-axis rotation from a 3x3 rotation matrix. */
+  template<typename Derived>
+  inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }
+
+  Scalar angle() const { return m_angle; }
+  Scalar& angle() { return m_angle; }
+
+  const Vector3& axis() const { return m_axis; }
+  Vector3& axis() { return m_axis; }
+
+  /** Concatenates two rotations */
+  inline QuaternionType operator* (const AngleAxis& other) const
+  { return QuaternionType(*this) * QuaternionType(other); }
+
+  /** Concatenates two rotations */
+  inline QuaternionType operator* (const QuaternionType& other) const
+  { return QuaternionType(*this) * other; }
+
+  /** Concatenates two rotations */
+  friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b)
+  { return a * QuaternionType(b); }
+
+  /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */
+  AngleAxis inverse() const
+  { return AngleAxis(-m_angle, m_axis); }
+
+  template<class QuatDerived>
+  AngleAxis& operator=(const QuaternionBase<QuatDerived>& q);
+  template<typename Derived>
+  AngleAxis& operator=(const MatrixBase<Derived>& m);
+
+  template<typename Derived>
+  AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m);
+  Matrix3 toRotationMatrix(void) const;
+
+  /** \returns \c *this with scalar type cast to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit AngleAxis(const AngleAxis<OtherScalarType>& other)
+  {
+    m_axis = other.axis().template cast<Scalar>();
+    m_angle = Scalar(other.angle());
+  }
+
+  static inline const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const AngleAxis& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); }
+};
+
+/** \ingroup Geometry_Module
+  * single precision angle-axis type */
+typedef AngleAxis<float> AngleAxisf;
+/** \ingroup Geometry_Module
+  * double precision angle-axis type */
+typedef AngleAxis<double> AngleAxisd;
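+
+// A short usage sketch, composing elementary rotations in the spirit of the
+// Euler-angle example referenced above (illustrative only):
+// \code
+// Eigen::Matrix3f R;
+// R = Eigen::AngleAxisf(0.25*M_PI, Eigen::Vector3f::UnitX())
+//   * Eigen::AngleAxisf(0.5*M_PI,  Eigen::Vector3f::UnitY())
+//   * Eigen::AngleAxisf(0.33*M_PI, Eigen::Vector3f::UnitZ());
+// Eigen::Quaternionf q(Eigen::AngleAxisf(0.25*M_PI, Eigen::Vector3f::UnitX()));
+// Eigen::AngleAxisf aa(q);             // back from the (unit) quaternion
+// \endcode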
+
+/** Set \c *this from a \b unit quaternion.
+  * The axis is normalized.
+  * 
+  * \warning As with any other method dealing with quaternions, if the input
+  *          quaternion is not normalized then the result is undefined.
+  */
+template<typename Scalar>
+template<typename QuatDerived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived>& q)
+{
+  using std::acos;
+  using std::min;
+  using std::max;
+  using std::sqrt;
+  Scalar n2 = q.vec().squaredNorm();
+  if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision())
+  {
+    m_angle = 0;
+    m_axis << 1, 0, 0;
+  }
+  else
+  {
+    m_angle = Scalar(2)*acos((min)((max)(Scalar(-1),q.w()),Scalar(1)));
+    m_axis = q.vec() / sqrt(n2);
+  }
+  return *this;
+}
+
+/** Set \c *this from a 3x3 rotation matrix \a mat.
+  */
+template<typename Scalar>
+template<typename Derived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)
+{
+  // Since a direct conversion would not really be faster,
+  // let's use the robust Quaternion implementation:
+  return *this = QuaternionType(mat);
+}
+
+/** \brief Sets \c *this from a 3x3 rotation matrix.
+  */
+template<typename Scalar>
+template<typename Derived>
+AngleAxis<Scalar>& AngleAxis<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
+{
+  return *this = QuaternionType(mat);
+}
+
+/** Constructs and \returns an equivalent 3x3 rotation matrix.
+  */
+template<typename Scalar>
+typename AngleAxis<Scalar>::Matrix3
+AngleAxis<Scalar>::toRotationMatrix(void) const
+{
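+  // This is Rodrigues' rotation formula, R = c I + (1-c) a a^T + s [a]_x,
+  // with c = cos(angle), s = sin(angle) and a = axis, expanded coefficient-wise.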
+  using std::sin;
+  using std::cos;
+  Matrix3 res;
+  Vector3 sin_axis  = sin(m_angle) * m_axis;
+  Scalar c = cos(m_angle);
+  Vector3 cos1_axis = (Scalar(1)-c) * m_axis;
+
+  Scalar tmp;
+  tmp = cos1_axis.x() * m_axis.y();
+  res.coeffRef(0,1) = tmp - sin_axis.z();
+  res.coeffRef(1,0) = tmp + sin_axis.z();
+
+  tmp = cos1_axis.x() * m_axis.z();
+  res.coeffRef(0,2) = tmp + sin_axis.y();
+  res.coeffRef(2,0) = tmp - sin_axis.y();
+
+  tmp = cos1_axis.y() * m_axis.z();
+  res.coeffRef(1,2) = tmp - sin_axis.x();
+  res.coeffRef(2,1) = tmp + sin_axis.x();
+
+  res.diagonal() = (cos1_axis.cwiseProduct(m_axis)).array() + c;
+
+  return res;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_ANGLEAXIS_H
diff --git a/Eigen/src/Geometry/CMakeLists.txt b/Eigen/src/Geometry/CMakeLists.txt
new file mode 100644
index 0000000..f8f728b
--- /dev/null
+++ b/Eigen/src/Geometry/CMakeLists.txt
@@ -0,0 +1,8 @@
+FILE(GLOB Eigen_Geometry_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Geometry_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Geometry COMPONENT Devel
+  )
+
+ADD_SUBDIRECTORY(arch)
diff --git a/Eigen/src/Geometry/EulerAngles.h b/Eigen/src/Geometry/EulerAngles.h
new file mode 100644
index 0000000..82802fb
--- /dev/null
+++ b/Eigen/src/Geometry/EulerAngles.h
@@ -0,0 +1,104 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_EULERANGLES_H
+#define EIGEN_EULERANGLES_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  *
+  * \returns the Euler-angles of the rotation matrix \c *this using the convention defined by the triplet (\a a0,\a a1,\a a2)
+  *
+  * Each of the three parameters \a a0,\a a1,\a a2 represents the respective rotation axis as an integer in {0,1,2}.
+  * For instance, in:
+  * \code Vector3f ea = mat.eulerAngles(2, 0, 2); \endcode
+  * "2" represents the z axis and "0" the x axis, etc. The returned angles are such that
+  * we have the following equality:
+  * \code
+  * mat == AngleAxisf(ea[0], Vector3f::UnitZ())
+  *      * AngleAxisf(ea[1], Vector3f::UnitX())
+  *      * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode
+  * This corresponds to the right-multiply convention (with right-hand-side frames).
+  * 
+  * The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi].
+  * 
+  * \sa class AngleAxis
+  */
+template<typename Derived>
+inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>
+MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const
+{
+  using std::atan2;
+  using std::sin;
+  using std::cos;
+  /* Implemented from Graphics Gems IV */
+  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3)
+
+  Matrix<Scalar,3,1> res;
+  typedef Matrix<typename Derived::Scalar,2,1> Vector2;
+
+  const Index odd = ((a0+1)%3 == a1) ? 0 : 1;
+  const Index i = a0;
+  const Index j = (a0 + 1 + odd)%3;
+  const Index k = (a0 + 2 - odd)%3;
+  
+  if (a0==a2)
+  {
+    res[0] = atan2(coeff(j,i), coeff(k,i));
+    if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0)))
+    {
+      res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI);
+      Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
+      res[1] = -atan2(s2, coeff(i,i));
+    }
+    else
+    {
+      Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();
+      res[1] = atan2(s2, coeff(i,i));
+    }
+    
+    // With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles,
+    // we can compute their respective rotation, and apply its inverse to M. Since the result must
+    // be a rotation around x, we have:
+    //
+    //  c2  s1.s2 c1.s2                   1  0   0 
+    //  0   c1    -s1       *    M    =   0  c3  s3
+    //  -s2 s1.c2 c1.c2                   0 -s3  c3
+    //
+    //  Thus:  m11.c1 - m21.s1 = c3  &   m12.c1 - m22.s1 = s3
+    
+    Scalar s1 = sin(res[0]);
+    Scalar c1 = cos(res[0]);
+    res[2] = atan2(c1*coeff(j,k)-s1*coeff(k,k), c1*coeff(j,j) - s1 * coeff(k,j));
+  } 
+  else
+  {
+    res[0] = atan2(coeff(j,k), coeff(k,k));
+    Scalar c2 = Vector2(coeff(i,i), coeff(i,j)).norm();
+    if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0))) {
+      res[0] = (res[0] > Scalar(0)) ? res[0] - Scalar(M_PI) : res[0] + Scalar(M_PI);
+      res[1] = atan2(-coeff(i,k), -c2);
+    }
+    else
+      res[1] = atan2(-coeff(i,k), c2);
+    Scalar s1 = sin(res[0]);
+    Scalar c1 = cos(res[0]);
+    res[2] = atan2(s1*coeff(k,i)-c1*coeff(j,i), c1*coeff(j,j) - s1 * coeff(k,j));
+  }
+  if (!odd)
+    res = -res;
+  
+  return res;
+}
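+
+// Round-trip sketch for the (2,0,2) convention documented above
+// (illustrative only):
+// \code
+// Eigen::Matrix3f mat;
+// mat = Eigen::AngleAxisf(0.1f, Eigen::Vector3f::UnitZ())
+//     * Eigen::AngleAxisf(0.2f, Eigen::Vector3f::UnitX())
+//     * Eigen::AngleAxisf(0.3f, Eigen::Vector3f::UnitZ());
+// Eigen::Vector3f ea = mat.eulerAngles(2, 0, 2);  // recovers (0.1, 0.2, 0.3)
+// \endcode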
+
+} // end namespace Eigen
+
+#endif // EIGEN_EULERANGLES_H
diff --git a/Eigen/src/Geometry/Homogeneous.h b/Eigen/src/Geometry/Homogeneous.h
new file mode 100644
index 0000000..372e422
--- /dev/null
+++ b/Eigen/src/Geometry/Homogeneous.h
@@ -0,0 +1,307 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_HOMOGENEOUS_H
+#define EIGEN_HOMOGENEOUS_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Homogeneous
+  *
+  * \brief Expression of one (or a set of) homogeneous vector(s)
+  *
+  * \param MatrixType the type of the object being made homogeneous
+  *
+  * This class represents an expression of one (or a set of) homogeneous vector(s).
+  * It is the return type of MatrixBase::homogeneous() and most of the time
+  * this is the only way it is used.
+  *
+  * \sa MatrixBase::homogeneous()
+  */
+
+namespace internal {
+
+template<typename MatrixType,int Direction>
+struct traits<Homogeneous<MatrixType,Direction> >
+ : traits<MatrixType>
+{
+  typedef typename traits<MatrixType>::StorageKind StorageKind;
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+  enum {
+    RowsPlusOne = (MatrixType::RowsAtCompileTime != Dynamic) ?
+                  int(MatrixType::RowsAtCompileTime) + 1 : Dynamic,
+    ColsPlusOne = (MatrixType::ColsAtCompileTime != Dynamic) ?
+                  int(MatrixType::ColsAtCompileTime) + 1 : Dynamic,
+    RowsAtCompileTime = Direction==Vertical  ?  RowsPlusOne : MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = Direction==Horizontal ? ColsPlusOne : MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = RowsAtCompileTime,
+    MaxColsAtCompileTime = ColsAtCompileTime,
+    TmpFlags = _MatrixTypeNested::Flags & HereditaryBits,
+    Flags = ColsAtCompileTime==1 ? (TmpFlags & ~RowMajorBit)
+          : RowsAtCompileTime==1 ? (TmpFlags | RowMajorBit)
+          : TmpFlags,
+    CoeffReadCost = _MatrixTypeNested::CoeffReadCost
+  };
+};
+
+template<typename MatrixType,typename Lhs> struct homogeneous_left_product_impl;
+template<typename MatrixType,typename Rhs> struct homogeneous_right_product_impl;
+
+} // end namespace internal
+
+template<typename MatrixType,int _Direction> class Homogeneous
+  : internal::no_assignment_operator, public MatrixBase<Homogeneous<MatrixType,_Direction> >
+{
+  public:
+
+    enum { Direction = _Direction };
+
+    typedef MatrixBase<Homogeneous> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(Homogeneous)
+
+    inline Homogeneous(const MatrixType& matrix)
+      : m_matrix(matrix)
+    {}
+
+    inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical   ? 1 : 0); }
+    inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 1 : 0); }
+
+    inline Scalar coeff(Index row, Index col) const
+    {
+      if(  (int(Direction)==Vertical   && row==m_matrix.rows())
+        || (int(Direction)==Horizontal && col==m_matrix.cols()))
+        return Scalar(1);
+      return m_matrix.coeff(row, col);
+    }
+
+    template<typename Rhs>
+    inline const internal::homogeneous_right_product_impl<Homogeneous,Rhs>
+    operator* (const MatrixBase<Rhs>& rhs) const
+    {
+      eigen_assert(int(Direction)==Horizontal);
+      return internal::homogeneous_right_product_impl<Homogeneous,Rhs>(m_matrix,rhs.derived());
+    }
+
+    template<typename Lhs> friend
+    inline const internal::homogeneous_left_product_impl<Homogeneous,Lhs>
+    operator* (const MatrixBase<Lhs>& lhs, const Homogeneous& rhs)
+    {
+      eigen_assert(int(Direction)==Vertical);
+      return internal::homogeneous_left_product_impl<Homogeneous,Lhs>(lhs.derived(),rhs.m_matrix);
+    }
+
+    template<typename Scalar, int Dim, int Mode, int Options> friend
+    inline const internal::homogeneous_left_product_impl<Homogeneous,Transform<Scalar,Dim,Mode,Options> >
+    operator* (const Transform<Scalar,Dim,Mode,Options>& lhs, const Homogeneous& rhs)
+    {
+      eigen_assert(int(Direction)==Vertical);
+      return internal::homogeneous_left_product_impl<Homogeneous,Transform<Scalar,Dim,Mode,Options> >(lhs,rhs.m_matrix);
+    }
+
+  protected:
+    typename MatrixType::Nested m_matrix;
+};
+
+/** \geometry_module
+  *
+  * \return an expression of the equivalent homogeneous vector
+  *
+  * \only_for_vectors
+  *
+  * Example: \include MatrixBase_homogeneous.cpp
+  * Output: \verbinclude MatrixBase_homogeneous.out
+  *
+  * \sa class Homogeneous
+  */
+template<typename Derived>
+inline typename MatrixBase<Derived>::HomogeneousReturnType
+MatrixBase<Derived>::homogeneous() const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+  return derived();
+}
+
+/** \geometry_module
+  *
+  * \returns a matrix expression of homogeneous column (or row) vectors
+  *
+  * Example: \include VectorwiseOp_homogeneous.cpp
+  * Output: \verbinclude VectorwiseOp_homogeneous.out
+  *
+  * \sa MatrixBase::homogeneous() */
+template<typename ExpressionType, int Direction>
+inline Homogeneous<ExpressionType,Direction>
+VectorwiseOp<ExpressionType,Direction>::homogeneous() const
+{
+  return _expression();
+}
+
+/** \geometry_module
+  *
+  * \returns an expression of the homogeneous normalized vector of \c *this
+  *
+  * Example: \include MatrixBase_hnormalized.cpp
+  * Output: \verbinclude MatrixBase_hnormalized.out
+  *
+  * \sa VectorwiseOp::hnormalized() */
+template<typename Derived>
+inline const typename MatrixBase<Derived>::HNormalizedReturnType
+MatrixBase<Derived>::hnormalized() const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
+  return ConstStartMinusOne(derived(),0,0,
+    ColsAtCompileTime==1?size()-1:1,
+    ColsAtCompileTime==1?1:size()-1) / coeff(size()-1);
+}
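+
+// Round-trip sketch: applying a projective transform to a 3D point through
+// homogeneous coordinates (illustrative only):
+// \code
+// Eigen::Matrix4d P = Eigen::Matrix4d::Random(); // some projective transform
+// Eigen::Vector3d x(1.0, 2.0, 3.0);
+// Eigen::Vector4d hx = x.homogeneous();          // (1, 2, 3, 1)
+// Eigen::Vector3d y  = (P * hx).hnormalized();   // divides by the last coefficient
+// \endcode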
+
+/** \geometry_module
+  *
+  * \returns an expression of the homogeneous normalized vector of \c *this
+  *
+  * Example: \include DirectionWise_hnormalized.cpp
+  * Output: \verbinclude DirectionWise_hnormalized.out
+  *
+  * \sa MatrixBase::hnormalized() */
+template<typename ExpressionType, int Direction>
+inline const typename VectorwiseOp<ExpressionType,Direction>::HNormalizedReturnType
+VectorwiseOp<ExpressionType,Direction>::hnormalized() const
+{
+  return HNormalized_Block(_expression(),0,0,
+      Direction==Vertical   ? _expression().rows()-1 : _expression().rows(),
+      Direction==Horizontal ? _expression().cols()-1 : _expression().cols()).cwiseQuotient(
+      Replicate<HNormalized_Factors,
+                Direction==Vertical   ? HNormalized_SizeMinusOne : 1,
+                Direction==Horizontal ? HNormalized_SizeMinusOne : 1>
+        (HNormalized_Factors(_expression(),
+          Direction==Vertical    ? _expression().rows()-1:0,
+          Direction==Horizontal  ? _expression().cols()-1:0,
+          Direction==Vertical    ? 1 : _expression().rows(),
+          Direction==Horizontal  ? 1 : _expression().cols()),
+         Direction==Vertical   ? _expression().rows()-1 : 1,
+         Direction==Horizontal ? _expression().cols()-1 : 1));
+}
+
+namespace internal {
+
+template<typename MatrixOrTransformType>
+struct take_matrix_for_product
+{
+  typedef MatrixOrTransformType type;
+  static const type& run(const type &x) { return x; }
+};
+
+template<typename Scalar, int Dim, int Mode,int Options>
+struct take_matrix_for_product<Transform<Scalar, Dim, Mode, Options> >
+{
+  typedef Transform<Scalar, Dim, Mode, Options> TransformType;
+  typedef typename internal::add_const<typename TransformType::ConstAffinePart>::type type;
+  static type run (const TransformType& x) { return x.affine(); }
+};
+
+template<typename Scalar, int Dim, int Options>
+struct take_matrix_for_product<Transform<Scalar, Dim, Projective, Options> >
+{
+  typedef Transform<Scalar, Dim, Projective, Options> TransformType;
+  typedef typename TransformType::MatrixType type;
+  static const type& run (const TransformType& x) { return x.matrix(); }
+};
+
+template<typename MatrixType,typename Lhs>
+struct traits<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >
+{
+  typedef typename take_matrix_for_product<Lhs>::type LhsMatrixType;
+  typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;
+  typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned;
+  typedef typename make_proper_matrix_type<
+                 typename traits<MatrixTypeCleaned>::Scalar,
+                 LhsMatrixTypeCleaned::RowsAtCompileTime,
+                 MatrixTypeCleaned::ColsAtCompileTime,
+                 MatrixTypeCleaned::PlainObject::Options,
+                 LhsMatrixTypeCleaned::MaxRowsAtCompileTime,
+                 MatrixTypeCleaned::MaxColsAtCompileTime>::type ReturnType;
+};
+
+template<typename MatrixType,typename Lhs>
+struct homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs>
+  : public ReturnByValue<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >
+{
+  typedef typename traits<homogeneous_left_product_impl>::LhsMatrixType LhsMatrixType;
+  typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned;
+  typedef typename remove_all<typename LhsMatrixTypeCleaned::Nested>::type LhsMatrixTypeNested;
+  typedef typename MatrixType::Index Index;
+  homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs)
+    : m_lhs(take_matrix_for_product<Lhs>::run(lhs)),
+      m_rhs(rhs)
+  {}
+
+  inline Index rows() const { return m_lhs.rows(); }
+  inline Index cols() const { return m_rhs.cols(); }
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    // FIXME investigate how to allow lazy evaluation of this product when possible
+    dst = Block<const LhsMatrixTypeNested,
+              LhsMatrixTypeNested::RowsAtCompileTime,
+              LhsMatrixTypeNested::ColsAtCompileTime==Dynamic?Dynamic:LhsMatrixTypeNested::ColsAtCompileTime-1>
+            (m_lhs,0,0,m_lhs.rows(),m_lhs.cols()-1) * m_rhs;
+    dst += m_lhs.col(m_lhs.cols()-1).rowwise()
+            .template replicate<MatrixType::ColsAtCompileTime>(m_rhs.cols());
+  }
+
+  typename LhsMatrixTypeCleaned::Nested m_lhs;
+  typename MatrixType::Nested m_rhs;
+};
+
+template<typename MatrixType,typename Rhs>
+struct traits<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >
+{
+  typedef typename make_proper_matrix_type<typename traits<MatrixType>::Scalar,
+                 MatrixType::RowsAtCompileTime,
+                 Rhs::ColsAtCompileTime,
+                 MatrixType::PlainObject::Options,
+                 MatrixType::MaxRowsAtCompileTime,
+                 Rhs::MaxColsAtCompileTime>::type ReturnType;
+};
+
+template<typename MatrixType,typename Rhs>
+struct homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs>
+  : public ReturnByValue<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >
+{
+  typedef typename remove_all<typename Rhs::Nested>::type RhsNested;
+  typedef typename MatrixType::Index Index;
+  homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs)
+    : m_lhs(lhs), m_rhs(rhs)
+  {}
+
+  inline Index rows() const { return m_lhs.rows(); }
+  inline Index cols() const { return m_rhs.cols(); }
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    // FIXME investigate how to allow lazy evaluation of this product when possible
+    dst = m_lhs * Block<const RhsNested,
+                        RhsNested::RowsAtCompileTime==Dynamic?Dynamic:RhsNested::RowsAtCompileTime-1,
+                        RhsNested::ColsAtCompileTime>
+            (m_rhs,0,0,m_rhs.rows()-1,m_rhs.cols());
+    dst += m_rhs.row(m_rhs.rows()-1).colwise()
+            .template replicate<MatrixType::RowsAtCompileTime>(m_lhs.rows());
+  }
+
+  typename MatrixType::Nested m_lhs;
+  typename Rhs::Nested m_rhs;
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_HOMOGENEOUS_H
diff --git a/Eigen/src/Geometry/Hyperplane.h b/Eigen/src/Geometry/Hyperplane.h
new file mode 100644
index 0000000..00b7c43
--- /dev/null
+++ b/Eigen/src/Geometry/Hyperplane.h
@@ -0,0 +1,280 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_HYPERPLANE_H
+#define EIGEN_HYPERPLANE_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Hyperplane
+  *
+  * \brief A hyperplane
+  *
+  * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n.
+  * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  *             Notice that the dimension of the hyperplane is _AmbientDim-1.
+  *
+  * This class represents a hyperplane as the zero set of the implicit equation
+  * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part)
+  * and \f$ d \f$ is the distance (offset) to the origin.
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+class Hyperplane
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)
+  enum {
+    AmbientDimAtCompileTime = _AmbientDim,
+    Options = _Options
+  };
+  typedef _Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef DenseIndex Index;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;
+  typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic
+                        ? Dynamic
+                        : Index(AmbientDimAtCompileTime)+1,1,Options> Coefficients;
+  typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;
+  typedef const Block<const Coefficients,AmbientDimAtCompileTime,1> ConstNormalReturnType;
+
+  /** Default constructor without initialization */
+  inline Hyperplane() {}
+  
+  template<int OtherOptions>
+  Hyperplane(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
+   : m_coeffs(other.coeffs())
+  {}
+
+  /** Constructs a dynamic-size hyperplane with \a _dim the dimension
+    * of the ambient space */
+  inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {}
+
+  /** Constructs a plane from its normal \a n and a point \a e on the plane.
+    * \warning The normal vector \a n is assumed to be normalized.
+    */
+  inline Hyperplane(const VectorType& n, const VectorType& e)
+    : m_coeffs(n.size()+1)
+  {
+    normal() = n;
+    offset() = -n.dot(e);
+  }
+
+  /** Constructs a plane from its normal \a n and distance to the origin \a d
+    * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$.
+    * \warning The normal vector \a n is assumed to be normalized.
+    */
+  inline Hyperplane(const VectorType& n, const Scalar& d)
+    : m_coeffs(n.size()+1)
+  {
+    normal() = n;
+    offset() = d;
+  }
+
+  /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space
+    * is greater than 2, the hyperplane is not uniquely determined, so an arbitrary choice is made.
+    */
+  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1)
+  {
+    Hyperplane result(p0.size());
+    result.normal() = (p1 - p0).unitOrthogonal();
+    result.offset() = -p0.dot(result.normal());
+    return result;
+  }
+
+  /** Constructs a hyperplane passing through the three points. The dimension of the ambient space
+    * is required to be exactly 3.
+    */
+  static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2)
+  {
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
+    Hyperplane result(p0.size());
+    VectorType v0(p2 - p0), v1(p1 - p0);
+    result.normal() = v0.cross(v1);
+    RealScalar norm = result.normal().norm();
+    if(norm <= v0.norm() * v1.norm() * NumTraits<RealScalar>::epsilon())
+    {
+      Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
+      JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
+      result.normal() = svd.matrixV().col(2);
+    }
+    else
+      result.normal() /= norm;
+    result.offset() = -p0.dot(result.normal());
+    return result;
+  }
+
+  /** Constructs a hyperplane passing through the parametrized line \a parametrized.
+    * If the dimension of the ambient space is greater than 2, the hyperplane
+    * is not uniquely determined, so an arbitrary choice is made.
+    */
+  // FIXME to be consistent with the rest, this could be implemented as a static Through function?
+  explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)
+  {
+    normal() = parametrized.direction().unitOrthogonal();
+    offset() = -parametrized.origin().dot(normal());
+  }
+
+  ~Hyperplane() {}
+
+  /** \returns the dimension of the ambient space in which the plane lives */
+  inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); }
+
+  /** normalizes \c *this */
+  void normalize(void)
+  {
+    m_coeffs /= normal().norm();
+  }
+
+  /** \returns the signed distance between the plane \c *this and a point \a p.
+    * \sa absDistance()
+    */
+  inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); }
+
+  /** \returns the absolute distance between the plane \c *this and a point \a p.
+    * \sa signedDistance()
+    */
+  inline Scalar absDistance(const VectorType& p) const { using std::abs; return abs(signedDistance(p)); }
+
+  /** \returns the projection of a point \a p onto the plane \c *this.
+    */
+  inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); }
+
+  /** \returns a constant reference to the unit normal vector of the plane, which corresponds
+    * to the linear part of the implicit equation.
+    */
+  inline ConstNormalReturnType normal() const { return ConstNormalReturnType(m_coeffs,0,0,dim(),1); }
+
+  /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds
+    * to the linear part of the implicit equation.
+    */
+  inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); }
+
+  /** \returns the distance to the origin, which is also the "constant term" of the implicit equation
+    * \warning The normal vector is assumed to be normalized.
+    */
+  inline const Scalar& offset() const { return m_coeffs.coeff(dim()); }
+
+  /** \returns a non-constant reference to the distance to the origin, which is also the constant part
+    * of the implicit equation */
+  inline Scalar& offset() { return m_coeffs(dim()); }
+
+  /** \returns a constant reference to the coefficients c_i of the plane equation:
+    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+    */
+  inline const Coefficients& coeffs() const { return m_coeffs; }
+
+  /** \returns a non-constant reference to the coefficients c_i of the plane equation:
+    * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$
+    */
+  inline Coefficients& coeffs() { return m_coeffs; }
+
+  /** \returns the intersection of *this with \a other.
+    *
+    * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines.
+    *
+    * \note If \a other is approximately parallel to *this, this method will return any point on *this.
+    */
+  VectorType intersection(const Hyperplane& other) const
+  {
+    using std::abs;
+    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
+    Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);
+    // since the line equations a*x + b*y + c = 0 are normalized with a^2+b^2=1,
+    // the following tests whether the two lines are approximately parallel.
+    if(internal::isMuchSmallerThan(det, Scalar(1)))
+    {   // special case where the two lines are approximately parallel. Pick any point on the first line.
+        if(abs(coeffs().coeff(1))>abs(coeffs().coeff(0)))
+            return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));
+        else
+            return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));
+    }
+    else
+    {   // general case
+        Scalar invdet = Scalar(1) / det;
+        return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)),
+                          invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2)));
+    }
+  }
+
+  /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this.
+    *
+    * \param mat the Dim x Dim transformation matrix
+    * \param traits specifies whether the matrix \a mat represents an #Isometry
+    *               or a more generic #Affine transformation. The default is #Affine.
+    */
+  template<typename XprType>
+  inline Hyperplane& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine)
+  {
+    if (traits==Affine)
+      normal() = mat.inverse().transpose() * normal();
+    else if (traits==Isometry)
+      normal() = mat * normal();
+    else
+    {
+      eigen_assert(0 && "invalid traits value in Hyperplane::transform()");
+    }
+    return *this;
+  }
+
+  /** Applies the transformation \a t to \c *this and returns a reference to \c *this.
+    *
+    * \param t the transformation of dimension Dim
+    * \param traits specifies whether the transformation \a t represents an #Isometry
+    *               or a more generic #Affine transformation. The default is #Affine.
+    *               Other kind of transformations are not supported.
+    */
+  template<int TrOptions>
+  inline Hyperplane& transform(const Transform<Scalar,AmbientDimAtCompileTime,Affine,TrOptions>& t,
+                                TransformTraits traits = Affine)
+  {
+    transform(t.linear(), traits);
+    offset() -= normal().dot(t.translation());
+    return *this;
+  }
+
+  /** \returns \c *this with scalar type cast to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Hyperplane,
+           Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const
+  {
+    return typename internal::cast_return_type<Hyperplane,
+                    Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType,int OtherOptions>
+  inline explicit Hyperplane(const Hyperplane<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  template<int OtherOptions>
+  bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+protected:
+
+  Coefficients m_coeffs;
+};
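+
+// Construction and query sketch (illustrative only; Plane3d is an ad-hoc
+// local typedef, not an Eigen-provided name):
+// \code
+// typedef Eigen::Hyperplane<double,3> Plane3d;
+// Plane3d plane = Plane3d::Through(Eigen::Vector3d(0,0,0),
+//                                  Eigen::Vector3d(1,0,0),
+//                                  Eigen::Vector3d(0,1,0)); // the z=0 plane
+// double d = plane.signedDistance(Eigen::Vector3d(0,0,2));
+//                         // |d| == 2; the sign follows the normal orientation
+// Eigen::Vector3d proj = plane.projection(Eigen::Vector3d(3,4,5)); // (3,4,0)
+// \endcode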
+
+} // end namespace Eigen
+
+#endif // EIGEN_HYPERPLANE_H
diff --git a/Eigen/src/Geometry/OrthoMethods.h b/Eigen/src/Geometry/OrthoMethods.h
new file mode 100644
index 0000000..556bc81
--- /dev/null
+++ b/Eigen/src/Geometry/OrthoMethods.h
@@ -0,0 +1,218 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ORTHOMETHODS_H
+#define EIGEN_ORTHOMETHODS_H
+
+namespace Eigen { 
+
+/** \geometry_module
+  *
+  * \returns the cross product of \c *this and \a other
+  *
+  * Here is a very good explanation of cross-product: http://xkcd.com/199/
+  * \sa MatrixBase::cross3()
+  */
+template<typename Derived>
+template<typename OtherDerived>
+inline typename MatrixBase<Derived>::template cross_product_return_type<OtherDerived>::type
+MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3)
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)
+
+  // Note that there is no need for an expression here since the compiler
+  // optimizes such a small temporary very well (even within a complex expression).
+  typename internal::nested<Derived,2>::type lhs(derived());
+  typename internal::nested<OtherDerived,2>::type rhs(other.derived());
+  return typename cross_product_return_type<OtherDerived>::type(
+    numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
+    numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
+    numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))
+  );
+}
+
+namespace internal {
+
+template< int Arch,typename VectorLhs,typename VectorRhs,
+          typename Scalar = typename VectorLhs::Scalar,
+          bool Vectorizable = bool((VectorLhs::Flags&VectorRhs::Flags)&PacketAccessBit)>
+struct cross3_impl {
+  static inline typename internal::plain_matrix_type<VectorLhs>::type
+  run(const VectorLhs& lhs, const VectorRhs& rhs)
+  {
+    return typename internal::plain_matrix_type<VectorLhs>::type(
+      numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),
+      numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),
+      numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),
+      0
+    );
+  }
+};
+
+}
+
+/** \geometry_module
+  *
+  * \returns the cross product of \c *this and \a other using only the x, y, and z coefficients
+  *
+  * The size of \c *this and \a other must be four. This function is especially useful
+  * when using 4D vectors instead of 3D ones to take advantage of SSE/AltiVec vectorization.
+  *
+  * \sa MatrixBase::cross()
+  */
+template<typename Derived>
+template<typename OtherDerived>
+inline typename MatrixBase<Derived>::PlainObject
+MatrixBase<Derived>::cross3(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,4)
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,4)
+
+  typedef typename internal::nested<Derived,2>::type DerivedNested;
+  typedef typename internal::nested<OtherDerived,2>::type OtherDerivedNested;
+  DerivedNested lhs(derived());
+  OtherDerivedNested rhs(other.derived());
+
+  return internal::cross3_impl<Architecture::Target,
+                        typename internal::remove_all<DerivedNested>::type,
+                        typename internal::remove_all<OtherDerivedNested>::type>::run(lhs,rhs);
+}
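+
+// A short sketch of cross3(): the operands are 4D so the computation can be
+// vectorized, and the last coefficient of the result is set to zero
+// (illustrative values):
+//
+//   Eigen::Vector4f p(1,0,0,0), q(0,1,0,0);
+//   Eigen::Vector4f r = p.cross3(q);  // xyz part == (0,0,1), w == 0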
+
+/** \returns a matrix expression of the cross product of each column or row
+  * of the referenced expression with the \a other vector.
+  *
+  * The referenced matrix must have one dimension equal to 3.
+  * The result matrix has the same dimensions as the referenced one.
+  *
+  * \geometry_module
+  *
+  * \sa MatrixBase::cross() */
+template<typename ExpressionType, int Direction>
+template<typename OtherDerived>
+const typename VectorwiseOp<ExpressionType,Direction>::CrossReturnType
+VectorwiseOp<ExpressionType,Direction>::cross(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)
+  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+  CrossReturnType res(_expression().rows(),_expression().cols());
+  if(Direction==Vertical)
+  {
+    eigen_assert(CrossReturnType::RowsAtCompileTime==3 && "the matrix must have exactly 3 rows");
+    res.row(0) = (_expression().row(1) * other.coeff(2) - _expression().row(2) * other.coeff(1)).conjugate();
+    res.row(1) = (_expression().row(2) * other.coeff(0) - _expression().row(0) * other.coeff(2)).conjugate();
+    res.row(2) = (_expression().row(0) * other.coeff(1) - _expression().row(1) * other.coeff(0)).conjugate();
+  }
+  else
+  {
+    eigen_assert(CrossReturnType::ColsAtCompileTime==3 && "the matrix must have exactly 3 columns");
+    res.col(0) = (_expression().col(1) * other.coeff(2) - _expression().col(2) * other.coeff(1)).conjugate();
+    res.col(1) = (_expression().col(2) * other.coeff(0) - _expression().col(0) * other.coeff(2)).conjugate();
+    res.col(2) = (_expression().col(0) * other.coeff(1) - _expression().col(1) * other.coeff(0)).conjugate();
+  }
+  return res;
+}
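+
+// An illustrative sketch of the column-wise cross product:
+//
+//   Eigen::Matrix3f m = Eigen::Matrix3f::Identity();
+//   Eigen::Vector3f v(0,0,1);
+//   Eigen::Matrix3f c = m.colwise().cross(v); // column i is m.col(i).cross(v)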
+
+namespace internal {
+
+template<typename Derived, int Size = Derived::SizeAtCompileTime>
+struct unitOrthogonal_selector
+{
+  typedef typename plain_matrix_type<Derived>::type VectorType;
+  typedef typename traits<Derived>::Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef typename Derived::Index Index;
+  typedef Matrix<Scalar,2,1> Vector2;
+  static inline VectorType run(const Derived& src)
+  {
+    VectorType perp = VectorType::Zero(src.size());
+    Index maxi = 0;
+    Index sndi = 0;
+    src.cwiseAbs().maxCoeff(&maxi);
+    if (maxi==0)
+      sndi = 1;
+    RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm();
+    perp.coeffRef(maxi) = -numext::conj(src.coeff(sndi)) * invnm;
+    perp.coeffRef(sndi) =  numext::conj(src.coeff(maxi)) * invnm;
+
+    return perp;
+   }
+};
+
+template<typename Derived>
+struct unitOrthogonal_selector<Derived,3>
+{
+  typedef typename plain_matrix_type<Derived>::type VectorType;
+  typedef typename traits<Derived>::Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  static inline VectorType run(const Derived& src)
+  {
+    VectorType perp;
+    /* Let us compute the cross product of *this with a vector
+     * that is not too close to being collinear to *this.
+     */
+
+    /* unless the x and y coords are both close to zero, we can
+     * simply take ( -y, x, 0 ) and normalize it.
+     */
+    if((!isMuchSmallerThan(src.x(), src.z()))
+    || (!isMuchSmallerThan(src.y(), src.z())))
+    {
+      RealScalar invnm = RealScalar(1)/src.template head<2>().norm();
+      perp.coeffRef(0) = -numext::conj(src.y())*invnm;
+      perp.coeffRef(1) = numext::conj(src.x())*invnm;
+      perp.coeffRef(2) = 0;
+    }
+    /* if both x and y are close to zero, then the vector is close
+     * to the z-axis, so it is far from collinear with the x-axis, for instance.
+     * So we take the cross product with (1,0,0) and normalize it.
+     */
+    else
+    {
+      RealScalar invnm = RealScalar(1)/src.template tail<2>().norm();
+      perp.coeffRef(0) = 0;
+      perp.coeffRef(1) = -numext::conj(src.z())*invnm;
+      perp.coeffRef(2) = numext::conj(src.y())*invnm;
+    }
+
+    return perp;
+   }
+};
+
+template<typename Derived>
+struct unitOrthogonal_selector<Derived,2>
+{
+  typedef typename plain_matrix_type<Derived>::type VectorType;
+  static inline VectorType run(const Derived& src)
+  { return VectorType(-numext::conj(src.y()), numext::conj(src.x())).normalized(); }
+};
+
+} // end namespace internal
+
+/** \returns a unit vector which is orthogonal to \c *this
+  *
+  * The size of \c *this must be at least 2. If the size is exactly 2,
+  * then the returned vector is a counter-clockwise rotation of \c *this, i.e., (-y,x).normalized().
+  *
+  * \sa cross()
+  */
+template<typename Derived>
+typename MatrixBase<Derived>::PlainObject
+MatrixBase<Derived>::unitOrthogonal() const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return internal::unitOrthogonal_selector<Derived>::run(derived());
+}
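+
+// A quick sketch of unitOrthogonal() (illustrative values):
+//
+//   Eigen::Vector3f n(0,0,2);
+//   Eigen::Vector3f t = n.unitOrthogonal(); // unit vector with t.dot(n) == 0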
+
+} // end namespace Eigen
+
+#endif // EIGEN_ORTHOMETHODS_H
diff --git a/Eigen/src/Geometry/ParametrizedLine.h b/Eigen/src/Geometry/ParametrizedLine.h
new file mode 100644
index 0000000..77fa228
--- /dev/null
+++ b/Eigen/src/Geometry/ParametrizedLine.h
@@ -0,0 +1,195 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PARAMETRIZEDLINE_H
+#define EIGEN_PARAMETRIZEDLINE_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class ParametrizedLine
+  *
+  * \brief A parametrized line
+  *
+  * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit
+  * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to
+  * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ t \in \mathbf{R} \f$.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+class ParametrizedLine
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
+  enum {
+    AmbientDimAtCompileTime = _AmbientDim,
+    Options = _Options
+  };
+  typedef _Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef DenseIndex Index;
+  typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType;
+
+  /** Default constructor without initialization */
+  inline ParametrizedLine() {}
+  
+  template<int OtherOptions>
+  ParametrizedLine(const ParametrizedLine<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)
+   : m_origin(other.origin()), m_direction(other.direction())
+  {}
+
+  /** Constructs a dynamic-size line with \a _dim the dimension
+    * of the ambient space */
+  inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {}
+
+  /** Initializes a parametrized line of direction \a direction and origin \a origin.
+    * \warning the vector direction is assumed to be normalized.
+    */
+  ParametrizedLine(const VectorType& origin, const VectorType& direction)
+    : m_origin(origin), m_direction(direction) {}
+
+  template <int OtherOptions>
+  explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane);
+
+  /** Constructs a parametrized line going from \a p0 to \a p1. */
+  static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1)
+  { return ParametrizedLine(p0, (p1-p0).normalized()); }
+
+  ~ParametrizedLine() {}
+
+  /** \returns the dimension of the ambient space in which the line lives */
+  inline Index dim() const { return m_direction.size(); }
+
+  const VectorType& origin() const { return m_origin; }
+  VectorType& origin() { return m_origin; }
+
+  const VectorType& direction() const { return m_direction; }
+  VectorType& direction() { return m_direction; }
+
+  /** \returns the squared distance of a point \a p to its projection onto the line \c *this.
+    * \sa distance()
+    */
+  RealScalar squaredDistance(const VectorType& p) const
+  {
+    VectorType diff = p - origin();
+    return (diff - direction().dot(diff) * direction()).squaredNorm();
+  }
+  /** \returns the distance of a point \a p to its projection onto the line \c *this.
+    * \sa squaredDistance()
+    */
+  RealScalar distance(const VectorType& p) const { using std::sqrt; return sqrt(squaredDistance(p)); }
+
+  /** \returns the projection of a point \a p onto the line \c *this. */
+  VectorType projection(const VectorType& p) const
+  { return origin() + direction().dot(p-origin()) * direction(); }
+
+  VectorType pointAt(const Scalar& t) const;
+  
+  template <int OtherOptions>
+  Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
+ 
+  template <int OtherOptions>
+  Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
+  
+  template <int OtherOptions>
+  VectorType intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<ParametrizedLine,
+           ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const
+  {
+    return typename internal::cast_return_type<ParametrizedLine,
+                    ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);
+  }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType,int OtherOptions>
+  inline explicit ParametrizedLine(const ParametrizedLine<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)
+  {
+    m_origin = other.origin().template cast<Scalar>();
+    m_direction = other.direction().template cast<Scalar>();
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const ParametrizedLine& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }
+
+protected:
+
+  VectorType m_origin, m_direction;
+};
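+
+// A minimal sketch of building a line and measuring point/line distances;
+// values and the single-precision type are illustrative:
+//
+//   typedef Eigen::ParametrizedLine<float,3> Line3;
+//   Line3 line = Line3::Through(Eigen::Vector3f(0,0,0), Eigen::Vector3f(2,0,0));
+//   float d = line.distance(Eigen::Vector3f(1,3,0));             // == 3
+//   Eigen::Vector3f p = line.projection(Eigen::Vector3f(1,3,0)); // == (1,0,0)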
+
+/** Constructs a parametrized line from a 2D hyperplane
+  *
+  * \warning the ambient space must have dimension 2 so that the hyperplane actually describes a line
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+template <int OtherOptions>
+inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim,OtherOptions>& hyperplane)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)
+  direction() = hyperplane.normal().unitOrthogonal();
+  origin() = -hyperplane.normal()*hyperplane.offset();
+}
+
+/** \returns the point at \a t along this line
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType
+ParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt(const _Scalar& t) const
+{
+  return origin() + (direction()*t); 
+}
+
+/** \returns the parameter value of the intersection between \c *this and the given \a hyperplane
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+template <int OtherOptions>
+inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
+{
+  return -(hyperplane.offset()+hyperplane.normal().dot(origin()))
+          / hyperplane.normal().dot(direction());
+}
+
+
+/** \deprecated use intersectionParameter()
+  * \returns the parameter value of the intersection between \c *this and the given \a hyperplane
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+template <int OtherOptions>
+inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
+{
+  return intersectionParameter(hyperplane);
+}
+
+/** \returns the point of the intersection between \c *this and the given hyperplane
+  */
+template <typename _Scalar, int _AmbientDim, int _Options>
+template <int OtherOptions>
+inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType
+ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const
+{
+  return pointAt(intersectionParameter(hyperplane));
+}
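+
+// An illustrative sketch of intersecting a line with a hyperplane:
+//
+//   Eigen::ParametrizedLine<float,3> line(Eigen::Vector3f::Zero(),
+//                                         Eigen::Vector3f::UnitZ());
+//   Eigen::Hyperplane<float,3> plane(Eigen::Vector3f::UnitZ(), -1.f); // z == 1
+//   float t = line.intersectionParameter(plane);       // == 1
+//   Eigen::Vector3f x = line.intersectionPoint(plane); // == (0,0,1)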
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARAMETRIZEDLINE_H
diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h
new file mode 100644
index 0000000..93056f6
--- /dev/null
+++ b/Eigen/src/Geometry/Quaternion.h
@@ -0,0 +1,776 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Mathieu Gautier <mathieu.gautier@cea.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_QUATERNION_H
+#define EIGEN_QUATERNION_H
+namespace Eigen { 
+
+
+/***************************************************************************
+* Definition of QuaternionBase<Derived>
+* The implementation is at the end of the file
+***************************************************************************/
+
+namespace internal {
+template<typename Other,
+         int OtherRows=Other::RowsAtCompileTime,
+         int OtherCols=Other::ColsAtCompileTime>
+struct quaternionbase_assign_impl;
+}
+
+/** \geometry_module \ingroup Geometry_Module
+  * \class QuaternionBase
+  * \brief Base class for quaternion expressions
+  * \tparam Derived derived type (CRTP)
+  * \sa class Quaternion
+  */
+template<class Derived>
+class QuaternionBase : public RotationBase<Derived, 3>
+{
+  typedef RotationBase<Derived, 3> Base;
+public:
+  using Base::operator*;
+  using Base::derived;
+
+  typedef typename internal::traits<Derived>::Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef typename internal::traits<Derived>::Coefficients Coefficients;
+  enum {
+    Flags = Eigen::internal::traits<Derived>::Flags
+  };
+
+ // typedef typename Matrix<Scalar,4,1> Coefficients;
+  /** the type of a 3D vector */
+  typedef Matrix<Scalar,3,1> Vector3;
+  /** the equivalent rotation matrix type */
+  typedef Matrix<Scalar,3,3> Matrix3;
+  /** the equivalent angle-axis type */
+  typedef AngleAxis<Scalar> AngleAxisType;
+
+
+
+  /** \returns the \c x coefficient */
+  inline Scalar x() const { return this->derived().coeffs().coeff(0); }
+  /** \returns the \c y coefficient */
+  inline Scalar y() const { return this->derived().coeffs().coeff(1); }
+  /** \returns the \c z coefficient */
+  inline Scalar z() const { return this->derived().coeffs().coeff(2); }
+  /** \returns the \c w coefficient */
+  inline Scalar w() const { return this->derived().coeffs().coeff(3); }
+
+  /** \returns a reference to the \c x coefficient */
+  inline Scalar& x() { return this->derived().coeffs().coeffRef(0); }
+  /** \returns a reference to the \c y coefficient */
+  inline Scalar& y() { return this->derived().coeffs().coeffRef(1); }
+  /** \returns a reference to the \c z coefficient */
+  inline Scalar& z() { return this->derived().coeffs().coeffRef(2); }
+  /** \returns a reference to the \c w coefficient */
+  inline Scalar& w() { return this->derived().coeffs().coeffRef(3); }
+
+  /** \returns a read-only vector expression of the imaginary part (x,y,z) */
+  inline const VectorBlock<const Coefficients,3> vec() const { return coeffs().template head<3>(); }
+
+  /** \returns a vector expression of the imaginary part (x,y,z) */
+  inline VectorBlock<Coefficients,3> vec() { return coeffs().template head<3>(); }
+
+  /** \returns a read-only vector expression of the coefficients (x,y,z,w) */
+  inline const typename internal::traits<Derived>::Coefficients& coeffs() const { return derived().coeffs(); }
+
+  /** \returns a vector expression of the coefficients (x,y,z,w) */
+  inline typename internal::traits<Derived>::Coefficients& coeffs() { return derived().coeffs(); }
+
+  EIGEN_STRONG_INLINE QuaternionBase<Derived>& operator=(const QuaternionBase<Derived>& other);
+  template<class OtherDerived> EIGEN_STRONG_INLINE Derived& operator=(const QuaternionBase<OtherDerived>& other);
+
+// disabled this copy operator as it is giving very strange compilation errors when compiling
+// test_stdvector with GCC 4.4.2. This looks like a GCC bug though, so feel free to re-enable it if it's
+// useful; however notice that we already have the templated operator= above and e.g. in MatrixBase
+// we didn't have to add, in addition to templated operator=, such a non-templated copy operator.
+//  Derived& operator=(const QuaternionBase& other)
+//  { return operator=<Derived>(other); }
+
+  Derived& operator=(const AngleAxisType& aa);
+  template<class OtherDerived> Derived& operator=(const MatrixBase<OtherDerived>& m);
+
+  /** \returns a quaternion representing an identity rotation
+    * \sa MatrixBase::Identity()
+    */
+  static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(1, 0, 0, 0); }
+
+  /** \sa QuaternionBase::Identity(), MatrixBase::setIdentity()
+    */
+  inline QuaternionBase& setIdentity() { coeffs() << 0, 0, 0, 1; return *this; }
+
+  /** \returns the squared norm of the quaternion's coefficients
+    * \sa QuaternionBase::norm(), MatrixBase::squaredNorm()
+    */
+  inline Scalar squaredNorm() const { return coeffs().squaredNorm(); }
+
+  /** \returns the norm of the quaternion's coefficients
+    * \sa QuaternionBase::squaredNorm(), MatrixBase::norm()
+    */
+  inline Scalar norm() const { return coeffs().norm(); }
+
+  /** Normalizes the quaternion \c *this
+    * \sa normalized(), MatrixBase::normalize() */
+  inline void normalize() { coeffs().normalize(); }
+  /** \returns a normalized copy of \c *this
+    * \sa normalize(), MatrixBase::normalized() */
+  inline Quaternion<Scalar> normalized() const { return Quaternion<Scalar>(coeffs().normalized()); }
+
+  /** \returns the dot product of \c *this and \a other
+    * Geometrically speaking, the dot product of two unit quaternions
+    * corresponds to the cosine of half the angle between the two rotations.
+    * \sa angularDistance()
+    */
+  template<class OtherDerived> inline Scalar dot(const QuaternionBase<OtherDerived>& other) const { return coeffs().dot(other.coeffs()); }
+
+  template<class OtherDerived> Scalar angularDistance(const QuaternionBase<OtherDerived>& other) const;
+
+  /** \returns an equivalent 3x3 rotation matrix */
+  Matrix3 toRotationMatrix() const;
+
+  /** \returns the quaternion which transforms \a a into \a b through a rotation */
+  template<typename Derived1, typename Derived2>
+  Derived& setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
+
+  template<class OtherDerived> EIGEN_STRONG_INLINE Quaternion<Scalar> operator* (const QuaternionBase<OtherDerived>& q) const;
+  template<class OtherDerived> EIGEN_STRONG_INLINE Derived& operator*= (const QuaternionBase<OtherDerived>& q);
+
+  /** \returns the quaternion describing the inverse rotation */
+  Quaternion<Scalar> inverse() const;
+
+  /** \returns the conjugated quaternion */
+  Quaternion<Scalar> conjugate() const;
+
+  template<class OtherDerived> Quaternion<Scalar> slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const;
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  template<class OtherDerived>
+  bool isApprox(const QuaternionBase<OtherDerived>& other, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
+  { return coeffs().isApprox(other.coeffs(), prec); }
+
+  /** \returns the vector \a v rotated by \c *this */
+  EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const;
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type cast() const
+  {
+    return typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type(derived());
+  }
+
+#ifdef EIGEN_QUATERNIONBASE_PLUGIN
+# include EIGEN_QUATERNIONBASE_PLUGIN
+#endif
+};
+
+/***************************************************************************
+* Definition/implementation of Quaternion<Scalar>
+***************************************************************************/
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Quaternion
+  *
+  * \brief The quaternion class used to represent 3D orientations and rotations
+  *
+  * \tparam _Scalar the scalar type, i.e., the type of the coefficients
+  * \tparam _Options controls the memory alignment of the coefficients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign.
+  *
+  * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
+  * orientations and rotations of objects in three dimensions. Compared to other representations
+  * like Euler angles or 3x3 matrices, quaternions offer the following advantages:
+  * \li \b compact storage (4 scalars)
+  * \li \b efficient to compose (28 flops),
+  * \li \b stable spherical interpolation
+  *
+  * The following two typedefs are provided for convenience:
+  * \li \c Quaternionf for \c float
+  * \li \c Quaterniond for \c double
+  *
+  * \warning Operations interpreting the quaternion as rotation have undefined behavior if the quaternion is not normalized.
+  *
+  * \sa  class AngleAxis, class Transform
+  */
+
+namespace internal {
+template<typename _Scalar,int _Options>
+struct traits<Quaternion<_Scalar,_Options> >
+{
+  typedef Quaternion<_Scalar,_Options> PlainObject;
+  typedef _Scalar Scalar;
+  typedef Matrix<_Scalar,4,1,_Options> Coefficients;
+  enum{
+    IsAligned = internal::traits<Coefficients>::Flags & AlignedBit,
+    Flags = IsAligned ? (AlignedBit | LvalueBit) : LvalueBit
+  };
+};
+}
+
+template<typename _Scalar, int _Options>
+class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >
+{
+  typedef QuaternionBase<Quaternion<_Scalar,_Options> > Base;
+  enum { IsAligned = internal::traits<Quaternion>::IsAligned };
+
+public:
+  typedef _Scalar Scalar;
+
+  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion)
+  using Base::operator*=;
+
+  typedef typename internal::traits<Quaternion>::Coefficients Coefficients;
+  typedef typename Base::AngleAxisType AngleAxisType;
+
+  /** Default constructor leaving the quaternion uninitialized. */
+  inline Quaternion() {}
+
+  /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from
+    * its four coefficients \a w, \a x, \a y and \a z.
+    *
+    * \warning Note the order of the arguments: the real \a w coefficient first,
+    * while internally the coefficients are stored in the following order:
+    * [\c x, \c y, \c z, \c w]
+    */
+  inline Quaternion(const Scalar& w, const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, w){}
+
+  /** Constructs and initialize a quaternion from the array data */
+  inline Quaternion(const Scalar* data) : m_coeffs(data) {}
+
+  /** Copy constructor */
+  template<class Derived> EIGEN_STRONG_INLINE Quaternion(const QuaternionBase<Derived>& other) { this->Base::operator=(other); }
+
+  /** Constructs and initializes a quaternion from the angle-axis \a aa */
+  explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; }
+
+  /** Constructs and initializes a quaternion from either:
+    *  - a rotation matrix expression,
+    *  - a 4D vector expression representing quaternion coefficients.
+    */
+  template<typename Derived>
+  explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; }
+
+  /** Explicit copy constructor with scalar conversion */
+  template<typename OtherScalar, int OtherOptions>
+  explicit inline Quaternion(const Quaternion<OtherScalar, OtherOptions>& other)
+  { m_coeffs = other.coeffs().template cast<Scalar>(); }
+
+  template<typename Derived1, typename Derived2>
+  static Quaternion FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
+
+  inline Coefficients& coeffs() { return m_coeffs;}
+  inline const Coefficients& coeffs() const { return m_coeffs;}
+
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(IsAligned)
+
+protected:
+  Coefficients m_coeffs;
+  
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    static EIGEN_STRONG_INLINE void _check_template_params()
+    {
+      EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options,
+        INVALID_MATRIX_TEMPLATE_PARAMETERS)
+    }
+#endif
+};
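+
+// A minimal construction sketch; note the (w,x,y,z) argument order of the
+// constructor versus the internal (x,y,z,w) storage (values are illustrative):
+//
+//   Eigen::Quaternionf q(1.f, 0.f, 0.f, 0.f);  // identity: w=1, x=y=z=0
+//   Eigen::Quaternionf r(Eigen::AngleAxisf(1.5707963f,   // pi/2 about Z
+//                                          Eigen::Vector3f::UnitZ()));
+//   Eigen::Vector3f v = r * Eigen::Vector3f::UnitX();    // approximately (0,1,0)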
+
+/** \ingroup Geometry_Module
+  * single precision quaternion type */
+typedef Quaternion<float> Quaternionf;
+/** \ingroup Geometry_Module
+  * double precision quaternion type */
+typedef Quaternion<double> Quaterniond;
+
+/***************************************************************************
+* Specialization of Map<Quaternion<Scalar>>
+***************************************************************************/
+
+namespace internal {
+  template<typename _Scalar, int _Options>
+  struct traits<Map<Quaternion<_Scalar>, _Options> > : traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> >
+  {
+    typedef Map<Matrix<_Scalar,4,1>, _Options> Coefficients;
+  };
+}
+
+namespace internal {
+  template<typename _Scalar, int _Options>
+  struct traits<Map<const Quaternion<_Scalar>, _Options> > : traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> >
+  {
+    typedef Map<const Matrix<_Scalar,4,1>, _Options> Coefficients;
+    typedef traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> > TraitsBase;
+    enum {
+      Flags = TraitsBase::Flags & ~LvalueBit
+    };
+  };
+}
+
+/** \ingroup Geometry_Module
+  * \brief Quaternion expression mapping a constant memory buffer
+  *
+  * \tparam _Scalar the type of the Quaternion coefficients
+  * \tparam _Options see class Map
+  *
+  * This is a specialization of class Map for Quaternion. It allows one to view
+  * a four-scalar memory buffer as an Eigen Quaternion object.
+  *
+  * \sa class Map, class Quaternion, class QuaternionBase
+  */
+template<typename _Scalar, int _Options>
+class Map<const Quaternion<_Scalar>, _Options >
+  : public QuaternionBase<Map<const Quaternion<_Scalar>, _Options> >
+{
+    typedef QuaternionBase<Map<const Quaternion<_Scalar>, _Options> > Base;
+
+  public:
+    typedef _Scalar Scalar;
+    typedef typename internal::traits<Map>::Coefficients Coefficients;
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
+    using Base::operator*=;
+
+    /** Constructs a Mapped Quaternion object from the pointer \a coeffs
+      *
+      * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order:
+      * \code *coeffs == {x, y, z, w} \endcode
+      *
+      * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
+    EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {}
+
+    inline const Coefficients& coeffs() const { return m_coeffs;}
+
+  protected:
+    const Coefficients m_coeffs;
+};
+
+/** \ingroup Geometry_Module
+  * \brief Expression of a quaternion from a memory buffer
+  *
+  * \tparam _Scalar the type of the Quaternion coefficients
+  * \tparam _Options see class Map
+  *
+  * This is a specialization of class Map for Quaternion. It allows one to view
+  * a four-scalar memory buffer as an Eigen Quaternion object.
+  *
+  * \sa class Map, class Quaternion, class QuaternionBase
+  */
+template<typename _Scalar, int _Options>
+class Map<Quaternion<_Scalar>, _Options >
+  : public QuaternionBase<Map<Quaternion<_Scalar>, _Options> >
+{
+    typedef QuaternionBase<Map<Quaternion<_Scalar>, _Options> > Base;
+
+  public:
+    typedef _Scalar Scalar;
+    typedef typename internal::traits<Map>::Coefficients Coefficients;
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
+    using Base::operator*=;
+
+    /** Constructs a Mapped Quaternion object from the pointer \a coeffs
+      *
+      * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order:
+      * \code *coeffs == {x, y, z, w} \endcode
+      *
+      * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */
+    EIGEN_STRONG_INLINE Map(Scalar* coeffs) : m_coeffs(coeffs) {}
+
+    inline Coefficients& coeffs() { return m_coeffs; }
+    inline const Coefficients& coeffs() const { return m_coeffs; }
+
+  protected:
+    Coefficients m_coeffs;
+};
+
+/** \ingroup Geometry_Module
+  * Map an unaligned array of single precision scalars as a quaternion */
+typedef Map<Quaternion<float>, 0>         QuaternionMapf;
+/** \ingroup Geometry_Module
+  * Map an unaligned array of double precision scalars as a quaternion */
+typedef Map<Quaternion<double>, 0>        QuaternionMapd;
+/** \ingroup Geometry_Module
+  * Map a 16-byte aligned array of single precision scalars as a quaternion */
+typedef Map<Quaternion<float>, Aligned>   QuaternionMapAlignedf;
+/** \ingroup Geometry_Module
+  * Map a 16-byte aligned array of double precision scalars as a quaternion */
+typedef Map<Quaternion<double>, Aligned>  QuaternionMapAlignedd;
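+
+// A small sketch of viewing a raw buffer as a quaternion; the buffer stores
+// the coefficients in (x,y,z,w) order and is modified in place (illustrative):
+//
+//   float buf[4] = {0.f, 0.f, 0.f, 2.f}; // x, y, z, w
+//   Eigen::QuaternionMapf q(buf);        // a view, no copy
+//   q.normalize();                       // buf[3] becomes 1.f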
+
+/***************************************************************************
+* Implementation of QuaternionBase methods
+***************************************************************************/
+
+// Generic Quaternion * Quaternion product
+// This product can be specialized for a given architecture via the Arch template argument.
+namespace internal {
+template<int Arch, class Derived1, class Derived2, typename Scalar, int _Options> struct quat_product
+{
+  static EIGEN_STRONG_INLINE Quaternion<Scalar> run(const QuaternionBase<Derived1>& a, const QuaternionBase<Derived2>& b){
+    return Quaternion<Scalar>
+    (
+      a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),
+      a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),
+      a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),
+      a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x()
+    );
+  }
+};
+}
+
+/** \returns the concatenation of two rotations as a quaternion-quaternion product */
+template <class Derived>
+template <class OtherDerived>
+EIGEN_STRONG_INLINE Quaternion<typename internal::traits<Derived>::Scalar>
+QuaternionBase<Derived>::operator* (const QuaternionBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename OtherDerived::Scalar>::value),
+   YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+  return internal::quat_product<Architecture::Target, Derived, OtherDerived,
+                         typename internal::traits<Derived>::Scalar,
+                         internal::traits<Derived>::IsAligned && internal::traits<OtherDerived>::IsAligned>::run(*this, other);
+}
+
+/** \sa operator*(Quaternion) */
+template <class Derived>
+template <class OtherDerived>
+EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const QuaternionBase<OtherDerived>& other)
+{
+  derived() = derived() * other.derived();
+  return derived();
+}
+
+/** Rotation of a vector by a quaternion.
+  * \remarks If the quaternion is used to rotate several points (>1),
+  * then it is much more efficient to first convert it to a 3x3 Matrix.
+  * Comparison of the operation cost for n transformations:
+  *   - via the Quaternion: 30n
+  *   - via a Matrix3:      24 + 15n
+  */
+template <class Derived>
+EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3
+QuaternionBase<Derived>::_transformVector(const Vector3& v) const
+{
+    // Note that this algorithm comes from the optimization by hand
+    // of the conversion to a Matrix followed by a Matrix/Vector product.
+    // It appears to be much faster than the common algorithm found
+    // in the literature (30 versus 39 flops). It also requires two
+    // Vector3 as temporaries.
+    Vector3 uv = this->vec().cross(v);
+    uv += uv;
+    return v + this->w() * uv + this->vec().cross(uv);
+}
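+
+// A sketch of the remark above: to rotate many points, convert once to a
+// rotation matrix and reuse it (illustrative values):
+//
+//   Eigen::Quaternionf q(Eigen::AngleAxisf(1.f, Eigen::Vector3f::UnitY()));
+//   Eigen::Matrix3f R = q.toRotationMatrix();        // pay the conversion once
+//   Eigen::Vector3f p0 = R * Eigen::Vector3f(1,2,3);
+//   Eigen::Vector3f p1 = R * Eigen::Vector3f(4,5,6); // 15 flops per point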
+
+template<class Derived>
+EIGEN_STRONG_INLINE QuaternionBase<Derived>& QuaternionBase<Derived>::operator=(const QuaternionBase<Derived>& other)
+{
+  coeffs() = other.coeffs();
+  return derived();
+}
+
+template<class Derived>
+template<class OtherDerived>
+EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const QuaternionBase<OtherDerived>& other)
+{
+  coeffs() = other.coeffs();
+  return derived();
+}
+
+/** Set \c *this from an angle-axis \a aa and returns a reference to \c *this
+  */
+template<class Derived>
+EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const AngleAxisType& aa)
+{
+  using std::cos;
+  using std::sin;
+  Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings
+  this->w() = cos(ha);
+  this->vec() = sin(ha) * aa.axis();
+  return derived();
+}
+
+/** Set \c *this from the expression \a xpr:
+  *   - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion
+  *   - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be a rotation matrix
+  *     and \a xpr is converted to a quaternion
+  */
+
+template<class Derived>
+template<class MatrixDerived>
+inline Derived& QuaternionBase<Derived>::operator=(const MatrixBase<MatrixDerived>& xpr)
+{
+  EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename MatrixDerived::Scalar>::value),
+   YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+  internal::quaternionbase_assign_impl<MatrixDerived>::run(*this, xpr.derived());
+  return derived();
+}
+
+/** Convert the quaternion to a 3x3 rotation matrix. The quaternion is required to
+  * be normalized, otherwise the result is undefined.
+  */
+template<class Derived>
+inline typename QuaternionBase<Derived>::Matrix3
+QuaternionBase<Derived>::toRotationMatrix(void) const
+{
+  // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!)
+  // if not inlined then the cost of the return by value is huge ~ +35%,
+  // however, not inlining this function is an order of magnitude slower, so
+  // it has to be inlined, and so the return by value is not an issue
+  Matrix3 res;
+
+  const Scalar tx  = Scalar(2)*this->x();
+  const Scalar ty  = Scalar(2)*this->y();
+  const Scalar tz  = Scalar(2)*this->z();
+  const Scalar twx = tx*this->w();
+  const Scalar twy = ty*this->w();
+  const Scalar twz = tz*this->w();
+  const Scalar txx = tx*this->x();
+  const Scalar txy = ty*this->x();
+  const Scalar txz = tz*this->x();
+  const Scalar tyy = ty*this->y();
+  const Scalar tyz = tz*this->y();
+  const Scalar tzz = tz*this->z();
+
+  res.coeffRef(0,0) = Scalar(1)-(tyy+tzz);
+  res.coeffRef(0,1) = txy-twz;
+  res.coeffRef(0,2) = txz+twy;
+  res.coeffRef(1,0) = txy+twz;
+  res.coeffRef(1,1) = Scalar(1)-(txx+tzz);
+  res.coeffRef(1,2) = tyz-twx;
+  res.coeffRef(2,0) = txz-twy;
+  res.coeffRef(2,1) = tyz+twx;
+  res.coeffRef(2,2) = Scalar(1)-(txx+tyy);
+
+  return res;
+}
+
+/** Sets \c *this to be a quaternion representing a rotation between
+  * the two arbitrary vectors \a a and \a b. In other words, the built
+  * rotation represents a rotation sending the line of direction \a a
+  * to the line of direction \a b, both lines passing through the origin.
+  *
+  * \returns a reference to \c *this.
+  *
+  * Note that the two input vectors do \b not have to be normalized, and
+  * do not need to have the same norm.
+  */
+template<class Derived>
+template<typename Derived1, typename Derived2>
+inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
+{
+  using std::max;
+  using std::sqrt;
+  Vector3 v0 = a.normalized();
+  Vector3 v1 = b.normalized();
+  Scalar c = v1.dot(v0);
+
+  // if dot == -1, vectors are nearly opposites
+  // => accurately compute the rotation axis by computing the
+  //    intersection of the two planes. This is done by solving:
+  //       x^T v0 = 0
+  //       x^T v1 = 0
+  //    under the constraint:
+  //       ||x|| = 1
+  //    which yields a singular value problem
+  if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
+  {
+    c = (max)(c,Scalar(-1));
+    Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
+    JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
+    Vector3 axis = svd.matrixV().col(2);
+
+    Scalar w2 = (Scalar(1)+c)*Scalar(0.5);
+    this->w() = sqrt(w2);
+    this->vec() = axis * sqrt(Scalar(1) - w2);
+    return derived();
+  }
+  Vector3 axis = v0.cross(v1);
+  Scalar s = sqrt((Scalar(1)+c)*Scalar(2));
+  Scalar invs = Scalar(1)/s;
+  this->vec() = axis * invs;
+  this->w() = s * Scalar(0.5);
+
+  return derived();
+}
+
+
+/** Returns a quaternion representing a rotation between
+  * the two arbitrary vectors \a a and \a b. In other words, the built
+  * rotation represents a rotation sending the line of direction \a a
+  * to the line of direction \a b, both lines passing through the origin.
+  *
+  * \returns resulting quaternion
+  *
+  * Note that the two input vectors do \b not have to be normalized, and
+  * do not need to have the same norm.
+  */
+template<typename Scalar, int Options>
+template<typename Derived1, typename Derived2>
+Quaternion<Scalar,Options> Quaternion<Scalar,Options>::FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
+{
+    Quaternion quat;
+    quat.setFromTwoVectors(a, b);
+    return quat;
+}
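+
+// A minimal sketch of FromTwoVectors(); the inputs need not be normalized
+// (illustrative values):
+//
+//   Eigen::Vector3f a(2,0,0), b(0,3,0);
+//   Eigen::Quaternionf q = Eigen::Quaternionf::FromTwoVectors(a, b);
+//   // (q * a).isApprox(2.f * b.normalized()), i.e. a's direction maps onto b's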
+
+
+/** \returns the multiplicative inverse of \c *this
+  * Note that in most cases, i.e., if you simply want the opposite rotation,
+  * and/or the quaternion is normalized, then it is enough to use the conjugate.
+  *
+  * \sa QuaternionBase::conjugate()
+  */
+template <class Derived>
+inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Derived>::inverse() const
+{
+  // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite()  ??
+  Scalar n2 = this->squaredNorm();
+  if (n2 > Scalar(0))
+    return Quaternion<Scalar>(conjugate().coeffs() / n2);
+  else
+  {
+    // return an invalid result to flag the error
+    return Quaternion<Scalar>(Coefficients::Zero());
+  }
+}
+
+/** \returns the conjugate of \c *this, which is equal to the multiplicative inverse
+  * if the quaternion is normalized.
+  * The conjugate of a quaternion represents the opposite rotation.
+  *
+  * \sa QuaternionBase::inverse()
+  */
+template <class Derived>
+inline Quaternion<typename internal::traits<Derived>::Scalar>
+QuaternionBase<Derived>::conjugate() const
+{
+  return Quaternion<Scalar>(this->w(),-this->x(),-this->y(),-this->z());
+}
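+
+// A quick sketch: for a unit quaternion the conjugate is the inverse
+// (illustrative values):
+//
+//   Eigen::Quaternionf q(Eigen::AngleAxisf(0.3f, Eigen::Vector3f::UnitY()));
+//   Eigen::Quaternionf qc = q.conjugate(); // same as q.inverse() here
+//   // (qc * q).isApprox(Eigen::Quaternionf::Identity())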
+
+/** \returns the angle (in radians) between two rotations
+  * \sa dot()
+  */
+template <class Derived>
+template <class OtherDerived>
+inline typename internal::traits<Derived>::Scalar
+QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
+{
+  using std::atan2;
+  using std::abs;
+  Quaternion<Scalar> d = (*this) * other.conjugate();
+  return Scalar(2) * atan2( d.vec().norm(), abs(d.w()) );
+}
+
+/** \returns the spherical linear interpolation between the two quaternions
+  * \c *this and \a other at the parameter \a t in [0;1].
+  * 
+  * This represents an interpolation at constant angular velocity between \c *this and \a other;
+  * see also http://en.wikipedia.org/wiki/Slerp.
+  */
+template <class Derived>
+template <class OtherDerived>
+Quaternion<typename internal::traits<Derived>::Scalar>
+QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const
+{
+  using std::acos;
+  using std::sin;
+  using std::abs;
+  static const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon();
+  Scalar d = this->dot(other);
+  Scalar absD = abs(d);
+
+  Scalar scale0;
+  Scalar scale1;
+
+  if(absD>=one)
+  {
+    scale0 = Scalar(1) - t;
+    scale1 = t;
+  }
+  else
+  {
+    // theta is the angle between the 2 quaternions
+    Scalar theta = acos(absD);
+    Scalar sinTheta = sin(theta);
+
+    scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;
+    scale1 = sin( ( t * theta) ) / sinTheta;
+  }
+  if(d<Scalar(0)) scale1 = -scale1;
+
+  return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
+}
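+
+// An illustrative sketch of slerp(); t == 0 yields *this and t == 1 yields
+// \a other:
+//
+//   Eigen::Quaternionf a = Eigen::Quaternionf::Identity();
+//   Eigen::Quaternionf b(Eigen::AngleAxisf(3.1415927f,   // half turn about Z
+//                                          Eigen::Vector3f::UnitZ()));
+//   Eigen::Quaternionf mid = a.slerp(0.5f, b);           // quarter turn about Z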
+
+namespace internal {
+
+// set from a rotation matrix
+template<typename Other>
+struct quaternionbase_assign_impl<Other,3,3>
+{
+  typedef typename Other::Scalar Scalar;
+  typedef DenseIndex Index;
+  template<class Derived> static inline void run(QuaternionBase<Derived>& q, const Other& mat)
+  {
+    using std::sqrt;
+    // This algorithm comes from  "Quaternion Calculus and Fast Animation",
+    // Ken Shoemake, 1987 SIGGRAPH course notes
+    Scalar t = mat.trace();
+    if (t > Scalar(0))
+    {
+      t = sqrt(t + Scalar(1.0));
+      q.w() = Scalar(0.5)*t;
+      t = Scalar(0.5)/t;
+      q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t;
+      q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t;
+      q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t;
+    }
+    else
+    {
+      DenseIndex i = 0;
+      if (mat.coeff(1,1) > mat.coeff(0,0))
+        i = 1;
+      if (mat.coeff(2,2) > mat.coeff(i,i))
+        i = 2;
+      DenseIndex j = (i+1)%3;
+      DenseIndex k = (j+1)%3;
+
+      t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));
+      q.coeffs().coeffRef(i) = Scalar(0.5) * t;
+      t = Scalar(0.5)/t;
+      q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t;
+      q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t;
+      q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t;
+    }
+  }
+};
+
+// set from a vector of coefficients assumed to be a quaternion
+template<typename Other>
+struct quaternionbase_assign_impl<Other,4,1>
+{
+  typedef typename Other::Scalar Scalar;
+  template<class Derived> static inline void run(QuaternionBase<Derived>& q, const Other& vec)
+  {
+    q.coeffs() = vec;
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_QUATERNION_H
diff --git a/Eigen/src/Geometry/Rotation2D.h b/Eigen/src/Geometry/Rotation2D.h
new file mode 100644
index 0000000..a2d59fc
--- /dev/null
+++ b/Eigen/src/Geometry/Rotation2D.h
@@ -0,0 +1,160 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ROTATION2D_H
+#define EIGEN_ROTATION2D_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Rotation2D
+  *
+  * \brief Represents a rotation/orientation in a 2 dimensional space.
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients
+  *
+  * This class is equivalent to a single scalar representing a counter-clockwise rotation
+  * as a single angle in radians. It provides some additional features such as the automatic
+  * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar
+  * interface to Quaternion in order to facilitate the writing of generic algorithms
+  * dealing with rotations.
+  *
+  * \sa class Quaternion, class Transform
+  */
+
+namespace internal {
+
+template<typename _Scalar> struct traits<Rotation2D<_Scalar> >
+{
+  typedef _Scalar Scalar;
+};
+} // end namespace internal
+
+template<typename _Scalar>
+class Rotation2D : public RotationBase<Rotation2D<_Scalar>,2>
+{
+  typedef RotationBase<Rotation2D<_Scalar>,2> Base;
+
+public:
+
+  using Base::operator*;
+
+  enum { Dim = 2 };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef Matrix<Scalar,2,1> Vector2;
+  typedef Matrix<Scalar,2,2> Matrix2;
+
+protected:
+
+  Scalar m_angle;
+
+public:
+
+  /** Constructs a 2D counter-clockwise rotation from the angle \a a in radians. */
+  inline Rotation2D(const Scalar& a) : m_angle(a) {}
+  
+  /** Default constructor without initialization. The represented rotation is undefined. */
+  Rotation2D() {}
+
+  /** \returns the rotation angle */
+  inline Scalar angle() const { return m_angle; }
+
+  /** \returns a read-write reference to the rotation angle */
+  inline Scalar& angle() { return m_angle; }
+
+  /** \returns the inverse rotation */
+  inline Rotation2D inverse() const { return -m_angle; }
+
+  /** Concatenates two rotations */
+  inline Rotation2D operator*(const Rotation2D& other) const
+  { return m_angle + other.m_angle; }
+
+  /** Concatenates two rotations */
+  inline Rotation2D& operator*=(const Rotation2D& other)
+  { m_angle += other.m_angle; return *this; }
+
+  /** Applies the rotation to a 2D vector */
+  Vector2 operator* (const Vector2& vec) const
+  { return toRotationMatrix() * vec; }
+  
+  template<typename Derived>
+  Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
+  Matrix2 toRotationMatrix() const;
+
+  /** \returns the spherical interpolation between \c *this and \a other using
+    * parameter \a t. It is in fact equivalent to a linear interpolation.
+    */
+  inline Rotation2D slerp(const Scalar& t, const Rotation2D& other) const
+  { return m_angle * (1-t) + other.angle() * t; }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const
+  { return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Rotation2D(const Rotation2D<OtherScalarType>& other)
+  {
+    m_angle = Scalar(other.angle());
+  }
+
+  static inline Rotation2D Identity() { return Rotation2D(0); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Rotation2D& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
+  { return internal::isApprox(m_angle,other.m_angle, prec); }
+};
+
+/** \ingroup Geometry_Module
+  * single precision 2D rotation type */
+typedef Rotation2D<float> Rotation2Df;
+/** \ingroup Geometry_Module
+  * double precision 2D rotation type */
+typedef Rotation2D<double> Rotation2Dd;
+
+/** Sets \c *this from a 2x2 rotation matrix \a mat.
+  * In other words, this function extracts the rotation angle
+  * from the rotation matrix.
+  */
+template<typename Scalar>
+template<typename Derived>
+Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
+{
+  using std::atan2;
+  EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_angle = atan2(mat.coeff(1,0), mat.coeff(0,0));
+  return *this;
+}
+
+/** Constructs and \returns an equivalent 2x2 rotation matrix.
+  */
+template<typename Scalar>
+typename Rotation2D<Scalar>::Matrix2
+Rotation2D<Scalar>::toRotationMatrix(void) const
+{
+  using std::sin;
+  using std::cos;
+  Scalar sinA = sin(m_angle);
+  Scalar cosA = cos(m_angle);
+  return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
+}
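+
+// A small sketch of Rotation2D round-tripping through a matrix
+// (illustrative values):
+//
+//   Eigen::Rotation2Df rot(0.7853982f);                   // 45 degrees CCW
+//   Eigen::Vector2f v = rot * Eigen::Vector2f::UnitX();
+//   Eigen::Matrix2f R = rot.toRotationMatrix();
+//   Eigen::Rotation2Df back; back.fromRotationMatrix(R);  // recovers the angle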
+
+} // end namespace Eigen
+
+#endif // EIGEN_ROTATION2D_H
diff --git a/Eigen/src/Geometry/RotationBase.h b/Eigen/src/Geometry/RotationBase.h
new file mode 100644
index 0000000..b88661d
--- /dev/null
+++ b/Eigen/src/Geometry/RotationBase.h
@@ -0,0 +1,206 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ROTATIONBASE_H
+#define EIGEN_ROTATIONBASE_H
+
+namespace Eigen { 
+
+// forward declaration
+namespace internal {
+template<typename RotationDerived, typename MatrixType, bool IsVector=MatrixType::IsVectorAtCompileTime>
+struct rotation_base_generic_product_selector;
+}
+
+/** \class RotationBase
+  *
+  * \brief Common base class for compact rotation representations
+  *
+  * \param Derived is the derived type, i.e., a rotation type
+  * \param _Dim the dimension of the space
+  */
+template<typename Derived, int _Dim>
+class RotationBase
+{
+  public:
+    enum { Dim = _Dim };
+    /** the scalar type of the coefficients */
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+
+    /** corresponding linear transformation matrix type */
+    typedef Matrix<Scalar,Dim,Dim> RotationMatrixType;
+    typedef Matrix<Scalar,Dim,1> VectorType;
+
+  public:
+    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    inline Derived& derived() { return *static_cast<Derived*>(this); }
+
+    /** \returns an equivalent rotation matrix */
+    inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); }
+
+    /** \returns an equivalent rotation matrix.
+      * This function is provided to conform to the Transform class's naming scheme.
+      */
+    inline RotationMatrixType matrix() const { return derived().toRotationMatrix(); }
+
+    /** \returns the inverse rotation */
+    inline Derived inverse() const { return derived().inverse(); }
+
+    /** \returns the concatenation of the rotation \c *this with a translation \a t */
+    inline Transform<Scalar,Dim,Isometry> operator*(const Translation<Scalar,Dim>& t) const
+    { return Transform<Scalar,Dim,Isometry>(*this) * t; }
+
+    /** \returns the concatenation of the rotation \c *this with a uniform scaling \a s */
+    inline RotationMatrixType operator*(const UniformScaling<Scalar>& s) const
+    { return toRotationMatrix() * s.factor(); }
+
+    /** \returns the concatenation of the rotation \c *this with a generic expression \a e
+      * \a e can be:
+      *  - a DimxDim linear transformation matrix
+      *  - a DimxDim diagonal matrix (axis aligned scaling)
+      *  - a vector of size Dim
+      */
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE typename internal::rotation_base_generic_product_selector<Derived,OtherDerived,OtherDerived::IsVectorAtCompileTime>::ReturnType
+    operator*(const EigenBase<OtherDerived>& e) const
+    { return internal::rotation_base_generic_product_selector<Derived,OtherDerived>::run(derived(), e.derived()); }
+
+    /** \returns the concatenation of a linear transformation \a l with the rotation \a r */
+    template<typename OtherDerived> friend
+    inline RotationMatrixType operator*(const EigenBase<OtherDerived>& l, const Derived& r)
+    { return l.derived() * r.toRotationMatrix(); }
+
+    /** \returns the concatenation of a scaling \a l with the rotation \a r */
+    friend inline Transform<Scalar,Dim,Affine> operator*(const DiagonalMatrix<Scalar,Dim>& l, const Derived& r)
+    { 
+      Transform<Scalar,Dim,Affine> res(r);
+      res.linear().applyOnTheLeft(l);
+      return res;
+    }
+
+    /** \returns the concatenation of the rotation \c *this with a transformation \a t */
+    template<int Mode, int Options>
+    inline Transform<Scalar,Dim,Mode> operator*(const Transform<Scalar,Dim,Mode,Options>& t) const
+    { return toRotationMatrix() * t; }
+
+    template<typename OtherVectorType>
+    inline VectorType _transformVector(const OtherVectorType& v) const
+    { return toRotationMatrix() * v; }
+};
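+
+// A sketch of the products provided by RotationBase, using AngleAxis as the
+// derived rotation type (illustrative values):
+//
+//   Eigen::AngleAxisf aa(0.7853982f, Eigen::Vector3f::UnitZ());
+//   Eigen::Vector3f v = aa * Eigen::Vector3f::UnitX();    // rotated vector
+//   Eigen::Matrix3f R = aa * Eigen::Matrix3f::Identity(); // rotation * matrix
+//   Eigen::Transform<float,3,Eigen::Isometry> T =
+//       aa * Eigen::Translation3f(1,2,3);                 // rotation * translation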
+
+namespace internal {
+
+// implementation of the generic product rotation * matrix
+template<typename RotationDerived, typename MatrixType>
+struct rotation_base_generic_product_selector<RotationDerived,MatrixType,false>
+{
+  enum { Dim = RotationDerived::Dim };
+  typedef Matrix<typename RotationDerived::Scalar,Dim,Dim> ReturnType;
+  static inline ReturnType run(const RotationDerived& r, const MatrixType& m)
+  { return r.toRotationMatrix() * m; }
+};
+
+template<typename RotationDerived, typename Scalar, int Dim, int MaxDim>
+struct rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix<Scalar,Dim,MaxDim>, false >
+{
+  typedef Transform<Scalar,Dim,Affine> ReturnType;
+  static inline ReturnType run(const RotationDerived& r, const DiagonalMatrix<Scalar,Dim,MaxDim>& m)
+  {
+    ReturnType res(r);
+    res.linear() *= m;
+    return res;
+  }
+};
+
+template<typename RotationDerived,typename OtherVectorType>
+struct rotation_base_generic_product_selector<RotationDerived,OtherVectorType,true>
+{
+  enum { Dim = RotationDerived::Dim };
+  typedef Matrix<typename RotationDerived::Scalar,Dim,1> ReturnType;
+  static EIGEN_STRONG_INLINE ReturnType run(const RotationDerived& r, const OtherVectorType& v)
+  {
+    return r._transformVector(v);
+  }
+};
+
+} // end namespace internal
+
+/** \geometry_module
+  *
+  * \brief Constructs a Dim x Dim rotation matrix from the rotation \a r
+  */
+template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
+template<typename OtherDerived>
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
+::Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
+{
+  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
+  *this = r.toRotationMatrix();
+}
+
+/** \geometry_module
+  *
+  * \brief Set a Dim x Dim rotation matrix from the rotation \a r
+  */
+template<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>
+template<typename OtherDerived>
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>&
+Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>
+::operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r)
+{
+  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))
+  return *this = r.toRotationMatrix();
+}
+
+namespace internal {
+
+/** \internal
+  *
+  * Helper function to convert an arbitrary rotation object to a rotation matrix.
+  *
+  * \param Scalar the numeric type of the matrix coefficients
+  * \param Dim the dimension of the current space
+  *
+  * It returns a Dim x Dim fixed size matrix.
+  *
+  * Default specializations are provided for:
+  *   - any scalar type (2D),
+  *   - any matrix expression,
+  *   - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D)
+  *
+  * Currently toRotationMatrix is only used by Transform.
+  *
+  * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis
+  */
+template<typename Scalar, int Dim>
+static inline Matrix<Scalar,2,2> toRotationMatrix(const Scalar& s)
+{
+  EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return Rotation2D<Scalar>(s).toRotationMatrix();
+}
+
+template<typename Scalar, int Dim, typename OtherDerived>
+static inline Matrix<Scalar,Dim,Dim> toRotationMatrix(const RotationBase<OtherDerived,Dim>& r)
+{
+  return r.toRotationMatrix();
+}
+
+template<typename Scalar, int Dim, typename OtherDerived>
+static inline const MatrixBase<OtherDerived>& toRotationMatrix(const MatrixBase<OtherDerived>& mat)
+{
+  EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim,
+    YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return mat;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_ROTATIONBASE_H
diff --git a/Eigen/src/Geometry/Scaling.h b/Eigen/src/Geometry/Scaling.h
new file mode 100644
index 0000000..1c25f36
--- /dev/null
+++ b/Eigen/src/Geometry/Scaling.h
@@ -0,0 +1,166 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SCALING_H
+#define EIGEN_SCALING_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Scaling
+  *
+  * \brief Represents a generic uniform scaling transformation
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  *
+  * This class represents a uniform scaling transformation. It is the return
+  * type of Scaling(Scalar), and most of the time this is the only way it
+  * is used. In particular, this class is not meant to store a scaling transformation,
+  * but rather to ease the construction and update of Transform objects.
+  *
+  * To represent an axis aligned scaling, use the DiagonalMatrix class.
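+  *
+  * For instance, a minimal usage sketch (the factor 2 is arbitrary):
+  * \code
+  * Affine3d t = Translation3d(1.0, 2.0, 3.0) * Scaling(2.0);
+  * \endcode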
+  *
+  * \sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform
+  */
+template<typename _Scalar>
+class UniformScaling
+{
+public:
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+
+protected:
+
+  Scalar m_factor;
+
+public:
+
+  /** Default constructor without initialization. */
+  UniformScaling() {}
+  /** Constructs and initializes a uniform scaling transformation */
+  explicit inline UniformScaling(const Scalar& s) : m_factor(s) {}
+
+  inline const Scalar& factor() const { return m_factor; }
+  inline Scalar& factor() { return m_factor; }
+
+  /** Concatenates two uniform scalings */
+  inline UniformScaling operator* (const UniformScaling& other) const
+  { return UniformScaling(m_factor * other.factor()); }
+
+  /** Concatenates a uniform scaling and a translation */
+  template<int Dim>
+  inline Transform<Scalar,Dim,Affine> operator* (const Translation<Scalar,Dim>& t) const;
+
+  /** Concatenates a uniform scaling and an affine transformation */
+  template<int Dim, int Mode, int Options>
+  inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator* (const Transform<Scalar,Dim, Mode, Options>& t) const
+  {
+    Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> res = t;
+    res.prescale(factor());
+    return res;
+  }
+
+  /** Concatenates a uniform scaling and a linear transformation matrix */
+  // TODO returns an expression
+  template<typename Derived>
+  inline typename internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const
+  { return other * m_factor; }
+
+  template<typename Derived,int Dim>
+  inline Matrix<Scalar,Dim,Dim> operator*(const RotationBase<Derived,Dim>& r) const
+  { return r.toRotationMatrix() * m_factor; }
+
+  /** \returns the inverse scaling */
+  inline UniformScaling inverse() const
+  { return UniformScaling(Scalar(1)/m_factor); }
+
+  /** \returns \c *this with scalar type cast to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline UniformScaling<NewScalarType> cast() const
+  { return UniformScaling<NewScalarType>(NewScalarType(m_factor)); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit UniformScaling(const UniformScaling<OtherScalarType>& other)
+  { m_factor = Scalar(other.factor()); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const UniformScaling& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
+  { return internal::isApprox(m_factor, other.factor(), prec); }
+
+};
+
+/** Concatenates a linear transformation matrix and a uniform scaling */
+// NOTE this operator is defined in MatrixBase and not as a friend function
+// of UniformScaling to fix an internal crash of Intel's ICC
+template<typename Derived> typename MatrixBase<Derived>::ScalarMultipleReturnType
+MatrixBase<Derived>::operator*(const UniformScaling<Scalar>& s) const
+{ return derived() * s.factor(); }
+
+/** Constructs a uniform scaling from scale factor \a s */
+static inline UniformScaling<float> Scaling(float s) { return UniformScaling<float>(s); }
+/** Constructs a uniform scaling from scale factor \a s */
+static inline UniformScaling<double> Scaling(double s) { return UniformScaling<double>(s); }
+/** Constructs a uniform scaling from scale factor \a s */
+template<typename RealScalar>
+static inline UniformScaling<std::complex<RealScalar> > Scaling(const std::complex<RealScalar>& s)
+{ return UniformScaling<std::complex<RealScalar> >(s); }
+
+/** Constructs a 2D axis aligned scaling */
+template<typename Scalar>
+static inline DiagonalMatrix<Scalar,2> Scaling(const Scalar& sx, const Scalar& sy)
+{ return DiagonalMatrix<Scalar,2>(sx, sy); }
+/** Constructs a 3D axis aligned scaling */
+template<typename Scalar>
+static inline DiagonalMatrix<Scalar,3> Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz)
+{ return DiagonalMatrix<Scalar,3>(sx, sy, sz); }
+
+/** Constructs an axis aligned scaling expression from vector expression \a coeffs.
+  * This is an alias for coeffs.asDiagonal().
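+  *
+  * For example, a minimal sketch (all values are arbitrary):
+  * \code
+  * Affine3d t = Translation3d(0.0, 0.0, 1.0) * Scaling(Vector3d(1.0, 2.0, 3.0));
+  * \endcode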
+  */
+template<typename Derived>
+static inline const DiagonalWrapper<const Derived> Scaling(const MatrixBase<Derived>& coeffs)
+{ return coeffs.asDiagonal(); }
+
+/** \addtogroup Geometry_Module */
+//@{
+/** \deprecated */
+typedef DiagonalMatrix<float, 2> AlignedScaling2f;
+/** \deprecated */
+typedef DiagonalMatrix<double,2> AlignedScaling2d;
+/** \deprecated */
+typedef DiagonalMatrix<float, 3> AlignedScaling3f;
+/** \deprecated */
+typedef DiagonalMatrix<double,3> AlignedScaling3d;
+//@}
+
+template<typename Scalar>
+template<int Dim>
+inline Transform<Scalar,Dim,Affine>
+UniformScaling<Scalar>::operator* (const Translation<Scalar,Dim>& t) const
+{
+  Transform<Scalar,Dim,Affine> res;
+  res.matrix().setZero();
+  res.linear().diagonal().fill(factor());
+  res.translation() = factor() * t.vector();
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SCALING_H
diff --git a/Eigen/src/Geometry/Transform.h b/Eigen/src/Geometry/Transform.h
new file mode 100644
index 0000000..e786e53
--- /dev/null
+++ b/Eigen/src/Geometry/Transform.h
@@ -0,0 +1,1455 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRANSFORM_H
+#define EIGEN_TRANSFORM_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Transform>
+struct transform_traits
+{
+  enum
+  {
+    Dim = Transform::Dim,
+    HDim = Transform::HDim,
+    Mode = Transform::Mode,
+    IsProjective = (int(Mode)==int(Projective))
+  };
+};
+
+template< typename TransformType,
+          typename MatrixType,
+          int Case = transform_traits<TransformType>::IsProjective ? 0
+                   : int(MatrixType::RowsAtCompileTime) == int(transform_traits<TransformType>::HDim) ? 1
+                   : 2>
+struct transform_right_product_impl;
+
+template< typename Other,
+          int Mode,
+          int Options,
+          int Dim,
+          int HDim,
+          int OtherRows=Other::RowsAtCompileTime,
+          int OtherCols=Other::ColsAtCompileTime>
+struct transform_left_product_impl;
+
+template< typename Lhs,
+          typename Rhs,
+          bool AnyProjective = 
+            transform_traits<Lhs>::IsProjective ||
+            transform_traits<Rhs>::IsProjective>
+struct transform_transform_product_impl;
+
+template< typename Other,
+          int Mode,
+          int Options,
+          int Dim,
+          int HDim,
+          int OtherRows=Other::RowsAtCompileTime,
+          int OtherCols=Other::ColsAtCompileTime>
+struct transform_construct_from_matrix;
+
+template<typename TransformType> struct transform_take_affine_part;
+
+template<int Mode> struct transform_make_affine;
+
+} // end namespace internal
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Transform
+  *
+  * \brief Represents a homogeneous transformation in an N-dimensional space
+  *
+  * \tparam _Scalar the scalar type, i.e., the type of the coefficients
+  * \tparam _Dim the dimension of the space
+  * \tparam _Mode the type of the transformation. Can be:
+  *              - #Affine: the transformation is stored as a (Dim+1)^2 matrix,
+  *                         where the last row is assumed to be [0 ... 0 1].
+  *              - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix.
+  *              - #Projective: the transformation is stored as a (Dim+1)^2 matrix
+  *                             without any assumption.
+  * \tparam _Options has the same meaning as in class Matrix. It can be used to specify DontAlign and/or RowMajor.
+  *                  These Options are passed directly to the underlying matrix type.
+  *
+  * The transformation is internally represented and stored by a matrix which
+  * is available through the matrix() method. To understand the behavior of
+  * this class you have to think of a Transform object as its internal
+  * matrix representation. The chosen convention is right multiply:
+  *
+  * \code v' = T * v \endcode
+  *
+  * Therefore, an affine transformation matrix M is shaped like this:
+  *
+  * \f$ \left( \begin{array}{cc}
+  * linear & translation\\
+  * 0 ... 0 & 1
+  * \end{array} \right) \f$
+  *
+  * Note that for a projective transformation the last row can be anything,
+  * and then the interpretation of the different parts might be slightly different.
+  *
+  * However, unlike a plain matrix, the Transform class provides many features
+  * simplifying both its assembly and usage. In particular, it can be composed
+  * with any other transformations (Transform,Translation,RotationBase,Matrix)
+  * and can be directly used to transform implicit homogeneous vectors. All these
+  * operations are handled via the operator*. For the composition of transformations,
+  * the principle is to first convert the right/left hand sides of the product
+  * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product.
+  * Of course, internally, operator* tries to perform the minimal number of operations
+  * according to the nature of each term. Likewise, when applying the transform
+  * to non homogeneous vectors, the latter are automatically promoted to homogeneous
+  * ones before doing the matrix product. The conversions to homogeneous representations
+  * are performed as follows:
+  *
+  * \b Translation t (Dim)x(1):
+  * \f$ \left( \begin{array}{cc}
+  * I & t \\
+  * 0\,...\,0 & 1
+  * \end{array} \right) \f$
+  *
+  * \b Rotation R (Dim)x(Dim):
+  * \f$ \left( \begin{array}{cc}
+  * R & 0\\
+  * 0\,...\,0 & 1
+  * \end{array} \right) \f$
+  *
+  * \b Linear \b Matrix L (Dim)x(Dim):
+  * \f$ \left( \begin{array}{cc}
+  * L & 0\\
+  * 0\,...\,0 & 1
+  * \end{array} \right) \f$
+  *
+  * \b Affine \b Matrix A (Dim)x(Dim+1):
+  * \f$ \left( \begin{array}{c}
+  * A\\
+  * 0\,...\,0\,1
+  * \end{array} \right) \f$
+  *
+  * \b Column \b vector v (Dim)x(1):
+  * \f$ \left( \begin{array}{c}
+  * v\\
+  * 1
+  * \end{array} \right) \f$
+  *
+  * \b Set \b of \b column \b vectors V1...Vn (Dim)x(n):
+  * \f$ \left( \begin{array}{ccc}
+  * v_1 & ... & v_n\\
+  * 1 & ... & 1
+  * \end{array} \right) \f$
+  *
+  * The concatenation of a Transform object with any kind of other transformation
+  * always returns a Transform object.
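+  *
+  * For instance, a typical composition might look like this sketch (tx, ty, tz,
+  * angle, axis and s are assumed to be defined elsewhere):
+  * \code
+  * Transform<float,3,Affine> t = Translation3f(tx,ty,tz) * AngleAxisf(angle,axis) * Scaling(s);
+  * \endcode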
+  *
+  * One exception to the "as pure matrix product" rule is the case of the
+  * transformation of non homogeneous vectors by an affine transformation. In
+  * that case the last matrix row can be ignored, and the product returns non
+  * homogeneous vectors.
+  *
+  * Since, for instance, a Dim x Dim matrix is interpreted as a linear transformation,
+  * it is not possible to directly transform Dim vectors stored in a Dim x Dim matrix.
+  * The solution is either to use a Dim x Dynamic matrix or explicitly request a
+  * vector transformation by making the vector homogeneous:
+  * \code
+  * m' = T * m.colwise().homogeneous();
+  * \endcode
+  * Note that there is zero overhead.
+  *
+  * Conversion methods from/to Qt's QMatrix and QTransform are available if the
+  * preprocessor token EIGEN_QT_SUPPORT is defined.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_TRANSFORM_PLUGIN.
+  *
+  * \sa class Matrix, class Quaternion
+  */
+template<typename _Scalar, int _Dim, int _Mode, int _Options>
+class Transform
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))
+  enum {
+    Mode = _Mode,
+    Options = _Options,
+    Dim = _Dim,     ///< space dimension in which the transformation holds
+    HDim = _Dim+1,  ///< size of a respective homogeneous vector
+    Rows = int(Mode)==(AffineCompact) ? Dim : HDim
+  };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  typedef DenseIndex Index;
+  /** type of the matrix used to represent the transformation */
+  typedef typename internal::make_proper_matrix_type<Scalar,Rows,HDim,Options>::type MatrixType;
+  /** constified MatrixType */
+  typedef const MatrixType ConstMatrixType;
+  /** type of the matrix used to represent the linear part of the transformation */
+  typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
+  /** type of read/write reference to the linear part of the transformation */
+  typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart;
+  /** type of read reference to the linear part of the transformation */
+  typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> ConstLinearPart;
+  /** type of read/write reference to the affine part of the transformation */
+  typedef typename internal::conditional<int(Mode)==int(AffineCompact),
+                              MatrixType&,
+                              Block<MatrixType,Dim,HDim> >::type AffinePart;
+  /** type of read reference to the affine part of the transformation */
+  typedef typename internal::conditional<int(Mode)==int(AffineCompact),
+                              const MatrixType&,
+                              const Block<const MatrixType,Dim,HDim> >::type ConstAffinePart;
+  /** type of a vector */
+  typedef Matrix<Scalar,Dim,1> VectorType;
+  /** type of a read/write reference to the translation part of the rotation */
+  typedef Block<MatrixType,Dim,1,int(Mode)==(AffineCompact)> TranslationPart;
+  /** type of a read reference to the translation part of the rotation */
+  typedef const Block<ConstMatrixType,Dim,1,int(Mode)==(AffineCompact)> ConstTranslationPart;
+  /** corresponding translation type */
+  typedef Translation<Scalar,Dim> TranslationType;
+  
+  // this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0
+  enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) };
+  /** The return type of the product between a diagonal matrix and a transform */
+  typedef Transform<Scalar,Dim,TransformTimeDiagonalMode> TransformTimeDiagonalReturnType;
+
+protected:
+
+  MatrixType m_matrix;
+
+public:
+
+  /** Default constructor without initialization of the meaningful coefficients.
+    * If Mode==Affine, then the last row is set to [0 ... 0 1] */
+  inline Transform()
+  {
+    check_template_params();
+    internal::transform_make_affine<(int(Mode)==Affine) ? Affine : AffineCompact>::run(m_matrix);
+  }
+
+  inline Transform(const Transform& other)
+  {
+    check_template_params();
+    m_matrix = other.m_matrix;
+  }
+
+  inline explicit Transform(const TranslationType& t)
+  {
+    check_template_params();
+    *this = t;
+  }
+  inline explicit Transform(const UniformScaling<Scalar>& s)
+  {
+    check_template_params();
+    *this = s;
+  }
+  template<typename Derived>
+  inline explicit Transform(const RotationBase<Derived, Dim>& r)
+  {
+    check_template_params();
+    *this = r;
+  }
+
+  inline Transform& operator=(const Transform& other)
+  { m_matrix = other.m_matrix; return *this; }
+
+  typedef internal::transform_take_affine_part<Transform> take_affine_part;
+
+  /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */
+  template<typename OtherDerived>
+  inline explicit Transform(const EigenBase<OtherDerived>& other)
+  {
+    EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value),
+      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY);
+
+    check_template_params();
+    internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());
+  }
+
+  /** Set \c *this from a Dim^2 or (Dim+1)^2 matrix. */
+  template<typename OtherDerived>
+  inline Transform& operator=(const EigenBase<OtherDerived>& other)
+  {
+    EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value),
+      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY);
+
+    internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());
+    return *this;
+  }
+  
+  template<int OtherOptions>
+  inline Transform(const Transform<Scalar,Dim,Mode,OtherOptions>& other)
+  {
+    check_template_params();
+    // only the options change, we can directly copy the matrices
+    m_matrix = other.matrix();
+  }
+
+  template<int OtherMode,int OtherOptions>
+  inline Transform(const Transform<Scalar,Dim,OtherMode,OtherOptions>& other)
+  {
+    check_template_params();
+    // prevent conversions as:
+    // Affine | AffineCompact | Isometry = Projective
+    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)),
+                        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
+
+    // prevent conversions as:
+    // Isometry = Affine | AffineCompact
+    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)),
+                        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)
+
+    enum { ModeIsAffineCompact = Mode == int(AffineCompact),
+           OtherModeIsAffineCompact = OtherMode == int(AffineCompact)
+    };
+
+    if(ModeIsAffineCompact == OtherModeIsAffineCompact)
+    {
+      // We need the block expression because the code is compiled for all
+      // combinations of transformations and will trigger a compile time error
+      // if one tries to assign the matrices directly
+      m_matrix.template block<Dim,Dim+1>(0,0) = other.matrix().template block<Dim,Dim+1>(0,0);
+      makeAffine();
+    }
+    else if(OtherModeIsAffineCompact)
+    {
+      typedef typename Transform<Scalar,Dim,OtherMode,OtherOptions>::MatrixType OtherMatrixType;
+      internal::transform_construct_from_matrix<OtherMatrixType,Mode,Options,Dim,HDim>::run(this, other.matrix());
+    }
+    else
+    {
+      // here we know that Mode == AffineCompact and OtherMode != AffineCompact.
+      // if OtherMode were Projective, the static assert above would already have caught it.
+      // So the only possibility is that OtherMode == Affine
+      linear() = other.linear();
+      translation() = other.translation();
+    }
+  }
+
+  template<typename OtherDerived>
+  Transform(const ReturnByValue<OtherDerived>& other)
+  {
+    check_template_params();
+    other.evalTo(*this);
+  }
+
+  template<typename OtherDerived>
+  Transform& operator=(const ReturnByValue<OtherDerived>& other)
+  {
+    other.evalTo(*this);
+    return *this;
+  }
+
+  #ifdef EIGEN_QT_SUPPORT
+  inline Transform(const QMatrix& other);
+  inline Transform& operator=(const QMatrix& other);
+  inline QMatrix toQMatrix(void) const;
+  inline Transform(const QTransform& other);
+  inline Transform& operator=(const QTransform& other);
+  inline QTransform toQTransform(void) const;
+  #endif
+
+  /** shortcut for m_matrix(row,col);
+    * \sa MatrixBase::operator(Index,Index) const */
+  inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); }
+  /** shortcut for m_matrix(row,col);
+    * \sa MatrixBase::operator(Index,Index) */
+  inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); }
+
+  /** \returns a read-only expression of the transformation matrix */
+  inline const MatrixType& matrix() const { return m_matrix; }
+  /** \returns a writable expression of the transformation matrix */
+  inline MatrixType& matrix() { return m_matrix; }
+
+  /** \returns a read-only expression of the linear part of the transformation */
+  inline ConstLinearPart linear() const { return ConstLinearPart(m_matrix,0,0); }
+  /** \returns a writable expression of the linear part of the transformation */
+  inline LinearPart linear() { return LinearPart(m_matrix,0,0); }
+
+  /** \returns a read-only expression of the Dim x HDim affine part of the transformation */
+  inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); }
+  /** \returns a writable expression of the Dim x HDim affine part of the transformation */
+  inline AffinePart affine() { return take_affine_part::run(m_matrix); }
+
+  /** \returns a read-only expression of the translation vector of the transformation */
+  inline ConstTranslationPart translation() const { return ConstTranslationPart(m_matrix,0,Dim); }
+  /** \returns a writable expression of the translation vector of the transformation */
+  inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); }
+
+  /** \returns an expression of the product between the transform \c *this and a matrix expression \a other
+    *
+    * The right hand side \a other might be either:
+    * \li a vector of size Dim,
+    * \li an homogeneous vector of size Dim+1,
+    * \li a set of vectors of size Dim x Dynamic,
+    * \li a set of homogeneous vectors of size Dim+1 x Dynamic,
+    * \li a linear transformation matrix of size Dim x Dim,
+    * \li an affine transformation matrix of size Dim x Dim+1,
+    * \li a transformation matrix of size Dim+1 x Dim+1.
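+    *
+    * For instance, a minimal sketch (\c t is assumed to be an Affine3d):
+    * \code
+    * Vector3d p = t * Vector3d(1.0, 2.0, 3.0); // transforms a single point
+    * \endcode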
+    */
+  // note: this function is defined here because some compilers cannot find the respective declaration
+  template<typename OtherDerived>
+  EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl<Transform, OtherDerived>::ResultType
+  operator * (const EigenBase<OtherDerived> &other) const
+  { return internal::transform_right_product_impl<Transform, OtherDerived>::run(*this,other.derived()); }
+
+  /** \returns the product expression of a transformation matrix \a a times a transform \a b
+    *
+    * The left hand side \a a might be either:
+    * \li a linear transformation matrix of size Dim x Dim,
+    * \li an affine transformation matrix of size Dim x Dim+1,
+    * \li a general transformation matrix of size Dim+1 x Dim+1.
+    */
+  template<typename OtherDerived> friend
+  inline const typename internal::transform_left_product_impl<OtherDerived,Mode,Options,_Dim,_Dim+1>::ResultType
+    operator * (const EigenBase<OtherDerived> &a, const Transform &b)
+  { return internal::transform_left_product_impl<OtherDerived,Mode,Options,Dim,HDim>::run(a.derived(),b); }
+
+  /** \returns The product expression of a transform \a a times a diagonal matrix \a b
+    *
+    * The rhs diagonal matrix is interpreted as an affine scaling transformation. The
+    * product results in a Transform of the same type (mode) as the lhs only if the lhs
+    * mode is not an isometry. Otherwise, the returned transform is an affinity.
+    */
+  template<typename DiagonalDerived>
+  inline const TransformTimeDiagonalReturnType
+    operator * (const DiagonalBase<DiagonalDerived> &b) const
+  {
+    TransformTimeDiagonalReturnType res(*this);
+    res.linear() *= b;
+    return res;
+  }
+
+  /** \returns The product expression of a diagonal matrix \a a times a transform \a b
+    *
+    * The lhs diagonal matrix is interpreted as an affine scaling transformation. The
+    * product results in a Transform of the same type (mode) as \a b only if its
+    * mode is not an isometry. Otherwise, the returned transform is an affinity.
+    */
+  template<typename DiagonalDerived>
+  friend inline TransformTimeDiagonalReturnType
+    operator * (const DiagonalBase<DiagonalDerived> &a, const Transform &b)
+  {
+    TransformTimeDiagonalReturnType res;
+    res.linear().noalias() = a*b.linear();
+    res.translation().noalias() = a*b.translation();
+    if (Mode!=int(AffineCompact))
+      res.matrix().row(Dim) = b.matrix().row(Dim);
+    return res;
+  }
+
+  template<typename OtherDerived>
+  inline Transform& operator*=(const EigenBase<OtherDerived>& other) { return *this = *this * other; }
+
+  /** Concatenates two transformations */
+  inline const Transform operator * (const Transform& other) const
+  {
+    return internal::transform_transform_product_impl<Transform,Transform>::run(*this,other);
+  }
+  
+  #ifdef __INTEL_COMPILER
+private:
+  // this intermediate structure permits working around a bug in ICC 11:
+  //   error: template instantiation resulted in unexpected function type of "Eigen::Transform<double, 3, 32, 0>
+  //             (const Eigen::Transform<double, 3, 2, 0> &) const"
+  //  (the meaning of a name may have changed since the template declaration -- the type of the template is:
+  // "Eigen::internal::transform_transform_product_impl<Eigen::Transform<double, 3, 32, 0>,
+  //     Eigen::Transform<double, 3, Mode, Options>, <expression>>::ResultType (const Eigen::Transform<double, 3, Mode, Options> &) const")
+  // 
+  template<int OtherMode,int OtherOptions> struct icc_11_workaround
+  {
+    typedef internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> > ProductType;
+    typedef typename ProductType::ResultType ResultType;
+  };
+  
+public:
+  /** Concatenates two different transformations */
+  template<int OtherMode,int OtherOptions>
+  inline typename icc_11_workaround<OtherMode,OtherOptions>::ResultType
+    operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const
+  {
+    typedef typename icc_11_workaround<OtherMode,OtherOptions>::ProductType ProductType;
+    return ProductType::run(*this,other);
+  }
+  #else
+  /** Concatenates two different transformations */
+  template<int OtherMode,int OtherOptions>
+  inline typename internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::ResultType
+    operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const
+  {
+    return internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::run(*this,other);
+  }
+  #endif
+
+  /** \sa MatrixBase::setIdentity() */
+  void setIdentity() { m_matrix.setIdentity(); }
+
+  /**
+   * \brief Returns an identity transformation.
+   * \todo In the future this function should return a Transform expression.
+   */
+  static const Transform Identity()
+  {
+    return Transform(MatrixType::Identity());
+  }
+
+  template<typename OtherDerived>
+  inline Transform& scale(const MatrixBase<OtherDerived> &other);
+
+  template<typename OtherDerived>
+  inline Transform& prescale(const MatrixBase<OtherDerived> &other);
+
+  inline Transform& scale(const Scalar& s);
+  inline Transform& prescale(const Scalar& s);
+
+  template<typename OtherDerived>
+  inline Transform& translate(const MatrixBase<OtherDerived> &other);
+
+  template<typename OtherDerived>
+  inline Transform& pretranslate(const MatrixBase<OtherDerived> &other);
+
+  template<typename RotationType>
+  inline Transform& rotate(const RotationType& rotation);
+
+  template<typename RotationType>
+  inline Transform& prerotate(const RotationType& rotation);
+
+  Transform& shear(const Scalar& sx, const Scalar& sy);
+  Transform& preshear(const Scalar& sx, const Scalar& sy);
+
+  inline Transform& operator=(const TranslationType& t);
+  inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }
+  inline Transform operator*(const TranslationType& t) const;
+
+  inline Transform& operator=(const UniformScaling<Scalar>& t);
+  inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }
+  inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?int(Affine):int(Mode))> operator*(const UniformScaling<Scalar>& s) const
+  {
+    Transform<Scalar,Dim,(int(Mode)==int(Isometry)?int(Affine):int(Mode)),Options> res = *this;
+    res.scale(s.factor());
+    return res;
+  }
+
+  inline Transform& operator*=(const DiagonalMatrix<Scalar,Dim>& s) { linear() *= s; return *this; }
+
+  template<typename Derived>
+  inline Transform& operator=(const RotationBase<Derived,Dim>& r);
+  template<typename Derived>
+  inline Transform& operator*=(const RotationBase<Derived,Dim>& r) { return rotate(r.toRotationMatrix()); }
+  template<typename Derived>
+  inline Transform operator*(const RotationBase<Derived,Dim>& r) const;
+
+  const LinearMatrixType rotation() const;
+  template<typename RotationMatrixType, typename ScalingMatrixType>
+  void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const;
+  template<typename ScalingMatrixType, typename RotationMatrixType>
+  void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const;
+
+  template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
+  Transform& fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
+    const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale);
+
+  inline Transform inverse(TransformTraits traits = (TransformTraits)Mode) const;
+
+  /** \returns a const pointer to the column major internal matrix */
+  const Scalar* data() const { return m_matrix.data(); }
+  /** \returns a non-const pointer to the column major internal matrix */
+  Scalar* data() { return m_matrix.data(); }
+
+  /** \returns \c *this with scalar type cast to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type cast() const
+  { return typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Transform(const Transform<OtherScalarType,Dim,Mode,Options>& other)
+  {
+    check_template_params();
+    m_matrix = other.matrix().template cast<Scalar>();
+  }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Transform& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_matrix.isApprox(other.m_matrix, prec); }
+
+  /** Sets the last row to [0 ... 0 1]
+    */
+  void makeAffine()
+  {
+    internal::transform_make_affine<int(Mode)>::run(m_matrix);
+  }
+
+  /** \internal
+    * \returns the Dim x Dim linear part if the transformation is affine,
+    *          and the HDim x Dim part for projective transformations.
+    */
+  inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt()
+  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }
+  /** \internal
+    * \returns the Dim x Dim linear part if the transformation is affine,
+    *          and the HDim x Dim part for projective transformations.
+    */
+  inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt() const
+  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }
+
+  /** \internal
+    * \returns the translation part if the transformation is affine,
+    *          and the last column for projective transformations.
+    */
+  inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt()
+  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }
+  /** \internal
+    * \returns the translation part if the transformation is affine,
+    *          and the last column for projective transformations.
+    */
+  inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt() const
+  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }
+
+
+  #ifdef EIGEN_TRANSFORM_PLUGIN
+  #include EIGEN_TRANSFORM_PLUGIN
+  #endif
+  
+protected:
+  #ifndef EIGEN_PARSED_BY_DOXYGEN
+    static EIGEN_STRONG_INLINE void check_template_params()
+    {
+      EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)
+    }
+  #endif
+
+};
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,Isometry> Isometry2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,Isometry> Isometry3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,Isometry> Isometry2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,Isometry> Isometry3d;
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,Affine> Affine2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,Affine> Affine3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,Affine> Affine2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,Affine> Affine3d;
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,AffineCompact> AffineCompact2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,AffineCompact> AffineCompact3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,AffineCompact> AffineCompact2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,AffineCompact> AffineCompact3d;
+
+/** \ingroup Geometry_Module */
+typedef Transform<float,2,Projective> Projective2f;
+/** \ingroup Geometry_Module */
+typedef Transform<float,3,Projective> Projective3f;
+/** \ingroup Geometry_Module */
+typedef Transform<double,2,Projective> Projective2d;
+/** \ingroup Geometry_Module */
+typedef Transform<double,3,Projective> Projective3d;
+
+/**************************
+*** Optional QT support ***
+**************************/
+
+#ifdef EIGEN_QT_SUPPORT
+/** Initializes \c *this from a QMatrix assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode,int Options>
+Transform<Scalar,Dim,Mode,Options>::Transform(const QMatrix& other)
+{
+  check_template_params();
+  *this = other;
+}
+
+/** Set \c *this from a QMatrix assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode,int Options>
+Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QMatrix& other)
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  m_matrix << other.m11(), other.m21(), other.dx(),
+              other.m12(), other.m22(), other.dy(),
+              0, 0, 1;
+  return *this;
+}
+
+/** \returns a QMatrix from \c *this assuming the dimension is 2.
+  *
+  * \warning this conversion might lose data if \c *this is not affine
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+QMatrix Transform<Scalar,Dim,Mode,Options>::toQMatrix(void) const
+{
+  check_template_params();
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
+                 m_matrix.coeff(0,1), m_matrix.coeff(1,1),
+                 m_matrix.coeff(0,2), m_matrix.coeff(1,2));
+}
+
+/** Initializes \c *this from a QTransform assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode,int Options>
+Transform<Scalar,Dim,Mode,Options>::Transform(const QTransform& other)
+{
+  check_template_params();
+  *this = other;
+}
+
+/** Set \c *this from a QTransform assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QTransform& other)
+{
+  check_template_params();
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  if (Mode == int(AffineCompact))
+    m_matrix << other.m11(), other.m21(), other.dx(),
+                other.m12(), other.m22(), other.dy();
+  else
+    m_matrix << other.m11(), other.m21(), other.dx(),
+                other.m12(), other.m22(), other.dy(),
+                other.m13(), other.m23(), other.m33();
+  return *this;
+}
+
+/** \returns a QTransform from \c *this assuming the dimension is 2.
+  *
+  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+QTransform Transform<Scalar,Dim,Mode,Options>::toQTransform(void) const
+{
+  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  if (Mode == int(AffineCompact))
+    return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
+                      m_matrix.coeff(0,1), m_matrix.coeff(1,1),
+                      m_matrix.coeff(0,2), m_matrix.coeff(1,2));
+  else
+    return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0),
+                      m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1),
+                      m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2));
+}
+#endif
+
+/*********************
+*** Procedural API ***
+*********************/
+
+/** Applies on the right the non uniform scale transformation represented
+  * by the vector \a other to \c *this and returns a reference to \c *this.
+  * \sa prescale()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::scale(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  linearExt().noalias() = (linearExt() * other.asDiagonal());
+  return *this;
+}
+
+/** Applies on the right a uniform scale of factor \a s to \c *this
+  * and returns a reference to \c *this.
+  * \sa prescale(Scalar)
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(const Scalar& s)
+{
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  linearExt() *= s;
+  return *this;
+}
+
+/** Applies on the left the non uniform scale transformation represented
+  * by the vector \a other to \c *this and returns a reference to \c *this.
+  * \sa scale()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::prescale(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  m_matrix.template block<Dim,HDim>(0,0).noalias() = (other.asDiagonal() * m_matrix.template block<Dim,HDim>(0,0));
+  return *this;
+}
+
+/** Applies on the left a uniform scale of factor \a s to \c *this
+  * and returns a reference to \c *this.
+  * \sa scale(Scalar)
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(const Scalar& s)
+{
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  m_matrix.template topRows<Dim>() *= s;
+  return *this;
+}
+
+/** Applies on the right the translation matrix represented by the vector \a other
+  * to \c *this and returns a reference to \c *this.
+  * \sa pretranslate()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::translate(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  translationExt() += linearExt() * other;
+  return *this;
+}
+
+/** Applies on the left the translation matrix represented by the vector \a other
+  * to \c *this and returns a reference to \c *this.
+  * \sa translate()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename OtherDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::pretranslate(const MatrixBase<OtherDerived> &other)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))
+  if(int(Mode)==int(Projective))
+    affine() += other * m_matrix.row(Dim);
+  else
+    translation() += other;
+  return *this;
+}
+
+/** Applies on the right the rotation represented by the rotation \a rotation
+  * to \c *this and returns a reference to \c *this.
+  *
+  * The template parameter \a RotationType is the type of the rotation which
+  * must be known by internal::toRotationMatrix<>.
+  *
+  * Natively supported types include:
+  *   - any scalar (2D),
+  *   - a Dim x Dim matrix expression,
+  *   - a Quaternion (3D),
+  *   - an AngleAxis (3D)
+  *
+  * This mechanism is easily extendable to support user types such as Euler angles,
+  * or a pair of Quaternions for 4D rotations.
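+  *
+  * For example, a minimal sketch (angle and axis are assumed to be defined
+  * elsewhere):
+  * \code
+  * Affine3f t = Affine3f::Identity();
+  * t.rotate(AngleAxisf(angle, axis)).translate(Vector3f::UnitX());
+  * \endcode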
+  *
+  * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType)
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename RotationType>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::rotate(const RotationType& rotation)
+{
+  linearExt() *= internal::toRotationMatrix<Scalar,Dim>(rotation);
+  return *this;
+}
+
+/** Applies on the left the rotation represented by the rotation \a rotation
+  * to \c *this and returns a reference to \c *this.
+  *
+  * See rotate() for further details.
+  *
+  * \sa rotate()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename RotationType>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::prerotate(const RotationType& rotation)
+{
+  m_matrix.template block<Dim,HDim>(0,0) = internal::toRotationMatrix<Scalar,Dim>(rotation)
+                                         * m_matrix.template block<Dim,HDim>(0,0);
+  return *this;
+}
+
+/** Applies on the right the shear transformation represented
+  * by the factors \a sx and \a sy to \c *this and returns a reference to \c *this.
+  * \warning 2D only.
+  * \sa preshear()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::shear(const Scalar& sx, const Scalar& sy)
+{
+  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  VectorType tmp = linear().col(0)*sy + linear().col(1);
+  linear() << linear().col(0) + linear().col(1)*sx, tmp;
+  return *this;
+}
+
+/** Applies on the left the shear transformation represented
+  * by the factors \a sx and \a sy to \c *this and returns a reference to \c *this.
+  * \warning 2D only.
+  * \sa shear()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::preshear(const Scalar& sx, const Scalar& sy)
+{
+  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)
+  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)
+  m_matrix.template block<Dim,HDim>(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block<Dim,HDim>(0,0);
+  return *this;
+}
+
+/******************************************************
+*** Scaling, Translation and Rotation compatibility ***
+******************************************************/
+
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const TranslationType& t)
+{
+  linear().setIdentity();
+  translation() = t.vector();
+  makeAffine();
+  return *this;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const TranslationType& t) const
+{
+  Transform res = *this;
+  res.translate(t.vector());
+  return res;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const UniformScaling<Scalar>& s)
+{
+  m_matrix.setZero();
+  linear().diagonal().fill(s.factor());
+  makeAffine();
+  return *this;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename Derived>
+inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const RotationBase<Derived,Dim>& r)
+{
+  linear() = internal::toRotationMatrix<Scalar,Dim>(r);
+  translation().setZero();
+  makeAffine();
+  return *this;
+}
+
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename Derived>
+inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const RotationBase<Derived,Dim>& r) const
+{
+  Transform res = *this;
+  res.rotate(r.derived());
+  return res;
+}
+
+/************************
+*** Special functions ***
+************************/
+
+/** \returns the rotation part of the transformation
+  *
+  * \svd_module
+  *
+  * \sa computeRotationScaling(), computeScalingRotation(), class SVD
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+const typename Transform<Scalar,Dim,Mode,Options>::LinearMatrixType
+Transform<Scalar,Dim,Mode,Options>::rotation() const
+{
+  LinearMatrixType result;
+  computeRotationScaling(&result, (LinearMatrixType*)0);
+  return result;
+}
+
+
+/** decomposes the linear part of the transformation as a product rotation x scaling;
+  * the scaling is not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
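+  *
+  * For example, a minimal sketch (\c t is assumed to be some Affine3d):
+  * \code
+  * Matrix3d rot, scl;
+  * t.computeRotationScaling(&rot, &scl); // t.linear() is (approximately) rot * scl
+  * \endcode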
+  *
+  * \svd_module
+  *
+  * \sa computeScalingRotation(), rotation(), class SVD
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename RotationMatrixType, typename ScalingMatrixType>
+void Transform<Scalar,Dim,Mode,Options>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const
+{
+  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
+
+  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+  VectorType sv(svd.singularValues());
+  sv.coeffRef(0) *= x;
+  if(scaling) scaling->lazyAssign(svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint());
+  if(rotation)
+  {
+    LinearMatrixType m(svd.matrixU());
+    m.col(0) /= x;
+    rotation->lazyAssign(m * svd.matrixV().adjoint());
+  }
+}
+
+/** decomposes the linear part of the transformation as a product scaling x rotation;
+  * the scaling is not necessarily positive.
+  *
+  * If either pointer is zero, the corresponding computation is skipped.
+  *
+  * \svd_module
+  *
+  * \sa computeRotationScaling(), rotation(), class SVD
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename ScalingMatrixType, typename RotationMatrixType>
+void Transform<Scalar,Dim,Mode,Options>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const
+{
+  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);
+
+  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1
+  VectorType sv(svd.singularValues());
+  sv.coeffRef(0) *= x;
+  if(scaling) scaling->lazyAssign(svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint());
+  if(rotation)
+  {
+    LinearMatrixType m(svd.matrixU());
+    m.col(0) /= x;
+    rotation->lazyAssign(m * svd.matrixV().adjoint());
+  }
+}
+
+/** Convenient method to set \c *this from a position, orientation and scale
+  * of a 3D object.
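+  *
+  * For example, a minimal sketch (all values here are arbitrary):
+  * \code
+  * Affine3d t;
+  * t.fromPositionOrientationScale(Vector3d(1.0, 2.0, 3.0),
+  *                                Quaterniond::Identity(),
+  *                                Vector3d::Ones());
+  * \endcode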
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
+Transform<Scalar,Dim,Mode,Options>&
+Transform<Scalar,Dim,Mode,Options>::fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
+  const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale)
+{
+  linear() = internal::toRotationMatrix<Scalar,Dim>(orientation);
+  linear() *= scale.asDiagonal();
+  translation() = position;
+  makeAffine();
+  return *this;
+}
+
+namespace internal {
+
+template<int Mode>
+struct transform_make_affine
+{
+  template<typename MatrixType>
+  static void run(MatrixType &mat)
+  {
+    static const int Dim = MatrixType::ColsAtCompileTime-1;
+    mat.template block<1,Dim>(Dim,0).setZero();
+    mat.coeffRef(Dim,Dim) = typename MatrixType::Scalar(1);
+  }
+};
+
+template<>
+struct transform_make_affine<AffineCompact>
+{
+  template<typename MatrixType> static void run(MatrixType &) { }
+};
+    
+// selector needed to avoid taking the inverse of a 3x4 matrix
+template<typename TransformType, int Mode=TransformType::Mode>
+struct projective_transform_inverse
+{
+  static inline void run(const TransformType&, TransformType&)
+  {}
+};
+
+template<typename TransformType>
+struct projective_transform_inverse<TransformType, Projective>
+{
+  static inline void run(const TransformType& m, TransformType& res)
+  {
+    res.matrix() = m.matrix().inverse();
+  }
+};
+
+} // end namespace internal
+
+
+/**
+  *
+  * \returns the inverse transformation, taking advantage of some given knowledge
+  * of \c *this.
+  *
+  * \param hint can be used to optimize the inversion process when the transformation
+  * is known not to be a general transformation (optional). The possible values are:
+  *  - #Projective if the transformation is not necessarily affine, i.e., if the
+  *    last row is not guaranteed to be [0 ... 0 1]
+  *  - #Affine if the last row can be assumed to be [0 ... 0 1]
+  *  - #Isometry if the transformation is only a concatenation of translations
+  *    and rotations.
+  *  The default is the template class parameter \c Mode.
+  *
+  * \warning unless \a hint is always set to #Isometry, this function
+  * requires the generic inverse method of MatrixBase defined in the LU module. If
+  * you forget to include this module, then you will get hard-to-debug linking errors.
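+  *
+  * For example, a sketch assuming \c t is known to be a rigid motion:
+  * \code
+  * Affine3d t_inv = t.inverse(Isometry); // only transposes the linear part
+  * \endcode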
+  *
+  * \sa MatrixBase::inverse()
+  */
+template<typename Scalar, int Dim, int Mode, int Options>
+Transform<Scalar,Dim,Mode,Options>
+Transform<Scalar,Dim,Mode,Options>::inverse(TransformTraits hint) const
+{
+  Transform res;
+  if (hint == Projective)
+  {
+    internal::projective_transform_inverse<Transform>::run(*this, res);
+  }
+  else
+  {
+    if (hint == Isometry)
+    {
+      res.matrix().template topLeftCorner<Dim,Dim>() = linear().transpose();
+    }
+    else if(hint&Affine)
+    {
+      res.matrix().template topLeftCorner<Dim,Dim>() = linear().inverse();
+    }
+    else
+    {
+      eigen_assert(false && "Invalid transform traits in Transform::Inverse");
+    }
+    // translation and remaining parts
+    res.matrix().template topRightCorner<Dim,1>()
+      = - res.matrix().template topLeftCorner<Dim,Dim>() * translation();
+    res.makeAffine(); // we do need this, because in the beginning res is uninitialized
+  }
+  return res;
+}
+
+namespace internal {
+
+/*****************************************************
+*** Specializations of take affine part            ***
+*****************************************************/
+
+template<typename TransformType> struct transform_take_affine_part {
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef typename TransformType::AffinePart AffinePart;
+  typedef typename TransformType::ConstAffinePart ConstAffinePart;
+  static inline AffinePart run(MatrixType& m)
+  { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }
+  static inline ConstAffinePart run(const MatrixType& m)
+  { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }
+};
+
+template<typename Scalar, int Dim, int Options>
+struct transform_take_affine_part<Transform<Scalar,Dim,AffineCompact, Options> > {
+  typedef typename Transform<Scalar,Dim,AffineCompact,Options>::MatrixType MatrixType;
+  static inline MatrixType& run(MatrixType& m) { return m; }
+  static inline const MatrixType& run(const MatrixType& m) { return m; }
+};
+
+/*****************************************************
+*** Specializations of construct from matrix       ***
+*****************************************************/
+
+template<typename Other, int Mode, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,Dim>
+{
+  static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
+  {
+    transform->linear() = other;
+    transform->translation().setZero();
+    transform->makeAffine();
+  }
+};
+
+template<typename Other, int Mode, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,HDim>
+{
+  static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
+  {
+    transform->affine() = other;
+    transform->makeAffine();
+  }
+};
+
+template<typename Other, int Mode, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, HDim,HDim>
+{
+  static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)
+  { transform->matrix() = other; }
+};
+
+template<typename Other, int Options, int Dim, int HDim>
+struct transform_construct_from_matrix<Other, AffineCompact,Options,Dim,HDim, HDim,HDim>
+{
+  static inline void run(Transform<typename Other::Scalar,Dim,AffineCompact,Options> *transform, const Other& other)
+  { transform->matrix() = other.template block<Dim,HDim>(0,0); }
+};
+
+/**********************************************************
+***   Specializations of operator* with rhs EigenBase   ***
+**********************************************************/
+
+template<int LhsMode,int RhsMode>
+struct transform_product_result
+{
+  enum 
+  { 
+    Mode =
+      (LhsMode == (int)Projective    || RhsMode == (int)Projective    ) ? Projective :
+      (LhsMode == (int)Affine        || RhsMode == (int)Affine        ) ? Affine :
+      (LhsMode == (int)AffineCompact || RhsMode == (int)AffineCompact ) ? AffineCompact :
+      (LhsMode == (int)Isometry      || RhsMode == (int)Isometry      ) ? Isometry : Projective
+  };
+};
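+// For instance, following the rules above: Isometry * Isometry => Isometry,
+// Affine * Isometry => Affine, and any product involving a Projective operand
+// yields Projective.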
+
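+// case 0: the rhs is directly compatible with the stored transformation matrix,
+// so a plain matrix product does the job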
+template< typename TransformType, typename MatrixType >
+struct transform_right_product_impl< TransformType, MatrixType, 0 >
+{
+  typedef typename MatrixType::PlainObject ResultType;
+
+  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
+  {
+    return T.matrix() * other;
+  }
+};
+
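+// case 1: the rhs is a homogeneous (HDim x N) matrix: apply the affine part to
+// it and pass its last row through unchanged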
+template< typename TransformType, typename MatrixType >
+struct transform_right_product_impl< TransformType, MatrixType, 1 >
+{
+  enum { 
+    Dim = TransformType::Dim, 
+    HDim = TransformType::HDim,
+    OtherRows = MatrixType::RowsAtCompileTime,
+    OtherCols = MatrixType::ColsAtCompileTime
+  };
+
+  typedef typename MatrixType::PlainObject ResultType;
+
+  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
+  {
+    EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
+
+    typedef Block<ResultType, Dim, OtherCols, int(MatrixType::RowsAtCompileTime)==Dim> TopLeftLhs;
+
+    ResultType res(other.rows(),other.cols());
+    TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other;
+    res.row(OtherRows-1) = other.row(OtherRows-1);
+    
+    return res;
+  }
+};
+
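+// case 2: the rhs is a (Dim x N) matrix: apply the linear part and add the
+// translation to every column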
+template< typename TransformType, typename MatrixType >
+struct transform_right_product_impl< TransformType, MatrixType, 2 >
+{
+  enum { 
+    Dim = TransformType::Dim, 
+    HDim = TransformType::HDim,
+    OtherRows = MatrixType::RowsAtCompileTime,
+    OtherCols = MatrixType::ColsAtCompileTime
+  };
+
+  typedef typename MatrixType::PlainObject ResultType;
+
+  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)
+  {
+    EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);
+
+    typedef Block<ResultType, Dim, OtherCols, true> TopLeftLhs;
+    ResultType res(Replicate<typename TransformType::ConstTranslationPart, 1, OtherCols>(T.translation(),1,other.cols()));
+    TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() += T.linear() * other;
+
+    return res;
+  }
+};
+
+/**********************************************************
+***   Specializations of operator* with lhs EigenBase   ***
+**********************************************************/
+
+// generic HDim x HDim matrix * T => Projective
+template<typename Other,int Mode, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, HDim,HDim>
+{
+  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;
+  static ResultType run(const Other& other,const TransformType& tr)
+  { return ResultType(other * tr.matrix()); }
+};
+
+// generic HDim x HDim matrix * AffineCompact => Projective
+template<typename Other, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, HDim,HDim>
+{
+  typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;
+  static ResultType run(const Other& other,const TransformType& tr)
+  {
+    ResultType res;
+    res.matrix().noalias() = other.template block<HDim,Dim>(0,0) * tr.matrix();
+    res.matrix().col(Dim) += other.col(Dim);
+    return res;
+  }
+};
+
+// affine matrix * T
+template<typename Other,int Mode, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,HDim>
+{
+  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef TransformType ResultType;
+  static ResultType run(const Other& other,const TransformType& tr)
+  {
+    ResultType res;
+    res.affine().noalias() = other * tr.matrix();
+    res.matrix().row(Dim) = tr.matrix().row(Dim);
+    return res;
+  }
+};
+
+// affine matrix * AffineCompact
+template<typename Other, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, Dim,HDim>
+{
+  typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef TransformType ResultType;
+  static ResultType run(const Other& other,const TransformType& tr)
+  {
+    ResultType res;
+    res.matrix().noalias() = other.template block<Dim,Dim>(0,0) * tr.matrix();
+    res.translation() += other.col(Dim);
+    return res;
+  }
+};
+
+// linear matrix * T
+template<typename Other,int Mode, int Options, int Dim, int HDim>
+struct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,Dim>
+{
+  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;
+  typedef typename TransformType::MatrixType MatrixType;
+  typedef TransformType ResultType;
+  static ResultType run(const Other& other, const TransformType& tr)
+  {
+    TransformType res;
+    if(Mode!=int(AffineCompact))
+      res.matrix().row(Dim) = tr.matrix().row(Dim);
+    res.matrix().template topRows<Dim>().noalias()
+      = other * tr.matrix().template topRows<Dim>();
+    return res;
+  }
+};
+
+/**********************************************************
+*** Specializations of operator* with another Transform ***
+**********************************************************/
+
+template<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>
+struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,false >
+{
+  enum { ResultMode = transform_product_result<LhsMode,RhsMode>::Mode };
+  typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;
+  typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;
+  typedef Transform<Scalar,Dim,ResultMode,LhsOptions> ResultType;
+  static ResultType run(const Lhs& lhs, const Rhs& rhs)
+  {
+    ResultType res;
+    res.linear() = lhs.linear() * rhs.linear();
+    res.translation() = lhs.linear() * rhs.translation() + lhs.translation();
+    res.makeAffine();
+    return res;
+  }
+};
+
+template<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>
+struct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,true >
+{
+  typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;
+  typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;
+  typedef Transform<Scalar,Dim,Projective> ResultType;
+  static ResultType run(const Lhs& lhs, const Rhs& rhs)
+  {
+    return ResultType( lhs.matrix() * rhs.matrix() );
+  }
+};
+
+template<typename Scalar, int Dim, int LhsOptions, int RhsOptions>
+struct transform_transform_product_impl<Transform<Scalar,Dim,AffineCompact,LhsOptions>,Transform<Scalar,Dim,Projective,RhsOptions>,true >
+{
+  typedef Transform<Scalar,Dim,AffineCompact,LhsOptions> Lhs;
+  typedef Transform<Scalar,Dim,Projective,RhsOptions> Rhs;
+  typedef Transform<Scalar,Dim,Projective> ResultType;
+  static ResultType run(const Lhs& lhs, const Rhs& rhs)
+  {
+    ResultType res;
+    res.matrix().template topRows<Dim>() = lhs.matrix() * rhs.matrix();
+    res.matrix().row(Dim) = rhs.matrix().row(Dim);
+    return res;
+  }
+};
+
+template<typename Scalar, int Dim, int LhsOptions, int RhsOptions>
+struct transform_transform_product_impl<Transform<Scalar,Dim,Projective,LhsOptions>,Transform<Scalar,Dim,AffineCompact,RhsOptions>,true >
+{
+  typedef Transform<Scalar,Dim,Projective,LhsOptions> Lhs;
+  typedef Transform<Scalar,Dim,AffineCompact,RhsOptions> Rhs;
+  typedef Transform<Scalar,Dim,Projective> ResultType;
+  static ResultType run(const Lhs& lhs, const Rhs& rhs)
+  {
+    ResultType res(lhs.matrix().template leftCols<Dim>() * rhs.matrix());
+    res.matrix().col(Dim) += lhs.matrix().col(Dim);
+    return res;
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRANSFORM_H
diff --git a/Eigen/src/Geometry/Translation.h b/Eigen/src/Geometry/Translation.h
new file mode 100644
index 0000000..7fda179
--- /dev/null
+++ b/Eigen/src/Geometry/Translation.h
@@ -0,0 +1,206 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_TRANSLATION_H
+#define EIGEN_TRANSLATION_H
+
+namespace Eigen { 
+
+/** \geometry_module \ingroup Geometry_Module
+  *
+  * \class Translation
+  *
+  * \brief Represents a translation transformation
+  *
+  * \param _Scalar the scalar type, i.e., the type of the coefficients.
+  * \param _Dim the dimension of the space, which can be a compile-time value or Dynamic
+  *
+  * \note This class is not aimed at storing a translation transformation,
+  * but rather at easing the construction and update of Transform objects.
+  *
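+  * A minimal sketch (values are illustrative):
+  * \code
+  * Affine3f T = Translation3f(1.f, 2.f, 3.f) * AngleAxisf(0.3f, Vector3f::UnitX());
+  * T = Translation3f(0.f, 1.f, 0.f) * T; // prepend another translation
+  * \endcode
+  *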
+  * \sa class Scaling, class Transform
+  */
+template<typename _Scalar, int _Dim>
+class Translation
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)
+  /** dimension of the space */
+  enum { Dim = _Dim };
+  /** the scalar type of the coefficients */
+  typedef _Scalar Scalar;
+  /** corresponding vector type */
+  typedef Matrix<Scalar,Dim,1> VectorType;
+  /** corresponding linear transformation matrix type */
+  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;
+  /** corresponding affine transformation type */
+  typedef Transform<Scalar,Dim,Affine> AffineTransformType;
+  /** corresponding isometric transformation type */
+  typedef Transform<Scalar,Dim,Isometry> IsometryTransformType;
+
+protected:
+
+  VectorType m_coeffs;
+
+public:
+
+  /** Default constructor without initialization. */
+  Translation() {}
+  /** Constructs and initializes a 2D translation from the coefficients \a sx and \a sy */
+  inline Translation(const Scalar& sx, const Scalar& sy)
+  {
+    eigen_assert(Dim==2);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+  }
+  /** Constructs and initializes a 3D translation from the coefficients \a sx, \a sy, and \a sz */
+  inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz)
+  {
+    eigen_assert(Dim==3);
+    m_coeffs.x() = sx;
+    m_coeffs.y() = sy;
+    m_coeffs.z() = sz;
+  }
+  /** Constructs and initializes the translation transformation from a vector of translation coefficients */
+  explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {}
+
+  /** \brief Returns the x-translation by value. **/
+  inline Scalar x() const { return m_coeffs.x(); }
+  /** \brief Returns the y-translation by value. **/
+  inline Scalar y() const { return m_coeffs.y(); }
+  /** \brief Returns the z-translation by value. **/
+  inline Scalar z() const { return m_coeffs.z(); }
+
+  /** \brief Returns the x-translation as a reference. **/
+  inline Scalar& x() { return m_coeffs.x(); }
+  /** \brief Returns the y-translation as a reference. **/
+  inline Scalar& y() { return m_coeffs.y(); }
+  /** \brief Returns the z-translation as a reference. **/
+  inline Scalar& z() { return m_coeffs.z(); }
+
+  const VectorType& vector() const { return m_coeffs; }
+  VectorType& vector() { return m_coeffs; }
+
+  const VectorType& translation() const { return m_coeffs; }
+  VectorType& translation() { return m_coeffs; }
+
+  /** Concatenates two translations */
+  inline Translation operator* (const Translation& other) const
+  { return Translation(m_coeffs + other.m_coeffs); }
+
+  /** Concatenates a translation and a uniform scaling */
+  inline AffineTransformType operator* (const UniformScaling<Scalar>& other) const;
+
+  /** Concatenates a translation and a linear transformation */
+  template<typename OtherDerived>
+  inline AffineTransformType operator* (const EigenBase<OtherDerived>& linear) const;
+
+  /** Concatenates a translation and a rotation */
+  template<typename Derived>
+  inline IsometryTransformType operator*(const RotationBase<Derived,Dim>& r) const
+  { return *this * IsometryTransformType(r); }
+
+  /** \returns the concatenation of a linear transformation \a linear with the translation \a t */
+  // it's a nightmare to define a templated friend function outside of its declaration
+  template<typename OtherDerived> friend
+  inline AffineTransformType operator*(const EigenBase<OtherDerived>& linear, const Translation& t)
+  {
+    AffineTransformType res;
+    res.matrix().setZero();
+    res.linear() = linear.derived();
+    res.translation() = linear.derived() * t.m_coeffs;
+    res.matrix().row(Dim).setZero();
+    res(Dim,Dim) = Scalar(1);
+    return res;
+  }
+
+  /** Concatenates a translation and a transformation */
+  template<int Mode, int Options>
+  inline Transform<Scalar,Dim,Mode> operator* (const Transform<Scalar,Dim,Mode,Options>& t) const
+  {
+    Transform<Scalar,Dim,Mode> res = t;
+    res.pretranslate(m_coeffs);
+    return res;
+  }
+
+  /** Applies translation to vector */
+  inline VectorType operator* (const VectorType& other) const
+  { return m_coeffs + other; }
+
+  /** \returns the inverse translation (opposite) */
+  Translation inverse() const { return Translation(-m_coeffs); }
+
+  Translation& operator=(const Translation& other)
+  {
+    m_coeffs = other.m_coeffs;
+    return *this;
+  }
+
+  static const Translation Identity() { return Translation(VectorType::Zero()); }
+
+  /** \returns \c *this with scalar type casted to \a NewScalarType
+    *
+    * Note that if \a NewScalarType is equal to the current scalar type of \c *this
+    * then this function smartly returns a const reference to \c *this.
+    */
+  template<typename NewScalarType>
+  inline typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const
+  { return typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); }
+
+  /** Copy constructor with scalar type conversion */
+  template<typename OtherScalarType>
+  inline explicit Translation(const Translation<OtherScalarType,Dim>& other)
+  { m_coeffs = other.vector().template cast<Scalar>(); }
+
+  /** \returns \c true if \c *this is approximately equal to \a other, within the precision
+    * determined by \a prec.
+    *
+    * \sa MatrixBase::isApprox() */
+  bool isApprox(const Translation& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
+  { return m_coeffs.isApprox(other.m_coeffs, prec); }
+
+};
+
+/** \addtogroup Geometry_Module */
+//@{
+typedef Translation<float, 2> Translation2f;
+typedef Translation<double,2> Translation2d;
+typedef Translation<float, 3> Translation3f;
+typedef Translation<double,3> Translation3d;
+//@}
+
+template<typename Scalar, int Dim>
+inline typename Translation<Scalar,Dim>::AffineTransformType
+Translation<Scalar,Dim>::operator* (const UniformScaling<Scalar>& other) const
+{
+  AffineTransformType res;
+  res.matrix().setZero();
+  res.linear().diagonal().fill(other.factor());
+  res.translation() = m_coeffs;
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+template<typename Scalar, int Dim>
+template<typename OtherDerived>
+inline typename Translation<Scalar,Dim>::AffineTransformType
+Translation<Scalar,Dim>::operator* (const EigenBase<OtherDerived>& linear) const
+{
+  AffineTransformType res;
+  res.matrix().setZero();
+  res.linear() = linear.derived();
+  res.translation() = m_coeffs;
+  res.matrix().row(Dim).setZero();
+  res(Dim,Dim) = Scalar(1);
+  return res;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_TRANSLATION_H
diff --git a/Eigen/src/Geometry/Umeyama.h b/Eigen/src/Geometry/Umeyama.h
new file mode 100644
index 0000000..5e20662
--- /dev/null
+++ b/Eigen/src/Geometry/Umeyama.h
@@ -0,0 +1,177 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_UMEYAMA_H
+#define EIGEN_UMEYAMA_H
+
+// This file requires the user to include 
+// * Eigen/Core
+// * Eigen/LU 
+// * Eigen/SVD
+// * Eigen/Array
+
+namespace Eigen { 
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+// These helpers are required since they allow the use of mixed types as parameters
+// for umeyama(). The problem with mixed parameters is that the return type
+// cannot trivially be deduced when float and double types are mixed.
+namespace internal {
+
+// Compile time return type deduction for different MatrixBase types.
+// Here, "different" means different alignment and parameters, but the same
+// underlying real scalar type.
+template<typename MatrixType, typename OtherMatrixType>
+struct umeyama_transform_matrix_type
+{
+  enum {
+    MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime),
+
+    // When possible we want to choose some small fixed size value since the result
+    // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want.
+    HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? Dynamic : int(MinRowsAtCompileTime)+1
+  };
+
+  typedef Matrix<typename traits<MatrixType>::Scalar,
+    HomogeneousDimension,
+    HomogeneousDimension,
+    AutoAlign | (traits<MatrixType>::Flags & RowMajorBit ? RowMajor : ColMajor),
+    HomogeneousDimension,
+    HomogeneousDimension
+  > type;
+};
+
+}
+
+#endif
+
+/**
+* \geometry_module \ingroup Geometry_Module
+*
+* \brief Returns the transformation between two point sets.
+*
+* The algorithm is based on:
+* "Least-squares estimation of transformation parameters between two point patterns",
+* Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573
+*
+* It estimates parameters \f$ c, \mathbf{R}, \f$ and \f$ \mathbf{t} \f$ such that
+* \f{align*}
+*   \frac{1}{n} \sum_{i=1}^n \vert\vert y_i - (c\mathbf{R}x_i + \mathbf{t}) \vert\vert_2^2
+* \f}
+* is minimized.
+*
+* The algorithm is based on the analysis of the covariance matrix
+* \f$ \Sigma_{\mathbf{x}\mathbf{y}} \in \mathbb{R}^{d \times d} \f$
+* of the input point sets \f$ \mathbf{x} \f$ and \f$ \mathbf{y} \f$ where 
+* \f$d\f$ corresponds to the dimension (which is typically small).
+* The analysis involves the SVD, which has a complexity of \f$O(d^3)\f$,
+* though the actual computational effort lies in the covariance
+* matrix computation, which has an asymptotic lower bound of \f$O(dm)\f$ when
+* the input point sets have dimension \f$d \times m\f$.
+*
+* Currently the method works only for floating point matrices.
+*
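+* A minimal usage sketch (sizes and values are illustrative):
+* \code
+* Matrix3Xd src = Matrix3Xd::Random(3, 20);
+* Affine3d T = Translation3d(1,2,3) * AngleAxisd(0.3, Vector3d::UnitY());
+* Matrix3Xd dst = T * src;
+* Matrix4d T_est = umeyama(src, dst); // recovers T.matrix() up to rounding
+* \endcode
+*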
+* \todo Should the return type of umeyama() become a Transform?
+*
+* \param src Source points \f$ \mathbf{x} = \left( x_1, \hdots, x_n \right) \f$.
+* \param dst Destination points \f$ \mathbf{y} = \left( y_1, \hdots, y_n \right) \f$.
+* \param with_scaling Sets \f$ c=1 \f$ when <code>false</code> is passed.
+* \return The homogeneous transformation 
+* \f{align*}
+*   T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix}
+* \f}
+* minimizing the residual above. This transformation is always returned as an 
+* Eigen::Matrix.
+*/
+template <typename Derived, typename OtherDerived>
+typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type
+umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, bool with_scaling = true)
+{
+  typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;
+  typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef typename Derived::Index Index;
+
+  EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
+  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value),
+    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+  enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };
+
+  typedef Matrix<Scalar, Dimension, 1> VectorType;
+  typedef Matrix<Scalar, Dimension, Dimension> MatrixType;
+  typedef typename internal::plain_matrix_type_row_major<Derived>::type RowMajorMatrixType;
+
+  const Index m = src.rows(); // dimension
+  const Index n = src.cols(); // number of measurements
+
+  // required for demeaning ...
+  const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n);
+
+  // computation of mean
+  const VectorType src_mean = src.rowwise().sum() * one_over_n;
+  const VectorType dst_mean = dst.rowwise().sum() * one_over_n;
+
+  // demeaning of src and dst points
+  const RowMajorMatrixType src_demean = src.colwise() - src_mean;
+  const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean;
+
+  // Eq. (36)-(37)
+  const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n;
+
+  // Eq. (38)
+  const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose();
+
+  JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV);
+
+  // Initialize the resulting transformation with an identity matrix...
+  TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1);
+
+  // Eq. (39)
+  VectorType S = VectorType::Ones(m);
+  if (sigma.determinant()<Scalar(0)) S(m-1) = Scalar(-1);
+
+  // Eq. (40) and (43)
+  const VectorType& d = svd.singularValues();
+  Index rank = 0;
+  for (Index i=0; i<m; ++i)
+    if (!internal::isMuchSmallerThan(d.coeff(i), d.coeff(0)))
+      ++rank;
+  if (rank == m-1) {
+    if ( svd.matrixU().determinant() * svd.matrixV().determinant() > Scalar(0) ) {
+      Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose();
+    } else {
+      const Scalar s = S(m-1); S(m-1) = Scalar(-1);
+      Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
+      S(m-1) = s;
+    }
+  } else {
+    Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
+  }
+
+  if (with_scaling)
+  {
+    // Eq. (42)
+    const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S);
+
+    // Eq. (41)
+    Rt.col(m).head(m) = dst_mean;
+    Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean;
+    Rt.block(0,0,m,m) *= c;
+  }
+  else
+  {
+    Rt.col(m).head(m) = dst_mean;
+    Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean;
+  }
+
+  return Rt;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_UMEYAMA_H
diff --git a/Eigen/src/Geometry/arch/CMakeLists.txt b/Eigen/src/Geometry/arch/CMakeLists.txt
new file mode 100644
index 0000000..1267a79
--- /dev/null
+++ b/Eigen/src/Geometry/arch/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Geometry_arch_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Geometry_arch_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Geometry/arch COMPONENT Devel
+  )
diff --git a/Eigen/src/Geometry/arch/Geometry_SSE.h b/Eigen/src/Geometry/arch/Geometry_SSE.h
new file mode 100644
index 0000000..3d8284f
--- /dev/null
+++ b/Eigen/src/Geometry/arch/Geometry_SSE.h
@@ -0,0 +1,115 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Rohit Garg <rpg.314@gmail.com>
+// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GEOMETRY_SSE_H
+#define EIGEN_GEOMETRY_SSE_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<class Derived, class OtherDerived>
+struct quat_product<Architecture::SSE, Derived, OtherDerived, float, Aligned>
+{
+  static inline Quaternion<float> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
+  {
+    const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0,0,0,0x80000000));
+    Quaternion<float> res;
+    __m128 a = _a.coeffs().template packet<Aligned>(0);
+    __m128 b = _b.coeffs().template packet<Aligned>(0);
+    __m128 flip1 = _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a,1,2,0,2),
+                                         vec4f_swizzle1(b,2,0,1,2)),mask);
+    __m128 flip2 = _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a,3,3,3,1),
+                                         vec4f_swizzle1(b,0,1,2,1)),mask);
+    pstore(&res.x(),
+              _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)),
+                                    _mm_mul_ps(vec4f_swizzle1(a,2,0,1,0),
+                                               vec4f_swizzle1(b,1,2,0,0))),
+                         _mm_add_ps(flip1,flip2)));
+    return res;
+  }
+};
+
+template<typename VectorLhs,typename VectorRhs>
+struct cross3_impl<Architecture::SSE,VectorLhs,VectorRhs,float,true>
+{
+  static inline typename plain_matrix_type<VectorLhs>::type
+  run(const VectorLhs& lhs, const VectorRhs& rhs)
+  {
+    __m128 a = lhs.template packet<VectorLhs::Flags&AlignedBit ? Aligned : Unaligned>(0);
+    __m128 b = rhs.template packet<VectorRhs::Flags&AlignedBit ? Aligned : Unaligned>(0);
+    __m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3));
+    __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3));
+    typename plain_matrix_type<VectorLhs>::type res;
+    pstore(&res.x(),_mm_sub_ps(mul1,mul2));
+    return res;
+  }
+};
+
+
+
+
+template<class Derived, class OtherDerived>
+struct quat_product<Architecture::SSE, Derived, OtherDerived, double, Aligned>
+{
+  static inline Quaternion<double> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)
+  {
+  const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
+
+  Quaternion<double> res;
+
+  const double* a = _a.coeffs().data();
+  Packet2d b_xy = _b.coeffs().template packet<Aligned>(0);
+  Packet2d b_zw = _b.coeffs().template packet<Aligned>(2);
+  Packet2d a_xx = pset1<Packet2d>(a[0]);
+  Packet2d a_yy = pset1<Packet2d>(a[1]);
+  Packet2d a_zz = pset1<Packet2d>(a[2]);
+  Packet2d a_ww = pset1<Packet2d>(a[3]);
+
+  // two temporaries:
+  Packet2d t1, t2;
+
+  /*
+   * t1 = ww*xy + yy*zw
+   * t2 = zz*xy - xx*zw
+   * res.xy = t1 +/- swap(t2)
+   */
+  t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw));
+  t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw));
+#ifdef EIGEN_VECTORIZE_SSE3
+  EIGEN_UNUSED_VARIABLE(mask)
+  pstore(&res.x(), _mm_addsub_pd(t1, preverse(t2)));
+#else
+  pstore(&res.x(), padd(t1, pxor(mask,preverse(t2))));
+#endif
+  
+  /*
+   * t1 = ww*zw - yy*xy
+   * t2 = zz*zw + xx*xy
+   * res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2)
+   */
+  t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy));
+  t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy));
+#ifdef EIGEN_VECTORIZE_SSE3
+  EIGEN_UNUSED_VARIABLE(mask)
+  pstore(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2)));
+#else
+  pstore(&res.z(), psub(t1, pxor(mask,preverse(t2))));
+#endif
+
+  return res;
+}
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GEOMETRY_SSE_H
diff --git a/Eigen/src/Householder/BlockHouseholder.h b/Eigen/src/Householder/BlockHouseholder.h
new file mode 100644
index 0000000..60dbea5
--- /dev/null
+++ b/Eigen/src/Householder/BlockHouseholder.h
@@ -0,0 +1,68 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Vincent Lejeune
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BLOCK_HOUSEHOLDER_H
+#define EIGEN_BLOCK_HOUSEHOLDER_H
+
+// This file contains some helper functions to deal with block Householder reflectors
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \internal */
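+// Computes the upper-triangular factor T of the block (compact WY) representation,
+// so that the product of the individual reflectors H_i = I - hCoeffs(i) v_i v_i^*
+// equals I - V T V^*, where V stores the essential Householder vectors.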
+template<typename TriangularFactorType,typename VectorsType,typename CoeffsType>
+void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs)
+{
+  typedef typename TriangularFactorType::Index Index;
+  typedef typename VectorsType::Scalar Scalar;
+  const Index nbVecs = vectors.cols();
+  eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs);
+
+  for(Index i = 0; i < nbVecs; i++)
+  {
+    Index rs = vectors.rows() - i;
+    Scalar Vii = vectors(i,i);
+    vectors.const_cast_derived().coeffRef(i,i) = Scalar(1);
+    triFactor.col(i).head(i).noalias() = -hCoeffs(i) * vectors.block(i, 0, rs, i).adjoint()
+                                       * vectors.col(i).tail(rs);
+    vectors.const_cast_derived().coeffRef(i, i) = Vii;
+    // FIXME add .noalias() once the triangular product can work inplace
+    triFactor.col(i).head(i) = triFactor.block(0,0,i,i).template triangularView<Upper>()
+                             * triFactor.col(i).head(i);
+    triFactor(i,i) = hCoeffs(i);
+  }
+}
+
+/** \internal */
+template<typename MatrixType,typename VectorsType,typename CoeffsType>
+void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs)
+{
+  typedef typename MatrixType::Index Index;
+  enum { TFactorSize = MatrixType::ColsAtCompileTime };
+  Index nbVecs = vectors.cols();
+  Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize, ColMajor> T(nbVecs,nbVecs);
+  make_block_householder_triangular_factor(T, vectors, hCoeffs);
+
+  const TriangularView<const VectorsType, UnitLower>& V(vectors);
+
+  // A -= V T^* V^* A
+  Matrix<typename MatrixType::Scalar,VectorsType::ColsAtCompileTime,MatrixType::ColsAtCompileTime,0,
+         VectorsType::MaxColsAtCompileTime,MatrixType::MaxColsAtCompileTime> tmp = V.adjoint() * mat;
+  // FIXME add .noalias() once the triangular product can work inplace
+  tmp = T.template triangularView<Upper>().adjoint() * tmp;
+  mat.noalias() -= V * tmp;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BLOCK_HOUSEHOLDER_H
diff --git a/Eigen/src/Householder/CMakeLists.txt b/Eigen/src/Householder/CMakeLists.txt
new file mode 100644
index 0000000..ce4937d
--- /dev/null
+++ b/Eigen/src/Householder/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Householder_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Householder_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Householder COMPONENT Devel
+  )
diff --git a/Eigen/src/Householder/Householder.h b/Eigen/src/Householder/Householder.h
new file mode 100644
index 0000000..32112af
--- /dev/null
+++ b/Eigen/src/Householder/Householder.h
@@ -0,0 +1,171 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_HOUSEHOLDER_H
+#define EIGEN_HOUSEHOLDER_H
+
+namespace Eigen { 
+
+namespace internal {
+template<int n> struct decrement_size
+{
+  enum {
+    ret = n==Dynamic ? n : n-1
+  };
+};
+}
+
+/** Computes the elementary reflector H such that:
+  * \f$ H \, \mbox{*this} = [\beta \; 0 \; \ldots \; 0]^T \f$
+  * where the transformation H is:
+  * \f$ H = I - \tau v v^* \f$
+  * and the vector v is:
+  * \f$ v^T = [1 \;\; \mbox{essential}^T] \f$
+  *
+  * The essential part of the vector \c v is stored in *this.
+  * 
+  * On output:
+  * \param tau the scaling factor of the Householder transformation
+  * \param beta the result of H * \c *this
+  *
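+  * A minimal sketch:
+  * \code
+  * Vector4d v = Vector4d::Random();
+  * double tau, beta;
+  * v.makeHouseholderInPlace(tau, beta);
+  * // v.tail(3) now holds the essential part of the Householder vector
+  * \endcode
+  *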
+  * \sa MatrixBase::makeHouseholder(), MatrixBase::applyHouseholderOnTheLeft(),
+  *     MatrixBase::applyHouseholderOnTheRight()
+  */
+template<typename Derived>
+void MatrixBase<Derived>::makeHouseholderInPlace(Scalar& tau, RealScalar& beta)
+{
+  VectorBlock<Derived, internal::decrement_size<Base::SizeAtCompileTime>::ret> essentialPart(derived(), 1, size()-1);
+  makeHouseholder(essentialPart, tau, beta);
+}
+
+/** Computes the elementary reflector H such that:
+  * \f$ H \, \mbox{*this} = [\beta \; 0 \; \ldots \; 0]^T \f$
+  * where the transformation H is:
+  * \f$ H = I - \tau v v^* \f$
+  * and the vector v is:
+  * \f$ v^T = [1 \;\; \mbox{essential}^T] \f$
+  *
+  * On output:
+  * \param essential the essential part of the vector \c v
+  * \param tau the scaling factor of the Householder transformation
+  * \param beta the result of H * \c *this
+  *
+  * \sa MatrixBase::makeHouseholderInPlace(), MatrixBase::applyHouseholderOnTheLeft(),
+  *     MatrixBase::applyHouseholderOnTheRight()
+  */
+template<typename Derived>
+template<typename EssentialPart>
+void MatrixBase<Derived>::makeHouseholder(
+  EssentialPart& essential,
+  Scalar& tau,
+  RealScalar& beta) const
+{
+  using std::sqrt;
+  using numext::conj;
+  
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart)
+  VectorBlock<const Derived, EssentialPart::SizeAtCompileTime> tail(derived(), 1, size()-1);
+  
+  RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm();
+  Scalar c0 = coeff(0);
+
+  if(tailSqNorm == RealScalar(0) && numext::imag(c0)==RealScalar(0))
+  {
+    tau = RealScalar(0);
+    beta = numext::real(c0);
+    essential.setZero();
+  }
+  else
+  {
+    beta = sqrt(numext::abs2(c0) + tailSqNorm);
+    if (numext::real(c0)>=RealScalar(0))
+      beta = -beta;
+    essential = tail / (c0 - beta);
+    tau = conj((beta - c0) / beta);
+  }
+}
+
+/** Apply the elementary reflector H given by
+  * \f$ H = I - \tau v v^* \f$
+  * with
+  * \f$ v^T = [1 \;\; \mbox{essential}^T] \f$
+  * from the left to a vector or matrix.
+  *
+  * On input:
+  * \param essential the essential part of the vector \c v
+  * \param tau the scaling factor of the Householder transformation
+  * \param workspace a pointer to working space with at least
+  *                  this->cols() entries
+  *
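+  * A minimal sketch, zeroing the subdiagonal of the first column of a matrix:
+  * \code
+  * MatrixXd A = MatrixXd::Random(4,4);
+  * Vector3d essential; double tau, beta;
+  * A.col(0).makeHouseholder(essential, tau, beta);
+  * VectorXd workspace(A.cols());
+  * A.applyHouseholderOnTheLeft(essential, tau, workspace.data());
+  * // A.col(0) is now [beta, 0, 0, 0]^T up to rounding
+  * \endcode
+  *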
+  * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), 
+  *     MatrixBase::applyHouseholderOnTheRight()
+  */
+template<typename Derived>
+template<typename EssentialPart>
+void MatrixBase<Derived>::applyHouseholderOnTheLeft(
+  const EssentialPart& essential,
+  const Scalar& tau,
+  Scalar* workspace)
+{
+  if(rows() == 1)
+  {
+    *this *= Scalar(1)-tau;
+  }
+  else
+  {
+    Map<typename internal::plain_row_type<PlainObject>::type> tmp(workspace,cols());
+    Block<Derived, EssentialPart::SizeAtCompileTime, Derived::ColsAtCompileTime> bottom(derived(), 1, 0, rows()-1, cols());
+    tmp.noalias() = essential.adjoint() * bottom;
+    tmp += this->row(0);
+    this->row(0) -= tau * tmp;
+    bottom.noalias() -= tau * essential * tmp;
+  }
+}
+
+/** Apply the elementary reflector H given by
+  * \f$ H = I - \tau v v^* \f$
+  * with
+  * \f$ v^T = [1 \;\; \mbox{essential}^T] \f$
+  * from the right to a vector or matrix.
+  *
+  * On input:
+  * \param essential the essential part of the vector \c v
+  * \param tau the scaling factor of the Householder transformation
+  * \param workspace a pointer to working space with at least
+  *                  this->rows() entries
+  *
+  * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), 
+  *     MatrixBase::applyHouseholderOnTheLeft()
+  */
+template<typename Derived>
+template<typename EssentialPart>
+void MatrixBase<Derived>::applyHouseholderOnTheRight(
+  const EssentialPart& essential,
+  const Scalar& tau,
+  Scalar* workspace)
+{
+  if(cols() == 1)
+  {
+    *this *= Scalar(1)-tau;
+  }
+  else
+  {
+    Map<typename internal::plain_col_type<PlainObject>::type> tmp(workspace,rows());
+    Block<Derived, Derived::RowsAtCompileTime, EssentialPart::SizeAtCompileTime> right(derived(), 0, 1, rows(), cols()-1);
+    tmp.noalias() = right * essential.conjugate();
+    tmp += this->col(0);
+    this->col(0) -= tau * tmp;
+    right.noalias() -= tau * tmp * essential.transpose();
+  }
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_HOUSEHOLDER_H
diff --git a/Eigen/src/Householder/HouseholderSequence.h b/Eigen/src/Householder/HouseholderSequence.h
new file mode 100644
index 0000000..d800ca1
--- /dev/null
+++ b/Eigen/src/Householder/HouseholderSequence.h
@@ -0,0 +1,441 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_HOUSEHOLDER_SEQUENCE_H
+#define EIGEN_HOUSEHOLDER_SEQUENCE_H
+
+namespace Eigen { 
+
+/** \ingroup Householder_Module
+  * \householder_module
+  * \class HouseholderSequence
+  * \brief Sequence of Householder reflections acting on subspaces with decreasing size
+  * \tparam VectorsType type of matrix containing the Householder vectors
+  * \tparam CoeffsType  type of vector containing the Householder coefficients
+  * \tparam Side        either OnTheLeft (the default) or OnTheRight
+  *
+  * This class represents a product sequence of Householder reflections where the first Householder reflection
+  * acts on the whole space, the second Householder reflection leaves the one-dimensional subspace spanned by
+  * the first unit vector invariant, the third Householder reflection leaves the two-dimensional subspace
+  * spanned by the first two unit vectors invariant, and so on up to the last reflection which leaves all but
+  * one dimensions invariant and acts only on the last dimension. Such sequences of Householder reflections
+  * are used in several algorithms to zero out certain parts of a matrix. Indeed, the methods
+  * HessenbergDecomposition::matrixQ(), Tridiagonalization::matrixQ(), HouseholderQR::householderQ(),
+  * and ColPivHouseholderQR::householderQ() all return a %HouseholderSequence.
+  *
+  * More precisely, the class %HouseholderSequence represents an \f$ n \times n \f$ matrix \f$ H \f$ of the
+  * form \f$ H = \prod_{i=0}^{n-1} H_i \f$ where the i-th Householder reflection is \f$ H_i = I - h_i v_i
+  * v_i^* \f$. The i-th Householder coefficient \f$ h_i \f$ is a scalar and the i-th Householder vector \f$
+  * v_i \f$ is a vector of the form
+  * \f[ 
+  * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. 
+  * \f]
+  * The last \f$ n-i \f$ entries of \f$ v_i \f$ are called the essential part of the Householder vector.
+  *
+  * Typical usages are listed below, where H is a HouseholderSequence:
+  * \code
+  * A.applyOnTheRight(H);             // A = A * H
+  * A.applyOnTheLeft(H);              // A = H * A
+  * A.applyOnTheRight(H.adjoint());   // A = A * H^*
+  * A.applyOnTheLeft(H.adjoint());    // A = H^* * A
+  * MatrixXd Q = H;                   // conversion to a dense matrix
+  * \endcode
+  * In addition to the adjoint, you can also apply the inverse (=adjoint), the transpose, and the conjugate operators.
+  *
+  * See the documentation for HouseholderSequence(const VectorsType&, const CoeffsType&) for an example.
+  *
+  * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+  */
+
+namespace internal {
+
+template<typename VectorsType, typename CoeffsType, int Side>
+struct traits<HouseholderSequence<VectorsType,CoeffsType,Side> >
+{
+  typedef typename VectorsType::Scalar Scalar;
+  typedef typename VectorsType::Index Index;
+  typedef typename VectorsType::StorageKind StorageKind;
+  enum {
+    RowsAtCompileTime = Side==OnTheLeft ? traits<VectorsType>::RowsAtCompileTime
+                                        : traits<VectorsType>::ColsAtCompileTime,
+    ColsAtCompileTime = RowsAtCompileTime,
+    MaxRowsAtCompileTime = Side==OnTheLeft ? traits<VectorsType>::MaxRowsAtCompileTime
+                                           : traits<VectorsType>::MaxColsAtCompileTime,
+    MaxColsAtCompileTime = MaxRowsAtCompileTime,
+    Flags = 0
+  };
+};
+
+template<typename VectorsType, typename CoeffsType, int Side>
+struct hseq_side_dependent_impl
+{
+  typedef Block<const VectorsType, Dynamic, 1> EssentialVectorType;
+  typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType;
+  typedef typename VectorsType::Index Index;
+  static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
+  {
+    Index start = k+1+h.m_shift;
+    return Block<const VectorsType,Dynamic,1>(h.m_vectors, start, k, h.rows()-start, 1);
+  }
+};
+
+template<typename VectorsType, typename CoeffsType>
+struct hseq_side_dependent_impl<VectorsType, CoeffsType, OnTheRight>
+{
+  typedef Transpose<Block<const VectorsType, 1, Dynamic> > EssentialVectorType;
+  typedef HouseholderSequence<VectorsType, CoeffsType, OnTheRight> HouseholderSequenceType;
+  typedef typename VectorsType::Index Index;
+  static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)
+  {
+    Index start = k+1+h.m_shift;
+    return Block<const VectorsType,1,Dynamic>(h.m_vectors, k, start, 1, h.rows()-start).transpose();
+  }
+};
+
+template<typename OtherScalarType, typename MatrixType> struct matrix_type_times_scalar_type
+{
+  typedef typename scalar_product_traits<OtherScalarType, typename MatrixType::Scalar>::ReturnType
+    ResultScalar;
+  typedef Matrix<ResultScalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime,
+                 0, MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime> Type;
+};
+
+} // end namespace internal
+
+template<typename VectorsType, typename CoeffsType, int Side> class HouseholderSequence
+  : public EigenBase<HouseholderSequence<VectorsType,CoeffsType,Side> >
+{
+    typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType EssentialVectorType;
+  
+  public:
+    enum {
+      RowsAtCompileTime = internal::traits<HouseholderSequence>::RowsAtCompileTime,
+      ColsAtCompileTime = internal::traits<HouseholderSequence>::ColsAtCompileTime,
+      MaxRowsAtCompileTime = internal::traits<HouseholderSequence>::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = internal::traits<HouseholderSequence>::MaxColsAtCompileTime
+    };
+    typedef typename internal::traits<HouseholderSequence>::Scalar Scalar;
+    typedef typename VectorsType::Index Index;
+
+    typedef HouseholderSequence<
+      typename internal::conditional<NumTraits<Scalar>::IsComplex,
+        typename internal::remove_all<typename VectorsType::ConjugateReturnType>::type,
+        VectorsType>::type,
+      typename internal::conditional<NumTraits<Scalar>::IsComplex,
+        typename internal::remove_all<typename CoeffsType::ConjugateReturnType>::type,
+        CoeffsType>::type,
+      Side
+    > ConjugateReturnType;
+
+    /** \brief Constructor.
+      * \param[in]  v      %Matrix containing the essential parts of the Householder vectors
+      * \param[in]  h      Vector containing the Householder coefficients
+      *
+      * Constructs the Householder sequence with coefficients given by \p h and vectors given by \p v. The
+      * i-th Householder coefficient \f$ h_i \f$ is given by \p h(i) and the essential part of the i-th
+      * Householder vector \f$ v_i \f$ is given by \p v(k,i) with \p k > \p i (the subdiagonal part of the
+      * i-th column). If \p v has fewer columns than rows, then the Householder sequence contains as many
+      * Householder reflections as there are columns.
+      *
+      * \note The %HouseholderSequence object stores \p v and \p h by reference.
+      *
+      * Example: \include HouseholderSequence_HouseholderSequence.cpp
+      * Output: \verbinclude HouseholderSequence_HouseholderSequence.out
+      *
+      * \sa setLength(), setShift()
+      */
+    HouseholderSequence(const VectorsType& v, const CoeffsType& h)
+      : m_vectors(v), m_coeffs(h), m_trans(false), m_length(v.diagonalSize()),
+        m_shift(0)
+    {
+    }
+
+    /** \brief Copy constructor. */
+    HouseholderSequence(const HouseholderSequence& other)
+      : m_vectors(other.m_vectors),
+        m_coeffs(other.m_coeffs),
+        m_trans(other.m_trans),
+        m_length(other.m_length),
+        m_shift(other.m_shift)
+    {
+    }
+
+    /** \brief Number of rows of transformation viewed as a matrix.
+      * \returns Number of rows 
+      * \details This equals the dimension of the space that the transformation acts on.
+      */
+    Index rows() const { return Side==OnTheLeft ? m_vectors.rows() : m_vectors.cols(); }
+
+    /** \brief Number of columns of transformation viewed as a matrix.
+      * \returns Number of columns
+      * \details This equals the dimension of the space that the transformation acts on.
+      */
+    Index cols() const { return rows(); }
+
+    /** \brief Essential part of a Householder vector.
+      * \param[in]  k  Index of Householder reflection
+      * \returns    Vector containing non-trivial entries of k-th Householder vector
+      *
+      * This function returns the essential part of the Householder vector \f$ v_i \f$. This is a vector of
+      * length \f$ n-i \f$ containing the last \f$ n-i \f$ entries of the vector
+      * \f[ 
+      * v_i = [\underbrace{0, \ldots, 0}_{i-1\mbox{ zeros}}, 1, \underbrace{*, \ldots,*}_{n-i\mbox{ arbitrary entries}} ]. 
+      * \f]
+      * The index \f$ i \f$ equals \p k + shift(), corresponding to the k-th column of the matrix \p v
+      * passed to the constructor.
+      *
+      * \sa setShift(), shift()
+      */
+    const EssentialVectorType essentialVector(Index k) const
+    {
+      eigen_assert(k >= 0 && k < m_length);
+      return internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::essentialVector(*this, k);
+    }
+
+    /** \brief %Transpose of the Householder sequence. */
+    HouseholderSequence transpose() const
+    {
+      return HouseholderSequence(*this).setTrans(!m_trans);
+    }
+
+    /** \brief Complex conjugate of the Householder sequence. */
+    ConjugateReturnType conjugate() const
+    {
+      return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate())
+             .setTrans(m_trans)
+             .setLength(m_length)
+             .setShift(m_shift);
+    }
+
+    /** \brief Adjoint (conjugate transpose) of the Householder sequence. */
+    ConjugateReturnType adjoint() const
+    {
+      return conjugate().setTrans(!m_trans);
+    }
+
+    /** \brief Inverse of the Householder sequence (equals the adjoint). */
+    ConjugateReturnType inverse() const { return adjoint(); }
+
+    /** \internal */
+    template<typename DestType> inline void evalTo(DestType& dst) const
+    {
+      Matrix<Scalar, DestType::RowsAtCompileTime, 1,
+             AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> workspace(rows());
+      evalTo(dst, workspace);
+    }
+
+    /** \internal */
+    template<typename Dest, typename Workspace>
+    void evalTo(Dest& dst, Workspace& workspace) const
+    {
+      workspace.resize(rows());
+      Index vecs = m_length;
+      if(    internal::is_same<typename internal::remove_all<VectorsType>::type,Dest>::value
+          && internal::extract_data(dst) == internal::extract_data(m_vectors))
+      {
+        // in-place
+        dst.diagonal().setOnes();
+        dst.template triangularView<StrictlyUpper>().setZero();
+        for(Index k = vecs-1; k >= 0; --k)
+        {
+          Index cornerSize = rows() - k - m_shift;
+          if(m_trans)
+            dst.bottomRightCorner(cornerSize, cornerSize)
+               .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data());
+          else
+            dst.bottomRightCorner(cornerSize, cornerSize)
+               .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), workspace.data());
+
+          // clear the off diagonal vector
+          dst.col(k).tail(rows()-k-1).setZero();
+        }
+        // clear the remaining columns if needed
+        for(Index k = 0; k<cols()-vecs ; ++k)
+          dst.col(k).tail(rows()-k-1).setZero();
+      }
+      else
+      {
+        dst.setIdentity(rows(), rows());
+        for(Index k = vecs-1; k >= 0; --k)
+        {
+          Index cornerSize = rows() - k - m_shift;
+          if(m_trans)
+            dst.bottomRightCorner(cornerSize, cornerSize)
+               .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0));
+          else
+            dst.bottomRightCorner(cornerSize, cornerSize)
+               .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0));
+        }
+      }
+    }
+
+    /** \internal */
+    template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const
+    {
+      Matrix<Scalar,1,Dest::RowsAtCompileTime,RowMajor,1,Dest::MaxRowsAtCompileTime> workspace(dst.rows());
+      applyThisOnTheRight(dst, workspace);
+    }
+
+    /** \internal */
+    template<typename Dest, typename Workspace>
+    inline void applyThisOnTheRight(Dest& dst, Workspace& workspace) const
+    {
+      workspace.resize(dst.rows());
+      for(Index k = 0; k < m_length; ++k)
+      {
+        Index actual_k = m_trans ? m_length-k-1 : k;
+        dst.rightCols(rows()-m_shift-actual_k)
+           .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data());
+      }
+    }
+
+    /** \internal */
+    template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const
+    {
+      Matrix<Scalar,1,Dest::ColsAtCompileTime,RowMajor,1,Dest::MaxColsAtCompileTime> workspace(dst.cols());
+      applyThisOnTheLeft(dst, workspace);
+    }
+
+    /** \internal */
+    template<typename Dest, typename Workspace>
+    inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace) const
+    {
+      workspace.resize(dst.cols());
+      for(Index k = 0; k < m_length; ++k)
+      {
+        Index actual_k = m_trans ? k : m_length-k-1;
+        dst.bottomRows(rows()-m_shift-actual_k)
+           .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data());
+      }
+    }
+
+    /** \brief Computes the product of a Householder sequence with a matrix.
+      * \param[in]  other  %Matrix being multiplied.
+      * \returns    Expression object representing the product.
+      *
+      * This function computes \f$ HM \f$ where \f$ H \f$ is the Householder sequence represented by \p *this
+      * and \f$ M \f$ is the matrix \p other.
+      */
+    template<typename OtherDerived>
+    typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type operator*(const MatrixBase<OtherDerived>& other) const
+    {
+      typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type
+        res(other.template cast<typename internal::matrix_type_times_scalar_type<Scalar,OtherDerived>::ResultScalar>());
+      applyThisOnTheLeft(res);
+      return res;
+    }
+
+    template<typename _VectorsType, typename _CoeffsType, int _Side> friend struct internal::hseq_side_dependent_impl;
+
+    /** \brief Sets the length of the Householder sequence.
+      * \param [in]  length  New value for the length.
+      *
+      * By default, the length \f$ n \f$ of the Householder sequence \f$ H = H_0 H_1 \ldots H_{n-1} \f$ is set
+      * to the number of columns of the matrix \p v passed to the constructor, or the number of rows if that
+      * is smaller. After this function is called, the length equals \p length.
+      *
+      * \sa length()
+      */
+    HouseholderSequence& setLength(Index length)
+    {
+      m_length = length;
+      return *this;
+    }
+
+    /** \brief Sets the shift of the Householder sequence.
+      * \param [in]  shift  New value for the shift.
+      *
+      * By default, a %HouseholderSequence object represents \f$ H = H_0 H_1 \ldots H_{n-1} \f$ and the i-th
+      * column of the matrix \p v passed to the constructor corresponds to the i-th Householder
+      * reflection. After this function is called, the object represents \f$ H = H_{\mathrm{shift}}
+      * H_{\mathrm{shift}+1} \ldots H_{n-1} \f$ and the i-th column of \p v corresponds to the (shift+i)-th
+      * Householder reflection.
+      *
+      * \sa shift()
+      */
+    HouseholderSequence& setShift(Index shift)
+    {
+      m_shift = shift;
+      return *this;
+    }
+
+    Index length() const { return m_length; }  /**< \brief Returns the length of the Householder sequence. */
+    Index shift() const { return m_shift; }    /**< \brief Returns the shift of the Householder sequence. */
+
+    /* Necessary for .adjoint() and .conjugate() */
+    template <typename VectorsType2, typename CoeffsType2, int Side2> friend class HouseholderSequence;
+
+  protected:
+
+    /** \brief Sets the transpose flag.
+      * \param [in]  trans  New value of the transpose flag.
+      *
+      * By default, the transpose flag is not set. If the transpose flag is set, then this object represents 
+      * \f$ H^T = H_{n-1}^T \ldots H_1^T H_0^T \f$ instead of \f$ H = H_0 H_1 \ldots H_{n-1} \f$.
+      *
+      * \sa trans()
+      */
+    HouseholderSequence& setTrans(bool trans)
+    {
+      m_trans = trans;
+      return *this;
+    }
+
+    bool trans() const { return m_trans; }     /**< \brief Returns the transpose flag. */
+
+    typename VectorsType::Nested m_vectors;
+    typename CoeffsType::Nested m_coeffs;
+    bool m_trans;
+    Index m_length;
+    Index m_shift;
+};
+
+/** \brief Computes the product of a matrix with a Householder sequence.
+  * \param[in]  other  %Matrix being multiplied.
+  * \param[in]  h      %HouseholderSequence being multiplied.
+  * \returns    Expression object representing the product.
+  *
+  * This function computes \f$ MH \f$ where \f$ M \f$ is the matrix \p other and \f$ H \f$ is the
+  * Householder sequence represented by \p h.
+  */
+template<typename OtherDerived, typename VectorsType, typename CoeffsType, int Side>
+typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::Type operator*(const MatrixBase<OtherDerived>& other, const HouseholderSequence<VectorsType,CoeffsType,Side>& h)
+{
+  typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::Type
+    res(other.template cast<typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::ResultScalar>());
+  h.applyThisOnTheRight(res);
+  return res;
+}
+
+/** \ingroup Householder_Module \householder_module
+  * \brief Convenience function for constructing a Householder sequence. 
+  * \returns A HouseholderSequence constructed from the specified arguments.
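+  *
+  * A minimal sketch, forming the dense Q factor of a QR factorization
+  * (assuming a HouseholderQR object \c qr):
+  * \code
+  * MatrixXd Q = householderSequence(qr.matrixQR(), qr.hCoeffs());
+  * \endcode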
+  */
+template<typename VectorsType, typename CoeffsType>
+HouseholderSequence<VectorsType,CoeffsType> householderSequence(const VectorsType& v, const CoeffsType& h)
+{
+  return HouseholderSequence<VectorsType,CoeffsType,OnTheLeft>(v, h);
+}
+
+/** \ingroup Householder_Module \householder_module
+  * \brief Convenience function for constructing a Householder sequence. 
+  * \returns A HouseholderSequence constructed from the specified arguments.
+  * \details This function differs from householderSequence() in that the template argument \p OnTheSide of
+  * the constructed HouseholderSequence is set to OnTheRight, instead of the default OnTheLeft.
+  */
+template<typename VectorsType, typename CoeffsType>
+HouseholderSequence<VectorsType,CoeffsType,OnTheRight> rightHouseholderSequence(const VectorsType& v, const CoeffsType& h)
+{
+  return HouseholderSequence<VectorsType,CoeffsType,OnTheRight>(v, h);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_HOUSEHOLDER_SEQUENCE_H
diff --git a/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
new file mode 100644
index 0000000..73ca9bf
--- /dev/null
+++ b/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
@@ -0,0 +1,149 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BASIC_PRECONDITIONERS_H
+#define EIGEN_BASIC_PRECONDITIONERS_H
+
+namespace Eigen { 
+
+/** \ingroup IterativeLinearSolvers_Module
+  * \brief A preconditioner based on the diagonal entries
+  *
+  * This class allows approximately solving A.x = b problems, assuming A is a diagonal matrix.
+  * In other words, this preconditioner neglects all off-diagonal entries and, in Eigen's language, solves for:
+  * \code
+  * A.diagonal().asDiagonal() . x = b
+  * \endcode
+  *
+  * \tparam _Scalar the type of the scalar.
+  *
+  * This preconditioner is suitable for both selfadjoint and general problems.
+  * The diagonal entries are pre-inverted and stored into a dense vector.
+  *
+  * \note A variant that has yet to be implemented would attempt to preserve the norm of each column.
+  *
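+  * A minimal usage sketch (assuming a sparse matrix \c A and a dense vector \c b
+  * filled elsewhere):
+  * \code
+  * DiagonalPreconditioner<double> precond(A);  // pre-inverts the diagonal of A
+  * VectorXd x = precond.solve(b);              // computes x = D^{-1} b
+  * \endcode
+  *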
+  */
+template <typename _Scalar>
+class DiagonalPreconditioner
+{
+    typedef _Scalar Scalar;
+    typedef Matrix<Scalar,Dynamic,1> Vector;
+    typedef typename Vector::Index Index;
+
+  public:
+    // this typedef is only to export the scalar type and compile-time dimensions to solve_retval
+    typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+
+    DiagonalPreconditioner() : m_isInitialized(false) {}
+
+    template<typename MatType>
+    DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols())
+    {
+      compute(mat);
+    }
+
+    Index rows() const { return m_invdiag.size(); }
+    Index cols() const { return m_invdiag.size(); }
+    
+    template<typename MatType>
+    DiagonalPreconditioner& analyzePattern(const MatType& )
+    {
+      return *this;
+    }
+    
+    template<typename MatType>
+    DiagonalPreconditioner& factorize(const MatType& mat)
+    {
+      m_invdiag.resize(mat.cols());
+      for(int j=0; j<mat.outerSize(); ++j)
+      {
+        // look for the diagonal entry within the j-th inner vector
+        typename MatType::InnerIterator it(mat,j);
+        while(it && it.index()!=j) ++it;
+        if(it && it.index()==j)
+          m_invdiag(j) = Scalar(1)/it.value(); // pre-invert it once for all
+        else
+          m_invdiag(j) = 0;                    // no stored diagonal entry
+      }
+      m_isInitialized = true;
+      return *this;
+    }
+    
+    template<typename MatType>
+    DiagonalPreconditioner& compute(const MatType& mat)
+    {
+      return factorize(mat);
+    }
+
+    template<typename Rhs, typename Dest>
+    void _solve(const Rhs& b, Dest& x) const
+    {
+      x = m_invdiag.array() * b.array();
+    }
+
+    template<typename Rhs> inline const internal::solve_retval<DiagonalPreconditioner, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized.");
+      eigen_assert(m_invdiag.size()==b.rows()
+                && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<DiagonalPreconditioner, Rhs>(*this, b.derived());
+    }
+
+  protected:
+    Vector m_invdiag;
+    bool m_isInitialized;
+};
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<DiagonalPreconditioner<_MatrixType>, Rhs>
+  : solve_retval_base<DiagonalPreconditioner<_MatrixType>, Rhs>
+{
+  typedef DiagonalPreconditioner<_MatrixType> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+}
+
+/** \ingroup IterativeLinearSolvers_Module
+  * \brief A naive preconditioner which approximates any matrix as the identity matrix
+  *
+  * \sa class DiagonalPreconditioner
+  */
+class IdentityPreconditioner
+{
+  public:
+
+    IdentityPreconditioner() {}
+
+    template<typename MatrixType>
+    IdentityPreconditioner(const MatrixType& ) {}
+    
+    template<typename MatrixType>
+    IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; }
+    
+    template<typename MatrixType>
+    IdentityPreconditioner& factorize(const MatrixType& ) { return *this; }
+
+    template<typename MatrixType>
+    IdentityPreconditioner& compute(const MatrixType& ) { return *this; }
+    
+    template<typename Rhs>
+    inline const Rhs& solve(const Rhs& b) const { return b; }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_BASIC_PRECONDITIONERS_H
diff --git a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
new file mode 100644
index 0000000..2625c4d
--- /dev/null
+++ b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -0,0 +1,262 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BICGSTAB_H
+#define EIGEN_BICGSTAB_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \internal Low-level biconjugate gradient stabilized (BiCGSTAB) algorithm
+  * \param mat The matrix A
+  * \param rhs The right hand side vector b
+  * \param x On input an initial solution, on output the computed solution.
+  * \param precond A preconditioner being able to efficiently solve for an
+  *                approximation of Ax=b (regardless of b)
+  * \param iters On input the maximum number of iterations, on output the number of performed iterations.
+  * \param tol_error On input the tolerance error, on output an estimation of the relative error.
+  * \return false in the case of a numerical issue, for example a breakdown of BiCGSTAB.
+  */
+template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
+              const Preconditioner& precond, int& iters,
+              typename Dest::RealScalar& tol_error)
+{
+  using std::sqrt;
+  using std::abs;
+  typedef typename Dest::RealScalar RealScalar;
+  typedef typename Dest::Scalar Scalar;
+  typedef Matrix<Scalar,Dynamic,1> VectorType;
+  RealScalar tol = tol_error;
+  int maxIters = iters;
+
+  int n = mat.cols();
+  VectorType r  = rhs - mat * x;
+  VectorType r0 = r;
+  
+  RealScalar r0_sqnorm = r0.squaredNorm();
+  RealScalar rhs_sqnorm = rhs.squaredNorm();
+  if(rhs_sqnorm == 0)
+  {
+    x.setZero();
+    return true;
+  }
+  Scalar rho    = 1;
+  Scalar alpha  = 1;
+  Scalar w      = 1;
+  
+  VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);
+  VectorType y(n),  z(n);
+  VectorType kt(n), ks(n);
+
+  VectorType s(n), t(n);
+
+  RealScalar tol2 = tol*tol;
+  RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();
+  int i = 0;
+  int restarts = 0;
+
+  while ( r.squaredNorm()/rhs_sqnorm > tol2 && i<maxIters )
+  {
+    Scalar rho_old = rho;
+
+    rho = r0.dot(r);
+    if (abs(rho) < eps2*r0_sqnorm)
+    {
+      // The new residual vector became too orthogonal to the arbitrarily chosen direction r0
+      // Let's restart with a new r0:
+      r0 = r;
+      rho = r0_sqnorm = r.squaredNorm();
+      if(restarts++ == 0)
+        i = 0;
+    }
+    Scalar beta = (rho/rho_old) * (alpha / w);
+    p = r + beta * (p - w * v);    // update the search direction
+    
+    y = precond.solve(p);
+    
+    v.noalias() = mat * y;
+
+    alpha = rho / r0.dot(v);
+    s = r - alpha * v;
+
+    z = precond.solve(s);
+    t.noalias() = mat * z;
+
+    RealScalar tmp = t.squaredNorm();
+    if(tmp>RealScalar(0))
+      w = t.dot(s) / tmp;
+    else
+      w = Scalar(0);
+    x += alpha * y + w * z;    // update the solution
+    r = s - w * t;             // update the residual
+    ++i;
+  }
+  tol_error = sqrt(r.squaredNorm()/rhs_sqnorm);
+  iters = i;
+  return true; 
+}
+
+}
+
+template< typename _MatrixType,
+          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class BiCGSTAB;
+
+namespace internal {
+
+template< typename _MatrixType, typename _Preconditioner>
+struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
+{
+  typedef _MatrixType MatrixType;
+  typedef _Preconditioner Preconditioner;
+};
+
+}
+
+/** \ingroup IterativeLinearSolvers_Module
+  * \brief A biconjugate gradient stabilized solver for sparse square problems
+  *
+  * This class allows solving sparse linear problems A.x = b using a biconjugate gradient
+  * stabilized algorithm. The vectors x and b can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
+  * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+  *
+  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
+  * and NumTraits<Scalar>::epsilon() for the tolerance.
+  * 
+  * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
+  * \code
+  * int n = 10000;
+  * VectorXd x(n), b(n);
+  * SparseMatrix<double> A(n,n);
+  * // fill A and b
+  * BiCGSTAB<SparseMatrix<double> > solver;
+  * solver.compute(A);
+  * x = solver.solve(b);
+  * std::cout << "#iterations:     " << solver.iterations() << std::endl;
+  * std::cout << "estimated error: " << solver.error()      << std::endl;
+  * // update b, and solve again
+  * x = solver.solve(b);
+  * \endcode
+  * 
+  * By default the iterations start with x=0 as an initial guess of the solution.
+  * One can control the start using the solveWithGuess() method.
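+  * For instance, a sketch reusing the previous solution as a warm start:
+  * \code
+  * x = solver.solveWithGuess(b, x);
+  * \endcode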
+  * 
+  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+  */
+template< typename _MatrixType, typename _Preconditioner>
+class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> >
+{
+  typedef IterativeSolverBase<BiCGSTAB> Base;
+  using Base::mp_matrix;
+  using Base::m_error;
+  using Base::m_iterations;
+  using Base::m_info;
+  using Base::m_isInitialized;
+public:
+  typedef _MatrixType MatrixType;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef _Preconditioner Preconditioner;
+
+public:
+
+  /** Default constructor. */
+  BiCGSTAB() : Base() {}
+
+  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+    * 
+    * This constructor is a shortcut for the default constructor followed
+    * by a call to compute().
+    * 
+    * \warning this class stores a reference to the matrix A as well as some
+    * precomputed values that depend on it. Therefore, if \a A is changed
+    * this class becomes invalid. Call compute() to update it with the new
+    * matrix A, or modify a copy of A.
+    */
+  BiCGSTAB(const MatrixType& A) : Base(A) {}
+
+  ~BiCGSTAB() {}
+  
+  /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+    * and \a x0 as an initial solution.
+    *
+    * \sa compute()
+    */
+  template<typename Rhs,typename Guess>
+  inline const internal::solve_retval_with_guess<BiCGSTAB, Rhs, Guess>
+  solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+  {
+    eigen_assert(m_isInitialized && "BiCGSTAB is not initialized.");
+    eigen_assert(Base::rows()==b.rows()
+              && "BiCGSTAB::solve(): invalid number of rows of the right hand side matrix b");
+    return internal::solve_retval_with_guess
+            <BiCGSTAB, Rhs, Guess>(*this, b.derived(), x0);
+  }
+  
+  /** \internal */
+  template<typename Rhs,typename Dest>
+  void _solveWithGuess(const Rhs& b, Dest& x) const
+  {    
+    bool failed = false;
+    for(int j=0; j<b.cols(); ++j)
+    {
+      m_iterations = Base::maxIterations();
+      m_error = Base::m_tolerance;
+      
+      typename Dest::ColXpr xj(x,j);
+      if(!internal::bicgstab(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_error))
+        failed = true;
+    }
+    m_info = failed ? NumericalIssue
+           : m_error <= Base::m_tolerance ? Success
+           : NoConvergence;
+    m_isInitialized = true;
+  }
+
+  /** \internal */
+  template<typename Rhs,typename Dest>
+  void _solve(const Rhs& b, Dest& x) const
+  {
+    x = b;   // use b as the (non-zero) initial guess
+    _solveWithGuess(b,x);
+  }
+
+protected:
+
+};
+
+
+namespace internal {
+
+template<typename _MatrixType, typename _Preconditioner, typename Rhs>
+struct solve_retval<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
+  : solve_retval_base<BiCGSTAB<_MatrixType, _Preconditioner>, Rhs>
+{
+  typedef BiCGSTAB<_MatrixType, _Preconditioner> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BICGSTAB_H
diff --git a/Eigen/src/IterativeLinearSolvers/CMakeLists.txt b/Eigen/src/IterativeLinearSolvers/CMakeLists.txt
new file mode 100644
index 0000000..59ccc00
--- /dev/null
+++ b/Eigen/src/IterativeLinearSolvers/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_IterativeLinearSolvers_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_IterativeLinearSolvers_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/IterativeLinearSolvers COMPONENT Devel
+  )
diff --git a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
new file mode 100644
index 0000000..8ba4a8d
--- /dev/null
+++ b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
@@ -0,0 +1,255 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CONJUGATE_GRADIENT_H
+#define EIGEN_CONJUGATE_GRADIENT_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \internal Low-level conjugate gradient algorithm
+  * \param mat The matrix A
+  * \param rhs The right hand side vector b
+  * \param x On input an initial solution, on output the computed solution.
+  * \param precond A preconditioner being able to efficiently solve for an
+  *                approximation of Ax=b (regardless of b)
+  * \param iters On input the maximum number of iterations, on output the number of performed iterations.
+  * \param tol_error On input the tolerance error, on output an estimation of the relative error.
+  */
+template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+EIGEN_DONT_INLINE
+void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
+                        const Preconditioner& precond, int& iters,
+                        typename Dest::RealScalar& tol_error)
+{
+  using std::sqrt;
+  using std::abs;
+  typedef typename Dest::RealScalar RealScalar;
+  typedef typename Dest::Scalar Scalar;
+  typedef Matrix<Scalar,Dynamic,1> VectorType;
+  
+  RealScalar tol = tol_error;
+  int maxIters = iters;
+  
+  int n = mat.cols();
+
+  VectorType residual = rhs - mat * x; //initial residual
+
+  RealScalar rhsNorm2 = rhs.squaredNorm();
+  if(rhsNorm2 == 0) 
+  {
+    x.setZero();
+    iters = 0;
+    tol_error = 0;
+    return;
+  }
+  RealScalar threshold = tol*tol*rhsNorm2;
+  RealScalar residualNorm2 = residual.squaredNorm();
+  if (residualNorm2 < threshold)
+  {
+    iters = 0;
+    tol_error = sqrt(residualNorm2 / rhsNorm2);
+    return;
+  }
+  
+  VectorType p(n);
+  p = precond.solve(residual);      //initial search direction
+
+  VectorType z(n), tmp(n);
+  RealScalar absNew = numext::real(residual.dot(p));  // the square of the absolute value of r scaled by invM
+  int i = 0;
+  while(i < maxIters)
+  {
+    tmp.noalias() = mat * p;              // the bottleneck of the algorithm
+
+    Scalar alpha = absNew / p.dot(tmp);   // the amount we travel on dir
+    x += alpha * p;                       // update solution
+    residual -= alpha * tmp;              // update residual
+    
+    residualNorm2 = residual.squaredNorm();
+    if(residualNorm2 < threshold)
+      break;
+    
+    z = precond.solve(residual);          // approximately solve for "A z = residual"
+
+    RealScalar absOld = absNew;
+    absNew = numext::real(residual.dot(z));     // update the absolute value of r
+    RealScalar beta = absNew / absOld;            // calculate the Gram-Schmidt value used to create the new search direction
+    p = z + beta * p;                             // update search direction
+    i++;
+  }
+  tol_error = sqrt(residualNorm2 / rhsNorm2);
+  iters = i;
+}
+
+}
+
+template< typename _MatrixType, int _UpLo=Lower,
+          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class ConjugateGradient;
+
+namespace internal {
+
+template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
+{
+  typedef _MatrixType MatrixType;
+  typedef _Preconditioner Preconditioner;
+};
+
+}
+
+/** \ingroup IterativeLinearSolvers_Module
+  * \brief A conjugate gradient solver for sparse self-adjoint problems
+  *
+  * This class allows solving sparse linear problems A.x = b using a conjugate gradient algorithm.
+  * The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
+  *               Upper, or Lower|Upper, in which case the full matrix entries will be considered,
+  *               as sketched below. Default is Lower.
+  * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
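+  *
+  * For instance, a sketch using both triangular parts of a selfadjoint sparse matrix:
+  * \code
+  * ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg;
+  * \endcode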
+  *
+  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
+  * and NumTraits<Scalar>::epsilon() for the tolerance.
+  * 
+  * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
+  * \code
+  * int n = 10000;
+  * VectorXd x(n), b(n);
+  * SparseMatrix<double> A(n,n);
+  * // fill A and b
+  * ConjugateGradient<SparseMatrix<double> > cg;
+  * cg.compute(A);
+  * x = cg.solve(b);
+  * std::cout << "#iterations:     " << cg.iterations() << std::endl;
+  * std::cout << "estimated error: " << cg.error()      << std::endl;
+  * // update b, and solve again
+  * x = cg.solve(b);
+  * \endcode
+  * 
+  * By default the iterations start with x=0 as an initial guess of the solution.
+  * One can control the start using the solveWithGuess() method.
+  * 
+  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+  */
+template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
+{
+  typedef IterativeSolverBase<ConjugateGradient> Base;
+  using Base::mp_matrix;
+  using Base::m_error;
+  using Base::m_iterations;
+  using Base::m_info;
+  using Base::m_isInitialized;
+public:
+  typedef _MatrixType MatrixType;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef _Preconditioner Preconditioner;
+
+  enum {
+    UpLo = _UpLo
+  };
+
+public:
+
+  /** Default constructor. */
+  ConjugateGradient() : Base() {}
+
+  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+    * 
+    * This constructor is a shortcut for the default constructor followed
+    * by a call to compute().
+    * 
+    * \warning this class stores a reference to the matrix A as well as some
+    * precomputed values that depend on it. Therefore, if \a A is changed
+    * this class becomes invalid. Call compute() to update it with the new
+    * matrix A, or modify a copy of A.
+    */
+  ConjugateGradient(const MatrixType& A) : Base(A) {}
+
+  ~ConjugateGradient() {}
+  
+  /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+    * and \a x0 as an initial solution.
+    *
+    * \sa compute()
+    */
+  template<typename Rhs,typename Guess>
+  inline const internal::solve_retval_with_guess<ConjugateGradient, Rhs, Guess>
+  solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+  {
+    eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
+    eigen_assert(Base::rows()==b.rows()
+              && "ConjugateGradient::solve(): invalid number of rows of the right hand side matrix b");
+    return internal::solve_retval_with_guess
+            <ConjugateGradient, Rhs, Guess>(*this, b.derived(), x0);
+  }
+
+  /** \internal */
+  template<typename Rhs,typename Dest>
+  void _solveWithGuess(const Rhs& b, Dest& x) const
+  {
+    typedef typename internal::conditional<UpLo==(Lower|Upper),
+                                           const MatrixType&,
+                                           SparseSelfAdjointView<const MatrixType, UpLo>
+                                          >::type MatrixWrapperType;
+    m_iterations = Base::maxIterations();
+    m_error = Base::m_tolerance;
+
+    for(int j=0; j<b.cols(); ++j)
+    {
+      m_iterations = Base::maxIterations();
+      m_error = Base::m_tolerance;
+
+      typename Dest::ColXpr xj(x,j);
+      internal::conjugate_gradient(MatrixWrapperType(*mp_matrix), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
+    }
+
+    m_isInitialized = true;
+    m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
+  }
+  
+  /** \internal */
+  template<typename Rhs,typename Dest>
+  void _solve(const Rhs& b, Dest& x) const
+  {
+    x.setZero();
+    _solveWithGuess(b,x);
+  }
+
+protected:
+
+};
+
+
+namespace internal {
+
+template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
+struct solve_retval<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+  : solve_retval_base<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+{
+  typedef ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CONJUGATE_GRADIENT_H
diff --git a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
new file mode 100644
index 0000000..4c169aa
--- /dev/null
+++ b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
@@ -0,0 +1,469 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_INCOMPLETE_LUT_H
+#define EIGEN_INCOMPLETE_LUT_H
+
+
+namespace Eigen { 
+
+namespace internal {
+    
+/** \internal
+  * Computes a quick-sort split of a vector.
+  * On output, the vector row is permuted such that its elements satisfy
+  * abs(row(i)) >= abs(row(ncut)) if i<ncut
+  * abs(row(i)) <= abs(row(ncut)) if i>ncut 
+  * \param row The vector of values
+  * \param ind The array of indices for the elements in @p row
+  * \param ncut  The number of largest elements to keep
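+  *
+  * For example (illustrative): with row = (1, 5, 3, 4, 2) and ncut = 2, on output
+  * the first two entries of @p row hold the two largest values (5 and 4, in some
+  * order) and @p ind is permuted accordingly.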
+  **/ 
+template <typename VectorV, typename VectorI, typename Index>
+Index QuickSplit(VectorV &row, VectorI &ind, Index ncut)
+{
+  typedef typename VectorV::RealScalar RealScalar;
+  using std::swap;
+  using std::abs;
+  Index mid;
+  Index n = row.size(); /* length of the vector */
+  Index first, last;
+  
+  ncut--; /* to fit the zero-based indices */
+  first = 0; 
+  last = n-1; 
+  if (ncut < first || ncut > last ) return 0;
+  
+  do {
+    mid = first; 
+    RealScalar abskey = abs(row(mid)); 
+    for (Index j = first + 1; j <= last; j++) {
+      if ( abs(row(j)) > abskey) {
+        ++mid;
+        swap(row(mid), row(j));
+        swap(ind(mid), ind(j));
+      }
+    }
+    /* Interchange for the pivot element */
+    swap(row(mid), row(first));
+    swap(ind(mid), ind(first));
+    
+    if (mid > ncut) last = mid - 1;
+    else if (mid < ncut ) first = mid + 1; 
+  } while (mid != ncut );
+  
+  return 0; /* mid is equal to ncut */ 
+}
+
+}// end namespace internal
+
+/** \ingroup IterativeLinearSolvers_Module
+  * \class IncompleteLUT
+  * \brief Incomplete LU factorization with dual-threshold strategy
+  *
+  * During the numerical factorization, two dropping rules are used:
+  *  1) any element whose magnitude is less than some tolerance is dropped.
+  *    This tolerance is obtained by multiplying the input tolerance @p droptol 
+  *    by the average magnitude of all the original elements in the current row.
+  *  2) After the elimination of the row, only the @p fill largest elements in 
+  *    the L part and the @p fill largest elements in the U part are kept 
+  *    (in addition to the diagonal element). Note that @p fill is computed from 
+  *    the input parameter @p fillfactor, which is used as the ratio to control the fill-in 
+  *    relative to the initial number of nonzero elements.
+  * 
+  * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)
+  * and when @p fill=n/2 with @p droptol different from zero. 
+  * 
+  * References: Yousef Saad, ILUT: A dual threshold incomplete LU factorization, 
+  *              Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.
+  * 
+  * NOTE: The following implementation is derived from the ILUT implementation
+  * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota, 
+  * released under the terms of the GNU LGPL: 
+  *    http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README
+  * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.
+  * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:
+  *   http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html
+  * alternatively, on GMANE:
+  *   http://comments.gmane.org/gmane.comp.lib.eigen/3302
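+  *
+  * A typical use is as a preconditioner of an iterative solver; a sketch
+  * (assuming a sparse matrix \c A and a dense vector \c b filled elsewhere):
+  * \code
+  * BiCGSTAB<SparseMatrix<double>, IncompleteLUT<double> > solver;
+  * solver.preconditioner().setDroptol(1e-5);  // optional: tighten the dropping rule
+  * solver.compute(A);
+  * VectorXd x = solver.solve(b);
+  * \endcode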
+  */
+template <typename _Scalar>
+class IncompleteLUT : internal::noncopyable
+{
+    typedef _Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef Matrix<Scalar,Dynamic,1> Vector;
+    typedef SparseMatrix<Scalar,RowMajor> FactorType;
+    typedef SparseMatrix<Scalar,ColMajor> PermutType;
+    typedef typename FactorType::Index Index;
+
+  public:
+    typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+    
+    IncompleteLUT()
+      : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),
+        m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false)
+    {}
+    
+    template<typename MatrixType>
+    IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)
+      : m_droptol(droptol),m_fillfactor(fillfactor),
+        m_analysisIsOk(false),m_factorizationIsOk(false),m_isInitialized(false)
+    {
+      eigen_assert(fillfactor != 0);
+      compute(mat); 
+    }
+    
+    Index rows() const { return m_lu.rows(); }
+    
+    Index cols() const { return m_lu.cols(); }
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
+      return m_info;
+    }
+    
+    template<typename MatrixType>
+    void analyzePattern(const MatrixType& amat);
+    
+    template<typename MatrixType>
+    void factorize(const MatrixType& amat);
+    
+    /**
+      * Computes an incomplete LU factorization with dual threshold on the matrix \p amat.
+      * No pivoting is done in this version.
+      **/
+    template<typename MatrixType>
+    IncompleteLUT<Scalar>& compute(const MatrixType& amat)
+    {
+      analyzePattern(amat); 
+      factorize(amat);
+      return *this;
+    }
+
+    void setDroptol(const RealScalar& droptol); 
+    void setFillfactor(int fillfactor); 
+    
+    template<typename Rhs, typename Dest>
+    void _solve(const Rhs& b, Dest& x) const
+    {
+      x = m_Pinv * b;  
+      x = m_lu.template triangularView<UnitLower>().solve(x);
+      x = m_lu.template triangularView<Upper>().solve(x);
+      x = m_P * x; 
+    }
+
+    template<typename Rhs> inline const internal::solve_retval<IncompleteLUT, Rhs>
+     solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "IncompleteLUT is not initialized.");
+      eigen_assert(cols()==b.rows()
+                && "IncompleteLUT::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<IncompleteLUT, Rhs>(*this, b.derived());
+    }
+
+protected:
+
+    /** keeps off-diagonal entries; drops diagonal entries */
+    struct keep_diag {
+      inline bool operator() (const Index& row, const Index& col, const Scalar&) const
+      {
+        return row!=col;
+      }
+    };
+
+protected:
+
+    FactorType m_lu;
+    RealScalar m_droptol;
+    int m_fillfactor;
+    bool m_analysisIsOk;
+    bool m_factorizationIsOk;
+    bool m_isInitialized;
+    ComputationInfo m_info;
+    PermutationMatrix<Dynamic,Dynamic,Index> m_P;     // Fill-reducing permutation
+    PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv;  // Inverse permutation
+};
+
+/**
+ * Sets the control parameter droptol.
+ * \param droptol   Drop any element whose magnitude is less than this tolerance 
+ **/ 
+template<typename Scalar>
+void IncompleteLUT<Scalar>::setDroptol(const RealScalar& droptol)
+{
+  this->m_droptol = droptol;   
+}
+
+/**
+ * Sets the control parameter fillfactor.
+ * \param fillfactor  This is used to compute the number @p fill_in of largest elements to keep in each row. 
+ **/ 
+template<typename Scalar>
+void IncompleteLUT<Scalar>::setFillfactor(int fillfactor)
+{
+  this->m_fillfactor = fillfactor;   
+}
+
+template <typename Scalar>
+template<typename _MatrixType>
+void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
+{
+  // Compute the Fill-reducing permutation
+  SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
+  SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
+  // Symmetrize the pattern
+  // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
+  //       on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred...
+  SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
+  AtA.prune(keep_diag());
+  internal::minimum_degree_ordering<Scalar, Index>(AtA, m_P);  // Then compute the AMD ordering...
+
+  m_Pinv  = m_P.inverse(); // ... and the inverse permutation
+
+  m_analysisIsOk = true;
+  m_factorizationIsOk = false;
+  m_isInitialized = false;
+}
+
+template <typename Scalar>
+template<typename _MatrixType>
+void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
+{
+  using std::sqrt;
+  using std::swap;
+  using std::abs;
+
+  eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix");
+  Index n = amat.cols();  // Size of the matrix
+  m_lu.resize(n,n);
+  // Declare Working vectors and variables
+  Vector u(n);      // real values of the row -- maximum size is n --
+  VectorXi ju(n);   // column position of the values in u -- maximum size  is n
+  VectorXi jr(n);   // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1
+
+  // Apply the fill-reducing permutation
+  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+  SparseMatrix<Scalar,RowMajor, Index> mat;
+  mat = amat.twistedBy(m_Pinv);
+
+  // Initialization
+  jr.fill(-1);
+  ju.fill(0);
+  u.fill(0);
+
+  // number of largest elements to keep in each row:
+  Index fill_in = static_cast<Index>(amat.nonZeros()*m_fillfactor)/n + 1;
+  if (fill_in > n) fill_in = n;
+
+  // number of largest nonzero elements to keep in the L and the U part of the current row:
+  Index nnzL = fill_in/2;
+  Index nnzU = nnzL;
+  m_lu.reserve(n * (nnzL + nnzU + 1));
+
+  // global loop over the rows of the sparse matrix
+  for (Index ii = 0; ii < n; ii++)
+  {
+    // 1 - copy the lower and the upper part of the row i of mat in the working vector u
+
+    Index sizeu = 1; // number of nonzero elements in the upper part of the current row
+    Index sizel = 0; // number of nonzero elements in the lower part of the current row
+    ju(ii)    = ii;
+    u(ii)     = 0;
+    jr(ii)    = ii;
+    RealScalar rownorm = 0;
+
+    typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii
+    for (; j_it; ++j_it)
+    {
+      Index k = j_it.index();
+      if (k < ii)
+      {
+        // copy the lower part
+        ju(sizel) = k;
+        u(sizel) = j_it.value();
+        jr(k) = sizel;
+        ++sizel;
+      }
+      else if (k == ii)
+      {
+        u(ii) = j_it.value();
+      }
+      else
+      {
+        // copy the upper part
+        Index jpos = ii + sizeu;
+        ju(jpos) = k;
+        u(jpos) = j_it.value();
+        jr(k) = jpos;
+        ++sizeu;
+      }
+      rownorm += numext::abs2(j_it.value());
+    }
+
+    // 2 - detect possible zero row
+    if(rownorm==0)
+    {
+      m_info = NumericalIssue;
+      return;
+    }
+    // Take the 2-norm of the current row as a relative tolerance
+    rownorm = sqrt(rownorm);
+
+    // 3 - eliminate the previous nonzero rows
+    Index jj = 0;
+    Index len = 0;
+    while (jj < sizel)
+    {
+      // In order to eliminate in the correct order,
+      // we must select first the smallest column index among  ju(jj:sizel)
+      Index k;
+      Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment
+      k += jj;
+      if (minrow != ju(jj))
+      {
+        // swap the two locations
+        Index j = ju(jj);
+        swap(ju(jj), ju(k));
+        jr(minrow) = jj;   jr(j) = k;
+        swap(u(jj), u(k));
+      }
+      // Reset this location
+      jr(minrow) = -1;
+
+      // Start elimination
+      typename FactorType::InnerIterator ki_it(m_lu, minrow);
+      while (ki_it && ki_it.index() < minrow) ++ki_it;
+      eigen_internal_assert(ki_it && ki_it.col()==minrow);
+      Scalar fact = u(jj) / ki_it.value();
+
+      // drop too small elements
+      if(abs(fact) <= m_droptol)
+      {
+        jj++;
+        continue;
+      }
+
+      // linear combination of the current row ii and the row minrow
+      ++ki_it;
+      for (; ki_it; ++ki_it)
+      {
+        Scalar prod = fact * ki_it.value();
+        Index j       = ki_it.index();
+        Index jpos    = jr(j);
+        if (jpos == -1) // fill-in element
+        {
+          Index newpos;
+          if (j >= ii) // dealing with the upper part
+          {
+            newpos = ii + sizeu;
+            sizeu++;
+            eigen_internal_assert(sizeu<=n);
+          }
+          else // dealing with the lower part
+          {
+            newpos = sizel;
+            sizel++;
+            eigen_internal_assert(sizel<=ii);
+          }
+          ju(newpos) = j;
+          u(newpos) = -prod;
+          jr(j) = newpos;
+        }
+        else
+          u(jpos) -= prod;
+      }
+      // store the pivot element
+      u(len) = fact;
+      ju(len) = minrow;
+      ++len;
+
+      jj++;
+    } // end of the elimination on the row ii
+
+    // reset the upper part of the pointer jr to zero
+    for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;
+
+    // 4 - partially sort and insert the elements in the m_lu matrix
+
+    // sort the L-part of the row
+    sizel = len;
+    len = (std::min)(sizel, nnzL);
+    typename Vector::SegmentReturnType ul(u.segment(0, sizel));
+    typename VectorXi::SegmentReturnType jul(ju.segment(0, sizel));
+    internal::QuickSplit(ul, jul, len);
+
+    // store the largest m_fill elements of the L part
+    m_lu.startVec(ii);
+    for(Index k = 0; k < len; k++)
+      m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
+
+    // store the diagonal element
+    // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)
+    if (u(ii) == Scalar(0))
+      u(ii) = sqrt(m_droptol) * rownorm;
+    m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);
+
+    // sort the U-part of the row
+    // apply the dropping rule first
+    len = 0;
+    for(Index k = 1; k < sizeu; k++)
+    {
+      if(abs(u(ii+k)) > m_droptol * rownorm )
+      {
+        ++len;
+        u(ii + len)  = u(ii + k);
+        ju(ii + len) = ju(ii + k);
+      }
+    }
+    sizeu = len + 1; // +1 to take into account the diagonal element
+    len = (std::min)(sizeu, nnzU);
+    typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));
+    typename VectorXi::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));
+    internal::QuickSplit(uu, juu, len);
+
+    // store the largest elements of the U part
+    for(Index k = ii + 1; k < ii + len; k++)
+      m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);
+  }
+
+  m_lu.finalize();
+  m_lu.makeCompressed();
+
+  m_factorizationIsOk = true;
+  m_isInitialized = m_factorizationIsOk;
+  m_info = Success;
+}
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<IncompleteLUT<_MatrixType>, Rhs>
+  : solve_retval_base<IncompleteLUT<_MatrixType>, Rhs>
+{
+  typedef IncompleteLUT<_MatrixType> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_INCOMPLETE_LUT_H
diff --git a/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
new file mode 100644
index 0000000..2036922
--- /dev/null
+++ b/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
@@ -0,0 +1,254 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H
+#define EIGEN_ITERATIVE_SOLVER_BASE_H
+
+namespace Eigen { 
+
+/** \ingroup IterativeLinearSolvers_Module
+  * \brief Base class for linear iterative solvers
+  *
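+  * Derived solvers share a common configuration interface; a minimal sketch
+  * (assuming \c Solver is one of the derived solver classes, and that \c A and
+  * \c b are set up elsewhere):
+  * \code
+  * Solver solver;
+  * solver.setMaxIterations(100).setTolerance(1e-8);  // both return the derived solver
+  * solver.compute(A);
+  * VectorXd x = solver.solve(b);
+  * \endcode
+  *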
+  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+  */
+template< typename Derived>
+class IterativeSolverBase : internal::noncopyable
+{
+public:
+  typedef typename internal::traits<Derived>::MatrixType MatrixType;
+  typedef typename internal::traits<Derived>::Preconditioner Preconditioner;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::RealScalar RealScalar;
+
+public:
+
+  Derived& derived() { return *static_cast<Derived*>(this); }
+  const Derived& derived() const { return *static_cast<const Derived*>(this); }
+
+  /** Default constructor. */
+  IterativeSolverBase()
+    : mp_matrix(0)
+  {
+    init();
+  }
+
+  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+    * 
+    * This constructor is a shortcut for the default constructor followed
+    * by a call to compute().
+    * 
+    * \warning this class stores a reference to the matrix A as well as some
+    * precomputed values that depend on it. Therefore, if \a A is changed
+    * this class becomes invalid. Call compute() to update it with the new
+    * matrix A, or modify a copy of A.
+    */
+  IterativeSolverBase(const MatrixType& A)
+  {
+    init();
+    compute(A);
+  }
+
+  ~IterativeSolverBase() {}
+  
+  /** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems.
+    *
+    * Currently, this function mostly calls analyzePattern on the preconditioner. In the future
+    * we might, for instance, implement column reordering for faster matrix-vector products.
+    */
+  Derived& analyzePattern(const MatrixType& A)
+  {
+    m_preconditioner.analyzePattern(A);
+    m_isInitialized = true;
+    m_analysisIsOk = true;
+    m_info = Success;
+    return derived();
+  }
+  
+  /** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems.
+    *
+    * Currently, this function mostly calls factorize on the preconditioner.
+    *
+    * \warning this class stores a reference to the matrix A as well as some
+    * precomputed values that depend on it. Therefore, if \a A is changed
+    * this class becomes invalid. Call compute() to update it with the new
+    * matrix A, or modify a copy of A.
+    */
+  Derived& factorize(const MatrixType& A)
+  {
+    eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); 
+    mp_matrix = &A;
+    m_preconditioner.factorize(A);
+    m_factorizationIsOk = true;
+    m_info = Success;
+    return derived();
+  }
+
+  /** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems.
+    *
+    * Currently, this function mostly initializes/computes the preconditioner. In the future
+    * we might, for instance, implement column reordering for faster matrix-vector products.
+    *
+    * \warning this class stores a reference to the matrix A as well as some
+    * precomputed values that depend on it. Therefore, if \a A is changed
+    * this class becomes invalid. Call compute() to update it with the new
+    * matrix A, or modify a copy of A.
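+    *
+    * When the sparsity pattern of \a A does not change between solves, a sketch of
+    * the two-step alternative:
+    * \code
+    * solver.analyzePattern(A);  // structure-dependent work
+    * solver.factorize(A);       // numerical work, redone whenever the values of A change
+    * x = solver.solve(b);
+    * \endcode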
+    */
+  Derived& compute(const MatrixType& A)
+  {
+    mp_matrix = &A;
+    m_preconditioner.compute(A);
+    m_isInitialized = true;
+    m_analysisIsOk = true;
+    m_factorizationIsOk = true;
+    m_info = Success;
+    return derived();
+  }
+
+  /** \internal */
+  Index rows() const { return mp_matrix ? mp_matrix->rows() : 0; }
+  /** \internal */
+  Index cols() const { return mp_matrix ? mp_matrix->cols() : 0; }
+
+  /** \returns the tolerance threshold used by the stopping criterion */
+  RealScalar tolerance() const { return m_tolerance; }
+  
+  /** Sets the tolerance threshold used by the stopping criterion */
+  Derived& setTolerance(const RealScalar& tolerance)
+  {
+    m_tolerance = tolerance;
+    return derived();
+  }
+
+  /** \returns a read-write reference to the preconditioner for custom configuration. */
+  Preconditioner& preconditioner() { return m_preconditioner; }
+  
+  /** \returns a read-only reference to the preconditioner. */
+  const Preconditioner& preconditioner() const { return m_preconditioner; }
+
+  /** \returns the max number of iterations */
+  int maxIterations() const
+  {
+    return (mp_matrix && m_maxIterations<0) ? mp_matrix->cols() : m_maxIterations;
+  }
+  
+  /** Sets the max number of iterations */
+  Derived& setMaxIterations(int maxIters)
+  {
+    m_maxIterations = maxIters;
+    return derived();
+  }
+
+  /** \returns the number of iterations performed during the last solve */
+  int iterations() const
+  {
+    eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
+    return m_iterations;
+  }
+
+  /** \returns the tolerance error reached during the last solve */
+  RealScalar error() const
+  {
+    eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
+    return m_error;
+  }
+
+  /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+    *
+    * \sa compute()
+    */
+  template<typename Rhs> inline const internal::solve_retval<Derived, Rhs>
+  solve(const MatrixBase<Rhs>& b) const
+  {
+    eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
+    eigen_assert(rows()==b.rows()
+              && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b");
+    return internal::solve_retval<Derived, Rhs>(derived(), b.derived());
+  }
+  
+  /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+    *
+    * \sa compute()
+    */
+  template<typename Rhs>
+  inline const internal::sparse_solve_retval<IterativeSolverBase, Rhs>
+  solve(const SparseMatrixBase<Rhs>& b) const
+  {
+    eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
+    eigen_assert(rows()==b.rows()
+              && "IterativeSolverBase::solve(): invalid number of rows of the right hand side matrix b");
+    return internal::sparse_solve_retval<IterativeSolverBase, Rhs>(*this, b.derived());
+  }
+
+  /** \returns Success if the iterations converged, and NoConvergence otherwise. */
+  ComputationInfo info() const
+  {
+    eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized.");
+    return m_info;
+  }
+  
+  /** \internal */
+  template<typename Rhs, typename DestScalar, int DestOptions, typename DestIndex>
+  void _solve_sparse(const Rhs& b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const
+  {
+    eigen_assert(rows()==b.rows());
+    
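+    // Solve column by column through dense temporaries, then copy each solution
+    // back as a sparse column of the destination.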
+    int rhsCols = b.cols();
+    int size = b.rows();
+    Eigen::Matrix<DestScalar,Dynamic,1> tb(size);
+    Eigen::Matrix<DestScalar,Dynamic,1> tx(size);
+    for(int k=0; k<rhsCols; ++k)
+    {
+      tb = b.col(k);
+      tx = derived().solve(tb);
+      dest.col(k) = tx.sparseView(0);
+    }
+  }
+
+protected:
+  void init()
+  {
+    m_isInitialized = false;
+    m_analysisIsOk = false;
+    m_factorizationIsOk = false;
+    m_maxIterations = -1;
+    m_tolerance = NumTraits<Scalar>::epsilon();
+  }
+  const MatrixType* mp_matrix;
+  Preconditioner m_preconditioner;
+
+  int m_maxIterations;
+  RealScalar m_tolerance;
+  
+  mutable RealScalar m_error;
+  mutable int m_iterations;
+  mutable ComputationInfo m_info;
+  mutable bool m_isInitialized, m_analysisIsOk, m_factorizationIsOk;
+};
+
+namespace internal {
+ 
+template<typename Derived, typename Rhs>
+struct sparse_solve_retval<IterativeSolverBase<Derived>, Rhs>
+  : sparse_solve_retval_base<IterativeSolverBase<Derived>, Rhs>
+{
+  typedef IterativeSolverBase<Derived> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec().derived()._solve_sparse(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_ITERATIVE_SOLVER_BASE_H
diff --git a/Eigen/src/Jacobi/CMakeLists.txt b/Eigen/src/Jacobi/CMakeLists.txt
new file mode 100644
index 0000000..490dac6
--- /dev/null
+++ b/Eigen/src/Jacobi/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Jacobi_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_Jacobi_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Jacobi COMPONENT Devel
+  )
diff --git a/Eigen/src/Jacobi/Jacobi.h b/Eigen/src/Jacobi/Jacobi.h
new file mode 100644
index 0000000..956f72d
--- /dev/null
+++ b/Eigen/src/Jacobi/Jacobi.h
@@ -0,0 +1,433 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_JACOBI_H
+#define EIGEN_JACOBI_H
+
+namespace Eigen { 
+
+/** \ingroup Jacobi_Module
+  * \jacobi_module
+  * \class JacobiRotation
+  * \brief Rotation given by a cosine-sine pair.
+  *
+  * This class represents a Jacobi or Givens rotation.
+  * This is a 2D rotation in the plane \c J of angle \f$ \theta \f$ defined by
+  * its cosine \c c and sine \c s as follows:
+  * \f$ J = \left ( \begin{array}{cc} c & \overline s \\ -s  & \overline c \end{array} \right ) \f$
+  *
+  * You can apply the respective counter-clockwise rotation to a column vector \c v by
+  * applying its adjoint on the left: \f$ v = J^* v \f$ that translates to the following Eigen code:
+  * \code
+  * v.applyOnTheLeft(J.adjoint());
+  * \endcode
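+  *
+  * For instance, a sketch zeroing the off-diagonal entry (\c p, \c q) of a
+  * selfadjoint matrix \c m:
+  * \code
+  * JacobiRotation<double> J;
+  * J.makeJacobi(m, p, q);
+  * m.applyOnTheLeft(p, q, J.adjoint());
+  * m.applyOnTheRight(p, q, J);
+  * \endcode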
+  *
+  * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+  */
+template<typename Scalar> class JacobiRotation
+{
+  public:
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    /** Default constructor without any initialization. */
+    JacobiRotation() {}
+
+    /** Constructs a planar rotation from a cosine-sine pair (\a c, \a s). */
+    JacobiRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {}
+
+    Scalar& c() { return m_c; }
+    Scalar c() const { return m_c; }
+    Scalar& s() { return m_s; }
+    Scalar s() const { return m_s; }
+
+    /** Concatenates two planar rotations */
+    JacobiRotation operator*(const JacobiRotation& other)
+    {
+      using numext::conj;
+      return JacobiRotation(m_c * other.m_c - conj(m_s) * other.m_s,
+                            conj(m_c * conj(other.m_s) + conj(m_s) * conj(other.m_c)));
+    }
+
+    /** Returns the transposed transformation */
+    JacobiRotation transpose() const { using numext::conj; return JacobiRotation(m_c, -conj(m_s)); }
+
+    /** Returns the adjoint transformation */
+    JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); }
+
+    template<typename Derived>
+    bool makeJacobi(const MatrixBase<Derived>&, typename Derived::Index p, typename Derived::Index q);
+    bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z);
+
+    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);
+
+  protected:
+    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::true_type);
+    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::false_type);
+
+    Scalar m_c, m_s;
+};
+
+/** Makes \c *this a Jacobi rotation \a J such that applying \a J on both the right and left sides of the selfadjoint 2x2 matrix
+  * \f$ B = \left ( \begin{array}{cc} x & y \\ \overline y & z \end{array} \right )\f$ yields a diagonal matrix \f$ A = J^* B J \f$
+  *
+  * \sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+  */
+template<typename Scalar>
+bool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z)
+{
+  using std::sqrt;
+  using std::abs;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  if(y == Scalar(0))
+  {
+    m_c = Scalar(1);
+    m_s = Scalar(0);
+    return false;
+  }
+  else
+  {
+    RealScalar tau = (x-z)/(RealScalar(2)*abs(y));
+    RealScalar w = sqrt(numext::abs2(tau) + RealScalar(1));
+    RealScalar t;
+    if(tau>RealScalar(0))
+    {
+      t = RealScalar(1) / (tau + w);
+    }
+    else
+    {
+      t = RealScalar(1) / (tau - w);
+    }
+    RealScalar sign_t = t > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
+    RealScalar n = RealScalar(1) / sqrt(numext::abs2(t)+RealScalar(1));
+    m_s = - sign_t * (numext::conj(y) / abs(y)) * abs(t) * n;
+    m_c = n;
+    return true;
+  }
+}
+
+/** Makes \c *this a Jacobi rotation \c J such that applying \a J on both the right and left sides of the 2x2 selfadjoint matrix
+  * \f$ B = \left ( \begin{array}{cc} \text{this}_{pp} & \text{this}_{pq} \\ (\text{this}_{pq})^* & \text{this}_{qq} \end{array} \right )\f$ yields
+  * a diagonal matrix \f$ A = J^* B J \f$
+  *
+  * Example: \include Jacobi_makeJacobi.cpp
+  * Output: \verbinclude Jacobi_makeJacobi.out
+  *
+  * \sa JacobiRotation::makeJacobi(RealScalar, Scalar, RealScalar), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+  */
+template<typename Scalar>
+template<typename Derived>
+inline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, typename Derived::Index p, typename Derived::Index q)
+{
+  return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q)));
+}
+
+/** Makes \c *this a Givens rotation \c G such that applying \f$ G^* \f$ to the left of the vector
+  * \f$ V = \left ( \begin{array}{c} p \\ q \end{array} \right )\f$ yields:
+  * \f$ G^* V = \left ( \begin{array}{c} r \\ 0 \end{array} \right )\f$.
+  *
+  * The value of \a z is returned if \a z is not null (the default is null).
+  * Also note that G is built such that the cosine is always real.
+  *
+  * Example: \include Jacobi_makeGivens.cpp
+  * Output: \verbinclude Jacobi_makeGivens.out
+  *
+  * This function implements the continuous Givens rotation generation algorithm
+  * found in Anderson (2000), Discontinuous Plane Rotations and the Symmetric Eigenvalue Problem.
+  * LAPACK Working Note 150, University of Tennessee, UT-CS-00-454, December 4, 2000.
+  *
+  * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+  */
+template<typename Scalar>
+void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* z)
+{
+  makeGivens(p, q, z, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type());
+}
+
+
+// specialization for complexes
+template<typename Scalar>
+void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type)
+{
+  using std::sqrt;
+  using std::abs;
+  using numext::conj;
+  
+  if(q==Scalar(0))
+  {
+    m_c = numext::real(p)<0 ? Scalar(-1) : Scalar(1);
+    m_s = 0;
+    if(r) *r = m_c * p;
+  }
+  else if(p==Scalar(0))
+  {
+    m_c = 0;
+    m_s = -q/abs(q);
+    if(r) *r = abs(q);
+  }
+  else
+  {
+    RealScalar p1 = numext::norm1(p);
+    RealScalar q1 = numext::norm1(q);
+    if(p1>=q1)
+    {
+      Scalar ps = p / p1;
+      RealScalar p2 = numext::abs2(ps);
+      Scalar qs = q / p1;
+      RealScalar q2 = numext::abs2(qs);
+
+      RealScalar u = sqrt(RealScalar(1) + q2/p2);
+      if(numext::real(p)<RealScalar(0))
+        u = -u;
+
+      m_c = Scalar(1)/u;
+      m_s = -qs*conj(ps)*(m_c/p2);
+      if(r) *r = p * u;
+    }
+    else
+    {
+      Scalar ps = p / q1;
+      RealScalar p2 = numext::abs2(ps);
+      Scalar qs = q / q1;
+      RealScalar q2 = numext::abs2(qs);
+
+      RealScalar u = q1 * sqrt(p2 + q2);
+      if(numext::real(p)<RealScalar(0))
+        u = -u;
+
+      p1 = abs(p);
+      ps = p/p1;
+      m_c = p1/u;
+      m_s = -conj(ps) * (q/u);
+      if(r) *r = ps * u;
+    }
+  }
+}
+
+// specialization for reals
+template<typename Scalar>
+void JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type)
+{
+  using std::sqrt;
+  using std::abs;
+  if(q==Scalar(0))
+  {
+    m_c = p<Scalar(0) ? Scalar(-1) : Scalar(1);
+    m_s = Scalar(0);
+    if(r) *r = abs(p);
+  }
+  else if(p==Scalar(0))
+  {
+    m_c = Scalar(0);
+    m_s = q<Scalar(0) ? Scalar(1) : Scalar(-1);
+    if(r) *r = abs(q);
+  }
+  else if(abs(p) > abs(q))
+  {
+    Scalar t = q/p;
+    Scalar u = sqrt(Scalar(1) + numext::abs2(t));
+    if(p<Scalar(0))
+      u = -u;
+    m_c = Scalar(1)/u;
+    m_s = -t * m_c;
+    if(r) *r = p * u;
+  }
+  else
+  {
+    Scalar t = p/q;
+    Scalar u = sqrt(Scalar(1) + numext::abs2(t));
+    if(q<Scalar(0))
+      u = -u;
+    m_s = -Scalar(1)/u;
+    m_c = -t * m_s;
+    if(r) *r = q * u;
+  }
+
+}
+
+/****************************************************************************************
+*   Implementation of MatrixBase methods
+****************************************************************************************/
+
+/** \jacobi_module
+  * Applies the clockwise 2D rotation \a j to the set of 2D vectors of coordinates \a x and \a y:
+  * \f$ \left ( \begin{array}{c} x \\ y \end{array} \right )  =  J \left ( \begin{array}{c} x \\ y \end{array} \right ) \f$
+  *
+  * \sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
+  */
+namespace internal {
+template<typename VectorX, typename VectorY, typename OtherScalar>
+void apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation<OtherScalar>& j);
+}
+
+/** \jacobi_module
+  * Applies the rotation in the plane \a j to the rows \a p and \a q of \c *this, i.e., it computes B = J * B,
+  * with \f$ B = \left ( \begin{array}{c} \text{*this.row}(p) \\ \text{*this.row}(q) \end{array} \right ) \f$.
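+  *
+  * A short sketch (values illustrative; this zeroes one subdiagonal entry, as in a single Givens QR step):
+  * \code
+  * Eigen::Matrix3f B = Eigen::Matrix3f::Random();
+  * Eigen::JacobiRotation<float> J;
+  * J.makeGivens(B(0,0), B(1,0));
+  * B.applyOnTheLeft(0, 1, J.transpose()); // B = J^T * B, zeroing B(1,0)
+  * \endcode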
+  *
+  * \sa class JacobiRotation, MatrixBase::applyOnTheRight(), internal::apply_rotation_in_the_plane()
+  */
+template<typename Derived>
+template<typename OtherScalar>
+inline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j)
+{
+  RowXpr x(this->row(p));
+  RowXpr y(this->row(q));
+  internal::apply_rotation_in_the_plane(x, y, j);
+}
+
+/** \jacobi_module
+  * Applies the rotation in the plane \a j to the columns \a p and \a q of \c *this, i.e., it computes B = B * J
+  * with \f$ B = \left ( \begin{array}{cc} \text{*this.col}(p) & \text{*this.col}(q) \end{array} \right ) \f$.
+  *
+  * \sa class JacobiRotation, MatrixBase::applyOnTheLeft(), internal::apply_rotation_in_the_plane()
+  */
+template<typename Derived>
+template<typename OtherScalar>
+inline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j)
+{
+  ColXpr x(this->col(p));
+  ColXpr y(this->col(q));
+  internal::apply_rotation_in_the_plane(x, y, j.transpose());
+}
+
+namespace internal {
+template<typename VectorX, typename VectorY, typename OtherScalar>
+void /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(VectorX& _x, VectorY& _y, const JacobiRotation<OtherScalar>& j)
+{
+  typedef typename VectorX::Index Index;
+  typedef typename VectorX::Scalar Scalar;
+  enum { PacketSize = packet_traits<Scalar>::size };
+  typedef typename packet_traits<Scalar>::type Packet;
+  eigen_assert(_x.size() == _y.size());
+  Index size = _x.size();
+  Index incrx = _x.innerStride();
+  Index incry = _y.innerStride();
+
+  Scalar* EIGEN_RESTRICT x = &_x.coeffRef(0);
+  Scalar* EIGEN_RESTRICT y = &_y.coeffRef(0);
+  
+  OtherScalar c = j.c();
+  OtherScalar s = j.s();
+  if (c==OtherScalar(1) && s==OtherScalar(0))
+    return;
+
+  /*** dynamic-size vectorized paths ***/
+
+  if(VectorX::SizeAtCompileTime == Dynamic &&
+    (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
+    ((incrx==1 && incry==1) || PacketSize == 1))
+  {
+    // both vectors are sequentially stored in memory => vectorization
+    enum { Peeling = 2 };
+
+    Index alignedStart = internal::first_aligned(y, size);
+    Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;
+
+    const Packet pc = pset1<Packet>(c);
+    const Packet ps = pset1<Packet>(s);
+    conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
+
+    for(Index i=0; i<alignedStart; ++i)
+    {
+      Scalar xi = x[i];
+      Scalar yi = y[i];
+      x[i] =  c * xi + numext::conj(s) * yi;
+      y[i] = -s * xi + numext::conj(c) * yi;
+    }
+
+    Scalar* EIGEN_RESTRICT px = x + alignedStart;
+    Scalar* EIGEN_RESTRICT py = y + alignedStart;
+
+    if(internal::first_aligned(x, size)==alignedStart)
+    {
+      for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)
+      {
+        Packet xi = pload<Packet>(px);
+        Packet yi = pload<Packet>(py);
+        pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+        pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+        px += PacketSize;
+        py += PacketSize;
+      }
+    }
+    else
+    {
+      Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);
+      for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)
+      {
+        Packet xi   = ploadu<Packet>(px);
+        Packet xi1  = ploadu<Packet>(px+PacketSize);
+        Packet yi   = pload <Packet>(py);
+        Packet yi1  = pload <Packet>(py+PacketSize);
+        pstoreu(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+        pstoreu(px+PacketSize, padd(pmul(pc,xi1),pcj.pmul(ps,yi1)));
+        pstore (py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+        pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pmul(ps,xi1)));
+        px += Peeling*PacketSize;
+        py += Peeling*PacketSize;
+      }
+      if(alignedEnd!=peelingEnd)
+      {
+        Packet xi = ploadu<Packet>(x+peelingEnd);
+        Packet yi = pload <Packet>(y+peelingEnd);
+        pstoreu(x+peelingEnd, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+        pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+      }
+    }
+
+    for(Index i=alignedEnd; i<size; ++i)
+    {
+      Scalar xi = x[i];
+      Scalar yi = y[i];
+      x[i] =  c * xi + numext::conj(s) * yi;
+      y[i] = -s * xi + numext::conj(c) * yi;
+    }
+  }
+
+  /*** fixed-size vectorized path ***/
+  else if(VectorX::SizeAtCompileTime != Dynamic &&
+          (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
+          (VectorX::Flags & VectorY::Flags & AlignedBit))
+  {
+    const Packet pc = pset1<Packet>(c);
+    const Packet ps = pset1<Packet>(s);
+    conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
+    Scalar* EIGEN_RESTRICT px = x;
+    Scalar* EIGEN_RESTRICT py = y;
+    for(Index i=0; i<size; i+=PacketSize)
+    {
+      Packet xi = pload<Packet>(px);
+      Packet yi = pload<Packet>(py);
+      pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
+      pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
+      px += PacketSize;
+      py += PacketSize;
+    }
+  }
+
+  /*** non-vectorized path ***/
+  else
+  {
+    for(Index i=0; i<size; ++i)
+    {
+      Scalar xi = *x;
+      Scalar yi = *y;
+      *x =  c * xi + numext::conj(s) * yi;
+      *y = -s * xi + numext::conj(c) * yi;
+      x += incrx;
+      y += incry;
+    }
+  }
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBI_H
diff --git a/Eigen/src/LU/CMakeLists.txt b/Eigen/src/LU/CMakeLists.txt
new file mode 100644
index 0000000..e0d8d78
--- /dev/null
+++ b/Eigen/src/LU/CMakeLists.txt
@@ -0,0 +1,8 @@
+FILE(GLOB Eigen_LU_SRCS "*.h")
+
+INSTALL(FILES 
+  ${Eigen_LU_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/LU COMPONENT Devel
+  )
+
+ADD_SUBDIRECTORY(arch)
diff --git a/Eigen/src/LU/Determinant.h b/Eigen/src/LU/Determinant.h
new file mode 100644
index 0000000..bb8e78a
--- /dev/null
+++ b/Eigen/src/LU/Determinant.h
@@ -0,0 +1,101 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DETERMINANT_H
+#define EIGEN_DETERMINANT_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Derived>
+inline const typename Derived::Scalar bruteforce_det3_helper
+(const MatrixBase<Derived>& matrix, int a, int b, int c)
+{
+  return matrix.coeff(0,a)
+         * (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b));
+}
+
+template<typename Derived>
+const typename Derived::Scalar bruteforce_det4_helper
+(const MatrixBase<Derived>& matrix, int j, int k, int m, int n)
+{
+  return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))
+       * (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));
+}
+
+template<typename Derived,
+         int DeterminantType = Derived::RowsAtCompileTime
+> struct determinant_impl
+{
+  static inline typename traits<Derived>::Scalar run(const Derived& m)
+  {
+    if(Derived::ColsAtCompileTime==Dynamic && m.rows()==0)
+      return typename traits<Derived>::Scalar(1);
+    return m.partialPivLu().determinant();
+  }
+};
+
+template<typename Derived> struct determinant_impl<Derived, 1>
+{
+  static inline typename traits<Derived>::Scalar run(const Derived& m)
+  {
+    return m.coeff(0,0);
+  }
+};
+
+template<typename Derived> struct determinant_impl<Derived, 2>
+{
+  static inline typename traits<Derived>::Scalar run(const Derived& m)
+  {
+    return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1);
+  }
+};
+
+template<typename Derived> struct determinant_impl<Derived, 3>
+{
+  static inline typename traits<Derived>::Scalar run(const Derived& m)
+  {
+    return bruteforce_det3_helper(m,0,1,2)
+          - bruteforce_det3_helper(m,1,0,2)
+          + bruteforce_det3_helper(m,2,0,1);
+  }
+};
+
+template<typename Derived> struct determinant_impl<Derived, 4>
+{
+  static typename traits<Derived>::Scalar run(const Derived& m)
+  {
+    // trick by Martin Costabel to compute 4x4 det with only 30 muls
+    return bruteforce_det4_helper(m,0,1,2,3)
+          - bruteforce_det4_helper(m,0,2,1,3)
+          + bruteforce_det4_helper(m,0,3,1,2)
+          + bruteforce_det4_helper(m,1,2,0,3)
+          - bruteforce_det4_helper(m,1,3,0,2)
+          + bruteforce_det4_helper(m,2,3,0,1);
+  }
+};
+
+} // end namespace internal
+
+/** \lu_module
+  *
+  * \returns the determinant of this matrix
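+  *
+  * A small hand-checked illustration:
+  * \code
+  * Eigen::Matrix2d m;
+  * m << 1, 2,
+  *      3, 4;
+  * double d = m.determinant(); // 1*4 - 2*3 = -2
+  * \endcode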
+  */
+template<typename Derived>
+inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
+{
+  eigen_assert(rows() == cols());
+  typedef typename internal::nested<Derived,Base::RowsAtCompileTime>::type Nested;
+  return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_DETERMINANT_H
diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h
new file mode 100644
index 0000000..26bc714
--- /dev/null
+++ b/Eigen/src/LU/FullPivLU.h
@@ -0,0 +1,751 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LU_H
+#define EIGEN_LU_H
+
+namespace Eigen { 
+
+/** \ingroup LU_Module
+  *
+  * \class FullPivLU
+  *
+  * \brief LU decomposition of a matrix with complete pivoting, and related features
+  *
+  * \param MatrixType the type of the matrix of which we are computing the LU decomposition
+  *
+  * This class represents an LU decomposition of any matrix, with complete pivoting: the matrix A is
+  * decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is
+  * upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU
+  * decomposition. The diagonal coefficients of U (its eigenvalues, since U is triangular) are sorted
+  * in such a way that any zeros are at the end.
+  *
+  * This decomposition provides the generic approach to solving systems of linear equations, computing
+  * the rank, invertibility, inverse, kernel, and determinant.
+  *
+  * This LU decomposition is very stable and well tested with large matrices. However, there are use cases where the SVD
+  * decomposition is inherently more stable and/or flexible. For example, when computing the kernel of a matrix,
+  * working with the SVD lets one select the smallest singular values of the matrix, something that
+  * the LU decomposition cannot see.
+  *
+  * The data of the LU decomposition can be directly accessed through the methods matrixLU(),
+  * permutationP(), permutationQ().
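+  *
+  * A minimal solve/rank sketch (the sizes are illustrative only):
+  * \code
+  * Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
+  * Eigen::VectorXd b = Eigen::VectorXd::Random(4);
+  * Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
+  * Eigen::VectorXd x = lu.solve(b);      // a solution of A x = b, if one exists
+  * Eigen::MatrixXd::Index r = lu.rank(); // rank, using the current threshold
+  * \endcode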
+  *
+  * As an example, here is how the original matrix can be retrieved:
+  * \include class_FullPivLU.cpp
+  * Output: \verbinclude class_FullPivLU.out
+  *
+  * \sa MatrixBase::fullPivLu(), MatrixBase::determinant(), MatrixBase::inverse()
+  */
+template<typename _MatrixType> class FullPivLU
+{
+  public:
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    typedef typename internal::traits<MatrixType>::StorageKind StorageKind;
+    typedef typename MatrixType::Index Index;
+    typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
+    typedef typename internal::plain_col_type<MatrixType, Index>::type IntColVectorType;
+    typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType;
+    typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationPType;
+
+    /**
+      * \brief Default Constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via LU::compute(const MatrixType&).
+      */
+    FullPivLU();
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem dimensions \a rows and \a cols.
+      * \sa FullPivLU()
+      */
+    FullPivLU(Index rows, Index cols);
+
+    /** Constructor.
+      *
+      * \param matrix the matrix of which to compute the LU decomposition.
+      *               It is required to be nonzero.
+      */
+    FullPivLU(const MatrixType& matrix);
+
+    /** Computes the LU decomposition of the given matrix.
+      *
+      * \param matrix the matrix of which to compute the LU decomposition.
+      *               It is required to be nonzero.
+      *
+      * \returns a reference to *this
+      */
+    FullPivLU& compute(const MatrixType& matrix);
+
+    /** \returns the LU decomposition matrix: the upper-triangular part is U, the
+      * unit-lower-triangular part is L (at least for square matrices; in the non-square
+      * case, special care is needed, see the documentation of class FullPivLU).
+      *
+      * \sa matrixL(), matrixU()
+      */
+    inline const MatrixType& matrixLU() const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return m_lu;
+    }
+
+    /** \returns the number of nonzero pivots in the LU decomposition.
+      * Here nonzero is meant in the exact sense, not in a fuzzy sense.
+      * So that notion isn't really intrinsically interesting, but it is
+      * still useful when implementing algorithms.
+      *
+      * \sa rank()
+      */
+    inline Index nonzeroPivots() const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return m_nonzero_pivots;
+    }
+
+    /** \returns the absolute value of the biggest pivot, i.e. the biggest
+      *          diagonal coefficient of U.
+      */
+    RealScalar maxPivot() const { return m_maxpivot; }
+
+    /** \returns the permutation matrix P
+      *
+      * \sa permutationQ()
+      */
+    inline const PermutationPType& permutationP() const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return m_p;
+    }
+
+    /** \returns the permutation matrix Q
+      *
+      * \sa permutationP()
+      */
+    inline const PermutationQType& permutationQ() const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return m_q;
+    }
+
+    /** \returns the kernel of the matrix, also called its null-space. The columns of the returned matrix
+      * will form a basis of the kernel.
+      *
+      * \note If the kernel has dimension zero, then the returned matrix is a column-vector filled with zeros.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      *
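+      * A small sketch (the matrix is chosen so that the kernel has dimension one):
+      * \code
+      * Eigen::MatrixXd A(2, 3);
+      * A << 1, 0, 1,
+      *      0, 1, 1;
+      * Eigen::MatrixXd ker = A.fullPivLu().kernel(); // A * ker is numerically zero
+      * \endcode
+      *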
+      * Example: \include FullPivLU_kernel.cpp
+      * Output: \verbinclude FullPivLU_kernel.out
+      *
+      * \sa image()
+      */
+    inline const internal::kernel_retval<FullPivLU> kernel() const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return internal::kernel_retval<FullPivLU>(*this);
+    }
+
+    /** \returns the image of the matrix, also called its column-space. The columns of the returned matrix
+      * will form a basis of the image.
+      *
+      * \param originalMatrix the original matrix, of which *this is the LU decomposition.
+      *                       The reason why it is needed to pass it here, is that this allows
+      *                       a large optimization, as otherwise this method would need to reconstruct it
+      *                       from the LU decomposition.
+      *
+      * \note If the image has dimension zero, then the returned matrix is a column-vector filled with zeros.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      *
+      * Example: \include FullPivLU_image.cpp
+      * Output: \verbinclude FullPivLU_image.out
+      *
+      * \sa kernel()
+      */
+    inline const internal::image_retval<FullPivLU>
+      image(const MatrixType& originalMatrix) const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return internal::image_retval<FullPivLU>(*this, originalMatrix);
+    }
+
+    /** This method returns a solution x to the equation Ax=b, where A is the matrix of which
+      * *this is the LU decomposition.
+      *
+      * \param b the right-hand-side of the equation to solve. Can be a vector or a matrix,
+      *          the only requirement in order for the equation to make sense is that
+      *          b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.
+      *
+      * \returns a solution.
+      *
+      * \note_about_checking_solutions
+      *
+      * \note_about_arbitrary_choice_of_solution
+      * \note_about_using_kernel_to_study_multiple_solutions
+      *
+      * Example: \include FullPivLU_solve.cpp
+      * Output: \verbinclude FullPivLU_solve.out
+      *
+      * \sa TriangularView::solve(), kernel(), inverse()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<FullPivLU, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return internal::solve_retval<FullPivLU, Rhs>(*this, b.derived());
+    }
+
+    /** \returns the determinant of the matrix of which
+      * *this is the LU decomposition. This method has only linear complexity
+      * (that is, O(n) where n is the dimension of the square matrix)
+      * as the LU decomposition has already been computed.
+      *
+      * \note This is only for square matrices.
+      *
+      * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers
+      *       optimized paths.
+      *
+      * \warning a determinant can be very big or small, so for matrices
+      * of large enough dimension, there is a risk of overflow/underflow.
+      *
+      * \sa MatrixBase::determinant()
+      */
+    typename internal::traits<MatrixType>::Scalar determinant() const;
+
+    /** Allows you to prescribe a threshold to be used by certain methods, such as rank(),
+      * which need to determine when pivots are to be considered nonzero. This is not used for the
+      * LU decomposition itself.
+      *
+      * When it needs to get the threshold value, Eigen calls threshold(). By default, this
+      * uses a formula to automatically determine a reasonable threshold.
+      * Once you have called the present method setThreshold(const RealScalar&),
+      * your value is used instead.
+      *
+      * \param threshold The new value to use as the threshold.
+      *
+      * A pivot will be considered nonzero if its absolute value is strictly greater than
+      * \f$ threshold \times \vert maxpivot \vert \f$,
+      * where maxpivot is the biggest pivot.
+      *
+      * If you want to come back to the default behavior, call setThreshold(Default_t).
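+      *
+      * A short sketch, given an existing decomposition object \c lu (the tolerance value is illustrative):
+      * \code
+      * lu.setThreshold(1e-5);
+      * Eigen::MatrixXd::Index r = lu.rank(); // pivots not exceeding 1e-5 * maxpivot count as zero
+      * \endcode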
+      */
+    FullPivLU& setThreshold(const RealScalar& threshold)
+    {
+      m_usePrescribedThreshold = true;
+      m_prescribedThreshold = threshold;
+      return *this;
+    }
+
+    /** Allows you to return to the default behavior, letting Eigen use its default formula for
+      * determining the threshold.
+      *
+      * You should pass the special object Eigen::Default as parameter here.
+      * \code lu.setThreshold(Eigen::Default); \endcode
+      *
+      * See the documentation of setThreshold(const RealScalar&).
+      */
+    FullPivLU& setThreshold(Default_t)
+    {
+      m_usePrescribedThreshold = false;
+      return *this;
+    }
+
+    /** Returns the threshold that will be used by certain methods such as rank().
+      *
+      * See the documentation of setThreshold(const RealScalar&).
+      */
+    RealScalar threshold() const
+    {
+      eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+      return m_usePrescribedThreshold ? m_prescribedThreshold
+      // this formula comes from experimenting (see "LU precision tuning" thread on the list)
+      // and turns out to be identical to Higham's formula used already in LDLt.
+                                      : NumTraits<Scalar>::epsilon() * m_lu.diagonalSize();
+    }
+
+    /** \returns the rank of the matrix of which *this is the LU decomposition.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline Index rank() const
+    {
+      using std::abs;
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
+      Index result = 0;
+      for(Index i = 0; i < m_nonzero_pivots; ++i)
+        result += (abs(m_lu.coeff(i,i)) > premultiplied_threshold);
+      return result;
+    }
+
+    /** \returns the dimension of the kernel of the matrix of which *this is the LU decomposition.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline Index dimensionOfKernel() const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return cols() - rank();
+    }
+
+    /** \returns true if the matrix of which *this is the LU decomposition represents an injective
+      *          linear map, i.e. has trivial kernel; false otherwise.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline bool isInjective() const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return rank() == cols();
+    }
+
+    /** \returns true if the matrix of which *this is the LU decomposition represents a surjective
+      *          linear map; false otherwise.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline bool isSurjective() const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return rank() == rows();
+    }
+
+    /** \returns true if the matrix of which *this is the LU decomposition is invertible.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline bool isInvertible() const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return isInjective() && (m_lu.rows() == m_lu.cols());
+    }
+
+    /** \returns the inverse of the matrix of which *this is the LU decomposition.
+      *
+      * \note If this matrix is not invertible, the returned matrix has undefined coefficients.
+      *       Use isInvertible() to first determine whether this matrix is invertible.
+      *
+      * \sa MatrixBase::inverse()
+      */
+    inline const internal::solve_retval<FullPivLU,typename MatrixType::IdentityReturnType> inverse() const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the inverse of a non-square matrix!");
+      return internal::solve_retval<FullPivLU,typename MatrixType::IdentityReturnType>
+               (*this, MatrixType::Identity(m_lu.rows(), m_lu.cols()));
+    }
+
+    MatrixType reconstructedMatrix() const;
+
+    inline Index rows() const { return m_lu.rows(); }
+    inline Index cols() const { return m_lu.cols(); }
+
+  protected:
+    
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+    
+    MatrixType m_lu;
+    PermutationPType m_p;
+    PermutationQType m_q;
+    IntColVectorType m_rowsTranspositions;
+    IntRowVectorType m_colsTranspositions;
+    Index m_det_pq, m_nonzero_pivots;
+    RealScalar m_maxpivot, m_prescribedThreshold;
+    bool m_isInitialized, m_usePrescribedThreshold;
+};
+
+template<typename MatrixType>
+FullPivLU<MatrixType>::FullPivLU()
+  : m_isInitialized(false), m_usePrescribedThreshold(false)
+{
+}
+
+template<typename MatrixType>
+FullPivLU<MatrixType>::FullPivLU(Index rows, Index cols)
+  : m_lu(rows, cols),
+    m_p(rows),
+    m_q(cols),
+    m_rowsTranspositions(rows),
+    m_colsTranspositions(cols),
+    m_isInitialized(false),
+    m_usePrescribedThreshold(false)
+{
+}
+
+template<typename MatrixType>
+FullPivLU<MatrixType>::FullPivLU(const MatrixType& matrix)
+  : m_lu(matrix.rows(), matrix.cols()),
+    m_p(matrix.rows()),
+    m_q(matrix.cols()),
+    m_rowsTranspositions(matrix.rows()),
+    m_colsTranspositions(matrix.cols()),
+    m_isInitialized(false),
+    m_usePrescribedThreshold(false)
+{
+  compute(matrix);
+}
+
+template<typename MatrixType>
+FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
+{
+  check_template_parameters();
+  
+  // the permutations are stored as int indices, so just to be sure:
+  eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());
+  
+  m_isInitialized = true;
+  m_lu = matrix;
+
+  const Index size = matrix.diagonalSize();
+  const Index rows = matrix.rows();
+  const Index cols = matrix.cols();
+
+  // will store the transpositions, before we accumulate them at the end.
+  // can't accumulate on-the-fly because that will be done in reverse order for the rows.
+  m_rowsTranspositions.resize(matrix.rows());
+  m_colsTranspositions.resize(matrix.cols());
+  Index number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. m_rowsTranspositions[i]!=i
+
+  m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
+  m_maxpivot = RealScalar(0);
+
+  for(Index k = 0; k < size; ++k)
+  {
+    // First, we need to find the pivot.
+
+    // biggest coefficient in the remaining bottom-right corner (starting at row k, col k)
+    Index row_of_biggest_in_corner, col_of_biggest_in_corner;
+    RealScalar biggest_in_corner;
+    biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k)
+                        .cwiseAbs()
+                        .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);
+    row_of_biggest_in_corner += k; // correct the values! since they were computed in the corner,
+    col_of_biggest_in_corner += k; // need to add k to them.
+
+    if(biggest_in_corner==RealScalar(0))
+    {
+      // before exiting, make sure to initialize the still uninitialized transpositions
+      // in a sane state without destroying what we already have.
+      m_nonzero_pivots = k;
+      for(Index i = k; i < size; ++i)
+      {
+        m_rowsTranspositions.coeffRef(i) = i;
+        m_colsTranspositions.coeffRef(i) = i;
+      }
+      break;
+    }
+
+    if(biggest_in_corner > m_maxpivot) m_maxpivot = biggest_in_corner;
+
+    // Now that we've found the pivot, we need to apply the row/col swaps to
+    // bring it to the location (k,k).
+
+    m_rowsTranspositions.coeffRef(k) = row_of_biggest_in_corner;
+    m_colsTranspositions.coeffRef(k) = col_of_biggest_in_corner;
+    if(k != row_of_biggest_in_corner) {
+      m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner));
+      ++number_of_transpositions;
+    }
+    if(k != col_of_biggest_in_corner) {
+      m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner));
+      ++number_of_transpositions;
+    }
+
+    // Now that the pivot is at the right location, we update the remaining
+    // bottom-right corner by Gaussian elimination.
+
+    if(k<rows-1)
+      m_lu.col(k).tail(rows-k-1) /= m_lu.coeff(k,k);
+    if(k<size-1)
+      m_lu.block(k+1,k+1,rows-k-1,cols-k-1).noalias() -= m_lu.col(k).tail(rows-k-1) * m_lu.row(k).tail(cols-k-1);
+  }
+
+  // the main loop is over, we still have to accumulate the transpositions to find the
+  // permutations P and Q
+
+  m_p.setIdentity(rows);
+  for(Index k = size-1; k >= 0; --k)
+    m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k));
+
+  m_q.setIdentity(cols);
+  for(Index k = 0; k < size; ++k)
+    m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));
+
+  m_det_pq = (number_of_transpositions%2) ? -1 : 1;
+  return *this;
+}
+
+template<typename MatrixType>
+typename internal::traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant() const
+{
+  eigen_assert(m_isInitialized && "LU is not initialized.");
+  eigen_assert(m_lu.rows() == m_lu.cols() && "You can't take the determinant of a non-square matrix!");
+  return Scalar(m_det_pq) * Scalar(m_lu.diagonal().prod());
+}
+
+/** \returns the matrix represented by the decomposition,
+ * i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$.
+ * This function is provided for debug purposes. */
+template<typename MatrixType>
+MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
+{
+  eigen_assert(m_isInitialized && "LU is not initialized.");
+  const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols());
+  // LU
+  MatrixType res(m_lu.rows(),m_lu.cols());
+  // FIXME the .toDenseMatrix() should not be needed...
+  res = m_lu.leftCols(smalldim)
+            .template triangularView<UnitLower>().toDenseMatrix()
+      * m_lu.topRows(smalldim)
+            .template triangularView<Upper>().toDenseMatrix();
+
+  // P^{-1}(LU)
+  res = m_p.inverse() * res;
+
+  // (P^{-1}LU)Q^{-1}
+  res = res * m_q.inverse();
+
+  return res;
+}
+
+/********* Implementation of kernel() **************************************************/
+
+namespace internal {
+template<typename _MatrixType>
+struct kernel_retval<FullPivLU<_MatrixType> >
+  : kernel_retval_base<FullPivLU<_MatrixType> >
+{
+  EIGEN_MAKE_KERNEL_HELPERS(FullPivLU<_MatrixType>)
+
+  enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
+            MatrixType::MaxColsAtCompileTime,
+            MatrixType::MaxRowsAtCompileTime)
+  };
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    using std::abs;
+    const Index cols = dec().matrixLU().cols(), dimker = cols - rank();
+    if(dimker == 0)
+    {
+      // The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's
+      // avoid crashing/asserting as that depends on floating point calculations. Let's
+      // just return a single column vector filled with zeros.
+      dst.setZero();
+      return;
+    }
+
+    /* Let us use the following lemma:
+      *
+      * Lemma: If the matrix A has the LU decomposition PAQ = LU,
+      * then Ker A = Q(Ker U).
+      *
+      * Proof: trivial: just keep in mind that P, Q, L are invertible.
+      */
+
+    /* Thus, all we need to do is to compute Ker U, and then apply Q.
+      *
+      * U is upper triangular, with eigenvalues sorted so that any zeros appear at the end.
+      * Thus, the diagonal of U ends with exactly
+      * dimKer zero's. Let us use that to construct dimKer linearly
+      * independent vectors in Ker U.
+      */
+
+    Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
+    RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
+    Index p = 0;
+    for(Index i = 0; i < dec().nonzeroPivots(); ++i)
+      if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
+        pivots.coeffRef(p++) = i;
+    eigen_internal_assert(p == rank());
+
+    // we construct a temporary trapezoid matrix m, by taking the U matrix and
+    // permuting the rows and cols to bring the nonnegligible pivots to the top of
+    // the main diagonal. We need that to be able to apply our triangular solvers.
+    // FIXME when we get triangularView-for-rectangular-matrices, this can be simplified
+    Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, MatrixType::Options,
+           MaxSmallDimAtCompileTime, MatrixType::MaxColsAtCompileTime>
+      m(dec().matrixLU().block(0, 0, rank(), cols));
+    for(Index i = 0; i < rank(); ++i)
+    {
+      if(i) m.row(i).head(i).setZero();
+      m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i);
+    }
+    m.block(0, 0, rank(), rank()).template triangularView<StrictlyLower>().setZero();
+    for(Index i = 0; i < rank(); ++i)
+      m.col(i).swap(m.col(pivots.coeff(i)));
+
+    // ok, we have our trapezoid matrix, we can apply the triangular solver.
+    // notice that the math behind this suggests that we should apply this to the
+    // negative of the RHS, but for performance we just put the negative sign elsewhere, see below.
+    m.topLeftCorner(rank(), rank())
+     .template triangularView<Upper>().solveInPlace(
+        m.topRightCorner(rank(), dimker)
+      );
+
+    // now we must undo the column permutation that we had applied!
+    for(Index i = rank()-1; i >= 0; --i)
+      m.col(i).swap(m.col(pivots.coeff(i)));
+
+    // see the negative sign in the next line, that's what we were talking about above.
+    for(Index i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker);
+    for(Index i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero();
+    for(Index k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1);
+  }
+};
+
+/***** Implementation of image() *****************************************************/
+
+template<typename _MatrixType>
+struct image_retval<FullPivLU<_MatrixType> >
+  : image_retval_base<FullPivLU<_MatrixType> >
+{
+  EIGEN_MAKE_IMAGE_HELPERS(FullPivLU<_MatrixType>)
+
+  enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(
+            MatrixType::MaxColsAtCompileTime,
+            MatrixType::MaxRowsAtCompileTime)
+  };
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    using std::abs;
+    if(rank() == 0)
+    {
+      // The Image is just {0}, so it doesn't have a basis properly speaking, but let's
+      // avoid crashing/asserting as that depends on floating point calculations. Let's
+      // just return a single column vector filled with zeros.
+      dst.setZero();
+      return;
+    }
+
+    Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());
+    RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();
+    Index p = 0;
+    for(Index i = 0; i < dec().nonzeroPivots(); ++i)
+      if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)
+        pivots.coeffRef(p++) = i;
+    eigen_internal_assert(p == rank());
+
+    for(Index i = 0; i < rank(); ++i)
+      dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i)));
+  }
+};
+
+/***** Implementation of solve() *****************************************************/
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<FullPivLU<_MatrixType>, Rhs>
+  : solve_retval_base<FullPivLU<_MatrixType>, Rhs>
+{
+  EIGEN_MAKE_SOLVE_HELPERS(FullPivLU<_MatrixType>,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}.
+     * So we proceed as follows:
+     * Step 1: compute c = P * rhs.
+     * Step 2: replace c by the solution x to Lx = c. Exists because L is invertible.
+     * Step 3: replace c by the solution x to Ux = c. May or may not exist.
+     * Step 4: result = Q * c;
+     */
+
+    const Index rows = dec().rows(), cols = dec().cols(),
+              nonzero_pivots = dec().nonzeroPivots();
+    eigen_assert(rhs().rows() == rows);
+    const Index smalldim = (std::min)(rows, cols);
+
+    if(nonzero_pivots == 0)
+    {
+      dst.setZero();
+      return;
+    }
+
+    typename Rhs::PlainObject c(rhs().rows(), rhs().cols());
+
+    // Step 1
+    c = dec().permutationP() * rhs();
+
+    // Step 2
+    dec().matrixLU()
+        .topLeftCorner(smalldim,smalldim)
+        .template triangularView<UnitLower>()
+        .solveInPlace(c.topRows(smalldim));
+    if(rows>cols)
+    {
+      c.bottomRows(rows-cols)
+        -= dec().matrixLU().bottomRows(rows-cols)
+         * c.topRows(cols);
+    }
+
+    // Step 3
+    dec().matrixLU()
+        .topLeftCorner(nonzero_pivots, nonzero_pivots)
+        .template triangularView<Upper>()
+        .solveInPlace(c.topRows(nonzero_pivots));
+
+    // Step 4
+    for(Index i = 0; i < nonzero_pivots; ++i)
+      dst.row(dec().permutationQ().indices().coeff(i)) = c.row(i);
+    for(Index i = nonzero_pivots; i < dec().matrixLU().cols(); ++i)
+      dst.row(dec().permutationQ().indices().coeff(i)).setZero();
+  }
+};
+
+} // end namespace internal
+
+/******* MatrixBase methods *****************************************************************/
+
+/** \lu_module
+  *
+  * \return the full-pivoting LU decomposition of \c *this.
+  *
+  * \sa class FullPivLU
+  */
+template<typename Derived>
+inline const FullPivLU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::fullPivLu() const
+{
+  return FullPivLU<PlainObject>(eval());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_LU_H
diff --git a/Eigen/src/LU/Inverse.h b/Eigen/src/LU/Inverse.h
new file mode 100644
index 0000000..3cf8871
--- /dev/null
+++ b/Eigen/src/LU/Inverse.h
@@ -0,0 +1,400 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_INVERSE_H
+#define EIGEN_INVERSE_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/**********************************
+*** General case implementation ***
+**********************************/
+
+template<typename MatrixType, typename ResultType, int Size = MatrixType::RowsAtCompileTime>
+struct compute_inverse
+{
+  static inline void run(const MatrixType& matrix, ResultType& result)
+  {
+    result = matrix.partialPivLu().inverse();
+  }
+};
+
+template<typename MatrixType, typename ResultType, int Size = MatrixType::RowsAtCompileTime>
+struct compute_inverse_and_det_with_check { /* nothing! general case not supported. */ };
+
+/****************************
+*** Size 1 implementation ***
+****************************/
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse<MatrixType, ResultType, 1>
+{
+  static inline void run(const MatrixType& matrix, ResultType& result)
+  {
+    typedef typename MatrixType::Scalar Scalar;
+    result.coeffRef(0,0) = Scalar(1) / matrix.coeff(0,0);
+  }
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_and_det_with_check<MatrixType, ResultType, 1>
+{
+  static inline void run(
+    const MatrixType& matrix,
+    const typename MatrixType::RealScalar& absDeterminantThreshold,
+    ResultType& result,
+    typename ResultType::Scalar& determinant,
+    bool& invertible
+  )
+  {
+    using std::abs;
+    determinant = matrix.coeff(0,0);
+    invertible = abs(determinant) > absDeterminantThreshold;
+    if(invertible) result.coeffRef(0,0) = typename ResultType::Scalar(1) / determinant;
+  }
+};
+
+/****************************
+*** Size 2 implementation ***
+****************************/
+
+template<typename MatrixType, typename ResultType>
+inline void compute_inverse_size2_helper(
+    const MatrixType& matrix, const typename ResultType::Scalar& invdet,
+    ResultType& result)
+{
+  result.coeffRef(0,0) = matrix.coeff(1,1) * invdet;
+  result.coeffRef(1,0) = -matrix.coeff(1,0) * invdet;
+  result.coeffRef(0,1) = -matrix.coeff(0,1) * invdet;
+  result.coeffRef(1,1) = matrix.coeff(0,0) * invdet;
+}
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse<MatrixType, ResultType, 2>
+{
+  static inline void run(const MatrixType& matrix, ResultType& result)
+  {
+    typedef typename ResultType::Scalar Scalar;
+    const Scalar invdet = typename MatrixType::Scalar(1) / matrix.determinant();
+    compute_inverse_size2_helper(matrix, invdet, result);
+  }
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_and_det_with_check<MatrixType, ResultType, 2>
+{
+  static inline void run(
+    const MatrixType& matrix,
+    const typename MatrixType::RealScalar& absDeterminantThreshold,
+    ResultType& inverse,
+    typename ResultType::Scalar& determinant,
+    bool& invertible
+  )
+  {
+    using std::abs;
+    typedef typename ResultType::Scalar Scalar;
+    determinant = matrix.determinant();
+    invertible = abs(determinant) > absDeterminantThreshold;
+    if(!invertible) return;
+    const Scalar invdet = Scalar(1) / determinant;
+    compute_inverse_size2_helper(matrix, invdet, inverse);
+  }
+};
+
+/****************************
+*** Size 3 implementation ***
+****************************/
+
+template<typename MatrixType, int i, int j>
+inline typename MatrixType::Scalar cofactor_3x3(const MatrixType& m)
+{
+  enum {
+    i1 = (i+1) % 3,
+    i2 = (i+2) % 3,
+    j1 = (j+1) % 3,
+    j2 = (j+2) % 3
+  };
+  return m.coeff(i1, j1) * m.coeff(i2, j2)
+       - m.coeff(i1, j2) * m.coeff(i2, j1);
+}
+
+template<typename MatrixType, typename ResultType>
+inline void compute_inverse_size3_helper(
+    const MatrixType& matrix,
+    const typename ResultType::Scalar& invdet,
+    const Matrix<typename ResultType::Scalar,3,1>& cofactors_col0,
+    ResultType& result)
+{
+  result.row(0) = cofactors_col0 * invdet;
+  result.coeffRef(1,0) =  cofactor_3x3<MatrixType,0,1>(matrix) * invdet;
+  result.coeffRef(1,1) =  cofactor_3x3<MatrixType,1,1>(matrix) * invdet;
+  result.coeffRef(1,2) =  cofactor_3x3<MatrixType,2,1>(matrix) * invdet;
+  result.coeffRef(2,0) =  cofactor_3x3<MatrixType,0,2>(matrix) * invdet;
+  result.coeffRef(2,1) =  cofactor_3x3<MatrixType,1,2>(matrix) * invdet;
+  result.coeffRef(2,2) =  cofactor_3x3<MatrixType,2,2>(matrix) * invdet;
+}
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse<MatrixType, ResultType, 3>
+{
+  static inline void run(const MatrixType& matrix, ResultType& result)
+  {
+    typedef typename ResultType::Scalar Scalar;
+    Matrix<typename MatrixType::Scalar,3,1> cofactors_col0;
+    cofactors_col0.coeffRef(0) =  cofactor_3x3<MatrixType,0,0>(matrix);
+    cofactors_col0.coeffRef(1) =  cofactor_3x3<MatrixType,1,0>(matrix);
+    cofactors_col0.coeffRef(2) =  cofactor_3x3<MatrixType,2,0>(matrix);
+    const Scalar det = (cofactors_col0.cwiseProduct(matrix.col(0))).sum();
+    const Scalar invdet = Scalar(1) / det;
+    compute_inverse_size3_helper(matrix, invdet, cofactors_col0, result);
+  }
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_and_det_with_check<MatrixType, ResultType, 3>
+{
+  static inline void run(
+    const MatrixType& matrix,
+    const typename MatrixType::RealScalar& absDeterminantThreshold,
+    ResultType& inverse,
+    typename ResultType::Scalar& determinant,
+    bool& invertible
+  )
+  {
+    using std::abs;
+    typedef typename ResultType::Scalar Scalar;
+    Matrix<Scalar,3,1> cofactors_col0;
+    cofactors_col0.coeffRef(0) =  cofactor_3x3<MatrixType,0,0>(matrix);
+    cofactors_col0.coeffRef(1) =  cofactor_3x3<MatrixType,1,0>(matrix);
+    cofactors_col0.coeffRef(2) =  cofactor_3x3<MatrixType,2,0>(matrix);
+    determinant = (cofactors_col0.cwiseProduct(matrix.col(0))).sum();
+    invertible = abs(determinant) > absDeterminantThreshold;
+    if(!invertible) return;
+    const Scalar invdet = Scalar(1) / determinant;
+    compute_inverse_size3_helper(matrix, invdet, cofactors_col0, inverse);
+  }
+};
+
+/****************************
+*** Size 4 implementation ***
+****************************/
+
+template<typename Derived>
+inline const typename Derived::Scalar general_det3_helper
+(const MatrixBase<Derived>& matrix, int i1, int i2, int i3, int j1, int j2, int j3)
+{
+  return matrix.coeff(i1,j1)
+         * (matrix.coeff(i2,j2) * matrix.coeff(i3,j3) - matrix.coeff(i2,j3) * matrix.coeff(i3,j2));
+}
+
+template<typename MatrixType, int i, int j>
+inline typename MatrixType::Scalar cofactor_4x4(const MatrixType& matrix)
+{
+  enum {
+    i1 = (i+1) % 4,
+    i2 = (i+2) % 4,
+    i3 = (i+3) % 4,
+    j1 = (j+1) % 4,
+    j2 = (j+2) % 4,
+    j3 = (j+3) % 4
+  };
+  return general_det3_helper(matrix, i1, i2, i3, j1, j2, j3)
+       + general_det3_helper(matrix, i2, i3, i1, j1, j2, j3)
+       + general_det3_helper(matrix, i3, i1, i2, j1, j2, j3);
+}
+
+template<int Arch, typename Scalar, typename MatrixType, typename ResultType>
+struct compute_inverse_size4
+{
+  static void run(const MatrixType& matrix, ResultType& result)
+  {
+    result.coeffRef(0,0) =  cofactor_4x4<MatrixType,0,0>(matrix);
+    result.coeffRef(1,0) = -cofactor_4x4<MatrixType,0,1>(matrix);
+    result.coeffRef(2,0) =  cofactor_4x4<MatrixType,0,2>(matrix);
+    result.coeffRef(3,0) = -cofactor_4x4<MatrixType,0,3>(matrix);
+    result.coeffRef(0,2) =  cofactor_4x4<MatrixType,2,0>(matrix);
+    result.coeffRef(1,2) = -cofactor_4x4<MatrixType,2,1>(matrix);
+    result.coeffRef(2,2) =  cofactor_4x4<MatrixType,2,2>(matrix);
+    result.coeffRef(3,2) = -cofactor_4x4<MatrixType,2,3>(matrix);
+    result.coeffRef(0,1) = -cofactor_4x4<MatrixType,1,0>(matrix);
+    result.coeffRef(1,1) =  cofactor_4x4<MatrixType,1,1>(matrix);
+    result.coeffRef(2,1) = -cofactor_4x4<MatrixType,1,2>(matrix);
+    result.coeffRef(3,1) =  cofactor_4x4<MatrixType,1,3>(matrix);
+    result.coeffRef(0,3) = -cofactor_4x4<MatrixType,3,0>(matrix);
+    result.coeffRef(1,3) =  cofactor_4x4<MatrixType,3,1>(matrix);
+    result.coeffRef(2,3) = -cofactor_4x4<MatrixType,3,2>(matrix);
+    result.coeffRef(3,3) =  cofactor_4x4<MatrixType,3,3>(matrix);
+    result /= (matrix.col(0).cwiseProduct(result.row(0).transpose())).sum();
+  }
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse<MatrixType, ResultType, 4>
+ : compute_inverse_size4<Architecture::Target, typename MatrixType::Scalar,
+                            MatrixType, ResultType>
+{
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_and_det_with_check<MatrixType, ResultType, 4>
+{
+  static inline void run(
+    const MatrixType& matrix,
+    const typename MatrixType::RealScalar& absDeterminantThreshold,
+    ResultType& inverse,
+    typename ResultType::Scalar& determinant,
+    bool& invertible
+  )
+  {
+    using std::abs;
+    determinant = matrix.determinant();
+    invertible = abs(determinant) > absDeterminantThreshold;
+    if(invertible) compute_inverse<MatrixType, ResultType>::run(matrix, inverse);
+  }
+};
+
+/*************************
+*** MatrixBase methods ***
+*************************/
+
+template<typename MatrixType>
+struct traits<inverse_impl<MatrixType> >
+{
+  typedef typename MatrixType::PlainObject ReturnType;
+};
+
+template<typename MatrixType>
+struct inverse_impl : public ReturnByValue<inverse_impl<MatrixType> >
+{
+  typedef typename MatrixType::Index Index;
+  typedef typename internal::eval<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
+  MatrixTypeNested m_matrix;
+
+  inverse_impl(const MatrixType& matrix)
+    : m_matrix(matrix)
+  {}
+
+  inline Index rows() const { return m_matrix.rows(); }
+  inline Index cols() const { return m_matrix.cols(); }
+
+  template<typename Dest> inline void evalTo(Dest& dst) const
+  {
+    const int Size = EIGEN_PLAIN_ENUM_MIN(MatrixType::ColsAtCompileTime,Dest::ColsAtCompileTime);
+    EIGEN_ONLY_USED_FOR_DEBUG(Size);
+    eigen_assert(( (Size<=1) || (Size>4) || (extract_data(m_matrix)!=extract_data(dst)))
+              && "Aliasing problem detected in inverse(), you need to do inverse().eval() here.");
+
+    compute_inverse<MatrixTypeNestedCleaned, Dest>::run(m_matrix, dst);
+  }
+};
+
+} // end namespace internal
+
+/** \lu_module
+  *
+  * \returns the matrix inverse of this matrix.
+  *
+  * For small fixed sizes up to 4x4, this method uses cofactors.
+  * In the general case, this method uses class PartialPivLU.
+  *
+  * \note This matrix must be invertible, otherwise the result is undefined. If you need an
+  * invertibility check, do the following:
+  * \li for fixed sizes up to 4x4, use computeInverseAndDetWithCheck().
+  * \li for the general case, use class FullPivLU.
+  *
+  * Example: \include MatrixBase_inverse.cpp
+  * Output: \verbinclude MatrixBase_inverse.out
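+  *
+  * A tiny hand-checked sketch:
+  * \code
+  * Eigen::Matrix2d m;
+  * m << 2, 0,
+  *      0, 4;
+  * Eigen::Matrix2d mi = m.inverse(); // the diagonal matrix with entries 0.5, 0.25
+  * \endcode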
+  *
+  * \sa computeInverseAndDetWithCheck()
+  */
+template<typename Derived>
+inline const internal::inverse_impl<Derived> MatrixBase<Derived>::inverse() const
+{
+  EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsInteger,THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)
+  eigen_assert(rows() == cols());
+  return internal::inverse_impl<Derived>(derived());
+}
+
+/** \lu_module
+  *
+  * Computation of matrix inverse and determinant, with invertibility check.
+  *
+  * This is only for fixed-size square matrices of size up to 4x4.
+  *
+  * \param inverse Reference to the matrix in which to store the inverse.
+  * \param determinant Reference to the variable in which to store the determinant.
+  * \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
+  * \param absDeterminantThreshold Optional parameter controlling the invertibility check.
+  *                                The matrix will be declared invertible if the absolute value of its
+  *                                determinant is greater than this threshold.
+  *
+  * Example: \include MatrixBase_computeInverseAndDetWithCheck.cpp
+  * Output: \verbinclude MatrixBase_computeInverseAndDetWithCheck.out
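+  *
+  * A minimal call sketch (all names are local to this snippet; the default threshold is used):
+  * \code
+  * Eigen::Matrix3d m = Eigen::Matrix3d::Random();
+  * Eigen::Matrix3d inv;
+  * double det;
+  * bool invertible;
+  * m.computeInverseAndDetWithCheck(inv, det, invertible);
+  * \endcode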
+  *
+  * \sa inverse(), computeInverseWithCheck()
+  */
+template<typename Derived>
+template<typename ResultType>
+inline void MatrixBase<Derived>::computeInverseAndDetWithCheck(
+    ResultType& inverse,
+    typename ResultType::Scalar& determinant,
+    bool& invertible,
+    const RealScalar& absDeterminantThreshold
+  ) const
+{
+  // I'd love to put some static assertions here, but SFINAE means that they would have no effect...
+  eigen_assert(rows() == cols());
+  // for 2x2, it's worth giving a chance to avoid evaluating.
+  // for larger sizes, evaluating has negligible cost and limits code size.
+  typedef typename internal::conditional<
+    RowsAtCompileTime == 2,
+    typename internal::remove_all<typename internal::nested<Derived, 2>::type>::type,
+    PlainObject
+  >::type MatrixType;
+  internal::compute_inverse_and_det_with_check<MatrixType, ResultType>::run
+    (derived(), absDeterminantThreshold, inverse, determinant, invertible);
+}
+
+/** \lu_module
+  *
+  * Computation of matrix inverse, with invertibility check.
+  *
+  * This is only for fixed-size square matrices of size up to 4x4.
+  *
+  * \param inverse Reference to the matrix in which to store the inverse.
+  * \param invertible Reference to the bool variable in which to store whether the matrix is invertible.
+  * \param absDeterminantThreshold Optional parameter controlling the invertibility check.
+  *                                The matrix will be declared invertible if the absolute value of its
+  *                                determinant is greater than this threshold.
+  *
+  * Example: \include MatrixBase_computeInverseWithCheck.cpp
+  * Output: \verbinclude MatrixBase_computeInverseWithCheck.out
+  *
+  * \sa inverse(), computeInverseAndDetWithCheck()
+  */
+template<typename Derived>
+template<typename ResultType>
+inline void MatrixBase<Derived>::computeInverseWithCheck(
+    ResultType& inverse,
+    bool& invertible,
+    const RealScalar& absDeterminantThreshold
+  ) const
+{
+  RealScalar determinant;
+  // I'd love to put some static assertions here, but SFINAE means that they would have no effect...
+  eigen_assert(rows() == cols());
+  computeInverseAndDetWithCheck(inverse,determinant,invertible,absDeterminantThreshold);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_INVERSE_H
diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h
new file mode 100644
index 0000000..7d1db94
--- /dev/null
+++ b/Eigen/src/LU/PartialPivLU.h
@@ -0,0 +1,509 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PARTIALLU_H
+#define EIGEN_PARTIALLU_H
+
+namespace Eigen { 
+
+/** \ingroup LU_Module
+  *
+  * \class PartialPivLU
+  *
+  * \brief LU decomposition of a matrix with partial pivoting, and related features
+  *
+  * \param MatrixType the type of the matrix of which we are computing the LU decomposition
+  *
+  * This class represents an LU decomposition of a \b square \b invertible matrix, with partial pivoting: the matrix A
+  * is decomposed as A = PLU where L is unit-lower-triangular, U is upper-triangular, and P
+  * is a permutation matrix.
+  *
+  * Typically, partial pivoting LU decomposition is only considered numerically stable for square invertible
+  * matrices. Thus LAPACK's dgesv and dgesvx require the matrix to be square and invertible. The present class
+  * does the same. It will assert that the matrix is square, but it won't (actually it can't) check that the
+  * matrix is invertible: it is your task to check that you only use this decomposition on invertible matrices.
+  *
+  * The guaranteed safe alternative, working for all matrices, is the full pivoting LU decomposition, provided
+  * by class FullPivLU.
+  *
+  * This is \b not a rank-revealing LU decomposition. Many features are intentionally absent from this class,
+  * such as rank computation. If you need these features, use class FullPivLU.
+  *
+  * This LU decomposition is suitable for inverting invertible matrices. It is what MatrixBase::inverse() uses
+  * in the general case.
+  * On the other hand, it is \b not suitable for determining whether a given matrix is invertible.
+  *
+  * The data of the LU decomposition can be directly accessed through the methods matrixLU(), permutationP().
+  *
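+  * A minimal solve sketch (assumes the matrix is invertible; the sizes are illustrative only):
+  * \code
+  * Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
+  * Eigen::VectorXd b = Eigen::VectorXd::Random(4);
+  * Eigen::PartialPivLU<Eigen::MatrixXd> lu(A);
+  * Eigen::VectorXd x = lu.solve(b);
+  * \endcode
+  *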
+  * \sa MatrixBase::partialPivLu(), MatrixBase::determinant(), MatrixBase::inverse(), class FullPivLU
+  */
+template<typename _MatrixType> class PartialPivLU
+{
+  public:
+
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    typedef typename internal::traits<MatrixType>::StorageKind StorageKind;
+    typedef typename MatrixType::Index Index;
+    typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
+    typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
+
+
+    /**
+    * \brief Default Constructor.
+    *
+    * The default constructor is useful in cases in which the user intends to
+    * perform decompositions via PartialPivLU::compute(const MatrixType&).
+    */
+    PartialPivLU();
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa PartialPivLU()
+      */
+    PartialPivLU(Index size);
+
+    /** Constructor.
+      *
+      * \param matrix the matrix of which to compute the LU decomposition.
+      *
+      * \warning The matrix should have full rank (e.g. if it's square, it should be invertible).
+      * If you need to deal with non-full rank, use class FullPivLU instead.
+      */
+    PartialPivLU(const MatrixType& matrix);
+
+    PartialPivLU& compute(const MatrixType& matrix);
+
+    /** \returns the LU decomposition matrix: the upper-triangular part is U, the
+      * unit-lower-triangular part is L (at least for square matrices; in the non-square
+      * case, special care is needed, see the documentation of class FullPivLU).
+      *
+      * \sa matrixL(), matrixU()
+      */
+    inline const MatrixType& matrixLU() const
+    {
+      eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+      return m_lu;
+    }
+
+    /** \returns the permutation matrix P.
+      */
+    inline const PermutationType& permutationP() const
+    {
+      eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+      return m_p;
+    }
+
+    /** This method returns the solution x to the equation Ax=b, where A is the matrix of which
+      * *this is the LU decomposition.
+      *
+      * \param b the right-hand-side of the equation to solve. Can be a vector or a matrix,
+      *          the only requirement in order for the equation to make sense is that
+      *          b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.
+      *
+      * \returns the solution.
+      *
+      * Example: \include PartialPivLU_solve.cpp
+      * Output: \verbinclude PartialPivLU_solve.out
+      *
+      * Since this PartialPivLU class assumes that the matrix A is invertible anyway, the solution
+      * theoretically exists and is unique regardless of b.
+      *
+      * \sa TriangularView::solve(), inverse(), computeInverse()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<PartialPivLU, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+      return internal::solve_retval<PartialPivLU, Rhs>(*this, b.derived());
+    }
+
+    /** \returns the inverse of the matrix of which *this is the LU decomposition.
+      *
+      * \warning The matrix being decomposed here is assumed to be invertible. If you need to check for
+      *          invertibility, use class FullPivLU instead.
+      *
+      * \sa MatrixBase::inverse(), LU::inverse()
+      */
+    inline const internal::solve_retval<PartialPivLU,typename MatrixType::IdentityReturnType> inverse() const
+    {
+      eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+      return internal::solve_retval<PartialPivLU,typename MatrixType::IdentityReturnType>
+               (*this, MatrixType::Identity(m_lu.rows(), m_lu.cols()));
+    }
+
+    /** \returns the determinant of the matrix of which
+      * *this is the LU decomposition. It has only linear complexity
+      * (that is, O(n) where n is the dimension of the square matrix)
+      * as the LU decomposition has already been computed.
+      *
+      * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers
+      *       optimized paths.
+      *
+      * \warning a determinant can be very big or small, so for matrices
+      * of large enough dimension, there is a risk of overflow/underflow.
+      *
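+      * A minimal usage sketch (hypothetical names):
+      * \code
+      * PartialPivLU<MatrixXd> lu(A);   // A must be square and invertible
+      * double det = lu.determinant();  // O(n) given the existing factorization
+      * \endcode
+      *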
+      * \sa MatrixBase::determinant()
+      */
+    typename internal::traits<MatrixType>::Scalar determinant() const;
+
+    MatrixType reconstructedMatrix() const;
+
+    inline Index rows() const { return m_lu.rows(); }
+    inline Index cols() const { return m_lu.cols(); }
+
+  protected:
+    
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+    
+    MatrixType m_lu;
+    PermutationType m_p;
+    TranspositionType m_rowsTranspositions;
+    Index m_det_p;
+    bool m_isInitialized;
+};
+
+template<typename MatrixType>
+PartialPivLU<MatrixType>::PartialPivLU()
+  : m_lu(),
+    m_p(),
+    m_rowsTranspositions(),
+    m_det_p(0),
+    m_isInitialized(false)
+{
+}
+
+template<typename MatrixType>
+PartialPivLU<MatrixType>::PartialPivLU(Index size)
+  : m_lu(size, size),
+    m_p(size),
+    m_rowsTranspositions(size),
+    m_det_p(0),
+    m_isInitialized(false)
+{
+}
+
+template<typename MatrixType>
+PartialPivLU<MatrixType>::PartialPivLU(const MatrixType& matrix)
+  : m_lu(matrix.rows(), matrix.rows()),
+    m_p(matrix.rows()),
+    m_rowsTranspositions(matrix.rows()),
+    m_det_p(0),
+    m_isInitialized(false)
+{
+  compute(matrix);
+}
+
+namespace internal {
+
+/** \internal This struct provides both the unblocked and the blocked (recursive) versions of the partial pivoting LU algorithm, see unblocked_lu() and blocked_lu(). */
+template<typename Scalar, int StorageOrder, typename PivIndex>
+struct partial_lu_impl
+{
+  // FIXME add a stride to Map, so that the following mapping becomes easier.
+  // Another option would be to create an expression able to automatically
+  // wrap any Map, Matrix, and Block expression as a unique type, but since that's exactly
+  // a Map + stride, why not add a stride to Map, along with convenient ctors from a Matrix
+  // and a Block.
+  typedef Map<Matrix<Scalar, Dynamic, Dynamic, StorageOrder> > MapLU;
+  typedef Block<MapLU, Dynamic, Dynamic> MatrixType;
+  typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef typename MatrixType::Index Index;
+
+  /** \internal performs the LU decomposition in-place of the matrix \a lu
+    * using an unblocked algorithm.
+    *
+    * In addition, this function returns the row transpositions in the
+    * vector \a row_transpositions which must have a size equal to the number
+    * of columns of the matrix \a lu, and an integer \a nb_transpositions
+    * which returns the actual number of transpositions.
+    *
+    * \returns The index of the first exactly-zero pivot if any, or a negative number otherwise.
+    */
+  static Index unblocked_lu(MatrixType& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions)
+  {
+    const Index rows = lu.rows();
+    const Index cols = lu.cols();
+    const Index size = (std::min)(rows,cols);
+    nb_transpositions = 0;
+    Index first_zero_pivot = -1;
+    for(Index k = 0; k < size; ++k)
+    {
+      Index rrows = rows-k-1;
+      Index rcols = cols-k-1;
+        
+      Index row_of_biggest_in_col;
+      RealScalar biggest_in_corner
+        = lu.col(k).tail(rows-k).cwiseAbs().maxCoeff(&row_of_biggest_in_col);
+      row_of_biggest_in_col += k;
+
+      row_transpositions[k] = PivIndex(row_of_biggest_in_col);
+
+      if(biggest_in_corner != RealScalar(0))
+      {
+        if(k != row_of_biggest_in_col)
+        {
+          lu.row(k).swap(lu.row(row_of_biggest_in_col));
+          ++nb_transpositions;
+        }
+
+        // FIXME shall we introduce a safe quotient expression in case 1/lu.coeff(k,k)
+        // overflows but the actual quotient does not?
+        lu.col(k).tail(rrows) /= lu.coeff(k,k);
+      }
+      else if(first_zero_pivot==-1)
+      {
+        // the pivot is exactly zero: we record the index of the first zero pivot
+        // and continue the factorization such that we still have A = PLU
+        first_zero_pivot = k;
+      }
+
+      if(k<rows-1)
+        lu.bottomRightCorner(rrows,rcols).noalias() -= lu.col(k).tail(rrows) * lu.row(k).tail(rcols);
+    }
+    return first_zero_pivot;
+  }
+
+  /** \internal performs the LU decomposition in-place of the matrix represented
+    * by the variables \a rows, \a cols, \a lu_data, and \a luStride using a
+    * recursive, blocked algorithm.
+    *
+    * In addition, this function returns the row transpositions in the
+    * vector \a row_transpositions which must have a size equal to the number
+    * of columns of the matrix \a lu, and an integer \a nb_transpositions
+    * which returns the actual number of transpositions.
+    *
+    * \returns The index of the first exactly-zero pivot if any, or a negative number otherwise.
+    *
+    * \note This very low level interface using pointers, etc. serves to:
+    *   1 - reduce the number of instantiations to the strict minimum
+    *   2 - avoid infinite recursion of the instantiations with Block<Block<Block<...> > >
+    */
+  static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256)
+  {
+    MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);
+    MatrixType lu(lu1,0,0,rows,cols);
+
+    const Index size = (std::min)(rows,cols);
+
+    // if the matrix is too small, no blocking:
+    if(size<=16)
+    {
+      return unblocked_lu(lu, row_transpositions, nb_transpositions);
+    }
+
+    // automatically adjust the number of subdivisions to the size
+    // of the matrix so that there are enough sub-blocks:
+    Index blockSize;
+    {
+      blockSize = size/8;
+      blockSize = (blockSize/16)*16;
+      blockSize = (std::min)((std::max)(blockSize,Index(8)), maxBlockSize);
+    }
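+    // e.g. for size==1000: 1000/8==125, rounded down to a multiple of 16 gives
+    // blockSize==112, which stays within [8, maxBlockSize]; for 17<=size<=127 the
+    // rounding yields 0, so the max() clamps blockSize to 8 (sizes <=16 were
+    // already handled by the unblocked path above).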
+
+    nb_transpositions = 0;
+    Index first_zero_pivot = -1;
+    for(Index k = 0; k < size; k+=blockSize)
+    {
+      Index bs = (std::min)(size-k,blockSize); // actual size of the block
+      Index trows = rows - k - bs; // trailing rows
+      Index tsize = size - k - bs; // trailing size
+
+      // partition the matrix:
+      //                          A00 | A01 | A02
+      // lu  = A_0 | A_1 | A_2 =  A10 | A11 | A12
+      //                          A20 | A21 | A22
+      BlockType A_0(lu,0,0,rows,k);
+      BlockType A_2(lu,0,k+bs,rows,tsize);
+      BlockType A11(lu,k,k,bs,bs);
+      BlockType A12(lu,k,k+bs,bs,tsize);
+      BlockType A21(lu,k+bs,k,trows,bs);
+      BlockType A22(lu,k+bs,k+bs,trows,tsize);
+
+      PivIndex nb_transpositions_in_panel;
+      // recursively call the blocked LU algorithm on [A11^T A21^T]^T
+      // with a very small blocking size:
+      Index ret = blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride,
+                   row_transpositions+k, nb_transpositions_in_panel, 16);
+      if(ret>=0 && first_zero_pivot==-1)
+        first_zero_pivot = k+ret;
+
+      nb_transpositions += nb_transpositions_in_panel;
+      // update permutations and apply them to A_0
+      for(Index i=k; i<k+bs; ++i)
+      {
+        Index piv = (row_transpositions[i] += k);
+        A_0.row(i).swap(A_0.row(piv));
+      }
+
+      if(trows)
+      {
+        // apply permutations to A_2
+        for(Index i=k;i<k+bs; ++i)
+          A_2.row(i).swap(A_2.row(row_transpositions[i]));
+
+        // A12 = A11^-1 A12
+        A11.template triangularView<UnitLower>().solveInPlace(A12);
+
+        A22.noalias() -= A21 * A12;
+      }
+    }
+    return first_zero_pivot;
+  }
+};
+
+/** \internal performs the LU decomposition with partial pivoting in-place.
+  */
+template<typename MatrixType, typename TranspositionType>
+void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::Index& nb_transpositions)
+{
+  eigen_assert(lu.cols() == row_transpositions.size());
+  eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
+
+  partial_lu_impl
+    <typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::Index>
+    ::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
+}
+
+} // end namespace internal
+
+template<typename MatrixType>
+PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& matrix)
+{
+  check_template_parameters();
+  
+  // the row permutation is stored as int indices, so just to be sure:
+  eigen_assert(matrix.rows()<NumTraits<int>::highest());
+  
+  m_lu = matrix;
+
+  eigen_assert(matrix.rows() == matrix.cols() && "PartialPivLU is only for square (and moreover invertible) matrices");
+  const Index size = matrix.rows();
+
+  m_rowsTranspositions.resize(size);
+
+  typename TranspositionType::Index nb_transpositions;
+  internal::partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions);
+  m_det_p = (nb_transpositions%2) ? -1 : 1;
+
+  m_p = m_rowsTranspositions;
+
+  m_isInitialized = true;
+  return *this;
+}
+
+template<typename MatrixType>
+typename internal::traits<MatrixType>::Scalar PartialPivLU<MatrixType>::determinant() const
+{
+  eigen_assert(m_isInitialized && "PartialPivLU is not initialized.");
+  return Scalar(m_det_p) * m_lu.diagonal().prod();
+}
+
+/** \returns the matrix represented by the decomposition,
+ * i.e., it returns the product: P^{-1} L U.
+ * This function is provided for debugging purposes. */
+template<typename MatrixType>
+MatrixType PartialPivLU<MatrixType>::reconstructedMatrix() const
+{
+  eigen_assert(m_isInitialized && "LU is not initialized.");
+  // LU
+  MatrixType res = m_lu.template triangularView<UnitLower>().toDenseMatrix()
+                 * m_lu.template triangularView<Upper>();
+
+  // P^{-1}(LU)
+  res = m_p.inverse() * res;
+
+  return res;
+}
+
+/***** Implementation of solve() *****************************************************/
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<PartialPivLU<_MatrixType>, Rhs>
+  : solve_retval_base<PartialPivLU<_MatrixType>, Rhs>
+{
+  EIGEN_MAKE_SOLVE_HELPERS(PartialPivLU<_MatrixType>,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    /* The decomposition PA = LU can be rewritten as A = P^{-1} L U.
+    * So we proceed as follows:
+    * Step 1: compute c = Pb.
+    * Step 2: replace c by the solution x to Lx = c.
+    * Step 3: replace c by the solution x to Ux = c.
+    */
+
+    eigen_assert(rhs().rows() == dec().matrixLU().rows());
+
+    // Step 1
+    dst = dec().permutationP() * rhs();
+
+    // Step 2
+    dec().matrixLU().template triangularView<UnitLower>().solveInPlace(dst);
+
+    // Step 3
+    dec().matrixLU().template triangularView<Upper>().solveInPlace(dst);
+  }
+};
+
+} // end namespace internal
+
+/******** MatrixBase methods *******/
+
+/** \lu_module
+  *
+  * \return the partial-pivoting LU decomposition of \c *this.
+  *
+  * \sa class PartialPivLU
+  */
+template<typename Derived>
+inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::partialPivLu() const
+{
+  return PartialPivLU<PlainObject>(eval());
+}
+
+#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS
+/** \lu_module
+  *
+  * Synonym of partialPivLu().
+  *
+  * \return the partial-pivoting LU decomposition of \c *this.
+  *
+  * \sa class PartialPivLU
+  */
+template<typename Derived>
+inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::lu() const
+{
+  return PartialPivLU<PlainObject>(eval());
+}
+#endif
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARTIALLU_H
diff --git a/Eigen/src/LU/PartialPivLU_MKL.h b/Eigen/src/LU/PartialPivLU_MKL.h
new file mode 100644
index 0000000..9035953
--- /dev/null
+++ b/Eigen/src/LU/PartialPivLU_MKL.h
@@ -0,0 +1,85 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *     LU decomposition with partial pivoting based on LAPACKE_?getrf function.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_PARTIALLU_LAPACK_H
+#define EIGEN_PARTIALLU_LAPACK_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_LU_PARTPIV(EIGTYPE, MKLTYPE, MKLPREFIX) \
+template<int StorageOrder> \
+struct partial_lu_impl<EIGTYPE, StorageOrder, lapack_int> \
+{ \
+  /* \internal performs the LU decomposition in-place of the matrix pointed to by \a lu_data, delegating to LAPACK's ?getrf */ \
+  static lapack_int blocked_lu(lapack_int rows, lapack_int cols, EIGTYPE* lu_data, lapack_int luStride, lapack_int* row_transpositions, lapack_int& nb_transpositions, lapack_int maxBlockSize=256) \
+  { \
+    EIGEN_UNUSED_VARIABLE(maxBlockSize);\
+    lapack_int matrix_order, first_zero_pivot; \
+    lapack_int m, n, lda, *ipiv, info; \
+    EIGTYPE* a; \
+/* Set up parameters for ?getrf */ \
+    matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
+    lda = luStride; \
+    a = lu_data; \
+    ipiv = row_transpositions; \
+    m = rows; \
+    n = cols; \
+    nb_transpositions = 0; \
+\
+    info = LAPACKE_##MKLPREFIX##getrf( matrix_order, m, n, (MKLTYPE*)a, lda, ipiv ); \
+\
+    for(int i=0;i<m;i++) { ipiv[i]--; if (ipiv[i]!=i) nb_transpositions++; } \
+\
+    eigen_assert(info >= 0); \
+/* nb_transpositions is derived from ipiv in the loop above */ \
+\
+    first_zero_pivot = info; \
+    return first_zero_pivot; \
+  } \
+};
+
+EIGEN_MKL_LU_PARTPIV(double, double, d)
+EIGEN_MKL_LU_PARTPIV(float, float, s)
+EIGEN_MKL_LU_PARTPIV(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_LU_PARTPIV(scomplex, MKL_Complex8, c)
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARTIALLU_LAPACK_H
diff --git a/Eigen/src/LU/arch/CMakeLists.txt b/Eigen/src/LU/arch/CMakeLists.txt
new file mode 100644
index 0000000..f6b7ed9
--- /dev/null
+++ b/Eigen/src/LU/arch/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_LU_arch_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_LU_arch_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/LU/arch COMPONENT Devel
+  )
diff --git a/Eigen/src/LU/arch/Inverse_SSE.h b/Eigen/src/LU/arch/Inverse_SSE.h
new file mode 100644
index 0000000..60b7a23
--- /dev/null
+++ b/Eigen/src/LU/arch/Inverse_SSE.h
@@ -0,0 +1,329 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2001 Intel Corporation
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// The SSE code for the 4x4 float and double matrix inverse in this file
+// comes from the following Intel's library:
+// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
+//
+// Here is the respective copyright and license statement:
+//
+//   Copyright (c) 2001 Intel Corporation.
+//
+// Permition is granted to use, copy, distribute and prepare derivative works
+// of this library for any purpose and without fee, provided, that the above
+// copyright notice and this statement appear in all copies.
+// Intel makes no representations about the suitability of this software for
+// any purpose, and specifically disclaims all warranties.
+// See LEGAL.TXT for all the legal information.
+
+#ifndef EIGEN_INVERSE_SSE_H
+#define EIGEN_INVERSE_SSE_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
+{
+  enum {
+    MatrixAlignment     = bool(MatrixType::Flags&AlignedBit),
+    ResultAlignment     = bool(ResultType::Flags&AlignedBit),
+    StorageOrdersMatch  = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
+  };
+  
+  static void run(const MatrixType& matrix, ResultType& result)
+  {
+    EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };
+
+    // Load the full matrix into registers
+    __m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
+    __m128 _L2 = matrix.template packet<MatrixAlignment>( 4);
+    __m128 _L3 = matrix.template packet<MatrixAlignment>( 8);
+    __m128 _L4 = matrix.template packet<MatrixAlignment>(12);
+
+    // The inverse is calculated using a "divide and conquer" technique. The
+    // original matrix is divided into four 2x2 sub-matrices. Since each
+    // register holds four matrix elements, each of the smaller matrices is
+    // represented as a single register. Hence we get a better locality for the
+    // calculations.
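+    //
+    // Concretely, writing M = [A B; C D] in 2x2 blocks and X# for the adjugate
+    // of X, the identities used below (from Intel's reference implementation) are:
+    //   det(M) = |A|*|D| + |B|*|C| - trace((A#*B)*(D#*C))
+    // and the four blocks of the inverse are the adjugates of
+    //   iA = A*|D| - B*(D#*C),  iB = C*|B| - D*(B#*A),
+    //   iC = B*|C| - A*(C#*D),  iD = D*|A| - C*(A#*B),
+    // each divided by det(M).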
+
+    __m128 A, B, C, D; // the four sub-matrices
+    if(!StorageOrdersMatch)
+    {
+      A = _mm_unpacklo_ps(_L1, _L2);
+      B = _mm_unpacklo_ps(_L3, _L4);
+      C = _mm_unpackhi_ps(_L1, _L2);
+      D = _mm_unpackhi_ps(_L3, _L4);
+    }
+    else
+    {
+      A = _mm_movelh_ps(_L1, _L2);
+      B = _mm_movehl_ps(_L2, _L1);
+      C = _mm_movelh_ps(_L3, _L4);
+      D = _mm_movehl_ps(_L4, _L3);
+    }
+
+    __m128 iA, iB, iC, iD,                 // partial inverse of the sub-matrices
+            DC, AB;
+    __m128 dA, dB, dC, dD;                 // determinant of the sub-matrices
+    __m128 det, d, d1, d2;
+    __m128 rd;                             // reciprocal of the determinant
+
+    //  AB = A# * B
+    AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B);
+    AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E)));
+    //  DC = D# * C
+    DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C);
+    DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E)));
+
+    //  dA = |A|
+    dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A);
+    dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA));
+    //  dB = |B|
+    dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B);
+    dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB));
+
+    //  dC = |C|
+    dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C);
+    dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC));
+    //  dD = |D|
+    dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D);
+    dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD));
+
+    //  d = trace(AB*DC) = trace(A#*B*D#*C)
+    d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB);
+
+    //  iD = C*A#*B
+    iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB));
+    iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB)));
+    //  iA = B*D#*C
+    iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC));
+    iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC)));
+
+    //  d = trace(AB*DC) = trace(A#*B*D#*C) [continue]
+    d  = _mm_add_ps(d, _mm_movehl_ps(d, d));
+    d  = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1));
+    d1 = _mm_mul_ss(dA,dD);
+    d2 = _mm_mul_ss(dB,dC);
+
+    //  iD = D*|A| - C*A#*B
+    iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD);
+
+    //  iA = A*|D| - B*D#*C;
+    iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA);
+
+    //  det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
+    det = _mm_sub_ss(_mm_add_ss(d1,d2),d);
+    rd  = _mm_div_ss(_mm_set_ss(1.0f), det);
+
+//     #ifdef ZERO_SINGULAR
+//         rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd);
+//     #endif
+
+    //  iB = D * (A#B)# = D*B#*A
+    iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33));
+    iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66)));
+    //  iC = A * (D#C)# = A*C#*D
+    iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33));
+    iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));
+
+    rd = _mm_shuffle_ps(rd,rd,0);
+    rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP));
+
+    //  iB = C*|B| - D*B#*A
+    iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);
+
+    //  iC = B*|C| - A*C#*D;
+    iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC);
+
+    //  iX = iX / det
+    iA = _mm_mul_ps(rd,iA);
+    iB = _mm_mul_ps(rd,iB);
+    iC = _mm_mul_ps(rd,iC);
+    iD = _mm_mul_ps(rd,iD);
+
+    result.template writePacket<ResultAlignment>( 0, _mm_shuffle_ps(iA,iB,0x77));
+    result.template writePacket<ResultAlignment>( 4, _mm_shuffle_ps(iA,iB,0x22));
+    result.template writePacket<ResultAlignment>( 8, _mm_shuffle_ps(iC,iD,0x77));
+    result.template writePacket<ResultAlignment>(12, _mm_shuffle_ps(iC,iD,0x22));
+  }
+
+};
+
+template<typename MatrixType, typename ResultType>
+struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
+{
+  enum {
+    MatrixAlignment = bool(MatrixType::Flags&AlignedBit),
+    ResultAlignment = bool(ResultType::Flags&AlignedBit),
+    StorageOrdersMatch  = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
+  };
+  static void run(const MatrixType& matrix, ResultType& result)
+  {
+    const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
+    const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
+
+    // The inverse is calculated using a "divide and conquer" technique. The
+    // original matrix is divided into four 2x2 sub-matrices. Since each
+    // register holds two matrix elements, each of the smaller matrices
+    // consists of two registers. Hence we get a better locality for the
+    // calculations.
+
+    // the four sub-matrices
+    __m128d A1, A2, B1, B2, C1, C2, D1, D2;
+    
+    if(StorageOrdersMatch)
+    {
+      A1 = matrix.template packet<MatrixAlignment>( 0); B1 = matrix.template packet<MatrixAlignment>( 2);
+      A2 = matrix.template packet<MatrixAlignment>( 4); B2 = matrix.template packet<MatrixAlignment>( 6);
+      C1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
+      C2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
+    }
+    else
+    {
+      __m128d tmp;
+      A1 = matrix.template packet<MatrixAlignment>( 0); C1 = matrix.template packet<MatrixAlignment>( 2);
+      A2 = matrix.template packet<MatrixAlignment>( 4); C2 = matrix.template packet<MatrixAlignment>( 6);
+      tmp = A1;
+      A1 = _mm_unpacklo_pd(A1,A2);
+      A2 = _mm_unpackhi_pd(tmp,A2);
+      tmp = C1;
+      C1 = _mm_unpacklo_pd(C1,C2);
+      C2 = _mm_unpackhi_pd(tmp,C2);
+      
+      B1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
+      B2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
+      tmp = B1;
+      B1 = _mm_unpacklo_pd(B1,B2);
+      B2 = _mm_unpackhi_pd(tmp,B2);
+      tmp = D1;
+      D1 = _mm_unpacklo_pd(D1,D2);
+      D2 = _mm_unpackhi_pd(tmp,D2);
+    }
+    
+    __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2,     // partial inverse of the sub-matrices
+            DC1, DC2, AB1, AB2;
+    __m128d dA, dB, dC, dD;     // determinant of the sub-matrices
+    __m128d det, d1, d2, rd;
+
+    //  dA = |A|
+    dA = _mm_shuffle_pd(A2, A2, 1);
+    dA = _mm_mul_pd(A1, dA);
+    dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3));
+    //  dB = |B|
+    dB = _mm_shuffle_pd(B2, B2, 1);
+    dB = _mm_mul_pd(B1, dB);
+    dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3));
+
+    //  AB = A# * B
+    AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3));
+    AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0));
+    AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3)));
+    AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0)));
+
+    //  dC = |C|
+    dC = _mm_shuffle_pd(C2, C2, 1);
+    dC = _mm_mul_pd(C1, dC);
+    dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3));
+    //  dD = |D|
+    dD = _mm_shuffle_pd(D2, D2, 1);
+    dD = _mm_mul_pd(D1, dD);
+    dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3));
+
+    //  DC = D# * C
+    DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3));
+    DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0));
+    DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3)));
+    DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0)));
+
+    //  rd = trace(AB*DC) = trace(A#*B*D#*C)
+    d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0));
+    d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3));
+    rd = _mm_add_pd(d1, d2);
+    rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3));
+
+    //  iD = C*A#*B
+    iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0));
+    iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0));
+    iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3)));
+    iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3)));
+
+    //  iA = B*D#*C
+    iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0));
+    iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0));
+    iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3)));
+    iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3)));
+
+    //  iD = D*|A| - C*A#*B
+    dA = _mm_shuffle_pd(dA,dA,0);
+    iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1);
+    iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2);
+
+    //  iA = A*|D| - B*D#*C;
+    dD = _mm_shuffle_pd(dD,dD,0);
+    iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1);
+    iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2);
+
+    d1 = _mm_mul_sd(dA, dD);
+    d2 = _mm_mul_sd(dB, dC);
+
+    //  iB = D * (A#B)# = D*B#*A
+    iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1));
+    iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1));
+    iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2)));
+    iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2)));
+
+    //  det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
+    det = _mm_add_sd(d1, d2);
+    det = _mm_sub_sd(det, rd);
+
+    //  iC = A * (D#C)# = A*C#*D
+    iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1));
+    iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1));
+    iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2)));
+    iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2)));
+
+    rd = _mm_div_sd(_mm_set_sd(1.0), det);
+//     #ifdef ZERO_SINGULAR
+//         rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd);
+//     #endif
+    rd = _mm_shuffle_pd(rd,rd,0);
+
+    //  iB = C*|B| - D*B#*A
+    dB = _mm_shuffle_pd(dB,dB,0);
+    iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
+    iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);
+
+    d1 = _mm_xor_pd(rd, _Sign_PN);
+    d2 = _mm_xor_pd(rd, _Sign_NP);
+
+    //  iC = B*|C| - A*C#*D;
+    dC = _mm_shuffle_pd(dC,dC,0);
+    iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);
+    iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);
+
+    result.template writePacket<ResultAlignment>( 0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1));     // iA# / det
+    result.template writePacket<ResultAlignment>( 4, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
+    result.template writePacket<ResultAlignment>( 2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1));     // iB# / det
+    result.template writePacket<ResultAlignment>( 6, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
+    result.template writePacket<ResultAlignment>( 8, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1));     // iC# / det
+    result.template writePacket<ResultAlignment>(12, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
+    result.template writePacket<ResultAlignment>(10, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1));     // iD# / det
+    result.template writePacket<ResultAlignment>(14, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_INVERSE_SSE_H
diff --git a/Eigen/src/MetisSupport/CMakeLists.txt b/Eigen/src/MetisSupport/CMakeLists.txt
new file mode 100644
index 0000000..2bad314
--- /dev/null
+++ b/Eigen/src/MetisSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_MetisSupport_SRCS "*.h")
+
+INSTALL(FILES 
+  ${Eigen_MetisSupport_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/MetisSupport COMPONENT Devel
+  )
diff --git a/Eigen/src/MetisSupport/MetisSupport.h b/Eigen/src/MetisSupport/MetisSupport.h
new file mode 100644
index 0000000..f2bbef2
--- /dev/null
+++ b/Eigen/src/MetisSupport/MetisSupport.h
@@ -0,0 +1,137 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#ifndef METIS_SUPPORT_H
+#define METIS_SUPPORT_H
+
+namespace Eigen {
+/**
+ * Get the fill-reducing ordering from the METIS package
+ * 
+ * If A is the original matrix and Ap is the permuted matrix,
+ * the fill-reducing permutation is defined as follows:
+ * row (column) i of A is the matperm(i)-th row (column) of Ap.
+ * WARNING: As computed by METIS, this corresponds to the vector iperm (instead of perm).
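+ *
+ * A minimal usage sketch (hypothetical, assuming METIS is installed and linked):
+ * \code
+ * #include <Eigen/SparseLU>
+ * #include <Eigen/MetisSupport>
+ *
+ * Eigen::SparseMatrix<double> A(100,100);
+ * // ... fill A and call A.makeCompressed() ...
+ * Eigen::SparseLU<Eigen::SparseMatrix<double>, Eigen::MetisOrdering<int> > solver;
+ * solver.compute(A);  // METIS supplies the fill-reducing ordering
+ * \endcode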
+ */
+template <typename Index>
+class MetisOrdering
+{
+public:
+  typedef PermutationMatrix<Dynamic,Dynamic,Index> PermutationType;
+  typedef Matrix<Index,Dynamic,1> IndexVector; 
+  
+  template <typename MatrixType>
+  void get_symmetrized_graph(const MatrixType& A)
+  {
+    Index m = A.cols(); 
+    eigen_assert((A.rows() == A.cols()) && "ONLY FOR SQUARE MATRICES");
+    // Get the transpose of the input matrix 
+    MatrixType At = A.transpose(); 
+    // Get the number of nonzero elements in each row/col of At+A
+    Index TotNz = 0; 
+    IndexVector visited(m); 
+    visited.setConstant(-1); 
+    for (int j = 0; j < m; j++)
+    {
+      // Compute the union of the structures of A(j,:) and At(j,:)
+      visited(j) = j; // Do not include the diagonal element
+      // Get the nonzeros in row/column j of A
+      for (typename MatrixType::InnerIterator it(A, j); it; ++it)
+      {
+        Index idx = it.index(); // Get the row index (for column major) or column index (for row major)
+        if (visited(idx) != j ) 
+        {
+          visited(idx) = j; 
+          ++TotNz; 
+        }
+      }
+      //Get the nonzeros in row/column j of At
+      for (typename MatrixType::InnerIterator it(At, j); it; ++it)
+      {
+        Index idx = it.index(); 
+        if(visited(idx) != j)
+        {
+          visited(idx) = j; 
+          ++TotNz; 
+        }
+      }
+    }
+    // Reserve space for A + At
+    m_indexPtr.resize(m+1);
+    m_innerIndices.resize(TotNz); 
+
+    // Now compute the real adjacency list of each column/row 
+    visited.setConstant(-1); 
+    Index CurNz = 0; 
+    for (int j = 0; j < m; j++)
+    {
+      m_indexPtr(j) = CurNz; 
+      
+      visited(j) = j; // Do not include the diagonal element
+      // Add the pattern of row/column j of A to A+At
+      for (typename MatrixType::InnerIterator it(A,j); it; ++it)
+      {
+        Index idx = it.index(); // Get the row index (for column major) or column index (for row major)
+        if (visited(idx) != j ) 
+        {
+          visited(idx) = j; 
+          m_innerIndices(CurNz) = idx; 
+          CurNz++; 
+        }
+      }
+      //Add the pattern of row/column j of At to A+At
+      for (typename MatrixType::InnerIterator it(At, j); it; ++it)
+      {
+        Index idx = it.index(); 
+        if(visited(idx) != j)
+        {
+          visited(idx) = j; 
+          m_innerIndices(CurNz) = idx; 
+          ++CurNz; 
+        }
+      }
+    }
+    m_indexPtr(m) = CurNz;    
+  }
+  
+  template <typename MatrixType>
+  void operator() (const MatrixType& A, PermutationType& matperm)
+  {
+     Index m = A.cols();
+     IndexVector perm(m),iperm(m); 
+    // First, symmetrize the matrix graph. 
+     get_symmetrized_graph(A); 
+     int output_error;
+     
+     // Call the fill-reducing routine from METIS 
+     output_error = METIS_NodeND(&m, m_indexPtr.data(), m_innerIndices.data(), NULL, NULL, perm.data(), iperm.data());
+     
+    if(output_error != METIS_OK) 
+    {
+      //FIXME The ordering interface should define a class of possible errors 
+     std::cerr << "ERROR WHILE CALLING THE METIS PACKAGE \n"; 
+     return; 
+    }
+    
+    // Get the fill-reducing permutation 
+    //NOTE:  If Ap is the permuted matrix then perm and iperm vectors are defined as follows 
+    // Row (column) i of Ap is the perm(i) row(column) of A, and row (column) i of A is the iperm(i) row(column) of Ap
+    
+     matperm.resize(m);
+     for (int j = 0; j < m; j++)
+       matperm.indices()(iperm(j)) = j;
+   
+  }
+  
+  protected:
+    IndexVector m_indexPtr; // Pointers to the adjacency list of each row/column
+    IndexVector m_innerIndices; // Adjacency list 
+};
+
+} // end namespace Eigen
+#endif // METIS_SUPPORT_H
diff --git a/Eigen/src/OrderingMethods/Amd.h b/Eigen/src/OrderingMethods/Amd.h
new file mode 100644
index 0000000..1c28bdf
--- /dev/null
+++ b/Eigen/src/OrderingMethods/Amd.h
@@ -0,0 +1,447 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+
+/*
+
+NOTE: this routine has been adapted from the CSparse library:
+
+Copyright (c) 2006, Timothy A. Davis.
+http://www.cise.ufl.edu/research/sparse/CSparse
+
+CSparse is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+CSparse is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this Module; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+*/
+
+#include "../Core/util/NonMPL2.h"
+
+#ifndef EIGEN_SPARSE_AMD_H
+#define EIGEN_SPARSE_AMD_H
+
+namespace Eigen { 
+
+namespace internal {
+  
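+// Note: amd_flip maps i >= 0 to -i-2 <= -2 and is its own inverse,
+// i.e. amd_flip(amd_flip(i)) == i. It is used below to tag entries as
+// dead/absorbed while keeping the original index recoverable,
+// e.g. amd_flip(0) == -2, amd_flip(3) == -5.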
+template<typename T> inline T amd_flip(const T& i) { return -i-2; }
+template<typename T> inline T amd_unflip(const T& i) { return i<0 ? amd_flip(i) : i; }
+template<typename T0, typename T1> inline bool amd_marked(const T0* w, const T1& j) { return w[j]<0; }
+template<typename T0, typename T1> inline void amd_mark(T0* w, const T1& j) { w[j] = amd_flip(w[j]); }
+
+/* clear w: reset the workspace marks when the running mark value is about to overflow */
+template<typename Index>
+static int cs_wclear (Index mark, Index lemax, Index *w, Index n)
+{
+  Index k;
+  if(mark < 2 || (mark + lemax < 0))
+  {
+    for(k = 0; k < n; k++)
+      if(w[k] != 0)
+        w[k] = 1;
+    mark = 2;
+  }
+  return (mark);     /* at this point, w[0..n-1] < mark holds */
+}
+
+/* depth-first search and postorder of a tree rooted at node j */
+template<typename Index>
+Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Index *stack)
+{
+  Index i, p, top = 0;
+  if(!head || !next || !post || !stack) return (-1);    /* check inputs */
+  stack[0] = j;                 /* place j on the stack */
+  while (top >= 0)                /* while (stack is not empty) */
+  {
+    p = stack[top];           /* p = top of stack */
+    i = head[p];              /* i = youngest child of p */
+    if(i == -1)
+    {
+      top--;                 /* p has no unordered children left */
+      post[k++] = p;        /* node p is the kth postordered node */
+    }
+    else
+    {
+      head[p] = next[i];   /* remove i from children of p */
+      stack[++top] = i;     /* start dfs on child node i */
+    }
+  }
+  return k;
+}
+
+
+/** \internal
+  * \ingroup OrderingMethods_Module 
+  * Approximate minimum degree ordering algorithm.
+  * \returns the permutation P reducing the fill-in of the input matrix \a C
+  * The input matrix \a C must be a selfadjoint compressed column major SparseMatrix object. Both the upper and lower parts have to be stored, but the diagonal entries are optional.
+  * On exit the values of C are destroyed */
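+// A minimal invocation sketch (hypothetical setup; note that C is modified):
+//   SparseMatrix<double,ColMajor,int> C;            // selfadjoint, both triangles stored
+//   PermutationMatrix<Dynamic,Dynamic,int> perm;
+//   internal::minimum_degree_ordering(C, perm);     // perm then holds the fill-reducing ordering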
+template<typename Scalar, typename Index>
+void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, PermutationMatrix<Dynamic,Dynamic,Index>& perm)
+{
+  using std::sqrt;
+  
+  int d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
+      k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
+      ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t;
+  unsigned int h;
+  
+  Index n = C.cols();
+  dense = std::max<Index> (16, Index(10 * sqrt(double(n))));   /* find dense threshold */
+  dense = std::min<Index> (n-2, dense);
+  
+  Index cnz = C.nonZeros();
+  perm.resize(n+1);
+  t = cnz + cnz/5 + 2*n;                 /* add elbow room to C */
+  C.resizeNonZeros(t);
+  
+  Index* W       = new Index[8*(n+1)]; /* get workspace */
+  Index* len     = W;
+  Index* nv      = W +   (n+1);
+  Index* next    = W + 2*(n+1);
+  Index* head    = W + 3*(n+1);
+  Index* elen    = W + 4*(n+1);
+  Index* degree  = W + 5*(n+1);
+  Index* w       = W + 6*(n+1);
+  Index* hhead   = W + 7*(n+1);
+  Index* last    = perm.indices().data();                              /* use P as workspace for last */
+  
+  /* --- Initialize quotient graph ---------------------------------------- */
+  Index* Cp = C.outerIndexPtr();
+  Index* Ci = C.innerIndexPtr();
+  for(k = 0; k < n; k++)
+    len[k] = Cp[k+1] - Cp[k];
+  len[n] = 0;
+  nzmax = t;
+  
+  for(i = 0; i <= n; i++)
+  {
+    head[i]   = -1;                     // degree list i is empty
+    last[i]   = -1;
+    next[i]   = -1;
+    hhead[i]  = -1;                     // hash list i is empty 
+    nv[i]     = 1;                      // node i is just one node
+    w[i]      = 1;                      // node i is alive
+    elen[i]   = 0;                      // Ek of node i is empty
+    degree[i] = len[i];                 // degree of node i
+  }
+  mark = internal::cs_wclear<Index>(0, 0, w, n);         /* clear w */
+  elen[n] = -2;                         /* n is a dead element */
+  Cp[n] = -1;                           /* n is a root of assembly tree */
+  w[n] = 0;                             /* n is a dead element */
+  
+  /* --- Initialize degree lists ------------------------------------------ */
+  for(i = 0; i < n; i++)
+  {
+    bool has_diag = false;
+    for(p = Cp[i]; p<Cp[i+1]; ++p)
+      if(Ci[p]==i)
+      {
+        has_diag = true;
+        break;
+      }
+   
+    d = degree[i];
+    if(d == 1)                      /* node i is empty */
+    {
+      elen[i] = -2;                 /* element i is dead */
+      nel++;
+      Cp[i] = -1;                   /* i is a root of assembly tree */
+      w[i] = 0;
+    }
+    else if(d > dense || !has_diag)  /* node i is dense or has no structural diagonal element */
+    {
+      nv[i] = 0;                    /* absorb i into element n */
+      elen[i] = -1;                 /* node i is dead */
+      nel++;
+      Cp[i] = amd_flip (n);
+      nv[n]++;
+    }
+    else
+    {
+      if(head[d] != -1) last[head[d]] = i;
+      next[i] = head[d];           /* put node i in degree list d */
+      head[d] = i;
+    }
+  }
+  
+  elen[n] = -2;                         /* n is a dead element */
+  Cp[n] = -1;                           /* n is a root of assembly tree */
+  w[n] = 0;                             /* n is a dead element */
+  
+  while (nel < n)                         /* while (selecting pivots) do */
+  {
+    /* --- Select node of minimum approximate degree -------------------- */
+    for(k = -1; mindeg < n && (k = head[mindeg]) == -1; mindeg++) {}
+    if(next[k] != -1) last[next[k]] = -1;
+    head[mindeg] = next[k];          /* remove k from degree list */
+    elenk = elen[k];                  /* elenk = |Ek| */
+    nvk = nv[k];                      /* # of nodes k represents */
+    nel += nvk;                        /* nv[k] nodes of A eliminated */
+    
+    /* --- Garbage collection ------------------------------------------- */
+    if(elenk > 0 && cnz + mindeg >= nzmax)
+    {
+      for(j = 0; j < n; j++)
+      {
+        if((p = Cp[j]) >= 0)      /* j is a live node or element */
+        {
+          Cp[j] = Ci[p];          /* save first entry of object */
+          Ci[p] = amd_flip (j);    /* first entry is now amd_flip(j) */
+        }
+      }
+      for(q = 0, p = 0; p < cnz; ) /* scan all of memory */
+      {
+        if((j = amd_flip (Ci[p++])) >= 0)  /* found object j */
+        {
+          Ci[q] = Cp[j];       /* restore first entry of object */
+          Cp[j] = q++;          /* new pointer to object j */
+          for(k3 = 0; k3 < len[j]-1; k3++) Ci[q++] = Ci[p++];
+        }
+      }
+      cnz = q;                       /* Ci[cnz...nzmax-1] now free */
+    }
+    
+    /* --- Construct new element ---------------------------------------- */
+    dk = 0;
+    nv[k] = -nvk;                     /* flag k as in Lk */
+    p = Cp[k];
+    pk1 = (elenk == 0) ? p : cnz;      /* do in place if elen[k] == 0 */
+    pk2 = pk1;
+    for(k1 = 1; k1 <= elenk + 1; k1++)
+    {
+      if(k1 > elenk)
+      {
+        e = k;                     /* search the nodes in k */
+        pj = p;                    /* list of nodes starts at Ci[pj]*/
+        ln = len[k] - elenk;      /* length of list of nodes in k */
+      }
+      else
+      {
+        e = Ci[p++];              /* search the nodes in e */
+        pj = Cp[e];
+        ln = len[e];              /* length of list of nodes in e */
+      }
+      for(k2 = 1; k2 <= ln; k2++)
+      {
+        i = Ci[pj++];
+        if((nvi = nv[i]) <= 0) continue; /* node i dead, or seen */
+        dk += nvi;                 /* degree[Lk] += size of node i */
+        nv[i] = -nvi;             /* negate nv[i] to denote i in Lk*/
+        Ci[pk2++] = i;            /* place i in Lk */
+        if(next[i] != -1) last[next[i]] = last[i];
+        if(last[i] != -1)         /* remove i from degree list */
+        {
+          next[last[i]] = next[i];
+        }
+        else
+        {
+          head[degree[i]] = next[i];
+        }
+      }
+      if(e != k)
+      {
+        Cp[e] = amd_flip (k);      /* absorb e into k */
+        w[e] = 0;                 /* e is now a dead element */
+      }
+    }
+    if(elenk != 0) cnz = pk2;         /* Ci[cnz...nzmax] is free */
+    degree[k] = dk;                   /* external degree of k - |Lk\i| */
+    Cp[k] = pk1;                      /* element k is in Ci[pk1..pk2-1] */
+    len[k] = pk2 - pk1;
+    elen[k] = -2;                     /* k is now an element */
+    
+    /* --- Find set differences ----------------------------------------- */
+    mark = internal::cs_wclear<Index>(mark, lemax, w, n);  /* clear w if necessary */
+    for(pk = pk1; pk < pk2; pk++)    /* scan 1: find |Le\Lk| */
+    {
+      i = Ci[pk];
+      if((eln = elen[i]) <= 0) continue;/* skip if elen[i] empty */
+      nvi = -nv[i];                      /* nv[i] was negated */
+      wnvi = mark - nvi;
+      for(p = Cp[i]; p <= Cp[i] + eln - 1; p++)  /* scan Ei */
+      {
+        e = Ci[p];
+        if(w[e] >= mark)
+        {
+          w[e] -= nvi;          /* decrement |Le\Lk| */
+        }
+        else if(w[e] != 0)        /* ensure e is a live element */
+        {
+          w[e] = degree[e] + wnvi; /* 1st time e seen in scan 1 */
+        }
+      }
+    }
+    
+    /* --- Degree update ------------------------------------------------ */
+    for(pk = pk1; pk < pk2; pk++)    /* scan2: degree update */
+    {
+      i = Ci[pk];                   /* consider node i in Lk */
+      p1 = Cp[i];
+      p2 = p1 + elen[i] - 1;
+      pn = p1;
+      for(h = 0, d = 0, p = p1; p <= p2; p++)    /* scan Ei */
+      {
+        e = Ci[p];
+        if(w[e] != 0)             /* e is an unabsorbed element */
+        {
+          dext = w[e] - mark;   /* dext = |Le\Lk| */
+          if(dext > 0)
+          {
+            d += dext;         /* sum up the set differences */
+            Ci[pn++] = e;     /* keep e in Ei */
+            h += e;            /* compute the hash of node i */
+          }
+          else
+          {
+            Cp[e] = amd_flip (k);  /* aggressive absorb. e->k */
+            w[e] = 0;             /* e is a dead element */
+          }
+        }
+      }
+      elen[i] = pn - p1 + 1;        /* elen[i] = |Ei| */
+      p3 = pn;
+      p4 = p1 + len[i];
+      for(p = p2 + 1; p < p4; p++) /* prune edges in Ai */
+      {
+        j = Ci[p];
+        if((nvj = nv[j]) <= 0) continue; /* node j dead or in Lk */
+        d += nvj;                  /* degree(i) += |j| */
+        Ci[pn++] = j;             /* place j in node list of i */
+        h += j;                    /* compute hash for node i */
+      }
+      if(d == 0)                     /* check for mass elimination */
+      {
+        Cp[i] = amd_flip (k);      /* absorb i into k */
+        nvi = -nv[i];
+        dk -= nvi;                 /* |Lk| -= |i| */
+        nvk += nvi;                /* |k| += nv[i] */
+        nel += nvi;
+        nv[i] = 0;
+        elen[i] = -1;             /* node i is dead */
+      }
+      else
+      {
+        degree[i] = std::min<Index> (degree[i], d);   /* update degree(i) */
+        Ci[pn] = Ci[p3];         /* move first node to end */
+        Ci[p3] = Ci[p1];         /* move 1st el. to end of Ei */
+        Ci[p1] = k;               /* add k as 1st element in of Ei */
+        len[i] = pn - p1 + 1;     /* new len of adj. list of node i */
+        h %= n;                    /* finalize hash of i */
+        next[i] = hhead[h];      /* place i in hash bucket */
+        hhead[h] = i;
+        last[i] = h;              /* save hash of i in last[i] */
+      }
+    }                                   /* scan2 is done */
+    degree[k] = dk;                   /* finalize |Lk| */
+    lemax = std::max<Index>(lemax, dk);
+    mark = internal::cs_wclear<Index>(mark+lemax, lemax, w, n);    /* clear w */
+    
+    /* --- Supernode detection ------------------------------------------ */
+    for(pk = pk1; pk < pk2; pk++)
+    {
+      i = Ci[pk];
+      if(nv[i] >= 0) continue;         /* skip if i is dead */
+      h = last[i];                      /* scan hash bucket of node i */
+      i = hhead[h];
+      hhead[h] = -1;                    /* hash bucket will be empty */
+      for(; i != -1 && next[i] != -1; i = next[i], mark++)
+      {
+        ln = len[i];
+        eln = elen[i];
+        for(p = Cp[i]+1; p <= Cp[i] + ln-1; p++) w[Ci[p]] = mark;
+        jlast = i;
+        for(j = next[i]; j != -1; ) /* compare i with all j */
+        {
+          ok = (len[j] == ln) && (elen[j] == eln);
+          for(p = Cp[j] + 1; ok && p <= Cp[j] + ln - 1; p++)
+          {
+            if(w[Ci[p]] != mark) ok = 0;    /* compare i and j*/
+          }
+          if(ok)                     /* i and j are identical */
+          {
+            Cp[j] = amd_flip (i);  /* absorb j into i */
+            nv[i] += nv[j];
+            nv[j] = 0;
+            elen[j] = -1;         /* node j is dead */
+            j = next[j];          /* delete j from hash bucket */
+            next[jlast] = j;
+          }
+          else
+          {
+            jlast = j;             /* j and i are different */
+            j = next[j];
+          }
+        }
+      }
+    }
+    
+    /* --- Finalize new element------------------------------------------ */
+    for(p = pk1, pk = pk1; pk < pk2; pk++)   /* finalize Lk */
+    {
+      i = Ci[pk];
+      if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */
+      nv[i] = nvi;                      /* restore nv[i] */
+      d = degree[i] + dk - nvi;         /* compute external degree(i) */
+      d = std::min<Index> (d, n - nel - nvi);
+      if(head[d] != -1) last[head[d]] = i;
+      next[i] = head[d];               /* put i back in degree list */
+      last[i] = -1;
+      head[d] = i;
+      mindeg = std::min<Index> (mindeg, d);       /* find new minimum degree */
+      degree[i] = d;
+      Ci[p++] = i;                      /* place i in Lk */
+    }
+    nv[k] = nvk;                      /* # nodes absorbed into k */
+    if((len[k] = p-pk1) == 0)         /* length of adj list of element k*/
+    {
+      Cp[k] = -1;                   /* k is a root of the tree */
+      w[k] = 0;                     /* k is now a dead element */
+    }
+    if(elenk != 0) cnz = p;           /* free unused space in Lk */
+  }
+  
+  /* --- Postordering ----------------------------------------------------- */
+  for(i = 0; i < n; i++) Cp[i] = amd_flip (Cp[i]);/* fix assembly tree */
+  for(j = 0; j <= n; j++) head[j] = -1;
+  for(j = n; j >= 0; j--)              /* place unordered nodes in lists */
+  {
+    if(nv[j] > 0) continue;          /* skip if j is an element */
+    next[j] = head[Cp[j]];          /* place j in list of its parent */
+    head[Cp[j]] = j;
+  }
+  for(e = n; e >= 0; e--)              /* place elements in lists */
+  {
+    if(nv[e] <= 0) continue;         /* skip unless e is an element */
+    if(Cp[e] != -1)
+    {
+      next[e] = head[Cp[e]];      /* place e in list of its parent */
+      head[Cp[e]] = e;
+    }
+  }
+  for(k = 0, i = 0; i <= n; i++)       /* postorder the assembly tree */
+  {
+    if(Cp[i] == -1) k = internal::cs_tdfs<Index>(i, k, head, next, perm.indices().data(), w);
+  }
+  
+  perm.indices().conservativeResize(n);
+
+  delete[] W;
+}
+
+} // namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_AMD_H
diff --git a/Eigen/src/OrderingMethods/CMakeLists.txt b/Eigen/src/OrderingMethods/CMakeLists.txt
new file mode 100644
index 0000000..9f4bb27
--- /dev/null
+++ b/Eigen/src/OrderingMethods/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_OrderingMethods_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_OrderingMethods_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/OrderingMethods COMPONENT Devel
+  )
diff --git a/Eigen/src/OrderingMethods/Eigen_Colamd.h b/Eigen/src/OrderingMethods/Eigen_Colamd.h
new file mode 100644
index 0000000..44548f6
--- /dev/null
+++ b/Eigen/src/OrderingMethods/Eigen_Colamd.h
@@ -0,0 +1,1850 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Desire Nuentsa Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This file is modified from the colamd/symamd library. The copyright is below.
+
+//   The authors of the code itself are Stefan I. Larimore and Timothy A.
+//   Davis (davis@cise.ufl.edu), University of Florida.  The algorithm was
+//   developed in collaboration with John Gilbert, Xerox PARC, and Esmond
+//   Ng, Oak Ridge National Laboratory.
+// 
+//     Date:
+// 
+//   September 8, 2003.  Version 2.3.
+// 
+//     Acknowledgements:
+// 
+//   This work was supported by the National Science Foundation, under
+//   grants DMS-9504974 and DMS-9803599.
+// 
+//     Notice:
+// 
+//   Copyright (c) 1998-2003 by the University of Florida.
+//   All Rights Reserved.
+// 
+//   THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+//   EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+// 
+//   Permission is hereby granted to use, copy, modify, and/or distribute
+//   this program, provided that the Copyright, this License, and the
+//   Availability of the original version is retained on all copies and made
+//   accessible to the end-user of any code or package that includes COLAMD
+//   or any modified version of COLAMD. 
+// 
+//     Availability:
+// 
+//   The colamd/symamd library is available at
+// 
+//       http://www.cise.ufl.edu/research/sparse/colamd/
+
+//   This is the http://www.cise.ufl.edu/research/sparse/colamd/colamd.h
+//   file.  It is required by the colamd.c, colamdmex.c, and symamdmex.c
+//   files, and by any C code that calls the routines whose prototypes are
+//   listed below, or that uses the colamd/symamd definitions listed below.
+  
+#ifndef EIGEN_COLAMD_H
+#define EIGEN_COLAMD_H
+
+namespace internal {
+/* Ensure that debugging is turned off: */
+#ifndef COLAMD_NDEBUG
+#define COLAMD_NDEBUG
+#endif /* COLAMD_NDEBUG */
+/* ========================================================================== */
+/* === Knob and statistics definitions ====================================== */
+/* ========================================================================== */
+
+/* size of the knobs [ ] array.  Only knobs [0..1] are currently used. */
+#define COLAMD_KNOBS 20
+
+/* number of output statistics.  Only stats [0..6] are currently used. */
+#define COLAMD_STATS 20 
+
+/* knobs [0] and stats [0]: dense row knob and output statistic. */
+#define COLAMD_DENSE_ROW 0
+
+/* knobs [1] and stats [1]: dense column knob and output statistic. */
+#define COLAMD_DENSE_COL 1
+
+/* stats [2]: memory defragmentation count output statistic */
+#define COLAMD_DEFRAG_COUNT 2
+
+/* stats [3]: colamd status:  zero OK, > 0 warning or notice, < 0 error */
+#define COLAMD_STATUS 3
+
+/* stats [4..6]: error info, or info on jumbled columns */ 
+#define COLAMD_INFO1 4
+#define COLAMD_INFO2 5
+#define COLAMD_INFO3 6
+
+/* error codes returned in stats [3]: */
+#define COLAMD_OK       (0)
+#define COLAMD_OK_BUT_JUMBLED     (1)
+#define COLAMD_ERROR_A_not_present    (-1)
+#define COLAMD_ERROR_p_not_present    (-2)
+#define COLAMD_ERROR_nrow_negative    (-3)
+#define COLAMD_ERROR_ncol_negative    (-4)
+#define COLAMD_ERROR_nnz_negative   (-5)
+#define COLAMD_ERROR_p0_nonzero     (-6)
+#define COLAMD_ERROR_A_too_small    (-7)
+#define COLAMD_ERROR_col_length_negative  (-8)
+#define COLAMD_ERROR_row_index_out_of_bounds  (-9)
+#define COLAMD_ERROR_out_of_memory    (-10)
+#define COLAMD_ERROR_internal_error   (-999)
+
+/* ========================================================================== */
+/* === Definitions ========================================================== */
+/* ========================================================================== */
+
+#define COLAMD_MAX(a,b) (((a) > (b)) ? (a) : (b))
+#define COLAMD_MIN(a,b) (((a) < (b)) ? (a) : (b))
+
+#define ONES_COMPLEMENT(r) (-(r)-1)
+
+/* -------------------------------------------------------------------------- */
+
+#define COLAMD_EMPTY (-1)
+
+/* Row and column status */
+#define ALIVE (0)
+#define DEAD  (-1)
+
+/* Column status */
+#define DEAD_PRINCIPAL    (-1)
+#define DEAD_NON_PRINCIPAL  (-2)
+
+/* Macros for row and column status update and checking. */
+#define ROW_IS_DEAD(r)      ROW_IS_MARKED_DEAD (Row[r].shared2.mark)
+#define ROW_IS_MARKED_DEAD(row_mark)  (row_mark < ALIVE)
+#define ROW_IS_ALIVE(r)     (Row [r].shared2.mark >= ALIVE)
+#define COL_IS_DEAD(c)      (Col [c].start < ALIVE)
+#define COL_IS_ALIVE(c)     (Col [c].start >= ALIVE)
+#define COL_IS_DEAD_PRINCIPAL(c)  (Col [c].start == DEAD_PRINCIPAL)
+#define KILL_ROW(r)     { Row [r].shared2.mark = DEAD ; }
+#define KILL_PRINCIPAL_COL(c)   { Col [c].start = DEAD_PRINCIPAL ; }
+#define KILL_NON_PRINCIPAL_COL(c) { Col [c].start = DEAD_NON_PRINCIPAL ; }
+
+/* ========================================================================== */
+/* === Row and Column structures ============================================ */
+/* ========================================================================== */
+template <typename Index>
+struct colamd_col
+{
+  Index start ;   /* index for A of first row in this column, or DEAD */
+  /* if column is dead */
+  Index length ;  /* number of rows in this column */
+  union
+  {
+    Index thickness ; /* number of original columns represented by this */
+    /* col, if the column is alive */
+    Index parent ;  /* parent in parent tree super-column structure, if */
+    /* the column is dead */
+  } shared1 ;
+  union
+  {
+    Index score ; /* the score used to maintain heap, if col is alive */
+    Index order ; /* pivot ordering of this column, if col is dead */
+  } shared2 ;
+  union
+  {
+    Index headhash ;  /* head of a hash bucket, if col is at the head of */
+    /* a degree list */
+    Index hash ;  /* hash value, if col is not in a degree list */
+    Index prev ;  /* previous column in degree list, if col is in a */
+    /* degree list (but not at the head of a degree list) */
+  } shared3 ;
+  union
+  {
+    Index degree_next ; /* next column, if col is in a degree list */
+    Index hash_next ;   /* next column, if col is in a hash list */
+  } shared4 ;
+  
+};
+ 
+template <typename Index>
+struct Colamd_Row
+{
+  Index start ;   /* index for A of first col in this row */
+  Index length ;  /* number of principal columns in this row */
+  union
+  {
+    Index degree ;  /* number of principal & non-principal columns in row */
+    Index p ;   /* used as a row pointer in init_rows_cols () */
+  } shared1 ;
+  union
+  {
+    Index mark ;  /* for computing set differences and marking dead rows*/
+    Index first_column ;/* first column in row (used in garbage collection) */
+  } shared2 ;
+  
+};
+ 
+/* ========================================================================== */
+/* === Colamd recommended memory size ======================================= */
+/* ========================================================================== */
+ 
+/*
+  The recommended length Alen of the array A passed to colamd is given by
+  the COLAMD_RECOMMENDED (nnz, n_row, n_col) macro.  It returns -1 if any
+  argument is negative.  2*nnz space is required for the row and column
+  indices of the matrix. colamd_c (n_col) + colamd_r (n_row) space is
+  required for the Col and Row arrays, respectively, which are internal to
+  colamd.  An additional n_col space is the minimal amount of "elbow room",
+  and nnz/5 more space is recommended for run time efficiency.
+  
+  This macro is not needed when using symamd.
+  
+  Explicit typecast to Index added Sept. 23, 2002, COLAMD version 2.2, to avoid
+  gcc -pedantic warning messages.
+*/
+template <typename Index>
+inline Index colamd_c(Index n_col) 
+{ return Index( ((n_col) + 1) * sizeof (colamd_col<Index>) / sizeof (Index) ) ; }
+
+template <typename Index>
+inline Index  colamd_r(Index n_row)
+{ return Index(((n_row) + 1) * sizeof (Colamd_Row<Index>) / sizeof (Index)); }
+
+// Prototypes of non-user-callable routines
+template <typename Index>
+static Index init_rows_cols (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index p [], Index stats[COLAMD_STATS] );
+
+template <typename Index>
+static void init_scoring (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], double knobs[COLAMD_KNOBS], Index *p_n_row2, Index *p_n_col2, Index *p_max_deg);
+
+template <typename Index>
+static Index find_ordering (Index n_row, Index n_col, Index Alen, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], Index n_col2, Index max_deg, Index pfree);
+
+template <typename Index>
+static void order_children (Index n_col, colamd_col<Index> Col [], Index p []);
+
+template <typename Index>
+static void detect_super_cols (colamd_col<Index> Col [], Index A [], Index head [], Index row_start, Index row_length ) ;
+
+template <typename Index>
+static Index garbage_collection (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index *pfree) ;
+
+template <typename Index>
+static inline  Index clear_mark (Index n_row, Colamd_Row<Index> Row [] ) ;
+
+/* === No debugging ========================================================= */
+
+#define COLAMD_DEBUG0(params) ;
+#define COLAMD_DEBUG1(params) ;
+#define COLAMD_DEBUG2(params) ;
+#define COLAMD_DEBUG3(params) ;
+#define COLAMD_DEBUG4(params) ;
+
+#define COLAMD_ASSERT(expression) ((void) 0)
+
+
+/**
+ * \brief Returns the recommended value of Alen 
+ * 
+ * Returns recommended value of Alen for use by colamd.  
+ * Returns -1 if any input argument is negative.  
+ * The use of this routine is optional.
+ * Note that the original COLAMD_RECOMMENDED macro uses its arguments more than once,
+ * so be careful of side effects if you pass expressions as arguments to it.
+ * 
+ * \param nnz nonzeros in A
+ * \param n_row number of rows in A
+ * \param n_col number of columns in A
+ * \return recommended value of Alen for use by colamd
+ */
+template <typename Index>
+inline Index colamd_recommended ( Index nnz, Index n_row, Index n_col)
+{
+  if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0)
+    return (-1);
+  else
+    return (2 * (nnz) + colamd_c (n_col) + colamd_r (n_row) + (n_col) + ((nnz) / 5)); 
+}
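+
+/* Illustrative sketch (not part of the library): sizing the workspace for a
+ * hypothetical 4-by-4 matrix with nnz = 8.  The exact value returned depends
+ * on sizeof(colamd_col<Index>) and sizeof(Colamd_Row<Index>) on the target
+ * platform; the variable names below are examples only.
+ *
+ *   int nnz = 8, n_row = 4, n_col = 4;
+ *   int alen = colamd_recommended(nnz, n_row, n_col);
+ *   // alen >= 2*nnz + colamd_c(n_col) + colamd_r(n_row) + n_col + nnz/5
+ *   std::vector<int> A(alen);   // column form goes first, then elbow room
+ */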
+
+/**
+ * \brief Set default parameters.  The use of this routine is optional.
+ * 
+ * Colamd: rows with more than (knobs [COLAMD_DENSE_ROW] * n_col)
+ * entries are removed prior to ordering.  Columns with more than
+ * (knobs [COLAMD_DENSE_COL] * n_row) entries are removed prior to
+ * ordering, and placed last in the output column ordering. 
+ *
+ * COLAMD_DENSE_ROW and COLAMD_DENSE_COL are defined as 0 and 1,
+ * respectively, in colamd.h.  Default values of these two knobs
+ * are both 0.5.  Currently, only knobs [0] and knobs [1] are
+ * used, but future versions may use more knobs.  If so, they will
+ * be properly set to their defaults by the future version of
+ * colamd_set_defaults, so that the code that calls colamd will
+ * not need to change, assuming that you either use
+ * colamd_set_defaults, or pass a (double *) NULL pointer as the
+ * knobs array to colamd or symamd.
+ * 
+ * \param knobs parameter settings for colamd
+ */
+
+static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS])
+{
+  /* === Local variables ================================================== */
+  
+  int i ;
+
+  if (!knobs)
+  {
+    return ;      /* no knobs to initialize */
+  }
+  for (i = 0 ; i < COLAMD_KNOBS ; i++)
+  {
+    knobs [i] = 0 ;
+  }
+  knobs [COLAMD_DENSE_ROW] = 0.5 ;  /* ignore rows over 50% dense */
+  knobs [COLAMD_DENSE_COL] = 0.5 ;  /* ignore columns over 50% dense */
+}
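+
+/* Usage sketch (illustrative): initialize the knobs to their defaults and
+ * then, for example, loosen the dense-row threshold.  The value 0.7 is a
+ * hypothetical choice, not a recommendation.
+ *
+ *   double knobs[COLAMD_KNOBS];
+ *   colamd_set_defaults(knobs);
+ *   knobs[COLAMD_DENSE_ROW] = 0.7;  // keep rows that are up to 70% dense
+ */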
+
+/** 
+ * \brief  Computes a column ordering using the column approximate minimum degree ordering
+ * 
+ * Computes a column ordering (Q) of A such that P(AQ)=LU or
+ * (AQ)'AQ=LL' have less fill-in and require fewer floating point
+ * operations than factorizing the unpermuted matrix A or A'A,
+ * respectively.
+ * 
+ * 
+ * \param n_row number of rows in A
+ * \param n_col number of columns in A
+ * \param Alen size of the array A
+ * \param A row indices of the matrix, of size Alen
+ * \param p column pointers of A, of size n_col+1
+ * \param knobs parameter settings for colamd
+ * \param stats colamd output statistics and error codes
+ */
+template <typename Index>
+static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, double knobs[COLAMD_KNOBS], Index stats[COLAMD_STATS])
+{
+  /* === Local variables ================================================== */
+  
+  Index i ;     /* loop index */
+  Index nnz ;     /* nonzeros in A */
+  Index Row_size ;    /* size of Row [], in integers */
+  Index Col_size ;    /* size of Col [], in integers */
+  Index need ;      /* minimum required length of A */
+  Colamd_Row<Index> *Row ;   /* pointer into A of Row [0..n_row] array */
+  colamd_col<Index> *Col ;   /* pointer into A of Col [0..n_col] array */
+  Index n_col2 ;    /* number of non-dense, non-empty columns */
+  Index n_row2 ;    /* number of non-dense, non-empty rows */
+  Index ngarbage ;    /* number of garbage collections performed */
+  Index max_deg ;   /* maximum row degree */
+  double default_knobs [COLAMD_KNOBS] ; /* default knobs array */
+  
+  
+  /* === Check the input arguments ======================================== */
+  
+  if (!stats)
+  {
+    COLAMD_DEBUG0 (("colamd: stats not present\n")) ;
+    return (false) ;
+  }
+  for (i = 0 ; i < COLAMD_STATS ; i++)
+  {
+    stats [i] = 0 ;
+  }
+  stats [COLAMD_STATUS] = COLAMD_OK ;
+  stats [COLAMD_INFO1] = -1 ;
+  stats [COLAMD_INFO2] = -1 ;
+  
+  if (!A)   /* A is not present */
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ;
+    COLAMD_DEBUG0 (("colamd: A not present\n")) ;
+    return (false) ;
+  }
+  
+  if (!p)   /* p is not present */
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ;
+    COLAMD_DEBUG0 (("colamd: p not present\n")) ;
+    return (false) ;
+  }
+  
+  if (n_row < 0)  /* n_row must be >= 0 */
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_nrow_negative ;
+    stats [COLAMD_INFO1] = n_row ;
+    COLAMD_DEBUG0 (("colamd: nrow negative %d\n", n_row)) ;
+    return (false) ;
+  }
+  
+  if (n_col < 0)  /* n_col must be >= 0 */
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ;
+    stats [COLAMD_INFO1] = n_col ;
+    COLAMD_DEBUG0 (("colamd: ncol negative %d\n", n_col)) ;
+    return (false) ;
+  }
+  
+  nnz = p [n_col] ;
+  if (nnz < 0)  /* nnz must be >= 0 */
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ;
+    stats [COLAMD_INFO1] = nnz ;
+    COLAMD_DEBUG0 (("colamd: number of entries negative %d\n", nnz)) ;
+    return (false) ;
+  }
+  
+  if (p [0] != 0)
+  {
+    stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ;
+    stats [COLAMD_INFO1] = p [0] ;
+    COLAMD_DEBUG0 (("colamd: p[0] not zero %d\n", p [0])) ;
+    return (false) ;
+  }
+  
+  /* === If no knobs, set default knobs =================================== */
+  
+  if (!knobs)
+  {
+    colamd_set_defaults (default_knobs) ;
+    knobs = default_knobs ;
+  }
+  
+  /* === Allocate the Row and Col arrays from array A ===================== */
+  
+  Col_size = colamd_c (n_col) ;
+  Row_size = colamd_r (n_row) ;
+  need = 2*nnz + n_col + Col_size + Row_size ;
+  
+  if (need > Alen)
+  {
+    /* not enough space in array A to perform the ordering */
+    stats [COLAMD_STATUS] = COLAMD_ERROR_A_too_small ;
+    stats [COLAMD_INFO1] = need ;
+    stats [COLAMD_INFO2] = Alen ;
+    COLAMD_DEBUG0 (("colamd: Need Alen >= %d, given only Alen = %d\n", need,Alen));
+    return (false) ;
+  }
+  
+  Alen -= Col_size + Row_size ;
+  Col = (colamd_col<Index> *) &A [Alen] ;
+  Row = (Colamd_Row<Index> *) &A [Alen + Col_size] ;
+
+  /* === Construct the row and column data structures ===================== */
+  
+  if (!Eigen::internal::init_rows_cols (n_row, n_col, Row, Col, A, p, stats))
+  {
+    /* input matrix is invalid */
+    COLAMD_DEBUG0 (("colamd: Matrix invalid\n")) ;
+    return (false) ;
+  }
+  
+  /* === Initialize scores, kill dense rows/columns ======================= */
+
+  Eigen::internal::init_scoring (n_row, n_col, Row, Col, A, p, knobs,
+		&n_row2, &n_col2, &max_deg) ;
+  
+  /* === Order the supercolumns =========================================== */
+  
+  ngarbage = Eigen::internal::find_ordering (n_row, n_col, Alen, Row, Col, A, p,
+			    n_col2, max_deg, 2*nnz) ;
+  
+  /* === Order the non-principal columns ================================== */
+  
+  Eigen::internal::order_children (n_col, Col, p) ;
+  
+  /* === Return statistics in stats ======================================= */
+  
+  stats [COLAMD_DENSE_ROW] = n_row - n_row2 ;
+  stats [COLAMD_DENSE_COL] = n_col - n_col2 ;
+  stats [COLAMD_DEFRAG_COUNT] = ngarbage ;
+  COLAMD_DEBUG0 (("colamd: done.\n")) ; 
+  return (true) ;
+}
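+
+/* Usage sketch (illustrative only, with a hypothetical 3-by-3 pattern):
+ * columns hold rows {0,2}, {1} and {0,1,2}, stored in compressed column
+ * form.  A NULL knobs pointer selects the defaults; on success the column
+ * permutation is returned in p [0 ... n_col-1] and A is used as scratch.
+ *
+ *   int n_row = 3, n_col = 3, nnz = 6;
+ *   int alen = colamd_recommended(nnz, n_row, n_col);
+ *   std::vector<int> A(alen);
+ *   int rows[] = {0, 2,  1,  0, 1, 2};    // row indices, column by column
+ *   std::copy(rows, rows + nnz, A.begin());
+ *   int p[] = {0, 2, 3, 6};               // column pointers, p[0] must be 0
+ *   int stats[COLAMD_STATS];
+ *   bool ok = colamd(n_row, n_col, alen, A.data(), p, (double*)0, stats);
+ */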
+
+/* ========================================================================== */
+/* === NON-USER-CALLABLE ROUTINES: ========================================== */
+/* ========================================================================== */
+
+/* There are no user-callable routines beyond this point in the file */
+
+
+/* ========================================================================== */
+/* === init_rows_cols ======================================================= */
+/* ========================================================================== */
+
+/*
+  Takes the column form of the matrix in A and creates the row form of the
+  matrix.  Also, row and column attributes are stored in the Col and Row
+  structs.  If the columns are un-sorted or contain duplicate row indices,
+  this routine will also sort and remove duplicate row indices from the
+  column form of the matrix.  Returns false if the matrix is invalid,
+  true otherwise.  Not user-callable.
+*/
+template <typename Index>
+static Index init_rows_cols  /* returns true if OK, or false otherwise */
+  (
+    /* === Parameters ======================================================= */
+
+    Index n_row,      /* number of rows of A */
+    Index n_col,      /* number of columns of A */
+    Colamd_Row<Index> Row [],    /* of size n_row+1 */
+    colamd_col<Index> Col [],    /* of size n_col+1 */
+    Index A [],     /* row indices of A, of size Alen */
+    Index p [],     /* pointers to columns in A, of size n_col+1 */
+    Index stats [COLAMD_STATS]  /* colamd statistics */ 
+    )
+{
+  /* === Local variables ================================================== */
+
+  Index col ;     /* a column index */
+  Index row ;     /* a row index */
+  Index *cp ;     /* a column pointer */
+  Index *cp_end ;   /* a pointer to the end of a column */
+  Index *rp ;     /* a row pointer */
+  Index *rp_end ;   /* a pointer to the end of a row */
+  Index last_row ;    /* previous row */
+
+  /* === Initialize columns, and check column pointers ==================== */
+
+  for (col = 0 ; col < n_col ; col++)
+  {
+    Col [col].start = p [col] ;
+    Col [col].length = p [col+1] - p [col] ;
+
+    if (Col [col].length < 0)
+    {
+      /* column pointers must be non-decreasing */
+      stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ;
+      stats [COLAMD_INFO1] = col ;
+      stats [COLAMD_INFO2] = Col [col].length ;
+      COLAMD_DEBUG0 (("colamd: col %d length %d < 0\n", col, Col [col].length)) ;
+      return (false) ;
+    }
+
+    Col [col].shared1.thickness = 1 ;
+    Col [col].shared2.score = 0 ;
+    Col [col].shared3.prev = COLAMD_EMPTY ;
+    Col [col].shared4.degree_next = COLAMD_EMPTY ;
+  }
+
+  /* p [0..n_col] no longer needed, used as "head" in subsequent routines */
+
+  /* === Scan columns, compute row degrees, and check row indices ========= */
+
+  stats [COLAMD_INFO3] = 0 ;  /* number of duplicate or unsorted row indices*/
+
+  for (row = 0 ; row < n_row ; row++)
+  {
+    Row [row].length = 0 ;
+    Row [row].shared2.mark = -1 ;
+  }
+
+  for (col = 0 ; col < n_col ; col++)
+  {
+    last_row = -1 ;
+
+    cp = &A [p [col]] ;
+    cp_end = &A [p [col+1]] ;
+
+    while (cp < cp_end)
+    {
+      row = *cp++ ;
+
+      /* make sure row indices within range */
+      if (row < 0 || row >= n_row)
+      {
+	stats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ;
+	stats [COLAMD_INFO1] = col ;
+	stats [COLAMD_INFO2] = row ;
+	stats [COLAMD_INFO3] = n_row ;
+	COLAMD_DEBUG0 (("colamd: row %d col %d out of bounds\n", row, col)) ;
+	return (false) ;
+      }
+
+      if (row <= last_row || Row [row].shared2.mark == col)
+      {
+	/* row indices are unsorted or repeated (or both), thus col */
+	/* is jumbled.  This is a notice, not an error condition. */
+	stats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ;
+	stats [COLAMD_INFO1] = col ;
+	stats [COLAMD_INFO2] = row ;
+	(stats [COLAMD_INFO3]) ++ ;
+	COLAMD_DEBUG1 (("colamd: row %d col %d unsorted/duplicate\n",row,col));
+      }
+
+      if (Row [row].shared2.mark != col)
+      {
+	Row [row].length++ ;
+      }
+      else
+      {
+	/* this is a repeated entry in the column, */
+	/* it will be removed */
+	Col [col].length-- ;
+      }
+
+      /* mark the row as having been seen in this column */
+      Row [row].shared2.mark = col ;
+
+      last_row = row ;
+    }
+  }
+
+  /* === Compute row pointers ============================================= */
+
+  /* row form of the matrix starts directly after the column */
+  /* form of matrix in A */
+  Row [0].start = p [n_col] ;
+  Row [0].shared1.p = Row [0].start ;
+  Row [0].shared2.mark = -1 ;
+  for (row = 1 ; row < n_row ; row++)
+  {
+    Row [row].start = Row [row-1].start + Row [row-1].length ;
+    Row [row].shared1.p = Row [row].start ;
+    Row [row].shared2.mark = -1 ;
+  }
+
+  /* === Create row form ================================================== */
+
+  if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
+  {
+    /* if cols jumbled, watch for repeated row indices */
+    for (col = 0 ; col < n_col ; col++)
+    {
+      cp = &A [p [col]] ;
+      cp_end = &A [p [col+1]] ;
+      while (cp < cp_end)
+      {
+	row = *cp++ ;
+	if (Row [row].shared2.mark != col)
+	{
+	  A [(Row [row].shared1.p)++] = col ;
+	  Row [row].shared2.mark = col ;
+	}
+      }
+    }
+  }
+  else
+  {
+    /* if cols not jumbled, we don't need the mark (this is faster) */
+    for (col = 0 ; col < n_col ; col++)
+    {
+      cp = &A [p [col]] ;
+      cp_end = &A [p [col+1]] ;
+      while (cp < cp_end)
+      {
+	A [(Row [*cp++].shared1.p)++] = col ;
+      }
+    }
+  }
+
+  /* === Clear the row marks and set row degrees ========================== */
+
+  for (row = 0 ; row < n_row ; row++)
+  {
+    Row [row].shared2.mark = 0 ;
+    Row [row].shared1.degree = Row [row].length ;
+  }
+
+  /* === See if we need to re-create columns ============================== */
+
+  if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)
+  {
+    COLAMD_DEBUG0 (("colamd: reconstructing column form, matrix jumbled\n")) ;
+
+
+    /* === Compute col pointers ========================================= */
+
+    /* col form of the matrix starts at A [0]. */
+    /* Note, we may have a gap between the col form and the row */
+    /* form if there were duplicate entries, if so, it will be */
+    /* removed upon the first garbage collection */
+    Col [0].start = 0 ;
+    p [0] = Col [0].start ;
+    for (col = 1 ; col < n_col ; col++)
+    {
+      /* note that the lengths here are for pruned columns, i.e. */
+      /* no duplicate row indices will exist for these columns */
+      Col [col].start = Col [col-1].start + Col [col-1].length ;
+      p [col] = Col [col].start ;
+    }
+
+    /* === Re-create col form =========================================== */
+
+    for (row = 0 ; row < n_row ; row++)
+    {
+      rp = &A [Row [row].start] ;
+      rp_end = rp + Row [row].length ;
+      while (rp < rp_end)
+      {
+	A [(p [*rp++])++] = row ;
+      }
+    }
+  }
+
+  /* === Done.  Matrix is not (or no longer) jumbled ====================== */
+
+  return (true) ;
+}
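+
+/* Illustrative example of what init_rows_cols builds (hypothetical 3-by-3
+ * pattern with columns {0,2}, {1}, {0,1,2}): the column form occupies
+ * A [0 ... nnz-1] and the row form is written directly after it,
+ *
+ *   column form:  A = [ 0 2 | 1 | 0 1 2 ]   (row indices per column)
+ *   row form:     row 0 -> cols {0,2},  row 1 -> cols {1,2},
+ *                 row 2 -> cols {0,2}
+ *
+ * with Row [r].start pointing into the appended region and Row [r].length
+ * counting the entries of row r.
+ */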
+
+
+/* ========================================================================== */
+/* === init_scoring ========================================================= */
+/* ========================================================================== */
+
+/*
+  Kills dense or empty columns and rows, calculates an initial score for
+  each column, and places all columns in the degree lists.  Not user-callable.
+*/
+template <typename Index>
+static void init_scoring
+  (
+    /* === Parameters ======================================================= */
+
+    Index n_row,      /* number of rows of A */
+    Index n_col,      /* number of columns of A */
+    Colamd_Row<Index> Row [],    /* of size n_row+1 */
+    colamd_col<Index> Col [],    /* of size n_col+1 */
+    Index A [],     /* column form and row form of A */
+    Index head [],    /* of size n_col+1 */
+    double knobs [COLAMD_KNOBS],/* parameters */
+    Index *p_n_row2,    /* number of non-dense, non-empty rows */
+    Index *p_n_col2,    /* number of non-dense, non-empty columns */
+    Index *p_max_deg    /* maximum row degree */
+    )
+{
+  /* === Local variables ================================================== */
+
+  Index c ;     /* a column index */
+  Index r, row ;    /* a row index */
+  Index *cp ;     /* a column pointer */
+  Index deg ;     /* degree of a row or column */
+  Index *cp_end ;   /* a pointer to the end of a column */
+  Index *new_cp ;   /* new column pointer */
+  Index col_length ;    /* length of pruned column */
+  Index score ;     /* current column score */
+  Index n_col2 ;    /* number of non-dense, non-empty columns */
+  Index n_row2 ;    /* number of non-dense, non-empty rows */
+  Index dense_row_count ; /* remove rows with more entries than this */
+  Index dense_col_count ; /* remove cols with more entries than this */
+  Index min_score ;   /* smallest column score */
+  Index max_deg ;   /* maximum row degree */
+  Index next_col ;    /* Used to add to degree list.*/
+
+
+  /* === Extract knobs ==================================================== */
+
+  dense_row_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_ROW] * n_col, n_col)) ;
+  dense_col_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_COL] * n_row, n_row)) ;
+  COLAMD_DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ;
+  max_deg = 0 ;
+  n_col2 = n_col ;
+  n_row2 = n_row ;
+
+  /* === Kill empty columns =============================================== */
+
+  /* Put the empty columns at the end in their natural order, so that LU */
+  /* factorization can proceed as far as possible. */
+  for (c = n_col-1 ; c >= 0 ; c--)
+  {
+    deg = Col [c].length ;
+    if (deg == 0)
+    {
+      /* this is an empty column, kill and order it last */
+      Col [c].shared2.order = --n_col2 ;
+      KILL_PRINCIPAL_COL (c) ;
+    }
+  }
+  COLAMD_DEBUG1 (("colamd: null columns killed: %d\n", n_col - n_col2)) ;
+
+  /* === Kill dense columns =============================================== */
+
+  /* Put the dense columns at the end, in their natural order */
+  for (c = n_col-1 ; c >= 0 ; c--)
+  {
+    /* skip any dead columns */
+    if (COL_IS_DEAD (c))
+    {
+      continue ;
+    }
+    deg = Col [c].length ;
+    if (deg > dense_col_count)
+    {
+      /* this is a dense column, kill and order it last */
+      Col [c].shared2.order = --n_col2 ;
+      /* decrement the row degrees */
+      cp = &A [Col [c].start] ;
+      cp_end = cp + Col [c].length ;
+      while (cp < cp_end)
+      {
+	Row [*cp++].shared1.degree-- ;
+      }
+      KILL_PRINCIPAL_COL (c) ;
+    }
+  }
+  COLAMD_DEBUG1 (("colamd: Dense and null columns killed: %d\n", n_col - n_col2)) ;
+
+  /* === Kill dense and empty rows ======================================== */
+
+  for (r = 0 ; r < n_row ; r++)
+  {
+    deg = Row [r].shared1.degree ;
+    COLAMD_ASSERT (deg >= 0 && deg <= n_col) ;
+    if (deg > dense_row_count || deg == 0)
+    {
+      /* kill a dense or empty row */
+      KILL_ROW (r) ;
+      --n_row2 ;
+    }
+    else
+    {
+      /* keep track of max degree of remaining rows */
+      max_deg = COLAMD_MAX (max_deg, deg) ;
+    }
+  }
+  COLAMD_DEBUG1 (("colamd: Dense and null rows killed: %d\n", n_row - n_row2)) ;
+
+  /* === Compute initial column scores ==================================== */
+
+  /* At this point the row degrees are accurate.  They reflect the number */
+  /* of "live" (non-dense) columns in each row.  No empty rows exist. */
+  /* Some "live" columns may contain only dead rows, however.  These are */
+  /* pruned in the code below. */
+
+  /* now find the initial matlab score for each column */
+  for (c = n_col-1 ; c >= 0 ; c--)
+  {
+    /* skip dead column */
+    if (COL_IS_DEAD (c))
+    {
+      continue ;
+    }
+    score = 0 ;
+    cp = &A [Col [c].start] ;
+    new_cp = cp ;
+    cp_end = cp + Col [c].length ;
+    while (cp < cp_end)
+    {
+      /* get a row */
+      row = *cp++ ;
+      /* skip if dead */
+      if (ROW_IS_DEAD (row))
+      {
+	continue ;
+      }
+      /* compact the column */
+      *new_cp++ = row ;
+      /* add row's external degree */
+      score += Row [row].shared1.degree - 1 ;
+      /* guard against integer overflow */
+      score = COLAMD_MIN (score, n_col) ;
+    }
+    /* determine pruned column length */
+    col_length = (Index) (new_cp - &A [Col [c].start]) ;
+    if (col_length == 0)
+    {
+      /* a newly-made null column (all rows in this col are "dense" */
+      /* and have already been killed) */
+      COLAMD_DEBUG2 (("Newly null killed: %d\n", c)) ;
+      Col [c].shared2.order = --n_col2 ;
+      KILL_PRINCIPAL_COL (c) ;
+    }
+    else
+    {
+      /* set column length and set score */
+      COLAMD_ASSERT (score >= 0) ;
+      COLAMD_ASSERT (score <= n_col) ;
+      Col [c].length = col_length ;
+      Col [c].shared2.score = score ;
+    }
+  }
+  COLAMD_DEBUG1 (("colamd: Dense, null, and newly-null columns killed: %d\n",
+		  n_col-n_col2)) ;
+
+  /* At this point, all empty rows and columns are dead.  All live columns */
+  /* are "clean" (containing no dead rows) and simplicial (no supercolumns */
+  /* yet).  Rows may contain dead columns, but all live rows contain at */
+  /* least one live column. */
+
+  /* === Initialize degree lists ========================================== */
+
+
+  /* clear the hash buckets */
+  for (c = 0 ; c <= n_col ; c++)
+  {
+    head [c] = COLAMD_EMPTY ;
+  }
+  min_score = n_col ;
+  /* place in reverse order, so low column indices are at the front */
+  /* of the lists.  This is to encourage natural tie-breaking */
+  for (c = n_col-1 ; c >= 0 ; c--)
+  {
+    /* only add principal columns to degree lists */
+    if (COL_IS_ALIVE (c))
+    {
+      COLAMD_DEBUG4 (("place %d score %d minscore %d ncol %d\n",
+		      c, Col [c].shared2.score, min_score, n_col)) ;
+
+      /* === Add columns score to DList =============================== */
+
+      score = Col [c].shared2.score ;
+
+      COLAMD_ASSERT (min_score >= 0) ;
+      COLAMD_ASSERT (min_score <= n_col) ;
+      COLAMD_ASSERT (score >= 0) ;
+      COLAMD_ASSERT (score <= n_col) ;
+      COLAMD_ASSERT (head [score] >= COLAMD_EMPTY) ;
+
+      /* now add this column to dList at proper score location */
+      next_col = head [score] ;
+      Col [c].shared3.prev = COLAMD_EMPTY ;
+      Col [c].shared4.degree_next = next_col ;
+
+      /* if there already was a column with the same score, set its */
+      /* previous pointer to this new column */
+      if (next_col != COLAMD_EMPTY)
+      {
+	Col [next_col].shared3.prev = c ;
+      }
+      head [score] = c ;
+
+      /* see if this score is less than current min */
+      min_score = COLAMD_MIN (min_score, score) ;
+
+
+    }
+  }
+
+
+  /* === Return number of remaining columns, and max row degree =========== */
+
+  *p_n_col2 = n_col2 ;
+  *p_n_row2 = n_row2 ;
+  *p_max_deg = max_deg ;
+}
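+
+/* Worked example (hypothetical sizes): with the default knobs of 0.5 and a
+ * 100-by-80 matrix,
+ *
+ *   dense_row_count = max(0, min(0.5 * n_col, n_col)) = 40
+ *   dense_col_count = max(0, min(0.5 * n_row, n_row)) = 50
+ *
+ * so rows with more than 40 live entries and columns with more than 50
+ * entries are killed up front and ordered last in the output.
+ */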
+
+
+/* ========================================================================== */
+/* === find_ordering ======================================================== */
+/* ========================================================================== */
+
+/*
+  Order the principal columns of the supercolumn form of the matrix
+  (no supercolumns on input).  Uses an approximate column minimum
+  degree ordering method.  Not user-callable.
+*/
+template <typename Index>
+static Index find_ordering /* return the number of garbage collections */
+  (
+    /* === Parameters ======================================================= */
+
+    Index n_row,      /* number of rows of A */
+    Index n_col,      /* number of columns of A */
+    Index Alen,     /* size of A, 2*nnz + n_col or larger */
+    Colamd_Row<Index> Row [],    /* of size n_row+1 */
+    colamd_col<Index> Col [],    /* of size n_col+1 */
+    Index A [],     /* column form and row form of A */
+    Index head [],    /* of size n_col+1 */
+    Index n_col2,     /* Remaining columns to order */
+    Index max_deg,    /* Maximum row degree */
+    Index pfree     /* index of first free slot (2*nnz on entry) */
+    )
+{
+  /* === Local variables ================================================== */
+
+  Index k ;     /* current pivot ordering step */
+  Index pivot_col ;   /* current pivot column */
+  Index *cp ;     /* a column pointer */
+  Index *rp ;     /* a row pointer */
+  Index pivot_row ;   /* current pivot row */
+  Index *new_cp ;   /* modified column pointer */
+  Index *new_rp ;   /* modified row pointer */
+  Index pivot_row_start ; /* pointer to start of pivot row */
+  Index pivot_row_degree ;  /* number of columns in pivot row */
+  Index pivot_row_length ;  /* number of supercolumns in pivot row */
+  Index pivot_col_score ; /* score of pivot column */
+  Index needed_memory ;   /* free space needed for pivot row */
+  Index *cp_end ;   /* pointer to the end of a column */
+  Index *rp_end ;   /* pointer to the end of a row */
+  Index row ;     /* a row index */
+  Index col ;     /* a column index */
+  Index max_score ;   /* maximum possible score */
+  Index cur_score ;   /* score of current column */
+  unsigned int hash ;   /* hash value for supernode detection */
+  Index head_column ;   /* head of hash bucket */
+  Index first_col ;   /* first column in hash bucket */
+  Index tag_mark ;    /* marker value for mark array */
+  Index row_mark ;    /* Row [row].shared2.mark */
+  Index set_difference ;  /* set difference size of row with pivot row */
+  Index min_score ;   /* smallest column score */
+  Index col_thickness ;   /* "thickness" (no. of columns in a supercol) */
+  Index max_mark ;    /* maximum value of tag_mark */
+  Index pivot_col_thickness ; /* number of columns represented by pivot col */
+  Index prev_col ;    /* Used by Dlist operations. */
+  Index next_col ;    /* Used by Dlist operations. */
+  Index ngarbage ;    /* number of garbage collections performed */
+
+
+  /* === Initialization and clear mark ==================================== */
+
+  max_mark = INT_MAX - n_col ;  /* INT_MAX defined in <limits.h> */
+  tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+  min_score = 0 ;
+  ngarbage = 0 ;
+  COLAMD_DEBUG1 (("colamd: Ordering, n_col2=%d\n", n_col2)) ;
+
+  /* === Order the columns ================================================ */
+
+  for (k = 0 ; k < n_col2 ; /* 'k' is incremented below */)
+  {
+
+    /* === Select pivot column, and order it ============================ */
+
+    /* make sure degree list isn't empty */
+    COLAMD_ASSERT (min_score >= 0) ;
+    COLAMD_ASSERT (min_score <= n_col) ;
+    COLAMD_ASSERT (head [min_score] >= COLAMD_EMPTY) ;
+
+    /* get pivot column from head of minimum degree list */
+    while (head [min_score] == COLAMD_EMPTY && min_score < n_col)
+    {
+      min_score++ ;
+    }
+    pivot_col = head [min_score] ;
+    COLAMD_ASSERT (pivot_col >= 0 && pivot_col <= n_col) ;
+    next_col = Col [pivot_col].shared4.degree_next ;
+    head [min_score] = next_col ;
+    if (next_col != COLAMD_EMPTY)
+    {
+      Col [next_col].shared3.prev = COLAMD_EMPTY ;
+    }
+
+    COLAMD_ASSERT (COL_IS_ALIVE (pivot_col)) ;
+    COLAMD_DEBUG3 (("Pivot col: %d\n", pivot_col)) ;
+
+    /* remember score for defrag check */
+    pivot_col_score = Col [pivot_col].shared2.score ;
+
+    /* the pivot column is the kth column in the pivot order */
+    Col [pivot_col].shared2.order = k ;
+
+    /* increment order count by column thickness */
+    pivot_col_thickness = Col [pivot_col].shared1.thickness ;
+    k += pivot_col_thickness ;
+    COLAMD_ASSERT (pivot_col_thickness > 0) ;
+
+    /* === Garbage_collection, if necessary ============================= */
+
+    needed_memory = COLAMD_MIN (pivot_col_score, n_col - k) ;
+    if (pfree + needed_memory >= Alen)
+    {
+      pfree = Eigen::internal::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ;
+      ngarbage++ ;
+      /* after garbage collection we will have enough */
+      COLAMD_ASSERT (pfree + needed_memory < Alen) ;
+      /* garbage collection has wiped out the Row[].shared2.mark array */
+      tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+
+    }
+
+    /* === Compute pivot row pattern ==================================== */
+
+    /* get starting location for this new merged row */
+    pivot_row_start = pfree ;
+
+    /* initialize new row counts to zero */
+    pivot_row_degree = 0 ;
+
+    /* tag pivot column as having been visited so it isn't included */
+    /* in merged pivot row */
+    Col [pivot_col].shared1.thickness = -pivot_col_thickness ;
+
+    /* pivot row is the union of all rows in the pivot column pattern */
+    cp = &A [Col [pivot_col].start] ;
+    cp_end = cp + Col [pivot_col].length ;
+    while (cp < cp_end)
+    {
+      /* get a row */
+      row = *cp++ ;
+      COLAMD_DEBUG4 (("Pivot col pattern %d %d\n", ROW_IS_ALIVE (row), row)) ;
+      /* skip if row is dead */
+      if (ROW_IS_DEAD (row))
+      {
+	continue ;
+      }
+      rp = &A [Row [row].start] ;
+      rp_end = rp + Row [row].length ;
+      while (rp < rp_end)
+      {
+	/* get a column */
+	col = *rp++ ;
+	/* add the column, if alive and untagged */
+	col_thickness = Col [col].shared1.thickness ;
+	if (col_thickness > 0 && COL_IS_ALIVE (col))
+	{
+	  /* tag column in pivot row */
+	  Col [col].shared1.thickness = -col_thickness ;
+	  COLAMD_ASSERT (pfree < Alen) ;
+	  /* place column in pivot row */
+	  A [pfree++] = col ;
+	  pivot_row_degree += col_thickness ;
+	}
+      }
+    }
+
+    /* clear tag on pivot column */
+    Col [pivot_col].shared1.thickness = pivot_col_thickness ;
+    max_deg = COLAMD_MAX (max_deg, pivot_row_degree) ;
+
+
+    /* === Kill all rows used to construct pivot row ==================== */
+
+    /* also kill pivot row, temporarily */
+    cp = &A [Col [pivot_col].start] ;
+    cp_end = cp + Col [pivot_col].length ;
+    while (cp < cp_end)
+    {
+      /* may be killing an already dead row */
+      row = *cp++ ;
+      COLAMD_DEBUG3 (("Kill row in pivot col: %d\n", row)) ;
+      KILL_ROW (row) ;
+    }
+
+    /* === Select a row index to use as the new pivot row =============== */
+
+    pivot_row_length = pfree - pivot_row_start ;
+    if (pivot_row_length > 0)
+    {
+      /* pick the "pivot" row arbitrarily (first row in col) */
+      pivot_row = A [Col [pivot_col].start] ;
+      COLAMD_DEBUG3 (("Pivotal row is %d\n", pivot_row)) ;
+    }
+    else
+    {
+      /* there is no pivot row, since it is of zero length */
+      pivot_row = COLAMD_EMPTY ;
+      COLAMD_ASSERT (pivot_row_length == 0) ;
+    }
+    COLAMD_ASSERT (Col [pivot_col].length > 0 || pivot_row_length == 0) ;
+
+    /* === Approximate degree computation =============================== */
+
+    /* Here begins the computation of the approximate degree.  The column */
+    /* score is the sum of the pivot row "length", plus the size of the */
+    /* set differences of each row in the column minus the pattern of the */
+    /* pivot row itself.  The column ("thickness") itself is also */
+    /* excluded from the column score (we thus use an approximate */
+    /* external degree). */
+
+    /* The time taken by the following code (compute set differences, and */
+    /* add them up) is proportional to the size of the data structure */
+    /* being scanned - that is, the sum of the sizes of each column in */
+    /* the pivot row.  Thus, the amortized time to compute a column score */
+    /* is proportional to the size of that column (where size, in this */
+    /* context, is the column "length", or the number of row indices */
+    /* in that column).  The number of row indices in a column is */
+    /* monotonically non-decreasing, from the length of the original */
+    /* column on input to colamd. */
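+
+    /* Illustrative example (hypothetical numbers): if the pivot row has */
+    /* degree 5 and column c also appears in two other rows whose set */
+    /* differences with the pivot row are 3 and 2, the approximate score */
+    /* of c becomes 3 + 2 + 5 - thickness(c) = 9 for thickness 1, later */
+    /* capped at the maximum possible score n_col - k - thickness. */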
+
+    /* === Compute set differences ====================================== */
+
+    COLAMD_DEBUG3 (("** Computing set differences phase. **\n")) ;
+
+    /* pivot row is currently dead - it will be revived later. */
+
+    COLAMD_DEBUG3 (("Pivot row: ")) ;
+    /* for each column in pivot row */
+    rp = &A [pivot_row_start] ;
+    rp_end = rp + pivot_row_length ;
+    while (rp < rp_end)
+    {
+      col = *rp++ ;
+      COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
+      COLAMD_DEBUG3 (("Col: %d\n", col)) ;
+
+      /* clear tags used to construct pivot row pattern */
+      col_thickness = -Col [col].shared1.thickness ;
+      COLAMD_ASSERT (col_thickness > 0) ;
+      Col [col].shared1.thickness = col_thickness ;
+
+      /* === Remove column from degree list =========================== */
+
+      cur_score = Col [col].shared2.score ;
+      prev_col = Col [col].shared3.prev ;
+      next_col = Col [col].shared4.degree_next ;
+      COLAMD_ASSERT (cur_score >= 0) ;
+      COLAMD_ASSERT (cur_score <= n_col) ;
+      COLAMD_ASSERT (cur_score >= COLAMD_EMPTY) ;
+      if (prev_col == COLAMD_EMPTY)
+      {
+	head [cur_score] = next_col ;
+      }
+      else
+      {
+	Col [prev_col].shared4.degree_next = next_col ;
+      }
+      if (next_col != COLAMD_EMPTY)
+      {
+	Col [next_col].shared3.prev = prev_col ;
+      }
+
+      /* === Scan the column ========================================== */
+
+      cp = &A [Col [col].start] ;
+      cp_end = cp + Col [col].length ;
+      while (cp < cp_end)
+      {
+	/* get a row */
+	row = *cp++ ;
+	row_mark = Row [row].shared2.mark ;
+	/* skip if dead */
+	if (ROW_IS_MARKED_DEAD (row_mark))
+	{
+	  continue ;
+	}
+	COLAMD_ASSERT (row != pivot_row) ;
+	set_difference = row_mark - tag_mark ;
+	/* check if the row has been seen yet */
+	if (set_difference < 0)
+	{
+	  COLAMD_ASSERT (Row [row].shared1.degree <= max_deg) ;
+	  set_difference = Row [row].shared1.degree ;
+	}
+	/* subtract column thickness from this row's set difference */
+	set_difference -= col_thickness ;
+	COLAMD_ASSERT (set_difference >= 0) ;
+	/* absorb this row if the set difference becomes zero */
+	if (set_difference == 0)
+	{
+	  COLAMD_DEBUG3 (("aggressive absorption. Row: %d\n", row)) ;
+	  KILL_ROW (row) ;
+	}
+	else
+	{
+	  /* save the new mark */
+	  Row [row].shared2.mark = set_difference + tag_mark ;
+	}
+      }
+    }
+
+
+    /* === Add up set differences for each column ======================= */
+
+    COLAMD_DEBUG3 (("** Adding set differences phase. **\n")) ;
+
+    /* for each column in pivot row */
+    rp = &A [pivot_row_start] ;
+    rp_end = rp + pivot_row_length ;
+    while (rp < rp_end)
+    {
+      /* get a column */
+      col = *rp++ ;
+      COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;
+      hash = 0 ;
+      cur_score = 0 ;
+      cp = &A [Col [col].start] ;
+      /* compact the column */
+      new_cp = cp ;
+      cp_end = cp + Col [col].length ;
+
+      COLAMD_DEBUG4 (("Adding set diffs for Col: %d.\n", col)) ;
+
+      while (cp < cp_end)
+      {
+	/* get a row */
+	row = *cp++ ;
+	COLAMD_ASSERT(row >= 0 && row < n_row) ;
+	row_mark = Row [row].shared2.mark ;
+	/* skip if dead */
+	if (ROW_IS_MARKED_DEAD (row_mark))
+	{
+	  continue ;
+	}
+	COLAMD_ASSERT (row_mark > tag_mark) ;
+	/* compact the column */
+	*new_cp++ = row ;
+	/* compute hash function */
+	hash += row ;
+	/* add set difference */
+	cur_score += row_mark - tag_mark ;
+	/* integer overflow... */
+	cur_score = COLAMD_MIN (cur_score, n_col) ;
+      }
+
+      /* recompute the column's length */
+      Col [col].length = (Index) (new_cp - &A [Col [col].start]) ;
+
+      /* === Further mass elimination ================================= */
+
+      if (Col [col].length == 0)
+      {
+	COLAMD_DEBUG4 (("further mass elimination. Col: %d\n", col)) ;
+	/* nothing left but the pivot row in this column */
+	KILL_PRINCIPAL_COL (col) ;
+	pivot_row_degree -= Col [col].shared1.thickness ;
+	COLAMD_ASSERT (pivot_row_degree >= 0) ;
+	/* order it */
+	Col [col].shared2.order = k ;
+	/* increment order count by column thickness */
+	k += Col [col].shared1.thickness ;
+      }
+      else
+      {
+	/* === Prepare for supercolumn detection ==================== */
+
+	COLAMD_DEBUG4 (("Preparing supercol detection for Col: %d.\n", col)) ;
+
+	/* save score so far */
+	Col [col].shared2.score = cur_score ;
+
+	/* add column to hash table, for supercolumn detection */
+	hash %= n_col + 1 ;
+
+	COLAMD_DEBUG4 ((" Hash = %d, n_col = %d.\n", hash, n_col)) ;
+	COLAMD_ASSERT (hash <= n_col) ;
+
+	head_column = head [hash] ;
+	if (head_column > COLAMD_EMPTY)
+	{
+	  /* degree list "hash" is non-empty, use prev (shared3) of */
+	  /* first column in degree list as head of hash bucket */
+	  first_col = Col [head_column].shared3.headhash ;
+	  Col [head_column].shared3.headhash = col ;
+	}
+	else
+	{
+	  /* degree list "hash" is empty, use head as hash bucket */
+	  first_col = - (head_column + 2) ;
+	  head [hash] = - (col + 2) ;
+	}
+	Col [col].shared4.hash_next = first_col ;
+
+	/* save hash function in Col [col].shared3.hash */
+	Col [col].shared3.hash = (Index) hash ;
+	COLAMD_ASSERT (COL_IS_ALIVE (col)) ;
+      }
+    }
+
+    /* The approximate external column degree is now computed.  */
+
+    /* === Supercolumn detection ======================================== */
+
+    COLAMD_DEBUG3 (("** Supercolumn detection phase. **\n")) ;
+
+    Eigen::internal::detect_super_cols (Col, A, head, pivot_row_start, pivot_row_length) ;
+
+    /* === Kill the pivotal column ====================================== */
+
+    KILL_PRINCIPAL_COL (pivot_col) ;
+
+    /* === Clear mark =================================================== */
+
+    tag_mark += (max_deg + 1) ;
+    if (tag_mark >= max_mark)
+    {
+      COLAMD_DEBUG2 (("clearing tag_mark\n")) ;
+      tag_mark = Eigen::internal::clear_mark (n_row, Row) ;
+    }
+
+    /* === Finalize the new pivot row, and column scores ================ */
+
+    COLAMD_DEBUG3 (("** Finalize scores phase. **\n")) ;
+
+    /* for each column in pivot row */
+    rp = &A [pivot_row_start] ;
+    /* compact the pivot row */
+    new_rp = rp ;
+    rp_end = rp + pivot_row_length ;
+    while (rp < rp_end)
+    {
+      col = *rp++ ;
+      /* skip dead columns */
+      if (COL_IS_DEAD (col))
+      {
+	continue ;
+      }
+      *new_rp++ = col ;
+      /* add new pivot row to column */
+      A [Col [col].start + (Col [col].length++)] = pivot_row ;
+
+      /* retrieve score so far and add on pivot row's degree. */
+      /* (we wait until here for this in case the pivot */
+      /* row's degree was reduced due to mass elimination). */
+      cur_score = Col [col].shared2.score + pivot_row_degree ;
+
+      /* calculate the max possible score as the number of */
+      /* external columns minus the 'k' value minus the */
+      /* columns thickness */
+      max_score = n_col - k - Col [col].shared1.thickness ;
+
+      /* make the score the external degree of the union-of-rows */
+      cur_score -= Col [col].shared1.thickness ;
+
+      /* make sure score is less or equal than the max score */
+      cur_score = COLAMD_MIN (cur_score, max_score) ;
+      COLAMD_ASSERT (cur_score >= 0) ;
+
+      /* store updated score */
+      Col [col].shared2.score = cur_score ;
+
+      /* === Place column back in degree list ========================= */
+
+      COLAMD_ASSERT (min_score >= 0) ;
+      COLAMD_ASSERT (min_score <= n_col) ;
+      COLAMD_ASSERT (cur_score >= 0) ;
+      COLAMD_ASSERT (cur_score <= n_col) ;
+      COLAMD_ASSERT (head [cur_score] >= COLAMD_EMPTY) ;
+      next_col = head [cur_score] ;
+      Col [col].shared4.degree_next = next_col ;
+      Col [col].shared3.prev = COLAMD_EMPTY ;
+      if (next_col != COLAMD_EMPTY)
+      {
+	Col [next_col].shared3.prev = col ;
+      }
+      head [cur_score] = col ;
+
+      /* see if this score is less than current min */
+      min_score = COLAMD_MIN (min_score, cur_score) ;
+
+    }
+
+    /* === Resurrect the new pivot row ================================== */
+
+    if (pivot_row_degree > 0)
+    {
+      /* update pivot row length to reflect any cols that were killed */
+      /* during super-col detection and mass elimination */
+      Row [pivot_row].start  = pivot_row_start ;
+      Row [pivot_row].length = (Index) (new_rp - &A[pivot_row_start]) ;
+      Row [pivot_row].shared1.degree = pivot_row_degree ;
+      Row [pivot_row].shared2.mark = 0 ;
+      /* pivot row is no longer dead */
+    }
+  }
+
+  /* === All principal columns have now been ordered ====================== */
+
+  return (ngarbage) ;
+}
+
+
+/* ========================================================================== */
+/* === order_children ======================================================= */
+/* ========================================================================== */
+
+/*
+  The find_ordering routine has ordered all of the principal columns (the
+  representatives of the supercolumns).  The non-principal columns have not
+  yet been ordered.  This routine orders those columns by walking up the
+  parent tree (a column is a child of the column which absorbed it).  The
+  final permutation vector is then placed in p [0 ... n_col-1], with p [0]
+  being the first column, and p [n_col-1] being the last.  Although not
+  immediately obvious, the time taken by this routine is O (n_col), that is,
+  linear in the number of columns.  Not user-callable.
+*/
+template <typename Index>
+static inline  void order_children
+(
+  /* === Parameters ======================================================= */
+
+  Index n_col,      /* number of columns of A */
+  colamd_col<Index> Col [],    /* of size n_col+1 */
+  Index p []      /* p [0 ... n_col-1] is the column permutation*/
+  )
+{
+  /* === Local variables ================================================== */
+
+  Index i ;     /* loop counter for all columns */
+  Index c ;     /* column index */
+  Index parent ;    /* index of column's parent */
+  Index order ;     /* column's order */
+
+  /* === Order each non-principal column ================================== */
+
+  for (i = 0 ; i < n_col ; i++)
+  {
+    /* find an un-ordered non-principal column */
+    COLAMD_ASSERT (COL_IS_DEAD (i)) ;
+    if (!COL_IS_DEAD_PRINCIPAL (i) && Col [i].shared2.order == COLAMD_EMPTY)
+    {
+      parent = i ;
+      /* once found, find its principal parent */
+      do
+      {
+	parent = Col [parent].shared1.parent ;
+      } while (!COL_IS_DEAD_PRINCIPAL (parent)) ;
+
+      /* now, order all un-ordered non-principal columns along path */
+      /* to this parent.  collapse tree at the same time */
+      c = i ;
+      /* get order of parent */
+      order = Col [parent].shared2.order ;
+
+      do
+      {
+	COLAMD_ASSERT (Col [c].shared2.order == COLAMD_EMPTY) ;
+
+	/* order this column */
+	Col [c].shared2.order = order++ ;
+	/* collapse tree */
+	Col [c].shared1.parent = parent ;
+
+	/* get immediate parent of this column */
+	c = Col [c].shared1.parent ;
+
+	/* continue until we hit an ordered column.  There are */
+	/* guaranteed to be no more unordered columns */
+	/* above an ordered column */
+      } while (Col [c].shared2.order == COLAMD_EMPTY) ;
+
+      /* re-order the super_col parent to largest order for this group */
+      Col [parent].shared2.order = order ;
+    }
+  }
+
+  /* === Generate the permutation ========================================= */
+
+  for (c = 0 ; c < n_col ; c++)
+  {
+    p [Col [c].shared2.order] = c ;
+  }
+}
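+
+/* Illustrative example (hypothetical indices): suppose principal column 3
+ * absorbed columns 1 and 4, received order 7 in find_ordering, and has
+ * thickness 3, so slots 7..9 of the permutation belong to this group.
+ * order_children then gives column 1 order 7 and column 4 order 8, and
+ * re-orders the parent to 9, i.e. p [7] = 1, p [8] = 4, p [9] = 3.
+ */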
+
+
+/* ========================================================================== */
+/* === detect_super_cols ==================================================== */
+/* ========================================================================== */
+
+/*
+  Detects supercolumns by finding matches between columns in the hash buckets.
+  Check amongst columns in the set A [row_start ... row_start + row_length-1].
+  The columns under consideration are currently *not* in the degree lists,
+  and have already been placed in the hash buckets.
+
+  The hash bucket for columns whose hash function is equal to h is stored
+  as follows:
+
+  if head [h] is >= 0, then head [h] contains a degree list, so:
+
+  head [h] is the first column in degree bucket h.
+  Col [head [h]].headhash gives the first column in hash bucket h.
+
+  otherwise, the degree list is empty, and:
+
+  -(head [h] + 2) is the first column in hash bucket h.
+
+  For a column c in a hash bucket, Col [c].shared3.prev is NOT a "previous
+  column" pointer.  Col [c].shared3.hash is used instead as the hash number
+  for that column.  The value of Col [c].shared4.hash_next is the next column
+  in the same hash bucket.
+
+  Assuming no, or "few" hash collisions, the time taken by this routine is
+  linear in the sum of the sizes (lengths) of each column whose score has
+  just been computed in the approximate degree computation.
+  Not user-callable.
+*/
+template <typename Index>
+static void detect_super_cols
+(
+  /* === Parameters ======================================================= */
+  
+  colamd_col<Index> Col [],    /* of size n_col+1 */
+  Index A [],     /* row indices of A */
+  Index head [],    /* head of degree lists and hash buckets */
+  Index row_start,    /* pointer to set of columns to check */
+  Index row_length    /* number of columns to check */
+)
+{
+  /* === Local variables ================================================== */
+
+  Index hash ;      /* hash value for a column */
+  Index *rp ;     /* pointer to a row */
+  Index c ;     /* a column index */
+  Index super_c ;   /* column index of the column to absorb into */
+  Index *cp1 ;      /* column pointer for column super_c */
+  Index *cp2 ;      /* column pointer for column c */
+  Index length ;    /* length of column super_c */
+  Index prev_c ;    /* column preceding c in hash bucket */
+  Index i ;     /* loop counter */
+  Index *rp_end ;   /* pointer to the end of the row */
+  Index col ;     /* a column index in the row to check */
+  Index head_column ;   /* first column in hash bucket or degree list */
+  Index first_col ;   /* first column in hash bucket */
+
+  /* === Consider each column in the row ================================== */
+
+  rp = &A [row_start] ;
+  rp_end = rp + row_length ;
+  while (rp < rp_end)
+  {
+    col = *rp++ ;
+    if (COL_IS_DEAD (col))
+    {
+      continue ;
+    }
+
+    /* get hash number for this column */
+    hash = Col [col].shared3.hash ;
+    COLAMD_ASSERT (hash <= n_col) ;
+
+    /* === Get the first column in this hash bucket ===================== */
+
+    head_column = head [hash] ;
+    if (head_column > COLAMD_EMPTY)
+    {
+      first_col = Col [head_column].shared3.headhash ;
+    }
+    else
+    {
+      first_col = - (head_column + 2) ;
+    }
+
+    /* === Consider each column in the hash bucket ====================== */
+
+    for (super_c = first_col ; super_c != COLAMD_EMPTY ;
+	 super_c = Col [super_c].shared4.hash_next)
+    {
+      COLAMD_ASSERT (COL_IS_ALIVE (super_c)) ;
+      COLAMD_ASSERT (Col [super_c].shared3.hash == hash) ;
+      length = Col [super_c].length ;
+
+      /* prev_c is the column preceding column c in the hash bucket */
+      prev_c = super_c ;
+
+      /* === Compare super_c with all columns after it ================ */
+
+      for (c = Col [super_c].shared4.hash_next ;
+	   c != COLAMD_EMPTY ; c = Col [c].shared4.hash_next)
+      {
+	COLAMD_ASSERT (c != super_c) ;
+	COLAMD_ASSERT (COL_IS_ALIVE (c)) ;
+	COLAMD_ASSERT (Col [c].shared3.hash == hash) ;
+
+	/* not identical if lengths or scores are different */
+	if (Col [c].length != length ||
+	    Col [c].shared2.score != Col [super_c].shared2.score)
+	{
+	  prev_c = c ;
+	  continue ;
+	}
+
+	/* compare the two columns */
+	cp1 = &A [Col [super_c].start] ;
+	cp2 = &A [Col [c].start] ;
+
+	for (i = 0 ; i < length ; i++)
+	{
+	  /* the columns are "clean" (no dead rows) */
+	  COLAMD_ASSERT (ROW_IS_ALIVE (*cp1))  ;
+	  COLAMD_ASSERT (ROW_IS_ALIVE (*cp2))  ;
+	  /* row indices are in the same order for both supercols, */
+	  /* so no gather/scatter is necessary */
+	  if (*cp1++ != *cp2++)
+	  {
+	    break ;
+	  }
+	}
+
+	/* the two columns are different if the for-loop "broke" */
+	if (i != length)
+	{
+	  prev_c = c ;
+	  continue ;
+	}
+
+	/* === Got it!  two columns are identical =================== */
+
+	COLAMD_ASSERT (Col [c].shared2.score == Col [super_c].shared2.score) ;
+
+	Col [super_c].shared1.thickness += Col [c].shared1.thickness ;
+	Col [c].shared1.parent = super_c ;
+	KILL_NON_PRINCIPAL_COL (c) ;
+	/* order c later, in order_children() */
+	Col [c].shared2.order = COLAMD_EMPTY ;
+	/* remove c from hash bucket */
+	Col [prev_c].shared4.hash_next = Col [c].shared4.hash_next ;
+      }
+    }
+
+    /* === Empty this hash bucket ======================================= */
+
+    if (head_column > COLAMD_EMPTY)
+    {
+      /* corresponding degree list "hash" is not empty */
+      Col [head_column].shared3.headhash = COLAMD_EMPTY ;
+    }
+    else
+    {
+      /* corresponding degree list "hash" is empty */
+      head [hash] = COLAMD_EMPTY ;
+    }
+  }
+}
+
+
+/* ========================================================================== */
+/* === garbage_collection =================================================== */
+/* ========================================================================== */
+
+/*
+  Defragments and compacts columns and rows in the workspace A.  Used when
+  all available memory has been used while performing row merging.  Returns
+  the index of the first free position in A, after garbage collection.  The
+  time taken by this routine is linear in the size of the array A, which is
+  itself linear in the number of nonzeros in the input matrix.
+  Not user-callable.
+*/
+template <typename Index>
+static Index garbage_collection  /* returns the new value of pfree */
+  (
+    /* === Parameters ======================================================= */
+    
+    Index n_row,      /* number of rows */
+    Index n_col,      /* number of columns */
+    Colamd_Row<Index> Row [],    /* row info */
+    colamd_col<Index> Col [],    /* column info */
+    Index A [],     /* A [0 ... Alen-1] holds the matrix */
+    Index *pfree      /* &A [0] ... pfree is in use */
+    )
+{
+  /* === Local variables ================================================== */
+
+  Index *psrc ;     /* source pointer */
+  Index *pdest ;    /* destination pointer */
+  Index j ;     /* counter */
+  Index r ;     /* a row index */
+  Index c ;     /* a column index */
+  Index length ;    /* length of a row or column */
+
+  /* === Defragment the columns =========================================== */
+
+  pdest = &A[0] ;
+  for (c = 0 ; c < n_col ; c++)
+  {
+    if (COL_IS_ALIVE (c))
+    {
+      psrc = &A [Col [c].start] ;
+
+      /* move and compact the column */
+      COLAMD_ASSERT (pdest <= psrc) ;
+      Col [c].start = (Index) (pdest - &A [0]) ;
+      length = Col [c].length ;
+      for (j = 0 ; j < length ; j++)
+      {
+	r = *psrc++ ;
+	if (ROW_IS_ALIVE (r))
+	{
+	  *pdest++ = r ;
+	}
+      }
+      Col [c].length = (Index) (pdest - &A [Col [c].start]) ;
+    }
+  }
+
+  /* === Prepare to defragment the rows =================================== */
+
+  for (r = 0 ; r < n_row ; r++)
+  {
+    if (ROW_IS_ALIVE (r))
+    {
+      if (Row [r].length == 0)
+      {
+	/* this row is of zero length.  cannot compact it, so kill it */
+	COLAMD_DEBUG3 (("Defrag row kill\n")) ;
+	KILL_ROW (r) ;
+      }
+      else
+      {
+	/* save first column index in Row [r].shared2.first_column */
+	psrc = &A [Row [r].start] ;
+	Row [r].shared2.first_column = *psrc ;
+	COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;
+	/* flag the start of the row with the one's complement of row */
+	*psrc = ONES_COMPLEMENT (r) ;
+
+      }
+    }
+  }
+
+  /* === Defragment the rows ============================================== */
+
+  psrc = pdest ;
+  while (psrc < pfree)
+  {
+    /* find a negative number ... the start of a row */
+    if (*psrc++ < 0)
+    {
+      psrc-- ;
+      /* get the row index */
+      r = ONES_COMPLEMENT (*psrc) ;
+      COLAMD_ASSERT (r >= 0 && r < n_row) ;
+      /* restore first column index */
+      *psrc = Row [r].shared2.first_column ;
+      COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;
+
+      /* move and compact the row */
+      COLAMD_ASSERT (pdest <= psrc) ;
+      Row [r].start = (Index) (pdest - &A [0]) ;
+      length = Row [r].length ;
+      for (j = 0 ; j < length ; j++)
+      {
+	c = *psrc++ ;
+	if (COL_IS_ALIVE (c))
+	{
+	  *pdest++ = c ;
+	}
+      }
+      Row [r].length = (Index) (pdest - &A [Row [r].start]) ;
+
+    }
+  }
+  /* ensure we found all the rows */
+  COLAMD_ASSERT (debug_rows == 0) ;
+
+  /* === Return the new value of pfree ==================================== */
+
+  return ((Index) (pdest - &A [0])) ;
+}
+
+
+/* ========================================================================== */
+/* === clear_mark =========================================================== */
+/* ========================================================================== */
+
+/*
+  Clears the Row [].shared2.mark array, and returns the new tag_mark.
+  Not user-callable.
+*/
+template <typename Index>
+static inline  Index clear_mark  /* return the new value for tag_mark */
+  (
+      /* === Parameters ======================================================= */
+
+    Index n_row,    /* number of rows in A */
+    Colamd_Row<Index> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
+    )
+{
+  /* === Local variables ================================================== */
+
+  Index r ;
+
+  for (r = 0 ; r < n_row ; r++)
+  {
+    if (ROW_IS_ALIVE (r))
+    {
+      Row [r].shared2.mark = 0 ;
+    }
+  }
+  return (1) ;
+}
+
+
+} // namespace internal 
+#endif
diff --git a/Eigen/src/OrderingMethods/Ordering.h b/Eigen/src/OrderingMethods/Ordering.h
new file mode 100644
index 0000000..f3c31f9
--- /dev/null
+++ b/Eigen/src/OrderingMethods/Ordering.h
@@ -0,0 +1,154 @@
+ 
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012  Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ORDERING_H
+#define EIGEN_ORDERING_H
+
+namespace Eigen {
+  
+#include "Eigen_Colamd.h"
+
+namespace internal {
+    
+/** \internal
+  * \ingroup OrderingMethods_Module
+  * \returns the symmetric pattern A^T+A from the input matrix A. 
+  * FIXME: The values should not be considered here
+  */
+template<typename MatrixType> 
+void ordering_helper_at_plus_a(const MatrixType& mat, MatrixType& symmat)
+{
+  MatrixType C;
+  C = mat.transpose(); // NOTE: could be costly
+  for (int i = 0; i < C.rows(); i++) 
+  {
+      for (typename MatrixType::InnerIterator it(C, i); it; ++it)
+        it.valueRef() = 0.0;
+  }
+  symmat = C + mat; 
+}
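+
+// A minimal usage sketch (hypothetical names; as the FIXME above notes, only
+// the pattern of the result is meaningful, not its values):
+//   SparseMatrix<double> A, S;                 // fill A ...
+//   internal::ordering_helper_at_plus_a(A, S); // S gets the pattern of A^T + A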
+    
+}
+
+#ifndef EIGEN_MPL2_ONLY
+
+/** \ingroup OrderingMethods_Module
+  * \class AMDOrdering
+  *
+  * Functor computing the \em approximate \em minimum \em degree ordering.
+  * If the matrix is not structurally symmetric, an ordering of A^T+A is computed.
+  * \tparam  Index The type of indices of the matrix 
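+  *
+  * A minimal usage sketch (assuming a double, column-major sparse matrix):
+  * \code
+  * SparseMatrix<double> A;                        // fill A ...
+  * AMDOrdering<int> ordering;
+  * PermutationMatrix<Dynamic, Dynamic, int> perm;
+  * ordering(A, perm);                             // perm holds the AMD permutation
+  * \endcode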
+  * \sa COLAMDOrdering
+  */
+template <typename Index>
+class AMDOrdering
+{
+  public:
+    typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+    
+    /** Compute the permutation vector from a sparse matrix.
+     * This routine is much faster if the input matrix is column-major.
+     */
+    template <typename MatrixType>
+    void operator()(const MatrixType& mat, PermutationType& perm)
+    {
+      // Compute the symmetric pattern
+      SparseMatrix<typename MatrixType::Scalar, ColMajor, Index> symm;
+      internal::ordering_helper_at_plus_a(mat,symm); 
+    
+      // Call the AMD routine 
+      //m_mat.prune(keep_diag());
+      internal::minimum_degree_ordering(symm, perm);
+    }
+    
+    /** Compute the permutation with a selfadjoint matrix */
+    template <typename SrcType, unsigned int SrcUpLo> 
+    void operator()(const SparseSelfAdjointView<SrcType, SrcUpLo>& mat, PermutationType& perm)
+    { 
+      SparseMatrix<typename SrcType::Scalar, ColMajor, Index> C; C = mat;
+      
+      // Call the AMD routine 
+      // m_mat.prune(keep_diag()); //Remove the diagonal elements 
+      internal::minimum_degree_ordering(C, perm);
+    }
+};
+
+#endif // EIGEN_MPL2_ONLY
+
+/** \ingroup OrderingMethods_Module
+  * \class NaturalOrdering
+  *
+  * Functor computing the natural ordering (identity).
+  * 
+  * \note Returns an empty permutation matrix.
+  * \tparam  Index The type of indices of the matrix 
+  */
+template <typename Index>
+class NaturalOrdering
+{
+  public:
+    typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+    
+    /** Compute the permutation vector from a column-major sparse matrix */
+    template <typename MatrixType>
+    void operator()(const MatrixType& /*mat*/, PermutationType& perm)
+    {
+      perm.resize(0); 
+    }
+    
+};
+
+/** \ingroup OrderingMethods_Module
+  * \class COLAMDOrdering
+  *
+  * Functor computing the \em column \em approximate \em minimum \em degree ordering. 
+  * The matrix should be in compressed, column-major format (see SparseMatrix::makeCompressed()).
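+  *
+  * A minimal usage sketch (assuming a compressed, column-major sparse matrix):
+  * \code
+  * SparseMatrix<double> A;                        // fill A ...
+  * A.makeCompressed();
+  * COLAMDOrdering<int> ordering;
+  * PermutationMatrix<Dynamic, Dynamic, int> perm;
+  * ordering(A, perm);
+  * \endcode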
+  */
+template<typename Index>
+class COLAMDOrdering
+{
+  public:
+    typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType; 
+    typedef Matrix<Index, Dynamic, 1> IndexVector;
+    
+    /** Compute the permutation vector \a perm from the sparse matrix \a mat
+      * \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
+      */
+    template <typename MatrixType>
+    void operator() (const MatrixType& mat, PermutationType& perm)
+    {
+      eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering");
+      
+      Index m = mat.rows();
+      Index n = mat.cols();
+      Index nnz = mat.nonZeros();
+      // Get the recommended value of Alen to be used by colamd
+      Index Alen = internal::colamd_recommended(nnz, m, n); 
+      // Set the default parameters
+      double knobs [COLAMD_KNOBS]; 
+      Index stats [COLAMD_STATS];
+      internal::colamd_set_defaults(knobs);
+      
+      IndexVector p(n+1), A(Alen); 
+      for(Index i=0; i <= n; i++)   p(i) = mat.outerIndexPtr()[i];
+      for(Index i=0; i < nnz; i++)  A(i) = mat.innerIndexPtr()[i];
+      // Call Colamd routine to compute the ordering 
+      Index info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats); 
+      EIGEN_UNUSED_VARIABLE(info);
+      eigen_assert( info && "COLAMD failed " );
+      
+      perm.resize(n);
+      for (Index i = 0; i < n; i++) perm.indices()(p(i)) = i;
+    }
+};
+
+} // end namespace Eigen
+
+#endif
diff --git a/Eigen/src/PaStiXSupport/CMakeLists.txt b/Eigen/src/PaStiXSupport/CMakeLists.txt
new file mode 100644
index 0000000..28c657e
--- /dev/null
+++ b/Eigen/src/PaStiXSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_PastixSupport_SRCS "*.h")
+
+INSTALL(FILES 
+  ${Eigen_PastixSupport_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/PaStiXSupport COMPONENT Devel
+  )
diff --git a/Eigen/src/PaStiXSupport/PaStiXSupport.h b/Eigen/src/PaStiXSupport/PaStiXSupport.h
new file mode 100644
index 0000000..a955287
--- /dev/null
+++ b/Eigen/src/PaStiXSupport/PaStiXSupport.h
@@ -0,0 +1,721 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_PASTIXSUPPORT_H
+#define EIGEN_PASTIXSUPPORT_H
+
+namespace Eigen { 
+
+/** \ingroup PaStiXSupport_Module
+  * \brief Interface to the PaStiX solver
+  * 
+  * This class is used to solve the linear systems A.X = B via the PaStiX library. 
+  * The matrix can be either real or complex, symmetric or not.
+  *
+  * \sa TutorialSparseDirectSolvers
+  */
+template<typename _MatrixType, bool IsStrSym = false> class PastixLU;
+template<typename _MatrixType, int Options> class PastixLLT;
+template<typename _MatrixType, int Options> class PastixLDLT;
+
+namespace internal
+{
+    
+  template<class Pastix> struct pastix_traits;
+
+  template<typename _MatrixType>
+  struct pastix_traits< PastixLU<_MatrixType> >
+  {
+    typedef _MatrixType MatrixType;
+    typedef typename _MatrixType::Scalar Scalar;
+    typedef typename _MatrixType::RealScalar RealScalar;
+    typedef typename _MatrixType::Index Index;
+  };
+
+  template<typename _MatrixType, int Options>
+  struct pastix_traits< PastixLLT<_MatrixType,Options> >
+  {
+    typedef _MatrixType MatrixType;
+    typedef typename _MatrixType::Scalar Scalar;
+    typedef typename _MatrixType::RealScalar RealScalar;
+    typedef typename _MatrixType::Index Index;
+  };
+
+  template<typename _MatrixType, int Options>
+  struct pastix_traits< PastixLDLT<_MatrixType,Options> >
+  {
+    typedef _MatrixType MatrixType;
+    typedef typename _MatrixType::Scalar Scalar;
+    typedef typename _MatrixType::RealScalar RealScalar;
+    typedef typename _MatrixType::Index Index;
+  };
+  
+  inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm)
+  {
+    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
+    if (nbrhs == 0) {x = NULL; nbrhs=1;}
+    s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); 
+  }
+  
+  inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm)
+  {
+    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
+    if (nbrhs == 0) {x = NULL; nbrhs=1;}
+    d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); 
+  }
+  
+  inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<float> *vals, int *perm, int * invp, std::complex<float> *x, int nbrhs, int *iparm, double *dparm)
+  {
+    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
+    if (nbrhs == 0) {x = NULL; nbrhs=1;}
+    c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<COMPLEX*>(vals), perm, invp, reinterpret_cast<COMPLEX*>(x), nbrhs, iparm, dparm); 
+  }
+  
+  inline void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<double> *vals, int *perm, int * invp, std::complex<double> *x, int nbrhs, int *iparm, double *dparm)
+  {
+    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
+    if (nbrhs == 0) {x = NULL; nbrhs=1;}
+    z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<DCOMPLEX*>(vals), perm, invp, reinterpret_cast<DCOMPLEX*>(x), nbrhs, iparm, dparm); 
+  }
+
+  // Convert the matrix to Fortran-style (1-based) numbering
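+  // (all 0-based indices are shifted by +1; e.g. an outer index array
+  // [0,2,5] becomes [1,3,6], as expected by PaStiX)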
+  template <typename MatrixType>
+  void c_to_fortran_numbering (MatrixType& mat)
+  {
+    if ( !(mat.outerIndexPtr()[0]) ) 
+    { 
+      int i;
+      for(i = 0; i <= mat.rows(); ++i)
+        ++mat.outerIndexPtr()[i];
+      for(i = 0; i < mat.nonZeros(); ++i)
+        ++mat.innerIndexPtr()[i];
+    }
+  }
+  
+  // Convert back to C-style (0-based) numbering
+  template <typename MatrixType>
+  void fortran_to_c_numbering (MatrixType& mat)
+  {
+    // Check the Numbering
+    if ( mat.outerIndexPtr()[0] == 1 ) 
+    { // Convert to C-style numbering
+      int i;
+      for(i = 0; i <= mat.rows(); ++i)
+        --mat.outerIndexPtr()[i];
+      for(i = 0; i < mat.nonZeros(); ++i)
+        --mat.innerIndexPtr()[i];
+    }
+  }
+}
+
+// This is the base class to interface with PaStiX functions. 
+// Users should not use this class directly. 
+template <class Derived>
+class PastixBase : internal::noncopyable
+{
+  public:
+    typedef typename internal::pastix_traits<Derived>::MatrixType _MatrixType;
+    typedef _MatrixType MatrixType;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Scalar,Dynamic,1> Vector;
+    typedef SparseMatrix<Scalar, ColMajor> ColSpMatrix;
+    
+  public:
+    
+    PastixBase() : m_initisOk(false), m_analysisIsOk(false), m_factorizationIsOk(false), m_isInitialized(false), m_pastixdata(0), m_size(0)
+    {
+      init();
+    }
+    
+    ~PastixBase() 
+    {
+      clean();
+    }
+
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<PastixBase, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "Pastix solver is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "PastixBase::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<PastixBase, Rhs>(*this, b.derived());
+    }
+    
+    template<typename Rhs,typename Dest>
+    bool _solve (const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const;
+    
+    Derived& derived()
+    {
+      return *static_cast<Derived*>(this);
+    }
+    const Derived& derived() const
+    {
+      return *static_cast<const Derived*>(this);
+    }
+
+    /** Returns a reference to the integer vector IPARM of PaStiX parameters,
+      * which can be used to modify the default parameters. 
+      * The statistics related to the different phases of the factorization and solve are saved here as well.
+      * \sa analyzePattern() factorize()
+      */
+    Array<Index,IPARM_SIZE,1>& iparm()
+    {
+      return m_iparm; 
+    }
+    
+    /** Returns a reference to a particular entry of the IPARM vector. 
+     * \sa iparm()
+     */
+    int& iparm(int idxparam)
+    {
+      return m_iparm(idxparam);
+    }
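+
+    // For instance, to silence the solver (a sketch; the IPARM entries are
+    // documented in the PaStiX user's manual):
+    //   solver.iparm(IPARM_VERBOSE) = 0;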
+    
+     /** Returns a reference to the double vector DPARM of PaStiX parameters.
+      * The statistics related to the different phases of the factorization and solve are saved here as well.
+      * \sa analyzePattern() factorize()
+      */
+    Array<RealScalar,IPARM_SIZE,1>& dparm()
+    {
+      return m_dparm; 
+    }
+    
+    
+    /** Returns a reference to a particular entry of the DPARM vector. 
+     * \sa dparm()
+     */
+    double& dparm(int idxparam)
+    {
+      return m_dparm(idxparam);
+    }
+    
+    inline Index cols() const { return m_size; }
+    inline Index rows() const { return m_size; }
+    
+     /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if PaStiX reports a problem,
+      *          \c InvalidInput if the input matrix is invalid.
+      *
+      * \sa iparm()          
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<PastixBase, Rhs>
+    solve(const SparseMatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "Pastix LU, LLT or LDLT is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "PastixBase::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::sparse_solve_retval<PastixBase, Rhs>(*this, b.derived());
+    }
+    
+  protected:
+
+    // Initialize the Pastix data structure, check the matrix
+    void init(); 
+    
+    // Compute the ordering and the symbolic factorization
+    void analyzePattern(ColSpMatrix& mat);
+    
+    // Compute the numerical factorization
+    void factorize(ColSpMatrix& mat);
+    
+    // Free all the data allocated by Pastix
+    void clean()
+    {
+      eigen_assert(m_initisOk && "The Pastix structure should be allocated first"); 
+      m_iparm(IPARM_START_TASK) = API_TASK_CLEAN;
+      m_iparm(IPARM_END_TASK) = API_TASK_CLEAN;
+      internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0,
+                             m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
+    }
+    
+    void compute(ColSpMatrix& mat);
+    
+    int m_initisOk; 
+    int m_analysisIsOk;
+    int m_factorizationIsOk;
+    bool m_isInitialized;
+    mutable ComputationInfo m_info; 
+    mutable pastix_data_t *m_pastixdata; // Data structure for pastix
+    mutable int m_comm; // The MPI communicator identifier
+    mutable Matrix<int,IPARM_SIZE,1> m_iparm; // integer vector for the input parameters
+    mutable Matrix<double,DPARM_SIZE,1> m_dparm; // Scalar vector for the input parameters
+    mutable Matrix<Index,Dynamic,1> m_perm;  // Permutation vector
+    mutable Matrix<Index,Dynamic,1> m_invp;  // Inverse permutation vector
+    mutable int m_size; // Size of the matrix 
+}; 
+
+ /** Initialize the PaStiX data structure. 
+   * A first call to this function fills iparm and dparm with the default PaStiX parameters.
+   * \sa iparm() dparm()
+   */
+template <class Derived>
+void PastixBase<Derived>::init()
+{
+  m_size = 0; 
+  m_iparm.setZero(IPARM_SIZE);
+  m_dparm.setZero(DPARM_SIZE);
+  
+  m_iparm(IPARM_MODIFY_PARAMETER) = API_NO;
+  pastix(&m_pastixdata, MPI_COMM_WORLD,
+         0, 0, 0, 0,
+         0, 0, 0, 1, m_iparm.data(), m_dparm.data());
+  
+  m_iparm[IPARM_MATRIX_VERIFICATION] = API_NO;
+  m_iparm[IPARM_VERBOSE]             = 2;
+  m_iparm[IPARM_ORDERING]            = API_ORDER_SCOTCH;
+  m_iparm[IPARM_INCOMPLETE]          = API_NO;
+  m_iparm[IPARM_OOC_LIMIT]           = 2000;
+  m_iparm[IPARM_RHS_MAKING]          = API_RHS_B;
+  m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;
+  
+  m_iparm(IPARM_START_TASK) = API_TASK_INIT;
+  m_iparm(IPARM_END_TASK) = API_TASK_INIT;
+  internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0,
+                         0, 0, 0, 0, m_iparm.data(), m_dparm.data());
+  
+  // Check the returned error
+  if(m_iparm(IPARM_ERROR_NUMBER)) {
+    m_info = InvalidInput;
+    m_initisOk = false;
+  }
+  else { 
+    m_info = Success;
+    m_initisOk = true;
+  }
+}
+
+template <class Derived>
+void PastixBase<Derived>::compute(ColSpMatrix& mat)
+{
+  eigen_assert(mat.rows() == mat.cols() && "The input matrix should be square");
+  
+  analyzePattern(mat);  
+  factorize(mat);
+  
+  m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;
+  m_isInitialized = m_factorizationIsOk;
+}
+
+
+template <class Derived>
+void PastixBase<Derived>::analyzePattern(ColSpMatrix& mat)
+{                         
+  eigen_assert(m_initisOk && "The initialization of PaStiX failed");
+  
+  // clean previous calls
+  if(m_size>0)
+    clean();
+  
+  m_size = mat.rows();
+  m_perm.resize(m_size);
+  m_invp.resize(m_size);
+  
+  m_iparm(IPARM_START_TASK) = API_TASK_ORDERING;
+  m_iparm(IPARM_END_TASK) = API_TASK_ANALYSE;
+  internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
+               mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
+  
+  // Check the returned error
+  if(m_iparm(IPARM_ERROR_NUMBER))
+  {
+    m_info = NumericalIssue;
+    m_analysisIsOk = false;
+  }
+  else
+  { 
+    m_info = Success;
+    m_analysisIsOk = true;
+  }
+}
+
+template <class Derived>
+void PastixBase<Derived>::factorize(ColSpMatrix& mat)
+{
+//   if(&m_cpyMat != &mat) m_cpyMat = mat;
+  eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase");
+  m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT;
+  m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT;
+  m_size = mat.rows();
+  
+  internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
+               mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
+  
+  // Check the returned error
+  if(m_iparm(IPARM_ERROR_NUMBER))
+  {
+    m_info = NumericalIssue;
+    m_factorizationIsOk = false;
+    m_isInitialized = false;
+  }
+  else
+  {
+    m_info = Success;
+    m_factorizationIsOk = true;
+    m_isInitialized = true;
+  }
+}
+
+/* Solve the system */
+template<typename Base>
+template<typename Rhs,typename Dest>
+bool PastixBase<Base>::_solve (const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const
+{
+  eigen_assert(m_isInitialized && "The matrix should be factorized first");
+  EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
+                     THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+  int rhs = 1;
+  
+  x = b; /* on return, x is overwritten by the computed solution */
+  
+  for (int i = 0; i < b.cols(); i++){
+    m_iparm[IPARM_START_TASK]          = API_TASK_SOLVE;
+    m_iparm[IPARM_END_TASK]            = API_TASK_REFINE;
+  
+    internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, x.rows(), 0, 0, 0,
+                           m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data());
+  }
+  
+  // Check the returned error
+  m_info = m_iparm(IPARM_ERROR_NUMBER)==0 ? Success : NumericalIssue;
+  
+  return m_iparm(IPARM_ERROR_NUMBER)==0;
+}
+
+/** \ingroup PaStiXSupport_Module
+  * \class PastixLU
+  * \brief Sparse direct LU solver based on PaStiX library
+  * 
+  * This class is used to solve the linear systems A.X = B with a supernodal LU 
+  * factorization available in the PaStiX library. The matrix A should be square and nonsingular.
+  * PaStiX requires that the matrix A has a symmetric structural pattern; 
+  * otherwise, this interface symmetrizes the pattern internally. 
+  * The vectors or matrices X and B can be either dense or sparse.
+  * 
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam IsStrSym Indicates if the input matrix has a symmetric pattern, default is false
+  * \note If the analysis and factorization phases are called separately, 
+  * the input matrix will be symmetrized at each call; hence it is advised to 
+  * symmetrize the matrix in the end-user program and set \p IsStrSym to true.
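+  *
+  * A minimal calling sequence (a sketch, assuming PaStiX is properly linked):
+  * \code
+  * SparseMatrix<double> A;  VectorXd b, x;      // fill A and b ...
+  * PastixLU<SparseMatrix<double> > solver;
+  * solver.compute(A);                           // analyzePattern() + factorize()
+  * x = solver.solve(b);
+  * \endcode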
+  * 
+  * \sa \ref TutorialSparseDirectSolvers
+  * 
+  */
+template<typename _MatrixType, bool IsStrSym>
+class PastixLU : public PastixBase< PastixLU<_MatrixType> >
+{
+  public:
+    typedef _MatrixType MatrixType;
+    typedef PastixBase<PastixLU<MatrixType> > Base;
+    typedef typename Base::ColSpMatrix ColSpMatrix;
+    typedef typename MatrixType::Index Index;
+    
+  public:
+    PastixLU() : Base()
+    {
+      init();
+    }
+    
+    PastixLU(const MatrixType& matrix):Base()
+    {
+      init();
+      compute(matrix);
+    }
+    /** Compute the LU supernodal factorization of \p matrix. 
+      * iparm and dparm can be used to tune the PaStiX parameters; 
+      * see the PaStiX user's manual.
+      * \sa analyzePattern() factorize()
+      */
+    void compute (const MatrixType& matrix)
+    {
+      m_structureIsUptodate = false;
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::compute(temp);
+    }
+    /** Compute the LU symbolic factorization of \p matrix using its sparsity pattern. 
+      * Several ordering methods can be used at this step. See the PaStiX user's manual. 
+      * The result of this operation can be reused with successive matrices having the same pattern as \p matrix.
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& matrix)
+    {
+      m_structureIsUptodate = false;
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::analyzePattern(temp);
+    }
+
+    /** Compute the LU supernodal factorization of \p matrix.
+      * \warning The matrix \p matrix should have the same structural pattern 
+      * as the one used in the analysis phase.
+      * \sa analyzePattern()
+      */ 
+    void factorize(const MatrixType& matrix)
+    {
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::factorize(temp);
+    }
+  protected:
+    
+    void init()
+    {
+      m_structureIsUptodate = false;
+      m_iparm(IPARM_SYM) = API_SYM_NO;
+      m_iparm(IPARM_FACTORIZATION) = API_FACT_LU;
+    }
+    
+    void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
+    {
+      if(IsStrSym)
+        out = matrix;
+      else
+      {
+        if(!m_structureIsUptodate)
+        {
+          // update the transposed structure
+          m_transposedStructure = matrix.transpose();
+          
+          // Set the elements of the matrix to zero 
+          for (Index j=0; j<m_transposedStructure.outerSize(); ++j) 
+            for(typename ColSpMatrix::InnerIterator it(m_transposedStructure, j); it; ++it)
+              it.valueRef() = 0.0;
+
+          m_structureIsUptodate = true;
+        }
+        
+        out = m_transposedStructure + matrix;
+      }
+      internal::c_to_fortran_numbering(out);
+    }
+    
+    using Base::m_iparm;
+    using Base::m_dparm;
+    
+    ColSpMatrix m_transposedStructure;
+    bool m_structureIsUptodate;
+};
+
+/** \ingroup PaStiXSupport_Module
+  * \class PastixLLT
+  * \brief A sparse direct supernodal Cholesky (LLT) factorization and solver based on the PaStiX library
+  * 
+  * This class is used to solve the linear systems A.X = B via a LL^T supernodal Cholesky factorization
+  * available in the PaStiX library. The matrix A should be symmetric and positive definite.
+  * \warning Selfadjoint complex matrices are not supported in the current version of PaStiX.
+  * The vectors or matrices X and B can be either dense or sparse.
+  * 
+  * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam UpLo The part of the matrix to use: Lower or Upper. The default is Lower as required by PaStiX
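+  *
+  * When several matrices share the same sparsity pattern, the symbolic analysis
+  * can be reused (a sketch, assuming A1 and A2 are symmetric positive definite
+  * with identical patterns):
+  * \code
+  * SparseMatrix<double> A1, A2;  VectorXd b1, b2, x1, x2;  // fill A1, A2, b1, b2 ...
+  * PastixLLT<SparseMatrix<double>, Lower> solver;
+  * solver.analyzePattern(A1);                   // symbolic step, done once
+  * solver.factorize(A1);  x1 = solver.solve(b1);
+  * solver.factorize(A2);  x2 = solver.solve(b2);
+  * \endcode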
+  * 
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename _MatrixType, int _UpLo>
+class PastixLLT : public PastixBase< PastixLLT<_MatrixType, _UpLo> >
+{
+  public:
+    typedef _MatrixType MatrixType;
+    typedef PastixBase<PastixLLT<MatrixType, _UpLo> > Base;
+    typedef typename Base::ColSpMatrix ColSpMatrix;
+    
+  public:
+    enum { UpLo = _UpLo };
+    PastixLLT() : Base()
+    {
+      init();
+    }
+    
+    PastixLLT(const MatrixType& matrix):Base()
+    {
+      init();
+      compute(matrix);
+    }
+
+    /** Compute the L factor of the LL^T supernodal factorization of \p matrix 
+      * \sa analyzePattern() factorize()
+      */
+    void compute (const MatrixType& matrix)
+    {
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::compute(temp);
+    }
+
+    /** Compute the LL^T symbolic factorization of \p matrix using its sparsity pattern.
+      * The result of this operation can be reused with successive matrices having the same pattern as \p matrix.
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& matrix)
+    {
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::analyzePattern(temp);
+    }
+    /** Compute the LL^T supernodal numerical factorization of \p matrix. 
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& matrix)
+    {
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::factorize(temp);
+    }
+  protected:
+    using Base::m_iparm;
+    
+    void init()
+    {
+      m_iparm(IPARM_SYM) = API_SYM_YES;
+      m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT;
+    }
+    
+    void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
+    {
+      // Pastix supports only lower, column-major matrices 
+      out.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>();
+      internal::c_to_fortran_numbering(out);
+    }
+};
+
+/** \ingroup PaStiXSupport_Module
+  * \class PastixLDLT
+  * \brief A sparse direct supernodal Cholesky (LDLT) factorization and solver based on the PaStiX library
+  * 
+  * This class is used to solve the linear systems A.X = B via a LDL^T supernodal Cholesky factorization
+  * available in the PaStiX library. The matrix A should be symmetric and positive definite.
+  * \warning Selfadjoint complex matrices are not supported in the current version of PaStiX.
+  * The vectors or matrices X and B can be either dense or sparse.
+  * 
+  * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam UpLo The part of the matrix to use: Lower or Upper. The default is Lower as required by PaStiX
+  * 
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename _MatrixType, int _UpLo>
+class PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> >
+{
+  public:
+    typedef _MatrixType MatrixType;
+    typedef PastixBase<PastixLDLT<MatrixType, _UpLo> > Base; 
+    typedef typename Base::ColSpMatrix ColSpMatrix;
+    
+  public:
+    enum { UpLo = _UpLo };
+    PastixLDLT():Base()
+    {
+      init();
+    }
+    
+    PastixLDLT(const MatrixType& matrix):Base()
+    {
+      init();
+      compute(matrix);
+    }
+
+    /** Compute the L and D factors of the LDL^T factorization of \p matrix 
+      * \sa analyzePattern() factorize()
+      */
+    void compute (const MatrixType& matrix)
+    {
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::compute(temp);
+    }
+
+    /** Compute the LDL^T symbolic factorization of \p matrix using its sparsity pattern.
+      * The result of this operation can be reused with successive matrices having the same pattern as \p matrix.
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& matrix)
+    { 
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::analyzePattern(temp);
+    }
+    /** Compute the LDL^T supernodal numerical factorization of \p matrix. 
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& matrix)
+    {
+      ColSpMatrix temp;
+      grabMatrix(matrix, temp);
+      Base::factorize(temp);
+    }
+
+  protected:
+    using Base::m_iparm;
+    
+    void init()
+    {
+      m_iparm(IPARM_SYM) = API_SYM_YES;
+      m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT;
+    }
+    
+    void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)
+    {
+      // Pastix supports only lower, column-major matrices 
+      out.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>();
+      internal::c_to_fortran_numbering(out);
+    }
+};
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<PastixBase<_MatrixType>, Rhs>
+  : solve_retval_base<PastixBase<_MatrixType>, Rhs>
+{
+  typedef PastixBase<_MatrixType> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+template<typename _MatrixType, typename Rhs>
+struct sparse_solve_retval<PastixBase<_MatrixType>, Rhs>
+  : sparse_solve_retval_base<PastixBase<_MatrixType>, Rhs>
+{
+  typedef PastixBase<_MatrixType> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    this->defaultEvalTo(dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif
diff --git a/Eigen/src/PardisoSupport/CMakeLists.txt b/Eigen/src/PardisoSupport/CMakeLists.txt
new file mode 100644
index 0000000..a097ab4
--- /dev/null
+++ b/Eigen/src/PardisoSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_PardisoSupport_SRCS "*.h")
+
+INSTALL(FILES 
+  ${Eigen_PardisoSupport_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/PardisoSupport COMPONENT Devel
+  )
diff --git a/Eigen/src/PardisoSupport/PardisoSupport.h b/Eigen/src/PardisoSupport/PardisoSupport.h
new file mode 100644
index 0000000..18cd7d8
--- /dev/null
+++ b/Eigen/src/PardisoSupport/PardisoSupport.h
@@ -0,0 +1,592 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL PARDISO
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_PARDISOSUPPORT_H
+#define EIGEN_PARDISOSUPPORT_H
+
+namespace Eigen { 
+
+template<typename _MatrixType> class PardisoLU;
+template<typename _MatrixType, int Options=Upper> class PardisoLLT;
+template<typename _MatrixType, int Options=Upper> class PardisoLDLT;
+
+namespace internal
+{
+  template<typename Index>
+  struct pardiso_run_selector
+  {
+    static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
+                      Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
+    {
+      Index error = 0;
+      ::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
+      return error;
+    }
+  };
+  template<>
+  struct pardiso_run_selector<long long int>
+  {
+    typedef long long int Index;
+    static Index run( _MKL_DSS_HANDLE_t pt, Index maxfct, Index mnum, Index type, Index phase, Index n, void *a,
+                      Index *ia, Index *ja, Index *perm, Index nrhs, Index *iparm, Index msglvl, void *b, void *x)
+    {
+      Index error = 0;
+      ::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);
+      return error;
+    }
+  };
+
+  template<class Pardiso> struct pardiso_traits;
+
+  template<typename _MatrixType>
+  struct pardiso_traits< PardisoLU<_MatrixType> >
+  {
+    typedef _MatrixType MatrixType;
+    typedef typename _MatrixType::Scalar Scalar;
+    typedef typename _MatrixType::RealScalar RealScalar;
+    typedef typename _MatrixType::Index Index;
+  };
+
+  template<typename _MatrixType, int Options>
+  struct pardiso_traits< PardisoLLT<_MatrixType, Options> >
+  {
+    typedef _MatrixType MatrixType;
+    typedef typename _MatrixType::Scalar Scalar;
+    typedef typename _MatrixType::RealScalar RealScalar;
+    typedef typename _MatrixType::Index Index;
+  };
+
+  template<typename _MatrixType, int Options>
+  struct pardiso_traits< PardisoLDLT<_MatrixType, Options> >
+  {
+    typedef _MatrixType MatrixType;
+    typedef typename _MatrixType::Scalar Scalar;
+    typedef typename _MatrixType::RealScalar RealScalar;
+    typedef typename _MatrixType::Index Index;    
+  };
+
+}
+
+template<class Derived>
+class PardisoImpl
+{
+    typedef internal::pardiso_traits<Derived> Traits;
+  public:
+    typedef typename Traits::MatrixType MatrixType;
+    typedef typename Traits::Scalar Scalar;
+    typedef typename Traits::RealScalar RealScalar;
+    typedef typename Traits::Index Index;
+    typedef SparseMatrix<Scalar,RowMajor,Index> SparseMatrixType;
+    typedef Matrix<Scalar,Dynamic,1> VectorType;
+    typedef Matrix<Index, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+    typedef Matrix<Index, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+    typedef Array<Index,64,1,DontAlign> ParameterType;
+    enum {
+      ScalarIsComplex = NumTraits<Scalar>::IsComplex
+    };
+
+    PardisoImpl()
+    {
+      eigen_assert((sizeof(Index) >= sizeof(_INTEGER_t) && sizeof(Index) <= 8) && "Non-supported index type");
+      m_iparm.setZero();
+      m_msglvl = 0; // No output
+      m_initialized = false;
+    }
+
+    ~PardisoImpl()
+    {
+      pardisoRelease();
+    }
+
+    inline Index cols() const { return m_size; }
+    inline Index rows() const { return m_size; }
+  
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_initialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+
+    /** \warning for advanced usage only.
+      * \returns a reference to the parameter array controlling PARDISO.
+      * See the PARDISO manual for details. */
+    ParameterType& pardisoParameterArray()
+    {
+      return m_iparm;
+    }
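+
+    // For instance (a sketch; iparm[7] caps the number of iterative refinement
+    // steps, cf. pardisoInit() below):
+    //   solver.pardisoParameterArray()[7] = 4;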
+    
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving for several problems having the same structure.
+      * 
+      * \sa factorize()
+      */
+    Derived& analyzePattern(const MatrixType& matrix);
+    
+    /** Performs a numeric decomposition of \a matrix.
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    Derived& factorize(const MatrixType& matrix);
+
+    Derived& compute(const MatrixType& matrix);
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<PardisoImpl, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_initialized && "Pardiso solver is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<PardisoImpl, Rhs>(*this, b.derived());
+    }
+
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<PardisoImpl, Rhs>
+    solve(const SparseMatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_initialized && "Pardiso solver is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "PardisoImpl::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::sparse_solve_retval<PardisoImpl, Rhs>(*this, b.derived());
+    }
+
+    Derived& derived()
+    {
+      return *static_cast<Derived*>(this);
+    }
+    const Derived& derived() const
+    {
+      return *static_cast<const Derived*>(this);
+    }
+
+    template<typename BDerived, typename XDerived>
+    bool _solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const;
+
+  protected:
+    void pardisoRelease()
+    {
+      if(m_initialized) // Factorization ran at least once
+      {
+        internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, -1, m_size, 0, 0, 0, m_perm.data(), 0,
+                                                   m_iparm.data(), m_msglvl, 0, 0);
+      }
+    }
+
+    void pardisoInit(int type)
+    {
+      m_type = type;
+      bool symmetric = std::abs(m_type) < 10;
+      m_iparm[0] = 1;   // No solver default
+      m_iparm[1] = 3;   // use Metis for the ordering
+      m_iparm[2] = 1;   // Numbers of processors, value of OMP_NUM_THREADS
+      m_iparm[3] = 0;   // No iterative-direct algorithm
+      m_iparm[4] = 0;   // No user fill-in reducing permutation
+      m_iparm[5] = 0;   // Write solution into x
+      m_iparm[6] = 0;   // Not in use
+      m_iparm[7] = 2;   // Max numbers of iterative refinement steps
+      m_iparm[8] = 0;   // Not in use
+      m_iparm[9] = 13;  // Perturb the pivot elements with 1E-13
+      m_iparm[10] = symmetric ? 0 : 1; // Use nonsymmetric permutation and scaling MPS
+      m_iparm[11] = 0;  // Not in use
+      m_iparm[12] = symmetric ? 0 : 1;  // Maximum weighted matching algorithm is switched-off (default for symmetric).
+                                        // Try m_iparm[12] = 1 in case of inappropriate accuracy
+      m_iparm[13] = 0;  // Output: Number of perturbed pivots
+      m_iparm[14] = 0;  // Not in use
+      m_iparm[15] = 0;  // Not in use
+      m_iparm[16] = 0;  // Not in use
+      m_iparm[17] = -1; // Output: Number of nonzeros in the factor LU
+      m_iparm[18] = -1; // Output: Mflops for LU factorization
+      m_iparm[19] = 0;  // Output: Numbers of CG Iterations
+      
+      m_iparm[20] = 0;  // 1x1 pivoting
+      m_iparm[26] = 0;  // No matrix checker
+      m_iparm[27] = (sizeof(RealScalar) == 4) ? 1 : 0;
+      m_iparm[34] = 1;  // C indexing
+      m_iparm[59] = 1;  // Automatic switch between In-Core and Out-of-Core modes
+    }
+
+  protected:
+    // cached data to reduce reallocation, etc.
+    
+    void manageErrorCode(Index error)
+    {
+      switch(error)
+      {
+        case 0:
+          m_info = Success;
+          break;
+        case -4:
+        case -7:
+          m_info = NumericalIssue;
+          break;
+        default:
+          m_info = InvalidInput;
+      }
+    }
+
+    mutable SparseMatrixType m_matrix;
+    ComputationInfo m_info;
+    bool m_initialized, m_analysisIsOk, m_factorizationIsOk;
+    Index m_type, m_msglvl;
+    mutable void *m_pt[64];
+    mutable ParameterType m_iparm;
+    mutable IntColVectorType m_perm;
+    Index m_size;
+    
+  private:
+    PardisoImpl(PardisoImpl &) {}
+};
+
+template<class Derived>
+Derived& PardisoImpl<Derived>::compute(const MatrixType& a)
+{
+  m_size = a.rows();
+  eigen_assert(a.rows() == a.cols());
+
+  pardisoRelease();
+  memset(m_pt, 0, sizeof(m_pt));
+  m_perm.setZero(m_size);
+  derived().getMatrix(a);
+  
+  Index error;
+  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 12, m_size,
+                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+                                                     m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
+
+  manageErrorCode(error);
+  m_analysisIsOk = true;
+  m_factorizationIsOk = true;
+  m_initialized = true;
+  return derived();
+}
+
+template<class Derived>
+Derived& PardisoImpl<Derived>::analyzePattern(const MatrixType& a)
+{
+  m_size = a.rows();
+  eigen_assert(m_size == a.cols());
+
+  pardisoRelease();
+  memset(m_pt, 0, sizeof(m_pt));
+  m_perm.setZero(m_size);
+  derived().getMatrix(a);
+  
+  Index error;
+  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 11, m_size,
+                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+                                                     m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
+  
+  manageErrorCode(error);
+  m_analysisIsOk = true;
+  m_factorizationIsOk = false;
+  m_initialized = true;
+  return derived();
+}
+
+template<class Derived>
+Derived& PardisoImpl<Derived>::factorize(const MatrixType& a)
+{
+  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+  eigen_assert(m_size == a.rows() && m_size == a.cols());
+  
+  derived().getMatrix(a);
+
+  Index error;  
+  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 22, m_size,
+                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+                                                     m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);
+  
+  manageErrorCode(error);
+  m_factorizationIsOk = true;
+  return derived();
+}
+
+template<class Base>
+template<typename BDerived,typename XDerived>
+bool PardisoImpl<Base>::_solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const
+{
+  if(m_iparm[0] == 0) // Factorization was not computed
+    return false;
+
+  //Index n = m_matrix.rows();
+  Index nrhs = Index(b.cols());
+  eigen_assert(m_size==b.rows());
+  eigen_assert(((MatrixBase<BDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major right hand sides are not supported");
+  eigen_assert(((MatrixBase<XDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && "Row-major matrices of unknowns are not supported");
+  eigen_assert(((nrhs == 1) || b.outerStride() == b.rows()));
+
+
+//  switch (transposed) {
+//    case SvNoTrans    : m_iparm[11] = 0 ; break;
+//    case SvTranspose  : m_iparm[11] = 2 ; break;
+//    case SvAdjoint    : m_iparm[11] = 1 ; break;
+//    default:
+//      //std::cerr << "Eigen: transposition  option \"" << transposed << "\" not supported by the PARDISO backend\n";
+//      m_iparm[11] = 0;
+//  }
+
+  Scalar* rhs_ptr = const_cast<Scalar*>(b.derived().data());
+  Matrix<Scalar,Dynamic,Dynamic,ColMajor> tmp;
+  
+  // Pardiso cannot solve in-place
+  if(rhs_ptr == x.derived().data())
+  {
+    tmp = b;
+    rhs_ptr = tmp.data();
+  }
+  
+  Index error;
+  error = internal::pardiso_run_selector<Index>::run(m_pt, 1, 1, m_type, 33, m_size,
+                                                     m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),
+                                                     m_perm.data(), nrhs, m_iparm.data(), m_msglvl,
+                                                     rhs_ptr, x.derived().data());
+
+  return error==0;
+}
+
+
+/** \ingroup PardisoSupport_Module
+  * \class PardisoLU
+  * \brief A sparse direct LU factorization and solver based on the PARDISO library
+  *
+  * This class allows solving sparse linear problems of the form A.X = B via a direct LU factorization
+  * using the Intel MKL PARDISO library. The sparse matrix A must be square and invertible.
+  * The vectors or matrices X and B can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
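+  *
+  * A minimal calling sequence (a sketch, assuming MKL PARDISO is available):
+  * \code
+  * SparseMatrix<double> A;  VectorXd b, x;      // fill A and b ...
+  * PardisoLU<SparseMatrix<double> > solver;
+  * solver.compute(A);
+  * x = solver.solve(b);
+  * \endcode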
+  *
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename MatrixType>
+class PardisoLU : public PardisoImpl< PardisoLU<MatrixType> >
+{
+  protected:
+    typedef PardisoImpl< PardisoLU<MatrixType> > Base;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::RealScalar RealScalar;
+    using Base::pardisoInit;
+    using Base::m_matrix;
+    friend class PardisoImpl< PardisoLU<MatrixType> >;
+
+  public:
+
+    using Base::compute;
+    using Base::solve;
+
+    PardisoLU()
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? 13 : 11);
+    }
+
+    PardisoLU(const MatrixType& matrix)
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? 13 : 11);
+      compute(matrix);
+    }
+  protected:
+    void getMatrix(const MatrixType& matrix)
+    {
+      m_matrix = matrix;
+    }
+    
+  private:
+    PardisoLU(PardisoLU& ) {}
+};
+
+/** \ingroup PardisoSupport_Module
+  * \class PardisoLLT
+  * \brief A sparse direct Cholesky (LLT) factorization and solver based on the PARDISO library
+  *
+  * This class allows solving sparse linear problems of the form A.X = B via a LL^T Cholesky factorization
+  * using the Intel MKL PARDISO library. The sparse matrix A must be selfadjoint and positive definite.
+  * The vectors or matrices X and B can be either dense or sparse.
+  *
+  * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam UpLo can be any bitwise combination of Upper, Lower. The default is Upper, meaning only the upper triangular part has to be used.
+  *         Upper|Lower can be used to indicate that both triangular parts can be used as input.
+  *
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename MatrixType, int _UpLo>
+class PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >
+{
+  protected:
+    typedef PardisoImpl< PardisoLLT<MatrixType,_UpLo> > Base;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::Index Index;
+    typedef typename Base::RealScalar RealScalar;
+    using Base::pardisoInit;
+    using Base::m_matrix;
+    friend class PardisoImpl< PardisoLLT<MatrixType,_UpLo> >;
+
+  public:
+
+    enum { UpLo = _UpLo };
+    using Base::compute;
+    using Base::solve;
+
+    PardisoLLT()
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? 4 : 2);
+    }
+
+    PardisoLLT(const MatrixType& matrix)
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? 4 : 2);
+      compute(matrix);
+    }
+    
+  protected:
+    
+    void getMatrix(const MatrixType& matrix)
+    {
+      // PARDISO supports only upper, row-major matrices
+      PermutationMatrix<Dynamic,Dynamic,Index> p_null;
+      m_matrix.resize(matrix.rows(), matrix.cols());
+      m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
+    }
+    
+  private:
+    PardisoLLT(PardisoLLT& ) {}
+};
+
+/** \ingroup PardisoSupport_Module
+  * \class PardisoLDLT
+  * \brief A sparse direct Cholesky (LDLT) factorization and solver based on the PARDISO library
+  *
+  * This class allows solving sparse linear problems of the form A.X = B via a LDL^T Cholesky factorization
+  * using the Intel MKL PARDISO library. The sparse matrix A is assumed to be selfadjoint and positive definite.
+  * For complex matrices, A can also be merely symmetric; see the \a Options template parameter.
+  * The vectors or matrices X and B can be either dense or sparse.
+  *
+  * \tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam Options can be any bitwise combination of Upper, Lower, and Symmetric. The default is Upper, meaning only the upper triangular part has to be used.
+  *         Symmetric can be used for symmetric, non-selfadjoint complex matrices, the default being to assume a selfadjoint matrix.
+  *         Upper|Lower can be used to indicate that both triangular parts can be used as input.
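+  *
+  * For instance, to let PARDISO read both triangular parts of a selfadjoint
+  * matrix (a sketch):
+  * \code
+  * PardisoLDLT<SparseMatrix<double>, Upper|Lower> solver;
+  * solver.compute(A);
+  * x = solver.solve(b);
+  * \endcode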
+  *
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename MatrixType, int Options>
+class PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >
+{
+  protected:
+    typedef PardisoImpl< PardisoLDLT<MatrixType,Options> > Base;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::Index Index;
+    typedef typename Base::RealScalar RealScalar;
+    using Base::pardisoInit;
+    using Base::m_matrix;
+    friend class PardisoImpl< PardisoLDLT<MatrixType,Options> >;
+
+  public:
+
+    using Base::compute;
+    using Base::solve;
+    enum { UpLo = Options&(Upper|Lower) };
+
+    PardisoLDLT()
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2);
+    }
+
+    PardisoLDLT(const MatrixType& matrix)
+      : Base()
+    {
+      pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2);
+      compute(matrix);
+    }
+    
+    void getMatrix(const MatrixType& matrix)
+    {
+      // PARDISO supports only upper, row-major matrices
+      PermutationMatrix<Dynamic,Dynamic,Index> p_null;
+      m_matrix.resize(matrix.rows(), matrix.cols());
+      m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);
+    }
+    
+  private:
+    PardisoLDLT(PardisoLDLT& ) {}
+};
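+
+// A minimal usage sketch for PardisoLDLT (an illustration under the same
+// assumption that MKL PARDISO is linked). With the default Upper option only
+// the upper triangular part of A needs to be stored, as for this 1D Laplacian:
+//
+//   const int n = 100;
+//   std::vector<Eigen::Triplet<double> > coeffs;
+//   for(int i = 0; i < n; ++i)
+//   {
+//     coeffs.push_back(Eigen::Triplet<double>(i, i, 2.0));
+//     if(i+1 < n) coeffs.push_back(Eigen::Triplet<double>(i, i+1, -1.0)); // upper part only
+//   }
+//   Eigen::SparseMatrix<double> A(n, n);
+//   A.setFromTriplets(coeffs.begin(), coeffs.end());
+//   Eigen::PardisoLDLT<Eigen::SparseMatrix<double> > ldlt(A);
+//   Eigen::VectorXd x = ldlt.solve(Eigen::VectorXd::Constant(n, 1.0));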
+
+namespace internal {
+  
+template<typename _Derived, typename Rhs>
+struct solve_retval<PardisoImpl<_Derived>, Rhs>
+  : solve_retval_base<PardisoImpl<_Derived>, Rhs>
+{
+  typedef PardisoImpl<_Derived> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+template<typename Derived, typename Rhs>
+struct sparse_solve_retval<PardisoImpl<Derived>, Rhs>
+  : sparse_solve_retval_base<PardisoImpl<Derived>, Rhs>
+{
+  typedef PardisoImpl<Derived> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    this->defaultEvalTo(dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_PARDISOSUPPORT_H
diff --git a/Eigen/src/QR/CMakeLists.txt b/Eigen/src/QR/CMakeLists.txt
new file mode 100644
index 0000000..96f43d7
--- /dev/null
+++ b/Eigen/src/QR/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_QR_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_QR_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/QR COMPONENT Devel
+  )
diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h
new file mode 100644
index 0000000..567eab7
--- /dev/null
+++ b/Eigen/src/QR/ColPivHouseholderQR.h
@@ -0,0 +1,580 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_H
+#define EIGEN_COLPIVOTINGHOUSEHOLDERQR_H
+
+namespace Eigen { 
+
+/** \ingroup QR_Module
+  *
+  * \class ColPivHouseholderQR
+  *
+  * \brief Householder rank-revealing QR decomposition of a matrix with column-pivoting
+  *
+  * \param MatrixType the type of the matrix of which we are computing the QR decomposition
+  *
+  * This class performs a rank-revealing QR decomposition of a matrix \b A into matrices \b P, \b Q and \b R
+  * such that 
+  * \f[
+  *  \mathbf{A} \, \mathbf{P} = \mathbf{Q} \, \mathbf{R}
+  * \f]
+  * by using Householder transformations. Here, \b P is a permutation matrix, \b Q a unitary matrix and \b R an 
+  * upper triangular matrix.
+  *
+  * This decomposition performs column pivoting in order to be rank-revealing and improve
+  * numerical stability. It is slower than HouseholderQR, and faster than FullPivHouseholderQR.
+  *
+  * \sa MatrixBase::colPivHouseholderQr()
+  */
+template<typename _MatrixType> class ColPivHouseholderQR
+{
+  public:
+
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, Options, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
+    typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
+    typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
+    typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;
+    typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
+    typedef typename internal::plain_row_type<MatrixType, RealScalar>::type RealRowVectorType;
+    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType;
+    
+  private:
+    
+    typedef typename PermutationType::Index PermIndexType;
+    
+  public:
+
+    /**
+    * \brief Default Constructor.
+    *
+    * The default constructor is useful in cases in which the user intends to
+    * perform decompositions via ColPivHouseholderQR::compute(const MatrixType&).
+    */
+    ColPivHouseholderQR()
+      : m_qr(),
+        m_hCoeffs(),
+        m_colsPermutation(),
+        m_colsTranspositions(),
+        m_temp(),
+        m_colSqNorms(),
+        m_isInitialized(false),
+        m_usePrescribedThreshold(false) {}
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa ColPivHouseholderQR()
+      */
+    ColPivHouseholderQR(Index rows, Index cols)
+      : m_qr(rows, cols),
+        m_hCoeffs((std::min)(rows,cols)),
+        m_colsPermutation(PermIndexType(cols)),
+        m_colsTranspositions(cols),
+        m_temp(cols),
+        m_colSqNorms(cols),
+        m_isInitialized(false),
+        m_usePrescribedThreshold(false) {}
+
+    /** \brief Constructs a QR factorization from a given matrix
+      *
+      * This constructor computes the QR factorization of the matrix \a matrix by calling
+      * the method compute(). It is a short cut for:
+      * 
+      * \code
+      * ColPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
+      * qr.compute(matrix);
+      * \endcode
+      * 
+      * \sa compute()
+      */
+    ColPivHouseholderQR(const MatrixType& matrix)
+      : m_qr(matrix.rows(), matrix.cols()),
+        m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
+        m_colsPermutation(PermIndexType(matrix.cols())),
+        m_colsTranspositions(matrix.cols()),
+        m_temp(matrix.cols()),
+        m_colSqNorms(matrix.cols()),
+        m_isInitialized(false),
+        m_usePrescribedThreshold(false)
+    {
+      compute(matrix);
+    }
+
+    /** This method finds a solution x to the equation Ax=b, if one exists, where A is the matrix
+      * of which *this is the QR decomposition.
+      *
+      * \param b the right-hand-side of the equation to solve.
+      *
+      * \returns a solution.
+      *
+      * \note The case where b is a matrix is not yet implemented. Also, this
+      *       code is space inefficient.
+      *
+      * \note_about_checking_solutions
+      *
+      * \note_about_arbitrary_choice_of_solution
+      *
+      * Example: \include ColPivHouseholderQR_solve.cpp
+      * Output: \verbinclude ColPivHouseholderQR_solve.out
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<ColPivHouseholderQR, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+      return internal::solve_retval<ColPivHouseholderQR, Rhs>(*this, b.derived());
+    }
+
+    HouseholderSequenceType householderQ(void) const;
+    HouseholderSequenceType matrixQ(void) const
+    {
+      return householderQ(); 
+    }
+
+    /** \returns a reference to the matrix where the Householder QR decomposition is stored
+      */
+    const MatrixType& matrixQR() const
+    {
+      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+      return m_qr;
+    }
+    
+    /** \returns a reference to the matrix where the result Householder QR is stored 
+     * \warning The strict lower part of this matrix contains internal values. 
+     * Only the upper triangular part should be referenced. To get it, use
+     * \code matrixR().template triangularView<Upper>() \endcode
+     * For rank-deficient matrices, use 
+     * \code 
+     * matrixR().topLeftCorner(rank(), rank()).template triangularView<Upper>() 
+     * \endcode
+     */
+    const MatrixType& matrixR() const
+    {
+      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+      return m_qr;
+    }
+    
+    ColPivHouseholderQR& compute(const MatrixType& matrix);
+
+    /** \returns a const reference to the column permutation matrix */
+    const PermutationType& colsPermutation() const
+    {
+      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+      return m_colsPermutation;
+    }
+
+    /** \returns the absolute value of the determinant of the matrix of which
+      * *this is the QR decomposition. It has only linear complexity
+      * (that is, O(n) where n is the dimension of the square matrix)
+      * as the QR decomposition has already been computed.
+      *
+      * \note This is only for square matrices.
+      *
+      * \warning a determinant can be very big or small, so for matrices
+      * of large enough dimension, there is a risk of overflow/underflow.
+      * One way to work around that is to use logAbsDeterminant() instead.
+      *
+      * \sa logAbsDeterminant(), MatrixBase::determinant()
+      */
+    typename MatrixType::RealScalar absDeterminant() const;
+
+    /** \returns the natural log of the absolute value of the determinant of the matrix of which
+      * *this is the QR decomposition. It has only linear complexity
+      * (that is, O(n) where n is the dimension of the square matrix)
+      * as the QR decomposition has already been computed.
+      *
+      * \note This is only for square matrices.
+      *
+      * \note This method is useful to work around the risk of overflow/underflow that's inherent
+      * to determinant computation.
+      *
+      * \sa absDeterminant(), MatrixBase::determinant()
+      */
+    typename MatrixType::RealScalar logAbsDeterminant() const;
+
+    /** \returns the rank of the matrix of which *this is the QR decomposition.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline Index rank() const
+    {
+      using std::abs;
+      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+      RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
+      Index result = 0;
+      for(Index i = 0; i < m_nonzero_pivots; ++i)
+        result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);
+      return result;
+    }
+
+    /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline Index dimensionOfKernel() const
+    {
+      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+      return cols() - rank();
+    }
+
+    /** \returns true if the matrix of which *this is the QR decomposition represents an injective
+      *          linear map, i.e. has trivial kernel; false otherwise.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline bool isInjective() const
+    {
+      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+      return rank() == cols();
+    }
+
+    /** \returns true if the matrix of which *this is the QR decomposition represents a surjective
+      *          linear map; false otherwise.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline bool isSurjective() const
+    {
+      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+      return rank() == rows();
+    }
+
+    /** \returns true if the matrix of which *this is the QR decomposition is invertible.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline bool isInvertible() const
+    {
+      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+      return isInjective() && isSurjective();
+    }
+
+    /** \returns the inverse of the matrix of which *this is the QR decomposition.
+      *
+      * \note If this matrix is not invertible, the returned matrix has undefined coefficients.
+      *       Use isInvertible() to first determine whether this matrix is invertible.
+      */
+    inline const
+    internal::solve_retval<ColPivHouseholderQR, typename MatrixType::IdentityReturnType>
+    inverse() const
+    {
+      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+      return internal::solve_retval<ColPivHouseholderQR,typename MatrixType::IdentityReturnType>
+               (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols()));
+    }
+
+    inline Index rows() const { return m_qr.rows(); }
+    inline Index cols() const { return m_qr.cols(); }
+    
+    /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
+      * 
+      * For advanced uses only.
+      */
+    const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
+
+    /** Allows you to prescribe a threshold to be used by certain methods, such as rank(),
+      * which need to determine when pivots are to be considered nonzero. This is not used for the
+      * QR decomposition itself.
+      *
+      * When it needs to get the threshold value, Eigen calls threshold(). By default, this
+      * uses a formula to automatically determine a reasonable threshold.
+      * Once you have called the present method setThreshold(const RealScalar&),
+      * your value is used instead.
+      *
+      * \param threshold The new value to use as the threshold.
+      *
+      * A pivot will be considered nonzero if its absolute value is strictly greater than
+      *  \f$ threshold \times \vert maxpivot \vert \f$
+      * where maxpivot is the biggest pivot.
+      *
+      * If you want to come back to the default behavior, call setThreshold(Default_t)
+      */
+    ColPivHouseholderQR& setThreshold(const RealScalar& threshold)
+    {
+      m_usePrescribedThreshold = true;
+      m_prescribedThreshold = threshold;
+      return *this;
+    }
+
+    /** Allows you to come back to the default behavior, letting Eigen use its default formula for
+      * determining the threshold.
+      *
+      * You should pass the special object Eigen::Default as parameter here.
+      * \code qr.setThreshold(Eigen::Default); \endcode
+      *
+      * See the documentation of setThreshold(const RealScalar&).
+      */
+    ColPivHouseholderQR& setThreshold(Default_t)
+    {
+      m_usePrescribedThreshold = false;
+      return *this;
+    }
+
+    /** Returns the threshold that will be used by certain methods such as rank().
+      *
+      * See the documentation of setThreshold(const RealScalar&).
+      */
+    RealScalar threshold() const
+    {
+      eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+      return m_usePrescribedThreshold ? m_prescribedThreshold
+      // this formula comes from experimenting (see "LU precision tuning" thread on the list)
+      // and turns out to be identical to Higham's formula used already in LDLt.
+                                      : NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize());
+    }
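+
+    // Usage sketch for the threshold machinery (an illustration; 'qr' is a
+    // computed ColPivHouseholderQR<Eigen::MatrixXd>): with a prescribed
+    // threshold t, rank() counts a pivot p as nonzero iff abs(p) > t * maxPivot().
+    //
+    //   qr.setThreshold(1e-8);            // treat tiny pivots as zero
+    //   Index rankStrict = qr.rank();
+    //   qr.setThreshold(Eigen::Default);  // restore the automatic epsilon-based formula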
+
+    /** \returns the number of nonzero pivots in the QR decomposition.
+      * Here nonzero is meant in the exact sense, not in a fuzzy sense.
+      * So that notion isn't really intrinsically interesting, but it is
+      * still useful when implementing algorithms.
+      *
+      * \sa rank()
+      */
+    inline Index nonzeroPivots() const
+    {
+      eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+      return m_nonzero_pivots;
+    }
+
+    /** \returns the absolute value of the biggest pivot, i.e. the biggest
+      *          diagonal coefficient of R.
+      */
+    RealScalar maxPivot() const { return m_maxpivot; }
+    
+    /** \brief Reports whether the QR factorization was successful.
+      *
+      * \note This function always returns \c Success. It is provided for compatibility 
+      * with other factorization routines.
+      * \returns \c Success 
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return Success;
+    }
+
+  protected:
+    
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+    
+    MatrixType m_qr;
+    HCoeffsType m_hCoeffs;
+    PermutationType m_colsPermutation;
+    IntRowVectorType m_colsTranspositions;
+    RowVectorType m_temp;
+    RealRowVectorType m_colSqNorms;
+    bool m_isInitialized, m_usePrescribedThreshold;
+    RealScalar m_prescribedThreshold, m_maxpivot;
+    Index m_nonzero_pivots;
+    Index m_det_pq;
+};
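+
+// A minimal usage sketch (an illustration): solving an overdetermined system
+// in the least-squares sense with column-pivoting QR:
+//
+//   #include <Eigen/Dense>
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 3);
+//   Eigen::VectorXd b = Eigen::VectorXd::Random(6);
+//   Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
+//   Eigen::VectorXd x = qr.solve(b);   // minimizes |A*x - b| when A has full column rank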
+
+template<typename MatrixType>
+typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::absDeterminant() const
+{
+  using std::abs;
+  eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+  eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+  return abs(m_qr.diagonal().prod());
+}
+
+template<typename MatrixType>
+typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDeterminant() const
+{
+  eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+  eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+  return m_qr.diagonal().cwiseAbs().array().log().sum();
+}
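+
+// Consistency sketch (an illustration; 'A' is a hypothetical square invertible
+// MatrixXd): the two accessors agree up to rounding, which is the intended way
+// to sidestep overflow/underflow on large matrices:
+//
+//   Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
+//   double d = qr.absDeterminant();
+//   double l = qr.logAbsDeterminant();   // std::exp(l) ~ d, but l never overflows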
+
+/** Performs the QR factorization of the given matrix \a matrix. The result of
+  * the factorization is stored into \c *this, and a reference to \c *this
+  * is returned.
+  *
+  * \sa class ColPivHouseholderQR, ColPivHouseholderQR(const MatrixType&)
+  */
+template<typename MatrixType>
+ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
+{
+  check_template_parameters();
+  
+  using std::abs;
+  Index rows = matrix.rows();
+  Index cols = matrix.cols();
+  Index size = matrix.diagonalSize();
+  
+  // the column permutation is stored as int indices, so just to be sure:
+  eigen_assert(cols<=NumTraits<int>::highest());
+
+  m_qr = matrix;
+  m_hCoeffs.resize(size);
+
+  m_temp.resize(cols);
+
+  m_colsTranspositions.resize(matrix.cols());
+  Index number_of_transpositions = 0;
+
+  m_colSqNorms.resize(cols);
+  for(Index k = 0; k < cols; ++k)
+    m_colSqNorms.coeffRef(k) = m_qr.col(k).squaredNorm();
+
+  RealScalar threshold_helper = m_colSqNorms.maxCoeff() * numext::abs2(NumTraits<Scalar>::epsilon()) / RealScalar(rows);
+
+  m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
+  m_maxpivot = RealScalar(0);
+
+  for(Index k = 0; k < size; ++k)
+  {
+    // first, we look up in our table m_colSqNorms which column has the biggest squared norm
+    Index biggest_col_index;
+    RealScalar biggest_col_sq_norm = m_colSqNorms.tail(cols-k).maxCoeff(&biggest_col_index);
+    biggest_col_index += k;
+
+    // since our table m_colSqNorms accumulates imprecision at every step, we must now recompute
+    // the actual squared norm of the selected column.
+    // Note that not doing so does result in solve() sometimes returning inf/nan values
+    // when running the unit test with 1000 repetitions.
+    biggest_col_sq_norm = m_qr.col(biggest_col_index).tail(rows-k).squaredNorm();
+
+    // we store that back into our table: it can't hurt to correct our table.
+    m_colSqNorms.coeffRef(biggest_col_index) = biggest_col_sq_norm;
+
+    // Track the number of meaningful pivots but do not stop the decomposition to make
+    // sure that the initial matrix is properly reproduced. See bug 941.
+    if(m_nonzero_pivots==size && biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
+      m_nonzero_pivots = k;
+
+    // apply the transposition to the columns
+    m_colsTranspositions.coeffRef(k) = biggest_col_index;
+    if(k != biggest_col_index) {
+      m_qr.col(k).swap(m_qr.col(biggest_col_index));
+      std::swap(m_colSqNorms.coeffRef(k), m_colSqNorms.coeffRef(biggest_col_index));
+      ++number_of_transpositions;
+    }
+
+    // generate the householder vector, store it below the diagonal
+    RealScalar beta;
+    m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
+
+    // apply the householder transformation to the diagonal coefficient
+    m_qr.coeffRef(k,k) = beta;
+
+    // remember the maximum absolute value of diagonal coefficients
+    if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta);
+
+    // apply the householder transformation
+    m_qr.bottomRightCorner(rows-k, cols-k-1)
+        .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1));
+
+    // update our table of squared norms of the columns
+    m_colSqNorms.tail(cols-k-1) -= m_qr.row(k).tail(cols-k-1).cwiseAbs2();
+  }
+
+  m_colsPermutation.setIdentity(PermIndexType(cols));
+  for(PermIndexType k = 0; k < size/*m_nonzero_pivots*/; ++k)
+    m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k)));
+
+  m_det_pq = (number_of_transpositions%2) ? -1 : 1;
+  m_isInitialized = true;
+
+  return *this;
+}
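+
+// Verification sketch (an illustration; 'A' is a hypothetical MatrixXd): after
+// compute(), the factors satisfy A * P = Q * R, which can be checked as:
+//
+//   Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
+//   Eigen::MatrixXd Q = qr.householderQ();
+//   Eigen::MatrixXd R = qr.matrixR().triangularView<Eigen::Upper>();
+//   double err = (Q * R - A * qr.colsPermutation()).norm();   // ~ machine precision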
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
+  : solve_retval_base<ColPivHouseholderQR<_MatrixType>, Rhs>
+{
+  EIGEN_MAKE_SOLVE_HELPERS(ColPivHouseholderQR<_MatrixType>,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    eigen_assert(rhs().rows() == dec().rows());
+
+    const Index cols = dec().cols(),
+                nonzero_pivots = dec().nonzeroPivots();
+
+    if(nonzero_pivots == 0)
+    {
+      dst.setZero();
+      return;
+    }
+
+    typename Rhs::PlainObject c(rhs());
+
+    // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
+    c.applyOnTheLeft(householderSequence(dec().matrixQR(), dec().hCoeffs())
+                     .setLength(dec().nonzeroPivots())
+                     .transpose()
+      );
+
+    dec().matrixR()
+       .topLeftCorner(nonzero_pivots, nonzero_pivots)
+       .template triangularView<Upper>()
+       .solveInPlace(c.topRows(nonzero_pivots));
+
+    for(Index i = 0; i < nonzero_pivots; ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i);
+    for(Index i = nonzero_pivots; i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero();
+  }
+};
+
+} // end namespace internal
+
+/** \returns the matrix Q as a sequence of householder transformations.
+  * You can extract only the meaningful part by using:
+  * \code qr.householderQ().setLength(qr.nonzeroPivots()) \endcode*/
+template<typename MatrixType>
+typename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHouseholderQR<MatrixType>
+  ::householderQ() const
+{
+  eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
+  return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
+}
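+
+// Sketch of the extraction above (an illustration; 'qr' is a computed
+// ColPivHouseholderQR<Eigen::MatrixXd>): applying the truncated Householder
+// sequence to a thin identity yields the economy-size Q factor:
+//
+//   Eigen::MatrixXd thinQ = qr.householderQ().setLength(qr.nonzeroPivots())
+//                           * Eigen::MatrixXd::Identity(qr.rows(), qr.rank());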
+
+/** \return the column-pivoting Householder QR decomposition of \c *this.
+  *
+  * \sa class ColPivHouseholderQR
+  */
+template<typename Derived>
+const ColPivHouseholderQR<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::colPivHouseholderQr() const
+{
+  return ColPivHouseholderQR<PlainObject>(eval());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_H
diff --git a/Eigen/src/QR/ColPivHouseholderQR_MKL.h b/Eigen/src/QR/ColPivHouseholderQR_MKL.h
new file mode 100644
index 0000000..b5b1983
--- /dev/null
+++ b/Eigen/src/QR/ColPivHouseholderQR_MKL.h
@@ -0,0 +1,99 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *    Householder QR decomposition of a matrix with column pivoting based on
+ *    LAPACKE_?geqp3 function.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_MKL_H
+#define EIGEN_COLPIVOTINGHOUSEHOLDERQR_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen { 
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_QR_COLPIV(EIGTYPE, MKLTYPE, MKLPREFIX, EIGCOLROW, MKLCOLROW) \
+template<> inline \
+ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >& \
+ColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >::compute( \
+              const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix) \
+\
+{ \
+  using std::abs; \
+  typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
+  typedef MatrixType::Scalar Scalar; \
+  typedef MatrixType::RealScalar RealScalar; \
+  Index rows = matrix.rows();\
+  Index cols = matrix.cols();\
+  Index size = matrix.diagonalSize();\
+\
+  m_qr = matrix;\
+  m_hCoeffs.resize(size);\
+\
+  m_colsTranspositions.resize(cols);\
+  /*Index number_of_transpositions = 0;*/ \
+\
+  m_nonzero_pivots = 0; \
+  m_maxpivot = RealScalar(0);\
+  m_colsPermutation.resize(cols); \
+  m_colsPermutation.indices().setZero(); \
+\
+  lapack_int lda = m_qr.outerStride(), i; \
+  lapack_int matrix_order = MKLCOLROW; \
+  LAPACKE_##MKLPREFIX##geqp3( matrix_order, rows, cols, (MKLTYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (MKLTYPE*)m_hCoeffs.data()); \
+  m_isInitialized = true; \
+  m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \
+  m_hCoeffs.adjointInPlace(); \
+  RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); \
+  lapack_int *perm = m_colsPermutation.indices().data(); \
+  for(i=0;i<size;i++) { \
+    m_nonzero_pivots += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);\
+  } \
+  for(i=0;i<cols;i++) perm[i]--;\
+\
+  /*m_det_pq = (number_of_transpositions%2) ? -1 : 1;  // TODO: It's not needed now; fix upon availability in Eigen */ \
+\
+  return *this; \
+}
+
+EIGEN_MKL_QR_COLPIV(double,   double,        d, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_QR_COLPIV(float,    float,         s, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_QR_COLPIV(dcomplex, MKL_Complex16, z, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_QR_COLPIV(scomplex, MKL_Complex8,  c, ColMajor, LAPACK_COL_MAJOR)
+
+EIGEN_MKL_QR_COLPIV(double,   double,        d, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_QR_COLPIV(float,    float,         s, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_QR_COLPIV(dcomplex, MKL_Complex16, z, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_QR_COLPIV(scomplex, MKL_Complex8,  c, RowMajor, LAPACK_ROW_MAJOR)
+
+} // end namespace Eigen
+
+#endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_MKL_H
diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h
new file mode 100644
index 0000000..0b39966
--- /dev/null
+++ b/Eigen/src/QR/FullPivHouseholderQR.h
@@ -0,0 +1,622 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H
+#define EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename MatrixType> struct FullPivHouseholderQRMatrixQReturnType;
+
+template<typename MatrixType>
+struct traits<FullPivHouseholderQRMatrixQReturnType<MatrixType> >
+{
+  typedef typename MatrixType::PlainObject ReturnType;
+};
+
+}
+
+/** \ingroup QR_Module
+  *
+  * \class FullPivHouseholderQR
+  *
+  * \brief Householder rank-revealing QR decomposition of a matrix with full pivoting
+  *
+  * \param MatrixType the type of the matrix of which we are computing the QR decomposition
+  *
+  * This class performs a rank-revealing QR decomposition of a matrix \b A into matrices \b P, \b Q and \b R
+  * such that 
+  * \f[
+  *  \mathbf{A} \, \mathbf{P} = \mathbf{Q} \, \mathbf{R}
+  * \f]
+  * by using Householder transformations. Here, \b P is a permutation matrix, \b Q a unitary matrix and \b R an 
+  * upper triangular matrix.
+  *
+  * This decomposition performs a very prudent full pivoting in order to be rank-revealing and achieve optimal
+  * numerical stability. The trade-off is that it is slower than HouseholderQR and ColPivHouseholderQR.
+  *
+  * \sa MatrixBase::fullPivHouseholderQr()
+  */
+template<typename _MatrixType> class FullPivHouseholderQR
+{
+  public:
+
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef internal::FullPivHouseholderQRMatrixQReturnType<MatrixType> MatrixQReturnType;
+    typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
+    typedef Matrix<Index, 1,
+                   EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime,RowsAtCompileTime), RowMajor, 1,
+                   EIGEN_SIZE_MIN_PREFER_FIXED(MaxColsAtCompileTime,MaxRowsAtCompileTime)> IntDiagSizeVectorType;
+    typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;
+    typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
+    typedef typename internal::plain_col_type<MatrixType>::type ColVectorType;
+
+    /** \brief Default Constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via FullPivHouseholderQR::compute(const MatrixType&).
+      */
+    FullPivHouseholderQR()
+      : m_qr(),
+        m_hCoeffs(),
+        m_rows_transpositions(),
+        m_cols_transpositions(),
+        m_cols_permutation(),
+        m_temp(),
+        m_isInitialized(false),
+        m_usePrescribedThreshold(false) {}
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa FullPivHouseholderQR()
+      */
+    FullPivHouseholderQR(Index rows, Index cols)
+      : m_qr(rows, cols),
+        m_hCoeffs((std::min)(rows,cols)),
+        m_rows_transpositions((std::min)(rows,cols)),
+        m_cols_transpositions((std::min)(rows,cols)),
+        m_cols_permutation(cols),
+        m_temp(cols),
+        m_isInitialized(false),
+        m_usePrescribedThreshold(false) {}
+
+    /** \brief Constructs a QR factorization from a given matrix
+      *
+      * This constructor computes the QR factorization of the matrix \a matrix by calling
+      * the method compute(). It is a short cut for:
+      * 
+      * \code
+      * FullPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
+      * qr.compute(matrix);
+      * \endcode
+      * 
+      * \sa compute()
+      */
+    FullPivHouseholderQR(const MatrixType& matrix)
+      : m_qr(matrix.rows(), matrix.cols()),
+        m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),
+        m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())),
+        m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())),
+        m_cols_permutation(matrix.cols()),
+        m_temp(matrix.cols()),
+        m_isInitialized(false),
+        m_usePrescribedThreshold(false)
+    {
+      compute(matrix);
+    }
+
+    /** This method finds a solution x to the equation Ax=b, where A is the matrix of which
+      * \c *this is the QR decomposition.
+      *
+      * \param b the right-hand-side of the equation to solve.
+      *
+      * \returns the exact or least-squares solution if the rank is greater than or equal to the number of columns of A,
+      * and an arbitrary solution otherwise.
+      *
+      * \note The case where b is a matrix is not yet implemented. Also, this
+      *       code is space inefficient.
+      *
+      * \note_about_checking_solutions
+      *
+      * \note_about_arbitrary_choice_of_solution
+      *
+      * Example: \include FullPivHouseholderQR_solve.cpp
+      * Output: \verbinclude FullPivHouseholderQR_solve.out
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<FullPivHouseholderQR, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+      return internal::solve_retval<FullPivHouseholderQR, Rhs>(*this, b.derived());
+    }
+
+    /** \returns Expression object representing the matrix Q
+      */
+    MatrixQReturnType matrixQ(void) const;
+
+    /** \returns a reference to the matrix where the Householder QR decomposition is stored
+      */
+    const MatrixType& matrixQR() const
+    {
+      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+      return m_qr;
+    }
+
+    FullPivHouseholderQR& compute(const MatrixType& matrix);
+
+    /** \returns a const reference to the column permutation matrix */
+    const PermutationType& colsPermutation() const
+    {
+      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+      return m_cols_permutation;
+    }
+
+    /** \returns a const reference to the vector of indices representing the rows transpositions */
+    const IntDiagSizeVectorType& rowsTranspositions() const
+    {
+      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+      return m_rows_transpositions;
+    }
+
+    /** \returns the absolute value of the determinant of the matrix of which
+      * *this is the QR decomposition. It has only linear complexity
+      * (that is, O(n) where n is the dimension of the square matrix)
+      * as the QR decomposition has already been computed.
+      *
+      * \note This is only for square matrices.
+      *
+      * \warning a determinant can be very big or small, so for matrices
+      * of large enough dimension, there is a risk of overflow/underflow.
+      * One way to work around that is to use logAbsDeterminant() instead.
+      *
+      * \sa logAbsDeterminant(), MatrixBase::determinant()
+      */
+    typename MatrixType::RealScalar absDeterminant() const;
+
+    /** \returns the natural log of the absolute value of the determinant of the matrix of which
+      * *this is the QR decomposition. It has only linear complexity
+      * (that is, O(n) where n is the dimension of the square matrix)
+      * as the QR decomposition has already been computed.
+      *
+      * \note This is only for square matrices.
+      *
+      * \note This method is useful to work around the risk of overflow/underflow that's inherent
+      * to determinant computation.
+      *
+      * \sa absDeterminant(), MatrixBase::determinant()
+      */
+    typename MatrixType::RealScalar logAbsDeterminant() const;
+
+    /** \returns the rank of the matrix of which *this is the QR decomposition.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline Index rank() const
+    {
+      using std::abs;
+      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+      RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();
+      Index result = 0;
+      for(Index i = 0; i < m_nonzero_pivots; ++i)
+        result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);
+      return result;
+    }
+
+    /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline Index dimensionOfKernel() const
+    {
+      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+      return cols() - rank();
+    }
+
+    /** \returns true if the matrix of which *this is the QR decomposition represents an injective
+      *          linear map, i.e. has trivial kernel; false otherwise.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline bool isInjective() const
+    {
+      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+      return rank() == cols();
+    }
+
+    /** \returns true if the matrix of which *this is the QR decomposition represents a surjective
+      *          linear map; false otherwise.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline bool isSurjective() const
+    {
+      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+      return rank() == rows();
+    }
+
+    /** \returns true if the matrix of which *this is the QR decomposition is invertible.
+      *
+      * \note This method has to determine which pivots should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline bool isInvertible() const
+    {
+      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+      return isInjective() && isSurjective();
+    }
+
+    /** \returns the inverse of the matrix of which *this is the QR decomposition.
+      *
+      * \note If this matrix is not invertible, the returned matrix has undefined coefficients.
+      *       Use isInvertible() to first determine whether this matrix is invertible.
+      */
+    inline const
+    internal::solve_retval<FullPivHouseholderQR, typename MatrixType::IdentityReturnType>
+    inverse() const
+    {
+      eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+      return internal::solve_retval<FullPivHouseholderQR,typename MatrixType::IdentityReturnType>
+               (*this, MatrixType::Identity(m_qr.rows(), m_qr.cols()));
+    }
+
+    inline Index rows() const { return m_qr.rows(); }
+    inline Index cols() const { return m_qr.cols(); }
+    
+    /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
+      * 
+      * For advanced uses only.
+      */
+    const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
+
+    /** Allows you to prescribe a threshold to be used by certain methods, such as rank(),
+      * which need to determine when pivots are to be considered nonzero. This is not used for the
+      * QR decomposition itself.
+      *
+      * When it needs to get the threshold value, Eigen calls threshold(). By default, this
+      * uses a formula to automatically determine a reasonable threshold.
+      * Once you have called the present method setThreshold(const RealScalar&),
+      * your value is used instead.
+      *
+      * \param threshold The new value to use as the threshold.
+      *
+      * A pivot will be considered nonzero if its absolute value is strictly greater than
+      *  \f$ threshold \times \vert maxpivot \vert \f$
+      * where maxpivot is the biggest pivot.
+      *
+      * If you want to come back to the default behavior, call setThreshold(Default_t)
+      */
+    FullPivHouseholderQR& setThreshold(const RealScalar& threshold)
+    {
+      m_usePrescribedThreshold = true;
+      m_prescribedThreshold = threshold;
+      return *this;
+    }
+
+    /** Allows you to come back to the default behavior, letting Eigen use its default formula for
+      * determining the threshold.
+      *
+      * You should pass the special object Eigen::Default as parameter here.
+      * \code qr.setThreshold(Eigen::Default); \endcode
+      *
+      * See the documentation of setThreshold(const RealScalar&).
+      */
+    FullPivHouseholderQR& setThreshold(Default_t)
+    {
+      m_usePrescribedThreshold = false;
+      return *this;
+    }
+
+    /** Returns the threshold that will be used by certain methods such as rank().
+      *
+      * See the documentation of setThreshold(const RealScalar&).
+      */
+    RealScalar threshold() const
+    {
+      eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+      return m_usePrescribedThreshold ? m_prescribedThreshold
+      // this formula comes from experimenting (see "LU precision tuning" thread on the list)
+      // and turns out to be identical to Higham's formula used already in LDLt.
+                                      : NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize());
+    }
+
+    /** \returns the number of nonzero pivots in the QR decomposition.
+      * Here nonzero is meant in the exact sense, not in a fuzzy sense.
+      * So that notion isn't really intrinsically interesting, but it is
+      * still useful when implementing algorithms.
+      *
+      * \sa rank()
+      */
+    inline Index nonzeroPivots() const
+    {
+      eigen_assert(m_isInitialized && "LU is not initialized.");
+      return m_nonzero_pivots;
+    }
+
+    /** \returns the absolute value of the biggest pivot, i.e. the biggest
+      *          diagonal coefficient of R.
+      */
+    RealScalar maxPivot() const { return m_maxpivot; }
+
+  protected:
+    
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+    
+    MatrixType m_qr;
+    HCoeffsType m_hCoeffs;
+    IntDiagSizeVectorType m_rows_transpositions;
+    IntDiagSizeVectorType m_cols_transpositions;
+    PermutationType m_cols_permutation;
+    RowVectorType m_temp;
+    bool m_isInitialized, m_usePrescribedThreshold;
+    RealScalar m_prescribedThreshold, m_maxpivot;
+    Index m_nonzero_pivots;
+    RealScalar m_precision;
+    Index m_det_pq;
+};
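+
+// A minimal usage sketch (an illustration): full pivoting is the slowest but
+// most numerically careful QR, suited to small or badly conditioned problems:
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
+//   Eigen::FullPivHouseholderQR<Eigen::MatrixXd> qr(A);
+//   if(qr.isInvertible())
+//   {
+//     Eigen::MatrixXd Ainv = qr.inverse();
+//   }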
+
+template<typename MatrixType>
+typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::absDeterminant() const
+{
+  using std::abs;
+  eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+  eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+  return abs(m_qr.diagonal().prod());
+}
+
+template<typename MatrixType>
+typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDeterminant() const
+{
+  eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+  eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+  return m_qr.diagonal().cwiseAbs().array().log().sum();
+}
+
+/** Performs the QR factorization of the given matrix \a matrix. The result of
+  * the factorization is stored into \c *this, and a reference to \c *this
+  * is returned.
+  *
+  * \sa class FullPivHouseholderQR, FullPivHouseholderQR(const MatrixType&)
+  */
+template<typename MatrixType>
+FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
+{
+  check_template_parameters();
+  
+  using std::abs;
+  Index rows = matrix.rows();
+  Index cols = matrix.cols();
+  Index size = (std::min)(rows,cols);
+
+  m_qr = matrix;
+  m_hCoeffs.resize(size);
+
+  m_temp.resize(cols);
+
+  m_precision = NumTraits<Scalar>::epsilon() * RealScalar(size);
+
+  m_rows_transpositions.resize(size);
+  m_cols_transpositions.resize(size);
+  Index number_of_transpositions = 0;
+
+  RealScalar biggest(0);
+
+  m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)
+  m_maxpivot = RealScalar(0);
+
+  for (Index k = 0; k < size; ++k)
+  {
+    Index row_of_biggest_in_corner, col_of_biggest_in_corner;
+    RealScalar biggest_in_corner;
+
+    biggest_in_corner = m_qr.bottomRightCorner(rows-k, cols-k)
+                            .cwiseAbs()
+                            .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);
+    row_of_biggest_in_corner += k;
+    col_of_biggest_in_corner += k;
+    if(k==0) biggest = biggest_in_corner;
+
+    // if the corner is negligible, then we have less than full rank, and we can finish early
+    if(internal::isMuchSmallerThan(biggest_in_corner, biggest, m_precision))
+    {
+      m_nonzero_pivots = k;
+      for(Index i = k; i < size; i++)
+      {
+        m_rows_transpositions.coeffRef(i) = i;
+        m_cols_transpositions.coeffRef(i) = i;
+        m_hCoeffs.coeffRef(i) = Scalar(0);
+      }
+      break;
+    }
+
+    m_rows_transpositions.coeffRef(k) = row_of_biggest_in_corner;
+    m_cols_transpositions.coeffRef(k) = col_of_biggest_in_corner;
+    if(k != row_of_biggest_in_corner) {
+      m_qr.row(k).tail(cols-k).swap(m_qr.row(row_of_biggest_in_corner).tail(cols-k));
+      ++number_of_transpositions;
+    }
+    if(k != col_of_biggest_in_corner) {
+      m_qr.col(k).swap(m_qr.col(col_of_biggest_in_corner));
+      ++number_of_transpositions;
+    }
+
+    RealScalar beta;
+    m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);
+    m_qr.coeffRef(k,k) = beta;
+
+    // remember the maximum absolute value of diagonal coefficients
+    if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta);
+
+    m_qr.bottomRightCorner(rows-k, cols-k-1)
+        .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1));
+  }
+
+  m_cols_permutation.setIdentity(cols);
+  for(Index k = 0; k < size; ++k)
+    m_cols_permutation.applyTranspositionOnTheRight(k, m_cols_transpositions.coeff(k));
+
+  m_det_pq = (number_of_transpositions%2) ? -1 : 1;
+  m_isInitialized = true;
+
+  return *this;
+}
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
+  : solve_retval_base<FullPivHouseholderQR<_MatrixType>, Rhs>
+{
+  EIGEN_MAKE_SOLVE_HELPERS(FullPivHouseholderQR<_MatrixType>,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    const Index rows = dec().rows(), cols = dec().cols();
+    eigen_assert(rhs().rows() == rows);
+
+    // FIXME introduce nonzeroPivots() and use it here. and more generally,
+    // make the same improvements in this dec as in FullPivLU.
+    if(dec().rank()==0)
+    {
+      dst.setZero();
+      return;
+    }
+
+    typename Rhs::PlainObject c(rhs());
+
+    Matrix<Scalar,1,Rhs::ColsAtCompileTime> temp(rhs().cols());
+    for (Index k = 0; k < dec().rank(); ++k)
+    {
+      Index remainingSize = rows-k;
+      c.row(k).swap(c.row(dec().rowsTranspositions().coeff(k)));
+      c.bottomRightCorner(remainingSize, rhs().cols())
+       .applyHouseholderOnTheLeft(dec().matrixQR().col(k).tail(remainingSize-1),
+                                  dec().hCoeffs().coeff(k), &temp.coeffRef(0));
+    }
+
+    dec().matrixQR()
+       .topLeftCorner(dec().rank(), dec().rank())
+       .template triangularView<Upper>()
+       .solveInPlace(c.topRows(dec().rank()));
+
+    for(Index i = 0; i < dec().rank(); ++i) dst.row(dec().colsPermutation().indices().coeff(i)) = c.row(i);
+    for(Index i = dec().rank(); i < cols; ++i) dst.row(dec().colsPermutation().indices().coeff(i)).setZero();
+  }
+};
+
+/** \ingroup QR_Module
+  *
+  * \brief Expression type for return value of FullPivHouseholderQR::matrixQ()
+  *
+  * \tparam MatrixType type of underlying dense matrix
+  */
+template<typename MatrixType> struct FullPivHouseholderQRMatrixQReturnType
+  : public ReturnByValue<FullPivHouseholderQRMatrixQReturnType<MatrixType> >
+{
+public:
+  typedef typename MatrixType::Index Index;
+  typedef typename FullPivHouseholderQR<MatrixType>::IntDiagSizeVectorType IntDiagSizeVectorType;
+  typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
+  typedef Matrix<typename MatrixType::Scalar, 1, MatrixType::RowsAtCompileTime, RowMajor, 1,
+                 MatrixType::MaxRowsAtCompileTime> WorkVectorType;
+
+  FullPivHouseholderQRMatrixQReturnType(const MatrixType&       qr,
+                                        const HCoeffsType&      hCoeffs,
+                                        const IntDiagSizeVectorType& rowsTranspositions)
+    : m_qr(qr),
+      m_hCoeffs(hCoeffs),
+      m_rowsTranspositions(rowsTranspositions)
+      {}
+
+  template <typename ResultType>
+  void evalTo(ResultType& result) const
+  {
+    const Index rows = m_qr.rows();
+    WorkVectorType workspace(rows);
+    evalTo(result, workspace);
+  }
+
+  template <typename ResultType>
+  void evalTo(ResultType& result, WorkVectorType& workspace) const
+  {
+    using numext::conj;
+    // compute the product H'_0 H'_1 ... H'_n-1,
+    // where H_k is the k-th Householder transformation I - h_k v_k v_k'
+    // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
+    const Index rows = m_qr.rows();
+    const Index cols = m_qr.cols();
+    const Index size = (std::min)(rows, cols);
+    workspace.resize(rows);
+    result.setIdentity(rows, rows);
+    for (Index k = size-1; k >= 0; k--)
+    {
+      result.block(k, k, rows-k, rows-k)
+            .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), conj(m_hCoeffs.coeff(k)), &workspace.coeffRef(k));
+      result.row(k).swap(result.row(m_rowsTranspositions.coeff(k)));
+    }
+  }
+
+  Index rows() const { return m_qr.rows(); }
+  Index cols() const { return m_qr.rows(); } // Q is square: rows() x rows(), so cols() == rows()
+
+protected:
+  typename MatrixType::Nested m_qr;
+  typename HCoeffsType::Nested m_hCoeffs;
+  typename IntDiagSizeVectorType::Nested m_rowsTranspositions;
+};
+
+} // end namespace internal
+
+template<typename MatrixType>
+inline typename FullPivHouseholderQR<MatrixType>::MatrixQReturnType FullPivHouseholderQR<MatrixType>::matrixQ() const
+{
+  eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized.");
+  return MatrixQReturnType(m_qr, m_hCoeffs, m_rows_transpositions);
+}
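+
+// Usage sketch (an illustration; 'A' is a hypothetical MatrixXd): matrixQ()
+// returns a lightweight expression that evaluates to the dense rows() x rows()
+// unitary factor only on assignment:
+//
+//   Eigen::FullPivHouseholderQR<Eigen::MatrixXd> qr(A);
+//   Eigen::MatrixXd Q = qr.matrixQ();      // (Q.adjoint() * Q) ~ identity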
+
+/** \return the full-pivoting Householder QR decomposition of \c *this.
+  *
+  * \sa class FullPivHouseholderQR
+  */
+template<typename Derived>
+const FullPivHouseholderQR<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::fullPivHouseholderQr() const
+{
+  return FullPivHouseholderQR<PlainObject>(eval());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H
diff --git a/Eigen/src/QR/HouseholderQR.h b/Eigen/src/QR/HouseholderQR.h
new file mode 100644
index 0000000..343a664
--- /dev/null
+++ b/Eigen/src/QR/HouseholderQR.h
@@ -0,0 +1,388 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+// Copyright (C) 2010 Vincent Lejeune
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_QR_H
+#define EIGEN_QR_H
+
+namespace Eigen { 
+
+/** \ingroup QR_Module
+  *
+  *
+  * \class HouseholderQR
+  *
+  * \brief Householder QR decomposition of a matrix
+  *
+  * \param MatrixType the type of the matrix of which we are computing the QR decomposition
+  *
+  * This class performs a QR decomposition of a matrix \b A into matrices \b Q and \b R
+  * such that 
+  * \f[
+  *  \mathbf{A} = \mathbf{Q} \, \mathbf{R}
+  * \f]
+  * by using Householder transformations. Here, \b Q is a unitary matrix and \b R an upper triangular matrix.
+  * The result is stored in a compact way compatible with LAPACK.
+  *
+  * Note that no pivoting is performed. This is \b not a rank-revealing decomposition.
+  * If you want that feature, use FullPivHouseholderQR or ColPivHouseholderQR instead.
+  *
+  * This Householder QR decomposition is faster, but less numerically stable and less feature-rich than
+  * FullPivHouseholderQR or ColPivHouseholderQR.
+  *
+  * \sa MatrixBase::householderQr()
+  */
+template<typename _MatrixType> class HouseholderQR
+{
+  public:
+
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      Options = MatrixType::Options,
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;
+    typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;
+    typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;
+    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType;
+
+    /**
+      * \brief Default Constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via HouseholderQR::compute(const MatrixType&).
+      */
+    HouseholderQR() : m_qr(), m_hCoeffs(), m_temp(), m_isInitialized(false) {}
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem \a size.
+      * \sa HouseholderQR()
+      */
+    HouseholderQR(Index rows, Index cols)
+      : m_qr(rows, cols),
+        m_hCoeffs((std::min)(rows,cols)),
+        m_temp(cols),
+        m_isInitialized(false) {}
+
+    /** \brief Constructs a QR factorization from a given matrix
+      *
+      * This constructor computes the QR factorization of the matrix \a matrix by calling
+      * the method compute(). It is a short cut for:
+      * 
+      * \code
+      * HouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());
+      * qr.compute(matrix);
+      * \endcode
+      * 
+      * \sa compute()
+      */
+    HouseholderQR(const MatrixType& matrix)
+      : m_qr(matrix.rows(), matrix.cols()),
+        m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),
+        m_temp(matrix.cols()),
+        m_isInitialized(false)
+    {
+      compute(matrix);
+    }
+
+    /** This method finds a solution x to the equation Ax=b, where A is the matrix of which
+      * *this is the QR decomposition, if any exists.
+      *
+      * \param b the right-hand-side of the equation to solve.
+      *
+      * \returns a solution.
+      *
+      * \note The case where b is a matrix is not yet implemented. Also, this
+      *       code is space inefficient.
+      *
+      * \note_about_checking_solutions
+      *
+      * \note_about_arbitrary_choice_of_solution
+      *
+      * Example: \include HouseholderQR_solve.cpp
+      * Output: \verbinclude HouseholderQR_solve.out
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<HouseholderQR, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+      return internal::solve_retval<HouseholderQR, Rhs>(*this, b.derived());
+    }
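+    // A minimal sketch of solve(), assuming a 4x4 invertible MatrixXd:
+    //
+    // \code
+    // Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
+    // Eigen::VectorXd b = Eigen::VectorXd::Random(4);
+    // Eigen::VectorXd x = A.householderQr().solve(b); // x such that A*x ~= b
+    // \endcode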
+
+    /** This method returns an expression of the unitary matrix Q as a sequence of Householder transformations.
+      *
+      * The returned expression can directly be used to perform matrix products. It can also be assigned to a dense Matrix object.
+      * Here is an example showing how to recover the full or thin matrix Q, as well as how to perform matrix products using operator*:
+      *
+      * Example: \include HouseholderQR_householderQ.cpp
+      * Output: \verbinclude HouseholderQR_householderQ.out
+      */
+    HouseholderSequenceType householderQ() const
+    {
+      eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+      return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
+    }
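+    // A thin-Q sketch, assuming qr decomposed a 5x3 MatrixXd: multiplying the
+    // Q expression by a thin identity forms only the first 3 columns of Q.
+    //
+    // \code
+    // Eigen::MatrixXd thinQ = qr.householderQ() * Eigen::MatrixXd::Identity(5, 3); // 5x3
+    // \endcode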
+
+    /** \returns a reference to the matrix where the Householder QR decomposition is stored
+      * in a LAPACK-compatible way.
+      */
+    const MatrixType& matrixQR() const
+    {
+        eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+        return m_qr;
+    }
+
+    HouseholderQR& compute(const MatrixType& matrix);
+
+    /** \returns the absolute value of the determinant of the matrix of which
+      * *this is the QR decomposition. It has only linear complexity
+      * (that is, O(n) where n is the dimension of the square matrix)
+      * as the QR decomposition has already been computed.
+      *
+      * \note This is only for square matrices.
+      *
+      * \warning a determinant can be very big or small, so for matrices
+      * of large enough dimension, there is a risk of overflow/underflow.
+      * One way to work around that is to use logAbsDeterminant() instead.
+      *
+      * \sa logAbsDeterminant(), MatrixBase::determinant()
+      */
+    typename MatrixType::RealScalar absDeterminant() const;
+
+    /** \returns the natural log of the absolute value of the determinant of the matrix of which
+      * *this is the QR decomposition. It has only linear complexity
+      * (that is, O(n) where n is the dimension of the square matrix)
+      * as the QR decomposition has already been computed.
+      *
+      * \note This is only for square matrices.
+      *
+      * \note This method is useful to work around the risk of overflow/underflow that's inherent
+      * to determinant computation.
+      *
+      * \sa absDeterminant(), MatrixBase::determinant()
+      */
+    typename MatrixType::RealScalar logAbsDeterminant() const;
+
+    inline Index rows() const { return m_qr.rows(); }
+    inline Index cols() const { return m_qr.cols(); }
+    
+    /** \returns a const reference to the vector of Householder coefficients used to represent the factor \c Q.
+      * 
+      * For advanced uses only.
+      */
+    const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
+
+  protected:
+    
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+    
+    MatrixType m_qr;
+    HCoeffsType m_hCoeffs;
+    RowVectorType m_temp;
+    bool m_isInitialized;
+};
+
+template<typename MatrixType>
+typename MatrixType::RealScalar HouseholderQR<MatrixType>::absDeterminant() const
+{
+  using std::abs;
+  eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+  eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+  return abs(m_qr.diagonal().prod());
+}
+
+template<typename MatrixType>
+typename MatrixType::RealScalar HouseholderQR<MatrixType>::logAbsDeterminant() const
+{
+  eigen_assert(m_isInitialized && "HouseholderQR is not initialized.");
+  eigen_assert(m_qr.rows() == m_qr.cols() && "You can't take the determinant of a non-square matrix!");
+  return m_qr.diagonal().cwiseAbs().array().log().sum();
+}
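+// A sketch of the log-space workaround mentioned above, assuming A is a square
+// MatrixXd whose determinant magnitude could overflow or underflow:
+//
+// \code
+// double logAbsDet = A.householderQr().logAbsDeterminant();
+// // Compare determinant magnitudes via logAbsDet rather than forming exp(logAbsDet).
+// \endcode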
+
+namespace internal {
+
+/** \internal */
+template<typename MatrixQR, typename HCoeffs>
+void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0)
+{
+  typedef typename MatrixQR::Index Index;
+  typedef typename MatrixQR::Scalar Scalar;
+  typedef typename MatrixQR::RealScalar RealScalar;
+  Index rows = mat.rows();
+  Index cols = mat.cols();
+  Index size = (std::min)(rows,cols);
+
+  eigen_assert(hCoeffs.size() == size);
+
+  typedef Matrix<Scalar,MatrixQR::ColsAtCompileTime,1> TempType;
+  TempType tempVector;
+  if(tempData==0)
+  {
+    tempVector.resize(cols);
+    tempData = tempVector.data();
+  }
+
+  for(Index k = 0; k < size; ++k)
+  {
+    Index remainingRows = rows - k;
+    Index remainingCols = cols - k - 1;
+
+    RealScalar beta;
+    mat.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeffs.coeffRef(k), beta);
+    mat.coeffRef(k,k) = beta;
+
+    // apply H to remaining part of m_qr from the left
+    mat.bottomRightCorner(remainingRows, remainingCols)
+        .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), hCoeffs.coeffRef(k), tempData+k+1);
+  }
+}
+
+/** \internal */
+template<typename MatrixQR, typename HCoeffs,
+  typename MatrixQRScalar = typename MatrixQR::Scalar,
+  bool InnerStrideIsOne = (MatrixQR::InnerStrideAtCompileTime == 1 && HCoeffs::InnerStrideAtCompileTime == 1)>
+struct householder_qr_inplace_blocked
+{
+  // This is specialized for MKL-supported Scalar types in HouseholderQR_MKL.h
+  static void run(MatrixQR& mat, HCoeffs& hCoeffs,
+      typename MatrixQR::Index maxBlockSize=32,
+      typename MatrixQR::Scalar* tempData = 0)
+  {
+    typedef typename MatrixQR::Index Index;
+    typedef typename MatrixQR::Scalar Scalar;
+    typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;
+
+    Index rows = mat.rows();
+    Index cols = mat.cols();
+    Index size = (std::min)(rows, cols);
+
+    typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
+    TempType tempVector;
+    if(tempData==0)
+    {
+      tempVector.resize(cols);
+      tempData = tempVector.data();
+    }
+
+    Index blockSize = (std::min)(maxBlockSize,size);
+
+    Index k = 0;
+    for (k = 0; k < size; k += blockSize)
+    {
+      Index bs = (std::min)(size-k,blockSize);  // actual size of the block
+      Index tcols = cols - k - bs;            // trailing columns
+      Index brows = rows-k;                   // rows of the block
+
+      // Partition the matrix:
+      //        A00 | A01 | A02
+      // mat  = A10 | A11 | A12
+      //        A20 | A21 | A22
+      // then perform the QR decomposition of [A11^T A21^T]^T,
+      // update [A12^T A22^T]^T using level 3 operations,
+      // and finally continue the algorithm on A22.
+
+      BlockType A11_21 = mat.block(k,k,brows,bs);
+      Block<HCoeffs,Dynamic,1> hCoeffsSegment = hCoeffs.segment(k,bs);
+
+      householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData);
+
+      if(tcols)
+      {
+        BlockType A12_22 = mat.block(k,k+bs,brows,tcols);
+        apply_block_householder_on_the_left(A12_22,A11_21,hCoeffsSegment.adjoint());
+      }
+    }
+  }
+};
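+// For illustration, the blocked kernel above is the workhorse behind
+// HouseholderQR::compute(); an equivalent direct call on a plain matrix
+// (internal API, shown only as a sketch) would be:
+//
+// \code
+// Eigen::MatrixXd M = Eigen::MatrixXd::Random(8, 6);
+// Eigen::VectorXd h(6); // min(rows,cols) Householder coefficients
+// Eigen::internal::householder_qr_inplace_blocked<Eigen::MatrixXd, Eigen::VectorXd>::run(M, h);
+// // M now holds R in its upper triangle and the Householder vectors below it.
+// \endcode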
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
+  : solve_retval_base<HouseholderQR<_MatrixType>, Rhs>
+{
+  EIGEN_MAKE_SOLVE_HELPERS(HouseholderQR<_MatrixType>,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    const Index rows = dec().rows(), cols = dec().cols();
+    const Index rank = (std::min)(rows, cols);
+    eigen_assert(rhs().rows() == rows);
+
+    typename Rhs::PlainObject c(rhs());
+
+    // Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
+    c.applyOnTheLeft(householderSequence(
+      dec().matrixQR().leftCols(rank),
+      dec().hCoeffs().head(rank)).transpose()
+    );
+
+    dec().matrixQR()
+       .topLeftCorner(rank, rank)
+       .template triangularView<Upper>()
+       .solveInPlace(c.topRows(rank));
+
+    dst.topRows(rank) = c.topRows(rank);
+    dst.bottomRows(cols-rank).setZero();
+  }
+};
+
+} // end namespace internal
+
+/** Performs the QR factorization of the given matrix \a matrix. The result of
+  * the factorization is stored into \c *this, and a reference to \c *this
+  * is returned.
+  *
+  * \sa class HouseholderQR, HouseholderQR(const MatrixType&)
+  */
+template<typename MatrixType>
+HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
+{
+  check_template_parameters();
+  
+  Index rows = matrix.rows();
+  Index cols = matrix.cols();
+  Index size = (std::min)(rows,cols);
+
+  m_qr = matrix;
+  m_hCoeffs.resize(size);
+
+  m_temp.resize(cols);
+
+  internal::householder_qr_inplace_blocked<MatrixType, HCoeffsType>::run(m_qr, m_hCoeffs, 48, m_temp.data());
+
+  m_isInitialized = true;
+  return *this;
+}
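+// A reuse sketch for compute(): preallocating lets one factor several
+// equally-sized matrices without reallocating (assuming 5x3 MatrixXd inputs):
+//
+// \code
+// Eigen::MatrixXd A1 = Eigen::MatrixXd::Random(5, 3);
+// Eigen::MatrixXd A2 = Eigen::MatrixXd::Random(5, 3);
+// Eigen::HouseholderQR<Eigen::MatrixXd> qr(5, 3); // preallocate internal data
+// qr.compute(A1);
+// qr.compute(A2); // internal storage is reused
+// \endcode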
+
+/** \return the Householder QR decomposition of \c *this.
+  *
+  * \sa class HouseholderQR
+  */
+template<typename Derived>
+const HouseholderQR<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::householderQr() const
+{
+  return HouseholderQR<PlainObject>(eval());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_QR_H
diff --git a/Eigen/src/QR/HouseholderQR_MKL.h b/Eigen/src/QR/HouseholderQR_MKL.h
new file mode 100644
index 0000000..b80f1b4
--- /dev/null
+++ b/Eigen/src/QR/HouseholderQR_MKL.h
@@ -0,0 +1,71 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *    Householder QR decomposition of a matrix without pivoting, based on the
+ *    LAPACKE_?geqrf function.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_QR_MKL_H
+#define EIGEN_QR_MKL_H
+
+#include "../Core/util/MKL_support.h"
+
+namespace Eigen { 
+
+  namespace internal {
+
+    /** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_QR_NOPIV(EIGTYPE, MKLTYPE, MKLPREFIX) \
+template<typename MatrixQR, typename HCoeffs> \
+struct householder_qr_inplace_blocked<MatrixQR, HCoeffs, EIGTYPE, true> \
+{ \
+  static void run(MatrixQR& mat, HCoeffs& hCoeffs, \
+      typename MatrixQR::Index = 32, \
+      typename MatrixQR::Scalar* = 0) \
+  { \
+    lapack_int m = (lapack_int) mat.rows(); \
+    lapack_int n = (lapack_int) mat.cols(); \
+    lapack_int lda = (lapack_int) mat.outerStride(); \
+    lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
+    LAPACKE_##MKLPREFIX##geqrf( matrix_order, m, n, (MKLTYPE*)mat.data(), lda, (MKLTYPE*)hCoeffs.data()); \
+    hCoeffs.adjointInPlace(); \
+  } \
+};
+
+EIGEN_MKL_QR_NOPIV(double, double, d)
+EIGEN_MKL_QR_NOPIV(float, float, s)
+EIGEN_MKL_QR_NOPIV(dcomplex, MKL_Complex16, z)
+EIGEN_MKL_QR_NOPIV(scomplex, MKL_Complex8, c)
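+// Dispatch note: when Eigen is built with MKL support (e.g. EIGEN_USE_MKL_ALL),
+// a HouseholderQR factorization over one of the four scalar types above is
+// routed through these specializations (LAPACKE_dgeqrf and friends); otherwise
+// the generic blocked kernel in HouseholderQR.h is used. User code is the same
+// either way, e.g. (assuming A is a MatrixXd):
+//
+// \code
+// Eigen::HouseholderQR<Eigen::MatrixXd> qr(A); // may call LAPACKE_dgeqrf
+// \endcode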
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_QR_MKL_H
diff --git a/Eigen/src/SPQRSupport/CMakeLists.txt b/Eigen/src/SPQRSupport/CMakeLists.txt
new file mode 100644
index 0000000..4968bea
--- /dev/null
+++ b/Eigen/src/SPQRSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SPQRSupport_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_SPQRSupport_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SPQRSupport/ COMPONENT Devel
+  )
diff --git a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
new file mode 100644
index 0000000..de00877
--- /dev/null
+++ b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
@@ -0,0 +1,338 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SUITESPARSEQRSUPPORT_H
+#define EIGEN_SUITESPARSEQRSUPPORT_H
+
+namespace Eigen {
+  
+  template<typename MatrixType> class SPQR; 
+  template<typename SPQRType> struct SPQRMatrixQReturnType; 
+  template<typename SPQRType> struct SPQRMatrixQTransposeReturnType; 
+  template <typename SPQRType, typename Derived> struct SPQR_QProduct;
+  namespace internal {
+    template <typename SPQRType> struct traits<SPQRMatrixQReturnType<SPQRType> >
+    {
+      typedef typename SPQRType::MatrixType ReturnType;
+    };
+    template <typename SPQRType> struct traits<SPQRMatrixQTransposeReturnType<SPQRType> >
+    {
+      typedef typename SPQRType::MatrixType ReturnType;
+    };
+    template <typename SPQRType, typename Derived> struct traits<SPQR_QProduct<SPQRType, Derived> >
+    {
+      typedef typename Derived::PlainObject ReturnType;
+    };
+  } // End namespace internal
+  
+/**
+ * \ingroup SPQRSupport_Module
+ * \class SPQR
+ * \brief Sparse QR factorization based on SuiteSparseQR library
+ * 
+ * This class is used to perform a multithreaded and multifrontal rank-revealing QR decomposition 
+ * of sparse matrices. The result is then used to solve linear least-squares systems.
+ * More precisely, a QR factorization is computed such that A*P = Q*R, where:
+ * 
+ * P is the column permutation. Use colsPermutation() to get it.
+ * 
+ * Q is the orthogonal matrix represented as Householder reflectors. 
+ * Use matrixQ() to get an expression of it and matrixQ().transpose() to get its transpose.
+ * You can then apply it to a vector.
+ * 
+ * R is the sparse triangular factor. Use matrixR() to get it as a SparseMatrix.
+ * NOTE: The Index type of R is always UF_long. You can get it with SPQR::Index.
+ * 
+ * \tparam _MatrixType The type of the sparse matrix A; it must be a column-major SparseMatrix<>.
+ * 
+ */
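+// A minimal sketch, assuming SuiteSparseQR is available, <Eigen/SPQRSupport> is
+// included, A is a column-major SparseMatrix<double>, and b is a VectorXd:
+//
+// \code
+// Eigen::SPQR<Eigen::SparseMatrix<double> > spqr(A);
+// Eigen::VectorXd x = spqr.solve(b); // least-squares solution of A*x = b
+// long r = spqr.rank();              // revealed numerical rank of A
+// \endcode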
+template<typename _MatrixType>
+class SPQR
+{
+  public:
+    typedef typename _MatrixType::Scalar Scalar;
+    typedef typename _MatrixType::RealScalar RealScalar;
+    typedef UF_long Index;
+    typedef SparseMatrix<Scalar, ColMajor, Index> MatrixType;
+    typedef PermutationMatrix<Dynamic, Dynamic> PermutationType;
+  public:
+    SPQR() 
+      : m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
+    { 
+      cholmod_l_start(&m_cc);
+    }
+    
+    SPQR(const _MatrixType& matrix)
+    : m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
+    {
+      cholmod_l_start(&m_cc);
+      compute(matrix);
+    }
+    
+    ~SPQR()
+    {
+      SPQR_free();
+      cholmod_l_finish(&m_cc);
+    }
+    void SPQR_free()
+    {
+      cholmod_l_free_sparse(&m_H, &m_cc);
+      cholmod_l_free_sparse(&m_cR, &m_cc);
+      cholmod_l_free_dense(&m_HTau, &m_cc);
+      std::free(m_E);
+      std::free(m_HPinv);
+    }
+
+    void compute(const _MatrixType& matrix)
+    {
+      if(m_isInitialized) SPQR_free();
+
+      MatrixType mat(matrix);
+      
+      /* Compute the default threshold as in MATLAB; see:
+       * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
+       * Sparse QR Factorization", ACM Trans. on Math. Soft. 38(1), 2011, page 8:3.
+       */
+      RealScalar pivotThreshold = m_tolerance;
+      if(m_useDefaultThreshold) 
+      {
+        using std::max;
+        RealScalar max2Norm = 0.0;
+        for (int j = 0; j < mat.cols(); j++) max2Norm = (max)(max2Norm, mat.col(j).norm());
+        if(max2Norm==RealScalar(0))
+          max2Norm = RealScalar(1);
+        pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits<RealScalar>::epsilon();
+      }
+      
+      cholmod_sparse A; 
+      A = viewAsCholmod(mat);
+      Index col = matrix.cols();
+      m_rank = SuiteSparseQR<Scalar>(m_ordering, pivotThreshold, col, &A, 
+                             &m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc);
+
+      if (!m_cR)
+      {
+        m_info = NumericalIssue; 
+        m_isInitialized = false;
+        return;
+      }
+      m_info = Success;
+      m_isInitialized = true;
+      m_isRUpToDate = false;
+    }
+    /** 
+     * Get the number of rows of the input matrix and the Q matrix
+     */
+    inline Index rows() const {return m_cR->nrow; }
+    
+    /** 
+     * Get the number of columns of the input matrix. 
+     */
+    inline Index cols() const { return m_cR->ncol; }
+   
+    /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<SPQR, Rhs> solve(const MatrixBase<Rhs>& B) const 
+    {
+      eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
+      eigen_assert(this->rows()==B.rows()
+                    && "SPQR::solve(): invalid number of rows of the right hand side matrix B");
+          return internal::solve_retval<SPQR, Rhs>(*this, B.derived());
+    }
+    
+    template<typename Rhs, typename Dest>
+    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+    {
+      eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
+      eigen_assert(b.cols()==1 && "This method is for vectors only");
+
+      //Compute Q^T * b
+      typename Dest::PlainObject y, y2;
+      y = matrixQ().transpose() * b;
+      
+      // Solves with the triangular matrix R
+      Index rk = this->rank();
+      y2 = y;
+      y.resize((std::max)(cols(),Index(y.rows())),y.cols());
+      y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y2.topRows(rk));
+
+      // Apply the column permutation 
+      // colsPermutation() performs a copy of the permutation,
+      // so let's apply it manually:
+      for(Index i = 0; i < rk; ++i) dest.row(m_E[i]) = y.row(i);
+      for(Index i = rk; i < cols(); ++i) dest.row(m_E[i]).setZero();
+      
+//       y.bottomRows(y.rows()-rk).setZero();
+//       dest = colsPermutation() * y.topRows(cols());
+      
+      m_info = Success;
+    }
+    
+    /** \returns the sparse triangular factor R. It is a sparse matrix
+     */
+    const MatrixType matrixR() const
+    {
+      eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
+      if(!m_isRUpToDate) {
+        m_R = viewAsEigen<Scalar,ColMajor, typename MatrixType::Index>(*m_cR);
+        m_isRUpToDate = true;
+      }
+      return m_R;
+    }
+    /// Get an expression of the matrix Q
+    SPQRMatrixQReturnType<SPQR> matrixQ() const
+    {
+      return SPQRMatrixQReturnType<SPQR>(*this);
+    }
+    /// Get the permutation that was applied to columns of A
+    PermutationType colsPermutation() const
+    { 
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      Index n = m_cR->ncol;
+      PermutationType colsPerm(n);
+      for(Index j = 0; j <n; j++) colsPerm.indices()(j) = m_E[j];
+      return colsPerm; 
+    }
+    /**
+     * Gets the rank of the matrix. 
+     * It should be equal to matrixR().cols() if the matrix is full rank.
+     */
+    Index rank() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_cc.SPQR_istat[4];
+    }
+    /// Set the fill-reducing ordering method to be used
+    void setSPQROrdering(int ord) { m_ordering = ord;}
+    /// Set the tolerance \a tol: columns with 2-norm <= \a tol are treated as zero
+    void setPivotThreshold(const RealScalar& tol)
+    {
+      m_useDefaultThreshold = false;
+      m_tolerance = tol;
+    }
+    
+    /** \returns a pointer to the SPQR workspace */
+    cholmod_common *cholmodCommon() const { return &m_cc; }
+    
+    
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if the computation was successful,
+      *          \c NumericalIssue if the sparse QR factorization could not be computed
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+  protected:
+    bool m_isInitialized;
+    bool m_analysisIsOk;
+    bool m_factorizationIsOk;
+    mutable bool m_isRUpToDate;
+    mutable ComputationInfo m_info;
+    int m_ordering; // Ordering method to use, see SPQR's manual
+    int m_allow_tol; // Allow to use some tolerance during numerical factorization.
+    RealScalar m_tolerance; // treat columns with 2-norm below this tolerance as zero
+    mutable cholmod_sparse *m_cR; // The sparse R factor in cholmod format
+    mutable MatrixType m_R; // The sparse matrix R in Eigen format
+    mutable Index *m_E; // The permutation applied to columns
+    mutable cholmod_sparse *m_H;  // The Householder vectors
+    mutable Index *m_HPinv; // The row permutation of H
+    mutable cholmod_dense *m_HTau; // The Householder coefficients
+    mutable Index m_rank; // The rank of the matrix
+    mutable cholmod_common m_cc; // Workspace and parameters
+    bool m_useDefaultThreshold;     // Use default threshold
+    template<typename ,typename > friend struct SPQR_QProduct;
+};
+
+template <typename SPQRType, typename Derived>
+struct SPQR_QProduct : ReturnByValue<SPQR_QProduct<SPQRType,Derived> >
+{
+  typedef typename SPQRType::Scalar Scalar;
+  typedef typename SPQRType::Index Index;
+  // Define the constructor to keep references to the argument types
+  SPQR_QProduct(const SPQRType& spqr, const Derived& other, bool transpose) : m_spqr(spqr),m_other(other),m_transpose(transpose) {}
+  
+  inline Index rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); }
+  inline Index cols() const { return m_other.cols(); }
+  // Assign to a vector
+  template<typename ResType>
+  void evalTo(ResType& res) const
+  {
+    cholmod_dense y_cd;
+    cholmod_dense *x_cd; 
+    int method = m_transpose ? SPQR_QTX : SPQR_QX; 
+    cholmod_common *cc = m_spqr.cholmodCommon();
+    y_cd = viewAsCholmod(m_other.const_cast_derived());
+    x_cd = SuiteSparseQR_qmult<Scalar>(method, m_spqr.m_H, m_spqr.m_HTau, m_spqr.m_HPinv, &y_cd, cc);
+    res = Matrix<Scalar,ResType::RowsAtCompileTime,ResType::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x), x_cd->nrow, x_cd->ncol);
+    cholmod_l_free_dense(&x_cd, cc);
+  }
+  const SPQRType& m_spqr; 
+  const Derived& m_other; 
+  bool m_transpose; 
+  
+};
+template<typename SPQRType>
+struct SPQRMatrixQReturnType{
+  
+  SPQRMatrixQReturnType(const SPQRType& spqr) : m_spqr(spqr) {}
+  template<typename Derived>
+  SPQR_QProduct<SPQRType, Derived> operator*(const MatrixBase<Derived>& other)
+  {
+    return SPQR_QProduct<SPQRType,Derived>(m_spqr,other.derived(),false);
+  }
+  SPQRMatrixQTransposeReturnType<SPQRType> adjoint() const
+  {
+    return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);
+  }
+  // To use for operations with the transpose of Q
+  SPQRMatrixQTransposeReturnType<SPQRType> transpose() const
+  {
+    return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);
+  }
+  const SPQRType& m_spqr;
+};
+
+template<typename SPQRType>
+struct SPQRMatrixQTransposeReturnType{
+  SPQRMatrixQTransposeReturnType(const SPQRType& spqr) : m_spqr(spqr) {}
+  template<typename Derived>
+  SPQR_QProduct<SPQRType,Derived> operator*(const MatrixBase<Derived>& other)
+  {
+    return SPQR_QProduct<SPQRType,Derived>(m_spqr,other.derived(), true);
+  }
+  const SPQRType& m_spqr;
+};
+
+namespace internal {
+  
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<SPQR<_MatrixType>, Rhs>
+  : solve_retval_base<SPQR<_MatrixType>, Rhs>
+{
+  typedef SPQR<_MatrixType> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SUITESPARSEQRSUPPORT_H
diff --git a/Eigen/src/SVD/CMakeLists.txt b/Eigen/src/SVD/CMakeLists.txt
new file mode 100644
index 0000000..55efc44
--- /dev/null
+++ b/Eigen/src/SVD/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SVD_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_SVD_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SVD COMPONENT Devel
+  )
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
new file mode 100644
index 0000000..1b29774
--- /dev/null
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -0,0 +1,976 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_JACOBISVD_H
+#define EIGEN_JACOBISVD_H
+
+namespace Eigen { 
+
+namespace internal {
+// forward declaration (needed by ICC)
+// the empty body is required by MSVC
+template<typename MatrixType, int QRPreconditioner,
+         bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
+struct svd_precondition_2x2_block_to_be_real {};
+
+/*** QR preconditioners (R-SVD)
+ ***
+ *** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
+ *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
+ *** JacobiSVD which by itself is only able to work on square matrices.
+ ***/
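+// The R-SVD reduction sketched above, in symbols: for a tall m-by-n matrix A
+// (m > n), first compute a QR factorization A = Q*R (possibly with pivoting,
+// A*P = Q*R), then run the Jacobi iterations on the small n-by-n triangular
+// factor only, R = U_r*S*V_r^*. The SVD of A follows with U = Q*U_r (extended
+// to full size when full unitaries are requested), the same S, and V = V_r
+// (composed with the permutation P when pivoting was used). Wide matrices are
+// handled the same way after transposition.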
+
+enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+struct qr_preconditioner_should_do_anything
+{
+  enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
+             MatrixType::ColsAtCompileTime != Dynamic &&
+             MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
+         b = MatrixType::RowsAtCompileTime != Dynamic &&
+             MatrixType::ColsAtCompileTime != Dynamic &&
+             MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
+         ret = !( (QRPreconditioner == NoQRPreconditioner) ||
+                  (Case == PreconditionIfMoreColsThanRows && bool(a)) ||
+                  (Case == PreconditionIfMoreRowsThanCols && bool(b)) )
+  };
+};
+
+template<typename MatrixType, int QRPreconditioner, int Case,
+         bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
+> struct qr_preconditioner_impl {};
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
+{
+public:
+  typedef typename MatrixType::Index Index;
+  void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
+  bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
+  {
+    return false;
+  }
+};
+
+/*** preconditioner using FullPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  enum
+  {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
+  };
+  typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
+
+  void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+  {
+    if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+    {
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.rows(), svd.cols());
+    }
+    if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+  }
+
+  bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.rows() > matrix.cols())
+    {
+      m_qr.compute(matrix);
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+      if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
+      if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+      return true;
+    }
+    return false;
+  }
+private:
+  typedef FullPivHouseholderQR<MatrixType> QRType;
+  QRType m_qr;
+  WorkspaceType m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  enum
+  {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+    Options = MatrixType::Options
+  };
+  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+          TransposeTypeWithSameStorageOrder;
+
+  void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+  {
+    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+    {
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.cols(), svd.rows());
+    }
+    m_adjoint.resize(svd.cols(), svd.rows());
+    if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+  }
+
+  bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.cols() > matrix.rows())
+    {
+      m_adjoint = matrix.adjoint();
+      m_qr.compute(m_adjoint);
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+      if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
+      if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+      return true;
+    }
+    else return false;
+  }
+private:
+  typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+  QRType m_qr;
+  TransposeTypeWithSameStorageOrder m_adjoint;
+  typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using ColPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+
+  void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+  {
+    if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+    {
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.rows(), svd.cols());
+    }
+    if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+    else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+  }
+
+  bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.rows() > matrix.cols())
+    {
+      m_qr.compute(matrix);
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+      if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+      else if(svd.m_computeThinU)
+      {
+        svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+      }
+      if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+      return true;
+    }
+    return false;
+  }
+
+private:
+  typedef ColPivHouseholderQR<MatrixType> QRType;
+  QRType m_qr;
+  typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  enum
+  {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+    Options = MatrixType::Options
+  };
+
+  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+          TransposeTypeWithSameStorageOrder;
+
+  void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+  {
+    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+    {
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.cols(), svd.rows());
+    }
+    if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+    else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+    m_adjoint.resize(svd.cols(), svd.rows());
+  }
+
+  bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.cols() > matrix.rows())
+    {
+      m_adjoint = matrix.adjoint();
+      m_qr.compute(m_adjoint);
+
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+      if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+      else if(svd.m_computeThinV)
+      {
+        svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+      }
+      if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+      return true;
+    }
+    else return false;
+  }
+
+private:
+  typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+  QRType m_qr;
+  TransposeTypeWithSameStorageOrder m_adjoint;
+  typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using HouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+
+  void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+  {
+    if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+    {
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.rows(), svd.cols());
+    }
+    if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+    else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+  }
+
+  bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.rows() > matrix.cols())
+    {
+      m_qr.compute(matrix);
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+      if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+      else if(svd.m_computeThinU)
+      {
+        svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+      }
+      if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
+      return true;
+    }
+    return false;
+  }
+private:
+  typedef HouseholderQR<MatrixType> QRType;
+  QRType m_qr;
+  typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  enum
+  {
+    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+    Options = MatrixType::Options
+  };
+
+  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+          TransposeTypeWithSameStorageOrder;
+
+  void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+  {
+    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+    {
+      m_qr.~QRType();
+      ::new (&m_qr) QRType(svd.cols(), svd.rows());
+    }
+    if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+    else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+    m_adjoint.resize(svd.cols(), svd.rows());
+  }
+
+  bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+  {
+    if(matrix.cols() > matrix.rows())
+    {
+      m_adjoint = matrix.adjoint();
+      m_qr.compute(m_adjoint);
+
+      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+      if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+      else if(svd.m_computeThinV)
+      {
+        svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+      }
+      if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
+      return true;
+    }
+    else return false;
+  }
+
+private:
+  typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+  QRType m_qr;
+  TransposeTypeWithSameStorageOrder m_adjoint;
+  typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** 2x2 SVD implementation
+ ***
+ *** JacobiSVD proceeds by performing a series of 2x2 SVD subproblems
+ ***/
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
+{
+  typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+  typedef typename SVD::Index Index;
+  static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
+};
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
+{
+  typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef typename MatrixType::RealScalar RealScalar;
+  typedef typename SVD::Index Index;
+  static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
+  {
+    using std::sqrt;
+    Scalar z;
+    JacobiRotation<Scalar> rot;
+    RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
+    
+    if(n==0)
+    {
+      z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+      work_matrix.row(p) *= z;
+      if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
+      if(work_matrix.coeff(q,q)!=Scalar(0))
+      {
+        z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+        work_matrix.row(q) *= z;
+        if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+      }
+      // otherwise the second row is already zero, so we have nothing to do.
+    }
+    else
+    {
+      rot.c() = conj(work_matrix.coeff(p,p)) / n;
+      rot.s() = work_matrix.coeff(q,p) / n;
+      work_matrix.applyOnTheLeft(p,q,rot);
+      if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
+      if(work_matrix.coeff(p,q) != Scalar(0))
+      {
+        Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+        work_matrix.col(q) *= z;
+        if(svd.computeV()) svd.m_matrixV.col(q) *= z;
+      }
+      if(work_matrix.coeff(q,q) != Scalar(0))
+      {
+        z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+        work_matrix.row(q) *= z;
+        if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+      }
+    }
+  }
+};
+
+template<typename MatrixType, typename RealScalar, typename Index>
+void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
+                            JacobiRotation<RealScalar> *j_left,
+                            JacobiRotation<RealScalar> *j_right)
+{
+  using std::sqrt;
+  using std::abs;
+  Matrix<RealScalar,2,2> m;
+  m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
+       numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
+  JacobiRotation<RealScalar> rot1;
+  RealScalar t = m.coeff(0,0) + m.coeff(1,1);
+  RealScalar d = m.coeff(1,0) - m.coeff(0,1);
+  if(t == RealScalar(0))
+  {
+    rot1.c() = RealScalar(0);
+    rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
+  }
+  else
+  {
+    RealScalar t2d2 = numext::hypot(t,d);
+    rot1.c() = abs(t)/t2d2;
+    rot1.s() = d/t2d2;
+    if(t<RealScalar(0))
+      rot1.s() = -rot1.s();
+  }
+  m.applyOnTheLeft(0,1,rot1);
+  j_right->makeJacobi(m,0,1);
+  *j_left  = rot1 * j_right->transpose();
+}
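+// In words: real_2x2_jacobi_svd first applies a rotation (rot1) that makes the
+// real 2x2 block symmetric, then diagonalizes the symmetric result with a
+// classical Jacobi rotation (makeJacobi); j_left and j_right are the resulting
+// left and right rotations, which the main SVD loop applies to rows and columns
+// p,q of the work matrix, driving its off-diagonal entries towards zero.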
+
+} // end namespace internal
+
+/** \ingroup SVD_Module
+  *
+  *
+  * \class JacobiSVD
+  *
+  * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
+  *
+  * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+  * \param QRPreconditioner this optional parameter allows one to specify the type of QR decomposition that will be used internally
+  *                        for the R-SVD step for non-square matrices. See discussion of possible values below.
+  *
+  * The SVD decomposition consists of decomposing any n-by-p matrix \a A as a product
+  *   \f[ A = U S V^* \f]
+  * where \a U is an n-by-n unitary, \a V is a p-by-p unitary, and \a S is an n-by-p real positive matrix which is zero outside of its main diagonal;
+  * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
+  * and right \em singular \em vectors of \a A respectively.
+  *
+  * Singular values are always sorted in decreasing order.
+  *
+  * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
+  *
+  * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
+  * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
+  * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then an n-by-m matrix,
+  * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
+  *
+  * Here's an example demonstrating basic usage:
+  * \include JacobiSVD_basic.cpp
+  * Output: \verbinclude JacobiSVD_basic.out
+  *
+  * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
+  * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
+  * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
+  * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
+  *
+  * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+  * terminate in finite (and reasonable) time.
+  *
+  * The possible values for QRPreconditioner are:
+  * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
+  * \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
+  *     Contrary to the other QR preconditioners, it doesn't allow computing thin unitaries.
+  * \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
+  *     This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
+  *     is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
+  *     process is more reliable than the optimized bidiagonal SVD iterations.
+  * \li NoQRPreconditioner disables the QR preconditioner altogether. This is useful if you know that you will only be computing
+  *     JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
+  *     faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
+  *     if QR preconditioning is needed before applying it anyway.
+  *
+  * \sa MatrixBase::jacobiSvd()
+  */
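+// A minimal sketch of these options, assuming a dynamic float matrix:
+//
+// \code
+// Eigen::MatrixXf A = Eigen::MatrixXf::Random(4, 2);
+// Eigen::VectorXf b = Eigen::VectorXf::Random(4);
+// Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
+// Eigen::VectorXf sv = svd.singularValues(); // sorted in decreasing order
+// Eigen::VectorXf x  = svd.solve(b);         // least-squares solution of A*x = b
+// \endcode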
+template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
+{
+  public:
+
+    typedef _MatrixType MatrixType;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+    typedef typename MatrixType::Index Index;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+      MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+      MatrixOptions = MatrixType::Options
+    };
+
+    typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+                   MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+            MatrixUType;
+    typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+                   MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+            MatrixVType;
+    typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+    typedef typename internal::plain_row_type<MatrixType>::type RowType;
+    typedef typename internal::plain_col_type<MatrixType>::type ColType;
+    typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
+                   MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
+            WorkMatrixType;
+
+    /** \brief Default Constructor.
+      *
+      * The default constructor is useful in cases in which the user intends to
+      * perform decompositions via JacobiSVD::compute(const MatrixType&).
+      */
+    JacobiSVD()
+      : m_isInitialized(false),
+        m_isAllocated(false),
+        m_usePrescribedThreshold(false),
+        m_computationOptions(0),
+        m_rows(-1), m_cols(-1), m_diagSize(0)
+    {}
+
+
+    /** \brief Default Constructor with memory preallocation
+      *
+      * Like the default constructor but with preallocation of the internal data
+      * according to the specified problem size.
+      * \sa JacobiSVD()
+      */
+    JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+      : m_isInitialized(false),
+        m_isAllocated(false),
+        m_usePrescribedThreshold(false),
+        m_computationOptions(0),
+        m_rows(-1), m_cols(-1)
+    {
+      allocate(rows, cols, computationOptions);
+    }
+
+    /** \brief Constructor performing the decomposition of given matrix.
+     *
+     * \param matrix the matrix to decompose
+     * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
+     *                           By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+     *                           #ComputeFullV, #ComputeThinV.
+     *
+     * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
+     * available with the (non-default) FullPivHouseholderQR preconditioner.
+     */
+    JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+      : m_isInitialized(false),
+        m_isAllocated(false),
+        m_usePrescribedThreshold(false),
+        m_computationOptions(0),
+        m_rows(-1), m_cols(-1)
+    {
+      compute(matrix, computationOptions);
+    }
+
+    /** \brief Method performing the decomposition of given matrix using custom options.
+     *
+     * \param matrix the matrix to decompose
+     * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
+     *                           By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+     *                           #ComputeFullV, #ComputeThinV.
+     *
+     * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
+     * available with the (non-default) FullPivHouseholderQR preconditioner.
+     */
+    JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+    /** \brief Method performing the decomposition of given matrix using current options.
+     *
+     * \param matrix the matrix to decompose
+     *
+     * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+     */
+    JacobiSVD& compute(const MatrixType& matrix)
+    {
+      return compute(matrix, m_computationOptions);
+    }
+
+    /** \returns the \a U matrix.
+     *
+     * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+     * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU.
+     *
+     * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
+     *
+     * This method asserts that you asked for \a U to be computed.
+     */
+    const MatrixUType& matrixU() const
+    {
+      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+      eigen_assert(computeU() && "This JacobiSVD decomposition didn't compute U. Did you ask for it?");
+      return m_matrixU;
+    }
+
+    /** \returns the \a V matrix.
+     *
+     * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+     * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for #ComputeThinV.
+     *
+     * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
+     *
+     * This method asserts that you asked for \a V to be computed.
+     */
+    const MatrixVType& matrixV() const
+    {
+      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+      eigen_assert(computeV() && "This JacobiSVD decomposition didn't compute V. Did you ask for it?");
+      return m_matrixV;
+    }
+
+    /** \returns the vector of singular values.
+     *
+     * For the SVD decomposition of an n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
+     * returned vector has size \a m.  Singular values are always sorted in decreasing order.
+     */
+    const SingularValuesType& singularValues() const
+    {
+      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+      return m_singularValues;
+    }
+
+    /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
+    inline bool computeU() const { return m_computeFullU || m_computeThinU; }
+    /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
+    inline bool computeV() const { return m_computeFullV || m_computeThinV; }
+
+    /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+      *
+      * \param b the right-hand-side of the equation to solve.
+      *
+      * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
+      *
+      * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
+      * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<JacobiSVD, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+      eigen_assert(computeU() && computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
+      return internal::solve_retval<JacobiSVD, Rhs>(*this, b.derived());
+    }
+
+    /** \returns the number of singular values that are not exactly 0 */
+    Index nonzeroSingularValues() const
+    {
+      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+      return m_nonzeroSingularValues;
+    }
+    
+    /** \returns the rank of the matrix of which \c *this is the SVD.
+      *
+      * \note This method has to determine which singular values should be considered nonzero.
+      *       For that, it uses the threshold value that you can control by calling
+      *       setThreshold(const RealScalar&).
+      */
+    inline Index rank() const
+    {
+      using std::abs;
+      eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+      if(m_singularValues.size()==0) return 0;
+      RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold();
+      Index i = m_nonzeroSingularValues-1;
+      while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
+      return i+1;
+    }
+    
+    /** Allows one to prescribe a threshold to be used by certain methods, such as rank() and solve(),
+      * which need to determine when singular values are to be considered nonzero.
+      * This is not used for the SVD decomposition itself.
+      *
+      * When it needs to get the threshold value, Eigen calls threshold().
+      * The default is \c NumTraits<Scalar>::epsilon()
+      *
+      * \param threshold The new value to use as the threshold.
+      *
+      * A singular value will be considered nonzero if its value is strictly greater than
+      *  \f$ \vert singular value \vert \leqslant threshold \times \vert max singular value \vert \f$.
+      *
+      * If you want to come back to the default behavior, call setThreshold(Default_t)
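+      *
+      * A hypothetical example:
+      * \code
+      * svd.setThreshold(1e-4f);
+      * Index r = svd.rank();             // values below 1e-4 * sigma_max count as zero
+      * svd.setThreshold(Eigen::Default); // restore the default formula
+      * \endcode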
+      */
+    JacobiSVD& setThreshold(const RealScalar& threshold)
+    {
+      m_usePrescribedThreshold = true;
+      m_prescribedThreshold = threshold;
+      return *this;
+    }
+
+    /** Allows reverting to the default behavior, letting Eigen use its default formula for
+      * determining the threshold.
+      *
+      * You should pass the special object Eigen::Default as parameter here.
+      * \code svd.setThreshold(Eigen::Default); \endcode
+      *
+      * See the documentation of setThreshold(const RealScalar&).
+      */
+    JacobiSVD& setThreshold(Default_t)
+    {
+      m_usePrescribedThreshold = false;
+      return *this;
+    }
+
+    /** Returns the threshold that will be used by certain methods such as rank().
+      *
+      * See the documentation of setThreshold(const RealScalar&).
+      */
+    RealScalar threshold() const
+    {
+      eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+      return m_usePrescribedThreshold ? m_prescribedThreshold
+                                      : (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon();
+    }
+
+    inline Index rows() const { return m_rows; }
+    inline Index cols() const { return m_cols; }
+
+  private:
+    void allocate(Index rows, Index cols, unsigned int computationOptions);
+    
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+
+  protected:
+    MatrixUType m_matrixU;
+    MatrixVType m_matrixV;
+    SingularValuesType m_singularValues;
+    WorkMatrixType m_workMatrix;
+    bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
+    bool m_computeFullU, m_computeThinU;
+    bool m_computeFullV, m_computeThinV;
+    unsigned int m_computationOptions;
+    Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
+    RealScalar m_prescribedThreshold;
+
+    template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
+    friend struct internal::svd_precondition_2x2_block_to_be_real;
+    template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
+    friend struct internal::qr_preconditioner_impl;
+
+    internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
+    internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
+    MatrixType m_scaledMatrix;
+};
+
+template<typename MatrixType, int QRPreconditioner>
+void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+  eigen_assert(rows >= 0 && cols >= 0);
+
+  if (m_isAllocated &&
+      rows == m_rows &&
+      cols == m_cols &&
+      computationOptions == m_computationOptions)
+  {
+    return;
+  }
+
+  m_rows = rows;
+  m_cols = cols;
+  m_isInitialized = false;
+  m_isAllocated = true;
+  m_computationOptions = computationOptions;
+  m_computeFullU = (computationOptions & ComputeFullU) != 0;
+  m_computeThinU = (computationOptions & ComputeThinU) != 0;
+  m_computeFullV = (computationOptions & ComputeFullV) != 0;
+  m_computeThinV = (computationOptions & ComputeThinV) != 0;
+  eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
+  eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
+  eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
+              "JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
+  if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
+  {
+      eigen_assert(!(m_computeThinU || m_computeThinV) &&
+              "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
+              "Use the ColPivHouseholderQR preconditioner instead.");
+  }
+  m_diagSize = (std::min)(m_rows, m_cols);
+  m_singularValues.resize(m_diagSize);
+  if(RowsAtCompileTime==Dynamic)
+    m_matrixU.resize(m_rows, m_computeFullU ? m_rows
+                            : m_computeThinU ? m_diagSize
+                            : 0);
+  if(ColsAtCompileTime==Dynamic)
+    m_matrixV.resize(m_cols, m_computeFullV ? m_cols
+                            : m_computeThinV ? m_diagSize
+                            : 0);
+  m_workMatrix.resize(m_diagSize, m_diagSize);
+  
+  if(m_cols>m_rows)   m_qr_precond_morecols.allocate(*this);
+  if(m_rows>m_cols)   m_qr_precond_morerows.allocate(*this);
+  if(m_rows!=m_cols)  m_scaledMatrix.resize(rows,cols);
+}
+
+template<typename MatrixType, int QRPreconditioner>
+JacobiSVD<MatrixType, QRPreconditioner>&
+JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
+{
+  check_template_parameters();
+  
+  using std::abs;
+  allocate(matrix.rows(), matrix.cols(), computationOptions);
+
+  // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
+  // only worsening the precision of U and V as we accumulate more rotations
+  const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
+
+  // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
+  const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
+
+  // Scaling factor to reduce over/under-flows
+  RealScalar scale = matrix.cwiseAbs().maxCoeff();
+  if(scale==RealScalar(0)) scale = RealScalar(1);
+  
+  /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
+
+  if(m_rows!=m_cols)
+  {
+    m_scaledMatrix = matrix / scale;
+    m_qr_precond_morecols.run(*this, m_scaledMatrix);
+    m_qr_precond_morerows.run(*this, m_scaledMatrix);
+  }
+  else
+  {
+    m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale;
+    if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
+    if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
+    if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
+    if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
+  }
+
+  /*** step 2. The main Jacobi SVD iteration. ***/
+
+  bool finished = false;
+  while(!finished)
+  {
+    finished = true;
+
+    // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
+
+    for(Index p = 1; p < m_diagSize; ++p)
+    {
+      for(Index q = 0; q < p; ++q)
+      {
+        // if this 2x2 sub-matrix is not diagonal already...
+        // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaNs don't
+        // keep us iterating forever. Similarly, small denormal numbers are considered zero.
+        using std::max;
+        RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
+                                                                       abs(m_workMatrix.coeff(q,q))));
+        // We compare both values to threshold instead of calling max to be robust to NaN (See bug 791)
+        if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
+        {
+          finished = false;
+
+          // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
+          internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
+          JacobiRotation<RealScalar> j_left, j_right;
+          internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
+
+          // accumulate resulting Jacobi rotations
+          m_workMatrix.applyOnTheLeft(p,q,j_left);
+          if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
+
+          m_workMatrix.applyOnTheRight(p,q,j_right);
+          if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
+        }
+      }
+    }
+  }
+
+  /*** step 3. The work matrix is now diagonal; make its diagonal entries non-negative so they are the singular values ***/
+
+  for(Index i = 0; i < m_diagSize; ++i)
+  {
+    RealScalar a = abs(m_workMatrix.coeff(i,i));
+    m_singularValues.coeffRef(i) = a;
+    if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
+  }
+
+  /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
+
+  m_nonzeroSingularValues = m_diagSize;
+  for(Index i = 0; i < m_diagSize; i++)
+  {
+    Index pos;
+    RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos);
+    if(maxRemainingSingularValue == RealScalar(0))
+    {
+      m_nonzeroSingularValues = i;
+      break;
+    }
+    if(pos)
+    {
+      pos += i;
+      std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));
+      if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));
+      if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
+    }
+  }
+  
+  m_singularValues *= scale;
+
+  m_isInitialized = true;
+  return *this;
+}
+
+namespace internal {
+template<typename _MatrixType, int QRPreconditioner, typename Rhs>
+struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+  : solve_retval_base<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+{
+  typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType;
+  EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    eigen_assert(rhs().rows() == dec().rows());
+
+    // A = U S V^*
+    // So A^{-1} = V S^{-1} U^*
+
+    Matrix<Scalar, Dynamic, Rhs::ColsAtCompileTime, 0, _MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime> tmp;
+    Index rank = dec().rank();
+    
+    tmp.noalias() = dec().matrixU().leftCols(rank).adjoint() * rhs();
+    tmp = dec().singularValues().head(rank).asDiagonal().inverse() * tmp;
+    dst = dec().matrixV().leftCols(rank) * tmp;
+  }
+};
+} // end namespace internal
+
+/** \svd_module
+  *
+  * \return the singular value decomposition of \c *this computed by two-sided
+  * Jacobi transformations.
+  *
+  * \sa class JacobiSVD
+  */
+template<typename Derived>
+JacobiSVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
+{
+  return JacobiSVD<PlainObject>(*this, computationOptions);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBISVD_H
diff --git a/Eigen/src/SVD/JacobiSVD_MKL.h b/Eigen/src/SVD/JacobiSVD_MKL.h
new file mode 100644
index 0000000..decda75
--- /dev/null
+++ b/Eigen/src/SVD/JacobiSVD_MKL.h
@@ -0,0 +1,92 @@
+/*
+ Copyright (c) 2011, Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without modification,
+ are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ********************************************************************************
+ *   Content : Eigen bindings to Intel(R) MKL
+ *    Singular Value Decomposition - SVD.
+ ********************************************************************************
+*/
+
+#ifndef EIGEN_JACOBISVD_MKL_H
+#define EIGEN_JACOBISVD_MKL_H
+
+#include "Eigen/src/Core/util/MKL_support.h"
+
+namespace Eigen { 
+
+/** \internal Specialization for the data types supported by MKL */
+
+#define EIGEN_MKL_SVD(EIGTYPE, MKLTYPE, MKLRTYPE, MKLPREFIX, EIGCOLROW, MKLCOLROW) \
+template<> inline \
+JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>& \
+JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix, unsigned int computationOptions) \
+{ \
+  typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
+  typedef MatrixType::Scalar Scalar; \
+  typedef MatrixType::RealScalar RealScalar; \
+  allocate(matrix.rows(), matrix.cols(), computationOptions); \
+\
+  /*const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();*/ \
+  m_nonzeroSingularValues = m_diagSize; \
+\
+  lapack_int lda = matrix.outerStride(), ldu, ldvt; \
+  lapack_int matrix_order = MKLCOLROW; \
+  char jobu, jobvt; \
+  MKLTYPE *u, *vt, dummy; \
+  jobu  = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \
+  jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \
+  if (computeU()) { \
+    ldu  = m_matrixU.outerStride(); \
+    u    = (MKLTYPE*)m_matrixU.data(); \
+  } else { ldu=1; u=&dummy; }\
+  MatrixType localV; \
+  ldvt = (m_computeFullV) ? m_cols : (m_computeThinV) ? m_diagSize : 1; \
+  if (computeV()) { \
+    localV.resize(ldvt, m_cols); \
+    vt   = (MKLTYPE*)localV.data(); \
+  } else { ldvt=1; vt=&dummy; }\
+  Matrix<MKLRTYPE, Dynamic, Dynamic> superb; superb.resize(m_diagSize, 1); \
+  MatrixType m_temp; m_temp = matrix; \
+  LAPACKE_##MKLPREFIX##gesvd( matrix_order, jobu, jobvt, m_rows, m_cols, (MKLTYPE*)m_temp.data(), lda, (MKLRTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \
+  if (computeV()) m_matrixV = localV.adjoint(); \
+ /* for(int i=0;i<m_diagSize;i++) if (m_singularValues.coeffRef(i) < precision) { m_nonzeroSingularValues--; m_singularValues.coeffRef(i)=RealScalar(0);}*/ \
+  m_isInitialized = true; \
+  return *this; \
+}
+
+EIGEN_MKL_SVD(double,   double,        double, d, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SVD(float,    float,         float , s, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SVD(dcomplex, MKL_Complex16, double, z, ColMajor, LAPACK_COL_MAJOR)
+EIGEN_MKL_SVD(scomplex, MKL_Complex8,  float , c, ColMajor, LAPACK_COL_MAJOR)
+
+EIGEN_MKL_SVD(double,   double,        double, d, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SVD(float,    float,         float , s, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SVD(dcomplex, MKL_Complex16, double, z, RowMajor, LAPACK_ROW_MAJOR)
+EIGEN_MKL_SVD(scomplex, MKL_Complex8,  float , c, RowMajor, LAPACK_ROW_MAJOR)
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBISVD_MKL_H
diff --git a/Eigen/src/SVD/UpperBidiagonalization.h b/Eigen/src/SVD/UpperBidiagonalization.h
new file mode 100644
index 0000000..587de37
--- /dev/null
+++ b/Eigen/src/SVD/UpperBidiagonalization.h
@@ -0,0 +1,148 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BIDIAGONALIZATION_H
+#define EIGEN_BIDIAGONALIZATION_H
+
+namespace Eigen { 
+
+namespace internal {
+// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, so we don't want to make it a stable API.
+// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class.
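+//
+// A usage sketch reconstructing A = U B V^* (hypothetical identifiers; requires rows >= cols,
+// and mirrors the pattern used in Eigen's own unit test):
+//   MatrixXd a = MatrixXd::Random(6,4);
+//   internal::UpperBidiagonalization<MatrixXd> ubd(a);
+//   MatrixXd b = MatrixXd::Zero(6,4);
+//   b.block(0,0,4,4) = ubd.bidiagonal();
+//   MatrixXd c = ubd.householderU() * b * ubd.householderV().adjoint(); // c ~ a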
+
+template<typename _MatrixType> class UpperBidiagonalization
+{
+  public:
+
+    typedef _MatrixType MatrixType;
+    enum {
+      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+      ColsAtCompileTimeMinusOne = internal::decrement_size<ColsAtCompileTime>::ret
+    };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;
+    typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
+    typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0> BidiagonalType;
+    typedef Matrix<Scalar, ColsAtCompileTime, 1> DiagVectorType;
+    typedef Matrix<Scalar, ColsAtCompileTimeMinusOne, 1> SuperDiagVectorType;
+    typedef HouseholderSequence<
+              const MatrixType,
+              CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Diagonal<const MatrixType,0> >
+            > HouseholderUSequenceType;
+    typedef HouseholderSequence<
+              const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type,
+              Diagonal<const MatrixType,1>,
+              OnTheRight
+            > HouseholderVSequenceType;
+    
+    /**
+    * \brief Default Constructor.
+    *
+    * The default constructor is useful in cases in which the user intends to
+    * perform decompositions via Bidiagonalization::compute(const MatrixType&).
+    */
+    UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {}
+
+    UpperBidiagonalization(const MatrixType& matrix)
+      : m_householder(matrix.rows(), matrix.cols()),
+        m_bidiagonal(matrix.cols(), matrix.cols()),
+        m_isInitialized(false)
+    {
+      compute(matrix);
+    }
+    
+    UpperBidiagonalization& compute(const MatrixType& matrix);
+    
+    const MatrixType& householder() const { return m_householder; }
+    const BidiagonalType& bidiagonal() const { return m_bidiagonal; }
+    
+    const HouseholderUSequenceType householderU() const
+    {
+      eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
+      return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate());
+    }
+
+    const HouseholderVSequenceType householderV() // const here gives nasty errors and I'm lazy
+    {
+      eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
+      return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())
+             .setLength(m_householder.cols()-1)
+             .setShift(1);
+    }
+    
+  protected:
+    MatrixType m_householder;
+    BidiagonalType m_bidiagonal;
+    bool m_isInitialized;
+};
+
+template<typename _MatrixType>
+UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
+{
+  Index rows = matrix.rows();
+  Index cols = matrix.cols();
+  
+  eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");
+  
+  m_householder = matrix;
+
+  ColVectorType temp(rows);
+
+  for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
+  {
+    Index remainingRows = rows - k;
+    Index remainingCols = cols - k - 1;
+
+    // construct left householder transform in-place in m_householder
+    m_householder.col(k).tail(remainingRows)
+                 .makeHouseholderInPlace(m_householder.coeffRef(k,k),
+                                         m_bidiagonal.template diagonal<0>().coeffRef(k));
+    // apply householder transform to remaining part of m_householder on the left
+    m_householder.bottomRightCorner(remainingRows, remainingCols)
+                 .applyHouseholderOnTheLeft(m_householder.col(k).tail(remainingRows-1),
+                                            m_householder.coeff(k,k),
+                                            temp.data());
+
+    if(k == cols-1) break;
+    
+    // construct right householder transform in-place in m_householder
+    m_householder.row(k).tail(remainingCols)
+                 .makeHouseholderInPlace(m_householder.coeffRef(k,k+1),
+                                         m_bidiagonal.template diagonal<1>().coeffRef(k));
+    // apply householder transform to remaining part of m_householder on the right
+    m_householder.bottomRightCorner(remainingRows-1, remainingCols)
+                 .applyHouseholderOnTheRight(m_householder.row(k).tail(remainingCols-1).transpose(),
+                                             m_householder.coeff(k,k+1),
+                                             temp.data());
+  }
+  m_isInitialized = true;
+  return *this;
+}
+
+#if 0
+/** \return the upper bidiagonal decomposition of \c *this.
+  *
+  * \sa class Bidiagonalization
+  */
+template<typename Derived>
+const UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::bidiagonalization() const
+{
+  return UpperBidiagonalization<PlainObject>(eval());
+}
+#endif
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_BIDIAGONALIZATION_H
diff --git a/Eigen/src/SparseCholesky/CMakeLists.txt b/Eigen/src/SparseCholesky/CMakeLists.txt
new file mode 100644
index 0000000..375a59d
--- /dev/null
+++ b/Eigen/src/SparseCholesky/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SparseCholesky_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_SparseCholesky_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseCholesky COMPONENT Devel
+  )
diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky.h b/Eigen/src/SparseCholesky/SimplicialCholesky.h
new file mode 100644
index 0000000..e1f96ba
--- /dev/null
+++ b/Eigen/src/SparseCholesky/SimplicialCholesky.h
@@ -0,0 +1,671 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SIMPLICIAL_CHOLESKY_H
+#define EIGEN_SIMPLICIAL_CHOLESKY_H
+
+namespace Eigen { 
+
+enum SimplicialCholeskyMode {
+  SimplicialCholeskyLLT,
+  SimplicialCholeskyLDLT
+};
+
+/** \ingroup SparseCholesky_Module
+  * \brief Direct sparse Cholesky factorizations
+  *
+  * These classes provide LL^T and LDL^T Cholesky factorizations of sparse matrices that are
+  * selfadjoint and positive definite. The factorization allows for solving A.X = B where
+  * X and B can be either dense or sparse.
+  * 
+  * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization
+  * such that the factorized matrix is P A P^-1.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  *
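+  * A hedged usage sketch (assumes \c A is a selfadjoint, positive definite
+  * SparseMatrix<double> and \c b a dense vector; identifiers are illustrative):
+  * \code
+  * SimplicialLDLT<SparseMatrix<double> > solver(A);
+  * VectorXd x;
+  * if(solver.info() == Success)
+  *   x = solver.solve(b);
+  * \endcode
+  *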
+  */
+template<typename Derived>
+class SimplicialCholeskyBase : internal::noncopyable
+{
+  public:
+    typedef typename internal::traits<Derived>::MatrixType MatrixType;
+    typedef typename internal::traits<Derived>::OrderingType OrderingType;
+    enum { UpLo = internal::traits<Derived>::UpLo };
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+    typedef Matrix<Scalar,Dynamic,1> VectorType;
+
+  public:
+
+    /** Default constructor */
+    SimplicialCholeskyBase()
+      : m_info(Success), m_isInitialized(false), m_shiftOffset(0), m_shiftScale(1)
+    {}
+
+    SimplicialCholeskyBase(const MatrixType& matrix)
+      : m_info(Success), m_isInitialized(false), m_shiftOffset(0), m_shiftScale(1)
+    {
+      derived().compute(matrix);
+    }
+
+    ~SimplicialCholeskyBase()
+    {
+    }
+
+    Derived& derived() { return *static_cast<Derived*>(this); }
+    const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    
+    inline Index cols() const { return m_matrix.cols(); }
+    inline Index rows() const { return m_matrix.rows(); }
+    
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the matrix appears to be negative.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<SimplicialCholeskyBase, Rhs>
+    solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "Simplicial LLT or LDLT is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "SimplicialCholeskyBase::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<SimplicialCholeskyBase, Rhs>(*this, b.derived());
+    }
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<SimplicialCholeskyBase, Rhs>
+    solve(const SparseMatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "Simplicial LLT or LDLT is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "SimplicialCholesky::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::sparse_solve_retval<SimplicialCholeskyBase, Rhs>(*this, b.derived());
+    }
+    
+    /** \returns the permutation P
+      * \sa permutationPinv() */
+    const PermutationMatrix<Dynamic,Dynamic,Index>& permutationP() const
+    { return m_P; }
+    
+    /** \returns the inverse P^-1 of the permutation P
+      * \sa permutationP() */
+    const PermutationMatrix<Dynamic,Dynamic,Index>& permutationPinv() const
+    { return m_Pinv; }
+
+    /** Sets the shift parameters that will be used to adjust the diagonal coefficients during the numerical factorization.
+      *
+      * During the numerical factorization, the diagonal coefficients are transformed by the following linear model:\n
+      * \c d_ii = \a offset + \a scale * \c d_ii
+      *
+      * The default is the identity transformation with \a offset=0, and \a scale=1.
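+      *
+      * For instance (hypothetical value), \code chol.setShift(1e-8); \endcode adds a small
+      * diagonal regularization before the numeric factorization.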
+      *
+      * \returns a reference to \c *this.
+      */
+    Derived& setShift(const RealScalar& offset, const RealScalar& scale = 1)
+    {
+      m_shiftOffset = offset;
+      m_shiftScale = scale;
+      return derived();
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal */
+    template<typename Stream>
+    void dumpMemory(Stream& s)
+    {
+      int total = 0;
+      s << "  L:        " << ((total+=(m_matrix.cols()+1) * sizeof(int) + m_matrix.nonZeros()*(sizeof(int)+sizeof(Scalar))) >> 20) << "Mb" << "\n";
+      s << "  diag:     " << ((total+=m_diag.size() * sizeof(Scalar)) >> 20) << "Mb" << "\n";
+      s << "  tree:     " << ((total+=m_parent.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+      s << "  nonzeros: " << ((total+=m_nonZerosPerCol.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+      s << "  perm:     " << ((total+=m_P.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+      s << "  perm^-1:  " << ((total+=m_Pinv.size() * sizeof(int)) >> 20) << "Mb" << "\n";
+      s << "  TOTAL:    " << (total>> 20) << "Mb" << "\n";
+    }
+
+    /** \internal */
+    template<typename Rhs,typename Dest>
+    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+    {
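+      // Applies x = P^-1 * U^-1 * D^-1 * L^-1 * P * b, skipping factors that are the identity.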
+      eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+      eigen_assert(m_matrix.rows()==b.rows());
+
+      if(m_info!=Success)
+        return;
+
+      if(m_P.size()>0)
+        dest = m_P * b;
+      else
+        dest = b;
+
+      if(m_matrix.nonZeros()>0) // otherwise L==I
+        derived().matrixL().solveInPlace(dest);
+
+      if(m_diag.size()>0)
+        dest = m_diag.asDiagonal().inverse() * dest;
+
+      if (m_matrix.nonZeros()>0) // otherwise U==I
+        derived().matrixU().solveInPlace(dest);
+
+      if(m_P.size()>0)
+        dest = m_Pinv * dest;
+    }
+
+#endif // EIGEN_PARSED_BY_DOXYGEN
+
+  protected:
+    
+    /** Computes the sparse Cholesky decomposition of \a matrix */
+    template<bool DoLDLT>
+    void compute(const MatrixType& matrix)
+    {
+      eigen_assert(matrix.rows()==matrix.cols());
+      Index size = matrix.cols();
+      CholMatrixType ap(size,size);
+      ordering(matrix, ap);
+      analyzePattern_preordered(ap, DoLDLT);
+      factorize_preordered<DoLDLT>(ap);
+    }
+    
+    template<bool DoLDLT>
+    void factorize(const MatrixType& a)
+    {
+      eigen_assert(a.rows()==a.cols());
+      int size = a.cols();
+      CholMatrixType ap(size,size);
+      ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_P);
+      factorize_preordered<DoLDLT>(ap);
+    }
+
+    template<bool DoLDLT>
+    void factorize_preordered(const CholMatrixType& a);
+
+    void analyzePattern(const MatrixType& a, bool doLDLT)
+    {
+      eigen_assert(a.rows()==a.cols());
+      int size = a.cols();
+      CholMatrixType ap(size,size);
+      ordering(a, ap);
+      analyzePattern_preordered(ap,doLDLT);
+    }
+    void analyzePattern_preordered(const CholMatrixType& a, bool doLDLT);
+    
+    void ordering(const MatrixType& a, CholMatrixType& ap);
+
+    /** keeps off-diagonal entries; drops diagonal entries */
+    struct keep_diag {
+      inline bool operator() (const Index& row, const Index& col, const Scalar&) const
+      {
+        return row!=col;
+      }
+    };
+
+    mutable ComputationInfo m_info;
+    bool m_isInitialized;
+    bool m_factorizationIsOk;
+    bool m_analysisIsOk;
+    
+    CholMatrixType m_matrix;
+    VectorType m_diag;                                // the diagonal coefficients (LDLT mode)
+    VectorXi m_parent;                                // elimination tree
+    VectorXi m_nonZerosPerCol;
+    PermutationMatrix<Dynamic,Dynamic,Index> m_P;     // the permutation
+    PermutationMatrix<Dynamic,Dynamic,Index> m_Pinv;  // the inverse permutation
+
+    RealScalar m_shiftOffset;
+    RealScalar m_shiftScale;
+};
+
+template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLLT;
+template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialLDLT;
+template<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::Index> > class SimplicialCholesky;
+
+namespace internal {
+
+template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >
+{
+  typedef _MatrixType MatrixType;
+  typedef _Ordering OrderingType;
+  enum { UpLo = _UpLo };
+  typedef typename MatrixType::Scalar                         Scalar;
+  typedef typename MatrixType::Index                          Index;
+  typedef SparseMatrix<Scalar, ColMajor, Index>               CholMatrixType;
+  typedef SparseTriangularView<CholMatrixType, Eigen::Lower>  MatrixL;
+  typedef SparseTriangularView<typename CholMatrixType::AdjointReturnType, Eigen::Upper>   MatrixU;
+  static inline MatrixL getL(const MatrixType& m) { return m; }
+  static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+};
+
+template<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
+{
+  typedef _MatrixType MatrixType;
+  typedef _Ordering OrderingType;
+  enum { UpLo = _UpLo };
+  typedef typename MatrixType::Scalar                             Scalar;
+  typedef typename MatrixType::Index                              Index;
+  typedef SparseMatrix<Scalar, ColMajor, Index>                   CholMatrixType;
+  typedef SparseTriangularView<CholMatrixType, Eigen::UnitLower>  MatrixL;
+  typedef SparseTriangularView<typename CholMatrixType::AdjointReturnType, Eigen::UnitUpper> MatrixU;
+  static inline MatrixL getL(const MatrixType& m) { return m; }
+  static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+};
+
+template<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
+{
+  typedef _MatrixType MatrixType;
+  typedef _Ordering OrderingType;
+  enum { UpLo = _UpLo };
+};
+
+}
+
+/** \ingroup SparseCholesky_Module
+  * \class SimplicialLLT
+  * \brief A direct sparse LLT Cholesky factorization
+  *
+  * This class provides an LL^T Cholesky factorization of sparse matrices that are
+  * selfadjoint and positive definite. The factorization allows for solving A.X = B where
+  * X and B can be either dense or sparse.
+  * 
+  * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization
+  * such that the factorized matrix is P A P^-1.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<>
+  *
+  * \sa class SimplicialLDLT, class AMDOrdering, class NaturalOrdering
+  */
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+    class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >
+{
+public:
+    typedef _MatrixType MatrixType;
+    enum { UpLo = _UpLo };
+    typedef SimplicialCholeskyBase<SimplicialLLT> Base;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+    typedef Matrix<Scalar,Dynamic,1> VectorType;
+    typedef internal::traits<SimplicialLLT> Traits;
+    typedef typename Traits::MatrixL  MatrixL;
+    typedef typename Traits::MatrixU  MatrixU;
+public:
+    /** Default constructor */
+    SimplicialLLT() : Base() {}
+    /** Constructs and performs the LLT factorization of \a matrix */
+    SimplicialLLT(const MatrixType& matrix)
+        : Base(matrix) {}
+
+    /** \returns an expression of the factor L */
+    inline const MatrixL matrixL() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized");
+        return Traits::getL(Base::m_matrix);
+    }
+
+    /** \returns an expression of the factor U (= L^*) */
+    inline const MatrixU matrixU() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial LLT not factorized");
+        return Traits::getU(Base::m_matrix);
+    }
+    
+    /** Computes the sparse Cholesky decomposition of \a matrix */
+    SimplicialLLT& compute(const MatrixType& matrix)
+    {
+      Base::template compute<false>(matrix);
+      return *this;
+    }
+
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      *
+      * \sa factorize()
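+      *
+      * A hypothetical reuse pattern:
+      * \code
+      * llt.analyzePattern(A);  // symbolic phase, done once per sparsity pattern
+      * llt.factorize(A);       // numeric phase
+      * llt.factorize(A2);      // A2 must have the same sparsity pattern as A
+      * \endcode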
+      */
+    void analyzePattern(const MatrixType& a)
+    {
+      Base::analyzePattern(a, false);
+    }
+
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& a)
+    {
+      Base::template factorize<false>(a);
+    }
+
+    /** \returns the determinant of the underlying matrix from the current factorization */
+    Scalar determinant() const
+    {
+      Scalar detL = Base::m_matrix.diagonal().prod();
+      return numext::abs2(detL);
+    }
+};
+
+/** \ingroup SparseCholesky_Module
+  * \class SimplicialLDLT
+  * \brief A direct sparse LDLT Cholesky factorization without square root.
+  *
+  * This class provides an LDL^T Cholesky factorization without square root of sparse matrices that are
+  * selfadjoint and positive definite. The factorization allows for solving A.X = B where
+  * X and B can be either dense or sparse.
+  * 
+  * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization
+  * such that the factorized matrix is P A P^-1.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+  *               or Upper. Default is Lower.
+  * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<>
+  *
+  * \sa class SimplicialLLT, class AMDOrdering, class NaturalOrdering
+  */
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+    class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >
+{
+public:
+    typedef _MatrixType MatrixType;
+    enum { UpLo = _UpLo };
+    typedef SimplicialCholeskyBase<SimplicialLDLT> Base;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+    typedef Matrix<Scalar,Dynamic,1> VectorType;
+    typedef internal::traits<SimplicialLDLT> Traits;
+    typedef typename Traits::MatrixL  MatrixL;
+    typedef typename Traits::MatrixU  MatrixU;
+public:
+    /** Default constructor */
+    SimplicialLDLT() : Base() {}
+
+    /** Constructs and performs the LDLT factorization of \a matrix */
+    SimplicialLDLT(const MatrixType& matrix)
+        : Base(matrix) {}
+
+    /** \returns a vector expression of the diagonal D */
+    inline const VectorType vectorD() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
+        return Base::m_diag;
+    }
+    /** \returns an expression of the factor L */
+    inline const MatrixL matrixL() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
+        return Traits::getL(Base::m_matrix);
+    }
+
+    /** \returns an expression of the factor U (= L^*) */
+    inline const MatrixU matrixU() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial LDLT not factorized");
+        return Traits::getU(Base::m_matrix);
+    }
+
+    /** Computes the sparse Cholesky decomposition of \a matrix */
+    SimplicialLDLT& compute(const MatrixType& matrix)
+    {
+      Base::template compute<true>(matrix);
+      return *this;
+    }
+    
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      *
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& a)
+    {
+      Base::analyzePattern(a, true);
+    }
+
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& a)
+    {
+      Base::template factorize<true>(a);
+    }
+
+    /** \returns the determinant of the underlying matrix from the current factorization */
+    Scalar determinant() const
+    {
+      return Base::m_diag.prod();
+    }
+};
+
+/** \deprecated use class SimplicialLDLT or class SimplicialLLT
+  * \ingroup SparseCholesky_Module
+  * \class SimplicialCholesky
+  *
+  * \sa class SimplicialLDLT, class SimplicialLLT
+  */
+template<typename _MatrixType, int _UpLo, typename _Ordering>
+    class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >
+{
+public:
+    typedef _MatrixType MatrixType;
+    enum { UpLo = _UpLo };
+    typedef SimplicialCholeskyBase<SimplicialCholesky> Base;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;
+    typedef Matrix<Scalar,Dynamic,1> VectorType;
+    typedef internal::traits<SimplicialCholesky> Traits;
+    typedef internal::traits<SimplicialLDLT<MatrixType,UpLo> > LDLTTraits;
+    typedef internal::traits<SimplicialLLT<MatrixType,UpLo>  > LLTTraits;
+  public:
+    SimplicialCholesky() : Base(), m_LDLT(true) {}
+
+    SimplicialCholesky(const MatrixType& matrix)
+      : Base(), m_LDLT(true)
+    {
+      compute(matrix);
+    }
+
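+    /** Selects which factorization compute() and factorize() will perform:
+      * LLT or LDLT (the default).
+      * For example (illustrative snippet): \code chol.setMode(SimplicialCholeskyLLT); chol.compute(A); \endcode
+      */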
+    SimplicialCholesky& setMode(SimplicialCholeskyMode mode)
+    {
+      switch(mode)
+      {
+      case SimplicialCholeskyLLT:
+        m_LDLT = false;
+        break;
+      case SimplicialCholeskyLDLT:
+        m_LDLT = true;
+        break;
+      default:
+        break;
+      }
+
+      return *this;
+    }
+
+    inline const VectorType vectorD() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized");
+        return Base::m_diag;
+    }
+    inline const CholMatrixType rawMatrix() const {
+        eigen_assert(Base::m_factorizationIsOk && "Simplicial Cholesky not factorized");
+        return Base::m_matrix;
+    }
+    
+    /** Computes the sparse Cholesky decomposition of \a matrix */
+    SimplicialCholesky& compute(const MatrixType& matrix)
+    {
+      if(m_LDLT)
+        Base::template compute<true>(matrix);
+      else
+        Base::template compute<false>(matrix);
+      return *this;
+    }
+
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      *
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& a)
+    {
+      Base::analyzePattern(a, m_LDLT);
+    }
+
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& a)
+    {
+      if(m_LDLT)
+        Base::template factorize<true>(a);
+      else
+        Base::template factorize<false>(a);
+    }
+
+    /** \internal */
+    template<typename Rhs,typename Dest>
+    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
+    {
+      eigen_assert(Base::m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
+      eigen_assert(Base::m_matrix.rows()==b.rows());
+
+      if(Base::m_info!=Success)
+        return;
+
+      if(Base::m_P.size()>0)
+        dest = Base::m_P * b;
+      else
+        dest = b;
+
+      if(Base::m_matrix.nonZeros()>0) // otherwise L==I
+      {
+        if(m_LDLT)
+          LDLTTraits::getL(Base::m_matrix).solveInPlace(dest);
+        else
+          LLTTraits::getL(Base::m_matrix).solveInPlace(dest);
+      }
+
+      if(Base::m_diag.size()>0)
+        dest = Base::m_diag.asDiagonal().inverse() * dest;
+
+      if (Base::m_matrix.nonZeros()>0) // otherwise U==I
+      {
+        if(m_LDLT)
+          LDLTTraits::getU(Base::m_matrix).solveInPlace(dest);
+        else
+          LLTTraits::getU(Base::m_matrix).solveInPlace(dest);
+      }
+
+      if(Base::m_P.size()>0)
+        dest = Base::m_Pinv * dest;
+    }
+    
+    Scalar determinant() const
+    {
+      if(m_LDLT)
+      {
+        return Base::m_diag.prod();
+      }
+      else
+      {
+        Scalar detL = Diagonal<const CholMatrixType>(Base::m_matrix).prod();
+        return numext::abs2(detL);
+      }
+    }
+    
+  protected:
+    bool m_LDLT;
+};
+
+template<typename Derived>
+void SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, CholMatrixType& ap)
+{
+  eigen_assert(a.rows()==a.cols());
+  const Index size = a.rows();
+  // Note that AMD computes the inverse permutation
+  {
+    CholMatrixType C;
+    C = a.template selfadjointView<UpLo>();
+    
+    OrderingType ordering;
+    ordering(C,m_Pinv);
+  }
+
+  if(m_Pinv.size()>0)
+    m_P = m_Pinv.inverse();
+  else
+    m_P.resize(0);
+
+  ap.resize(size,size);
+  ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_P);
+}
+
+namespace internal {
+  
+template<typename Derived, typename Rhs>
+struct solve_retval<SimplicialCholeskyBase<Derived>, Rhs>
+  : solve_retval_base<SimplicialCholeskyBase<Derived>, Rhs>
+{
+  typedef SimplicialCholeskyBase<Derived> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec().derived()._solve(rhs(),dst);
+  }
+};
+
+template<typename Derived, typename Rhs>
+struct sparse_solve_retval<SimplicialCholeskyBase<Derived>, Rhs>
+  : sparse_solve_retval_base<SimplicialCholeskyBase<Derived>, Rhs>
+{
+  typedef SimplicialCholeskyBase<Derived> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    this->defaultEvalTo(dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SIMPLICIAL_CHOLESKY_H
diff --git a/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
new file mode 100644
index 0000000..7aaf702
--- /dev/null
+++ b/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
@@ -0,0 +1,199 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+
+/*
+
+NOTE: these functions have been adapted from the LDL library:
+
+LDL Copyright (c) 2005 by Timothy A. Davis.  All Rights Reserved.
+
+LDL License:
+
+    Your use or distribution of LDL or any modified version of
+    LDL implies that you agree to this License.
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301
+    USA
+
+    Permission is hereby granted to use or copy this program under the
+    terms of the GNU LGPL, provided that the Copyright, this License,
+    and the Availability of the original version is retained on all copies.
+    User documentation of any code that uses this code or any modified
+    version of this code must cite the Copyright, this License, the
+    Availability note, and "Used by permission." Permission to modify
+    the code and to distribute modified code is granted, provided the
+    Copyright, this License, and the Availability note are retained,
+    and a notice that the code was modified is included.
+ */
+
+#include "../Core/util/NonMPL2.h"
+
+#ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
+#define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
+
+namespace Eigen {
+
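+// Symbolic analysis (adapted from LDL): a single pass over the upper triangle of the
+// permuted matrix builds the elimination tree in m_parent and counts the nonzeros of
+// each column of L.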
+template<typename Derived>
+void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT)
+{
+  const Index size = ap.rows();
+  m_matrix.resize(size, size);
+  m_parent.resize(size);
+  m_nonZerosPerCol.resize(size);
+
+  ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
+
+  for(Index k = 0; k < size; ++k)
+  {
+    /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
+    m_parent[k] = -1;             /* parent of k is not yet known */
+    tags[k] = k;                  /* mark node k as visited */
+    m_nonZerosPerCol[k] = 0;      /* count of nonzeros in column k of L */
+    for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
+    {
+      Index i = it.index();
+      if(i < k)
+      {
+        /* follow path from i to root of etree, stop at flagged node */
+        for(; tags[i] != k; i = m_parent[i])
+        {
+          /* find parent of i if not yet determined */
+          if (m_parent[i] == -1)
+            m_parent[i] = k;
+          m_nonZerosPerCol[i]++;        /* L (k,i) is nonzero */
+          tags[i] = k;                  /* mark i as visited */
+        }
+      }
+    }
+  }
+
+  /* construct Lp index array from m_nonZerosPerCol column counts */
+  Index* Lp = m_matrix.outerIndexPtr();
+  Lp[0] = 0;
+  for(Index k = 0; k < size; ++k)
+    Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1);
+
+  m_matrix.resizeNonZeros(Lp[size]);
+
+  m_isInitialized     = true;
+  m_info              = Success;
+  m_analysisIsOk      = true;
+  m_factorizationIsOk = false;
+}
+
+
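+// Numeric phase (up-looking factorization, adapted from LDL): for each column k, the
+// pattern of row k of L is gathered from the elimination tree, a sparse triangular
+// solve computes the new entries, and D(k,k) (LDLT) or L(k,k) = sqrt(d) (LLT) is set.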
+template<typename Derived>
+template<bool DoLDLT>
+void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap)
+{
+  using std::sqrt;
+
+  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+  eigen_assert(ap.rows()==ap.cols());
+  const Index size = ap.rows();
+  eigen_assert(m_parent.size()==size);
+  eigen_assert(m_nonZerosPerCol.size()==size);
+
+  const Index* Lp = m_matrix.outerIndexPtr();
+  Index* Li = m_matrix.innerIndexPtr();
+  Scalar* Lx = m_matrix.valuePtr();
+
+  ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0);
+  ei_declare_aligned_stack_constructed_variable(Index,  pattern, size, 0);
+  ei_declare_aligned_stack_constructed_variable(Index,  tags, size, 0);
+
+  bool ok = true;
+  m_diag.resize(DoLDLT ? size : 0);
+
+  for(Index k = 0; k < size; ++k)
+  {
+    // compute nonzero pattern of kth row of L, in topological order
+    y[k] = 0.0;                     // Y(0:k) is now all zero
+    Index top = size;               // stack for pattern is empty
+    tags[k] = k;                    // mark node k as visited
+    m_nonZerosPerCol[k] = 0;        // count of nonzeros in column k of L
+    for(typename MatrixType::InnerIterator it(ap,k); it; ++it)
+    {
+      Index i = it.index();
+      if(i <= k)
+      {
+        y[i] += numext::conj(it.value());            /* scatter A(i,k) into Y (sum duplicates) */
+        Index len;
+        for(len = 0; tags[i] != k; i = m_parent[i])
+        {
+          pattern[len++] = i;     /* L(k,i) is nonzero */
+          tags[i] = k;            /* mark i as visited */
+        }
+        while(len > 0)
+          pattern[--top] = pattern[--len];
+      }
+    }
+
+    /* compute numerical values kth row of L (a sparse triangular solve) */
+
+    RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset;    // get D(k,k), apply the shift function, and clear Y(k)
+    y[k] = 0.0;
+    for(; top < size; ++top)
+    {
+      Index i = pattern[top];       /* pattern[top:n-1] is pattern of L(:,k) */
+      Scalar yi = y[i];             /* get and clear Y(i) */
+      y[i] = 0.0;
+
+      /* the nonzero entry L(k,i) */
+      Scalar l_ki;
+      if(DoLDLT)
+        l_ki = yi / m_diag[i];
+      else
+        yi = l_ki = yi / Lx[Lp[i]];
+
+      Index p2 = Lp[i] + m_nonZerosPerCol[i];
+      Index p;
+      for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p)
+        y[Li[p]] -= numext::conj(Lx[p]) * yi;
+      d -= numext::real(l_ki * numext::conj(yi));
+      Li[p] = k;                          /* store L(k,i) in column form of L */
+      Lx[p] = l_ki;
+      ++m_nonZerosPerCol[i];              /* increment count of nonzeros in col i */
+    }
+    if(DoLDLT)
+    {
+      m_diag[k] = d;
+      if(d == RealScalar(0))
+      {
+        ok = false;                         /* failure, D(k,k) is zero */
+        break;
+      }
+    }
+    else
+    {
+      Index p = Lp[k] + m_nonZerosPerCol[k]++;
+      Li[p] = k ;                /* store L(k,k) = sqrt (d) in column k */
+      if(d <= RealScalar(0)) {
+        ok = false;              /* failure, matrix is not positive definite */
+        break;
+      }
+      Lx[p] = sqrt(d) ;
+    }
+  }
+
+  m_info = ok ? Success : NumericalIssue;
+  m_factorizationIsOk = true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
diff --git a/Eigen/src/SparseCore/AmbiVector.h b/Eigen/src/SparseCore/AmbiVector.h
new file mode 100644
index 0000000..220c645
--- /dev/null
+++ b/Eigen/src/SparseCore/AmbiVector.h
@@ -0,0 +1,373 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_AMBIVECTOR_H
+#define EIGEN_AMBIVECTOR_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \internal
+  * Hybrid sparse/dense vector class designed for intensive read-write operations.
+  *
+  * See BasicSparseLLT and SparseProduct for usage examples.
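+  *
+  * The vector switches between a dense buffer and a linked-list (sparse) representation:
+  * init(double) picks the dense mode when the estimated density exceeds 0.1, and the
+  * sparse mode otherwise.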
+  */
+template<typename _Scalar, typename _Index>
+class AmbiVector
+{
+  public:
+    typedef _Scalar Scalar;
+    typedef _Index Index;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    AmbiVector(Index size)
+      : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
+    {
+      resize(size);
+    }
+
+    void init(double estimatedDensity);
+    void init(int mode);
+
+    Index nonZeros() const;
+
+    /** Specifies a sub-vector to work on */
+    void setBounds(Index start, Index end) { m_start = start; m_end = end; }
+
+    void setZero();
+
+    void restart();
+    Scalar& coeffRef(Index i);
+    Scalar& coeff(Index i);
+
+    class Iterator;
+
+    ~AmbiVector() { delete[] m_buffer; }
+
+    void resize(Index size)
+    {
+      if (m_allocatedSize < size)
+        reallocate(size);
+      m_size = size;
+    }
+
+    Index size() const { return m_size; }
+
+  protected:
+
+    void reallocate(Index size)
+    {
+      // if the size of the matrix is not too large, let's allocate a bit more than needed such
+      // that we can handle a dense vector even in sparse mode.
+      delete[] m_buffer;
+      if (size<1000)
+      {
+        Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
+        m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
+        m_buffer = new Scalar[allocSize];
+      }
+      else
+      {
+        m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl);
+        m_buffer = new Scalar[size];
+      }
+      m_size = size;
+      m_start = 0;
+      m_end = m_size;
+    }
+
+    void reallocateSparse()
+    {
+      Index copyElements = m_allocatedElements;
+      m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
+      Index allocSize = m_allocatedElements * sizeof(ListEl);
+      allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
+      Scalar* newBuffer = new Scalar[allocSize];
+      memcpy(newBuffer,  m_buffer,  copyElements * sizeof(ListEl));
+      delete[] m_buffer;
+      m_buffer = newBuffer;
+    }
+
+  protected:
+    // element type of the linked list
+    struct ListEl
+    {
+      Index next;
+      Index index;
+      Scalar value;
+    };
+
+    // used to store data in both modes
+    Scalar* m_buffer;
+    Scalar m_zero;
+    Index m_size;
+    Index m_start;
+    Index m_end;
+    Index m_allocatedSize;
+    Index m_allocatedElements;
+    Index m_mode;
+
+    // linked list mode
+    Index m_llStart;
+    Index m_llCurrent;
+    Index m_llSize;
+};
+
+/** \returns the number of non zeros in the current sub vector */
+template<typename _Scalar,typename _Index>
+_Index AmbiVector<_Scalar,_Index>::nonZeros() const
+{
+  if (m_mode==IsSparse)
+    return m_llSize;
+  else
+    return m_end - m_start;
+}
+
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
+{
+  if (estimatedDensity>0.1)
+    init(IsDense);
+  else
+    init(IsSparse);
+}
+
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(int mode)
+{
+  m_mode = mode;
+  if (m_mode==IsSparse)
+  {
+    m_llSize = 0;
+    m_llStart = -1;
+  }
+}
+
+/** Must be called whenever we might perform a write access
+  * with an index smaller than the previous one.
+  *
+  * Don't worry, this function is extremely cheap.
+  */
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::restart()
+{
+  m_llCurrent = m_llStart;
+}
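+
+// For instance (sketch): after writing coeffRef(2) and then coeffRef(9) in
+// sparse mode, a later write to coeffRef(4) must be preceded by restart(),
+// since 4 is smaller than the last written index.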
+
+/** Set all coefficients of current subvector to zero */
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::setZero()
+{
+  if (m_mode==IsDense)
+  {
+    for (Index i=m_start; i<m_end; ++i)
+      m_buffer[i] = Scalar(0);
+  }
+  else
+  {
+    eigen_assert(m_mode==IsSparse);
+    m_llSize = 0;
+    m_llStart = -1;
+  }
+}
+
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
+{
+  if (m_mode==IsDense)
+    return m_buffer[i];
+  else
+  {
+    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+    // TODO factorize the following code to reduce code generation
+    eigen_assert(m_mode==IsSparse);
+    if (m_llSize==0)
+    {
+      // this is the first element
+      m_llStart = 0;
+      m_llCurrent = 0;
+      ++m_llSize;
+      llElements[0].value = Scalar(0);
+      llElements[0].index = i;
+      llElements[0].next = -1;
+      return llElements[0].value;
+    }
+    else if (i<llElements[m_llStart].index)
+    {
+      // this is going to be the new first element of the list
+      ListEl& el = llElements[m_llSize];
+      el.value = Scalar(0);
+      el.index = i;
+      el.next = m_llStart;
+      m_llStart = m_llSize;
+      ++m_llSize;
+      m_llCurrent = m_llStart;
+      return el.value;
+    }
+    else
+    {
+      Index nextel = llElements[m_llCurrent].next;
+      eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
+      while (nextel >= 0 && llElements[nextel].index<=i)
+      {
+        m_llCurrent = nextel;
+        nextel = llElements[nextel].next;
+      }
+
+      if (llElements[m_llCurrent].index==i)
+      {
+        // the coefficient already exists and we found it !
+        return llElements[m_llCurrent].value;
+      }
+      else
+      {
+        if (m_llSize>=m_allocatedElements)
+        {
+          reallocateSparse();
+          llElements = reinterpret_cast<ListEl*>(m_buffer);
+        }
+        eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
+        // let's insert a new coefficient
+        ListEl& el = llElements[m_llSize];
+        el.value = Scalar(0);
+        el.index = i;
+        el.next = llElements[m_llCurrent].next;
+        llElements[m_llCurrent].next = m_llSize;
+        ++m_llSize;
+        return el.value;
+      }
+    }
+  }
+}
+
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
+{
+  if (m_mode==IsDense)
+    return m_buffer[i];
+  else
+  {
+    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+    eigen_assert(m_mode==IsSparse);
+    if ((m_llSize==0) || (i<llElements[m_llStart].index))
+    {
+      return m_zero;
+    }
+    else
+    {
+      Index elid = m_llStart;
+      while (elid >= 0 && llElements[elid].index<i)
+        elid = llElements[elid].next;
+
+      if (elid >= 0 && llElements[elid].index==i)
+        return llElements[elid].value; // return the coefficient found at position elid
+      else
+        return m_zero;
+    }
+  }
+}
+
+/** Iterator over the nonzero coefficients */
+template<typename _Scalar,typename _Index>
+class AmbiVector<_Scalar,_Index>::Iterator
+{
+  public:
+    typedef _Scalar Scalar;
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    /** Default constructor
+      * \param vec the vector on which we iterate
+      * \param epsilon the minimal value used to prune zero coefficients.
+      * In practice, all coefficients having a magnitude smaller than \a epsilon
+      * are skipped.
+      */
+    Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
+      : m_vector(vec)
+    {
+      using std::abs;
+      m_epsilon = epsilon;
+      m_isDense = m_vector.m_mode==IsDense;
+      if (m_isDense)
+      {
+        m_currentEl = 0;   // this is to avoid a compilation warning
+        m_cachedValue = 0; // this is to avoid a compilation warning
+        m_cachedIndex = m_vector.m_start-1;
+        ++(*this);
+      }
+      else
+      {
+        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+        m_currentEl = m_vector.m_llStart;
+        while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
+          m_currentEl = llElements[m_currentEl].next;
+        if (m_currentEl<0)
+        {
+          m_cachedValue = 0; // this is to avoid a compilation warning
+          m_cachedIndex = -1;
+        }
+        else
+        {
+          m_cachedIndex = llElements[m_currentEl].index;
+          m_cachedValue = llElements[m_currentEl].value;
+        }
+      }
+    }
+
+    Index index() const { return m_cachedIndex; }
+    Scalar value() const { return m_cachedValue; }
+
+    operator bool() const { return m_cachedIndex>=0; }
+
+    Iterator& operator++()
+    {
+      using std::abs;
+      if (m_isDense)
+      {
+        do {
+          ++m_cachedIndex;
+        } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
+        if (m_cachedIndex<m_vector.m_end)
+          m_cachedValue = m_vector.m_buffer[m_cachedIndex];
+        else
+          m_cachedIndex=-1;
+      }
+      else
+      {
+        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+        do {
+          m_currentEl = llElements[m_currentEl].next;
+        } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<m_epsilon);
+        if (m_currentEl<0)
+        {
+          m_cachedIndex = -1;
+        }
+        else
+        {
+          m_cachedIndex = llElements[m_currentEl].index;
+          m_cachedValue = llElements[m_currentEl].value;
+        }
+      }
+      return *this;
+    }
+
+  protected:
+    const AmbiVector& m_vector; // the target vector
+    Index m_currentEl;            // the current element in sparse/linked-list mode
+    RealScalar m_epsilon;       // epsilon used to prune zero coefficients
+    Index m_cachedIndex;          // current coordinate
+    Scalar m_cachedValue;       // current value
+    bool m_isDense;             // mode of the vector
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_AMBIVECTOR_H
diff --git a/Eigen/src/SparseCore/CMakeLists.txt b/Eigen/src/SparseCore/CMakeLists.txt
new file mode 100644
index 0000000..d860452
--- /dev/null
+++ b/Eigen/src/SparseCore/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SparseCore_SRCS "*.h")
+
+INSTALL(FILES 
+  ${Eigen_SparseCore_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseCore COMPONENT Devel
+  )
diff --git a/Eigen/src/SparseCore/CompressedStorage.h b/Eigen/src/SparseCore/CompressedStorage.h
new file mode 100644
index 0000000..a667cb5
--- /dev/null
+++ b/Eigen/src/SparseCore/CompressedStorage.h
@@ -0,0 +1,233 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPRESSED_STORAGE_H
+#define EIGEN_COMPRESSED_STORAGE_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \internal
+  * Stores a sparse set of values as two parallel arrays: one of values and one of the corresponding sorted indices.
+  *
+  */
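+//
+// Illustration (sketch): the two arrays are kept aligned and index-sorted, e.g.
+//   m_indices = [1, 4, 7]
+//   m_values  = [0.5, -2.0, 3.0]
+// encodes a sparse vector whose only nonzeros are v[1]=0.5, v[4]=-2.0, v[7]=3.0.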
+template<typename _Scalar,typename _Index>
+class CompressedStorage
+{
+  public:
+
+    typedef _Scalar Scalar;
+    typedef _Index Index;
+
+  protected:
+
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+  public:
+
+    CompressedStorage()
+      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+    {}
+
+    CompressedStorage(size_t size)
+      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+    {
+      resize(size);
+    }
+
+    CompressedStorage(const CompressedStorage& other)
+      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+    {
+      *this = other;
+    }
+
+    CompressedStorage& operator=(const CompressedStorage& other)
+    {
+      resize(other.size());
+      internal::smart_copy(other.m_values,  other.m_values  + m_size, m_values);
+      internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
+      return *this;
+    }
+
+    void swap(CompressedStorage& other)
+    {
+      std::swap(m_values, other.m_values);
+      std::swap(m_indices, other.m_indices);
+      std::swap(m_size, other.m_size);
+      std::swap(m_allocatedSize, other.m_allocatedSize);
+    }
+
+    ~CompressedStorage()
+    {
+      delete[] m_values;
+      delete[] m_indices;
+    }
+
+    void reserve(size_t size)
+    {
+      size_t newAllocatedSize = m_size + size;
+      if (newAllocatedSize > m_allocatedSize)
+        reallocate(newAllocatedSize);
+    }
+
+    void squeeze()
+    {
+      if (m_allocatedSize>m_size)
+        reallocate(m_size);
+    }
+
+    void resize(size_t size, double reserveSizeFactor = 0)
+    {
+      if (m_allocatedSize<size)
+        reallocate(size + size_t(reserveSizeFactor*double(size)));
+      m_size = size;
+    }
+
+    void append(const Scalar& v, Index i)
+    {
+      Index id = static_cast<Index>(m_size);
+      resize(m_size+1, 1);
+      m_values[id] = v;
+      m_indices[id] = i;
+    }
+
+    inline size_t size() const { return m_size; }
+    inline size_t allocatedSize() const { return m_allocatedSize; }
+    inline void clear() { m_size = 0; }
+
+    inline Scalar& value(size_t i) { return m_values[i]; }
+    inline const Scalar& value(size_t i) const { return m_values[i]; }
+
+    inline Index& index(size_t i) { return m_indices[i]; }
+    inline const Index& index(size_t i) const { return m_indices[i]; }
+
+    static CompressedStorage Map(Index* indices, Scalar* values, size_t size)
+    {
+      CompressedStorage res;
+      res.m_indices = indices;
+      res.m_values = values;
+      res.m_allocatedSize = res.m_size = size;
+      return res;
+    }
+
+    /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
+    inline Index searchLowerIndex(Index key) const
+    {
+      return searchLowerIndex(0, m_size, key);
+    }
+
+    /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
+    inline Index searchLowerIndex(size_t start, size_t end, Index key) const
+    {
+      while(end>start)
+      {
+        size_t mid = (end+start)>>1;
+        if (m_indices[mid]<key)
+          start = mid+1;
+        else
+          end = mid;
+      }
+      return static_cast<Index>(start);
+    }
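+
+    // Worked example (sketch): with m_indices = [1, 4, 7], searchLowerIndex(5)
+    // returns 2 (entries 1 and 4 are < 5), searchLowerIndex(1) returns 0, and
+    // any key greater than 7 returns m_size = 3.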
+
+    /** \returns the stored value at index \a key
+      * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
+    inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const
+    {
+      if (m_size==0)
+        return defaultValue;
+      else if (key==m_indices[m_size-1])
+        return m_values[m_size-1];
+      // ^^  optimization: let's first check if it is the last coefficient
+      // (very common in high level algorithms)
+      const size_t id = searchLowerIndex(0,m_size-1,key);
+      return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+    }
+
+    /** Like at(), but the search is performed in the range [start,end) */
+    inline Scalar atInRange(size_t start, size_t end, Index key, const Scalar& defaultValue = Scalar(0)) const
+    {
+      if (start>=end)
+        return defaultValue;
+      else if (end>start && key==m_indices[end-1])
+        return m_values[end-1];
+      // ^^  optimization: let's first check if it is the last coefficient
+      // (very common in high level algorithms)
+      const size_t id = searchLowerIndex(start,end-1,key);
+      return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+    }
+
+    /** \returns a reference to the value at index \a key
+      * If the value does not exist, then the value \a defaultValue is inserted
+      * such that the keys are sorted. */
+    inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
+    {
+      size_t id = searchLowerIndex(0,m_size,key);
+      if (id>=m_size || m_indices[id]!=key)
+      {
+        resize(m_size+1,1);
+        for (size_t j=m_size-1; j>id; --j)
+        {
+          m_indices[j] = m_indices[j-1];
+          m_values[j] = m_values[j-1];
+        }
+        m_indices[id] = key;
+        m_values[id] = defaultValue;
+      }
+      return m_values[id];
+    }
+
+    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
+    {
+      size_t k = 0;
+      size_t n = size();
+      for (size_t i=0; i<n; ++i)
+      {
+        if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
+        {
+          value(k) = value(i);
+          index(k) = index(i);
+          ++k;
+        }
+      }
+      resize(k,0);
+    }
+
+  protected:
+
+    inline void reallocate(size_t size)
+    {
+      Scalar* newValues  = new Scalar[size];
+      Index* newIndices = new Index[size];
+      size_t copySize = (std::min)(size, m_size);
+      // copy
+      internal::smart_copy(m_values, m_values+copySize, newValues);
+      internal::smart_copy(m_indices, m_indices+copySize, newIndices);
+      // delete old stuff
+      delete[] m_values;
+      delete[] m_indices;
+      m_values = newValues;
+      m_indices = newIndices;
+      m_allocatedSize = size;
+    }
+
+  protected:
+    Scalar* m_values;
+    Index* m_indices;
+    size_t m_size;
+    size_t m_allocatedSize;
+
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPRESSED_STORAGE_H
diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
new file mode 100644
index 0000000..5c320e2
--- /dev/null
+++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -0,0 +1,245 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
+#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType>
+static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+{
+  typedef typename remove_all<Lhs>::type::Scalar Scalar;
+  typedef typename remove_all<Lhs>::type::Index Index;
+
+  // make sure to call innerSize/outerSize since we fake the storage order.
+  Index rows = lhs.innerSize();
+  Index cols = rhs.outerSize();
+  eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+  std::vector<bool> mask(rows,false);
+  Matrix<Scalar,Dynamic,1> values(rows);
+  Matrix<Index,Dynamic,1>  indices(rows);
+
+  // Estimate the number of non zero entries:
+  // given a rhs column containing Y non zeros, we assume that the corresponding Y columns
+  // of the lhs contribute about X+Y non zeros to that column of the product, where X is
+  // the average number of non zeros per column of the lhs.
+  // This yields the estimate nnz(lhs*rhs) = nnz(lhs) + nnz(rhs).
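+  // (e.g. with nnz(lhs)=1000 and nnz(rhs)=800, about 1800 entries are reserved below)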
+  Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
+
+  res.setZero();
+  res.reserve(Index(estimated_nnz_prod));
+  // we compute each column of the result, one after the other
+  for (Index j=0; j<cols; ++j)
+  {
+
+    res.startVec(j);
+    Index nnz = 0;
+    for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+    {
+      Scalar y = rhsIt.value();
+      Index k = rhsIt.index();
+      for (typename Lhs::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt)
+      {
+        Index i = lhsIt.index();
+        Scalar x = lhsIt.value();
+        if(!mask[i])
+        {
+          mask[i] = true;
+          values[i] = x * y;
+          indices[nnz] = i;
+          ++nnz;
+        }
+        else
+          values[i] += x * y;
+      }
+    }
+
+    // unordered insertion
+    for(Index k=0; k<nnz; ++k)
+    {
+      Index i = indices[k];
+      res.insertBackByOuterInnerUnordered(j,i) = values[i];
+      mask[i] = false;
+    }
+
+#if 0
+    // alternative ordered insertion code:
+
+    Index t200 = rows/(log2(200)*1.39);
+    Index t = (rows*100)/139;
+
+    // FIXME reserve nnz non zeros
+    // FIXME implement fast sort algorithms for very small nnz
+    // if the result is sparse enough => use a quick sort
+    // otherwise => loop through the entire vector
+    // In order to avoid to perform an expensive log2 when the
+    // result is clearly very sparse we use a linear bound up to 200.
+    //if((nnz<200 && nnz<t200) || nnz * log2(nnz) < t)
+    //res.startVec(j);
+    if(true)
+    {
+      if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
+      for(Index k=0; k<nnz; ++k)
+      {
+        Index i = indices[k];
+        res.insertBackByOuterInner(j,i) = values[i];
+        mask[i] = false;
+      }
+    }
+    else
+    {
+      // dense path
+      for(Index i=0; i<rows; ++i)
+      {
+        if(mask[i])
+        {
+          mask[i] = false;
+          res.insertBackByOuterInner(j,i) = values[i];
+        }
+      }
+    }
+#endif
+
+  }
+  res.finalize();
+}
+
+
+} // end namespace internal
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType,
+  int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+  int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+  int ResStorageOrder = (traits<ResultType>::Flags&RowMajorBit) ? RowMajor : ColMajor>
+struct conservative_sparse_sparse_product_selector;
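+
+// Eight specializations of this selector follow, one per combination of the
+// lhs/rhs/result storage orders; each converts or swaps its operands so that
+// conservative_sparse_sparse_product_impl can always run, with a final
+// storage-order conversion sorting the non zeros when needed.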
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+  typedef typename remove_all<Lhs>::type LhsCleaned;
+  typedef typename LhsCleaned::Scalar Scalar;
+
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+  {
+    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
+    ColMajorMatrix resCol(lhs.rows(),rhs.cols());
+    internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
+    // sort the non zeros:
+    RowMajorMatrix resRow(resCol);
+    res = resRow;
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
+{
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+  {
+     typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+     RowMajorMatrix rhsRow = rhs;
+     RowMajorMatrix resRow(lhs.rows(), rhs.cols());
+     internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
+     res = resRow;
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
+{
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+  {
+    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+    RowMajorMatrix lhsRow = lhs;
+    RowMajorMatrix resRow(lhs.rows(), rhs.cols());
+    internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
+    res = resRow;
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+  {
+    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+    RowMajorMatrix resRow(lhs.rows(), rhs.cols());
+    internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
+    res = resRow;
+  }
+};
+
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+  typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+  {
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
+    ColMajorMatrix resCol(lhs.rows(), rhs.cols());
+    internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
+    res = resCol;
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
+{
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+  {
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
+    ColMajorMatrix lhsCol = lhs;
+    ColMajorMatrix resCol(lhs.rows(), rhs.cols());
+    internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
+    res = resCol;
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
+{
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+  {
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
+    ColMajorMatrix rhsCol = rhs;
+    ColMajorMatrix resCol(lhs.rows(), rhs.cols());
+    internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
+    res = resCol;
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+  {
+    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
+    RowMajorMatrix resRow(lhs.rows(),rhs.cols());
+    internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
+    // sort the non zeros:
+    ColMajorMatrix resCol(resRow);
+    res = resCol;
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
diff --git a/Eigen/src/SparseCore/MappedSparseMatrix.h b/Eigen/src/SparseCore/MappedSparseMatrix.h
new file mode 100644
index 0000000..ab1a266
--- /dev/null
+++ b/Eigen/src/SparseCore/MappedSparseMatrix.h
@@ -0,0 +1,181 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
+#define EIGEN_MAPPED_SPARSEMATRIX_H
+
+namespace Eigen { 
+
+/** \class MappedSparseMatrix
+  *
+  * \brief Sparse matrix expression mapping user-provided compressed-storage arrays (the data is not copied)
+  *
+  * \param _Scalar the scalar type, i.e. the type of the coefficients
+  *
+  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+  *
+  */
+namespace internal {
+template<typename _Scalar, int _Flags, typename _Index>
+struct traits<MappedSparseMatrix<_Scalar, _Flags, _Index> > : traits<SparseMatrix<_Scalar, _Flags, _Index> >
+{};
+}
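+
+// Minimal construction sketch (illustrative only; the arrays must form a valid
+// compressed storage and must outlive the map, which does not copy them):
+//
+//   // a 3x3 identity matrix in compressed column storage
+//   int    outer[]  = {0, 1, 2, 3};
+//   int    inner[]  = {0, 1, 2};
+//   double values[] = {1.0, 1.0, 1.0};
+//   MappedSparseMatrix<double,ColMajor,int> A(3, 3, 3, outer, inner, values);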
+
+template<typename _Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix
+  : public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags, _Index> >
+{
+  public:
+    EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix)
+    enum { IsRowMajor = Base::IsRowMajor };
+
+  protected:
+
+    Index   m_outerSize;
+    Index   m_innerSize;
+    Index   m_nnz;
+    Index*  m_outerIndex;
+    Index*  m_innerIndices;
+    Scalar* m_values;
+
+  public:
+
+    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+    inline Index innerSize() const { return m_innerSize; }
+    inline Index outerSize() const { return m_outerSize; }
+    
+    bool isCompressed() const { return true; }
+
+    //----------------------------------------
+    // direct access interface
+    inline const Scalar* valuePtr() const { return m_values; }
+    inline Scalar* valuePtr() { return m_values; }
+
+    inline const Index* innerIndexPtr() const { return m_innerIndices; }
+    inline Index* innerIndexPtr() { return m_innerIndices; }
+
+    inline const Index* outerIndexPtr() const { return m_outerIndex; }
+    inline Index* outerIndexPtr() { return m_outerIndex; }
+    //----------------------------------------
+
+    inline Scalar coeff(Index row, Index col) const
+    {
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+
+      Index start = m_outerIndex[outer];
+      Index end = m_outerIndex[outer+1];
+      if (start==end)
+        return Scalar(0);
+      else if (end>0 && inner==m_innerIndices[end-1])
+        return m_values[end-1];
+      // ^^  optimization: let's first check if it is the last coefficient
+      // (very common in high level algorithms)
+
+      const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
+      const Index id = r-&m_innerIndices[0];
+      return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
+    }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+
+      Index start = m_outerIndex[outer];
+      Index end = m_outerIndex[outer+1];
+      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+      eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+      Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
+      const Index id = r-&m_innerIndices[0];
+      eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
+      return m_values[id];
+    }
+
+    class InnerIterator;
+    class ReverseInnerIterator;
+
+    /** \returns the number of non zero coefficients */
+    inline Index nonZeros() const  { return m_nnz; }
+
+    inline MappedSparseMatrix(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr)
+      : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
+        m_innerIndices(innerIndexPtr), m_values(valuePtr)
+    {}
+
+    /** Empty destructor */
+    inline ~MappedSparseMatrix() {}
+};
+
+template<typename Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix<Scalar,_Flags,_Index>::InnerIterator
+{
+  public:
+    InnerIterator(const MappedSparseMatrix& mat, Index outer)
+      : m_matrix(mat),
+        m_outer(outer),
+        m_id(mat.outerIndexPtr()[outer]),
+        m_start(m_id),
+        m_end(mat.outerIndexPtr()[outer+1])
+    {}
+
+    inline InnerIterator& operator++() { m_id++; return *this; }
+
+    inline Scalar value() const { return m_matrix.valuePtr()[m_id]; }
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id]); }
+
+    inline Index index() const { return m_matrix.innerIndexPtr()[m_id]; }
+    inline Index row() const { return IsRowMajor ? m_outer : index(); }
+    inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+    inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
+
+  protected:
+    const MappedSparseMatrix& m_matrix;
+    const Index m_outer;
+    Index m_id;
+    const Index m_start;
+    const Index m_end;
+};
+
+template<typename Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix<Scalar,_Flags,_Index>::ReverseInnerIterator
+{
+  public:
+    ReverseInnerIterator(const MappedSparseMatrix& mat, Index outer)
+      : m_matrix(mat),
+        m_outer(outer),
+        m_id(mat.outerIndexPtr()[outer+1]),
+        m_start(mat.outerIndexPtr()[outer]),
+        m_end(m_id)
+    {}
+
+    inline ReverseInnerIterator& operator--() { m_id--; return *this; }
+
+    inline Scalar value() const { return m_matrix.valuePtr()[m_id-1]; }
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id-1]); }
+
+    inline Index index() const { return m_matrix.innerIndexPtr()[m_id-1]; }
+    inline Index row() const { return IsRowMajor ? m_outer : index(); }
+    inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+    inline operator bool() const { return (m_id <= m_end) && (m_id>m_start); }
+
+  protected:
+    const MappedSparseMatrix& m_matrix;
+    const Index m_outer;
+    Index m_id;
+    const Index m_start;
+    const Index m_end;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_MAPPED_SPARSEMATRIX_H
diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
new file mode 100644
index 0000000..0c90baf
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseBlock.h
@@ -0,0 +1,537 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_BLOCK_H
+#define EIGEN_SPARSE_BLOCK_H
+
+namespace Eigen { 
+
+template<typename XprType, int BlockRows, int BlockCols>
+class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
+  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
+{
+    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
+    typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
+public:
+    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+protected:
+    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+public:
+    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+    
+    class InnerIterator: public XprType::InnerIterator
+    {
+        typedef typename BlockImpl::Index Index;
+      public:
+        inline InnerIterator(const BlockType& xpr, Index outer)
+          : XprType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        {}
+        inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+        inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+      protected:
+        Index m_outer;
+    };
+    class ReverseInnerIterator: public XprType::ReverseInnerIterator
+    {
+        typedef typename BlockImpl::Index Index;
+      public:
+        inline ReverseInnerIterator(const BlockType& xpr, Index outer)
+          : XprType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        {}
+        inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+        inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+      protected:
+        Index m_outer;
+    };
+
+    inline BlockImpl(const XprType& xpr, int i)
+      : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+    {}
+
+    inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+      : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+    {}
+    
+    inline const Scalar coeff(int row, int col) const
+    {
+      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+    }
+    
+    inline const Scalar coeff(int index) const
+    {
+      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index :  m_outerStart);
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+  protected:
+
+    typename XprType::Nested m_matrix;
+    Index m_outerStart;
+    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
+  private:
+    Index nonZeros() const;
+};
+
+
+/***************************************************************************
+* specialisation for SparseMatrix
+***************************************************************************/
+
+template<typename _Scalar, int _Options, typename _Index, int BlockRows, int BlockCols>
+class BlockImpl<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true,Sparse>
+  : public SparseMatrixBase<Block<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true> >
+{
+    typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
+    typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
+    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
+    typedef Block<const SparseMatrixType, BlockRows, BlockCols, true> ConstBlockType;
+public:
+    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+protected:
+    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+public:
+    
+    class InnerIterator: public SparseMatrixType::InnerIterator
+    {
+      public:
+        inline InnerIterator(const BlockType& xpr, Index outer)
+          : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        {}
+        inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+        inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+      protected:
+        Index m_outer;
+    };
+    class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator
+    {
+      public:
+        inline ReverseInnerIterator(const BlockType& xpr, Index outer)
+          : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        {}
+        inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+        inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+      protected:
+        Index m_outer;
+    };
+
+    inline BlockImpl(const SparseMatrixType& xpr, int i)
+      : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+    {}
+
+    inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+      : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+    {}
+
+    template<typename OtherDerived>
+    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
+    {
+      typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
+      _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);
+      // This assignment is slow if this vector set is not empty
+      // and/or it is not at the end of the nonzeros of the underlying matrix.
+
+      // 1 - eval to a temporary to avoid transposition and/or aliasing issues
+      SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, Index> tmp(other);
+
+      // 2 - let's check whether there is enough allocated memory
+      Index nnz           = tmp.nonZeros();
+      Index start         = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
+      Index end           = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
+      Index block_size    = end - start;                                                // available room in the current block
+      Index tail_size     = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
+      
+      Index free_size     = m_matrix.isCompressed()
+                          ? Index(matrix.data().allocatedSize()) + block_size
+                          : block_size;
+
+      if(nnz>free_size) 
+      {
+        // realloc manually to reduce copies
+        typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);
+
+        std::memcpy(&newdata.value(0), &m_matrix.data().value(0), start*sizeof(Scalar));
+        std::memcpy(&newdata.index(0), &m_matrix.data().index(0), start*sizeof(Index));
+
+        std::memcpy(&newdata.value(start), &tmp.data().value(0), nnz*sizeof(Scalar));
+        std::memcpy(&newdata.index(start), &tmp.data().index(0), nnz*sizeof(Index));
+
+        std::memcpy(&newdata.value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar));
+        std::memcpy(&newdata.index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index));
+        
+        newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);
+
+        matrix.data().swap(newdata);
+      }
+      else
+      {
+        // no need to realloc, simply copy the tail at its respective position and insert tmp
+        matrix.data().resize(start + nnz + tail_size);
+
+        std::memmove(&matrix.data().value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar));
+        std::memmove(&matrix.data().index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index));
+
+        std::memcpy(&matrix.data().value(start), &tmp.data().value(0), nnz*sizeof(Scalar));
+        std::memcpy(&matrix.data().index(start), &tmp.data().index(0), nnz*sizeof(Index));
+      }
+      
+      // update innerNonZeros
+      if(!m_matrix.isCompressed())
+        for(Index j=0; j<m_outerSize.value(); ++j)
+          matrix.innerNonZeroPtr()[m_outerStart+j] = tmp.innerVector(j).nonZeros();
+
+      // update outer index pointers
+      Index p = start;
+      for(Index k=0; k<m_outerSize.value(); ++k)
+      {
+        matrix.outerIndexPtr()[m_outerStart+k] = p;
+        p += tmp.innerVector(k).nonZeros();
+      }
+      std::ptrdiff_t offset = nnz - block_size;
+      for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
+      {
+        matrix.outerIndexPtr()[k] += offset;
+      }
+
+      return derived();
+    }
+
+    inline BlockType& operator=(const BlockType& other)
+    {
+      return operator=<BlockType>(other);
+    }
+
+    inline const Scalar* valuePtr() const
+    { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+    inline Scalar* valuePtr()
+    { return m_matrix.const_cast_derived().valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+
+    inline const Index* innerIndexPtr() const
+    { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+    inline Index* innerIndexPtr()
+    { return m_matrix.const_cast_derived().innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+
+    inline const Index* outerIndexPtr() const
+    { return m_matrix.outerIndexPtr() + m_outerStart; }
+    inline Index* outerIndexPtr()
+    { return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; }
+
+    Index nonZeros() const
+    {
+      if(m_matrix.isCompressed())
+        return  std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
+              - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]);
+      else if(m_outerSize.value()==0)
+        return 0;
+      else
+        return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
+    }
+    
+    inline Scalar& coeffRef(int row, int col)
+    {
+      return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));
+    }
+    
+    inline const Scalar coeff(int row, int col) const
+    {
+      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));
+    }
+    
+    inline const Scalar coeff(int index) const
+    {
+      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index :  m_outerStart);
+    }
+
+    const Scalar& lastCoeff() const
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl);
+      eigen_assert(nonZeros()>0);
+      if(m_matrix.isCompressed())
+        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
+      else
+        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+  protected:
+
+    typename SparseMatrixType::Nested m_matrix;
+    Index m_outerStart;
+    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
+
+};
+
+
+template<typename _Scalar, int _Options, typename _Index, int BlockRows, int BlockCols>
+class BlockImpl<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true,Sparse>
+  : public SparseMatrixBase<Block<const SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true> >
+{
+    typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
+    typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
+    typedef Block<const SparseMatrixType, BlockRows, BlockCols, true> BlockType;
+public:
+    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+protected:
+    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+public:
+    
+    class InnerIterator: public SparseMatrixType::InnerIterator
+    {
+      public:
+        inline InnerIterator(const BlockType& xpr, Index outer)
+          : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        {}
+        inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+        inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+      protected:
+        Index m_outer;
+    };
+    class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator
+    {
+      public:
+        inline ReverseInnerIterator(const BlockType& xpr, Index outer)
+          : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        {}
+        inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+        inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+      protected:
+        Index m_outer;
+    };
+
+    inline BlockImpl(const SparseMatrixType& xpr, int i)
+      : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+    {}
+
+    inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+      : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+    {}
+
+    inline const Scalar* valuePtr() const
+    { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+
+    inline const Index* innerIndexPtr() const
+    { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+
+    inline const Index* outerIndexPtr() const
+    { return m_matrix.outerIndexPtr() + m_outerStart; }
+
+    Index nonZeros() const
+    {
+      if(m_matrix.isCompressed())
+        return  std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
+              - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]);
+      else if(m_outerSize.value()==0)
+        return 0;
+      else
+        return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
+    }
+    
+    inline const Scalar coeff(int row, int col) const
+    {
+      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));
+    }
+    
+    inline const Scalar coeff(int index) const
+    {
+      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index :  m_outerStart);
+    }
+
+    const Scalar& lastCoeff() const
+    {
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl);
+      eigen_assert(nonZeros()>0);
+      if(m_matrix.isCompressed())
+        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
+      else
+        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+  protected:
+
+    typename SparseMatrixType::Nested m_matrix;
+    Index m_outerStart;
+    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
+
+};
+
+//----------
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+  * is col-major (resp. row-major).
+  */
+template<typename Derived>
+typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
+{ return InnerVectorReturnType(derived(), outer); }
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+  * is col-major (resp. row-major). Read-only.
+  */
+template<typename Derived>
+const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
+{ return ConstInnerVectorReturnType(derived(), outer); }
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+  * is col-major (resp. row-major).
+  */
+template<typename Derived>
+typename SparseMatrixBase<Derived>::InnerVectorsReturnType
+SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
+{
+  return Block<Derived,Dynamic,Dynamic,true>(derived(),
+                                             IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+                                             IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+  
+}
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+  * is col-major (resp. row-major). Read-only.
+  */
+template<typename Derived>
+const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
+SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
+{
+  return Block<const Derived,Dynamic,Dynamic,true>(derived(),
+                                                  IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+                                                  IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+  
+}
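+
+// Usage sketch (illustrative): for a column-major SparseMatrix A,
+//   A.innerVector(j)       // the j-th column, as a sparse block expression
+//   A.innerVectors(j, 3)   // the three columns j, j+1, j+2
+// The same calls address rows instead of columns when A is row-major.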
+
+/** Generic implementation of sparse Block expression.
+  * Read-only.
+  */
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
+  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
+{
+  typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
+  typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
+public:
+    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+
+    /** Column or Row constructor
+      */
+    inline BlockImpl(const XprType& xpr, int i)
+      : m_matrix(xpr),
+        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
+        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
+        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
+        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
+    {}
+
+    /** Dynamic-size constructor
+      */
+    inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+      : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols)
+    {}
+
+    inline int rows() const { return m_blockRows.value(); }
+    inline int cols() const { return m_blockCols.value(); }
+
+    inline Scalar& coeffRef(int row, int col)
+    {
+      return m_matrix.const_cast_derived()
+               .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+    }
+
+    inline const Scalar coeff(int row, int col) const
+    {
+      return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
+    }
+
+    inline Scalar& coeffRef(int index)
+    {
+      return m_matrix.const_cast_derived()
+             .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                       m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+
+    inline const Scalar coeff(int index) const
+    {
+      return m_matrix
+             .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                    m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+    }
+    
+    inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
+    
+    class InnerIterator : public _MatrixTypeNested::InnerIterator
+    {
+      typedef typename _MatrixTypeNested::InnerIterator Base;
+      const BlockType& m_block;
+      Index m_end;
+    public:
+
+      EIGEN_STRONG_INLINE InnerIterator(const BlockType& block, Index outer)
+        : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
+          m_block(block),
+          m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value())
+      {
+        while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) )
+          Base::operator++();
+      }
+
+      inline Index index()  const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
+      inline Index outer()  const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
+      inline Index row()    const { return Base::row()   - m_block.m_startRow.value(); }
+      inline Index col()    const { return Base::col()   - m_block.m_startCol.value(); }
+      
+      inline operator bool() const { return Base::operator bool() && Base::index() < m_end; }
+    };
+    class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator
+    {
+      typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+      const BlockType& m_block;
+      Index m_begin;
+    public:
+
+      EIGEN_STRONG_INLINE ReverseInnerIterator(const BlockType& block, Index outer)
+        : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
+          m_block(block),
+          m_begin(IsRowMajor ? block.m_startCol.value() : block.m_startRow.value())
+      {
+        while( (Base::operator bool()) && (Base::index() >= (IsRowMajor ? m_block.m_startCol.value()+block.m_blockCols.value() : m_block.m_startRow.value()+block.m_blockRows.value())) )
+          Base::operator--();
+      }
+
+      inline Index index()  const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
+      inline Index outer()  const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
+      inline Index row()    const { return Base::row()   - m_block.m_startRow.value(); }
+      inline Index col()    const { return Base::col()   - m_block.m_startCol.value(); }
+      
+      inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; }
+    };
+  protected:
+    friend class InnerIterator;
+    friend class ReverseInnerIterator;
+    
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
+
+    typename XprType::Nested m_matrix;
+    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
+    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
+
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_BLOCK_H
+
diff --git a/Eigen/src/SparseCore/SparseColEtree.h b/Eigen/src/SparseCore/SparseColEtree.h
new file mode 100644
index 0000000..f8745f4
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseColEtree.h
@@ -0,0 +1,206 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+/* 
+ 
+ * NOTE: This file is the modified version of sp_coletree.c file in SuperLU 
+ 
+ * -- SuperLU routine (version 3.1) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * August 1, 2008
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSE_COLETREE_H
+#define SPARSE_COLETREE_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** Find the root of the tree/set containing the vertex i: uses path halving */
+template<typename Index, typename IndexVector>
+Index etree_find (Index i, IndexVector& pp)
+{
+  Index p = pp(i); // Parent 
+  Index gp = pp(p); // Grand parent 
+  while (gp != p) 
+  {
+    pp(i) = gp; // Parent pointer on find path is changed to former grand parent
+    i = gp; 
+    p = pp(i);
+    gp = pp(p);
+  }
+  return p; 
+}
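+
+// Example (sketch): with parent pointers pp = [1, 2, 2], etree_find(0, pp)
+// returns the root 2 and, as a side effect of path halving, rewrites pp(0)
+// from 1 to 2 so that later finds from vertex 0 are shorter.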
+
+/** Compute the column elimination tree of a sparse matrix
+  * \param mat The matrix in column-major format. 
+  * \param parent The elimination tree: parent(i) is the parent of node i, and parent(i) == mat.cols() marks a root
+  * \param firstRowElt The column index of the first element in each row
+  * \param perm The permutation to apply to the columns of \b mat
+  */
+template <typename MatrixType, typename IndexVector>
+int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::Index *perm=0)
+{
+  typedef typename MatrixType::Index Index;
+  Index nc = mat.cols(); // Number of columns 
+  Index m = mat.rows();
+  Index diagSize = (std::min)(nc,m);
+  IndexVector root(nc); // root of subtree of etree 
+  root.setZero();
+  IndexVector pp(nc); // disjoint sets 
+  pp.setZero(); // Initialize disjoint sets 
+  parent.resize(mat.cols());
+  // Compute the first nonzero column index in each row
+  Index row,col; 
+  firstRowElt.resize(m);
+  firstRowElt.setConstant(nc);
+  firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
+  bool found_diag;
+  for (col = 0; col < nc; col++)
+  {
+    Index pcol = col;
+    if(perm) pcol  = perm[col];
+    for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
+    { 
+      row = it.row();
+      firstRowElt(row) = (std::min)(firstRowElt(row), col);
+    }
+  }
+  /* Compute the etree by Liu's algorithm for symmetric matrices,
+   * except use (firstRowElt[r],c) in place of an edge (r,c) of A.
+   * Thus each row clique in A'*A is replaced by a star
+   * centered at its first vertex, which has the same fill. */
+  Index rset, cset, rroot; 
+  for (col = 0; col < nc; col++) 
+  {
+    found_diag = col>=m;
+    pp(col) = col; 
+    cset = col; 
+    root(cset) = col; 
+    parent(col) = nc; 
+    /* The diagonal element is treated here even if it does not exist in the matrix
+     * hence the loop is executed once more */ 
+    Index pcol = col;
+    if(perm) pcol  = perm[col];
+    for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
+    { //  A sequence of interleaved find and union is performed 
+      Index i = col;
+      if(it) i = it.index();
+      if (i == col) found_diag = true;
+      
+      row = firstRowElt(i);
+      if (row >= col) continue; 
+      rset = internal::etree_find(row, pp); // Find the name of the set containing row
+      rroot = root(rset);
+      if (rroot != col) 
+      {
+        parent(rroot) = col; 
+        pp(cset) = rset; 
+        cset = rset; 
+        root(cset) = col; 
+      }
+    }
+  }
+  return 0;  
+}
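+
+// An illustrative usage sketch (names and sizes are hypothetical):
+//   SparseMatrix<double> A(m, n);               // column-major by default
+//   Matrix<int,Dynamic,1> parent, firstRowElt;
+//   internal::coletree(A, parent, firstRowElt); // parent(j) == n marks a root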
+
+/** 
+  * Depth-first search from vertex n.  No recursion.
+  * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.
+*/
+template <typename Index, typename IndexVector>
+void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum)
+{
+  Index current = n, first, next;
+  while (postnum != n) 
+  {
+    // Get the first kid of the current node
+    first = first_kid(current);
+    
+    // No kid for the current node
+    if (first == -1) 
+    {
+      // Numbering this node because it has no kid 
+      post(current) = postnum++;
+      
+      // looking for the next kid 
+      next = next_kid(current); 
+      while (next == -1) 
+      {
+        // No more kids : back to the parent node
+        current = parent(current); 
+        // numbering the parent node 
+        post(current) = postnum++;
+        
+        // Get the next kid 
+        next = next_kid(current); 
+      }
+      // stopping criterion 
+      if (postnum == n+1) return; 
+      
+      // Updating current node 
+      current = next; 
+    }
+    else 
+    {
+      current = first; 
+    }
+  }
+}
+
+
+/**
+  * \brief Post-order a tree
+  * \param n The number of nodes
+  * \param parent The input tree
+  * \param post The post-ordered tree
+  */
+template <typename Index, typename IndexVector>
+void treePostorder(Index n, IndexVector& parent, IndexVector& post)
+{
+  IndexVector first_kid, next_kid; // Linked list of children 
+  Index postnum; 
+  // Allocate storage for working arrays and results 
+  first_kid.resize(n+1); 
+  next_kid.setZero(n+1);
+  post.setZero(n+1);
+  
+  // Set up structure describing children
+  Index v, dad; 
+  first_kid.setConstant(-1); 
+  for (v = n-1; v >= 0; v--) 
+  {
+    dad = parent(v);
+    next_kid(v) = first_kid(dad); 
+    first_kid(dad) = v; 
+  }
+  
+  // Depth-first search from dummy root vertex #n
+  postnum = 0; 
+  internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum);
+}
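+
+// A small worked example (assumed data): with n = 3 and parent = [2, 2, 3]
+// (nodes 0 and 1 are children of 2, and 2 hangs off the dummy super-root #3),
+// the resulting numbering is post = [0, 1, 2, 3]: each node is numbered after
+// all of its children, and the dummy root comes last.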
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // SPARSE_COLETREE_H
diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
new file mode 100644
index 0000000..60ca769
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -0,0 +1,325 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
+#define EIGEN_SPARSE_CWISE_BINARY_OP_H
+
+namespace Eigen { 
+
+// Here we have to handle 3 cases:
+//  1 - sparse op dense
+//  2 - dense op sparse
+//  3 - sparse op sparse
+// We also need to implement a 4th iterator for:
+//  4 - dense op dense
+// Finally, we also need to distinguish between the product and other operations:
+//                configuration      returned mode
+//  1 - sparse op dense    product      sparse
+//                         generic      dense
+//  2 - dense op sparse    product      sparse
+//                         generic      dense
+//  3 - sparse op sparse   product      sparse
+//                         generic      sparse
+//  4 - dense op dense     product      dense
+//                         generic      dense
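+//
+// For instance (user-level illustration; A and B are assumed to share the
+// same storage order):
+//   SparseMatrix<double> A, B;
+//   MatrixXd D;
+//   ... A + B ...              // case 3, generic op  -> sparse result
+//   ... A.cwiseProduct(D) ...  // case 1, product     -> sparse result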
+
+namespace internal {
+
+template<> struct promote_storage_type<Dense,Sparse>
+{ typedef Sparse ret; };
+
+template<> struct promote_storage_type<Sparse,Dense>
+{ typedef Sparse ret; };
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived,
+  typename _LhsStorageMode = typename traits<Lhs>::StorageKind,
+  typename _RhsStorageMode = typename traits<Rhs>::StorageKind>
+class sparse_cwise_binary_op_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
+  : public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+  public:
+    class InnerIterator;
+    class ReverseInnerIterator;
+    typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
+    EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+    CwiseBinaryOpImpl()
+    {
+      typedef typename internal::traits<Lhs>::StorageKind LhsStorageKind;
+      typedef typename internal::traits<Rhs>::StorageKind RhsStorageKind;
+      EIGEN_STATIC_ASSERT((
+                (!internal::is_same<LhsStorageKind,RhsStorageKind>::value)
+            ||  ((Lhs::Flags&RowMajorBit) == (Rhs::Flags&RowMajorBit))),
+            THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH);
+    }
+};
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
+  : public internal::sparse_cwise_binary_op_inner_iterator_selector<BinaryOp,Lhs,Rhs,typename CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator>
+{
+  public:
+    typedef typename Lhs::Index Index;
+    typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
+      BinaryOp,Lhs,Rhs, InnerIterator> Base;
+
+    // NOTE: we have to prefix Index by "typename Lhs::" to avoid an ICE with VC11
+    EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename Lhs::Index outer)
+      : Base(binOp.derived(),outer)
+    {}
+};
+
+/***************************************************************************
+* Implementation of inner-iterators
+***************************************************************************/
+
+// template<typename T> struct internal::func_is_conjunction { enum { ret = false }; };
+// template<typename T> struct internal::func_is_conjunction<internal::scalar_product_op<T> > { enum { ret = true }; };
+
+// TODO generalize the internal::scalar_product_op specialization to all conjunctions, if any!
+
+namespace internal {
+
+// sparse - sparse  (generic)
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+    typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> CwiseBinaryXpr;
+    typedef typename traits<CwiseBinaryXpr>::Scalar Scalar;
+    typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+    typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+    typedef typename _LhsNested::InnerIterator LhsIterator;
+    typedef typename _RhsNested::InnerIterator RhsIterator;
+    typedef typename Lhs::Index Index;
+
+  public:
+
+    EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+      : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+    {
+      this->operator++();
+    }
+
+    EIGEN_STRONG_INLINE Derived& operator++()
+    {
+      if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
+      {
+        m_id = m_lhsIter.index();
+        m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
+        ++m_lhsIter;
+        ++m_rhsIter;
+      }
+      else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
+      {
+        m_id = m_lhsIter.index();
+        m_value = m_functor(m_lhsIter.value(), Scalar(0));
+        ++m_lhsIter;
+      }
+      else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
+      {
+        m_id = m_rhsIter.index();
+        m_value = m_functor(Scalar(0), m_rhsIter.value());
+        ++m_rhsIter;
+      }
+      else
+      {
+        m_value = 0; // this is to avoid a compilation warning
+        m_id = -1;
+      }
+      return *static_cast<Derived*>(this);
+    }
+
+    EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+
+    EIGEN_STRONG_INLINE Index index() const { return m_id; }
+    EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
+    EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
+
+    EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
+
+  protected:
+    LhsIterator m_lhsIter;
+    RhsIterator m_rhsIter;
+    const BinaryOp& m_functor;
+    Scalar m_value;
+    Index m_id;
+};
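+
+// Note: the generic iterator above performs a classic two-way merge of the
+// sorted nonzeros: at each step the operand with the smallest current inner
+// index is consumed, with Scalar(0) substituted for the missing side. E.g.,
+// for lhs indices {0,2} and rhs indices {2,5}, it visits 0, 2, then 5.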
+
+// sparse - sparse  (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+    typedef scalar_product_op<T> BinaryFunc;
+    typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+    typedef typename CwiseBinaryXpr::Scalar Scalar;
+    typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+    typedef typename _LhsNested::InnerIterator LhsIterator;
+    typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+    typedef typename _RhsNested::InnerIterator RhsIterator;
+    typedef typename Lhs::Index Index;
+  public:
+
+    EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+      : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+    {
+      while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+      {
+        if (m_lhsIter.index() < m_rhsIter.index())
+          ++m_lhsIter;
+        else
+          ++m_rhsIter;
+      }
+    }
+
+    EIGEN_STRONG_INLINE Derived& operator++()
+    {
+      ++m_lhsIter;
+      ++m_rhsIter;
+      while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+      {
+        if (m_lhsIter.index() < m_rhsIter.index())
+          ++m_lhsIter;
+        else
+          ++m_rhsIter;
+      }
+      return *static_cast<Derived*>(this);
+    }
+
+    EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
+
+    EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+    EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+    EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+    EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
+
+  protected:
+    LhsIterator m_lhsIter;
+    RhsIterator m_rhsIter;
+    const BinaryFunc& m_functor;
+};
+
+// sparse - dense  (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Dense>
+{
+    typedef scalar_product_op<T> BinaryFunc;
+    typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+    typedef typename CwiseBinaryXpr::Scalar Scalar;
+    typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+    typedef typename traits<CwiseBinaryXpr>::RhsNested RhsNested;
+    typedef typename _LhsNested::InnerIterator LhsIterator;
+    typedef typename Lhs::Index Index;
+    enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
+  public:
+
+    EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+      : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+    {}
+
+    EIGEN_STRONG_INLINE Derived& operator++()
+    {
+      ++m_lhsIter;
+      return *static_cast<Derived*>(this);
+    }
+
+    EIGEN_STRONG_INLINE Scalar value() const
+    { return m_functor(m_lhsIter.value(),
+                       m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
+
+    EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+    EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+    EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+    EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
+
+  protected:
+    RhsNested m_rhs;
+    LhsIterator m_lhsIter;
+    const BinaryFunc m_functor;
+    const Index m_outer;
+};
+
+// dense - sparse  (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Dense, Sparse>
+{
+    typedef scalar_product_op<T> BinaryFunc;
+    typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+    typedef typename CwiseBinaryXpr::Scalar Scalar;
+    typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+    typedef typename _RhsNested::InnerIterator RhsIterator;
+    typedef typename Lhs::Index Index;
+
+    enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
+  public:
+
+    EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+      : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+    {}
+
+    EIGEN_STRONG_INLINE Derived& operator++()
+    {
+      ++m_rhsIter;
+      return *static_cast<Derived*>(this);
+    }
+
+    EIGEN_STRONG_INLINE Scalar value() const
+    { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
+
+    EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); }
+    EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
+    EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
+
+    EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
+
+  protected:
+    const CwiseBinaryXpr& m_xpr;
+    RhsIterator m_rhsIter;
+    const BinaryFunc& m_functor;
+    const Index m_outer;
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of SparseMatrixBase and SparseCwise functions/operators
+***************************************************************************/
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
+{
+  return derived() = derived() - other.derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
+{
+  return derived() = derived() + other.derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
+{
+  return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
diff --git a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
new file mode 100644
index 0000000..5a50c78
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
@@ -0,0 +1,163 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
+#define EIGEN_SPARSE_CWISE_UNARY_OP_H
+
+namespace Eigen { 
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>
+  : public SparseMatrixBase<CwiseUnaryOp<UnaryOp, MatrixType> >
+{
+  public:
+
+    class InnerIterator;
+    class ReverseInnerIterator;
+
+    typedef CwiseUnaryOp<UnaryOp, MatrixType> Derived;
+    EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+
+  protected:
+    typedef typename internal::traits<Derived>::_XprTypeNested _MatrixTypeNested;
+    typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+    typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator;
+};
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::InnerIterator
+    : public CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeIterator
+{
+    typedef typename CwiseUnaryOpImpl::Scalar Scalar;
+    typedef typename CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeIterator Base;
+  public:
+
+    EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer)
+      : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+    {}
+
+    EIGEN_STRONG_INLINE InnerIterator& operator++()
+    { Base::operator++(); return *this; }
+
+    EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); }
+
+  protected:
+    const UnaryOp m_functor;
+  private:
+    typename CwiseUnaryOpImpl::Scalar& valueRef();
+};
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::ReverseInnerIterator
+    : public CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeReverseIterator
+{
+    typedef typename CwiseUnaryOpImpl::Scalar Scalar;
+    typedef typename CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeReverseIterator Base;
+  public:
+
+    EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer)
+      : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+    {}
+
+    EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
+    { Base::operator--(); return *this; }
+
+    EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); }
+
+  protected:
+    const UnaryOp m_functor;
+  private:
+    typename CwiseUnaryOpImpl::Scalar& valueRef();
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>
+  : public SparseMatrixBase<CwiseUnaryView<ViewOp, MatrixType> >
+{
+  public:
+
+    class InnerIterator;
+    class ReverseInnerIterator;
+
+    typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
+    EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+
+  protected:
+    typedef typename internal::traits<Derived>::_MatrixTypeNested _MatrixTypeNested;
+    typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+    typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator;
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::InnerIterator
+    : public CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeIterator
+{
+    typedef typename CwiseUnaryViewImpl::Scalar Scalar;
+    typedef typename CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeIterator Base;
+  public:
+
+    EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer)
+      : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+    {}
+
+    EIGEN_STRONG_INLINE InnerIterator& operator++()
+    { Base::operator++(); return *this; }
+
+    EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); }
+    EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); }
+
+  protected:
+    const ViewOp m_functor;
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::ReverseInnerIterator
+    : public CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeReverseIterator
+{
+    typedef typename CwiseUnaryViewImpl::Scalar Scalar;
+    typedef typename CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeReverseIterator Base;
+  public:
+
+    EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer)
+      : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+    {}
+
+    EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
+    { Base::operator--(); return *this; }
+
+    EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); }
+    EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); }
+
+  protected:
+    const ViewOp m_functor;
+};
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator*=(const Scalar& other)
+{
+  for (Index j=0; j<outerSize(); ++j)
+    for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+      i.valueRef() *= other;
+  return derived();
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator/=(const Scalar& other)
+{
+  for (Index j=0; j<outerSize(); ++j)
+    for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+      i.valueRef() /= other;
+  return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
new file mode 100644
index 0000000..ccb6ae7
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -0,0 +1,311 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEDENSEPRODUCT_H
+#define EIGEN_SPARSEDENSEPRODUCT_H
+
+namespace Eigen { 
+
+template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductReturnType
+{
+  typedef SparseTimeDenseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1>
+{
+  typedef typename internal::conditional<
+    Lhs::IsRowMajor,
+    SparseDenseOuterProduct<Rhs,Lhs,true>,
+    SparseDenseOuterProduct<Lhs,Rhs,false> >::type Type;
+};
+
+template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType
+{
+  typedef DenseTimeSparseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1>
+{
+  typedef typename internal::conditional<
+    Rhs::IsRowMajor,
+    SparseDenseOuterProduct<Rhs,Lhs,true>,
+    SparseDenseOuterProduct<Lhs,Rhs,false> >::type Type;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, bool Tr>
+struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+  typedef Sparse StorageKind;
+  typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
+                                         typename traits<Rhs>::Scalar>::ReturnType Scalar;
+  typedef typename Lhs::Index Index;
+  typedef typename Lhs::Nested LhsNested;
+  typedef typename Rhs::Nested RhsNested;
+  typedef typename remove_all<LhsNested>::type _LhsNested;
+  typedef typename remove_all<RhsNested>::type _RhsNested;
+
+  enum {
+    LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost,
+    RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost,
+
+    RowsAtCompileTime    = Tr ? int(traits<Rhs>::RowsAtCompileTime)     : int(traits<Lhs>::RowsAtCompileTime),
+    ColsAtCompileTime    = Tr ? int(traits<Lhs>::ColsAtCompileTime)     : int(traits<Rhs>::ColsAtCompileTime),
+    MaxRowsAtCompileTime = Tr ? int(traits<Rhs>::MaxRowsAtCompileTime)  : int(traits<Lhs>::MaxRowsAtCompileTime),
+    MaxColsAtCompileTime = Tr ? int(traits<Lhs>::MaxColsAtCompileTime)  : int(traits<Rhs>::MaxColsAtCompileTime),
+
+    Flags = Tr ? RowMajorBit : 0,
+
+    CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits<Scalar>::MulCost
+  };
+};
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs, bool Tr>
+class SparseDenseOuterProduct
+ : public SparseMatrixBase<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+  public:
+
+    typedef SparseMatrixBase<SparseDenseOuterProduct> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct)
+    typedef internal::traits<SparseDenseOuterProduct> Traits;
+
+  private:
+
+    typedef typename Traits::LhsNested LhsNested;
+    typedef typename Traits::RhsNested RhsNested;
+    typedef typename Traits::_LhsNested _LhsNested;
+    typedef typename Traits::_RhsNested _RhsNested;
+
+  public:
+
+    class InnerIterator;
+
+    EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs)
+      : m_lhs(lhs), m_rhs(rhs)
+    {
+      EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+    }
+
+    EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs)
+      : m_lhs(lhs), m_rhs(rhs)
+    {
+      EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); }
+
+    EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+    EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+  protected:
+    LhsNested m_lhs;
+    RhsNested m_rhs;
+};
+
+template<typename Lhs, typename Rhs, bool Transpose>
+class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
+{
+    typedef typename _LhsNested::InnerIterator Base;
+    typedef typename SparseDenseOuterProduct::Index Index;
+  public:
+    EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
+      : Base(prod.lhs(), 0), m_outer(outer), m_factor(get(prod.rhs(), outer, typename internal::traits<Rhs>::StorageKind() ))
+    { }
+
+    inline Index outer() const { return m_outer; }
+    inline Index row() const { return Transpose ? m_outer : Base::index(); }
+    inline Index col() const { return Transpose ? Base::index() : m_outer; }
+
+    inline Scalar value() const { return Base::value() * m_factor; }
+
+  protected:
+    static Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense())
+    {
+      return rhs.coeff(outer);
+    }
+    
+    static Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse())
+    {
+      typename Traits::_RhsNested::InnerIterator it(rhs, outer);
+      if (it && it.index()==0)
+        return it.value();
+      
+      return Scalar(0);
+    }
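+    
+    // Note on the two get() overloads above: the third parameter is a tag
+    // (the rhs StorageKind, Dense or Sparse) used for overload selection
+    // only. For a sparse rhs, only a coefficient stored at inner index 0 can
+    // contribute, hence the it.index()==0 test.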
+    
+    Index m_outer;
+    Scalar m_factor;
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<SparseTimeDenseProduct<Lhs,Rhs> >
+ : traits<ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+  typedef Dense StorageKind;
+  typedef MatrixXpr XprKind;
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
+         int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
+         bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
+struct sparse_time_dense_product_impl;
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, true>
+{
+  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef typename Lhs::Index Index;
+  typedef typename Lhs::InnerIterator LhsInnerIterator;
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
+  {
+    for(Index c=0; c<rhs.cols(); ++c)
+    {
+      Index n = lhs.outerSize();
+      for(Index j=0; j<n; ++j)
+      {
+        typename Res::Scalar tmp(0);
+        for(LhsInnerIterator it(lhs,j); it ;++it)
+          tmp += it.value() * rhs.coeff(it.index(),c);
+        res.coeffRef(j,c) += alpha * tmp;
+      }
+    }
+  }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, true>
+{
+  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef typename Lhs::InnerIterator LhsInnerIterator;
+  typedef typename Lhs::Index Index;
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
+  {
+    for(Index c=0; c<rhs.cols(); ++c)
+    {
+      for(Index j=0; j<lhs.outerSize(); ++j)
+      {
+        typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
+        for(LhsInnerIterator it(lhs,j); it ;++it)
+          res.coeffRef(it.index(),c) += it.value() * rhs_j;
+      }
+    }
+  }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, false>
+{
+  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef typename Lhs::InnerIterator LhsInnerIterator;
+  typedef typename Lhs::Index Index;
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
+  {
+    for(Index j=0; j<lhs.outerSize(); ++j)
+    {
+      typename Res::RowXpr res_j(res.row(j));
+      for(LhsInnerIterator it(lhs,j); it ;++it)
+        res_j += (alpha*it.value()) * rhs.row(it.index());
+    }
+  }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, false>
+{
+  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+  typedef typename internal::remove_all<DenseResType>::type Res;
+  typedef typename Lhs::InnerIterator LhsInnerIterator;
+  typedef typename Lhs::Index Index;
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
+  {
+    for(Index j=0; j<lhs.outerSize(); ++j)
+    {
+      typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
+      for(LhsInnerIterator it(lhs,j); it ;++it)
+        res.row(it.index()) += (alpha*it.value()) * rhs_j;
+    }
+  }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
+inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
+{
+  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType>::run(lhs, rhs, res, alpha);
+}
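+
+// An illustrative usage sketch of the dispatcher above; it accumulates, i.e.
+// it computes res += alpha * (lhs * rhs) (sizes are hypothetical):
+//   SparseMatrix<double> A(m, k);
+//   MatrixXd B(k, n), C(m, n);
+//   internal::sparse_time_dense_product(A, B, C, 1.0);  // C += A * B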
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseTimeDenseProduct
+  : public ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct)
+
+    SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {}
+
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
+    {
+      internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
+    }
+
+  private:
+    SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&);
+};
+
+
+// dense = dense * sparse
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<DenseTimeSparseProduct<Lhs,Rhs> >
+ : traits<ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+  typedef Dense StorageKind;
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class DenseTimeSparseProduct
+  : public ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct)
+
+    DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {}
+
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
+    {
+      Transpose<const _LhsNested> lhs_t(m_lhs);
+      Transpose<const _RhsNested> rhs_t(m_rhs);
+      Transpose<Dest> dest_t(dest);
+      internal::sparse_time_dense_product(rhs_t, lhs_t, dest_t, alpha);
+    }
+
+  private:
+    DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEDENSEPRODUCT_H
diff --git a/Eigen/src/SparseCore/SparseDiagonalProduct.h b/Eigen/src/SparseCore/SparseDiagonalProduct.h
new file mode 100644
index 0000000..1bb590e
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseDiagonalProduct.h
@@ -0,0 +1,196 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+
+namespace Eigen { 
+
+// The product of a diagonal matrix with a sparse matrix can be easily
+// implemented using expression templates.
+// We have to consider two very different cases:
+// 1 - diag * row-major sparse
+//     => each inner vector <=> scalar * sparse vector product
+//     => so we can reuse CwiseUnaryOp::InnerIterator
+// 2 - diag * col-major sparse
+//     => each inner vector <=> dense vector * sparse vector cwise product
+//     => again, we can reuse the specialization of CwiseBinaryOp::InnerIterator
+//        for that particular case
+// The two other cases are symmetric.
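+//
+// E.g. (user-level illustration; sizes are hypothetical):
+//   DiagonalMatrix<double,Dynamic> D(n);
+//   SparseMatrix<double,RowMajor>  R(n,n);
+//   SparseMatrix<double>           C(n,n);  // col-major
+//   ... D * R ...  // case 1: row i of R scaled by D.diagonal()(i)
+//   ... D * C ...  // case 2: column j of C cwise-multiplied by D.diagonal()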
+
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<SparseDiagonalProduct<Lhs, Rhs> >
+{
+  typedef typename remove_all<Lhs>::type _Lhs;
+  typedef typename remove_all<Rhs>::type _Rhs;
+  typedef typename _Lhs::Scalar Scalar;
+  typedef typename promote_index_type<typename traits<Lhs>::Index,
+                                         typename traits<Rhs>::Index>::type Index;
+  typedef Sparse StorageKind;
+  typedef MatrixXpr XprKind;
+  enum {
+    RowsAtCompileTime = _Lhs::RowsAtCompileTime,
+    ColsAtCompileTime = _Rhs::ColsAtCompileTime,
+
+    MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime,
+
+    SparseFlags = is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags),
+    Flags = (SparseFlags&RowMajorBit),
+    CoeffReadCost = Dynamic
+  };
+};
+
+enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor};
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType, int RhsMode, int LhsMode>
+class sparse_diagonal_product_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseDiagonalProduct
+  : public SparseMatrixBase<SparseDiagonalProduct<Lhs,Rhs> >,
+    internal::no_assignment_operator
+{
+    typedef typename Lhs::Nested LhsNested;
+    typedef typename Rhs::Nested RhsNested;
+
+    typedef typename internal::remove_all<LhsNested>::type _LhsNested;
+    typedef typename internal::remove_all<RhsNested>::type _RhsNested;
+
+    enum {
+      LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal
+              : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor,
+      RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal
+              : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor
+    };
+
+  public:
+
+    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
+
+    typedef internal::sparse_diagonal_product_inner_iterator_selector
+                      <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+    
+    // We do not want ReverseInnerIterator for diagonal-sparse products,
+    // but this dummy declaration is needed to make diag * sparse * diag compile.
+    class ReverseInnerIterator;
+
+    EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
+      : m_lhs(lhs), m_rhs(rhs)
+    {
+      eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+    EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+    EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+  protected:
+    LhsNested m_lhs;
+    RhsNested m_rhs;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
+  : public CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator
+{
+    typedef typename CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator Base;
+    typedef typename Lhs::Index Index;
+  public:
+    inline sparse_diagonal_product_inner_iterator_selector(
+              const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
+    {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
+  : public CwiseBinaryOp<
+      scalar_product_op<typename Lhs::Scalar>,
+      const typename Rhs::ConstInnerVectorReturnType,
+      const typename Lhs::DiagonalVectorType>::InnerIterator
+{
+    typedef typename CwiseBinaryOp<
+      scalar_product_op<typename Lhs::Scalar>,
+      const typename Rhs::ConstInnerVectorReturnType,
+      const typename Lhs::DiagonalVectorType>::InnerIterator Base;
+    typedef typename Lhs::Index Index;
+    Index m_outer;
+  public:
+    inline sparse_diagonal_product_inner_iterator_selector(
+              const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.rhs().innerVector(outer).cwiseProduct(expr.lhs().diagonal()), 0), m_outer(outer)
+    {}
+    
+    inline Index outer() const { return m_outer; }
+    inline Index col() const { return m_outer; }
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseColMajor,SDP_IsDiagonal>
+  : public CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator
+{
+    typedef typename CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator Base;
+    typedef typename Lhs::Index Index;
+  public:
+    inline sparse_diagonal_product_inner_iterator_selector(
+              const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
+    {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
+  : public CwiseBinaryOp<
+      scalar_product_op<typename Rhs::Scalar>,
+      const typename Lhs::ConstInnerVectorReturnType,
+      const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
+{
+    typedef typename CwiseBinaryOp<
+      scalar_product_op<typename Rhs::Scalar>,
+      const typename Lhs::ConstInnerVectorReturnType,
+      const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+    typedef typename Lhs::Index Index;
+    Index m_outer;
+  public:
+    inline sparse_diagonal_product_inner_iterator_selector(
+              const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.lhs().innerVector(outer).cwiseProduct(expr.rhs().diagonal().transpose()), 0), m_outer(outer)
+    {}
+    
+    inline Index outer() const { return m_outer; }
+    inline Index row() const { return m_outer; }
+};
+
+} // end namespace internal
+
+// SparseMatrixBase functions
+
+template<typename Derived>
+template<typename OtherDerived>
+const SparseDiagonalProduct<Derived,OtherDerived>
+SparseMatrixBase<Derived>::operator*(const DiagonalBase<OtherDerived> &other) const
+{
+  return SparseDiagonalProduct<Derived,OtherDerived>(this->derived(), other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
diff --git a/Eigen/src/SparseCore/SparseDot.h b/Eigen/src/SparseCore/SparseDot.h
new file mode 100644
index 0000000..db39c9a
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseDot.h
@@ -0,0 +1,101 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_DOT_H
+#define EIGEN_SPARSE_DOT_H
+
+namespace Eigen { 
+
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+  eigen_assert(size() == other.size());
+  eigen_assert(other.size()>0 && "you are using an uninitialized vector");
+
+  typename Derived::InnerIterator i(derived(),0);
+  Scalar res(0);
+  while (i)
+  {
+    res += numext::conj(i.value()) * other.coeff(i.index());
+    ++i;
+  }
+  return res;
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+  eigen_assert(size() == other.size());
+
+  typedef typename Derived::Nested  Nested;
+  typedef typename OtherDerived::Nested  OtherNested;
+  typedef typename internal::remove_all<Nested>::type  NestedCleaned;
+  typedef typename internal::remove_all<OtherNested>::type  OtherNestedCleaned;
+
+  Nested nthis(derived());
+  OtherNested nother(other.derived());
+
+  typename NestedCleaned::InnerIterator i(nthis,0);
+  typename OtherNestedCleaned::InnerIterator j(nother,0);
+  Scalar res(0);
+  while (i && j)
+  {
+    if (i.index()==j.index())
+    {
+      res += numext::conj(i.value()) * j.value();
+      ++i; ++j;
+    }
+    else if (i.index()<j.index())
+      ++i;
+    else
+      ++j;
+  }
+  return res;
+}
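+
+// A minimal usage sketch (illustrative): both operands must be vectors of the
+// same scalar type and size.
+//   SparseVector<double> u(n), v(n);
+//   double d = u.dot(v);  // two-pointer walk over the sorted nonzeros,
+//                         // cost O(nnz(u) + nnz(v))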
+
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::squaredNorm() const
+{
+  return numext::real((*this).cwiseAbs2().sum());
+}
+
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::norm() const
+{
+  using std::sqrt;
+  return sqrt(squaredNorm());
+}
+
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::blueNorm() const
+{
+  return internal::blueNorm_impl(*this);
+}
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_DOT_H
diff --git a/Eigen/src/SparseCore/SparseFuzzy.h b/Eigen/src/SparseCore/SparseFuzzy.h
new file mode 100644
index 0000000..45f36e9
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseFuzzy.h
@@ -0,0 +1,26 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_FUZZY_H
+#define EIGEN_SPARSE_FUZZY_H
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// bool SparseMatrixBase<Derived>::isApprox(
+//   const OtherDerived& other,
+//   typename NumTraits<Scalar>::Real prec
+// ) const
+// {
+//   const typename internal::nested<Derived,2>::type nested(derived());
+//   const typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
+//   return    (nested - otherNested).cwise().abs2().sum()
+//          <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
+// }
+
+#endif // EIGEN_SPARSE_FUZZY_H
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
new file mode 100644
index 0000000..ba5e3a9
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -0,0 +1,1259 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEMATRIX_H
+#define EIGEN_SPARSEMATRIX_H
+
+namespace Eigen { 
+
+/** \ingroup SparseCore_Module
+  *
+  * \class SparseMatrix
+  *
+  * \brief A versatile sparse matrix representation
+  *
+  * This class implements a more versatile variant of the common \em compressed row/column storage format.
+  * Each column's (resp. row's) non zeros are stored as a pair of a value and its associated row (resp. column) index.
+  * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
+  * space in between the nonzeros of two successive columns (resp. rows) such that the insertion of new non-zeros
+  * can be done with limited memory reallocation and copies.
+  *
+  * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
+  * compatible with many libraries.
+  *
+  * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
+  *
+  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+  * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibilities
+  *                 are ColMajor and RowMajor. The default is 0, which means column-major.
+  * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
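+  *
+  * A short illustrative snippet (values are arbitrary):
+  * \code
+  * SparseMatrix<double> A(3,3);
+  * A.insert(0,1) = 2.0;   // may leave free space inside the internal buffer
+  * A.insert(2,0) = -1.0;
+  * A.makeCompressed();    // switch to the standard compressed format
+  * \endcode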
+  */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseMatrix<_Scalar, _Options, _Index> >
+{
+  typedef _Scalar Scalar;
+  typedef _Index Index;
+  typedef Sparse StorageKind;
+  typedef MatrixXpr XprKind;
+  enum {
+    RowsAtCompileTime = Dynamic,
+    ColsAtCompileTime = Dynamic,
+    MaxRowsAtCompileTime = Dynamic,
+    MaxColsAtCompileTime = Dynamic,
+    Flags = _Options | NestByRefBit | LvalueBit,
+    CoeffReadCost = NumTraits<Scalar>::ReadCost,
+    SupportedAccessPatterns = InnerRandomAccessPattern
+  };
+};
+
+template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
+struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
+{
+  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
+  typedef typename nested<MatrixType>::type MatrixTypeNested;
+  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+
+  typedef _Scalar Scalar;
+  typedef Dense StorageKind;
+  typedef _Index Index;
+  typedef MatrixXpr XprKind;
+
+  enum {
+    RowsAtCompileTime = Dynamic,
+    ColsAtCompileTime = 1,
+    MaxRowsAtCompileTime = Dynamic,
+    MaxColsAtCompileTime = 1,
+    Flags = 0,
+    CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
+  };
+};
+
+} // end namespace internal
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseMatrix
+  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
+{
+  public:
+    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
+    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
+    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
+
+    typedef MappedSparseMatrix<Scalar,Flags> Map;
+    using Base::IsRowMajor;
+    typedef internal::CompressedStorage<Scalar,Index> Storage;
+    enum {
+      Options = _Options
+    };
+
+  protected:
+
+    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+    Index m_outerSize;
+    Index m_innerSize;
+    Index* m_outerIndex;
+    Index* m_innerNonZeros;     // optional, if null then the data is compressed
+    Storage m_data;
+    
+    Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
+    const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
+
+  public:
+    
+    /** \returns whether \c *this is in compressed form. */
+    inline bool isCompressed() const { return m_innerNonZeros==0; }
+
+    /** \returns the number of rows of the matrix */
+    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+    /** \returns the number of columns of the matrix */
+    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+
+    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
+    inline Index innerSize() const { return m_innerSize; }
+    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
+    inline Index outerSize() const { return m_outerSize; }
+    
+    /** \returns a const pointer to the array of values.
+      * This function is aimed at interoperability with other libraries.
+      * \sa innerIndexPtr(), outerIndexPtr() */
+    inline const Scalar* valuePtr() const { return &m_data.value(0); }
+    /** \returns a non-const pointer to the array of values.
+      * This function is aimed at interoperability with other libraries.
+      * \sa innerIndexPtr(), outerIndexPtr() */
+    inline Scalar* valuePtr() { return &m_data.value(0); }
+
+    /** \returns a const pointer to the array of inner indices.
+      * This function is aimed at interoperability with other libraries.
+      * \sa valuePtr(), outerIndexPtr() */
+    inline const Index* innerIndexPtr() const { return &m_data.index(0); }
+    /** \returns a non-const pointer to the array of inner indices.
+      * This function is aimed at interoperability with other libraries.
+      * \sa valuePtr(), outerIndexPtr() */
+    inline Index* innerIndexPtr() { return &m_data.index(0); }
+
+    /** \returns a const pointer to the array of the starting positions of the inner vectors.
+      * This function is aimed at interoperability with other libraries.
+      * \sa valuePtr(), innerIndexPtr() */
+    inline const Index* outerIndexPtr() const { return m_outerIndex; }
+    /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
+      * This function is aimed at interoperability with other libraries.
+      * \sa valuePtr(), innerIndexPtr() */
+    inline Index* outerIndexPtr() { return m_outerIndex; }
+
+    /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
+      * This function is aimed at interoperability with other libraries.
+      * \warning it returns the null pointer 0 in compressed mode */
+    inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
+    /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
+      * This function is aimed at interoperability with other libraries.
+      * \warning it returns the null pointer 0 in compressed mode */
+    inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
+
+    /** \internal */
+    inline Storage& data() { return m_data; }
+    /** \internal */
+    inline const Storage& data() const { return m_data; }
+
+    /** \returns the value of the matrix at position \a row, \a col.
+      * This function returns Scalar(0) if the element is an explicit \em zero. */
+    inline Scalar coeff(Index row, Index col) const
+    {
+      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+      
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
+      return m_data.atInRange(m_outerIndex[outer], end, inner);
+    }
+
+    /** \returns a non-const reference to the value of the matrix at position \a row, \a col
+      *
+      * If the element does not exist then it is inserted via the insert(Index,Index) function
+      * which itself turns the matrix into a non compressed form if that was not the case.
+      *
+      * This is a O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index)
+      * function if the element does not already exist.
+      */
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+      
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+
+      Index start = m_outerIndex[outer];
+      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
+      eigen_assert(end>=start && "you probably called coeffRef on a non-finalized matrix");
+      if(end<=start)
+        return insert(row,col);
+      const Index p = m_data.searchLowerIndex(start,end-1,inner);
+      if((p<end) && (m_data.index(p)==inner))
+        return m_data.value(p);
+      else
+        return insert(row,col);
+    }
+
+    /** \returns a reference to a new non-zero coefficient with coordinates \a row x \a col.
+      * The non zero coefficient must \b not already exist.
+      *
+      * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
+      * mode while reserving room for 2 non zeros per inner vector. It is strongly recommended to first
+      * call reserve(const SizesType &) to reserve a more appropriate number of elements per
+      * inner vector that better matches your scenario.
+      *
+      * This function performs a sorted insertion in O(1) if the elements of each inner vector are
+      * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
+      *
+      */
+    Scalar& insert(Index row, Index col)
+    {
+      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+      
+      if(isCompressed())
+      {
+        reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
+      }
+      return insertUncompressed(row,col);
+    }
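+
+    // Illustrative sketch: the recommended random-insertion pattern; the
+    // per-column estimate of 6 non-zeros is an assumption of this example.
+    // \code
+    // SparseMatrix<double> A(1000,1000);      // ColMajor by default
+    // A.reserve(VectorXi::Constant(1000, 6)); // ~6 non-zeros per column (a guess)
+    // A.insert(i,j) = v_ij;                   // entry (i,j) must not already exist
+    // A.makeCompressed();                     // optional: return to compressed mode
+    // \endcode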
+
+  public:
+
+    class InnerIterator;
+    class ReverseInnerIterator;
+
+    /** Removes all non zeros but keeps the allocated memory. */
+    inline void setZero()
+    {
+      m_data.clear();
+      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+      if(m_innerNonZeros)
+        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
+    }
+
+    /** \returns the number of non zero coefficients */
+    inline Index nonZeros() const
+    {
+      if(m_innerNonZeros)
+        return innerNonZeros().sum();
+      return static_cast<Index>(m_data.size());
+    }
+
+    /** Preallocates \a reserveSize non zeros.
+      *
+      * Precondition: the matrix must be in compressed mode. */
+    inline void reserve(Index reserveSize)
+    {
+      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
+      m_data.reserve(reserveSize);
+    }
+    
+    #ifdef EIGEN_PARSED_BY_DOXYGEN
+    /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
+      *
+      * This function turns the matrix into non-compressed mode */
+    template<class SizesType>
+    inline void reserve(const SizesType& reserveSizes);
+    #else
+    template<class SizesType>
+    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
+    {
+      EIGEN_UNUSED_VARIABLE(enableif);
+      reserveInnerVectors(reserveSizes);
+    }
+    template<class SizesType>
+    inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
+    #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
+        typename
+    #endif
+        SizesType::Scalar())
+    {
+      EIGEN_UNUSED_VARIABLE(enableif);
+      reserveInnerVectors(reserveSizes);
+    }
+    #endif // EIGEN_PARSED_BY_DOXYGEN
+  protected:
+    template<class SizesType>
+    inline void reserveInnerVectors(const SizesType& reserveSizes)
+    {
+      if(isCompressed())
+      {
+        std::size_t totalReserveSize = 0;
+        // turn the matrix into non-compressed mode
+        m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
+        
+        // temporarily use m_innerNonZeros to hold the new starting points.
+        Index* newOuterIndex = m_innerNonZeros;
+        
+        Index count = 0;
+        for(Index j=0; j<m_outerSize; ++j)
+        {
+          newOuterIndex[j] = count;
+          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
+          totalReserveSize += reserveSizes[j];
+        }
+        m_data.reserve(totalReserveSize);
+        Index previousOuterIndex = m_outerIndex[m_outerSize];
+        for(Index j=m_outerSize-1; j>=0; --j)
+        {
+          Index innerNNZ = previousOuterIndex - m_outerIndex[j];
+          for(Index i=innerNNZ-1; i>=0; --i)
+          {
+            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
+            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
+          }
+          previousOuterIndex = m_outerIndex[j];
+          m_outerIndex[j] = newOuterIndex[j];
+          m_innerNonZeros[j] = innerNNZ;
+        }
+        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
+        
+        m_data.resize(m_outerIndex[m_outerSize]);
+      }
+      else
+      {
+        Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
+        if (!newOuterIndex) internal::throw_std_bad_alloc();
+        
+        Index count = 0;
+        for(Index j=0; j<m_outerSize; ++j)
+        {
+          newOuterIndex[j] = count;
+          Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
+          Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
+          count += toReserve + m_innerNonZeros[j];
+        }
+        newOuterIndex[m_outerSize] = count;
+        
+        m_data.resize(count);
+        for(Index j=m_outerSize-1; j>=0; --j)
+        {
+          Index offset = newOuterIndex[j] - m_outerIndex[j];
+          if(offset>0)
+          {
+            Index innerNNZ = m_innerNonZeros[j];
+            for(Index i=innerNNZ-1; i>=0; --i)
+            {
+              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
+              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
+            }
+          }
+        }
+        
+        std::swap(m_outerIndex, newOuterIndex);
+        std::free(newOuterIndex);
+      }
+      
+    }
+  public:
+
+    //--- low level purely coherent filling ---
+
+    /** \internal
+      * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+      * - the nonzero does not already exist
+      * - the new coefficient is the last one according to the storage order
+      *
+      * Before filling a given inner vector you must call the startVec(Index) function.
+      *
+      * After an insertion session, you should call the finalize() function.
+      *
+      * \sa insert, insertBackByOuterInner, startVec */
+    inline Scalar& insertBack(Index row, Index col)
+    {
+      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+    }
+
+    /** \internal
+      * \sa insertBack, startVec */
+    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+    {
+      eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
+      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
+      Index p = m_outerIndex[outer+1];
+      ++m_outerIndex[outer+1];
+      m_data.append(0, inner);
+      return m_data.value(p);
+    }
+
+    /** \internal
+      * \warning use it only if you know what you are doing */
+    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
+    {
+      Index p = m_outerIndex[outer+1];
+      ++m_outerIndex[outer+1];
+      m_data.append(0, inner);
+      return m_data.value(p);
+    }
+
+    /** \internal
+      * \sa insertBack, insertBackByOuterInner */
+    inline void startVec(Index outer)
+    {
+      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
+      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
+      m_outerIndex[outer+1] = m_outerIndex[outer];
+    }
+
+    /** \internal
+      * Must be called after inserting a set of non zero entries using the low level compressed API.
+      */
+    inline void finalize()
+    {
+      if(isCompressed())
+      {
+        Index size = static_cast<Index>(m_data.size());
+        Index i = m_outerSize;
+        // find the last filled column
+        while (i>=0 && m_outerIndex[i]==0)
+          --i;
+        ++i;
+        while (i<=m_outerSize)
+        {
+          m_outerIndex[i] = size;
+          ++i;
+        }
+      }
+    }
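+
+    // Illustrative sketch: sequential filling through this low-level compressed
+    // API; rows, cols and nnzEstimate are assumed given, and inner indices must
+    // be appended in increasing order within each inner vector.
+    // \code
+    // SparseMatrix<double> A(rows,cols); // compressed and empty
+    // A.reserve(nnzEstimate);
+    // for(int j=0; j<cols; ++j)          // outer == column for ColMajor storage
+    // {
+    //   A.startVec(j);
+    //   A.insertBack(i,j) = v;           // i strictly increasing within column j
+    // }
+    // A.finalize();
+    // \endcode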
+
+    //---
+
+    template<typename InputIterators>
+    void setFromTriplets(const InputIterators& begin, const InputIterators& end);
+
+    void sumupDuplicates();
+
+    //---
+    
+    /** \internal
+      * same as insert(Index,Index) except that the indices are given relative to the storage order */
+    Scalar& insertByOuterInner(Index j, Index i)
+    {
+      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
+    }
+
+    /** Turns the matrix into the \em compressed format.
+      */
+    void makeCompressed()
+    {
+      if(isCompressed())
+        return;
+      
+      Index oldStart = m_outerIndex[1];
+      m_outerIndex[1] = m_innerNonZeros[0];
+      for(Index j=1; j<m_outerSize; ++j)
+      {
+        Index nextOldStart = m_outerIndex[j+1];
+        Index offset = oldStart - m_outerIndex[j];
+        if(offset>0)
+        {
+          for(Index k=0; k<m_innerNonZeros[j]; ++k)
+          {
+            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
+            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
+          }
+        }
+        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
+        oldStart = nextOldStart;
+      }
+      std::free(m_innerNonZeros);
+      m_innerNonZeros = 0;
+      m_data.resize(m_outerIndex[m_outerSize]);
+      m_data.squeeze();
+    }
+
+    /** Turns the matrix into uncompressed mode */
+    void uncompress()
+    {
+      if(m_innerNonZeros != 0)
+        return; 
+      m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+      for (Index i = 0; i < m_outerSize; i++)
+      {
+        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; 
+      }
+    }
+    
+    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
+    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
+    {
+      prune(default_prunning_func(reference,epsilon));
+    }
+    
+    /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
+      * The functor type \a KeepFunc must implement the following function:
+      * \code
+      * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
+      * \endcode
+      * \sa prune(Scalar,RealScalar)
+      */
+    template<typename KeepFunc>
+    void prune(const KeepFunc& keep = KeepFunc())
+    {
+      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
+      // TODO also implement a unit test
+      makeCompressed();
+
+      Index k = 0;
+      for(Index j=0; j<m_outerSize; ++j)
+      {
+        Index previousStart = m_outerIndex[j];
+        m_outerIndex[j] = k;
+        Index end = m_outerIndex[j+1];
+        for(Index i=previousStart; i<end; ++i)
+        {
+          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
+          {
+            m_data.value(k) = m_data.value(i);
+            m_data.index(k) = m_data.index(i);
+            ++k;
+          }
+        }
+      }
+      m_outerIndex[m_outerSize] = k;
+      m_data.resize(k,0);
+    }
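+
+    // Illustrative sketch: a hypothetical KeepFunc retaining only the strictly
+    // lower-triangular entries (for SparseMatrix<double> with int indices).
+    // \code
+    // struct KeepLower {
+    //   bool operator()(const int& row, const int& col, const double&) const
+    //   { return row > col; }
+    // };
+    // A.prune(KeepLower());
+    // \endcode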
+
+    /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
+      * \sa resizeNonZeros(Index), reserve(), setZero()
+      */
+    void conservativeResize(Index rows, Index cols) 
+    {
+      // No change
+      if (this->rows() == rows && this->cols() == cols) return;
+      
+      // If one dimension is null, then there is nothing to be preserved
+      if(rows==0 || cols==0) return resize(rows,cols);
+
+      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
+      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
+      Index newInnerSize = IsRowMajor ? cols : rows;
+
+      // Deals with inner non zeros
+      if (m_innerNonZeros)
+      {
+        // Resize m_innerNonZeros
+        Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
+        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
+        m_innerNonZeros = newInnerNonZeros;
+        
+        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)          
+          m_innerNonZeros[i] = 0;
+      } 
+      else if (innerChange < 0) 
+      {
+        // Inner size decreased: allocate a new m_innerNonZeros
+        m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
+        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
+        for(Index i = 0; i < m_outerSize; i++)
+          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+      }
+      
+      // Change the m_innerNonZeros in case of a decrease of inner size
+      if (m_innerNonZeros && innerChange < 0)
+      {
+        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
+        {
+          Index &n = m_innerNonZeros[i];
+          Index start = m_outerIndex[i];
+          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; 
+        }
+      }
+      
+      m_innerSize = newInnerSize;
+
+      // Re-allocate outer index structure if necessary
+      if (outerChange == 0)
+        return;
+          
+      Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
+      if (!newOuterIndex) internal::throw_std_bad_alloc();
+      m_outerIndex = newOuterIndex;
+      if (outerChange > 0)
+      {
+        Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)          
+          m_outerIndex[i] = last; 
+      }
+      m_outerSize += outerChange;
+    }
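+
+    // Illustrative sketch: growing a matrix in place; existing entries are kept
+    // and the appended rows/columns start empty.
+    // \code
+    // SparseMatrix<double> A(100,100);
+    // // ... fill A ...
+    // A.conservativeResize(150,150); // the old 100x100 entries are preserved
+    // \endcode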
+    
+    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
+      * \sa resizeNonZeros(Index), reserve(), setZero()
+      */
+    void resize(Index rows, Index cols)
+    {
+      const Index outerSize = IsRowMajor ? rows : cols;
+      m_innerSize = IsRowMajor ? cols : rows;
+      m_data.clear();
+      if (m_outerSize != outerSize || m_outerSize==0)
+      {
+        std::free(m_outerIndex);
+        m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
+        if (!m_outerIndex) internal::throw_std_bad_alloc();
+        
+        m_outerSize = outerSize;
+      }
+      if(m_innerNonZeros)
+      {
+        std::free(m_innerNonZeros);
+        m_innerNonZeros = 0;
+      }
+      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+    }
+
+    /** \internal
+      * Resize the nonzero vector to \a size */
+    void resizeNonZeros(Index size)
+    {
+      // TODO remove this function
+      m_data.resize(size);
+    }
+
+    /** \returns a const expression of the diagonal coefficients */
+    const Diagonal<const SparseMatrix> diagonal() const { return *this; }
+
+    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
+    inline SparseMatrix()
+      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      resize(0, 0);
+    }
+
+    /** Constructs a \a rows \c x \a cols empty matrix */
+    inline SparseMatrix(Index rows, Index cols)
+      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      resize(rows, cols);
+    }
+
+    /** Constructs a sparse matrix from the sparse expression \a other */
+    template<typename OtherDerived>
+    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+      check_template_parameters();
+      *this = other.derived();
+    }
+    
+    /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
+    template<typename OtherDerived, unsigned int UpLo>
+    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
+      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      *this = other;
+    }
+
+    /** Copy constructor (it performs a deep copy) */
+    inline SparseMatrix(const SparseMatrix& other)
+      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      *this = other.derived();
+    }
+
+    /** \brief Copy constructor with in-place evaluation */
+    template<typename OtherDerived>
+    SparseMatrix(const ReturnByValue<OtherDerived>& other)
+      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+    {
+      check_template_parameters();
+      initAssignment(other);
+      other.evalTo(*this);
+    }
+
+    /** Swaps the content of two sparse matrices of the same type.
+      * This is a fast operation that simply swaps the underlying pointers and parameters. */
+    inline void swap(SparseMatrix& other)
+    {
+      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+      std::swap(m_outerIndex, other.m_outerIndex);
+      std::swap(m_innerSize, other.m_innerSize);
+      std::swap(m_outerSize, other.m_outerSize);
+      std::swap(m_innerNonZeros, other.m_innerNonZeros);
+      m_data.swap(other.m_data);
+    }
+
+    /** Sets *this to the identity matrix */
+    inline void setIdentity()
+    {
+      eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
+      this->m_data.resize(rows());
+      Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
+      Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
+      Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
+    }
+    inline SparseMatrix& operator=(const SparseMatrix& other)
+    {
+      if (other.isRValue())
+      {
+        swap(other.const_cast_derived());
+      }
+      else if(this!=&other)
+      {
+        initAssignment(other);
+        if(other.isCompressed())
+        {
+          memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
+          m_data = other.m_data;
+        }
+        else
+        {
+          Base::operator=(other);
+        }
+      }
+      return *this;
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename Lhs, typename Rhs>
+    inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+    { return Base::operator=(product); }
+    
+    template<typename OtherDerived>
+    inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
+    {
+      initAssignment(other);
+      return Base::operator=(other.derived());
+    }
+    
+    template<typename OtherDerived>
+    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
+    { return Base::operator=(other.derived()); }
+    #endif
+
+    template<typename OtherDerived>
+    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
+
+    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
+    {
+      EIGEN_DBG_SPARSE(
+        s << "Nonzero entries:\n";
+        if(m.isCompressed())
+          for (Index i=0; i<m.nonZeros(); ++i)
+            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+        else
+          for (Index i=0; i<m.outerSize(); ++i)
+          {
+            Index p = m.m_outerIndex[i];
+            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
+            Index k=p;
+            for (; k<pe; ++k)
+              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
+            for (; k<m.m_outerIndex[i+1]; ++k)
+              s << "(_,_) ";
+          }
+        s << std::endl;
+        s << std::endl;
+        s << "Outer pointers:\n";
+        for (Index i=0; i<m.outerSize(); ++i)
+          s << m.m_outerIndex[i] << " ";
+        s << " $" << std::endl;
+        if(!m.isCompressed())
+        {
+          s << "Inner non zeros:\n";
+          for (Index i=0; i<m.outerSize(); ++i)
+            s << m.m_innerNonZeros[i] << " ";
+          s << " $" << std::endl;
+        }
+        s << std::endl;
+      );
+      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
+      return s;
+    }
+
+    /** Destructor */
+    inline ~SparseMatrix()
+    {
+      std::free(m_outerIndex);
+      std::free(m_innerNonZeros);
+    }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** Overloaded for performance */
+    Scalar sum() const;
+#endif
+    
+#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
+#     include EIGEN_SPARSEMATRIX_PLUGIN
+#   endif
+
+protected:
+
+    template<typename Other>
+    void initAssignment(const Other& other)
+    {
+      resize(other.rows(), other.cols());
+      if(m_innerNonZeros)
+      {
+        std::free(m_innerNonZeros);
+        m_innerNonZeros = 0;
+      }
+    }
+
+    /** \internal
+      * \sa insert(Index,Index) */
+    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
+
+    /** \internal
+      * A vector object that is equal to 0 everywhere except for the value v at position i */
+    class SingletonVector
+    {
+        Index m_index;
+        Index m_value;
+      public:
+        typedef Index value_type;
+        SingletonVector(Index i, Index v)
+          : m_index(i), m_value(v)
+        {}
+
+        Index operator[](Index i) const { return i==m_index ? m_value : 0; }
+    };
+
+    /** \internal
+      * \sa insert(Index,Index) */
+    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
+
+public:
+    /** \internal
+      * \sa insert(Index,Index) */
+    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
+    {
+      const Index outer = IsRowMajor ? row : col;
+      const Index inner = IsRowMajor ? col : row;
+
+      eigen_assert(!isCompressed());
+      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
+
+      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
+      m_data.index(p) = inner;
+      return (m_data.value(p) = 0);
+    }
+
+private:
+  static void check_template_parameters()
+  {
+    EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+    EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
+  }
+
+  struct default_prunning_func {
+    default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
+    inline bool operator() (const Index&, const Index&, const Scalar& value) const
+    {
+      return !internal::isMuchSmallerThan(value, reference, epsilon);
+    }
+    Scalar reference;
+    RealScalar epsilon;
+  };
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
+{
+  public:
+    InnerIterator(const SparseMatrix& mat, Index outer)
+      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
+    {
+      if(mat.isCompressed())
+        m_end = mat.m_outerIndex[outer+1];
+      else
+        m_end = m_id + mat.m_innerNonZeros[outer];
+    }
+
+    inline InnerIterator& operator++() { m_id++; return *this; }
+
+    inline const Scalar& value() const { return m_values[m_id]; }
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
+
+    inline Index index() const { return m_indices[m_id]; }
+    inline Index outer() const { return m_outer; }
+    inline Index row() const { return IsRowMajor ? m_outer : index(); }
+    inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+    inline operator bool() const { return (m_id < m_end); }
+
+  protected:
+    const Scalar* m_values;
+    const Index* m_indices;
+    const Index m_outer;
+    Index m_id;
+    Index m_end;
+};
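+
+// Illustrative sketch: the canonical loop visiting every stored entry of a
+// SparseMatrix<double> A, one inner vector at a time.
+// \code
+// for (int k=0; k<A.outerSize(); ++k)
+//   for (SparseMatrix<double>::InnerIterator it(A,k); it; ++it)
+//     std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
+// \endcode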
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
+{
+  public:
+    ReverseInnerIterator(const SparseMatrix& mat, Index outer)
+      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
+    {
+      if(mat.isCompressed())
+        m_id = mat.m_outerIndex[outer+1];
+      else
+        m_id = m_start + mat.m_innerNonZeros[outer];
+    }
+
+    inline ReverseInnerIterator& operator--() { --m_id; return *this; }
+
+    inline const Scalar& value() const { return m_values[m_id-1]; }
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
+
+    inline Index index() const { return m_indices[m_id-1]; }
+    inline Index outer() const { return m_outer; }
+    inline Index row() const { return IsRowMajor ? m_outer : index(); }
+    inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+    inline operator bool() const { return (m_id > m_start); }
+
+  protected:
+    const Scalar* m_values;
+    const Index* m_indices;
+    const Index m_outer;
+    Index m_id;
+    const Index m_start;
+};
+
+namespace internal {
+
+template<typename InputIterator, typename SparseMatrixType>
+void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
+{
+  EIGEN_UNUSED_VARIABLE(Options);
+  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
+  typedef typename SparseMatrixType::Scalar Scalar;
+  typedef typename SparseMatrixType::Index Index;
+  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());
+
+  if(begin!=end)
+  {
+    // pass 1: count the nnz per inner-vector
+    Matrix<Index,Dynamic,1> wi(trMat.outerSize());
+    wi.setZero();
+    for(InputIterator it(begin); it!=end; ++it)
+    {
+      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
+      wi(IsRowMajor ? it->col() : it->row())++;
+    }
+
+    // pass 2: insert all the elements into trMat
+    trMat.reserve(wi);
+    for(InputIterator it(begin); it!=end; ++it)
+      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
+
+    // pass 3:
+    trMat.sumupDuplicates();
+  }
+
+  // pass 4: transposed copy -> implicit sorting
+  mat = trMat;
+}
+
+}
+
+
+/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
+  *
+  * A \em triplet is a tuple (i,j,value) defining a non-zero element.
+  * The input list of triplets does not have to be sorted, and may contain duplicated elements.
+  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
+  * This is an \em O(n) operation, with \em n the number of triplet elements.
+  * The initial contents of \c *this are destroyed.
+  * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
+  * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
+  *
+  * The \a InputIterators value_type must provide the following interface:
+  * \code
+  * Scalar value() const; // the value
+  * Index row() const;    // the row index i
+  * Index col() const;    // the column index j
+  * \endcode
+  * See for instance the Eigen::Triplet template class.
+  *
+  * Here is a typical usage example:
+  * \code
+    typedef Triplet<double> T;
+    std::vector<T> tripletList;
+    tripletList.reserve(estimation_of_entries);
+    for(...)
+    {
+      // ...
+      tripletList.push_back(T(i,j,v_ij));
+    }
+    SparseMatrixType m(rows,cols);
+    m.setFromTriplets(tripletList.begin(), tripletList.end());
+    // m is ready to go!
+  * \endcode
+  *
+  * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
+  * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should instead
+  * be stored explicitly, for instance in a std::vector.
+  */
+template<typename Scalar, int _Options, typename _Index>
+template<typename InputIterators>
+void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
+{
+  internal::set_from_triplets(begin, end, *this);
+}
+
+/** \internal */
+template<typename Scalar, int _Options, typename _Index>
+void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
+{
+  eigen_assert(!isCompressed());
+  // TODO, in practice we should be able to use m_innerNonZeros for that task
+  Matrix<Index,Dynamic,1> wi(innerSize());
+  wi.fill(-1);
+  Index count = 0;
+  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
+  for(Index j=0; j<outerSize(); ++j)
+  {
+    Index start   = count;
+    Index oldEnd  = m_outerIndex[j]+m_innerNonZeros[j];
+    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
+    {
+      Index i = m_data.index(k);
+      if(wi(i)>=start)
+      {
+        // we have already met this entry => accumulate it
+        m_data.value(wi(i)) += m_data.value(k);
+      }
+      else
+      {
+        m_data.value(count) = m_data.value(k);
+        m_data.index(count) = m_data.index(k);
+        wi(i) = count;
+        ++count;
+      }
+    }
+    m_outerIndex[j] = start;
+  }
+  m_outerIndex[m_outerSize] = count;
+
+  // turn the matrix into compressed form
+  std::free(m_innerNonZeros);
+  m_innerNonZeros = 0;
+  m_data.resize(m_outerIndex[m_outerSize]);
+}
+
+template<typename Scalar, int _Options, typename _Index>
+template<typename OtherDerived>
+EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
+{
+  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+  
+  const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+  if (needToTranspose)
+  {
+    // two passes algorithm:
+    //  1 - compute the number of coeffs per dest inner vector
+    //  2 - do the actual copy/eval
+    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
+    typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+    OtherCopy otherCopy(other.derived());
+
+    SparseMatrix dest(other.rows(),other.cols());
+    Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
+
+    // pass 1
+    // FIXME the above copy could be merged with that pass
+    for (Index j=0; j<otherCopy.outerSize(); ++j)
+      for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+        ++dest.m_outerIndex[it.index()];
+
+    // prefix sum
+    Index count = 0;
+    Matrix<Index,Dynamic,1> positions(dest.outerSize());
+    for (Index j=0; j<dest.outerSize(); ++j)
+    {
+      Index tmp = dest.m_outerIndex[j];
+      dest.m_outerIndex[j] = count;
+      positions[j] = count;
+      count += tmp;
+    }
+    dest.m_outerIndex[dest.outerSize()] = count;
+    // alloc
+    dest.m_data.resize(count);
+    // pass 2
+    for (Index j=0; j<otherCopy.outerSize(); ++j)
+    {
+      for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+      {
+        Index pos = positions[it.index()]++;
+        dest.m_data.index(pos) = j;
+        dest.m_data.value(pos) = it.value();
+      }
+    }
+    this->swap(dest);
+    return *this;
+  }
+  else
+  {
+    if(other.isRValue())
+      initAssignment(other.derived());
+    // there is no special optimization
+    return Base::operator=(other.derived());
+  }
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
+{
+  eigen_assert(!isCompressed());
+
+  const Index outer = IsRowMajor ? row : col;
+  const Index inner = IsRowMajor ? col : row;
+
+  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
+  Index innerNNZ = m_innerNonZeros[outer];
+  if(innerNNZ>=room)
+  {
+    // this inner vector is full, we need to reallocate the whole buffer :(
+    reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
+  }
+
+  Index startId = m_outerIndex[outer];
+  Index p = startId + m_innerNonZeros[outer];
+  while ( (p > startId) && (m_data.index(p-1) > inner) )
+  {
+    m_data.index(p) = m_data.index(p-1);
+    m_data.value(p) = m_data.value(p-1);
+    --p;
+  }
+  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exist, you must call coeffRef to this end");
+
+  m_innerNonZeros[outer]++;
+
+  m_data.index(p) = inner;
+  return (m_data.value(p) = 0);
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
+{
+  eigen_assert(isCompressed());
+
+  const Index outer = IsRowMajor ? row : col;
+  const Index inner = IsRowMajor ? col : row;
+
+  Index previousOuter = outer;
+  if (m_outerIndex[outer+1]==0)
+  {
+    // we start a new inner vector
+    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
+    {
+      m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+      --previousOuter;
+    }
+    m_outerIndex[outer+1] = m_outerIndex[outer];
+  }
+
+  // here we have to handle the tricky case where the outerIndex array
+  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting into, e.g.,
+  // the 2nd inner vector...
+  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+                && (size_t(m_outerIndex[outer+1]) == m_data.size());
+
+  size_t startId = m_outerIndex[outer];
+  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
+  size_t p = m_outerIndex[outer+1];
+  ++m_outerIndex[outer+1];
+
+  double reallocRatio = 1;
+  if (m_data.allocatedSize()<=m_data.size())
+  {
+    // if there is no preallocated memory, let's reserve a minimum of 32 elements
+    if (m_data.size()==0)
+    {
+      m_data.reserve(32);
+    }
+    else
+    {
+      // we need to reallocate the data, to reduce multiple reallocations
+      // we use a smart resize algorithm based on the current filling ratio
+      // in addition, we use double to avoid integers overflows
+      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
+      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
+      // furthermore we bound the realloc ratio to:
+      //   1) reduce multiple minor realloc when the matrix is almost filled
+      //   2) avoid to allocate too much memory when the matrix is almost empty
+      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
+    }
+  }
+  m_data.resize(m_data.size()+1,reallocRatio);
+
+  if (!isLastVec)
+  {
+    if (previousOuter==-1)
+    {
+      // oops wrong guess.
+      // let's correct the outer offsets
+      for (Index k=0; k<=(outer+1); ++k)
+        m_outerIndex[k] = 0;
+      Index k=outer+1;
+      while(m_outerIndex[k]==0)
+        m_outerIndex[k++] = 1;
+      while (k<=m_outerSize && m_outerIndex[k]!=0)
+        m_outerIndex[k++]++;
+      p = 0;
+      --k;
+      k = m_outerIndex[k]-1;
+      while (k>0)
+      {
+        m_data.index(k) = m_data.index(k-1);
+        m_data.value(k) = m_data.value(k-1);
+        k--;
+      }
+    }
+    else
+    {
+      // we are not inserting into the last inner vec
+      // update outer indices:
+      Index j = outer+2;
+      while (j<=m_outerSize && m_outerIndex[j]!=0)
+        m_outerIndex[j++]++;
+      --j;
+      // shift data of last vecs:
+      Index k = m_outerIndex[j]-1;
+      while (k>=Index(p))
+      {
+        m_data.index(k) = m_data.index(k-1);
+        m_data.value(k) = m_data.value(k-1);
+        k--;
+      }
+    }
+  }
+
+  while ( (p > startId) && (m_data.index(p-1) > inner) )
+  {
+    m_data.index(p) = m_data.index(p-1);
+    m_data.value(p) = m_data.value(p-1);
+    --p;
+  }
+
+  m_data.index(p) = inner;
+  return (m_data.value(p) = 0);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEMATRIX_H
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
new file mode 100644
index 0000000..6b2169a
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -0,0 +1,454 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEMATRIXBASE_H
+#define EIGEN_SPARSEMATRIXBASE_H
+
+namespace Eigen { 
+
+/** \ingroup SparseCore_Module
+  *
+  * \class SparseMatrixBase
+  *
+  * \brief Base class of any sparse matrices or sparse expressions
+  *
+  * \tparam Derived
+  *
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
+  */
+template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
+{
+  public:
+
+    typedef typename internal::traits<Derived>::Scalar Scalar;
+    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+    typedef typename internal::traits<Derived>::StorageKind StorageKind;
+    typedef typename internal::traits<Derived>::Index Index;
+    typedef typename internal::add_const_on_value_type_if_arithmetic<
+                         typename internal::packet_traits<Scalar>::type
+                     >::type PacketReturnType;
+
+    typedef SparseMatrixBase StorageBaseType;
+    typedef EigenBase<Derived> Base;
+    
+    template<typename OtherDerived>
+    Derived& operator=(const EigenBase<OtherDerived> &other)
+    {
+      other.derived().evalTo(derived());
+      return derived();
+    }
+
+    enum {
+
+      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+        /**< The number of rows at compile-time. This is just a copy of the value provided
+          * by the \a Derived type. If a value is not known at compile-time,
+          * it is set to the \a Dynamic constant.
+          * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
+
+      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+        /**< The number of columns at compile-time. This is just a copy of the value provided
+          * by the \a Derived type. If a value is not known at compile-time,
+          * it is set to the \a Dynamic constant.
+          * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
+
+
+      SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
+                                                   internal::traits<Derived>::ColsAtCompileTime>::ret),
+        /**< This is equal to the number of coefficients, i.e. the number of
+          * rows times the number of columns, or to \a Dynamic if this is not
+          * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
+
+      MaxRowsAtCompileTime = RowsAtCompileTime,
+      MaxColsAtCompileTime = ColsAtCompileTime,
+
+      MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
+                                                      MaxColsAtCompileTime>::ret),
+
+      IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
+        /**< This is set to true if either the number of rows or the number of
+          * columns is known at compile-time to be equal to 1. Indeed, in that case,
+          * we are dealing with a column-vector (if there is only one column) or with
+          * a row-vector (if there is only one row). */
+
+      Flags = internal::traits<Derived>::Flags,
+        /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
+          * constructed from this one. See the \ref flags "list of flags".
+          */
+
+      CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+        /**< This is a rough measure of how expensive it is to read one coefficient from
+          * this expression.
+          */
+
+      IsRowMajor = Flags&RowMajorBit ? 1 : 0,
+      
+      InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
+                             : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
+
+      #ifndef EIGEN_PARSED_BY_DOXYGEN
+      _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
+      #endif
+    };
+
+    /** \internal the return type of MatrixBase::adjoint() */
+    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+                        CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
+                        Transpose<const Derived>
+                     >::type AdjointReturnType;
+
+
+    typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, Index> PlainObject;
+
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** This is the "real scalar" type; if the \a Scalar type is already real numbers
+      * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
+      * \a Scalar is \a std::complex<T> then RealScalar is \a T.
+      *
+      * \sa class NumTraits
+      */
+    typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    /** \internal the return type of coeff()
+      */
+    typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;
+
+    /** \internal Represents a matrix with all coefficients equal to one another*/
+    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
+
+    /** type of the equivalent square matrix */
+    typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
+                          EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+
+    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    inline Derived& derived() { return *static_cast<Derived*>(this); }
+    inline Derived& const_cast_derived() const
+    { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
+#   include "../plugins/CommonCwiseUnaryOps.h"
+#   include "../plugins/CommonCwiseBinaryOps.h"
+#   include "../plugins/MatrixCwiseUnaryOps.h"
+#   include "../plugins/MatrixCwiseBinaryOps.h"
+#   include "../plugins/BlockMethods.h"
+#   ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
+#     include EIGEN_SPARSEMATRIXBASE_PLUGIN
+#   endif
+#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+    /** \returns the number of rows. \sa cols() */
+    inline Index rows() const { return derived().rows(); }
+    /** \returns the number of columns. \sa rows() */
+    inline Index cols() const { return derived().cols(); }
+    /** \returns the number of coefficients, which is \a rows()*cols().
+      * \sa rows(), cols(). */
+    inline Index size() const { return rows() * cols(); }
+    /** \returns the number of nonzero coefficients which is in practice the number
+      * of stored coefficients. */
+    inline Index nonZeros() const { return derived().nonZeros(); }
+    /** \returns true if either the number of rows or the number of columns is equal to 1.
+      * In other words, this function returns
+      * \code rows()==1 || cols()==1 \endcode
+      * \sa rows(), cols(), IsVectorAtCompileTime. */
+    inline bool isVector() const { return rows()==1 || cols()==1; }
+    /** \returns the size of the storage major dimension,
+      * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
+    Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
+    /** \returns the size of the inner dimension according to the storage order,
+      * i.e., the number of rows for a column-major matrix, and the number of columns otherwise */
+    Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
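+
+    // Illustrative sketch: for the default column-major storage the outer
+    // dimension is the columns and the inner dimension is the rows; row-major
+    // storage swaps the two.
+    // \code
+    // SparseMatrix<double> A(3,5);  // ColMajor by default
+    // assert(A.outerSize()==5 && A.innerSize()==3);
+    // \endcode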
+
+    bool isRValue() const { return m_isRValue; }
+    Derived& markAsRValue() { m_isRValue = true; return derived(); }
+
+    SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }
+
+    
+    template<typename OtherDerived>
+    Derived& operator=(const ReturnByValue<OtherDerived>& other)
+    {
+      other.evalTo(derived());
+      return derived();
+    }
+
+
+    template<typename OtherDerived>
+    inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
+    {
+      return assign(other.derived());
+    }
+
+    inline Derived& operator=(const Derived& other)
+    {
+//       if (other.isRValue())
+//         derived().swap(other.const_cast_derived());
+//       else
+      return assign(other.derived());
+    }
+
+  protected:
+
+    template<typename OtherDerived>
+    inline Derived& assign(const OtherDerived& other)
+    {
+      const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+      const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
+      if ((!transpose) && other.isRValue())
+      {
+        // eval without temporary
+        derived().resize(other.rows(), other.cols());
+        derived().setZero();
+        derived().reserve((std::max)(this->rows(),this->cols())*2);
+        for (Index j=0; j<outerSize; ++j)
+        {
+          derived().startVec(j);
+          for (typename OtherDerived::InnerIterator it(other, j); it; ++it)
+          {
+            Scalar v = it.value();
+            derived().insertBackByOuterInner(j,it.index()) = v;
+          }
+        }
+        derived().finalize();
+      }
+      else
+      {
+        assignGeneric(other);
+      }
+      return derived();
+    }
+
+    template<typename OtherDerived>
+    inline void assignGeneric(const OtherDerived& other)
+    {
+      //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+      eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+                  (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
+                  "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
+      enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
+
+      const Index outerSize = other.outerSize();
+      //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
+      // thanks to shallow copies, we always eval to a temporary
+      Derived temp(other.rows(), other.cols());
+
+      temp.reserve((std::max)(this->rows(),this->cols())*2);
+      for (Index j=0; j<outerSize; ++j)
+      {
+        temp.startVec(j);
+        for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
+        {
+          Scalar v = it.value();
+          temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
+        }
+      }
+      temp.finalize();
+
+      derived() = temp.markAsRValue();
+    }
+
+  public:
+
+    template<typename Lhs, typename Rhs>
+    inline Derived& operator=(const SparseSparseProduct<Lhs,Rhs>& product);
+
+    friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
+    {
+      typedef typename Derived::Nested Nested;
+      typedef typename internal::remove_all<Nested>::type NestedCleaned;
+
+      if (Flags&RowMajorBit)
+      {
+        const Nested nm(m.derived());
+        for (Index row=0; row<nm.outerSize(); ++row)
+        {
+          Index col = 0;
+          for (typename NestedCleaned::InnerIterator it(nm.derived(), row); it; ++it)
+          {
+            for ( ; col<it.index(); ++col)
+              s << "0 ";
+            s << it.value() << " ";
+            ++col;
+          }
+          for ( ; col<m.cols(); ++col)
+            s << "0 ";
+          s << std::endl;
+        }
+      }
+      else
+      {
+        const Nested nm(m.derived());
+        if (m.cols() == 1) {
+          Index row = 0;
+          for (typename NestedCleaned::InnerIterator it(nm.derived(), 0); it; ++it)
+          {
+            for ( ; row<it.index(); ++row)
+              s << "0" << std::endl;
+            s << it.value() << std::endl;
+            ++row;
+          }
+          for ( ; row<m.rows(); ++row)
+            s << "0" << std::endl;
+        }
+        else
+        {
+          SparseMatrix<Scalar, RowMajorBit, Index> trans = m;
+          s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, Index> >&>(trans);
+        }
+      }
+      return s;
+    }
+
+    template<typename OtherDerived>
+    Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
+    template<typename OtherDerived>
+    Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
+
+    Derived& operator*=(const Scalar& other);
+    Derived& operator/=(const Scalar& other);
+
+    #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
+      CwiseBinaryOp< \
+        internal::scalar_product_op< \
+          typename internal::scalar_product_traits< \
+            typename internal::traits<Derived>::Scalar, \
+            typename internal::traits<OtherDerived>::Scalar \
+          >::ReturnType \
+        >, \
+        const Derived, \
+        const OtherDerived \
+      >
+
+    template<typename OtherDerived>
+    EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+    cwiseProduct(const MatrixBase<OtherDerived> &other) const;
+
+    // sparse * sparse
+    template<typename OtherDerived>
+    const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+    operator*(const SparseMatrixBase<OtherDerived> &other) const;
+
+    // sparse * diagonal
+    template<typename OtherDerived>
+    const SparseDiagonalProduct<Derived,OtherDerived>
+    operator*(const DiagonalBase<OtherDerived> &other) const;
+
+    // diagonal * sparse
+    template<typename OtherDerived> friend
+    const SparseDiagonalProduct<OtherDerived,Derived>
+    operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
+    { return SparseDiagonalProduct<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
+
+    /** dense * sparse (return a dense object unless it is an outer product) */
+    template<typename OtherDerived> friend
+    const typename DenseSparseProductReturnType<OtherDerived,Derived>::Type
+    operator*(const MatrixBase<OtherDerived>& lhs, const Derived& rhs)
+    { return typename DenseSparseProductReturnType<OtherDerived,Derived>::Type(lhs.derived(),rhs); }
+
+    /** sparse * dense (returns a dense object unless it is an outer product) */
+    template<typename OtherDerived>
+    const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+    operator*(const MatrixBase<OtherDerived> &other) const
+    { return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived()); }
+    
+     /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
+    SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
+    {
+      return SparseSymmetricPermutationProduct<Derived,Upper|Lower>(derived(), perm);
+    }
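+
+    // Illustrative sketch: applying a symmetric permutation, with P an assumed,
+    // already-built permutation of matching size.
+    // \code
+    // PermutationMatrix<Dynamic,Dynamic,int> P = ...;
+    // SparseMatrix<double> B = A.twistedBy(P);  // B = P * A * P^-1
+    // \endcode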
+
+    template<typename OtherDerived>
+    Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
+
+    #ifdef EIGEN2_SUPPORT
+    // deprecated
+    template<typename OtherDerived>
+    typename internal::plain_matrix_type_column_major<OtherDerived>::type
+    solveTriangular(const MatrixBase<OtherDerived>& other) const;
+
+    // deprecated
+    template<typename OtherDerived>
+    void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
+    #endif // EIGEN2_SUPPORT
+
+    template<int Mode>
+    inline const SparseTriangularView<Derived, Mode> triangularView() const;
+
+    template<unsigned int UpLo> inline const SparseSelfAdjointView<Derived, UpLo> selfadjointView() const;
+    template<unsigned int UpLo> inline SparseSelfAdjointView<Derived, UpLo> selfadjointView();
+
+    template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
+    template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
+    RealScalar squaredNorm() const;
+    RealScalar norm()  const;
+    RealScalar blueNorm() const;
+
+    Transpose<Derived> transpose() { return derived(); }
+    const Transpose<const Derived> transpose() const { return derived(); }
+    const AdjointReturnType adjoint() const { return transpose(); }
+
+    // inner-vector
+    typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true>       InnerVectorReturnType;
+    typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;
+    InnerVectorReturnType innerVector(Index outer);
+    const ConstInnerVectorReturnType innerVector(Index outer) const;
+
+    // set of inner-vectors
+    typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
+    typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
+    InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
+    const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;
+
+    /** \internal use operator= */
+    template<typename DenseDerived>
+    void evalTo(MatrixBase<DenseDerived>& dst) const
+    {
+      dst.setZero();
+      for (Index j=0; j<outerSize(); ++j)
+        for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+          dst.coeffRef(i.row(),i.col()) = i.value();
+    }
+
+    Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+    {
+      return derived();
+    }
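+
+    // Illustrative sketch: evaluating a sparse expression into a dense matrix.
+    // \code
+    // Eigen::MatrixXd D = A.toDense();
+    // \endcode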
+
+    template<typename OtherDerived>
+    bool isApprox(const SparseMatrixBase<OtherDerived>& other,
+                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
+    { return toDense().isApprox(other.toDense(),prec); }
+
+    template<typename OtherDerived>
+    bool isApprox(const MatrixBase<OtherDerived>& other,
+                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
+    { return toDense().isApprox(other,prec); }
+
+    /** \returns the matrix or vector obtained by evaluating this expression.
+      *
+      * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
+      * a const reference, in order to avoid a useless copy.
+      */
+    inline const typename internal::eval<Derived>::type eval() const
+    { return typename internal::eval<Derived>::type(derived()); }
+
+    Scalar sum() const;
+
+  protected:
+
+    bool m_isRValue;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEMATRIXBASE_H
diff --git a/Eigen/src/SparseCore/SparsePermutation.h b/Eigen/src/SparseCore/SparsePermutation.h
new file mode 100644
index 0000000..75e2100
--- /dev/null
+++ b/Eigen/src/SparseCore/SparsePermutation.h
@@ -0,0 +1,148 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_PERMUTATION_H
+#define EIGEN_SPARSE_PERMUTATION_H
+
+// This file implements sparse * permutation products
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct traits<permut_sparsematrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+  typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+  typedef typename MatrixTypeNestedCleaned::Scalar Scalar;
+  typedef typename MatrixTypeNestedCleaned::Index Index;
+  enum {
+    SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
+    MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
+  };
+
+  typedef typename internal::conditional<MoveOuter,
+        SparseMatrix<Scalar,SrcStorageOrder,Index>,
+        SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> >::type ReturnType;
+};
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct permut_sparsematrix_product_retval
+ : public ReturnByValue<permut_sparsematrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+    typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+    typedef typename MatrixTypeNestedCleaned::Scalar Scalar;
+    typedef typename MatrixTypeNestedCleaned::Index Index;
+
+    enum {
+      SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
+      MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
+    };
+
+    permut_sparsematrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
+      : m_permutation(perm), m_matrix(matrix)
+    {}
+
+    inline int rows() const { return m_matrix.rows(); }
+    inline int cols() const { return m_matrix.cols(); }
+
+    template<typename Dest> inline void evalTo(Dest& dst) const
+    {
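+      // When the permutation acts along the outer dimension (MoveOuter), whole
+      // inner vectors are simply relocated. Otherwise the inner indices must be
+      // remapped, which is done by evaluating into a temporary with the
+      // opposite storage order.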
+      if(MoveOuter)
+      {
+        SparseMatrix<Scalar,SrcStorageOrder,Index> tmp(m_matrix.rows(), m_matrix.cols());
+        Matrix<Index,Dynamic,1> sizes(m_matrix.outerSize());
+        for(Index j=0; j<m_matrix.outerSize(); ++j)
+        {
+          Index jp = m_permutation.indices().coeff(j);
+          sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros();
+        }
+        tmp.reserve(sizes);
+        for(Index j=0; j<m_matrix.outerSize(); ++j)
+        {
+          Index jp = m_permutation.indices().coeff(j);
+          Index jsrc = ((Side==OnTheRight) ^ Transposed) ? jp : j;
+          Index jdst = ((Side==OnTheLeft) ^ Transposed) ? jp : j;
+          for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,jsrc); it; ++it)
+            tmp.insertByOuterInner(jdst,it.index()) = it.value();
+        }
+        dst = tmp;
+      }
+      else
+      {
+        SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> tmp(m_matrix.rows(), m_matrix.cols());
+        Matrix<Index,Dynamic,1> sizes(tmp.outerSize());
+        sizes.setZero();
+        PermutationMatrix<Dynamic,Dynamic,Index> perm;
+        if((Side==OnTheLeft) ^ Transposed)
+          perm = m_permutation;
+        else
+          perm = m_permutation.transpose();
+
+        for(Index j=0; j<m_matrix.outerSize(); ++j)
+          for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,j); it; ++it)
+            sizes[perm.indices().coeff(it.index())]++;
+        tmp.reserve(sizes);
+        for(Index j=0; j<m_matrix.outerSize(); ++j)
+          for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,j); it; ++it)
+            tmp.insertByOuterInner(perm.indices().coeff(it.index()),j) = it.value();
+        dst = tmp;
+      }
+    }
+
+  protected:
+    const PermutationType& m_permutation;
+    typename MatrixType::Nested m_matrix;
+};
+
+} // end namespace internal
+
+
+
+/** \returns the matrix with the permutation applied to the columns
+  */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, false>
+operator*(const SparseMatrixBase<SparseDerived>& matrix, const PermutationBase<PermDerived>& perm)
+{
+  return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, false>(perm, matrix.derived());
+}
+
+/** \returns the matrix with the permutation applied to the rows
+  */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, false>
+operator*( const PermutationBase<PermDerived>& perm, const SparseMatrixBase<SparseDerived>& matrix)
+{
+  return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, false>(perm, matrix.derived());
+}
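+
+// Illustrative usage (hypothetical objects A and P):
+//   SparseMatrix<double> A(n,n);
+//   PermutationMatrix<Dynamic,Dynamic> P(n);
+//   SparseMatrix<double> rowPermuted = P * A; // permutes the rows of A
+//   SparseMatrix<double> colPermuted = A * P; // permutes the columns of A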
+
+
+
+/** \returns the matrix with the inverse permutation applied to the columns.
+  */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, true>
+operator*(const SparseMatrixBase<SparseDerived>& matrix, const Transpose<PermutationBase<PermDerived> >& tperm)
+{
+  return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, true>(tperm.nestedPermutation(), matrix.derived());
+}
+
+/** \returns the matrix with the inverse permutation applied to the rows.
+  */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, true>
+operator*(const Transpose<PermutationBase<PermDerived> >& tperm, const SparseMatrixBase<SparseDerived>& matrix)
+{
+  return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, true>(tperm.nestedPermutation(), matrix.derived());
+}
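+
+// Note: since a permutation matrix is orthogonal, its transpose is its
+// inverse, so e.g. P.transpose() * A undoes the row permutation P * A.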
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_PERMUTATION_H
diff --git a/Eigen/src/SparseCore/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h
new file mode 100644
index 0000000..cf76630
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseProduct.h
@@ -0,0 +1,188 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEPRODUCT_H
+#define EIGEN_SPARSEPRODUCT_H
+
+namespace Eigen { 
+
+template<typename Lhs, typename Rhs>
+struct SparseSparseProductReturnType
+{
+  typedef typename internal::traits<Lhs>::Scalar Scalar;
+  typedef typename internal::traits<Lhs>::Index Index;
+  enum {
+    LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
+    RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
+    TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
+    TransposeLhs = LhsRowMajor && (!RhsRowMajor)
+  };
+
+  typedef typename internal::conditional<TransposeLhs,
+    SparseMatrix<Scalar,0,Index>,
+    typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
+
+  typedef typename internal::conditional<TransposeRhs,
+    SparseMatrix<Scalar,0,Index>,
+    typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
+
+  typedef SparseSparseProduct<LhsNested, RhsNested> Type;
+};
+
+namespace internal {
+template<typename LhsNested, typename RhsNested>
+struct traits<SparseSparseProduct<LhsNested, RhsNested> >
+{
+  typedef MatrixXpr XprKind;
+  // clean the nested types:
+  typedef typename remove_all<LhsNested>::type _LhsNested;
+  typedef typename remove_all<RhsNested>::type _RhsNested;
+  typedef typename _LhsNested::Scalar Scalar;
+  typedef typename promote_index_type<typename traits<_LhsNested>::Index,
+                                         typename traits<_RhsNested>::Index>::type Index;
+
+  enum {
+    LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+    RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+    LhsFlags = _LhsNested::Flags,
+    RhsFlags = _RhsNested::Flags,
+
+    RowsAtCompileTime    = _LhsNested::RowsAtCompileTime,
+    ColsAtCompileTime    = _RhsNested::ColsAtCompileTime,
+    MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
+    MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
+
+    InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+
+    EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
+
+    RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
+
+    Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
+          | EvalBeforeAssigningBit
+          | EvalBeforeNestingBit,
+
+    CoeffReadCost = Dynamic
+  };
+
+  typedef Sparse StorageKind;
+};
+
+} // end namespace internal
+
+template<typename LhsNested, typename RhsNested>
+class SparseSparseProduct : internal::no_assignment_operator,
+  public SparseMatrixBase<SparseSparseProduct<LhsNested, RhsNested> >
+{
+  public:
+
+    typedef SparseMatrixBase<SparseSparseProduct> Base;
+    EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct)
+
+  private:
+
+    typedef typename internal::traits<SparseSparseProduct>::_LhsNested _LhsNested;
+    typedef typename internal::traits<SparseSparseProduct>::_RhsNested _RhsNested;
+
+  public:
+
+    template<typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs)
+      : m_lhs(lhs), m_rhs(rhs), m_tolerance(0), m_conservative(true)
+    {
+      init();
+    }
+
+    template<typename Lhs, typename Rhs>
+    EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, const RealScalar& tolerance)
+      : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false)
+    {
+      init();
+    }
+
+    SparseSparseProduct pruned(const Scalar& reference = 0, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) const
+    {
+      using std::abs;
+      return SparseSparseProduct(m_lhs,m_rhs,abs(reference)*epsilon);
+    }
+
+    template<typename Dest>
+    void evalTo(Dest& result) const
+    {
+      if(m_conservative)
+        internal::conservative_sparse_sparse_product_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result);
+      else
+        internal::sparse_sparse_product_with_pruning_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result,m_tolerance);
+    }
+
+    EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+    EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+    EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+    EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+  protected:
+    void init()
+    {
+      eigen_assert(m_lhs.cols() == m_rhs.rows());
+
+      enum {
+        ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic
+                      || _RhsNested::RowsAtCompileTime==Dynamic
+                      || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime),
+        AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
+        SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested)
+      };
+      // note to the lost user:
+      //    * for a dot product use: v1.dot(v2)
+      //    * for a coeff-wise product use: v1.cwiseProduct(v2)
+      EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+        INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+      EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+        INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+      EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+    }
+
+    LhsNested m_lhs;
+    RhsNested m_rhs;
+    RealScalar m_tolerance;
+    bool m_conservative;
+};
+
+// sparse = sparse * sparse
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+inline Derived& SparseMatrixBase<Derived>::operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+{
+  product.evalTo(derived());
+  return derived();
+}
+
+/** \returns an expression of the product of two sparse matrices.
+  * By default, a conservative product preserving the symbolic non-zeros is performed.
+  * Automatic pruning of small values can be requested by calling the pruned() function,
+  * in which case a different product algorithm is employed:
+  * \code
+  * C = (A*B).pruned();             // suppress numerical zeros (exact)
+  * C = (A*B).pruned(ref);
+  * C = (A*B).pruned(ref,epsilon);
+  * \endcode
+  * where \c ref is a meaningful non-zero reference value.
+  */
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+{
+  return typename SparseSparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEPRODUCT_H
diff --git a/Eigen/src/SparseCore/SparseRedux.h b/Eigen/src/SparseCore/SparseRedux.h
new file mode 100644
index 0000000..f3da93a
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseRedux.h
@@ -0,0 +1,45 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEREDUX_H
+#define EIGEN_SPARSEREDUX_H
+
+namespace Eigen { 
+
+template<typename Derived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::sum() const
+{
+  eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+  Scalar res(0);
+  for (Index j=0; j<outerSize(); ++j)
+    for (typename Derived::InnerIterator iter(derived(),j); iter; ++iter)
+      res += iter.value();
+  return res;
+}
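+
+// Illustrative usage: for any evaluated sparse object A,
+//   double total = A.sum(); // accumulates all explicitly stored coefficients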
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
+SparseMatrix<_Scalar,_Options,_Index>::sum() const
+{
+  eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+  return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
+SparseVector<_Scalar,_Options,_Index>::sum() const
+{
+  eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+  return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEREDUX_H
diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h
new file mode 100644
index 0000000..0eda96b
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -0,0 +1,507 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
+#define EIGEN_SPARSE_SELFADJOINTVIEW_H
+
+namespace Eigen { 
+
+/** \ingroup SparseCore_Module
+  * \class SparseSelfAdjointView
+  *
+  * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
+  *
+  * \param MatrixType the type of the sparse matrix storing the coefficients
+  * \param UpLo can be either \c #Lower or \c #Upper
+  *
+  * This class is an expression of a selfadjoint matrix from a triangular part of a sparse matrix
+  * with given storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
+  * and most of the time this is the only way it is used.
+  *
+  * \sa SparseMatrixBase::selfadjointView()
+  */
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct;
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct;
+
+namespace internal {
+  
+template<typename MatrixType, unsigned int UpLo>
+struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+} // end namespace internal
+
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
+  : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
+{
+  public:
+
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Index,Dynamic,1> VectorI;
+    typedef typename MatrixType::Nested MatrixTypeNested;
+    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+    inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
+    {
+      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
+    }
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+
+    /** \internal \returns a reference to the nested matrix */
+    const _MatrixTypeNested& matrix() const { return m_matrix; }
+    _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
+
+    /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
+      *
+      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
+      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
+      */
+    template<typename OtherDerived>
+    SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>
+    operator*(const SparseMatrixBase<OtherDerived>& rhs) const
+    {
+      return SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>(*this, rhs.derived());
+    }
+
+    /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
+      *
+      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
+      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
+      */
+    template<typename OtherDerived> friend
+    SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject >
+    operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+    {
+      return SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject>(lhs.derived(), rhs);
+    }
+    
+    /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
+    template<typename OtherDerived>
+    SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
+    operator*(const MatrixBase<OtherDerived>& rhs) const
+    {
+      return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
+    }
+
+    /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
+    template<typename OtherDerived> friend
+    DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
+    operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+    {
+      return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
+    }
+
+    /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
+      * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
+      *
+      * \returns a reference to \c *this
+      *
+      * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
+      * call this function with u.adjoint().
+      */
+    template<typename DerivedU>
+    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
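+
+    // Sketch (hypothetical sparse vector u):
+    //   A.selfadjointView<Lower>().rankUpdate(u, alpha); // A += alpha * u * u^*
+    //   (only the stored Lower triangle of A is updated)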
+    
+    /** \internal triggered by sparse_matrix = SparseSelfadjointView; */
+    template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
+    {
+      internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
+    }
+    
+    template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
+    {
+      // TODO directly evaluate into _dest;
+      SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
+      internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
+      _dest = tmp;
+    }
+    
+    /** \returns an expression of P H P^-1 */
+    SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
+    {
+      return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
+    }
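+
+    // Sketch (hypothetical permutation perm): apply a symmetric permutation
+    // and store the result as another self-adjoint view:
+    //   B.selfadjointView<Upper>() = A.selfadjointView<Lower>().twistedBy(perm);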
+    
+    template<typename SrcMatrixType,int SrcUpLo>
+    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
+    {
+      permutedMatrix.evalTo(*this);
+      return *this;
+    }
+
+
+    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
+    {
+      PermutationMatrix<Dynamic> pnull;
+      return *this = src.twistedBy(pnull);
+    }
+
+    template<typename SrcMatrixType,unsigned int SrcUpLo>
+    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcUpLo>& src)
+    {
+      PermutationMatrix<Dynamic> pnull;
+      return *this = src.twistedBy(pnull);
+    }
+    
+
+    // const SparseLLT<PlainObject, UpLo> llt() const;
+    // const SparseLDLT<PlainObject, UpLo> ldlt() const;
+
+  protected:
+
+    typename MatrixType::Nested m_matrix;
+    mutable VectorI m_countPerRow;
+    mutable VectorI m_countPerCol;
+};
+
+/***************************************************************************
+* Implementation of SparseMatrixBase methods
+***************************************************************************/
+
+template<typename Derived>
+template<unsigned int UpLo>
+const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
+{
+  return derived();
+}
+
+template<typename Derived>
+template<unsigned int UpLo>
+SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
+{
+  return derived();
+}
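+
+// Illustrative usage (assuming a square SparseMatrix<double> A and a VectorXd x):
+//   VectorXd y = A.selfadjointView<Lower>() * x; // only the stored lower
+//                                                // triangle of A is read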
+
+/***************************************************************************
+* Implementation of SparseSelfAdjointView methods
+***************************************************************************/
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename DerivedU>
+SparseSelfAdjointView<MatrixType,UpLo>&
+SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
+{
+  SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
+  if(alpha==Scalar(0))
+    m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
+  else
+    m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();
+
+  return *this;
+}
+
+/***************************************************************************
+* Implementation of sparse self-adjoint time dense matrix
+***************************************************************************/
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{
+  typedef Dense StorageKind;
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct
+  : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)
+
+    SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {}
+
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
+    {
+      EIGEN_ONLY_USED_FOR_DEBUG(alpha);
+      // TODO use alpha
+      eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
+      typedef typename internal::remove_all<Lhs>::type _Lhs;
+      typedef typename _Lhs::InnerIterator LhsInnerIterator;
+      enum {
+        LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
+        ProcessFirstHalf =
+                 ((UpLo&(Upper|Lower))==(Upper|Lower))
+              || ( (UpLo&Upper) && !LhsIsRowMajor)
+              || ( (UpLo&Lower) && LhsIsRowMajor),
+        ProcessSecondHalf = !ProcessFirstHalf
+      };
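+      // Depending on the storage order and on which triangle is stored, each
+      // inner vector is traversed either up to the diagonal (ProcessFirstHalf)
+      // or from the diagonal on (ProcessSecondHalf). Each off-diagonal entry
+      // contributes twice: once directly and once conjugated for its mirrored
+      // coefficient.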
+      for (Index j=0; j<m_lhs.outerSize(); ++j)
+      {
+        LhsInnerIterator i(m_lhs,j);
+        if (ProcessSecondHalf)
+        {
+          while (i && i.index()<j) ++i;
+          if(i && i.index()==j)
+          {
+            dest.row(j) += i.value() * m_rhs.row(j);
+            ++i;
+          }
+        }
+        for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+        {
+          Index a = LhsIsRowMajor ? j : i.index();
+          Index b = LhsIsRowMajor ? i.index() : j;
+          typename Lhs::Scalar v = i.value();
+          dest.row(a) += (v) * m_rhs.row(b);
+          dest.row(b) += numext::conj(v) * m_rhs.row(a);
+        }
+        if (ProcessFirstHalf && i && (i.index()==j))
+          dest.row(j) += i.value() * m_rhs.row(j);
+      }
+    }
+
+  private:
+    SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&);
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct
+  : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+  public:
+    EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)
+
+    DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+    {}
+
+    template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const
+    {
+      // TODO
+    }
+
+  private:
+    DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
+};
+
+/***************************************************************************
+* Implementation of symmetric copies and permutations
+***************************************************************************/
+namespace internal {
+  
+template<typename MatrixType, int UpLo>
+struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
+  typedef Matrix<Index,Dynamic,1> VectorI;
+  
+  Dest& dest(_dest.derived());
+  enum {
+    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
+  };
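+
+  // Classical two-pass compressed-storage construction: a first pass counts
+  // the entries of each outer vector of the full (both triangles) result,
+  // then a second pass fills them in, mirroring and conjugating the
+  // off-diagonal entries.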
+  
+  Index size = mat.rows();
+  VectorI count;
+  count.resize(size);
+  count.setZero();
+  dest.resize(size,size);
+  for(Index j = 0; j<size; ++j)
+  {
+    Index jp = perm ? perm[j] : j;
+    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    {
+      Index i = it.index();
+      Index r = it.row();
+      Index c = it.col();
+      Index ip = perm ? perm[i] : i;
+      if(UpLo==(Upper|Lower))
+        count[StorageOrderMatch ? jp : ip]++;
+      else if(r==c)
+        count[ip]++;
+      else if(( UpLo==Lower && r>c) || ( UpLo==Upper && r<c))
+      {
+        count[ip]++;
+        count[jp]++;
+      }
+    }
+  }
+  Index nnz = count.sum();
+  
+  // reserve space
+  dest.resizeNonZeros(nnz);
+  dest.outerIndexPtr()[0] = 0;
+  for(Index j=0; j<size; ++j)
+    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
+  for(Index j=0; j<size; ++j)
+    count[j] = dest.outerIndexPtr()[j];
+  
+  // copy data
+  for(Index j = 0; j<size; ++j)
+  {
+    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    {
+      Index i = it.index();
+      Index r = it.row();
+      Index c = it.col();
+      
+      Index jp = perm ? perm[j] : j;
+      Index ip = perm ? perm[i] : i;
+      
+      if(UpLo==(Upper|Lower))
+      {
+        Index k = count[StorageOrderMatch ? jp : ip]++;
+        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
+        dest.valuePtr()[k] = it.value();
+      }
+      else if(r==c)
+      {
+        Index k = count[ip]++;
+        dest.innerIndexPtr()[k] = ip;
+        dest.valuePtr()[k] = it.value();
+      }
+      else if(( (UpLo&Lower)==Lower && r>c) || ( (UpLo&Upper)==Upper && r<c))
+      {
+        if(!StorageOrderMatch)
+          std::swap(ip,jp);
+        Index k = count[jp]++;
+        dest.innerIndexPtr()[k] = ip;
+        dest.valuePtr()[k] = it.value();
+        k = count[ip]++;
+        dest.innerIndexPtr()[k] = jp;
+        dest.valuePtr()[k] = numext::conj(it.value());
+      }
+    }
+  }
+}
+
+template<int _SrcUpLo,int _DstUpLo,typename MatrixType,int DstOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+  typedef typename MatrixType::Index Index;
+  typedef typename MatrixType::Scalar Scalar;
+  SparseMatrix<Scalar,DstOrder,Index>& dest(_dest.derived());
+  typedef Matrix<Index,Dynamic,1> VectorI;
+  enum {
+    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
+    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
+    DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo,
+    SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo
+  };
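+
+  // Traversing a row-major triangle with column-major logic (or vice versa)
+  // effectively visits the opposite triangle, hence the Upper/Lower swaps in
+  // the enum above.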
+  
+  Index size = mat.rows();
+  VectorI count(size);
+  count.setZero();
+  dest.resize(size,size);
+  for(Index j = 0; j<size; ++j)
+  {
+    Index jp = perm ? perm[j] : j;
+    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    {
+      Index i = it.index();
+      if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+        continue;
+                  
+      Index ip = perm ? perm[i] : i;
+      count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+    }
+  }
+  dest.outerIndexPtr()[0] = 0;
+  for(Index j=0; j<size; ++j)
+    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
+  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
+  for(Index j=0; j<size; ++j)
+    count[j] = dest.outerIndexPtr()[j];
+  
+  for(Index j = 0; j<size; ++j)
+  {
+    
+    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+    {
+      Index i = it.index();
+      if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+        continue;
+                  
+      Index jp = perm ? perm[j] : j;
+      Index ip = perm? perm[i] : i;
+      
+      Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+      dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
+      
+      if(!StorageOrderMatch) std::swap(ip,jp);
+      if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
+        dest.valuePtr()[k] = numext::conj(it.value());
+      else
+        dest.valuePtr()[k] = it.value();
+    }
+  }
+}
+
+} // end namespace internal
+
+template<typename MatrixType,int UpLo>
+class SparseSymmetricPermutationProduct
+  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
+{
+  public:
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::Index Index;
+  protected:
+    typedef PermutationMatrix<Dynamic,Dynamic,Index> Perm;
+  public:
+    typedef Matrix<Index,Dynamic,1> VectorI;
+    typedef typename MatrixType::Nested MatrixTypeNested;
+    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+    
+    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
+      : m_matrix(mat), m_perm(perm)
+    {}
+    
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+    
+    template<typename DestScalar, int Options, typename DstIndex>
+    void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
+    {
+//       internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
+      SparseMatrix<DestScalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
+      internal::permute_symm_to_fullsymm<UpLo>(m_matrix,tmp,m_perm.indices().data());
+      _dest = tmp;
+    }
+    
+    template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
+    {
+      internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
+    }
+    
+  protected:
+    MatrixTypeNested m_matrix;
+    const Perm& m_perm;
+
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
new file mode 100644
index 0000000..fcc18f5
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -0,0 +1,150 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
+#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
+
+namespace Eigen { 
+
+namespace internal {
+
+
+// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
+template<typename Lhs, typename Rhs, typename ResultType>
+static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance)
+{
+  // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
+
+  typedef typename remove_all<Lhs>::type::Scalar Scalar;
+  typedef typename remove_all<Lhs>::type::Index Index;
+
+  // make sure to call innerSize/outerSize since we fake the storage order.
+  Index rows = lhs.innerSize();
+  Index cols = rhs.outerSize();
+  //Index size = lhs.outerSize();
+  eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+  // allocate a temporary buffer
+  AmbiVector<Scalar,Index> tempVector(rows);
+
+  // estimate the number of non-zero entries:
+  // for a rhs column containing Y non-zeros, we assume that the Y corresponding
+  // columns of the lhs differ on average by one non-zero, so the product of that
+  // rhs column with the lhs has about X+Y non-zeros, where X is the average
+  // number of non-zeros per column of the lhs. Summing over all columns gives
+  // the estimate nnz(lhs*rhs) ~ nnz(lhs) + nnz(rhs).
+  Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
+
+  // mimics a resizeByInnerOuter:
+  if(ResultType::IsRowMajor)
+    res.resize(cols, rows);
+  else
+    res.resize(rows, cols);
+
+  res.reserve(estimated_nnz_prod);
+  double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
+  for (Index j=0; j<cols; ++j)
+  {
+    // FIXME:
+    //double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
+    // let's do a more accurate determination of the nnz ratio for the current column j of res
+    tempVector.init(ratioColRes);
+    tempVector.setZero();
+    for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+    {
+      // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
+      tempVector.restart();
+      Scalar x = rhsIt.value();
+      for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
+      {
+        tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
+      }
+    }
+    res.startVec(j);
+    for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector,tolerance); it; ++it)
+      res.insertBackByOuterInner(j,it.index()) = it.value();
+  }
+  res.finalize();
+}
+
+template<typename Lhs, typename Rhs, typename ResultType,
+  int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+  int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+  int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct sparse_sparse_product_with_pruning_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+  typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+  typedef typename ResultType::RealScalar RealScalar;
+
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+  {
+    typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+    internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
+    res.swap(_res);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+  typedef typename ResultType::RealScalar RealScalar;
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+  {
+    // we need a col-major matrix to hold the result
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> SparseTemporaryType;
+    SparseTemporaryType _res(res.rows(), res.cols());
+    internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
+    res = _res;
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+  typedef typename ResultType::RealScalar RealScalar;
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+  {
+    // let's transpose the product to get a column x column product
+    typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+    internal::sparse_sparse_product_with_pruning_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res, tolerance);
+    res.swap(_res);
+  }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+  typedef typename ResultType::RealScalar RealScalar;
+  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+  {
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixLhs;
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixRhs;
+    ColMajorMatrixLhs colLhs(lhs);
+    ColMajorMatrixRhs colRhs(rhs);
+    internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);
+
+    // let's transpose the product to get a column x column product
+//     typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+//     SparseTemporaryType _res(res.cols(), res.rows());
+//     sparse_sparse_product_with_pruning_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
+//     res = _res.transpose();
+  }
+};
+
+// NOTE: the two other cases (col,row,*) must never occur since they are caught
+// by ProductReturnType, which transforms them into (col,col,*) by evaluating the rhs.
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
diff --git a/Eigen/src/SparseCore/SparseTranspose.h b/Eigen/src/SparseCore/SparseTranspose.h
new file mode 100644
index 0000000..76d031d
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseTranspose.h
@@ -0,0 +1,63 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSETRANSPOSE_H
+#define EIGEN_SPARSETRANSPOSE_H
+
+namespace Eigen { 
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
+  : public SparseMatrixBase<Transpose<MatrixType> >
+{
+    typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested;
+  public:
+
+    EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType> )
+
+    class InnerIterator;
+    class ReverseInnerIterator;
+
+    inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
+};
+
+// NOTE: VC10 and VC11 trigger an ICE if we don't put
+// typename TransposeImpl<MatrixType,Sparse>:: in front of Index; adding
+// a typedef typename TransposeImpl<MatrixType,Sparse>::Index Index;
+// does not fix the issue.
+// An alternative would be to define the nested class in the parent class itself.
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerIterator
+  : public _MatrixTypeNested::InnerIterator
+{
+    typedef typename _MatrixTypeNested::InnerIterator Base;
+    typedef typename TransposeImpl::Index Index;
+  public:
+
+    EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer)
+      : Base(trans.derived().nestedExpression(), outer)
+    {}
+    typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
+    typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+};
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
+  : public _MatrixTypeNested::ReverseInnerIterator
+{
+    typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+    typedef typename TransposeImpl::Index Index;
+  public:
+
+    EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer)
+      : Base(xpr.derived().nestedExpression(), outer)
+    {}
+    typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
+    typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSETRANSPOSE_H
diff --git a/Eigen/src/SparseCore/SparseTriangularView.h b/Eigen/src/SparseCore/SparseTriangularView.h
new file mode 100644
index 0000000..333127b
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseTriangularView.h
@@ -0,0 +1,179 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
+#define EIGEN_SPARSE_TRIANGULARVIEW_H
+
+namespace Eigen { 
+
+namespace internal {
+  
+template<typename MatrixType, int Mode>
+struct traits<SparseTriangularView<MatrixType,Mode> >
+: public traits<MatrixType>
+{};
+
+} // namespace internal
+
+template<typename MatrixType, int Mode> class SparseTriangularView
+  : public SparseMatrixBase<SparseTriangularView<MatrixType,Mode> >
+{
+    enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
+                    || ((Mode&Upper) &&  (MatrixType::Flags&RowMajorBit)),
+           SkipLast = !SkipFirst,
+           SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
+           HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
+    };
+
+  public:
+    
+    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView)
+
+    class InnerIterator;
+    class ReverseInnerIterator;
+
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+
+    typedef typename MatrixType::Nested MatrixTypeNested;
+    typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
+    typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
+
+    inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {}
+
+    /** \internal */
+    inline const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
+
+    template<typename OtherDerived>
+    typename internal::plain_matrix_type_column_major<OtherDerived>::type
+    solve(const MatrixBase<OtherDerived>& other) const;
+
+    template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
+    template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
+
+  protected:
+    MatrixTypeNested m_matrix;
+};
+
+template<typename MatrixType, int Mode>
+class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
+{
+    typedef typename MatrixTypeNestedCleaned::InnerIterator Base;
+    typedef typename SparseTriangularView::Index Index;
+  public:
+
+    EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
+      : Base(view.nestedExpression(), outer), m_returnOne(false)
+    {
+      if(SkipFirst)
+      {
+        while((*this) && ((HasUnitDiag||SkipDiag)  ? this->index()<=outer : this->index()<outer))
+          Base::operator++();
+        if(HasUnitDiag)
+          m_returnOne = true;
+      }
+      else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+      {
+        if((!SkipFirst) && Base::operator bool())
+          Base::operator++();
+        m_returnOne = true;
+      }
+    }
+
+    EIGEN_STRONG_INLINE InnerIterator& operator++()
+    {
+      if(HasUnitDiag && m_returnOne)
+        m_returnOne = false;
+      else
+      {
+        Base::operator++();
+        if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+        {
+          if((!SkipFirst) && Base::operator bool())
+            Base::operator++();
+          m_returnOne = true;
+        }
+      }
+      return *this;
+    }
+
+    inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); }
+    inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); }
+    inline Index index() const
+    {
+      if(HasUnitDiag && m_returnOne)  return Base::outer();
+      else                            return Base::index();
+    }
+    inline Scalar value() const
+    {
+      if(HasUnitDiag && m_returnOne)  return Scalar(1);
+      else                            return Base::value();
+    }
+
+    EIGEN_STRONG_INLINE operator bool() const
+    {
+      if(HasUnitDiag && m_returnOne)
+        return true;
+      if(SkipFirst) return  Base::operator bool();
+      else
+      {
+        if (SkipDiag) return (Base::operator bool() && this->index() < this->outer());
+        else return (Base::operator bool() && this->index() <= this->outer());
+      }
+    }
+  protected:
+    bool m_returnOne;
+};
+
+template<typename MatrixType, int Mode>
+class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
+{
+    typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base;
+    typedef typename SparseTriangularView::Index Index;
+  public:
+
+    EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer)
+      : Base(view.nestedExpression(), outer)
+    {
+      eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not yet support triangular views with a unit diagonal");
+      if(SkipLast) {
+        while((*this) && (SkipDiag ? this->index()>=outer : this->index()>outer))
+          --(*this);
+      }
+    }
+
+    EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
+    { Base::operator--(); return *this; }
+
+    inline Index row() const { return Base::row(); }
+    inline Index col() const { return Base::col(); }
+
+    EIGEN_STRONG_INLINE operator bool() const
+    {
+      if (SkipLast) return Base::operator bool() ;
+      else
+      {
+        if(SkipDiag) return (Base::operator bool() && this->index() > this->outer());
+        else return (Base::operator bool() && this->index() >= this->outer());
+      }
+    }
+};
+
+template<typename Derived>
+template<int Mode>
+inline const SparseTriangularView<Derived, Mode>
+SparseMatrixBase<Derived>::triangularView() const
+{
+  return derived();
+}
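+
+// Illustrative usage (assuming SparseMatrix<double> A and VectorXd b):
+//   VectorXd x = A.triangularView<Lower>().solve(b); // sparse triangular solve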
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_TRIANGULARVIEW_H
diff --git a/Eigen/src/SparseCore/SparseUtil.h b/Eigen/src/SparseCore/SparseUtil.h
new file mode 100644
index 0000000..0ba4713
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseUtil.h
@@ -0,0 +1,173 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEUTIL_H
+#define EIGEN_SPARSEUTIL_H
+
+namespace Eigen { 
+
+#ifdef NDEBUG
+#define EIGEN_DBG_SPARSE(X)
+#else
+#define EIGEN_DBG_SPARSE(X) X
+#endif
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \
+{ \
+  return Base::operator Op(other.derived()); \
+} \
+EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
+{ \
+  return Base::operator Op(other); \
+}
+
+#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename Other> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
+{ \
+  return Base::operator Op(scalar); \
+}
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
+
+#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
+  typedef BaseClass Base; \
+  typedef typename Eigen::internal::traits<Derived >::Scalar Scalar; \
+  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+  typedef typename Eigen::internal::nested<Derived >::type Nested; \
+  typedef typename Eigen::internal::traits<Derived >::StorageKind StorageKind; \
+  typedef typename Eigen::internal::traits<Derived >::Index Index; \
+  enum { RowsAtCompileTime = Eigen::internal::traits<Derived >::RowsAtCompileTime, \
+        ColsAtCompileTime = Eigen::internal::traits<Derived >::ColsAtCompileTime, \
+        Flags = Eigen::internal::traits<Derived >::Flags, \
+        CoeffReadCost = Eigen::internal::traits<Derived >::CoeffReadCost, \
+        SizeAtCompileTime = Base::SizeAtCompileTime, \
+        IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+  using Base::derived; \
+  using Base::const_cast_derived;
+
+#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
+  _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived >)
+
+const int CoherentAccessPattern     = 0x1;
+const int InnerRandomAccessPattern  = 0x2 | CoherentAccessPattern;
+const int OuterRandomAccessPattern  = 0x4 | CoherentAccessPattern;
+const int RandomAccessPattern       = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
+
+template<typename Derived> class SparseMatrixBase;
+template<typename _Scalar, int _Flags = 0, typename _Index = int>  class SparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int>  class DynamicSparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int>  class SparseVector;
+template<typename _Scalar, int _Flags = 0, typename _Index = int>  class MappedSparseMatrix;
+
+template<typename MatrixType, int Mode>           class SparseTriangularView;
+template<typename MatrixType, unsigned int UpLo>  class SparseSelfAdjointView;
+template<typename Lhs, typename Rhs>              class SparseDiagonalProduct;
+template<typename MatrixType> class SparseView;
+
+template<typename Lhs, typename Rhs>        class SparseSparseProduct;
+template<typename Lhs, typename Rhs>        class SparseTimeDenseProduct;
+template<typename Lhs, typename Rhs>        class DenseTimeSparseProduct;
+template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;
+
+template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
+template<typename Lhs, typename Rhs,
+         int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;         
+template<typename Lhs, typename Rhs,
+         int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;
+template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;
+
+namespace internal {
+
+template<typename T,int Rows,int Cols> struct sparse_eval;
+
+template<typename T> struct eval<T,Sparse>
+  : public sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime>
+{};
+
+template<typename T,int Cols> struct sparse_eval<T,1,Cols> {
+    typedef typename traits<T>::Scalar _Scalar;
+    typedef typename traits<T>::Index _Index;
+  public:
+    typedef SparseVector<_Scalar, RowMajor, _Index> type;
+};
+
+template<typename T,int Rows> struct sparse_eval<T,Rows,1> {
+    typedef typename traits<T>::Scalar _Scalar;
+    typedef typename traits<T>::Index _Index;
+  public:
+    typedef SparseVector<_Scalar, ColMajor, _Index> type;
+};
+
+template<typename T,int Rows,int Cols> struct sparse_eval {
+    typedef typename traits<T>::Scalar _Scalar;
+    typedef typename traits<T>::Index _Index;
+    enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
+  public:
+    typedef SparseMatrix<_Scalar, _Options, _Index> type;
+};
+
+template<typename T> struct sparse_eval<T,1,1> {
+    typedef typename traits<T>::Scalar _Scalar;
+  public:
+    typedef Matrix<_Scalar, 1, 1> type;
+};
+
+template<typename T> struct plain_matrix_type<T,Sparse>
+{
+  typedef typename traits<T>::Scalar _Scalar;
+  typedef typename traits<T>::Index _Index;
+  enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
+  public:
+    typedef SparseMatrix<_Scalar, _Options, _Index> type;
+};
+
+} // end namespace internal
+
+/** \ingroup SparseCore_Module
+  *
+  * \class Triplet
+  *
+  * \brief A small structure to hold a non zero as a triplet (i,j,value).
+  *
+  * \sa SparseMatrix::setFromTriplets()
+  */
+template<typename Scalar, typename Index=typename SparseMatrix<Scalar>::Index >
+class Triplet
+{
+public:
+  Triplet() : m_row(0), m_col(0), m_value(0) {}
+
+  Triplet(const Index& i, const Index& j, const Scalar& v = Scalar(0))
+    : m_row(i), m_col(j), m_value(v)
+  {}
+
+  /** \returns the row index of the element */
+  const Index& row() const { return m_row; }
+
+  /** \returns the column index of the element */
+  const Index& col() const { return m_col; }
+
+  /** \returns the value of the element */
+  const Scalar& value() const { return m_value; }
+protected:
+  Index m_row, m_col;
+  Scalar m_value;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEUTIL_H
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
new file mode 100644
index 0000000..7e15c81
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseVector.h
@@ -0,0 +1,447 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEVECTOR_H
+#define EIGEN_SPARSEVECTOR_H
+
+namespace Eigen { 
+
+/** \ingroup SparseCore_Module
+  * \class SparseVector
+  *
+  * \brief a sparse vector class
+  *
+  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+  *
+  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+  *
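+  * A minimal usage sketch (the size and indices are illustrative only):
+  * \code
+  * SparseVector<double> v(1000); // logical size 1000, no stored entries yet
+  * v.coeffRef(3)  = 2.5;         // sorted insertion
+  * v.coeffRef(42) = -1.0;
+  * double x = v.coeff(3);        // binary search; returns 0 for absent entries
+  * \endcode
+  *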
+  * This class can be extended with the help of the plugin mechanism described on the page
+  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
+  */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseVector<_Scalar, _Options, _Index> >
+{
+  typedef _Scalar Scalar;
+  typedef _Index Index;
+  typedef Sparse StorageKind;
+  typedef MatrixXpr XprKind;
+  enum {
+    IsColVector = (_Options & RowMajorBit) ? 0 : 1,
+
+    RowsAtCompileTime = IsColVector ? Dynamic : 1,
+    ColsAtCompileTime = IsColVector ? 1 : Dynamic,
+    MaxRowsAtCompileTime = RowsAtCompileTime,
+    MaxColsAtCompileTime = ColsAtCompileTime,
+    Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit),
+    CoeffReadCost = NumTraits<Scalar>::ReadCost,
+    SupportedAccessPatterns = InnerRandomAccessPattern
+  };
+};
+
+// Sparse-Vector-Assignment kinds:
+enum {
+  SVA_RuntimeSwitch,
+  SVA_Inner,
+  SVA_Outer
+};
+
+template< typename Dest, typename Src,
+          int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch
+                             : Src::InnerSizeAtCompileTime==1 ? SVA_Outer
+                             : SVA_Inner>
+struct sparse_vector_assign_selector;
+
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseVector
+  : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
+{
+    typedef SparseMatrixBase<SparseVector> SparseBase;
+    
+  public:
+    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
+    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
+    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
+    
+    typedef internal::CompressedStorage<Scalar,Index> Storage;
+    enum { IsColVector = internal::traits<SparseVector>::IsColVector };
+    
+    enum {
+      Options = _Options
+    };
+    
+    EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
+    EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
+    EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
+    EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
+
+    EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); }
+    EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); }
+
+    EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); }
+    EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); }
+    
+    /** \internal */
+    inline Storage& data() { return m_data; }
+    /** \internal */
+    inline const Storage& data() const { return m_data; }
+
+    inline Scalar coeff(Index row, Index col) const
+    {
+      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
+      return coeff(IsColVector ? row : col);
+    }
+    inline Scalar coeff(Index i) const
+    {
+      eigen_assert(i>=0 && i<m_size);
+      return m_data.at(i);
+    }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
+      return coeffRef(IsColVector ? row : col);
+    }
+
+    /** \returns a reference to the coefficient value at the given index \a i.
+      * This operation involves an O(log(rho*size)) binary search. If the coefficient does not
+      * exist yet, then a sorted insertion into a sequential buffer is performed.
+      *
+      * This insertion might be very costly if the number of nonzeros above \a i is large.
+      */
+    inline Scalar& coeffRef(Index i)
+    {
+      eigen_assert(i>=0 && i<m_size);
+      return m_data.atWithInsertion(i);
+    }
+
+  public:
+
+    class InnerIterator;
+    class ReverseInnerIterator;
+
+    inline void setZero() { m_data.clear(); }
+
+    /** \returns the number of non-zero coefficients */
+    inline Index nonZeros() const  { return static_cast<Index>(m_data.size()); }
+
+    inline void startVec(Index outer)
+    {
+      EIGEN_UNUSED_VARIABLE(outer);
+      eigen_assert(outer==0);
+    }
+
+    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+    {
+      EIGEN_UNUSED_VARIABLE(outer);
+      eigen_assert(outer==0);
+      return insertBack(inner);
+    }
+    inline Scalar& insertBack(Index i)
+    {
+      m_data.append(0, i);
+      return m_data.value(m_data.size()-1);
+    }
+
+    inline Scalar& insert(Index row, Index col)
+    {
+      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
+      
+      Index inner = IsColVector ? row : col;
+      Index outer = IsColVector ? col : row;
+      eigen_assert(outer==0);
+      return insert(inner);
+    }
+    Scalar& insert(Index i)
+    {
+      eigen_assert(i>=0 && i<m_size);
+      
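+      // Append one free slot at the end, then shift every stored (index,value)
+      // pair whose index is greater than i one slot to the right, so the buffer stays sorted.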
+      Index startId = 0;
+      Index p = Index(m_data.size()) - 1;
+      // TODO smart realloc
+      m_data.resize(p+2,1);
+
+      while ( (p >= startId) && (m_data.index(p) > i) )
+      {
+        m_data.index(p+1) = m_data.index(p);
+        m_data.value(p+1) = m_data.value(p);
+        --p;
+      }
+      m_data.index(p+1) = i;
+      m_data.value(p+1) = 0;
+      return m_data.value(p+1);
+    }
+
+    /** Preallocates room for \a reserveSize non-zeros without changing the logical size. */
+    inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
+
+
+    inline void finalize() {}
+
+    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
+    {
+      m_data.prune(reference,epsilon);
+    }
+
+    void resize(Index rows, Index cols)
+    {
+      eigen_assert(rows==1 || cols==1);
+      resize(IsColVector ? rows : cols);
+    }
+
+    void resize(Index newSize)
+    {
+      m_size = newSize;
+      m_data.clear();
+    }
+
+    void resizeNonZeros(Index size) { m_data.resize(size); }
+
+    inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }
+
+    inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }
+
+    inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }
+
+    template<typename OtherDerived>
+    inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
+      : m_size(0)
+    {
+      check_template_parameters();
+      *this = other.derived();
+    }
+
+    inline SparseVector(const SparseVector& other)
+      : SparseBase(other), m_size(0)
+    {
+      check_template_parameters();
+      *this = other.derived();
+    }
+
+    /** Swaps the values of \c *this and \a other.
+      * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
+      * \sa SparseMatrixBase::swap()
+      */
+    inline void swap(SparseVector& other)
+    {
+      std::swap(m_size, other.m_size);
+      m_data.swap(other.m_data);
+    }
+
+    inline SparseVector& operator=(const SparseVector& other)
+    {
+      if (other.isRValue())
+      {
+        swap(other.const_cast_derived());
+      }
+      else
+      {
+        resize(other.size());
+        m_data = other.m_data;
+      }
+      return *this;
+    }
+
+    template<typename OtherDerived>
+    inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
+    {
+      SparseVector tmp(other.size());
+      internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived());
+      this->swap(tmp);
+      return *this;
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    template<typename Lhs, typename Rhs>
+    inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+    {
+      return Base::operator=(product);
+    }
+    #endif
+
+    friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
+    {
+      for (Index i=0; i<m.nonZeros(); ++i)
+        s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+      s << std::endl;
+      return s;
+    }
+
+    /** Destructor */
+    inline ~SparseVector() {}
+
+    /** Overloaded for performance */
+    Scalar sum() const;
+
+  public:
+
+    /** \internal \deprecated use setZero() and reserve() */
+    EIGEN_DEPRECATED void startFill(Index reserve)
+    {
+      setZero();
+      m_data.reserve(reserve);
+    }
+
+    /** \internal \deprecated use insertBack(Index,Index) */
+    EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
+    {
+      eigen_assert(r==0 || c==0);
+      return fill(IsColVector ? r : c);
+    }
+
+    /** \internal \deprecated use insertBack(Index) */
+    EIGEN_DEPRECATED Scalar& fill(Index i)
+    {
+      m_data.append(0, i);
+      return m_data.value(m_data.size()-1);
+    }
+
+    /** \internal \deprecated use insert(Index,Index) */
+    EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
+    {
+      eigen_assert(r==0 || c==0);
+      return fillrand(IsColVector ? r : c);
+    }
+
+    /** \internal \deprecated use insert(Index) */
+    EIGEN_DEPRECATED Scalar& fillrand(Index i)
+    {
+      return insert(i);
+    }
+
+    /** \internal \deprecated use finalize() */
+    EIGEN_DEPRECATED void endFill() {}
+    
+    // These two functions were here in the 3.1 release, so let's keep them in case some code relies on them.
+    /** \internal \deprecated use data() */
+    EIGEN_DEPRECATED Storage& _data() { return m_data; }
+    /** \internal \deprecated use data() */
+    EIGEN_DEPRECATED const Storage& _data() const { return m_data; }
+    
+#   ifdef EIGEN_SPARSEVECTOR_PLUGIN
+#     include EIGEN_SPARSEVECTOR_PLUGIN
+#   endif
+
+protected:
+  
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+      EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
+    }
+    
+    Storage m_data;
+    Index m_size;
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseVector<Scalar,_Options,_Index>::InnerIterator
+{
+  public:
+    InnerIterator(const SparseVector& vec, Index outer=0)
+      : m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+    {
+      EIGEN_UNUSED_VARIABLE(outer);
+      eigen_assert(outer==0);
+    }
+
+    InnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
+      : m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+    {}
+
+    inline InnerIterator& operator++() { m_id++; return *this; }
+
+    inline Scalar value() const { return m_data.value(m_id); }
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
+
+    inline Index index() const { return m_data.index(m_id); }
+    inline Index row() const { return IsColVector ? index() : 0; }
+    inline Index col() const { return IsColVector ? 0 : index(); }
+
+    inline operator bool() const { return (m_id < m_end); }
+
+  protected:
+    const internal::CompressedStorage<Scalar,Index>& m_data;
+    Index m_id;
+    const Index m_end;
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
+{
+  public:
+    ReverseInnerIterator(const SparseVector& vec, Index outer=0)
+      : m_data(vec.m_data), m_id(static_cast<Index>(m_data.size())), m_start(0)
+    {
+      EIGEN_UNUSED_VARIABLE(outer);
+      eigen_assert(outer==0);
+    }
+
+    ReverseInnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
+      : m_data(data), m_id(static_cast<Index>(m_data.size())), m_start(0)
+    {}
+
+    inline ReverseInnerIterator& operator--() { m_id--; return *this; }
+
+    inline Scalar value() const { return m_data.value(m_id-1); }
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id-1)); }
+
+    inline Index index() const { return m_data.index(m_id-1); }
+    inline Index row() const { return IsColVector ? index() : 0; }
+    inline Index col() const { return IsColVector ? 0 : index(); }
+
+    inline operator bool() const { return (m_id > m_start); }
+
+  protected:
+    const internal::CompressedStorage<Scalar,Index>& m_data;
+    Index m_id;
+    const Index m_start;
+};
+
+namespace internal {
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
+  static void run(Dest& dst, const Src& src) {
+    eigen_internal_assert(src.innerSize()==src.size());
+    for(typename Src::InnerIterator it(src, 0); it; ++it)
+      dst.insert(it.index()) = it.value();
+  }
+};
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
+  static void run(Dest& dst, const Src& src) {
+    eigen_internal_assert(src.outerSize()==src.size());
+    for(typename Dest::Index i=0; i<src.size(); ++i)
+    {
+      typename Src::InnerIterator it(src, i);
+      if(it)
+        dst.insert(i) = it.value();
+    }
+  }
+};
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
+  static void run(Dest& dst, const Src& src) {
+    if(src.outerSize()==1)  sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);
+    else                    sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);
+  }
+};
+
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEVECTOR_H
diff --git a/Eigen/src/SparseCore/SparseView.h b/Eigen/src/SparseCore/SparseView.h
new file mode 100644
index 0000000..fd84504
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseView.h
@@ -0,0 +1,99 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEVIEW_H
+#define EIGEN_SPARSEVIEW_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename MatrixType>
+struct traits<SparseView<MatrixType> > : traits<MatrixType>
+{
+  typedef typename MatrixType::Index Index;
+  typedef Sparse StorageKind;
+  enum {
+    Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
+  };
+};
+
+} // end namespace internal
+
+template<typename MatrixType>
+class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
+{
+  typedef typename MatrixType::Nested MatrixTypeNested;
+  typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+public:
+  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
+
+  SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
+             typename NumTraits<Scalar>::Real epsilon = NumTraits<Scalar>::dummy_precision()) : 
+    m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}
+
+  class InnerIterator;
+
+  inline Index rows() const { return m_matrix.rows(); }
+  inline Index cols() const { return m_matrix.cols(); }
+
+  inline Index innerSize() const { return m_matrix.innerSize(); }
+  inline Index outerSize() const { return m_matrix.outerSize(); }
+
+protected:
+  MatrixTypeNested m_matrix;
+  Scalar m_reference;
+  typename NumTraits<Scalar>::Real m_epsilon;
+};
+
+template<typename MatrixType>
+class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
+{
+  typedef typename SparseView::Index Index;
+public:
+  typedef typename _MatrixTypeNested::InnerIterator IterBase;
+  InnerIterator(const SparseView& view, Index outer) :
+  IterBase(view.m_matrix, outer), m_view(view)
+  {
+    incrementToNonZero();
+  }
+
+  EIGEN_STRONG_INLINE InnerIterator& operator++()
+  {
+    IterBase::operator++();
+    incrementToNonZero();
+    return *this;
+  }
+
+  using IterBase::value;
+
+protected:
+  const SparseView& m_view;
+
+private:
+  void incrementToNonZero()
+  {
+    while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon))
+    {
+      IterBase::operator++();
+    }
+  }
+};
+
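+/** \returns a sparse view of the dense expression \c *this, in which the coefficients
+  * that are much smaller than \a reference (with precision \a epsilon, in the sense of
+  * internal::isMuchSmallerThan) are treated as zeros and skipped by the iterators.
+  *
+  * A minimal usage sketch (matrix contents are illustrative only):
+  * \code
+  * MatrixXd D = MatrixXd::Random(10,10);
+  * SparseMatrix<double> S = D.sparseView(); // copy only the non-negligible entries
+  * \endcode
+  */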
+template<typename Derived>
+const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,
+                                                          const typename NumTraits<Scalar>::Real& epsilon) const
+{
+  return SparseView<Derived>(derived(), reference, epsilon);
+}
+
+} // end namespace Eigen
+
+#endif
diff --git a/Eigen/src/SparseCore/TriangularSolver.h b/Eigen/src/SparseCore/TriangularSolver.h
new file mode 100644
index 0000000..ccc12af
--- /dev/null
+++ b/Eigen/src/SparseCore/TriangularSolver.h
@@ -0,0 +1,334 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
+#define EIGEN_SPARSETRIANGULARSOLVER_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+  int UpLo = (Mode & Lower)
+           ? Lower
+           : (Mode & Upper)
+           ? Upper
+           : -1,
+  int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>
+struct sparse_solve_triangular_selector;
+
+// forward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
+{
+  typedef typename Rhs::Scalar Scalar;
+  static void run(const Lhs& lhs, Rhs& other)
+  {
+    for(int col=0 ; col<other.cols() ; ++col)
+    {
+      for(int i=0; i<lhs.rows(); ++i)
+      {
+        Scalar tmp = other.coeff(i,col);
+        Scalar lastVal(0);
+        int lastIndex = 0;
+        for(typename Lhs::InnerIterator it(lhs, i); it; ++it)
+        {
+          lastVal = it.value();
+          lastIndex = it.index();
+          if(lastIndex==i)
+            break;
+          tmp -= lastVal * other.coeff(lastIndex,col);
+        }
+        if (Mode & UnitDiag)
+          other.coeffRef(i,col) = tmp;
+        else
+        {
+          eigen_assert(lastIndex==i);
+          other.coeffRef(i,col) = tmp/lastVal;
+        }
+      }
+    }
+  }
+};
+
+// backward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
+{
+  typedef typename Rhs::Scalar Scalar;
+  static void run(const Lhs& lhs, Rhs& other)
+  {
+    for(int col=0 ; col<other.cols() ; ++col)
+    {
+      for(int i=lhs.rows()-1 ; i>=0 ; --i)
+      {
+        Scalar tmp = other.coeff(i,col);
+        Scalar l_ii(0);
+        typename Lhs::InnerIterator it(lhs, i);
+        while(it && it.index()<i)
+          ++it;
+        if(!(Mode & UnitDiag))
+        {
+          eigen_assert(it && it.index()==i);
+          l_ii = it.value();
+          ++it;
+        }
+        else if (it && it.index() == i)
+          ++it;
+        for(; it; ++it)
+        {
+          tmp -= it.value() * other.coeff(it.index(),col);
+        }
+
+        if (Mode & UnitDiag)
+          other.coeffRef(i,col) = tmp;
+        else
+          other.coeffRef(i,col) = tmp/l_ii;
+      }
+    }
+  }
+};
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
+{
+  typedef typename Rhs::Scalar Scalar;
+  static void run(const Lhs& lhs, Rhs& other)
+  {
+    for(int col=0 ; col<other.cols() ; ++col)
+    {
+      for(int i=0; i<lhs.cols(); ++i)
+      {
+        Scalar& tmp = other.coeffRef(i,col);
+        if (tmp!=Scalar(0)) // optimization when other is actually sparse
+        {
+          typename Lhs::InnerIterator it(lhs, i);
+          while(it && it.index()<i)
+            ++it;
+          if(!(Mode & UnitDiag))
+          {
+            eigen_assert(it && it.index()==i);
+            tmp /= it.value();
+          }
+          if (it && it.index()==i)
+            ++it;
+          for(; it; ++it)
+            other.coeffRef(it.index(), col) -= tmp * it.value();
+        }
+      }
+    }
+  }
+};
+
+// backward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
+{
+  typedef typename Rhs::Scalar Scalar;
+  static void run(const Lhs& lhs, Rhs& other)
+  {
+    for(int col=0 ; col<other.cols() ; ++col)
+    {
+      for(int i=lhs.cols()-1; i>=0; --i)
+      {
+        Scalar& tmp = other.coeffRef(i,col);
+        if (tmp!=Scalar(0)) // optimization when other is actually sparse
+        {
+          if(!(Mode & UnitDiag))
+          {
+            // TODO replace this by a binary search. make sure the binary search is safe for partially sorted elements
+            typename Lhs::ReverseInnerIterator it(lhs, i);
+            while(it && it.index()!=i)
+              --it;
+            eigen_assert(it && it.index()==i);
+            other.coeffRef(i,col) /= it.value();
+          }
+          typename Lhs::InnerIterator it(lhs, i);
+          for(; it && it.index()<i; ++it)
+            other.coeffRef(it.index(), col) -= tmp * it.value();
+        }
+      }
+    }
+  }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(MatrixBase<OtherDerived>& other) const
+{
+  eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows());
+  eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
+
+  enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+  typedef typename internal::conditional<copy,
+    typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+  OtherCopy otherCopy(other.derived());
+
+  internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(m_matrix, otherCopy);
+
+  if (copy)
+    other = otherCopy;
+}
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseTriangularView<ExpressionType,Mode>::solve(const MatrixBase<OtherDerived>& other) const
+{
+  typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+  solveInPlace(res);
+  return res;
+}
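+
+// Usage sketch (illustrative only): these solvers are typically reached through
+// triangularView(). For a square sparse matrix A and a dense right-hand side b:
+//   VectorXd x = A.triangularView<Lower>().solve(b); // returns the solution
+//   A.triangularView<Lower>().solveInPlace(b);       // overwrites b in place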
+
+// pure sparse path
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+  int UpLo = (Mode & Lower)
+           ? Lower
+           : (Mode & Upper)
+           ? Upper
+           : -1,
+  int StorageOrder = int(Lhs::Flags) & (RowMajorBit)>
+struct sparse_solve_triangular_sparse_selector;
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode, int UpLo>
+struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
+{
+  typedef typename Rhs::Scalar Scalar;
+  typedef typename promote_index_type<typename traits<Lhs>::Index,
+                                         typename traits<Rhs>::Index>::type Index;
+  static void run(const Lhs& lhs, Rhs& other)
+  {
+    const bool IsLower = (UpLo==Lower);
+    AmbiVector<Scalar,Index> tempVector(other.rows()*2);
+    tempVector.setBounds(0,other.rows());
+
+    Rhs res(other.rows(), other.cols());
+    res.reserve(other.nonZeros());
+
+    for(int col=0 ; col<other.cols() ; ++col)
+    {
+      // FIXME estimate number of non zeros
+      tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);
+      tempVector.setZero();
+      tempVector.restart();
+      for (typename Rhs::InnerIterator rhsIt(other, col); rhsIt; ++rhsIt)
+      {
+        tempVector.coeffRef(rhsIt.index()) = rhsIt.value();
+      }
+
+      for(int i=IsLower?0:lhs.cols()-1;
+          IsLower?i<lhs.cols():i>=0;
+          i+=IsLower?1:-1)
+      {
+        tempVector.restart();
+        Scalar& ci = tempVector.coeffRef(i);
+        if (ci!=Scalar(0))
+        {
+          // find
+          typename Lhs::InnerIterator it(lhs, i);
+          if(!(Mode & UnitDiag))
+          {
+            if (IsLower)
+            {
+              eigen_assert(it.index()==i);
+              ci /= it.value();
+            }
+            else
+              ci /= lhs.coeff(i,i);
+          }
+          tempVector.restart();
+          if (IsLower)
+          {
+            if (it.index()==i)
+              ++it;
+            for(; it; ++it)
+              tempVector.coeffRef(it.index()) -= ci * it.value();
+          }
+          else
+          {
+            for(; it && it.index()<i; ++it)
+              tempVector.coeffRef(it.index()) -= ci * it.value();
+          }
+        }
+      }
+
+
+      int count = 0;
+      // FIXME compute a reference value to filter zeros
+      for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector/*,1e-12*/); it; ++it)
+      {
+        ++ count;
+//         std::cerr << "fill " << it.index() << ", " << col << "\n";
+//         std::cout << it.value() << "  ";
+        // FIXME use insertBack
+        res.insert(it.index(), col) = it.value();
+      }
+//       std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n";
+    }
+    res.finalize();
+    other = res.markAsRValue();
+  }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
+{
+  eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows());
+  eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
+
+//   enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+//   typedef typename internal::conditional<copy,
+//     typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+//   OtherCopy otherCopy(other.derived());
+
+  internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(m_matrix, other.derived());
+
+//   if (copy)
+//     other = otherCopy;
+}
+
+#ifdef EIGEN2_SUPPORT
+
+// deprecated stuff:
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+void SparseMatrixBase<Derived>::solveTriangularInPlace(MatrixBase<OtherDerived>& other) const
+{
+  this->template triangular<Flags&(Upper|Lower)>().solveInPlace(other);
+}
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseMatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
+{
+  typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+  derived().solveTriangularInPlace(res);
+  return res;
+}
+#endif // EIGEN2_SUPPORT
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSETRIANGULARSOLVER_H
diff --git a/Eigen/src/SparseLU/CMakeLists.txt b/Eigen/src/SparseLU/CMakeLists.txt
new file mode 100644
index 0000000..69729ee
--- /dev/null
+++ b/Eigen/src/SparseLU/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SparseLU_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_SparseLU_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseLU COMPONENT Devel
+  )
diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h
new file mode 100644
index 0000000..4514cfd
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU.h
@@ -0,0 +1,806 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_SPARSE_LU_H
+#define EIGEN_SPARSE_LU_H
+
+namespace Eigen {
+
+template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::Index> > class SparseLU;
+template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
+template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;
+
+/** \ingroup SparseLU_Module
+  * \class SparseLU
+  * 
+  * \brief Sparse supernodal LU factorization for general matrices
+  * 
+  * This class implements the supernodal LU factorization for general matrices.
+  * It uses the main techniques from the sequential SuperLU package 
+  * (http://crd-legacy.lbl.gov/~xiaoye/SuperLU/). It transparently handles real 
+  * and complex arithmetic with single and double precision, depending on the 
+  * scalar type of your input matrix. 
+  * The code has been optimized to provide BLAS-3 operations during supernode-panel updates. 
+  * It benefits directly from the built-in high-performance Eigen BLAS routines. 
+  * Moreover, when the size of a supernode is very small, the BLAS calls are avoided to 
+  * enable better optimization by the compiler. For best performance, 
+  * you should compile with the NDEBUG flag defined to avoid the numerous bound checks on vectors. 
+  * 
+  * An important parameter of this class is the ordering method. It is used to reorder the columns 
+  * (and possibly the rows) of the matrix to reduce the number of new elements that are created during 
+  * numerical factorization. The cheapest method available is COLAMD. 
+  * See  \link OrderingMethods_Module the OrderingMethods module \endlink for the list of 
+  * built-in and external ordering methods. 
+  *
+  * Simple example with key steps 
+  * \code
+  * VectorXd x(n), b(n);
+  * SparseMatrix<double, ColMajor> A;
+  * SparseLU<SparseMatrix<double, ColMajor>, COLAMDOrdering<int> > solver;
+  * // fill A and b;
+  * // Compute the ordering permutation vector from the structural pattern of A
+  * solver.analyzePattern(A); 
+  * // Compute the numerical factorization 
+  * solver.factorize(A); 
+  * // Use the factors to solve the linear system 
+  * x = solver.solve(b); 
+  * \endcode
+  * 
+  * \warning The input matrix A should be in a \b compressed and \b column-major form.
+  * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
+  * 
+  * \note Unlike the initial SuperLU implementation, there is no step to equilibrate the matrix. 
+  * For badly scaled matrices, this step can be useful to reduce the pivoting during factorization. 
+  * If this is the case for your matrices, you can try the basic scaling method at
+  *  "unsupported/Eigen/src/IterativeSolvers/Scaling.h"
+  * 
+  * \tparam _MatrixType The type of the sparse matrix. It must be a column-major SparseMatrix<>
+  * \tparam _OrderingType The ordering method to use, either AMD, COLAMD or METIS. The default is COLAMD.
+  * 
+  * 
+  * \sa \ref TutorialSparseDirectSolvers
+  * \sa \ref OrderingMethods_Module
+  */
+template <typename _MatrixType, typename _OrderingType>
+class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
+{
+  public:
+    typedef _MatrixType MatrixType; 
+    typedef _OrderingType OrderingType;
+    typedef typename MatrixType::Scalar Scalar; 
+    typedef typename MatrixType::RealScalar RealScalar; 
+    typedef typename MatrixType::Index Index; 
+    typedef SparseMatrix<Scalar,ColMajor,Index> NCMatrix;
+    typedef internal::MappedSuperNodalMatrix<Scalar, Index> SCMatrix; 
+    typedef Matrix<Scalar,Dynamic,1> ScalarVector;
+    typedef Matrix<Index,Dynamic,1> IndexVector;
+    typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+    typedef internal::SparseLUImpl<Scalar, Index> Base;
+    
+  public:
+    SparseLU():m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
+    {
+      initperfvalues(); 
+    }
+    SparseLU(const MatrixType& matrix):m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
+    {
+      initperfvalues(); 
+      compute(matrix);
+    }
+    
+    ~SparseLU()
+    {
+      // Free all explicit dynamic pointers 
+    }
+    
+    void analyzePattern (const MatrixType& matrix);
+    void factorize (const MatrixType& matrix);
+    void simplicialfactorize(const MatrixType& matrix);
+    
+    /**
+      * Compute the symbolic and numeric factorization of the input sparse matrix.
+      * The input matrix should be in column-major storage. 
+      */
+    void compute (const MatrixType& matrix)
+    {
+      // Analyze 
+      analyzePattern(matrix); 
+      // Factorize
+      factorize(matrix);
+    } 
+    
+    inline Index rows() const { return m_mat.rows(); }
+    inline Index cols() const { return m_mat.cols(); }
+    /** Indicate that the pattern of the input matrix is symmetric */
+    void isSymmetric(bool sym)
+    {
+      m_symmetricmode = sym;
+    }
+    
+    /** \returns an expression of the matrix L, internally stored as supernodes
+      * The only operation available with this expression is the triangular solve
+      * \code
+      * y = b; matrixL().solveInPlace(y);
+      * \endcode
+      */
+    SparseLUMatrixLReturnType<SCMatrix> matrixL() const
+    {
+      return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
+    }
+    /** \returns an expression of the matrix U.
+      * The only operation available with this expression is the triangular solve
+      * \code
+      * y = b; matrixU().solveInPlace(y);
+      * \endcode
+      */
+    SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,Index> > matrixU() const
+    {
+      return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,Index> >(m_Lstore, m_Ustore);
+    }
+
+    /**
+      * \returns a reference to the row permutation matrix \f$ P_r \f$ such that \f$ P_r A P_c^T = L U \f$
+      * \sa colsPermutation()
+      */
+    inline const PermutationType& rowsPermutation() const
+    {
+      return m_perm_r;
+    }
+    /**
+      * \returns a reference to the column permutation matrix \f$ P_c^T \f$ such that \f$ P_r A P_c^T = L U \f$
+      * \sa rowsPermutation()
+      */
+    inline const PermutationType& colsPermutation() const
+    {
+      return m_perm_c;
+    }
+    /** Set the threshold used for a diagonal entry to be an acceptable pivot. */
+    void setPivotThreshold(const RealScalar& thresh)
+    {
+      m_diagpivotthresh = thresh; 
+    }
+
+    /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
+      *
+      * \warning the destination matrix X in X = this->solve(B) must be column-major.
+      *
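+      * A minimal sketch (assuming \c solver already holds a successful factorization
+      * and \c b is a dense right-hand side of matching size; names are illustrative):
+      * \code
+      * VectorXd x = solver.solve(b);
+      * bool ok = (solver.info() == Success); // check for numerical issues
+      * \endcode
+      *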
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<SparseLU, Rhs> solve(const MatrixBase<Rhs>& B) const 
+    {
+      eigen_assert(m_factorizationIsOk && "SparseLU is not initialized."); 
+      eigen_assert(rows()==B.rows()
+                    && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
+      return internal::solve_retval<SparseLU, Rhs>(*this, B.derived());
+    }
+
+    /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<SparseLU, Rhs> solve(const SparseMatrixBase<Rhs>& B) const 
+    {
+      eigen_assert(m_factorizationIsOk && "SparseLU is not initialized."); 
+      eigen_assert(rows()==B.rows()
+                    && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
+      return internal::sparse_solve_retval<SparseLU, Rhs>(*this, B.derived());
+    }
+    
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the LU factorization reports a problem, a zero diagonal for instance,
+      *          \c InvalidInput if the input matrix is invalid.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+    
+    /**
+      * \returns A string describing the type of error
+      */
+    std::string lastErrorMessage() const
+    {
+      return m_lastError; 
+    }
+
+    template<typename Rhs, typename Dest>
+    bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const
+    {
+      Dest& X(X_base.derived());
+      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
+      EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
+                        THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+      
+      // Permute the right hand side to form X = Pr*B
+      // on return, X is overwritten by the computed solution
+      X.resize(B.rows(),B.cols());
+
+      // this ugly const_cast_derived() helps to detect aliasing when applying the permutations
+      for(Index j = 0; j < B.cols(); ++j)
+        X.col(j) = rowsPermutation() * B.const_cast_derived().col(j);
+      
+      // Forward substitution with L, then backward substitution with U
+      this->matrixL().solveInPlace(X);
+      this->matrixU().solveInPlace(X);
+      
+      // Permute back the solution 
+      for (Index j = 0; j < B.cols(); ++j)
+        X.col(j) = colsPermutation().inverse() * X.col(j);
+      
+      return true; 
+    }
+    
+    /**
+      * \returns the absolute value of the determinant of the matrix of which
+      * *this is the LU decomposition.
+      *
+      * \warning a determinant can be very big or small, so for matrices
+      * of large enough dimension, there is a risk of overflow/underflow.
+      * One way to work around that is to use logAbsDeterminant() instead.
+      *
+      * \sa logAbsDeterminant(), signDeterminant()
+      */
+     Scalar absDeterminant()
+    {
+      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+      // Initialize the determinant
+      Scalar det = Scalar(1.);
+      // Note that the diagonal blocks of U are stored in supernodes,
+      // which are available in the  L part :)
+      for (Index j = 0; j < this->cols(); ++j)
+      {
+        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+        {
+          if(it.index() == j)
+          {
+            using std::abs;
+            det *= abs(it.value());
+            break;
+          }
+        }
+       }
+       return det;
+     }
+
+     /** \returns the natural log of the absolute value of the determinant of the matrix
+       * of which *this is the LU decomposition.
+       *
+       * \note This method is useful to work around the risk of overflow/underflow that's
+       * inherent to the determinant computation.
+       *
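+       * A minimal sketch of that workaround (names are illustrative), combining it
+       * with signDeterminant() to recover the determinant without overflow:
+       * \code
+       * double logAbsDet = lu.logAbsDeterminant();
+       * double sign      = lu.signDeterminant();
+       * // det = sign * exp(logAbsDet)
+       * \endcode
+       *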
+       * \sa absDeterminant(), signDeterminant()
+       */
+     Scalar logAbsDeterminant() const
+     {
+       eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+       Scalar det = Scalar(0.);
+       for (Index j = 0; j < this->cols(); ++j)
+       {
+         for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+         {
+           if(it.row() < j) continue;
+           if(it.row() == j)
+           {
+             using std::log; using std::abs;
+             det += log(abs(it.value()));
+             break;
+           }
+         }
+       }
+       return det;
+     }
+
+    /** \returns a number representing the sign of the determinant
+      *
+      * \sa absDeterminant(), logAbsDeterminant()
+      */
+    Scalar signDeterminant()
+    {
+      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+      // Initialize the determinant
+      Index det = 1;
+      // Note that the diagonal blocks of U are stored in supernodes,
+      // which are available in the  L part :)
+      for (Index j = 0; j < this->cols(); ++j)
+      {
+        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+        {
+          if(it.index() == j)
+          {
+            if(it.value()<0)
+              det = -det;
+            else if(it.value()==0)
+              return 0;
+            break;
+          }
+        }
+      }
+      return det * m_detPermR * m_detPermC;
+    }
+    
+    /** \returns the determinant of the matrix.
+      *
+      * \sa absDeterminant(), logAbsDeterminant()
+      */
+    Scalar determinant()
+    {
+      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+      // Initialize the determinant
+      Scalar det = Scalar(1.);
+      // Note that the diagonal blocks of U are stored in supernodes,
+      // which are available in the  L part :)
+      for (Index j = 0; j < this->cols(); ++j)
+      {
+        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+        {
+          if(it.index() == j)
+          {
+            det *= it.value();
+            break;
+          }
+        }
+      }
+      return det * Scalar(m_detPermR * m_detPermC);
+    }
+
+  protected:
+    // Functions 
+    void initperfvalues()
+    {
+      m_perfv.panel_size = 16;
+      m_perfv.relax = 1; 
+      m_perfv.maxsuper = 128; 
+      m_perfv.rowblk = 16; 
+      m_perfv.colblk = 8; 
+      m_perfv.fillfactor = 20;  
+    }
+      
+    // Variables 
+    mutable ComputationInfo m_info;
+    bool m_isInitialized;
+    bool m_factorizationIsOk;
+    bool m_analysisIsOk;
+    std::string m_lastError;
+    NCMatrix m_mat; // The input (permuted) matrix 
+    SCMatrix m_Lstore; // The lower triangular matrix (supernodal)
+    MappedSparseMatrix<Scalar,ColMajor,Index> m_Ustore; // The upper triangular matrix
+    PermutationType m_perm_c; // Column permutation 
+    PermutationType m_perm_r; // Row permutation
+    IndexVector m_etree; // Column elimination tree 
+    
+    typename Base::GlobalLU_t m_glu; 
+                               
+    // SparseLU options 
+    bool m_symmetricmode;
+    // values for performance 
+    internal::perfvalues<Index> m_perfv; 
+    RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
+    Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
+    Index m_detPermR, m_detPermC; // Determinants of the permutation matrices
+  private:
+    // Disable copy constructor 
+    SparseLU (const SparseLU& );
+  
+}; // End class SparseLU
+
+
+
+// Functions needed by the analysis phase
+/** 
+  * Compute the column permutation to minimize the fill-in
+  * 
+  *  - Apply this permutation to the input matrix 
+  * 
+  *  - Compute the column elimination tree on the permuted matrix 
+  * 
+  *  - Postorder the elimination tree and the column permutation
+  * 
+  */
+template <typename MatrixType, typename OrderingType>
+void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
+{
+  
+  //TODO  It is possible as in SuperLU to compute row and columns scaling vectors to equilibrate the matrix mat.
+  
+  OrderingType ord; 
+  ord(mat,m_perm_c);
+  
+  // Apply the permutation to the column of the input  matrix
+  //First copy the whole input matrix. 
+  m_mat = mat;
+  if (m_perm_c.size()) {
+    m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.  
+    //Then, permute only the column pointers
+    const Index * outerIndexPtr;
+    if (mat.isCompressed()) outerIndexPtr = mat.outerIndexPtr();
+    else
+    {
+      Index *outerIndexPtr_t = new Index[mat.cols()+1];
+      for(Index i = 0; i <= mat.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
+      outerIndexPtr = outerIndexPtr_t;
+    }
+    for (Index i = 0; i < mat.cols(); i++)
+    {
+      m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
+      m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
+    }
+    if(!mat.isCompressed()) delete[] outerIndexPtr;
+  }
+  // Compute the column elimination tree of the permuted matrix 
+  IndexVector firstRowElt;
+  internal::coletree(m_mat, m_etree,firstRowElt); 
+     
+  // In symmetric mode, do not do postorder here
+  if (!m_symmetricmode) {
+    IndexVector post, iwork; 
+    // Post order etree
+    internal::treePostorder(m_mat.cols(), m_etree, post); 
+      
+   
+    // Renumber etree in postorder 
+    Index m = m_mat.cols(); 
+    iwork.resize(m+1);
+    for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
+    m_etree = iwork;
+    
+    // Postmultiply A*Pc by post, i.e. reorder the matrix according to the postorder of the etree
+    PermutationType post_perm(m); 
+    for (Index i = 0; i < m; i++) 
+      post_perm.indices()(i) = post(i); 
+        
+    // Combine the two permutations : postorder the permutation for future use
+    if(m_perm_c.size()) {
+      m_perm_c = post_perm * m_perm_c;
+    }
+    
+  } // end postordering 
+  
+  m_analysisIsOk = true; 
+}
+
+// Functions needed by the numerical factorization phase
+
+
+/** 
+  *  - Numerical factorization 
+  *  - Interleaved with the symbolic factorization 
+  * On exit, info is 
+  * 
+  *    = 0: successful factorization
+  * 
+  *    > 0: if info = i, and i is
+  * 
+  *       <= ncol (the number of columns): U(i,i) is exactly zero. The factorization
+  *          has been completed, but the factor U is exactly singular,
+  *          and division by zero will occur if it is used to solve a
+  *          system of equations.
+  * 
+  *       > ncol: the number of bytes allocated when the memory allocation
+  *         failure occurred, plus ncol. If lwork = -1, it is
+  *         the estimated amount of space needed, plus ncol.  
+  */
+template <typename MatrixType, typename OrderingType>
+void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
+{
+  using internal::emptyIdxLU;
+  eigen_assert(m_analysisIsOk && "analyzePattern() should be called first"); 
+  eigen_assert((matrix.rows() == matrix.cols()) && "Only for square matrices");
+  
+  typedef typename IndexVector::Scalar Index; 
+  
+  
+  // Apply the column permutation computed in analyzePattern()
+  //   m_mat = matrix * m_perm_c.inverse(); 
+  m_mat = matrix;
+  if (m_perm_c.size()) 
+  {
+    m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.
+    //Then, permute only the column pointers
+    const Index * outerIndexPtr;
+    if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
+    else
+    {
+      Index* outerIndexPtr_t = new Index[matrix.cols()+1];
+      for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
+      outerIndexPtr = outerIndexPtr_t;
+    }
+    for (Index i = 0; i < matrix.cols(); i++)
+    {
+      m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
+      m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
+    }
+    if(!matrix.isCompressed()) delete[] outerIndexPtr;
+  } 
+  else 
+  { //FIXME This should not be needed if the empty permutation is handled transparently
+    m_perm_c.resize(matrix.cols());
+    for(Index i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
+  }
+  
+  Index m = m_mat.rows();
+  Index n = m_mat.cols();
+  Index nnz = m_mat.nonZeros();
+  Index maxpanel = m_perfv.panel_size * m;
+  // Allocate working storage common to the factor routines
+  Index lwork = 0;
+  Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu); 
+  if (info) 
+  {
+    m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ;
+    m_factorizationIsOk = false;
+    return ; 
+  }
+  
+  // Set up pointers for integer working arrays 
+  IndexVector segrep(m); segrep.setZero();
+  IndexVector parent(m); parent.setZero();
+  IndexVector xplore(m); xplore.setZero();
+  IndexVector repfnz(maxpanel);
+  IndexVector panel_lsub(maxpanel);
+  IndexVector xprune(n); xprune.setZero();
+  IndexVector marker(m*internal::LUNoMarker); marker.setZero();
+  
+  repfnz.setConstant(-1); 
+  panel_lsub.setConstant(-1);
+  
+  // Set up pointers for scalar working arrays 
+  ScalarVector dense; 
+  dense.setZero(maxpanel);
+  ScalarVector tempv; 
+  tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) );
+  
+  // Compute the inverse of perm_c
+  PermutationType iperm_c(m_perm_c.inverse()); 
+  
+  // Identify initial relaxed snodes
+  IndexVector relax_end(n);
+  if ( m_symmetricmode == true ) 
+    Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
+  else
+    Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
+  
+  
+  m_perm_r.resize(m); 
+  m_perm_r.indices().setConstant(-1);
+  marker.setConstant(-1);
+  m_detPermR = 1; // Record the determinant of the row permutation
+  
+  m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);
+  m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);
+  
+  // Work on one 'panel' at a time. A panel is one of the following :
+  //  (a) a relaxed supernode at the bottom of the etree, or
+  //  (b) panel_size contiguous columns, <panel_size> defined by the user
+  Index jcol; 
+  IndexVector panel_histo(n);
+  Index pivrow; // Pivotal row number in the original row matrix
+  Index nseg1; // Number of segments in U-column above panel row jcol
+  Index nseg; // Number of segments in each U-column 
+  Index irep; 
+  Index i, k, jj; 
+  for (jcol = 0; jcol < n; )
+  {
+    // Adjust panel size so that a panel won't overlap with the next relaxed snode. 
+    Index panel_size = m_perfv.panel_size; // upper bound on panel width
+    for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
+    {
+      if (relax_end(k) != emptyIdxLU) 
+      {
+        panel_size = k - jcol; 
+        break; 
+      }
+    }
+    if (k == n) 
+      panel_size = n - jcol; 
+      
+    // Symbolic outer factorization on a panel of columns 
+    Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu); 
+    
+    // Numeric sup-panel updates in topological order 
+    Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu); 
+    
+    // Sparse LU within the panel, and below the panel diagonal 
+    for ( jj = jcol; jj< jcol + panel_size; jj++) 
+    {
+      k = (jj - jcol) * m; // Column index for w-wide arrays 
+      
+      nseg = nseg1; // begin after all the panel segments
+      //Depth-first-search for the current column
+      VectorBlock<IndexVector> panel_lsubk(panel_lsub, k, m);
+      VectorBlock<IndexVector> repfnz_k(repfnz, k, m); 
+      info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu); 
+      if ( info ) 
+      {
+        m_lastError =  "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
+        m_info = NumericalIssue; 
+        m_factorizationIsOk = false; 
+        return; 
+      }
+      // Numeric updates to this column 
+      VectorBlock<ScalarVector> dense_k(dense, k, m); 
+      VectorBlock<IndexVector> segrep_k(segrep, nseg1, m-nseg1); 
+      info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu); 
+      if ( info ) 
+      {
+        m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
+        m_info = NumericalIssue; 
+        m_factorizationIsOk = false; 
+        return; 
+      }
+      
+      // Copy the U-segments to ucol(*)
+      info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu); 
+      if ( info ) 
+      {
+        m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
+        m_info = NumericalIssue; 
+        m_factorizationIsOk = false; 
+        return; 
+      }
+      
+      // Form the L-segment 
+      info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
+      if ( info ) 
+      {
+        m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
+        std::ostringstream returnInfo;
+        returnInfo << info; 
+        m_lastError += returnInfo.str();
+        m_info = NumericalIssue; 
+        m_factorizationIsOk = false; 
+        return; 
+      }
+      
+      // Update the determinant of the row permutation matrix
+      // FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not directly the row pivot.
+      if (pivrow != jj) m_detPermR = -m_detPermR;
+
+      // Prune columns (0:jj-1) using column jj
+      Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu); 
+      
+      // Reset repfnz for this column 
+      for (i = 0; i < nseg; i++)
+      {
+        irep = segrep(i); 
+        repfnz_k(irep) = emptyIdxLU; 
+      }
+    } // end SparseLU within the panel  
+    jcol += panel_size;  // Move to the next panel
+  } // end for -- end elimination 
+  
+  m_detPermR = m_perm_r.determinant();
+  m_detPermC = m_perm_c.determinant();
+  
+  // Count the number of nonzeros in factors 
+  Base::countnz(n, m_nnzL, m_nnzU, m_glu); 
+  // Apply permutation  to the L subscripts 
+  Base::fixupL(n, m_perm_r.indices(), m_glu);
+  
+  // Create supernode matrix L 
+  m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup); 
+  // Create the column major upper sparse matrix  U; 
+  new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, Index> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() ); 
+  
+  m_info = Success;
+  m_factorizationIsOk = true;
+}
+
+template<typename MappedSupernodalType>
+struct SparseLUMatrixLReturnType : internal::no_assignment_operator
+{
+  typedef typename MappedSupernodalType::Index Index;
+  typedef typename MappedSupernodalType::Scalar Scalar;
+  SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
+  { }
+  Index rows() { return m_mapL.rows(); }
+  Index cols() { return m_mapL.cols(); }
+  template<typename Dest>
+  void solveInPlace( MatrixBase<Dest> &X) const
+  {
+    m_mapL.solveInPlace(X);
+  }
+  const MappedSupernodalType& m_mapL;
+};
+
+template<typename MatrixLType, typename MatrixUType>
+struct SparseLUMatrixUReturnType : internal::no_assignment_operator
+{
+  typedef typename MatrixLType::Index Index;
+  typedef typename MatrixLType::Scalar Scalar;
+  SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
+  : m_mapL(mapL),m_mapU(mapU)
+  { }
+  Index rows() { return m_mapL.rows(); }
+  Index cols() { return m_mapL.cols(); }
+
+  template<typename Dest>   void solveInPlace(MatrixBase<Dest> &X) const
+  {
+    Index nrhs = X.cols();
+    Index n = X.rows();
+    // Backward solve with U
+    for (Index k = m_mapL.nsuper(); k >= 0; k--)
+    {
+      Index fsupc = m_mapL.supToCol()[k];
+      Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
+      Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
+      Index luptr = m_mapL.colIndexPtr()[fsupc];
+
+      if (nsupc == 1)
+      {
+        for (Index j = 0; j < nrhs; j++)
+        {
+          X(fsupc, j) /= m_mapL.valuePtr()[luptr];
+        }
+      }
+      else
+      {
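+        // Map the nsupc x nsupc diagonal block of the supernode (stored in the L
+        // factor with leading dimension lda) and the matching rows of X, then
+        // perform a dense upper-triangular solve on that block.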
+        Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
+        Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
+        U = A.template triangularView<Upper>().solve(U);
+      }
+
+      for (Index j = 0; j < nrhs; ++j)
+      {
+        for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
+        {
+          typename MatrixUType::InnerIterator it(m_mapU, jcol);
+          for ( ; it; ++it)
+          {
+            Index irow = it.index();
+            X(irow, j) -= X(jcol, j) * it.value();
+          }
+        }
+      }
+    } // End For U-solve
+  }
+  const MatrixLType& m_mapL;
+  const MatrixUType& m_mapU;
+};
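+// Note on the U-solve above: it computes X <- U^{-1} X. Supernodes are visited
+// in reverse order; the square diagonal block of each supernode is solved with
+// a dense triangularView<Upper>() solve, then the remaining entries of U (held
+// in m_mapU) are eliminated column by column via the InnerIterator loop.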
+
+namespace internal {
+  
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
+  : solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
+{
+  typedef SparseLU<_MatrixType,Derived> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct sparse_solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
+  : sparse_solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
+{
+  typedef SparseLU<_MatrixType,Derived> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    this->defaultEvalTo(dst);
+  }
+};
+} // end namespace internal
+
+} // End namespace Eigen 
+
+#endif
diff --git a/Eigen/src/SparseLU/SparseLUImpl.h b/Eigen/src/SparseLU/SparseLUImpl.h
new file mode 100644
index 0000000..14d7089
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLUImpl.h
@@ -0,0 +1,64 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#ifndef SPARSELU_IMPL_H
+#define SPARSELU_IMPL_H
+
+namespace Eigen {
+namespace internal {
+  
+/** \ingroup SparseLU_Module
+  * \class SparseLUImpl
+  * Base class for SparseLU
+  */
+template <typename Scalar, typename Index>
+class SparseLUImpl
+{
+  public:
+    typedef Matrix<Scalar,Dynamic,1> ScalarVector;
+    typedef Matrix<Index,Dynamic,1> IndexVector; 
+    typedef typename ScalarVector::RealScalar RealScalar; 
+    typedef Ref<Matrix<Scalar,Dynamic,1> > BlockScalarVector;
+    typedef Ref<Matrix<Index,Dynamic,1> > BlockIndexVector;
+    typedef LU_GlobalLU_t<IndexVector, ScalarVector> GlobalLU_t; 
+    typedef SparseMatrix<Scalar,ColMajor,Index> MatrixType; 
+    
+  protected:
+     template <typename VectorType>
+     Index expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions);
+     Index memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size,  GlobalLU_t& glu); 
+     template <typename VectorType>
+     Index memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions);
+     void heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end); 
+     void relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end); 
+     Index snode_dfs(const Index jcol, const Index kcol,const MatrixType& mat,  IndexVector& xprune, IndexVector& marker, GlobalLU_t& glu); 
+     Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu);
+     Index pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu);
+     template <typename Traits>
+     void dfs_kernel(const Index jj, IndexVector& perm_r,
+                    Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
+                    Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
+                    IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits);
+     void panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
+    
+     void panel_bmod(const Index m, const Index w, const Index jcol, const Index nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu);
+     Index column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg,  BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);
+     Index column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu); 
+     Index copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu); 
+     void pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu);
+     void countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu); 
+     void fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu); 
+     
+     template<typename , typename >
+     friend struct column_dfs_traits;
+}; 
+
+} // end namespace internal
+} // namespace Eigen
+
+#endif
diff --git a/Eigen/src/SparseLU/SparseLU_Memory.h b/Eigen/src/SparseLU/SparseLU_Memory.h
new file mode 100644
index 0000000..1ffa7d5
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_Memory.h
@@ -0,0 +1,227 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* 
+ 
+ * NOTE: This file is the modified version of [s,d,c,z]memory.c files in SuperLU 
+ 
+ * -- SuperLU routine (version 3.1) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * August 1, 2008
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+
+#ifndef EIGEN_SPARSELU_MEMORY
+#define EIGEN_SPARSELU_MEMORY
+
+namespace Eigen {
+namespace internal {
+  
+enum { LUNoMarker = 3 };
+enum {emptyIdxLU = -1};
+template<typename Index>
+inline Index LUnumTempV(Index& m, Index& w, Index& t, Index& b)
+{
+  return (std::max)(m, (t+b)*w);
+}
+
+template< typename Scalar, typename Index>
+inline Index LUTempSpace(Index&m, Index& w)
+{
+  return (2*w + 4 + LUNoMarker) * m * sizeof(Index) + (w + 1) * m * sizeof(Scalar);
+}
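+// Note: LUTempSpace(m, w) mirrors the working-space formula used in memInit()
+// below with w = panel_size: (2w + 4 + LUNoMarker) index slots plus (w + 1)
+// scalar slots per row of the matrix.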
+
+
+
+
+/** 
+  * Expand the existing storage to accommodate more fill-ins
+  * \param vec Valid pointer to the vector to allocate or expand
+  * \param[in,out] length  On input, the current length of the vector to be increased. On output, the length of the newly allocated vector
+  * \param[in] nbElts Current number of elements in the factors
+  * \param keep_prev  1: use length and do not expand the vector; 0: compute new_len and expand
+  * \param[in,out] num_expansions Number of times the memory has been expanded
+  */
+template <typename Scalar, typename Index>
+template <typename VectorType>
+Index  SparseLUImpl<Scalar,Index>::expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions) 
+{
+  
+  float alpha = 1.5; // Ratio of the memory increase 
+  Index new_len; // New size of the allocated memory
+  
+  if(num_expansions == 0 || keep_prev) 
+    new_len = length; // First call: allocate the requested length
+  else 
+    new_len = (std::max)(length+1,Index(alpha * length));
+  
+  VectorType old_vec; // Temporary vector to hold the previous values   
+  if (nbElts > 0 )
+    old_vec = vec.segment(0,nbElts); 
+  
+  //Allocate or expand the current vector
+#ifdef EIGEN_EXCEPTIONS
+  try
+#endif
+  {
+    vec.resize(new_len); 
+  }
+#ifdef EIGEN_EXCEPTIONS
+  catch(std::bad_alloc& )
+#else
+  if(!vec.size())
+#endif
+  {
+    if (!num_expansions)
+    {
+      // First allocation attempt, coming from LUMemInit();
+      // let LUMemInit() deal with it.
+      return -1;
+    }
+    if (keep_prev)
+    {
+      // In this case, the memory length should not be reduced
+      return new_len;
+    }
+    else 
+    {
+      // Reduce the size and increase again 
+      Index tries = 0; // Number of attempts
+      do 
+      {
+        alpha = (alpha + 1)/2;
+        new_len = (std::max)(length+1,Index(alpha * length));
+#ifdef EIGEN_EXCEPTIONS
+        try
+#endif
+        {
+          vec.resize(new_len); 
+        }
+#ifdef EIGEN_EXCEPTIONS
+        catch(std::bad_alloc& )
+#else
+        if (!vec.size())
+#endif
+        {
+          tries += 1; 
+          if ( tries > 10) return new_len; 
+        }
+      } while (!vec.size());
+    }
+  }
+  //Copy the previous values to the newly allocated space 
+  if (nbElts > 0)
+    vec.segment(0, nbElts) = old_vec;   
+   
+  
+  length  = new_len;
+  if(num_expansions) ++num_expansions;
+  return 0; 
+}
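+// A minimal sketch of the growth policy implemented above (hypothetical helper
+// name, for illustration only): the requested length grows geometrically by
+// alpha, and alpha decays towards 1 when an allocation fails.
+//   Index next_length(Index length, float alpha) // e.g. alpha = 1.5
+//   {
+//     return (std::max)(length + 1, Index(alpha * length));
+//   }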
+
+/**
+ * \brief  Allocate various working space for the numerical factorization phase.
+ * \param m number of rows of the input matrix 
+ * \param n number of columns 
+ * \param annz number of initial nonzeros in the matrix 
+ * \param lwork  if lwork=-1, this routine returns an estimated size of the required memory
+ * \param glu persistent data to facilitate multiple factors : will be deleted later ??
+ * \param fillratio estimated ratio of fill in the factors
+ * \param panel_size Size of a panel
+ * \return the estimated size of the required memory if lwork = -1; otherwise, the size of the memory actually allocated when allocation failed, and 0 on success
+ * \note Unlike SuperLU, this routine does not support successive factorization with the same pattern and the same row permutation
+ */
+template <typename Scalar, typename Index>
+Index SparseLUImpl<Scalar,Index>::memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size,  GlobalLU_t& glu)
+{
+  Index& num_expansions = glu.num_expansions; //No memory expansions so far
+  num_expansions = 0;
+  glu.nzumax = glu.nzlumax = (std::min)(fillratio * annz / n, m) * n; // estimated number of nonzeros in U (also used for the supernodal part of L)
+  glu.nzlmax = (std::max)(Index(4), fillratio) * annz / 4; // estimated  nnz in L factor
+  // Return the estimated size to the user if necessary
+  Index tempSpace;
+  tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar);
+  if (lwork == emptyIdxLU) 
+  {
+    Index estimated_size;
+    estimated_size = (5 * n + 5) * sizeof(Index)  + tempSpace
+                    + (glu.nzlmax + glu.nzumax) * sizeof(Index) + (glu.nzlumax+glu.nzumax) *  sizeof(Scalar) + n; 
+    return estimated_size;
+  }
+  
+  // Setup the required space 
+  
+  // First allocate Integer pointers for L\U factors
+  glu.xsup.resize(n+1);
+  glu.supno.resize(n+1);
+  glu.xlsub.resize(n+1);
+  glu.xlusup.resize(n+1);
+  glu.xusub.resize(n+1);
+
+  // Reserve memory for L/U factors
+  do 
+  {
+    if(     (expand<ScalarVector>(glu.lusup, glu.nzlumax, 0, 0, num_expansions)<0)
+        ||  (expand<ScalarVector>(glu.ucol,  glu.nzumax,  0, 0, num_expansions)<0)
+        ||  (expand<IndexVector> (glu.lsub,  glu.nzlmax,  0, 0, num_expansions)<0)
+        ||  (expand<IndexVector> (glu.usub,  glu.nzumax,  0, 1, num_expansions)<0) )
+    {
+      //Reduce the estimated size and retry
+      glu.nzlumax /= 2;
+      glu.nzumax /= 2;
+      glu.nzlmax /= 2;
+      if (glu.nzlumax < annz ) return glu.nzlumax; 
+    }
+  } while (!glu.lusup.size() || !glu.ucol.size() || !glu.lsub.size() || !glu.usub.size());
+  
+  ++num_expansions;
+  return 0;
+  
+} // end LuMemInit
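+// Usage sketch (an assumption for illustration; this is an internal routine):
+// passing lwork == -1 (emptyIdxLU) turns memInit into a dry run that performs
+// no allocation and only returns the estimated memory size in bytes:
+//   Index estimated_bytes = memInit(m, n, annz, /*lwork=*/-1, fillratio, panel_size, glu);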
+
+/** 
+ * \brief Expand the existing storage 
+ * \param vec vector to expand 
+ * \param[in,out] maxlen On input, the previous size of vec (number of elements to copy). On output, the new size
+ * \param nbElts current number of elements in the vector.
+ * \param memtype Type of the element to expand
+ * \param num_expansions Number of expansions 
+ * \return 0 on success, > 0 size of the memory allocated so far
+ */
+template <typename Scalar, typename Index>
+template <typename VectorType>
+Index SparseLUImpl<Scalar,Index>::memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions)
+{
+  Index failed_size; 
+  if (memtype == USUB)
+     failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 1, num_expansions);
+  else
+    failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 0, num_expansions);
+
+  if (failed_size)
+    return failed_size; 
+  
+  return 0 ;  
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // EIGEN_SPARSELU_MEMORY
diff --git a/Eigen/src/SparseLU/SparseLU_Structs.h b/Eigen/src/SparseLU/SparseLU_Structs.h
new file mode 100644
index 0000000..24d6bf1
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_Structs.h
@@ -0,0 +1,111 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* 
+ * NOTE: This file comes from a partly modified version of files slu_[s,d,c,z]defs.h
+ * -- SuperLU routine (version 4.1) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * November, 2010
+ * 
+ * Global data structures used in LU factorization -
+ * 
+ *   nsuper: #supernodes = nsuper + 1, numbered [0, nsuper].
+ *   (xsup,supno): supno[i] is the supernode number to which column i belongs;
+ *  xsup(s) points to the beginning of the s-th supernode.
+ *  e.g.   supno 0 1 2 2 3 3 3 4 4 4 4 4   (n=12)
+ *          xsup 0 1 2 4 7 12
+ *  Note: dfs will be performed on supernode rep. relative to the new 
+ *        row pivoting ordering
+ *
+ *   (xlsub,lsub): lsub[*] contains the compressed subscript of
+ *  rectangular supernodes; xlsub[j] points to the starting
+ *  location of the j-th column in lsub[*]. Note that xlsub 
+ *  is indexed by column.
+ *  Storage: original row subscripts
+ *
+ *      During the course of sparse LU factorization, we also use
+ *  (xlsub,lsub) for the purpose of symmetric pruning. For each
+ *  supernode {s,s+1,...,t=s+r} with first column s and last
+ *  column t, the subscript set
+ *    lsub[j], j=xlsub[s], .., xlsub[s+1]-1
+ *  is the structure of column s (i.e. structure of this supernode).
+ *  It is used for the storage of numerical values.
+ *  Furthermore,
+ *    lsub[j], j=xlsub[t], .., xlsub[t+1]-1
+ *  is the structure of the last column t of this supernode.
+ *  It is for the purpose of symmetric pruning. Therefore, the
+ *  structural subscripts can be rearranged without making physical
+ *  interchanges among the numerical values.
+ *
+ *  However, if the supernode has only one column, then we
+ *  only keep one set of subscripts. For any subscript interchange
+ *  performed, similar interchange must be done on the numerical
+ *  values.
+ *
+ *  The last column structures (for pruning) will be removed
+ *  after the numerical LU factorization phase.
+ *
+ *   (xlusup,lusup): lusup[*] contains the numerical values of the
+ *  rectangular supernodes; xlusup[j] points to the starting
+ *  location of the j-th column in storage vector lusup[*]
+ *  Note: xlusup is indexed by column.
+ *  Each rectangular supernode is stored by column-major
+ *  scheme, consistent with Fortran 2-dim array storage.
+ *
+ *   (xusub,ucol,usub): ucol[*] stores the numerical values of
+ *  U-columns outside the rectangular supernodes. The row
+ *  subscript of nonzero ucol[k] is stored in usub[k].
+ *  xusub[i] points to the starting location of column i in ucol.
+ *  Storage: new row subscripts; that is subscripts of PA.
+ */
+
+#ifndef EIGEN_LU_STRUCTS
+#define EIGEN_LU_STRUCTS
+namespace Eigen {
+namespace internal {
+  
+typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType; 
+
+template <typename IndexVector, typename ScalarVector>
+struct LU_GlobalLU_t {
+  typedef typename IndexVector::Scalar Index; 
+  IndexVector xsup; //First supernode column ... xsup(s) points to the beginning of the s-th supernode
+  IndexVector supno; // Supernode number corresponding to this column (column to supernode mapping)
+  ScalarVector  lusup; // nonzero values of L ordered by columns 
+  IndexVector lsub; // Compressed row indices of L rectangular supernodes. 
+  IndexVector xlusup; // pointers to the beginning of each column in lusup
+  IndexVector xlsub; // pointers to the beginning of each column in lsub
+  Index   nzlmax; // Current max size of lsub
+  Index   nzlumax; // Current max size of lusup
+  ScalarVector  ucol; // nonzero values of U ordered by columns 
+  IndexVector usub; // row indices of U columns in ucol
+  IndexVector xusub; // Pointers to the beginning of each column of U in ucol 
+  Index   nzumax; // Current max size of ucol
+  Index   n; // Number of columns in the matrix  
+  Index   num_expansions; 
+};
+
+// Values to set for performance
+template <typename Index>
+struct perfvalues {
+  Index panel_size; // a panel consists of at most <panel_size> consecutive columns
+  Index relax; // To control degree of relaxing supernodes. If the number of nodes (columns) 
+                // in a subtree of the elimination tree is less than relax, this subtree is considered 
+                // as one supernode regardless of the row structures of those columns
+  Index maxsuper; // The maximum size for a supernode in complete LU
+  Index rowblk; // The minimum row dimension for 2-D blocking to be used
+  Index colblk; // The minimum column dimension for 2-D blocking to be used
+  Index fillfactor; // The estimated fill factor for L and U, relative to A
+}; 
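+// For illustration only, a hypothetical initialization of these knobs (the
+// actual defaults are chosen by the factorization front-end, not here):
+//   perfvalues<int> pv;
+//   pv.panel_size = 16;  pv.relax = 1;   pv.maxsuper = 128;
+//   pv.rowblk = 16;      pv.colblk = 8;  pv.fillfactor = 20;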
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // EIGEN_LU_STRUCTS
diff --git a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
new file mode 100644
index 0000000..b16afd6
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h
@@ -0,0 +1,298 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSELU_SUPERNODAL_MATRIX_H
+#define EIGEN_SPARSELU_SUPERNODAL_MATRIX_H
+
+namespace Eigen {
+namespace internal {
+
+/** \ingroup SparseLU_Module
+ * \brief a class to manipulate the L supernodal factor from the SparseLU factorization
+ * 
+ * This class contains the data needed to store
+ * and manipulate the supernodes during the factorization and solution phases of SparseLU.
+ * Only the lower triangular matrix has supernodes.
+ * 
+ * NOTE : This class corresponds to the SCformat structure in SuperLU
+ * 
+ */
+/* TODO
+ * InnerIterator as for sparsematrix 
+ * SuperInnerIterator to iterate through all supernodes 
+ * Function for triangular solve
+ */
+template <typename _Scalar, typename _Index>
+class MappedSuperNodalMatrix
+{
+  public:
+    typedef _Scalar Scalar; 
+    typedef _Index Index;
+    typedef Matrix<Index,Dynamic,1> IndexVector; 
+    typedef Matrix<Scalar,Dynamic,1> ScalarVector;
+  public:
+    MappedSuperNodalMatrix()
+    {
+      
+    }
+    MappedSuperNodalMatrix(Index m, Index n,  ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, 
+             IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
+    {
+      setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col);
+    }
+    
+    ~MappedSuperNodalMatrix()
+    {
+      
+    }
+    /**
+     * Set appropriate pointers for the lower triangular supernodal matrix
+     * These infos are available at the end of the numerical factorization
+     * FIXME This class will be modified such that it can be used in the course 
+     * of the factorization.
+     */
+    void setInfos(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind, 
+             IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
+    {
+      m_row = m;
+      m_col = n; 
+      m_nzval = nzval.data(); 
+      m_nzval_colptr = nzval_colptr.data(); 
+      m_rowind = rowind.data(); 
+      m_rowind_colptr = rowind_colptr.data(); 
+      m_nsuper = col_to_sup(n); 
+      m_col_to_sup = col_to_sup.data(); 
+      m_sup_to_col = sup_to_col.data(); 
+    }
+    
+    /**
+     * Number of rows
+     */
+    Index rows() { return m_row; }
+    
+    /**
+     * Number of columns
+     */
+    Index cols() { return m_col; }
+    
+    /**
+     * Return the array of nonzero values packed by column
+     * 
+     * The size is nnz
+     */
+    Scalar* valuePtr() {  return m_nzval; }
+    
+    const Scalar* valuePtr() const 
+    {
+      return m_nzval; 
+    }
+    /**
+     * Return the pointers to the beginning of each column in \ref valuePtr()
+     */
+    Index* colIndexPtr()
+    {
+      return m_nzval_colptr; 
+    }
+    
+    const Index* colIndexPtr() const
+    {
+      return m_nzval_colptr; 
+    }
+    
+    /**
+     * Return the array of compressed row indices of all supernodes
+     */
+    Index* rowIndex()  { return m_rowind; }
+    
+    const Index* rowIndex() const
+    {
+      return m_rowind; 
+    }
+    
+    /**
+     * Return the pointers to the beginning of each column in \ref rowIndex()
+     */
+    Index* rowIndexPtr() { return m_rowind_colptr; }
+    
+    const Index* rowIndexPtr() const 
+    {
+      return m_rowind_colptr; 
+    }
+    
+    /** 
+     * Return the array of column-to-supernode mapping 
+     */
+    Index* colToSup()  { return m_col_to_sup; }
+    
+    const Index* colToSup() const
+    {
+      return m_col_to_sup;       
+    }
+    /**
+     * Return the array of supernode-to-column mapping
+     */
+    Index* supToCol() { return m_sup_to_col; }
+    
+    const Index* supToCol() const 
+    {
+      return m_sup_to_col;
+    }
+    
+    /**
+     * Return the number of supernodes
+     */
+    Index nsuper() const 
+    {
+      return m_nsuper; 
+    }
+    
+    class InnerIterator; 
+    template<typename Dest>
+    void solveInPlace( MatrixBase<Dest>&X) const;
+    
+      
+      
+    
+  protected:
+    Index m_row; // Number of rows
+    Index m_col; // Number of columns 
+    Index m_nsuper; // Number of supernodes 
+    Scalar* m_nzval; //array of nonzero values packed by column
+    Index* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j 
+    Index* m_rowind; // Array of compressed row indices of rectangular supernodes
+    Index* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j
+    Index* m_col_to_sup; // col_to_sup[j] is the supernode number to which column j belongs
+    Index* m_sup_to_col; //sup_to_col[s] points to the starting column of the s-th supernode
+    
+  private :
+};
+
+/**
+  * \brief InnerIterator class to iterate over nonzero values of the current column in the supernodal matrix L
+  * 
+  */
+template<typename Scalar, typename Index>
+class MappedSuperNodalMatrix<Scalar,Index>::InnerIterator
+{
+  public:
+     InnerIterator(const MappedSuperNodalMatrix& mat, Index outer)
+      : m_matrix(mat),
+        m_outer(outer), 
+        m_supno(mat.colToSup()[outer]),
+        m_idval(mat.colIndexPtr()[outer]),
+        m_startidval(m_idval),
+        m_endidval(mat.colIndexPtr()[outer+1]),
+        m_idrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]]),
+        m_endidrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]+1])
+    {}
+    inline InnerIterator& operator++()
+    { 
+      m_idval++; 
+      m_idrow++;
+      return *this;
+    }
+    inline Scalar value() const { return m_matrix.valuePtr()[m_idval]; }
+    
+    inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_idval]); }
+    
+    inline Index index() const { return m_matrix.rowIndex()[m_idrow]; }
+    inline Index row() const { return index(); }
+    inline Index col() const { return m_outer; }
+    
+    inline Index supIndex() const { return m_supno; }
+    
+    inline operator bool() const 
+    { 
+      return ( (m_idval < m_endidval) && (m_idval >= m_startidval)
+                && (m_idrow < m_endidrow) );
+    }
+    
+  protected:
+    const MappedSuperNodalMatrix& m_matrix; // Supernodal lower triangular matrix 
+    const Index m_outer;                    // Current column 
+    const Index m_supno;                    // Current SuperNode number
+    Index m_idval;                          // Index to browse the values in the current column
+    const Index m_startidval;               // Start of the column value
+    const Index m_endidval;                 // End of the column value
+    Index m_idrow;                          // Index to browse the row indices 
+    Index m_endidrow;                       // End index of row indices of the current column
+};
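+// Illustrative traversal of one column j of L (a sketch; `Lmat` stands for any
+// MappedSuperNodalMatrix already filled by setInfos()):
+//   for (MappedSuperNodalMatrix<double,int>::InnerIterator it(Lmat, j); it; ++it)
+//     std::cout << "L(" << it.row() << "," << it.col() << ") = " << it.value() << '\n';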
+
+/**
+ * \brief Solve with the supernode triangular matrix
+ * 
+ */
+template<typename Scalar, typename Index>
+template<typename Dest>
+void MappedSuperNodalMatrix<Scalar,Index>::solveInPlace( MatrixBase<Dest>&X) const
+{
+    Index n = X.rows(); 
+    Index nrhs = X.cols(); 
+    const Scalar * Lval = valuePtr();                 // Nonzero values 
+    Matrix<Scalar,Dynamic,Dynamic> work(n, nrhs);     // working matrix (one column per right-hand side)
+    work.setZero();
+    for (Index k = 0; k <= nsuper(); k ++)
+    {
+      Index fsupc = supToCol()[k];                    // First column of the current supernode 
+      Index istart = rowIndexPtr()[fsupc];            // Pointer index to the subscript of the current column
+      Index nsupr = rowIndexPtr()[fsupc+1] - istart;  // Number of rows in the current supernode
+      Index nsupc = supToCol()[k+1] - fsupc;          // Number of columns in the current supernode
+      Index nrow = nsupr - nsupc;                     // Number of rows in the non-diagonal part of the supernode
+      Index irow;                                     // Current row index
+      
+      if (nsupc == 1 )
+      {
+        for (Index j = 0; j < nrhs; j++)
+        {
+          InnerIterator it(*this, fsupc);
+          ++it; // Skip the diagonal element
+          for (; it; ++it)
+          {
+            irow = it.row();
+            X(irow, j) -= X(fsupc, j) * it.value();
+          }
+        }
+      }
+      else
+      {
+        // The supernode has more than one column 
+        Index luptr = colIndexPtr()[fsupc]; 
+        Index lda = colIndexPtr()[fsupc+1] - luptr;
+        
+        // Triangular solve 
+        Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) ); 
+        Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) ); 
+        U = A.template triangularView<UnitLower>().solve(U); 
+        
+        // Matrix-vector product 
+        new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) ); 
+        work.block(0, 0, nrow, nrhs) = A * U; 
+        
+        //Begin Scatter 
+        for (Index j = 0; j < nrhs; j++)
+        {
+          Index iptr = istart + nsupc; 
+          for (Index i = 0; i < nrow; i++)
+          {
+            irow = rowIndex()[iptr]; 
+            X(irow, j) -= work(i, j); // Scatter operation
+            work(i, j) = Scalar(0); 
+            iptr++;
+          }
+        }
+      }
+    } 
+}
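+// The routine above computes X <- L^{-1} X for the unit-lower triangular L:
+// singleton supernodes are handled with a plain sparse elimination, while for
+// wider supernodes a dense triangular solve on the diagonal block is followed
+// by a dense matrix product whose result is scattered back into X.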
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSELU_MATRIX_H
diff --git a/Eigen/src/SparseLU/SparseLU_Utils.h b/Eigen/src/SparseLU/SparseLU_Utils.h
new file mode 100644
index 0000000..15352ac
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_Utils.h
@@ -0,0 +1,80 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_SPARSELU_UTILS_H
+#define EIGEN_SPARSELU_UTILS_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Count the nonzero elements in the factors
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu)
+{
+ nnzL = 0; 
+ nnzU = (glu.xusub)(n); 
+ Index nsuper = (glu.supno)(n); 
+ Index jlen; 
+ Index i, j, fsupc;
+ if (n <= 0 ) return; 
+ // For each supernode
+ for (i = 0; i <= nsuper; i++)
+ {
+   fsupc = glu.xsup(i); 
+   jlen = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); 
+   
+   for (j = fsupc; j < glu.xsup(i+1); j++)
+   {
+     nnzL += jlen; 
+     nnzU += j - fsupc + 1; 
+     jlen--; 
+   }
+ }
+}
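+// Note: the stored block of supernode i is trapezoidal, so its first column
+// contributes jlen entries to L, the next one jlen-1, and so on, while the
+// j-fsupc+1 entries on and above the diagonal are accounted to U.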
+
+/**
+ * \brief Fix up the data storage lsub for L-subscripts. 
+ * 
+ * It removes the subscript sets used for structural pruning 
+ * and applies the row permutation to the remaining subscripts.
+ * 
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu)
+{
+  Index fsupc, i, j, k, jstart; 
+  
+  Index nextl = 0; 
+  Index nsuper = (glu.supno)(n); 
+  
+  // For each supernode 
+  for (i = 0; i <= nsuper; i++)
+  {
+    fsupc = glu.xsup(i); 
+    jstart = glu.xlsub(fsupc); 
+    glu.xlsub(fsupc) = nextl; 
+    for (j = jstart; j < glu.xlsub(fsupc + 1); j++)
+    {
+      glu.lsub(nextl) = perm_r(glu.lsub(j)); // Now indexed into P*A
+      nextl++;
+    }
+    for (k = fsupc+1; k < glu.xsup(i+1); k++)
+      glu.xlsub(k) = nextl; // other columns in supernode i
+  }
+  
+  glu.xlsub(n) = nextl; 
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // EIGEN_SPARSELU_UTILS_H
diff --git a/Eigen/src/SparseLU/SparseLU_column_bmod.h b/Eigen/src/SparseLU/SparseLU_column_bmod.h
new file mode 100644
index 0000000..f24bd87
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_column_bmod.h
@@ -0,0 +1,180 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* 
+ 
+ * NOTE: This file is the modified version of xcolumn_bmod.c file in SuperLU 
+ 
+ * -- SuperLU routine (version 3.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * October 15, 2003
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_COLUMN_BMOD_H
+#define SPARSELU_COLUMN_BMOD_H
+
+namespace Eigen {
+
+namespace internal {
+/**
+ * \brief Performs numeric block updates (sup-col) in topological order
+ * 
+ * \param jcol current column to update
+ * \param nseg Number of segments in the U part
+ * \param dense Store the full representation of the column
+ * \param tempv working array 
+ * \param segrep segment representative ...
+ * \param repfnz ??? First nonzero column in each row ???  ...
+ * \param fpanelc First column in the current panel
+ * \param glu Global LU data. 
+ * \return 0 - successful return 
+ *         > 0 - number of bytes allocated so far when memory ran out
+ * 
+ */
+template <typename Scalar, typename Index>
+Index SparseLUImpl<Scalar,Index>::column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu)
+{
+  Index  jsupno, k, ksub, krep, ksupno; 
+  Index lptr, nrow, isub, irow, nextlu, new_next, ufirst; 
+  Index fsupc, nsupc, nsupr, luptr, kfnz, no_zeros; 
+  /* krep = representative of current k-th supernode
+    * fsupc =  first supernodal column
+    * nsupc = number of columns in a supernode
+    * nsupr = number of rows in a supernode
+    * luptr = location of supernodal LU-block in storage
+    * kfnz = first nonz in the k-th supernodal segment
+    * no_zeros = number of leading zeros in a supernodal U-segment
+    */
+  
+  jsupno = glu.supno(jcol);
+  // For each nonzero supernode segment of U[*,j] in topological order 
+  k = nseg - 1; 
+  Index d_fsupc; // distance between the first column of the current panel and the 
+               // first column of the current snode
+  Index fst_col; // First column within small LU update
+  Index segsize; 
+  for (ksub = 0; ksub < nseg; ksub++)
+  {
+    krep = segrep(k); k--; 
+    ksupno = glu.supno(krep); 
+    if (jsupno != ksupno )
+    {
+      // outside the rectangular supernode 
+      fsupc = glu.xsup(ksupno); 
+      fst_col = (std::max)(fsupc, fpanelc); 
+      
+      // Distance from the current supernode to the current panel; 
+      // d_fsupc = 0 if fsupc >= fpanelc
+      d_fsupc = fst_col - fsupc; 
+      
+      luptr = glu.xlusup(fst_col) + d_fsupc; 
+      lptr = glu.xlsub(fsupc) + d_fsupc; 
+      
+      kfnz = repfnz(krep); 
+      kfnz = (std::max)(kfnz, fpanelc); 
+      
+      segsize = krep - kfnz + 1; 
+      nsupc = krep - fst_col + 1; 
+      nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); 
+      nrow = nsupr - d_fsupc - nsupc;
+      Index lda = glu.xlusup(fst_col+1) - glu.xlusup(fst_col);
+      
+      
+      // Perform a triangular solver and block update, 
+      // then scatter the result of sup-col update to dense
+      no_zeros = kfnz - fst_col; 
+      if(segsize==1)
+        LU_kernel_bmod<1>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
+      else
+        LU_kernel_bmod<Dynamic>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
+    } // end if jsupno 
+  } // end for each segment
+  
+  // Process the supernodal portion of  L\U[*,j]
+  nextlu = glu.xlusup(jcol); 
+  fsupc = glu.xsup(jsupno);
+  
+  // copy the SPA dense into L\U[*,j]
+  Index mem; 
+  new_next = nextlu + glu.xlsub(fsupc + 1) - glu.xlsub(fsupc); 
+  Index offset = internal::first_multiple<Index>(new_next, internal::packet_traits<Scalar>::size) - new_next;
+  if(offset)
+    new_next += offset;
+  while (new_next > glu.nzlumax )
+  {
+    mem = memXpand<ScalarVector>(glu.lusup, glu.nzlumax, nextlu, LUSUP, glu.num_expansions);  
+    if (mem) return mem; 
+  }
+  
+  for (isub = glu.xlsub(fsupc); isub < glu.xlsub(fsupc+1); isub++)
+  {
+    irow = glu.lsub(isub);
+    glu.lusup(nextlu) = dense(irow);
+    dense(irow) = Scalar(0.0); 
+    ++nextlu; 
+  }
+  
+  if(offset)
+  {
+    glu.lusup.segment(nextlu,offset).setZero();
+    nextlu += offset;
+  }
+  glu.xlusup(jcol + 1) = nextlu;  // close L\U(*,jcol); 
+  
+  /* For more updates within the panel (also within the current supernode),
+   * should start from the first column of the panel, or the first column
+   * of the supernode, whichever is bigger. There are two cases:
+   *  1) fsupc < fpanelc, then fst_col <-- fpanelc
+   *  2) fsupc >= fpanelc, then fst_col <-- fsupc
+   */
+  fst_col = (std::max)(fsupc, fpanelc); 
+  
+  if (fst_col  < jcol)
+  {
+    // Distance between the current supernode and the current panel
+    // d_fsupc = 0 if fsupc >= fpanelc
+    d_fsupc = fst_col - fsupc; 
+    
+    lptr = glu.xlsub(fsupc) + d_fsupc; 
+    luptr = glu.xlusup(fst_col) + d_fsupc; 
+    nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); // leading dimension
+    nsupc = jcol - fst_col; // excluding jcol 
+    nrow = nsupr - d_fsupc - nsupc; 
+    
+    // points to the beginning of jcol in snode L\U(jsupno) 
+    ufirst = glu.xlusup(jcol) + d_fsupc; 
+    Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol);
+    Map<Matrix<Scalar,Dynamic,Dynamic>, 0,  OuterStride<> > A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) ); 
+    VectorBlock<ScalarVector> u(glu.lusup, ufirst, nsupc); 
+    u = A.template triangularView<UnitLower>().solve(u); 
+    
+    new (&A) Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) ); 
+    VectorBlock<ScalarVector> l(glu.lusup, ufirst+nsupc, nrow); 
+    l.noalias() -= A * u;
+    
+  } // End if fst_col
+  return 0; 
+}
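+// Summary of the routine above: each U-segment outside the current supernode
+// triggers a small dense solve-and-update kernel (LU_kernel_bmod) scattered
+// into `dense`; the column is then copied into the L\U storage and, within the
+// current panel/supernode, finished with a dense triangular solve and update.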
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // SPARSELU_COLUMN_BMOD_H
diff --git a/Eigen/src/SparseLU/SparseLU_column_dfs.h b/Eigen/src/SparseLU/SparseLU_column_dfs.h
new file mode 100644
index 0000000..4c04b0e
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_column_dfs.h
@@ -0,0 +1,177 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* 
+ 
+ * NOTE: This file is the modified version of [s,d,c,z]column_dfs.c file in SuperLU 
+ 
+ * -- SuperLU routine (version 2.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * November 15, 1997
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_COLUMN_DFS_H
+#define SPARSELU_COLUMN_DFS_H
+
+template <typename Scalar, typename Index> class SparseLUImpl;
+namespace Eigen {
+
+namespace internal {
+
+template<typename IndexVector, typename ScalarVector>
+struct column_dfs_traits : no_assignment_operator
+{
+  typedef typename ScalarVector::Scalar Scalar;
+  typedef typename IndexVector::Scalar Index;
+  column_dfs_traits(Index jcol, Index& jsuper, typename SparseLUImpl<Scalar, Index>::GlobalLU_t& glu, SparseLUImpl<Scalar, Index>& luImpl)
+   : m_jcol(jcol), m_jsuper_ref(jsuper), m_glu(glu), m_luImpl(luImpl)
+ {}
+  bool update_segrep(Index /*krep*/, Index /*jj*/)
+  {
+    return true;
+  }
+  void mem_expand(IndexVector& lsub, Index& nextl, Index chmark)
+  {
+    if (nextl >= m_glu.nzlmax)
+      m_luImpl.memXpand(lsub, m_glu.nzlmax, nextl, LSUB, m_glu.num_expansions); 
+    if (chmark != (m_jcol-1)) m_jsuper_ref = emptyIdxLU;
+  }
+  enum { ExpandMem = true };
+  
+  Index m_jcol;
+  Index& m_jsuper_ref;
+  typename SparseLUImpl<Scalar, Index>::GlobalLU_t& m_glu;
+  SparseLUImpl<Scalar, Index>& m_luImpl;
+};
+
+
+/**
+ * \brief Performs a symbolic factorization on column jcol and decides the supernode boundary
+ * 
+ * A supernode representative is the last column of a supernode.
+ * The nonzeros in U[*,j] are segments that end at supernodes representatives. 
+ * The routine returns a list of the supernodal representatives 
+ * in topological order of the dfs that generates them. 
+ * The location of the first nonzero in each supernodal segment 
+ * (supernodal entry location) is also returned. 
+ * 
+ * \param m number of rows in the matrix
+ * \param jcol Current column 
+ * \param perm_r Row permutation
+ * \param maxsuper  Maximum number of columns allowed in a supernode
+ * \param [in,out] nseg Number of segments in current U[*,j] - new segments appended
+ * \param lsub_col defines the rhs vector to start the dfs
+ * \param [in,out] segrep Segment representatives - new segments appended 
+ * \param repfnz  First nonzero location in each row
+ * \param xprune 
+ * \param marker  marker[i] == jj, if i was visited during dfs of current column jj;
+ * \param parent
+ * \param xplore working array
+ * \param glu global LU data 
+ * \return 0 success
+ *         > 0 number of bytes allocated so far when memory ran out
+ * 
+ */
+template <typename Scalar, typename Index>
+Index SparseLUImpl<Scalar,Index>::column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg,  BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
+{
+  
+  Index jsuper = glu.supno(jcol); 
+  Index nextl = glu.xlsub(jcol); 
+  VectorBlock<IndexVector> marker2(marker, 2*m, m); 
+  
+  
+  column_dfs_traits<IndexVector, ScalarVector> traits(jcol, jsuper, glu, *this);
+  
+  // For each nonzero in A(*,jcol) do dfs 
+  for (Index k = 0; ((k < m) ? lsub_col[k] != emptyIdxLU : false) ; k++)
+  {
+    Index krow = lsub_col(k); 
+    lsub_col(k) = emptyIdxLU; 
+    Index kmark = marker2(krow); 
+    
+    // krow was visited before, go to the next nonz; 
+    if (kmark == jcol) continue;
+    
+    dfs_kernel(jcol, perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent,
+                   xplore, glu, nextl, krow, traits);
+  } // for each nonzero ... 
+  
+  Index fsupc, jptr, jm1ptr, ito, ifrom, istop;
+  Index nsuper = glu.supno(jcol);
+  Index jcolp1 = jcol + 1;
+  Index jcolm1 = jcol - 1;
+  
+  // check to see if j belongs in the same supernode as j-1
+  if ( jcol == 0 )
+  { // Do nothing for column 0 
+    nsuper = glu.supno(0) = 0 ;
+  }
+  else 
+  {
+    fsupc = glu.xsup(nsuper); 
+    jptr = glu.xlsub(jcol); // Not yet compressed
+    jm1ptr = glu.xlsub(jcolm1); 
+    
+    // Use supernodes of type T2 : see SuperLU paper
+    if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU;
+    
+    // Make sure the number of columns in a supernode doesn't
+    // exceed threshold
+    if ( (jcol - fsupc) >= maxsuper) jsuper = emptyIdxLU; 
+    
+    /* If jcol starts a new supernode, reclaim storage space in
+     * glu.lsub from previous supernode. Note we only store 
+     * the subscript set of the first and last columns of 
+     * a supernode. (first for num values, last for pruning)
+     */
+    if (jsuper == emptyIdxLU)
+    { // starts a new supernode 
+      if ( (fsupc < jcolm1-1) ) 
+      { // >= 3 columns in nsuper
+        ito = glu.xlsub(fsupc+1);
+        glu.xlsub(jcolm1) = ito; 
+        istop = ito + jptr - jm1ptr; 
+        xprune(jcolm1) = istop; // initialize xprune(jcol-1)
+        glu.xlsub(jcol) = istop; 
+        
+        for (ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
+          glu.lsub(ito) = glu.lsub(ifrom); 
+        nextl = ito;  // = istop + length(jcol)
+      }
+      nsuper++; 
+      glu.supno(jcol) = nsuper; 
+    } // if a new supernode 
+  } // end else:  jcol > 0
+  
+  // Tidy up the pointers before exit
+  glu.xsup(nsuper+1) = jcolp1; 
+  glu.supno(jcolp1) = nsuper; 
+  xprune(jcol) = nextl;  // Initialize upper bound for pruning
+  glu.xlsub(jcolp1) = nextl; 
+  
+  return 0; 
+}
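+// Note on the T2 supernode test above: column jcol is merged into the current
+// supernode only when its uncompressed subscript count (nextl - jptr) equals
+// that of column jcol-1 minus one, i.e. the column structures nest like the
+// columns of a dense trapezoid; otherwise jsuper is reset and a new supernode
+// starts at jcol.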
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif
diff --git a/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h b/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h
new file mode 100644
index 0000000..170610d
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h
@@ -0,0 +1,106 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+/* 
+ 
+ * NOTE: This file is the modified version of [s,d,c,z]copy_to_ucol.c file in SuperLU 
+ 
+ * -- SuperLU routine (version 2.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * November 15, 1997
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_COPY_TO_UCOL_H
+#define SPARSELU_COPY_TO_UCOL_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Performs numeric block updates (sup-col) in topological order
+ * 
+ * \param jcol current column to update
+ * \param nseg Number of segments in the U part
+ * \param segrep segment representative ...
+ * \param repfnz First nonzero column in each row  ...
+ * \param perm_r Row permutation 
+ * \param dense Store the full representation of the column
+ * \param glu Global LU data. 
+ * \return 0 - successful return 
+ *         > 0 - number of bytes allocated so far when memory ran out
+ * 
+ */
+template <typename Scalar, typename Index>
+Index SparseLUImpl<Scalar,Index>::copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu)
+{  
+  Index ksub, krep, ksupno; 
+    
+  Index jsupno = glu.supno(jcol);
+  
+  // For each nonzero supernode segment of U[*,j] in topological order 
+  Index k = nseg - 1, i; 
+  Index nextu = glu.xusub(jcol); 
+  Index kfnz, isub, segsize; 
+  Index new_next,irow; 
+  Index fsupc, mem; 
+  for (ksub = 0; ksub < nseg; ksub++)
+  {
+    krep = segrep(k); k--; 
+    ksupno = glu.supno(krep); 
+    if (jsupno != ksupno ) // should go into ucol(); 
+    {
+      kfnz = repfnz(krep); 
+      if (kfnz != emptyIdxLU)
+      { // Nonzero U-segment 
+        fsupc = glu.xsup(ksupno); 
+        isub = glu.xlsub(fsupc) + kfnz - fsupc; 
+        segsize = krep - kfnz + 1; 
+        new_next = nextu + segsize; 
+        while (new_next > glu.nzumax) 
+        {
+          mem = memXpand<ScalarVector>(glu.ucol, glu.nzumax, nextu, UCOL, glu.num_expansions); 
+          if (mem) return mem; 
+          mem = memXpand<IndexVector>(glu.usub, glu.nzumax, nextu, USUB, glu.num_expansions); 
+          if (mem) return mem; 
+          
+        }
+        
+        for (i = 0; i < segsize; i++)
+        {
+          irow = glu.lsub(isub); 
+          glu.usub(nextu) = perm_r(irow); // Unlike the L part, the U part is stored in its final order
+          glu.ucol(nextu) = dense(irow); 
+          dense(irow) = Scalar(0.0); 
+          nextu++;
+          isub++;
+        }
+        
+      } // end nonzero U-segment 
+      
+    } // end if jsupno 
+    
+  } // end for each segment
+  glu.xusub(jcol + 1) = nextu; // close U(*,jcol)
+  return 0; 
+}
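+// Note: unlike the L part, U is stored directly in its final row order: the
+// row indices written to usub are already permuted by perm_r (see above).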
+
+} // namespace internal
+} // end namespace Eigen
+
+#endif // SPARSELU_COPY_TO_UCOL_H
diff --git a/Eigen/src/SparseLU/SparseLU_gemm_kernel.h b/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
new file mode 100644
index 0000000..9e4e3e7
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
@@ -0,0 +1,279 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSELU_GEMM_KERNEL_H
+#define EIGEN_SPARSELU_GEMM_KERNEL_H
+
+namespace Eigen {
+
+namespace internal {
+
+
+/** \internal
+  * A general matrix-matrix product kernel optimized for the SparseLU factorization.
+  *  - A, B, and C must be column major
+  *  - lda and ldc must be multiples of the respective packet size
+  *  - C must have the same alignment as A
+  */
+template<typename Scalar,typename Index>
+EIGEN_DONT_INLINE
+void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc)
+{
+  using namespace Eigen::internal;
+  
+  typedef typename packet_traits<Scalar>::type Packet;
+  enum {
+    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
+    PacketSize = packet_traits<Scalar>::size,
+    PM = 8,                             // peeling in M
+    RN = 2,                             // register blocking
+    RK = NumberOfRegisters>=16 ? 4 : 2, // register blocking
+    BM = 4096/sizeof(Scalar),           // number of rows of A-C per chunk
+    SM = PM*PacketSize                  // step along M
+  };
+  Index d_end = (d/RK)*RK;    // number of columns of A (rows of B) suitable for full register blocking
+  Index n_end = (n/RN)*RN;    // number of columns of B-C suitable for processing RN columns at once
+  Index i0 = internal::first_aligned(A,m);
+  
+  eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_aligned(C,m)));
+  
+  // handle the non-aligned rows of A and C without any optimization:
+  for(Index i=0; i<i0; ++i)
+  {
+    for(Index j=0; j<n; ++j)
+    {
+      Scalar c = C[i+j*ldc];
+      for(Index k=0; k<d; ++k)
+        c += B[k+j*ldb] * A[i+k*lda];
+      C[i+j*ldc] = c;
+    }
+  }
+  // process the remaining rows per chunk of BM rows
+  for(Index ib=i0; ib<m; ib+=BM)
+  {
+    Index actual_b = std::min<Index>(BM, m-ib);                 // actual number of rows
+    Index actual_b_end1 = (actual_b/SM)*SM;                   // actual number of rows suitable for peeling
+    Index actual_b_end2 = (actual_b/PacketSize)*PacketSize;   // actual number of rows suitable for vectorization
+    
+    // Let's process two columns of B-C at once
+    for(Index j=0; j<n_end; j+=RN)
+    {
+      const Scalar* Bc0 = B+(j+0)*ldb;
+      const Scalar* Bc1 = B+(j+1)*ldb;
+      
+      for(Index k=0; k<d_end; k+=RK)
+      {
+        
+        // load and expand a RN x RK block of B
+        Packet b00, b10, b20, b30, b01, b11, b21, b31;
+                  b00 = pset1<Packet>(Bc0[0]);
+                  b10 = pset1<Packet>(Bc0[1]);
+        if(RK==4) b20 = pset1<Packet>(Bc0[2]);
+        if(RK==4) b30 = pset1<Packet>(Bc0[3]);
+                  b01 = pset1<Packet>(Bc1[0]);
+                  b11 = pset1<Packet>(Bc1[1]);
+        if(RK==4) b21 = pset1<Packet>(Bc1[2]);
+        if(RK==4) b31 = pset1<Packet>(Bc1[3]);
+        
+        Packet a0, a1, a2, a3, c0, c1, t0, t1;
+        
+        const Scalar* A0 = A+ib+(k+0)*lda;
+        const Scalar* A1 = A+ib+(k+1)*lda;
+        const Scalar* A2 = A+ib+(k+2)*lda;
+        const Scalar* A3 = A+ib+(k+3)*lda;
+        
+        Scalar* C0 = C+ib+(j+0)*ldc;
+        Scalar* C1 = C+ib+(j+1)*ldc;
+        
+                  a0 = pload<Packet>(A0);
+                  a1 = pload<Packet>(A1);
+        if(RK==4)
+        {
+          a2 = pload<Packet>(A2);
+          a3 = pload<Packet>(A3);
+        }
+        else
+        {
+          // workaround "may be used uninitialized in this function" warning
+          a2 = a3 = a0;
+        }
+        
+#define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);}
+#define WORK(I)  \
+                    c0 = pload<Packet>(C0+i+(I)*PacketSize);   \
+                    c1 = pload<Packet>(C1+i+(I)*PacketSize);   \
+                    KMADD(c0, a0, b00, t0)      \
+                    KMADD(c1, a0, b01, t1)      \
+                    a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
+                    KMADD(c0, a1, b10, t0)      \
+                    KMADD(c1, a1, b11, t1)       \
+                    a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
+          if(RK==4) KMADD(c0, a2, b20, t0)       \
+          if(RK==4) KMADD(c1, a2, b21, t1)       \
+          if(RK==4) a2 = pload<Packet>(A2+i+(I+1)*PacketSize); \
+          if(RK==4) KMADD(c0, a3, b30, t0)       \
+          if(RK==4) KMADD(c1, a3, b31, t1)       \
+          if(RK==4) a3 = pload<Packet>(A3+i+(I+1)*PacketSize); \
+                    pstore(C0+i+(I)*PacketSize, c0);           \
+                    pstore(C1+i+(I)*PacketSize, c1)
+        
+        // process rows of A' - C' with aggressive vectorization and peeling 
+        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
+        {
+          EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL1");
+                    prefetch((A0+i+(5)*PacketSize));
+                    prefetch((A1+i+(5)*PacketSize));
+          if(RK==4) prefetch((A2+i+(5)*PacketSize));
+          if(RK==4) prefetch((A3+i+(5)*PacketSize));
+                    WORK(0);
+                    WORK(1);
+                    WORK(2);
+                    WORK(3);
+                    WORK(4);
+                    WORK(5);
+                    WORK(6);
+                    WORK(7);
+        }
+        // process the remaining rows with vectorization only
+        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
+        {
+          WORK(0);
+        }
+#undef WORK
+        // process the remaining rows without vectorization
+        for(Index i=actual_b_end2; i<actual_b; ++i)
+        {
+          if(RK==4)
+          {
+            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
+            C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]+A2[i]*Bc1[2]+A3[i]*Bc1[3];
+          }
+          else
+          {
+            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
+            C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1];
+          }
+        }
+        
+        Bc0 += RK;
+        Bc1 += RK;
+      } // peeled loop on k
+    } // peeled loop on the columns j
+    // process the last column (we now perform a matrix-vector product)
+    if((n-n_end)>0)
+    {
+      const Scalar* Bc0 = B+(n-1)*ldb;
+      
+      for(Index k=0; k<d_end; k+=RK)
+      {
+        
+        // load and expand a 1 x RK block of B
+        Packet b00, b10, b20, b30;
+                  b00 = pset1<Packet>(Bc0[0]);
+                  b10 = pset1<Packet>(Bc0[1]);
+        if(RK==4) b20 = pset1<Packet>(Bc0[2]);
+        if(RK==4) b30 = pset1<Packet>(Bc0[3]);
+        
+        Packet a0, a1, a2, a3, c0, t0/*, t1*/;
+        
+        const Scalar* A0 = A+ib+(k+0)*lda;
+        const Scalar* A1 = A+ib+(k+1)*lda;
+        const Scalar* A2 = A+ib+(k+2)*lda;
+        const Scalar* A3 = A+ib+(k+3)*lda;
+        
+        Scalar* C0 = C+ib+(n_end)*ldc;
+        
+                  a0 = pload<Packet>(A0);
+                  a1 = pload<Packet>(A1);
+        if(RK==4)
+        {
+          a2 = pload<Packet>(A2);
+          a3 = pload<Packet>(A3);
+        }
+        else
+        {
+          // workaround "may be used uninitialized in this function" warning
+          a2 = a3 = a0;
+        }
+        
+#define WORK(I) \
+                  c0 = pload<Packet>(C0+i+(I)*PacketSize);   \
+                  KMADD(c0, a0, b00, t0)       \
+                  a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
+                  KMADD(c0, a1, b10, t0)       \
+                  a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
+        if(RK==4) KMADD(c0, a2, b20, t0)       \
+        if(RK==4) a2 = pload<Packet>(A2+i+(I+1)*PacketSize); \
+        if(RK==4) KMADD(c0, a3, b30, t0)       \
+        if(RK==4) a3 = pload<Packet>(A3+i+(I+1)*PacketSize); \
+                  pstore(C0+i+(I)*PacketSize, c0);
+        
+        // aggressive vectorization and peeling
+        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
+        {
+          EIGEN_ASM_COMMENT("SPARSELU_GEMM_KERNEL2");
+          WORK(0);
+          WORK(1);
+          WORK(2);
+          WORK(3);
+          WORK(4);
+          WORK(5);
+          WORK(6);
+          WORK(7);
+        }
+        // vectorization only
+        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
+        {
+          WORK(0);
+        }
+        // remaining scalars
+        for(Index i=actual_b_end2; i<actual_b; ++i)
+        {
+          if(RK==4) 
+            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
+          else
+            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
+        }
+        
+        Bc0 += RK;
+#undef WORK
+      }
+    }
+    
+    // process the last columns of A, corresponding to the last rows of B
+    Index rd = d-d_end;
+    if(rd>0)
+    {
+      for(Index j=0; j<n; ++j)
+      {
+        enum {
+          Alignment = PacketSize>1 ? Aligned : 0
+        };
+        typedef Map<Matrix<Scalar,Dynamic,1>, Alignment > MapVector;
+        typedef Map<const Matrix<Scalar,Dynamic,1>, Alignment > ConstMapVector;
+        if(rd==1)       MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b);
+        
+        else if(rd==2)  MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
+                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b);
+        
+        else            MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
+                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b)
+                                                        + B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b);
+      }
+    }
+  
+  } // blocking on the rows of A and C
+}
+#undef KMADD
+
+} // namespace internal
+
+} // namespace Eigen
+
+#endif // EIGEN_SPARSELU_GEMM_KERNEL_H
diff --git a/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
new file mode 100644
index 0000000..7a4e430
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h
@@ -0,0 +1,127 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* This file is a modified version of heap_relax_snode.c file in SuperLU
+ * -- SuperLU routine (version 3.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * October 15, 2003
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+
+#ifndef SPARSELU_HEAP_RELAX_SNODE_H
+#define SPARSELU_HEAP_RELAX_SNODE_H
+
+namespace Eigen {
+namespace internal {
+
+/** 
+ * \brief Identify the initial relaxed supernodes
+ * 
+ * This routine is applied to a symmetric elimination tree.
+ * It assumes that the matrix has been reordered according to the postorder of the etree
+ * \param n The number of columns
+ * \param et elimination tree 
+ * \param relax_columns Maximum number of columns allowed in a relaxed snode 
+ * \param descendants Number of descendants of each node in the etree
+ * \param relax_end last column in a supernode
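+ *
+ * As a sketch, the descendant counts used below satisfy the recurrence
+ * \code
+ * // j runs in postorder; node n is the dummy root
+ * for (Index j = 0; j < n; ++j)
+ *   if (et(j) != n) descendants(et(j)) += descendants(j) + 1;
+ * \endcode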
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end)
+{
+  
+  // The etree may not be postordered, but it is heap ordered
+  IndexVector post;
+  internal::treePostorder(n, et, post); // Post order etree
+  IndexVector inv_post(n+1); 
+  Index i;
+  for (i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()
+  
+  // Renumber etree in postorder 
+  IndexVector iwork(n);
+  IndexVector et_save(n+1);
+  for (i = 0; i < n; ++i)
+  {
+    iwork(post(i)) = post(et(i));
+  }
+  et_save = et; // Save the original etree
+  et = iwork; 
+  
+  // compute the number of descendants of each node in the etree
+  relax_end.setConstant(emptyIdxLU);
+  Index j, parent; 
+  descendants.setZero();
+  for (j = 0; j < n; j++) 
+  {
+    parent = et(j);
+    if (parent != n) // not the dummy root
+      descendants(parent) += descendants(j) + 1;
+  }
+  // Identify the relaxed supernodes by postorder traversal of the etree
+  Index snode_start; // beginning of a snode 
+  Index k;
+  Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree 
+  Index nsuper_et = 0; // Number of relaxed snodes in the original etree 
+  Index l; 
+  for (j = 0; j < n; )
+  {
+    parent = et(j);
+    snode_start = j; 
+    while ( parent != n && descendants(parent) < relax_columns ) 
+    {
+      j = parent; 
+      parent = et(j);
+    }
+    // Found a supernode in postordered etree, j is the last column 
+    ++nsuper_et_post;
+    k = n;
+    for (i = snode_start; i <= j; ++i)
+      k = (std::min)(k, inv_post(i));
+    l = inv_post(j);
+    if ( (l - k) == (j - snode_start) )  // Same number of columns in the snode
+    {
+      // This is also a supernode in the original etree
+      relax_end(k) = l; // Record last column 
+      ++nsuper_et; 
+    }
+    else 
+    {
+      for (i = snode_start; i <= j; ++i) 
+      {
+        l = inv_post(i);
+        if (descendants(i) == 0) 
+        {
+          relax_end(l) = l;
+          ++nsuper_et;
+        }
+      }
+    }
+    j++;
+    // Search for a new leaf
+    while (j < n && descendants(j) != 0) j++; // test j < n first to avoid reading past the end
+  } // End postorder traversal of the etree
+  
+  // Recover the original etree
+  et = et_save; 
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // SPARSELU_HEAP_RELAX_SNODE_H
diff --git a/Eigen/src/SparseLU/SparseLU_kernel_bmod.h b/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
new file mode 100644
index 0000000..0d0283b
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_kernel_bmod.h
@@ -0,0 +1,130 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef SPARSELU_KERNEL_BMOD_H
+#define SPARSELU_KERNEL_BMOD_H
+
+namespace Eigen {
+namespace internal {
+  
+/**
+ * \brief Performs numeric block updates from a given supernode to a single column
+ * 
+ * \param segsize Size of the segment (and blocks) to use for updates
+ * \param[in,out] dense Packed values of the original matrix
+ * \param tempv temporary vector to use for updates
+ * \param lusup array containing the supernodes
+ * \param lda Leading dimension in the supernode
+ * \param nrow Number of rows in the rectangular part of the supernode
+ * \param lsub compressed row subscripts of supernodes
+ * \param lptr pointer to the first column of the current supernode in lsub
+ * \param no_zeros Number of nonzero elements before the diagonal part of the supernode
+ * \return 0 on success
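+ *
+ * Schematically, with A the unit-lower triangular part and B the rectangular
+ * part of the supernode, the kernel below performs (a sketch):
+ * \code
+ * u = A.triangularView<UnitLower>().solve(u); // dense triangular solve
+ * l = B * u;                                  // dense matrix-vector product
+ * \endcode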
+ */
+template <int SegSizeAtCompileTime> struct LU_kernel_bmod
+{
+  template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index>
+  static EIGEN_DONT_INLINE void run(const int segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda,
+                                    const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros);
+};
+
+template <int SegSizeAtCompileTime>
+template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index>
+EIGEN_DONT_INLINE void LU_kernel_bmod<SegSizeAtCompileTime>::run(const int segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda,
+                                                                  const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros)
+{
+  typedef typename ScalarVector::Scalar Scalar;
+  // First, copy U[*,j] segment from dense(*) to tempv(*).
+  // The result of the triangular solve is in tempv[*];
+  // the result of the matrix-vector update is in dense[*]
+  Index isub = lptr + no_zeros; 
+  int i;
+  Index irow;
+  for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++)
+  {
+    irow = lsub(isub); 
+    tempv(i) = dense(irow); 
+    ++isub; 
+  }
+  // Dense triangular solve -- start effective triangle
+  luptr += lda * no_zeros + no_zeros; 
+  // Form Eigen matrix and vector 
+  Map<Matrix<Scalar,SegSizeAtCompileTime,SegSizeAtCompileTime>, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) );
+  Map<Matrix<Scalar,SegSizeAtCompileTime,1> > u(tempv.data(), segsize);
+  
+  u = A.template triangularView<UnitLower>().solve(u); 
+  
+  // Dense matrix-vector product y <-- B*x 
+  luptr += segsize;
+  const Index PacketSize = internal::packet_traits<Scalar>::size;
+  Index ldl = internal::first_multiple(nrow, PacketSize);
+  Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );
+  Index aligned_offset = internal::first_aligned(tempv.data()+segsize, PacketSize);
+  Index aligned_with_B_offset = (PacketSize-internal::first_aligned(B.data(), PacketSize))%PacketSize;
+  Map<Matrix<Scalar,Dynamic,1>, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) );
+  
+  l.setZero();
+  internal::sparselu_gemm<Scalar>(l.rows(), l.cols(), B.cols(), B.data(), B.outerStride(), u.data(), u.outerStride(), l.data(), l.outerStride());
+  
+  // Scatter tempv[] into SPA dense[] as a temporary storage 
+  isub = lptr + no_zeros;
+  for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++)
+  {
+    irow = lsub(isub++); 
+    dense(irow) = tempv(i);
+  }
+  
+  // Scatter l into SPA dense[]
+  for (i = 0; i < nrow; i++)
+  {
+    irow = lsub(isub++); 
+    dense(irow) -= l(i);
+  } 
+}
+
+template <> struct LU_kernel_bmod<1>
+{
+  template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index>
+  static EIGEN_DONT_INLINE void run(const int /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr,
+                                    const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros);
+};
+
+
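+// Specialization for segments of size 1: no triangular solve is needed and the
+// update reduces to an axpy, dense(irow) -= f * L(irow,j), unrolled by two below.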
+template <typename BlockScalarVector, typename ScalarVector, typename IndexVector, typename Index>
+EIGEN_DONT_INLINE void LU_kernel_bmod<1>::run(const int /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr,
+                                              const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros)
+{
+  typedef typename ScalarVector::Scalar Scalar;
+  Scalar f = dense(lsub(lptr + no_zeros));
+  luptr += lda * no_zeros + no_zeros + 1;
+  const Scalar* a(lusup.data() + luptr);
+  const /*typename IndexVector::Scalar*/Index*  irow(lsub.data()+lptr + no_zeros + 1);
+  Index i = 0;
+  for (; i+1 < nrow; i+=2)
+  {
+    Index i0 = *(irow++);
+    Index i1 = *(irow++);
+    Scalar a0 = *(a++);
+    Scalar a1 = *(a++);
+    Scalar d0 = dense.coeff(i0);
+    Scalar d1 = dense.coeff(i1);
+    d0 -= f*a0;
+    d1 -= f*a1;
+    dense.coeffRef(i0) = d0;
+    dense.coeffRef(i1) = d1;
+  }
+  if(i<nrow)
+    dense.coeffRef(*(irow++)) -= f * *(a++);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif // SPARSELU_KERNEL_BMOD_H
diff --git a/Eigen/src/SparseLU/SparseLU_panel_bmod.h b/Eigen/src/SparseLU/SparseLU_panel_bmod.h
new file mode 100644
index 0000000..da0e0fc
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_panel_bmod.h
@@ -0,0 +1,223 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* 
+ 
+ * NOTE: This file is the modified version of [s,d,c,z]panel_bmod.c file in SuperLU 
+ 
+ * -- SuperLU routine (version 3.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * October 15, 2003
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_PANEL_BMOD_H
+#define SPARSELU_PANEL_BMOD_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Performs numeric block updates (sup-panel) in topological order.
+ * 
+ * Before entering this routine, the original nonzeros in the panel
+ * were already copied into the spa[m,w]
+ * 
+ * \param m number of rows in the matrix
+ * \param w Panel size
+ * \param jcol Starting column of the panel
+ * \param nseg Number of segments in the U part
+ * \param dense Store the full representation of the panel 
+ * \param tempv working array 
+ * \param segrep Segment representatives, i.e. first nonzero row of each segment
+ * \param repfnz First nonzero rows
+ * \param glu Global LU data. 
+ * 
+ * 
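+ * When the updating supernode has more than one column, the update below is
+ * performed as a block operation (a sketch, with A the unit-lower triangular
+ * part and B the rectangular part of the supernode):
+ * \code
+ * U = A.triangularView<UnitLower>().solve(U); // panel-wide triangular solve
+ * L = B * U;                                  // block update of the rows below
+ * \endcode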
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::panel_bmod(const Index m, const Index w, const Index jcol, 
+                                            const Index nseg, ScalarVector& dense, ScalarVector& tempv,
+                                            IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu)
+{
+  
+  Index ksub,jj,nextl_col; 
+  Index fsupc, nsupc, nsupr, nrow; 
+  Index krep, kfnz; 
+  Index lptr; // points to the row subscripts of a supernode 
+  Index luptr; // ...
+  Index segsize, no_zeros;
+  // For each nonzero supernode segment of U[*,j] in topological order
+  Index k = nseg - 1; 
+  const Index PacketSize = internal::packet_traits<Scalar>::size;
+  
+  for (ksub = 0; ksub < nseg; ksub++)
+  { // For each updating supernode
+    /* krep = representative of current k-th supernode
+     * fsupc =  first supernodal column
+     * nsupc = number of columns in a supernode
+     * nsupr = number of rows in a supernode
+     */
+    krep = segrep(k); k--; 
+    fsupc = glu.xsup(glu.supno(krep)); 
+    nsupc = krep - fsupc + 1; 
+    nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); 
+    nrow = nsupr - nsupc; 
+    lptr = glu.xlsub(fsupc); 
+    
+    // loop over the panel columns to detect the actual number of columns and rows
+    Index u_rows = 0;
+    Index u_cols = 0;
+    for (jj = jcol; jj < jcol + w; jj++)
+    {
+      nextl_col = (jj-jcol) * m; 
+      VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
+      
+      kfnz = repfnz_col(krep); 
+      if ( kfnz == emptyIdxLU ) 
+        continue; // skip any zero segment
+      
+      segsize = krep - kfnz + 1;
+      u_cols++;
+      u_rows = (std::max)(segsize,u_rows);
+    }
+    
+    if(nsupc >= 2)
+    { 
+      Index ldu = internal::first_multiple<Index>(u_rows, PacketSize);
+      Map<Matrix<Scalar,Dynamic,Dynamic>, Aligned,  OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu));
+      
+      // gather U
+      Index u_col = 0;
+      for (jj = jcol; jj < jcol + w; jj++)
+      {
+        nextl_col = (jj-jcol) * m; 
+        VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
+        VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
+        
+        kfnz = repfnz_col(krep); 
+        if ( kfnz == emptyIdxLU ) 
+          continue; // skip any zero segment
+        
+        segsize = krep - kfnz + 1;
+        luptr = glu.xlusup(fsupc);    
+        no_zeros = kfnz - fsupc; 
+        
+        Index isub = lptr + no_zeros;
+        Index off = u_rows-segsize;
+        for (Index i = 0; i < off; i++) U(i,u_col) = 0;
+        for (Index i = 0; i < segsize; i++)
+        {
+          Index irow = glu.lsub(isub); 
+          U(i+off,u_col) = dense_col(irow); 
+          ++isub; 
+        }
+        u_col++;
+      }
+      // solve U = A^-1 U
+      luptr = glu.xlusup(fsupc);
+      Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc);
+      no_zeros = (krep - u_rows + 1) - fsupc;
+      luptr += lda * no_zeros + no_zeros;
+      Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );
+      U = A.template triangularView<UnitLower>().solve(U);
+      
+      // update
+      luptr += u_rows;
+      Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );
+      eigen_assert(tempv.size()>w*ldu + nrow*w + 1);
+      
+      Index ldl = internal::first_multiple<Index>(nrow, PacketSize);
+      Index offset = (PacketSize-internal::first_aligned(B.data(), PacketSize)) % PacketSize;
+      Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));
+      
+      L.setZero();
+      internal::sparselu_gemm<Scalar>(L.rows(), L.cols(), B.cols(), B.data(), B.outerStride(), U.data(), U.outerStride(), L.data(), L.outerStride());
+      
+      // scatter U and L
+      u_col = 0;
+      for (jj = jcol; jj < jcol + w; jj++)
+      {
+        nextl_col = (jj-jcol) * m; 
+        VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
+        VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
+        
+        kfnz = repfnz_col(krep); 
+        if ( kfnz == emptyIdxLU ) 
+          continue; // skip any zero segment
+        
+        segsize = krep - kfnz + 1;
+        no_zeros = kfnz - fsupc; 
+        Index isub = lptr + no_zeros;
+        
+        Index off = u_rows-segsize;
+        for (Index i = 0; i < segsize; i++)
+        {
+          Index irow = glu.lsub(isub++); 
+          dense_col(irow) = U.coeff(i+off,u_col);
+          U.coeffRef(i+off,u_col) = 0;
+        }
+        
+        // Scatter l into SPA dense[]
+        for (Index i = 0; i < nrow; i++)
+        {
+          Index irow = glu.lsub(isub++); 
+          dense_col(irow) -= L.coeff(i,u_col);
+          L.coeffRef(i,u_col) = 0;
+        }
+        u_col++;
+      }
+    }
+    else // level 2 only
+    {
+      // Sequence through each column in the panel
+      for (jj = jcol; jj < jcol + w; jj++)
+      {
+        nextl_col = (jj-jcol) * m; 
+        VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
+        VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
+        
+        kfnz = repfnz_col(krep); 
+        if ( kfnz == emptyIdxLU ) 
+          continue; // skip any zero segment
+        
+        segsize = krep - kfnz + 1;
+        luptr = glu.xlusup(fsupc);
+        
+        Index lda = glu.xlusup(fsupc+1)-glu.xlusup(fsupc);// nsupr
+        
+        // Perform a triangular solve and block update,
+        // then scatter the result of the sup-col update into dense[]
+        no_zeros = kfnz - fsupc; 
+              if(segsize==1)  LU_kernel_bmod<1>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
+        else  if(segsize==2)  LU_kernel_bmod<2>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
+        else  if(segsize==3)  LU_kernel_bmod<3>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);
+        else                  LU_kernel_bmod<Dynamic>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); 
+      } // End for each column in the panel 
+    }
+    
+  } // End for each updating supernode
+} // end panel bmod
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // SPARSELU_PANEL_BMOD_H
diff --git a/Eigen/src/SparseLU/SparseLU_panel_dfs.h b/Eigen/src/SparseLU/SparseLU_panel_dfs.h
new file mode 100644
index 0000000..dc0054e
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_panel_dfs.h
@@ -0,0 +1,258 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* 
+ 
+ * NOTE: This file is the modified version of [s,d,c,z]panel_dfs.c file in SuperLU 
+ 
+ * -- SuperLU routine (version 2.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * November 15, 1997
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_PANEL_DFS_H
+#define SPARSELU_PANEL_DFS_H
+
+namespace Eigen {
+
+namespace internal {
+  
+template<typename IndexVector>
+struct panel_dfs_traits
+{
+  typedef typename IndexVector::Scalar Index;
+  panel_dfs_traits(Index jcol, Index* marker)
+    : m_jcol(jcol), m_marker(marker)
+  {}
+  bool update_segrep(Index krep, Index jj)
+  {
+    if(m_marker[krep]<m_jcol)
+    {
+      m_marker[krep] = jj; 
+      return true;
+    }
+    return false;
+  }
+  void mem_expand(IndexVector& /*glu.lsub*/, Index /*nextl*/, Index /*chmark*/) {}
+  enum { ExpandMem = false };
+  Index m_jcol;
+  Index* m_marker;
+};
+
+
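+// Iterative depth-first search started at row krow of column jj: parent/xplore
+// mimic the recursion stack while walking the graph of L; rows reached in L are
+// appended to panel_lsub, and U-segments are recorded through segrep/repfnz.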
+template <typename Scalar, typename Index>
+template <typename Traits>
+void SparseLUImpl<Scalar,Index>::dfs_kernel(const Index jj, IndexVector& perm_r,
+                   Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
+                   Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
+                   IndexVector& xplore, GlobalLU_t& glu,
+                   Index& nextl_col, Index krow, Traits& traits
+                  )
+{
+  
+  Index kmark = marker(krow);
+      
+  // For each unmarked krow of jj
+  marker(krow) = jj; 
+  Index kperm = perm_r(krow); 
+  if (kperm == emptyIdxLU ) {
+    // krow is in L : place it in structure of L(*, jj)
+    panel_lsub(nextl_col++) = krow;  // krow is indexed into A
+    
+    traits.mem_expand(panel_lsub, nextl_col, kmark);
+  }
+  else 
+  {
+    // krow is in U : if its supernode-representative krep
+    // has been explored, update repfnz(*)
+    // krep = supernode representative of the current row
+    Index krep = glu.xsup(glu.supno(kperm)+1) - 1; 
+    // First nonzero element in the current column:
+    Index myfnz = repfnz_col(krep); 
+    
+    if (myfnz != emptyIdxLU )
+    {
+      // Representative visited before
+      if (myfnz > kperm ) repfnz_col(krep) = kperm; 
+      
+    }
+    else 
+    {
+      // Otherwise, perform dfs starting at krep
+      Index oldrep = emptyIdxLU; 
+      parent(krep) = oldrep; 
+      repfnz_col(krep) = kperm; 
+      Index xdfs =  glu.xlsub(krep); 
+      Index maxdfs = xprune(krep); 
+      
+      Index kpar;
+      do 
+      {
+        // For each unmarked kchild of krep
+        while (xdfs < maxdfs) 
+        {
+          Index kchild = glu.lsub(xdfs); 
+          xdfs++; 
+          Index chmark = marker(kchild); 
+          
+          if (chmark != jj ) 
+          {
+            marker(kchild) = jj; 
+            Index chperm = perm_r(kchild); 
+            
+            if (chperm == emptyIdxLU) 
+            {
+              // case kchild is in L: place it in L(*, j)
+              panel_lsub(nextl_col++) = kchild;
+              traits.mem_expand(panel_lsub, nextl_col, chmark);
+            }
+            else
+            {
+              // case kchild is in U :
+              // chrep = its supernode-rep. If its rep has been explored, 
+              // update its repfnz(*)
+              Index chrep = glu.xsup(glu.supno(chperm)+1) - 1; 
+              myfnz = repfnz_col(chrep); 
+              
+              if (myfnz != emptyIdxLU) 
+              { // Visited before 
+                if (myfnz > chperm) 
+                  repfnz_col(chrep) = chperm; 
+              }
+              else 
+              { // Cont. dfs at snode-rep of kchild
+                xplore(krep) = xdfs; 
+                oldrep = krep; 
+                krep = chrep; // Go deeper down G(L)
+                parent(krep) = oldrep; 
+                repfnz_col(krep) = chperm; 
+                xdfs = glu.xlsub(krep); 
+                maxdfs = xprune(krep); 
+                
+              } // end if myfnz != -1
+            } // end if chperm == -1 
+                
+          } // end if chmark !=jj
+        } // end while xdfs < maxdfs
+        
+        // krow has no more unexplored nbrs :
+        //    Place snode-rep krep in postorder DFS, if this 
+        //    segment is seen for the first time. (Note that 
+        //    "repfnz(krep)" may change later.)
+        //    Backtrack dfs to its parent
+        if(traits.update_segrep(krep,jj))
+        //if (marker1(krep) < jcol )
+        {
+          segrep(nseg) = krep; 
+          ++nseg; 
+          //marker1(krep) = jj; 
+        }
+        
+        kpar = parent(krep); // Pop recursion, mimic recursion 
+        if (kpar == emptyIdxLU) 
+          break; // dfs done 
+        krep = kpar; 
+        xdfs = xplore(krep); 
+        maxdfs = xprune(krep); 
+
+      } while (kpar != emptyIdxLU); // Do until empty stack 
+      
+    } // end if (myfnz == -1)
+
+  } // end if (kperm == -1)   
+}
+
+/**
+ * \brief Performs a symbolic factorization on a panel of columns [jcol, jcol+w)
+ * 
+ * A supernode representative is the last column of a supernode.
+ * The nonzeros in U[*,j] are segments that end at supernodes representatives
+ * 
+ * The routine returns a list of the supernodal representatives 
+ * in topological order of the dfs that generates them. This list is 
+ * a superset of the topological order of each individual column within 
+ * the panel.
+ * The location of the first nonzero in each supernodal segment 
+ * (supernodal entry location) is also returned. Each column has 
+ * a separate list for this purpose. 
+ * 
+ * Two marker arrays are used for the dfs:
+ *    marker[i] == jj, if i was visited during dfs of current column jj;
+ *    marker1[i] >= jcol, if i was visited by earlier columns in this panel; 
+ * 
+ * \param[in] m number of rows in the matrix
+ * \param[in] w Panel size
+ * \param[in] jcol Starting column of the panel
+ * \param[in] A Input matrix in column-major storage
+ * \param[in] perm_r Row permutation
+ * \param[out] nseg Number of U segments
+ * \param[out] dense Accumulate the column vectors of the panel
+ * \param[out] panel_lsub Subscripts of the row in the panel 
+ * \param[out] segrep Segment representative i.e first nonzero row of each segment
+ * \param[out] repfnz First nonzero location in each row
+ * \param[out] xprune The pruned elimination tree
+ * \param[out] marker work vector
+ * \param  parent The elimination tree
+ * \param xplore work vector
+ * \param glu The global data structure
+ * 
+ */
+
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)
+{
+  Index nextl_col; // Next available position in panel_lsub[*,jj] 
+  
+  // Initialize pointers 
+  VectorBlock<IndexVector> marker1(marker, m, m); 
+  nseg = 0; 
+  
+  panel_dfs_traits<IndexVector> traits(jcol, marker1.data());
+  
+  // For each column in the panel 
+  for (Index jj = jcol; jj < jcol + w; jj++) 
+  {
+    nextl_col = (jj - jcol) * m; 
+    
+    VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero location in each row
+    VectorBlock<ScalarVector> dense_col(dense,nextl_col, m); // Accumulate a column vector here
+    
+    
+    // For each nnz in A[*, jj] do depth first search
+    for (typename MatrixType::InnerIterator it(A, jj); it; ++it)
+    {
+      Index krow = it.row(); 
+      dense_col(krow) = it.value();
+      
+      Index kmark = marker(krow); 
+      if (kmark == jj) 
+        continue; // krow visited before, go to the next nonzero
+      
+      dfs_kernel(jj, perm_r, nseg, panel_lsub, segrep, repfnz_col, xprune, marker, parent,
+                   xplore, glu, nextl_col, krow, traits);
+    }// end for nonzeros in column jj
+    
+  } // end for column jj
+}
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // SPARSELU_PANEL_DFS_H
diff --git a/Eigen/src/SparseLU/SparseLU_pivotL.h b/Eigen/src/SparseLU/SparseLU_pivotL.h
new file mode 100644
index 0000000..457789c
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_pivotL.h
@@ -0,0 +1,136 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* 
+ 
+ * NOTE: This file is the modified version of xpivotL.c file in SuperLU 
+ 
+ * -- SuperLU routine (version 3.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * October 15, 2003
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_PIVOTL_H
+#define SPARSELU_PIVOTL_H
+
+namespace Eigen {
+namespace internal {
+  
+/**
+ * \brief Performs the numerical pivoting on the current column of L, and the CDIV operation.
+ * 
+ * Pivot policy :
+ * (1) Compute thresh = u * max_(i>=j) abs(A_ij);
+ * (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN
+ *           pivot row = k;
+ *       ELSE IF abs(A_jj) >= thresh THEN
+ *           pivot row = j;
+ *       ELSE
+ *           pivot row = m;
+ * 
+ *   Note: If you absolutely want to use a given pivot order, then set u=0.0.
+ * 
+ * \param jcol The current column of L
+ * \param diagpivotthresh diagonal pivoting threshold
+ * \param[in,out] perm_r Row permutation (threshold pivoting)
+ * \param[in] iperm_c column permutation - used to find the diagonal of Pc*A*Pc'
+ * \param[out] pivrow  The pivot row
+ * \param glu Global LU data
+ * \return 0 if success, i > 0 if U(i,i) is exactly zero 
+ * 
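+ * A sketch of this policy in code, with pivmax the largest magnitude in the
+ * column and pivptr its position, as computed below:
+ * \code
+ * RealScalar thresh = diagpivotthresh * pivmax;
+ * if (diag >= 0 && abs(lu_col_ptr[diag]) >= thresh)
+ *   pivptr = diag; // the diagonal passes the threshold test
+ * pivrow = lsub_ptr[pivptr];
+ * \endcode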
+ */
+template <typename Scalar, typename Index>
+Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu)
+{
+  
+  Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol
+  Index nsupc = jcol - fsupc; // Number of columns in the supernode portion, excluding jcol; nsupc >=0
+  Index lptr = glu.xlsub(fsupc); // pointer to the starting location of the row subscripts for this supernode portion
+  Index nsupr = glu.xlsub(fsupc+1) - lptr; // Number of rows in the supernode
+  Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); // leading dimension
+  Scalar* lu_sup_ptr = &(glu.lusup.data()[glu.xlusup(fsupc)]); // Start of the current supernode
+  Scalar* lu_col_ptr = &(glu.lusup.data()[glu.xlusup(jcol)]); // Start of jcol in the supernode
+  Index* lsub_ptr = &(glu.lsub.data()[lptr]); // Start of row indices of the supernode
+  
+  // Determine the largest abs numerical value for partial pivoting 
+  Index diagind = iperm_c(jcol); // diagonal index 
+  RealScalar pivmax = 0.0; 
+  Index pivptr = nsupc; 
+  Index diag = emptyIdxLU; 
+  RealScalar rtemp;
+  Index isub, icol, itemp, k; 
+  for (isub = nsupc; isub < nsupr; ++isub) {
+    using std::abs;
+    rtemp = abs(lu_col_ptr[isub]);
+    if (rtemp > pivmax) {
+      pivmax = rtemp; 
+      pivptr = isub;
+    } 
+    if (lsub_ptr[isub] == diagind) diag = isub;
+  }
+  
+  // Test for singularity
+  if ( pivmax == 0.0 ) {
+    pivrow = lsub_ptr[pivptr];
+    perm_r(pivrow) = jcol;
+    return (jcol+1);
+  }
+  
+  RealScalar thresh = diagpivotthresh * pivmax; 
+  
+  // Choose appropriate pivotal element 
+  
+  {
+    // Test if the diagonal element can be used as a pivot (given the threshold value)
+    if (diag >= 0 ) 
+    {
+      // Diagonal element exists
+      using std::abs;
+      rtemp = abs(lu_col_ptr[diag]);
+      if (rtemp != 0.0 && rtemp >= thresh) pivptr = diag;
+    }
+    pivrow = lsub_ptr[pivptr];
+  }
+  
+  // Record pivot row
+  perm_r(pivrow) = jcol; 
+  // Interchange row subscripts
+  if (pivptr != nsupc )
+  {
+    std::swap( lsub_ptr[pivptr], lsub_ptr[nsupc] );
+    // Interchange numerical values as well, for the two rows in the whole snode
+    // such that L is indexed the same way as A
+    for (icol = 0; icol <= nsupc; icol++)
+    {
+      itemp = pivptr + icol * lda; 
+      std::swap(lu_sup_ptr[itemp], lu_sup_ptr[nsupc + icol * lda]);
+    }
+  }
+  // cdiv operations
+  Scalar temp = Scalar(1.0) / lu_col_ptr[nsupc];
+  for (k = nsupc+1; k < nsupr; k++)
+    lu_col_ptr[k] *= temp; 
+  return 0;
+}
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // SPARSELU_PIVOTL_H
diff --git a/Eigen/src/SparseLU/SparseLU_pruneL.h b/Eigen/src/SparseLU/SparseLU_pruneL.h
new file mode 100644
index 0000000..66460d1
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_pruneL.h
@@ -0,0 +1,135 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* 
+ 
+ * NOTE: This file is the modified version of [s,d,c,z]pruneL.c file in SuperLU 
+ 
+ * -- SuperLU routine (version 2.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * November 15, 1997
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSELU_PRUNEL_H
+#define SPARSELU_PRUNEL_H
+
+namespace Eigen {
+namespace internal {
+
+/**
+ * \brief Prunes the L-structure.
+ *
+ * It prunes the L-structure of the supernodes whose L-structure contains the current pivot row "pivrow".
+ * 
+ * 
+ * \param jcol The current column of L
+ * \param[in] perm_r Row permutation
+ * \param[out] pivrow  The pivot row
+ * \param nseg Number of segments
+ * \param segrep Segment representatives of the nonzero U-segments
+ * \param repfnz First nonzero location in each segment of the current column
+ * \param[out] xprune Pruned position in the L-structure of each supernode
+ * \param glu Global LU data
+ * 
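+ * The pruned region of each supernode is compacted with a two-pointer,
+ * quicksort-style partition moving not-yet-pivoted rows to the back (a sketch):
+ * \code
+ * while (kmin <= kmax) {
+ *   if      (perm_r(lsub(kmax)) == emptyIdxLU) kmax--; // back entry not pivoted: in place
+ *   else if (perm_r(lsub(kmin)) != emptyIdxLU) kmin++; // front entry pivoted: in place
+ *   else    std::swap(lsub(kmin++), lsub(kmax--));     // otherwise exchange the two
+ * }
+ * \endcode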
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu)
+{
+  // For each supernode-rep irep in U[*,j]
+  Index jsupno = glu.supno(jcol); 
+  Index i,irep,irep1; 
+  bool movnum, do_prune = false; 
+  Index kmin = 0, kmax = 0, minloc, maxloc,krow; 
+  for (i = 0; i < nseg; i++)
+  {
+    irep = segrep(i); 
+    irep1 = irep + 1; 
+    do_prune = false; 
+    
+    // Don't prune with a zero U-segment 
+    if (repfnz(irep) == emptyIdxLU) continue; 
+    
+    // If a snode overlaps with the next panel, then the U-segment
+    // is fragmented into two parts -- irep and irep1. We should let 
+    // pruning occur at the rep-column in irep1's snode.
+    if (glu.supno(irep) == glu.supno(irep1) ) continue; // don't prune 
+    
+    // If it has not been pruned & it has a nonz in row L(pivrow,i)
+    if (glu.supno(irep) != jsupno )
+    {
+      if ( xprune (irep) >= glu.xlsub(irep1) )
+      {
+        kmin = glu.xlsub(irep);
+        kmax = glu.xlsub(irep1) - 1; 
+        for (krow = kmin; krow <= kmax; krow++)
+        {
+          if (glu.lsub(krow) == pivrow) 
+          {
+            do_prune = true; 
+            break; 
+          }
+        }
+      }
+      
+      if (do_prune) 
+      {
+        // do a quicksort-type partition
+        // movnum=true means that the num values have to be exchanged
+        movnum = false; 
+        if (irep == glu.xsup(glu.supno(irep)) ) // Snode of size 1 
+          movnum = true; 
+        
+        while (kmin <= kmax)
+        {
+          if (perm_r(glu.lsub(kmax)) == emptyIdxLU)
+            kmax--; 
+          else if ( perm_r(glu.lsub(kmin)) != emptyIdxLU)
+            kmin++;
+          else 
+          {
+            // kmin below pivrow (not yet pivoted), and kmax
+            // above pivrow: interchange the two subscripts
+            std::swap(glu.lsub(kmin), glu.lsub(kmax)); 
+            
+            // If the supernode has only one column, then we 
+            // only keep one set of subscripts. For any subscript
+            // interchange performed, a similar interchange must be
+            // done on the numerical values.
+            if (movnum) 
+            {
+              minloc = glu.xlusup(irep) + ( kmin - glu.xlsub(irep) ); 
+              maxloc = glu.xlusup(irep) + ( kmax - glu.xlsub(irep) ); 
+              std::swap(glu.lusup(minloc), glu.lusup(maxloc)); 
+            }
+            kmin++;
+            kmax--;
+          }
+        } // end while 
+        
+        xprune(irep) = kmin;  //Pruning 
+      } // end if do_prune 
+    } // end pruning 
+  } // End for each U-segment
+}
+
+} // end namespace internal
+} // end namespace Eigen
+
+#endif // SPARSELU_PRUNEL_H
diff --git a/Eigen/src/SparseLU/SparseLU_relax_snode.h b/Eigen/src/SparseLU/SparseLU_relax_snode.h
new file mode 100644
index 0000000..58ec32e
--- /dev/null
+++ b/Eigen/src/SparseLU/SparseLU_relax_snode.h
@@ -0,0 +1,83 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+/* This file is a modified version of heap_relax_snode.c file in SuperLU
+ * -- SuperLU routine (version 3.0) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * October 15, 2003
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+
+#ifndef SPARSELU_RELAX_SNODE_H
+#define SPARSELU_RELAX_SNODE_H
+
+namespace Eigen {
+
+namespace internal {
+ 
+/** 
+ * \brief Identify the initial relaxed supernodes
+ * 
+ * This routine is applied to a column elimination tree. 
+ * It assumes that the matrix has been reordered according to the postorder of the etree
+ * \param n  the number of columns
+ * \param et elimination tree 
+ * \param relax_columns Maximum number of columns allowed in a relaxed snode 
+ * \param descendants Number of descendants of each node in the etree
+ * \param relax_end last column in a supernode
+ */
+template <typename Scalar, typename Index>
+void SparseLUImpl<Scalar,Index>::relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end)
+{
+  
+  // compute the number of descendants of each node in the etree
+  Index j, parent; 
+  relax_end.setConstant(emptyIdxLU);
+  descendants.setZero();
+  for (j = 0; j < n; j++) 
+  {
+    parent = et(j);
+    if (parent != n) // not the dummy root
+      descendants(parent) += descendants(j) + 1;
+  }
+  // Identify the relaxed supernodes by postorder traversal of the etree
+  Index snode_start; // beginning of a snode 
+  for (j = 0; j < n; )
+  {
+    parent = et(j);
+    snode_start = j; 
+    while ( parent != n && descendants(parent) < relax_columns ) 
+    {
+      j = parent; 
+      parent = et(j);
+    }
+    // Found a supernode in postordered etree, j is the last column 
+    relax_end(snode_start) = j; // Record last column
+    j++;
+    // Search for a new leaf
+    while (j < n && descendants(j) != 0) j++; // test j < n first to avoid reading past the end
+  } // End postorder traversal of the etree
+  
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+#endif
diff --git a/Eigen/src/SparseQR/CMakeLists.txt b/Eigen/src/SparseQR/CMakeLists.txt
new file mode 100644
index 0000000..f9ddf2b
--- /dev/null
+++ b/Eigen/src/SparseQR/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SparseQR_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_SparseQR_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseQR/ COMPONENT Devel
+  )
diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h
new file mode 100644
index 0000000..a00bd5d
--- /dev/null
+++ b/Eigen/src/SparseQR/SparseQR.h
@@ -0,0 +1,714 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012-2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+// Copyright (C) 2012-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_QR_H
+#define EIGEN_SPARSE_QR_H
+
+namespace Eigen {
+
+template<typename MatrixType, typename OrderingType> class SparseQR;
+template<typename SparseQRType> struct SparseQRMatrixQReturnType;
+template<typename SparseQRType> struct SparseQRMatrixQTransposeReturnType;
+template<typename SparseQRType, typename Derived> struct SparseQR_QProduct;
+namespace internal {
+  template <typename SparseQRType> struct traits<SparseQRMatrixQReturnType<SparseQRType> >
+  {
+    typedef typename SparseQRType::MatrixType ReturnType;
+    typedef typename ReturnType::Index Index;
+    typedef typename ReturnType::StorageKind StorageKind;
+  };
+  template <typename SparseQRType> struct traits<SparseQRMatrixQTransposeReturnType<SparseQRType> >
+  {
+    typedef typename SparseQRType::MatrixType ReturnType;
+  };
+  template <typename SparseQRType, typename Derived> struct traits<SparseQR_QProduct<SparseQRType, Derived> >
+  {
+    typedef typename Derived::PlainObject ReturnType;
+  };
+} // End namespace internal
+
+/**
+  * \ingroup SparseQR_Module
+  * \class SparseQR
+  * \brief Sparse left-looking rank-revealing QR factorization
+  * 
+  * This class implements a left-looking rank-revealing QR decomposition 
+  * of sparse matrices. When a column has a norm less than a given tolerance
+  * it is implicitly permuted to the end. The QR factorization thus obtained is 
+  * given by A*P = Q*R where R is upper triangular or trapezoidal. 
+  * 
+  * P is the column permutation which is the product of the fill-reducing and the
+  * rank-revealing permutations. Use colsPermutation() to get it.
+  * 
+  * Q is the orthogonal matrix represented as products of Householder reflectors. 
+  * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.
+  * You can then apply it to a vector.
+  * 
+  * R is the sparse triangular or trapezoidal matrix. The latter occurs when A is rank-deficient.
+  * matrixR().topLeftCorner(rank(), rank()) always returns a triangular factor of full rank.
+  * 
+  * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
+  * \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module 
+  *  OrderingMethods \endlink module for the list of built-in and external ordering methods.
+  * 
+  * \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()).
+  * 
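+  * A minimal usage sketch (with COLAMDOrdering, one of the built-in orderings):
+  * \code
+  * SparseMatrix<double> A;    // fill A, then:
+  * VectorXd b, x;             // right-hand side and solution
+  * A.makeCompressed();
+  * SparseQR<SparseMatrix<double>, COLAMDOrdering<int> > qr(A);
+  * x = qr.solve(b);
+  * \endcode
+  *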
+  */
+template<typename _MatrixType, typename _OrderingType>
+class SparseQR
+{
+  public:
+    typedef _MatrixType MatrixType;
+    typedef _OrderingType OrderingType;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef SparseMatrix<Scalar,ColMajor,Index> QRMatrixType;
+    typedef Matrix<Index, Dynamic, 1> IndexVector;
+    typedef Matrix<Scalar, Dynamic, 1> ScalarVector;
+    typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+  public:
+    SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
+    { }
+    
+    /** Construct a QR factorization of the matrix \a mat.
+      * 
+      * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
+      * 
+      * \sa compute()
+      */
+    SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)
+    {
+      compute(mat);
+    }
+    
+    /** Computes the QR factorization of the sparse matrix \a mat.
+      * 
+      * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
+      * 
+      * \sa analyzePattern(), factorize()
+      */
+    void compute(const MatrixType& mat)
+    {
+      analyzePattern(mat);
+      factorize(mat);
+    }
+    void analyzePattern(const MatrixType& mat);
+    void factorize(const MatrixType& mat);
+    
+    /** \returns the number of rows of the represented matrix. 
+      */
+    inline Index rows() const { return m_pmat.rows(); }
+    
+    /** \returns the number of columns of the represented matrix. 
+      */
+    inline Index cols() const { return m_pmat.cols();}
+    
+    /** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization.
+      */
+    const QRMatrixType& matrixR() const { return m_R; }
+    
+    /** \returns the number of linearly independent columns, as determined by the pivoting threshold.
+      *
+      * \sa setPivotThreshold()
+      */
+    Index rank() const 
+    {
+      eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
+      return m_nonzeropivots; 
+    }
+    
+    /** \returns an expression of the matrix Q as products of sparse Householder reflectors.
+    * The common usage of this function is to apply it to a dense matrix or vector
+    * \code
+    * VectorXd B1, B2;
+    * // Initialize B1
+    * B2 = matrixQ() * B1;
+    * \endcode
+    *
+    * To get a plain SparseMatrix representation of Q:
+    * \code
+    * SparseMatrix<double> Q;
+    * Q = SparseQR<SparseMatrix<double> >(A).matrixQ();
+    * \endcode
+    * Internally, this call simply performs a sparse product between the matrix Q
+    * and a sparse identity matrix. However, because the sparse reflectors are
+    * stored unsorted, two transpositions are needed to sort them before
+    * performing the product.
+    */
+    SparseQRMatrixQReturnType<SparseQR> matrixQ() const 
+    { return SparseQRMatrixQReturnType<SparseQR>(*this); }
+    
+    /** \returns a const reference to the column permutation P that was applied to A such that A*P = Q*R
+      * It is the combination of the fill-in reducing permutation and numerical column pivoting.
+      */
+    const PermutationType& colsPermutation() const
+    { 
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_outputPerm_c;
+    }
+    
+    /** \returns A string describing the type of error.
+      * This method is provided to ease debugging, not to handle errors.
+      */
+    std::string lastErrorMessage() const { return m_lastError; }
+    
+    /** \internal */
+    template<typename Rhs, typename Dest>
+    bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &dest) const
+    {
+      eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
+      eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
+
+      Index rank = this->rank();
+      
+      // Compute Q^T * b;
+      typename Dest::PlainObject y, b;
+      y = this->matrixQ().transpose() * B; 
+      b = y;
+      
+      // Solve with the triangular matrix R
+      y.resize((std::max)(cols(),Index(y.rows())),y.cols());
+      y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView<Upper>().solve(b.topRows(rank));
+      y.bottomRows(y.rows()-rank).setZero();
+
+      // Apply the column permutation
+      if (m_perm_c.size())  dest = colsPermutation() * y.topRows(cols());
+      else                  dest = y.topRows(cols());
+      
+      m_info = Success;
+      return true;
+    }
+    
+
+    /** Sets the threshold that is used to determine linearly dependent columns during the factorization.
+      *
+      * In practice, if during the factorization the norm of the column that has to be eliminated is below
+      * this threshold, then the entire column is treated as zero and moved to the end.
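+      *
+      * For instance (the value is hypothetical and should be tuned to the
+      * problem's scale):
+      * \code
+      * qr.setPivotThreshold(1e-8);
+      * \endcode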
+      */
+    void setPivotThreshold(const RealScalar& threshold)
+    {
+      m_useDefaultThreshold = false;
+      m_threshold = threshold;
+    }
+    
+    /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<SparseQR, Rhs> solve(const MatrixBase<Rhs>& B) const 
+    {
+      eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
+      eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
+      return internal::solve_retval<SparseQR, Rhs>(*this, B.derived());
+    }
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<SparseQR, Rhs> solve(const SparseMatrixBase<Rhs>& B) const
+    {
+      eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
+      eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
+      return internal::sparse_solve_retval<SparseQR, Rhs>(*this, B.derived());
+    }
+    
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the QR factorization reports a numerical problem
+      *          \c InvalidInput if the input matrix is invalid
+      *
+      * \sa lastErrorMessage()
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+
+  protected:
+    inline void sort_matrix_Q()
+    {
+      if(this->m_isQSorted) return;
+      // The matrix Q is sorted during the transposition
+      SparseMatrix<Scalar, RowMajor, Index> mQrm(this->m_Q);
+      this->m_Q = mQrm;
+      this->m_isQSorted = true;
+    }
+
+    
+  protected:
+    bool m_isInitialized;
+    bool m_analysisIsok;
+    bool m_factorizationIsok;
+    mutable ComputationInfo m_info;
+    std::string m_lastError;
+    QRMatrixType m_pmat;            // Temporary matrix
+    QRMatrixType m_R;               // The triangular factor matrix
+    QRMatrixType m_Q;               // The orthogonal reflectors
+    ScalarVector m_hcoeffs;         // The Householder coefficients
+    PermutationType m_perm_c;       // Fill-reducing  Column  permutation
+    PermutationType m_pivotperm;    // The permutation for rank revealing
+    PermutationType m_outputPerm_c; // The final column permutation
+    RealScalar m_threshold;         // Threshold to determine null Householder reflections
+    bool m_useDefaultThreshold;     // Use default threshold
+    Index m_nonzeropivots;          // Number of non zero pivots found 
+    IndexVector m_etree;            // Column elimination tree
+    IndexVector m_firstRowElt;      // First element in each row
+    bool m_isQSorted;               // whether Q is sorted or not
+    bool m_isEtreeOk;               // whether the elimination tree match the initial input matrix
+    
+    template <typename, typename > friend struct SparseQR_QProduct;
+    template <typename > friend struct SparseQRMatrixQReturnType;
+    
+};
+
+/** \brief Preprocessing step of a QR factorization 
+  * 
+  * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
+  * 
+  * In this step, the fill-reducing permutation is computed and applied to the columns of A
+  * and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited.
+  * 
+  * \note In this step it is assumed that there is no empty row in the matrix \a mat.
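+  *
+  * A typical reuse pattern (sketch): perform the symbolic step once, then
+  * refactorize numerically for matrices sharing the same sparsity pattern:
+  * \code
+  * SparseQR<SparseMatrix<double>, COLAMDOrdering<int> > qr;
+  * qr.analyzePattern(A);  // symbolic step, done once
+  * qr.factorize(A);       // numeric step
+  * qr.factorize(A2);      // A2 has the same sparsity pattern as A
+  * \endcode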
+  */
+template <typename MatrixType, typename OrderingType>
+void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
+{
+  eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to SparseQR");
+  // Copy to a column-major matrix if the input is row-major
+  typename internal::conditional<MatrixType::IsRowMajor,QRMatrixType,const MatrixType&>::type matCpy(mat);
+  // Compute the column fill reducing ordering
+  OrderingType ord; 
+  ord(matCpy, m_perm_c); 
+  Index n = mat.cols();
+  Index m = mat.rows();
+  Index diagSize = (std::min)(m,n);
+  
+  if (!m_perm_c.size())
+  {
+    m_perm_c.resize(n);
+    m_perm_c.indices().setLinSpaced(n, 0,n-1);
+  }
+  
+  // Compute the column elimination tree of the permuted matrix
+  m_outputPerm_c = m_perm_c.inverse();
+  internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+  m_isEtreeOk = true;
+  
+  m_R.resize(m, n);
+  m_Q.resize(m, diagSize);
+  
+  // Allocate space for nonzero elements: a rough estimation
+  m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
+  m_Q.reserve(2*mat.nonZeros());
+  m_hcoeffs.resize(diagSize);
+  m_analysisIsok = true;
+}
+
+/** \brief Performs the numerical QR factorization of the input matrix
+  * 
+  * The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with
+  * a matrix having the same sparsity pattern as \a mat.
+  * 
+  * \param mat The sparse column-major matrix
+  */
+template <typename MatrixType, typename OrderingType>
+void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
+{
+  using std::abs;
+  using std::max;
+  
+  eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step");
+  Index m = mat.rows();
+  Index n = mat.cols();
+  Index diagSize = (std::min)(m,n);
+  IndexVector mark((std::max)(m,n)); mark.setConstant(-1);  // Record the visited nodes
+  IndexVector Ridx(n), Qidx(m);                             // Temporary storage for the row indices of the current column of R and Q
+  Index nzcolR, nzcolQ;                                     // Number of nonzeros in the current column of R and Q
+  ScalarVector tval(m);                                     // The dense vector used to compute the current column
+  RealScalar pivotThreshold = m_threshold;
+  
+  m_R.setZero();
+  m_Q.setZero();
+  m_pmat = mat;
+  if(!m_isEtreeOk)
+  {
+    m_outputPerm_c = m_perm_c.inverse();
+    internal::coletree(m_pmat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
+    m_isEtreeOk = true;
+  }
+
+  m_pmat.uncompress(); // To have the innerNonZeroPtr allocated
+  
+  // Apply the fill-in reducing permutation lazily:
+  {
+    // If the input is row-major, copy the original column indices;
+    // otherwise, directly use the input matrix.
+    IndexVector originalOuterIndicesCpy;
+    const Index *originalOuterIndices = mat.outerIndexPtr();
+    if(MatrixType::IsRowMajor)
+    {
+      originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1);
+      originalOuterIndices = originalOuterIndicesCpy.data();
+    }
+    
+    for (int i = 0; i < n; i++)
+    {
+      Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i;
+      m_pmat.outerIndexPtr()[p] = originalOuterIndices[i]; 
+      m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i]; 
+    }
+  }
+  
+  /* Compute the default threshold as in MatLab, see:
+   * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
+   * Sparse QR Factorization", ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
+   */
+  if(m_useDefaultThreshold) 
+  {
+    RealScalar max2Norm = 0.0;
+    for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm());
+    if(max2Norm==RealScalar(0))
+      max2Norm = RealScalar(1);
+    pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
+  }
+  
+  // Initialize the numerical permutation
+  m_pivotperm.setIdentity(n);
+  
+  Index nonzeroCol = 0; // Record the number of valid pivots
+  m_Q.startVec(0);
+
+  // Left looking rank-revealing QR factorization: compute a column of R and Q at a time
+  for (Index col = 0; col < n; ++col)
+  {
+    mark.setConstant(-1);
+    m_R.startVec(col);
+    mark(nonzeroCol) = col;
+    Qidx(0) = nonzeroCol;
+    nzcolR = 0; nzcolQ = 1;
+    bool found_diag = nonzeroCol>=m;
+    tval.setZero(); 
+    
+    // Symbolic factorization: find the nonzero locations of column k of the factors R and Q, i.e.,
+    // all the nodes (with indices lower than rank) reachable through the column elimination tree (etree) rooted at node k.
+    // Note: if the diagonal entry does not exist, then its contribution must be explicitly added,
+    // hence the trick with found_diag, which permits one more iteration on the diagonal element if it has not been found yet.
+    for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
+    {
+      Index curIdx = nonzeroCol;
+      if(itp) curIdx = itp.row();
+      if(curIdx == nonzeroCol) found_diag = true;
+      
+      // Get the nonzero indices of the current column of R
+      Index st = m_firstRowElt(curIdx); // The traversal of the etree starts here 
+      if (st < 0 )
+      {
+        m_lastError = "Empty row found during numerical factorization";
+        m_info = InvalidInput;
+        return;
+      }
+
+      // Traverse the etree 
+      Index bi = nzcolR;
+      for (; mark(st) != col; st = m_etree(st))
+      {
+        Ridx(nzcolR) = st;  // Add this row to the list,
+        mark(st) = col;     // and mark this row as visited
+        nzcolR++;
+      }
+
+      // Reverse the list to get the topological ordering
+      Index nt = nzcolR-bi;
+      for(Index i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1));
+       
+      // Copy the current (curIdx,col) value of the input matrix
+      if(itp) tval(curIdx) = itp.value();
+      else    tval(curIdx) = Scalar(0);
+      
+      // Compute the pattern of Q(:,k)
+      if(curIdx > nonzeroCol && mark(curIdx) != col ) 
+      {
+        Qidx(nzcolQ) = curIdx;  // Add this row to the pattern of Q,
+        mark(curIdx) = col;     // and mark it as visited
+        nzcolQ++;
+      }
+    }
+
+    // Browse all the indices of R(:,col) in reverse order
+    for (Index i = nzcolR-1; i >= 0; i--)
+    {
+      Index curIdx = Ridx(i);
+      
+      // Apply the curIdx-th householder vector to the current column (temporarily stored into tval)
+      Scalar tdot(0);
+      
+      // First compute q' * tval
+      tdot = m_Q.col(curIdx).dot(tval);
+
+      tdot *= m_hcoeffs(curIdx);
+      
+      // Then update tval = tval - q * tau
+      // FIXME: tval -= tdot * m_Q.col(curIdx) should amount to the same (need to check/add support for efficient "dense ?= sparse")
+      for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
+        tval(itq.row()) -= itq.value() * tdot;
+
+      // Detect fill-in for the current column of Q
+      if(m_etree(Ridx(i)) == nonzeroCol)
+      {
+        for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
+        {
+          Index iQ = itq.row();
+          if (mark(iQ) != col)
+          {
+            Qidx(nzcolQ++) = iQ;  // Add this row to the pattern of Q,
+            mark(iQ) = col;       // and mark it as visited
+          }
+        }
+      }
+    } // End update current column
+    
+    Scalar tau = 0;
+    RealScalar beta = 0;
+    
+    if(nonzeroCol < diagSize)
+    {
+      // Compute the Householder reflection that eliminates the current column
+      // FIXME this step should call the Householder module.
+      Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0);
+      
+      // First, the squared norm of Q((col+1):m, col)
+      RealScalar sqrNorm = 0.;
+      for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
+      if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))
+      {
+        beta = numext::real(c0);
+        tval(Qidx(0)) = 1;
+      }
+      else
+      {
+        using std::sqrt;
+        beta = sqrt(numext::abs2(c0) + sqrNorm);
+        if(numext::real(c0) >= RealScalar(0))
+          beta = -beta;
+        tval(Qidx(0)) = 1;
+        for (Index itq = 1; itq < nzcolQ; ++itq)
+          tval(Qidx(itq)) /= (c0 - beta);
+        tau = numext::conj((beta-c0) / beta);
+          
+      }
+    }
+
+    // Insert values in R
+    for (Index  i = nzcolR-1; i >= 0; i--)
+    {
+      Index curIdx = Ridx(i);
+      if(curIdx < nonzeroCol) 
+      {
+        m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx);
+        tval(curIdx) = Scalar(0.);
+      }
+    }
+
+    if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold)
+    {
+      m_R.insertBackByOuterInner(col, nonzeroCol) = beta;
+      // The householder coefficient
+      m_hcoeffs(nonzeroCol) = tau;
+      // Record the householder reflections
+      for (Index itq = 0; itq < nzcolQ; ++itq)
+      {
+        Index iQ = Qidx(itq);
+        m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ);
+        tval(iQ) = Scalar(0.);
+      }
+      nonzeroCol++;
+      if(nonzeroCol<diagSize)
+        m_Q.startVec(nonzeroCol);
+    }
+    else
+    {
+      // Zero pivot found: implicitly move this column to the end
+      for (Index j = nonzeroCol; j < n-1; j++) 
+        std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]);
+      
+      // Recompute the column elimination tree
+      internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());
+      m_isEtreeOk = false;
+    }
+  }
+  
+  m_hcoeffs.tail(diagSize-nonzeroCol).setZero();
+  
+  // Finalize the column pointers of the sparse matrices R and Q
+  m_Q.finalize();
+  m_Q.makeCompressed();
+  m_R.finalize();
+  m_R.makeCompressed();
+  m_isQSorted = false;
+
+  m_nonzeropivots = nonzeroCol;
+  
+  if(nonzeroCol<n)
+  {
+    // Permute the triangular factor to put the 'dead' columns to the end
+    QRMatrixType tempR(m_R);
+    m_R = tempR * m_pivotperm;
+    
+    // Update the column permutation
+    m_outputPerm_c = m_outputPerm_c * m_pivotperm;
+  }
+  
+  m_isInitialized = true; 
+  m_factorizationIsok = true;
+  m_info = Success;
+}
+
+namespace internal {
+  
+template<typename _MatrixType, typename OrderingType, typename Rhs>
+struct solve_retval<SparseQR<_MatrixType,OrderingType>, Rhs>
+  : solve_retval_base<SparseQR<_MatrixType,OrderingType>, Rhs>
+{
+  typedef SparseQR<_MatrixType,OrderingType> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+template<typename _MatrixType, typename OrderingType, typename Rhs>
+struct sparse_solve_retval<SparseQR<_MatrixType, OrderingType>, Rhs>
+ : sparse_solve_retval_base<SparseQR<_MatrixType, OrderingType>, Rhs>
+{
+  typedef SparseQR<_MatrixType, OrderingType> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec, Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    this->defaultEvalTo(dst);
+  }
+};
+} // end namespace internal
+
+template <typename SparseQRType, typename Derived>
+struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived> >
+{
+  typedef typename SparseQRType::QRMatrixType MatrixType;
+  typedef typename SparseQRType::Scalar Scalar;
+  typedef typename SparseQRType::Index Index;
+  // Get the references 
+  SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) : 
+  m_qr(qr),m_other(other),m_transpose(transpose) {}
+  inline Index rows() const { return m_transpose ? m_qr.rows() : m_qr.cols(); }
+  inline Index cols() const { return m_other.cols(); }
+  
+  // Assign to a vector
+  template<typename DesType>
+  void evalTo(DesType& res) const
+  {
+    Index m = m_qr.rows();
+    Index n = m_qr.cols();
+    Index diagSize = (std::min)(m,n);
+    res = m_other;
+    if (m_transpose)
+    {
+      eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
+      //Compute res = Q' * other column by column
+      for(Index j = 0; j < res.cols(); j++){
+        for (Index k = 0; k < diagSize; k++)
+        {
+          Scalar tau = Scalar(0);
+          tau = m_qr.m_Q.col(k).dot(res.col(j));
+          if(tau==Scalar(0)) continue;
+          tau = tau * m_qr.m_hcoeffs(k);
+          res.col(j) -= tau * m_qr.m_Q.col(k);
+        }
+      }
+    }
+    else
+    {
+      eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
+      // Compute res = Q * other column by column
+      for(Index j = 0; j < res.cols(); j++)
+      {
+        for (Index k = diagSize-1; k >=0; k--)
+        {
+          Scalar tau = Scalar(0);
+          tau = m_qr.m_Q.col(k).dot(res.col(j));
+          if(tau==Scalar(0)) continue;
+          tau = tau * m_qr.m_hcoeffs(k);
+          res.col(j) -= tau * m_qr.m_Q.col(k);
+        }
+      }
+    }
+  }
+  
+  const SparseQRType& m_qr;
+  const Derived& m_other;
+  bool m_transpose;
+};
+
+template<typename SparseQRType>
+struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<SparseQRType> >
+{  
+  typedef typename SparseQRType::Index Index;
+  typedef typename SparseQRType::Scalar Scalar;
+  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+  SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {}
+  template<typename Derived>
+  SparseQR_QProduct<SparseQRType, Derived> operator*(const MatrixBase<Derived>& other)
+  {
+    return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(),false);
+  }
+  SparseQRMatrixQTransposeReturnType<SparseQRType> adjoint() const
+  {
+    return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
+  }
+  inline Index rows() const { return m_qr.rows(); }
+  inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); }
+  // To use for operations with the transpose of Q
+  SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const
+  {
+    return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
+  }
+  template<typename Dest> void evalTo(MatrixBase<Dest>& dest) const
+  {
+    dest.derived() = m_qr.matrixQ() * Dest::Identity(m_qr.rows(), m_qr.rows());
+  }
+  template<typename Dest> void evalTo(SparseMatrixBase<Dest>& dest) const
+  {
+    Dest idMat(m_qr.rows(), m_qr.rows());
+    idMat.setIdentity();
+    // Sort the sparse householder reflectors if needed
+    const_cast<SparseQRType *>(&m_qr)->sort_matrix_Q();
+    dest.derived() = SparseQR_QProduct<SparseQRType, Dest>(m_qr, idMat, false);
+  }
+
+  const SparseQRType& m_qr;
+};
+
+template<typename SparseQRType>
+struct SparseQRMatrixQTransposeReturnType
+{
+  SparseQRMatrixQTransposeReturnType(const SparseQRType& qr) : m_qr(qr) {}
+  template<typename Derived>
+  SparseQR_QProduct<SparseQRType,Derived> operator*(const MatrixBase<Derived>& other)
+  {
+    return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(), true);
+  }
+  const SparseQRType& m_qr;
+};
+
+} // end namespace Eigen
+
+#endif
diff --git a/Eigen/src/StlSupport/CMakeLists.txt b/Eigen/src/StlSupport/CMakeLists.txt
new file mode 100644
index 0000000..0f094f6
--- /dev/null
+++ b/Eigen/src/StlSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_StlSupport_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_StlSupport_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/StlSupport COMPONENT Devel
+  )
diff --git a/Eigen/src/StlSupport/StdDeque.h b/Eigen/src/StlSupport/StdDeque.h
new file mode 100644
index 0000000..aaf6633
--- /dev/null
+++ b/Eigen/src/StlSupport/StdDeque.h
@@ -0,0 +1,134 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STDDEQUE_H
+#define EIGEN_STDDEQUE_H
+
+#include "details.h"
+
+// Define the explicit instantiation (e.g. necessary for the Intel compiler)
+#if defined(__INTEL_COMPILER) || defined(__GNUC__)
+  #define EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(...) template class std::deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >;
+#else
+  #define EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(...)
+#endif
+
+/**
+ * This section contains a convenience MACRO which allows an easy specialization of
+ * std::deque such that for data types with alignment issues the correct allocator
+ * is used automatically.
+ */
+#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) \
+EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(__VA_ARGS__) \
+namespace std \
+{ \
+  template<typename _Ay> \
+  class deque<__VA_ARGS__, _Ay>  \
+    : public deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
+  { \
+    typedef deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > deque_base; \
+  public: \
+    typedef __VA_ARGS__ value_type; \
+    typedef typename deque_base::allocator_type allocator_type; \
+    typedef typename deque_base::size_type size_type;  \
+    typedef typename deque_base::iterator iterator;  \
+    explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {}  \
+    template<typename InputIterator> \
+    deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \
+    deque(const deque& c) : deque_base(c) {}  \
+    explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
+    deque(iterator start, iterator end) : deque_base(start, end) {}  \
+    deque& operator=(const deque& x) {  \
+      deque_base::operator=(x);  \
+      return *this;  \
+    } \
+  }; \
+}
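+
+// Example (illustrative): after invoking the macro at namespace scope,
+// std::deque of a fixed-size vectorizable type transparently uses the
+// aligned allocator:
+//
+//   EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Eigen::Vector4f)
+//   std::deque<Eigen::Vector4f> d(10);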
+
+// check whether we really need the std::deque specialization
+#if !(defined(_GLIBCXX_DEQUE) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::deque::resize(size_type,const T&). */
+
+namespace std {
+
+#define EIGEN_STD_DEQUE_SPECIALIZATION_BODY \
+  public:  \
+    typedef T value_type; \
+    typedef typename deque_base::allocator_type allocator_type; \
+    typedef typename deque_base::size_type size_type;  \
+    typedef typename deque_base::iterator iterator;  \
+    typedef typename deque_base::const_iterator const_iterator;  \
+    explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {}  \
+    template<typename InputIterator> \
+    deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \
+    : deque_base(first, last, a) {} \
+    deque(const deque& c) : deque_base(c) {}  \
+    explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \
+    deque(iterator start, iterator end) : deque_base(start, end) {}  \
+    deque& operator=(const deque& x) {  \
+      deque_base::operator=(x);  \
+      return *this;  \
+    }
+
+  template<typename T>
+  class deque<T,EIGEN_ALIGNED_ALLOCATOR<T> >
+    : public deque<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+                   Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >
+{
+  typedef deque<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+                Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > deque_base;
+  EIGEN_STD_DEQUE_SPECIALIZATION_BODY
+
+  void resize(size_type new_size)
+  { resize(new_size, T()); }
+
+#if defined(_DEQUE_)
+  // workaround MSVC std::deque implementation
+  void resize(size_type new_size, const value_type& x)
+  {
+    if (deque_base::size() < new_size)
+      deque_base::_Insert_n(deque_base::end(), new_size - deque_base::size(), x);
+    else if (new_size < deque_base::size())
+      deque_base::erase(deque_base::begin() + new_size, deque_base::end());
+  }
+  void push_back(const value_type& x)
+  { deque_base::push_back(x); } 
+  void push_front(const value_type& x)
+  { deque_base::push_front(x); }
+  using deque_base::insert;  
+  iterator insert(const_iterator position, const value_type& x)
+  { return deque_base::insert(position,x); }
+  void insert(const_iterator position, size_type new_size, const value_type& x)
+  { deque_base::insert(position, new_size, x); }
+#elif defined(_GLIBCXX_DEQUE) && EIGEN_GNUC_AT_LEAST(4,2)
+  // workaround GCC std::deque implementation
+  void resize(size_type new_size, const value_type& x)
+  {
+    if (new_size < deque_base::size())
+      deque_base::_M_erase_at_end(this->_M_impl._M_start + new_size);
+    else
+      deque_base::insert(deque_base::end(), new_size - deque_base::size(), x);
+  }
+#else
+  // either GCC 4.1 or non-GCC
+  // default implementation which should always work.
+  void resize(size_type new_size, const value_type& x)
+  {
+    if (new_size < deque_base::size())
+      deque_base::erase(deque_base::begin() + new_size, deque_base::end());
+    else if (new_size > deque_base::size())
+      deque_base::insert(deque_base::end(), new_size - deque_base::size(), x);
+  }
+#endif
+  };
+}
+
+#endif // check whether specialization is actually required
+
+#endif // EIGEN_STDDEQUE_H
diff --git a/Eigen/src/StlSupport/StdList.h b/Eigen/src/StlSupport/StdList.h
new file mode 100644
index 0000000..3c74243
--- /dev/null
+++ b/Eigen/src/StlSupport/StdList.h
@@ -0,0 +1,114 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STDLIST_H
+#define EIGEN_STDLIST_H
+
+#include "details.h"
+
+// Define the explicit instantiation (e.g. necessary for the Intel compiler)
+#if defined(__INTEL_COMPILER) || defined(__GNUC__)
+  #define EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(...) template class std::list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >;
+#else
+  #define EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(...)
+#endif
+
+/**
+ * This section contains a convenience MACRO which allows an easy specialization of
+ * std::list such that for data types with alignment issues the correct allocator
+ * is used automatically.
+ */
+#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) \
+EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(__VA_ARGS__) \
+namespace std \
+{ \
+  template<typename _Ay> \
+  class list<__VA_ARGS__, _Ay>  \
+    : public list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
+  { \
+    typedef list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > list_base; \
+  public: \
+    typedef __VA_ARGS__ value_type; \
+    typedef typename list_base::allocator_type allocator_type; \
+    typedef typename list_base::size_type size_type;  \
+    typedef typename list_base::iterator iterator;  \
+    explicit list(const allocator_type& a = allocator_type()) : list_base(a) {}  \
+    template<typename InputIterator> \
+    list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \
+    list(const list& c) : list_base(c) {}  \
+    explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
+    list(iterator start, iterator end) : list_base(start, end) {}  \
+    list& operator=(const list& x) {  \
+      list_base::operator=(x);  \
+      return *this;  \
+    } \
+  }; \
+}
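+
+// Example (illustrative), mirroring the deque and vector variants:
+//
+//   EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Eigen::Vector4f)
+//   std::list<Eigen::Vector4f> l(10);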
+
+// check whether we really need the std::list specialization
+#if !(defined(_GLIBCXX_LIST) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::list::resize(size_type,const T&). */
+
+namespace std
+{
+
+#define EIGEN_STD_LIST_SPECIALIZATION_BODY \
+  public:  \
+    typedef T value_type; \
+    typedef typename list_base::allocator_type allocator_type; \
+    typedef typename list_base::size_type size_type;  \
+    typedef typename list_base::iterator iterator;  \
+    typedef typename list_base::const_iterator const_iterator;  \
+    explicit list(const allocator_type& a = allocator_type()) : list_base(a) {}  \
+    template<typename InputIterator> \
+    list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \
+    : list_base(first, last, a) {} \
+    list(const list& c) : list_base(c) {}  \
+    explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \
+    list(iterator start, iterator end) : list_base(start, end) {}  \
+    list& operator=(const list& x) {  \
+    list_base::operator=(x);  \
+    return *this; \
+  }
+
+  template<typename T>
+  class list<T,EIGEN_ALIGNED_ALLOCATOR<T> >
+    : public list<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+                  Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >
+  {
+    typedef list<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+                 Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > list_base;
+    EIGEN_STD_LIST_SPECIALIZATION_BODY
+
+    void resize(size_type new_size)
+    { resize(new_size, T()); }
+
+    void resize(size_type new_size, const value_type& x)
+    {
+      if (list_base::size() < new_size)
+        list_base::insert(list_base::end(), new_size - list_base::size(), x);
+      else
+        while (new_size < list_base::size()) list_base::pop_back();
+    }
+
+#if defined(_LIST_)
+    // workaround MSVC std::list implementation
+    void push_back(const value_type& x)
+    { list_base::push_back(x); } 
+    using list_base::insert;  
+    iterator insert(const_iterator position, const value_type& x)
+    { return list_base::insert(position,x); }
+    void insert(const_iterator position, size_type new_size, const value_type& x)
+    { list_base::insert(position, new_size, x); }
+#endif
+  };
+}
+
+#endif // check whether specialization is actually required
+
+#endif // EIGEN_STDLIST_H
diff --git a/Eigen/src/StlSupport/StdVector.h b/Eigen/src/StlSupport/StdVector.h
new file mode 100644
index 0000000..611664a
--- /dev/null
+++ b/Eigen/src/StlSupport/StdVector.h
@@ -0,0 +1,126 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STDVECTOR_H
+#define EIGEN_STDVECTOR_H
+
+#include "details.h"
+
+/**
+ * This section contains a convenience MACRO which allows an easy specialization of
+ * std::vector such that for data types with alignment issues the correct allocator
+ * is used automatically.
+ */
+#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) \
+namespace std \
+{ \
+  template<> \
+  class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> >  \
+    : public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \
+  { \
+    typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \
+  public: \
+    typedef __VA_ARGS__ value_type; \
+    typedef vector_base::allocator_type allocator_type; \
+    typedef vector_base::size_type size_type;  \
+    typedef vector_base::iterator iterator;  \
+    explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {}  \
+    template<typename InputIterator> \
+    vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \
+    vector(const vector& c) : vector_base(c) {}  \
+    explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
+    vector(iterator start, iterator end) : vector_base(start, end) {}  \
+    vector& operator=(const vector& x) {  \
+      vector_base::operator=(x);  \
+      return *this;  \
+    } \
+  }; \
+}
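+
+// Example (illustrative): once the macro has been invoked for a type, a plain
+// std::vector<T> with the default std::allocator transparently derives from
+// the aligned variant:
+//
+//   EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Eigen::Vector4f)
+//   std::vector<Eigen::Vector4f> v(10);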
+
+namespace std {
+
+#define EIGEN_STD_VECTOR_SPECIALIZATION_BODY \
+  public:  \
+    typedef T value_type; \
+    typedef typename vector_base::allocator_type allocator_type; \
+    typedef typename vector_base::size_type size_type;  \
+    typedef typename vector_base::iterator iterator;  \
+    typedef typename vector_base::const_iterator const_iterator;  \
+    explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {}  \
+    template<typename InputIterator> \
+    vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \
+    : vector_base(first, last, a) {} \
+    vector(const vector& c) : vector_base(c) {}  \
+    explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \
+    vector(iterator start, iterator end) : vector_base(start, end) {}  \
+    vector& operator=(const vector& x) {  \
+      vector_base::operator=(x);  \
+      return *this;  \
+    }
+
+  template<typename T>
+  class vector<T,EIGEN_ALIGNED_ALLOCATOR<T> >
+    : public vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+                    Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >
+{
+  typedef vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),
+                 Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > vector_base;
+  EIGEN_STD_VECTOR_SPECIALIZATION_BODY
+
+  void resize(size_type new_size)
+  { resize(new_size, T()); }
+
+#if defined(_VECTOR_)
+  // workaround MSVC std::vector implementation
+  void resize(size_type new_size, const value_type& x)
+  {
+    if (vector_base::size() < new_size)
+      vector_base::_Insert_n(vector_base::end(), new_size - vector_base::size(), x);
+    else if (new_size < vector_base::size())
+      vector_base::erase(vector_base::begin() + new_size, vector_base::end());
+  }
+  void push_back(const value_type& x)
+  { vector_base::push_back(x); } 
+  using vector_base::insert;  
+  iterator insert(const_iterator position, const value_type& x)
+  { return vector_base::insert(position,x); }
+  void insert(const_iterator position, size_type new_size, const value_type& x)
+  { vector_base::insert(position, new_size, x); }
+#elif defined(_GLIBCXX_VECTOR) && (!(EIGEN_GNUC_AT_LEAST(4,1)))
+  /* Note that before gcc-4.1 we already have: std::vector::resize(size_type,const T&).
+   * However, this specialization is still needed to make the above EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION trick to work. */
+  void resize(size_type new_size, const value_type& x)
+  {
+    vector_base::resize(new_size,x);
+  }
+#elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2)
+  // workaround GCC std::vector implementation
+  void resize(size_type new_size, const value_type& x)
+  {
+    if (new_size < vector_base::size())
+      vector_base::_M_erase_at_end(this->_M_impl._M_start + new_size);
+    else
+      vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);
+  }
+#else
+  // either GCC 4.1 or non-GCC
+  // default implementation which should always work.
+  void resize(size_type new_size, const value_type& x)
+  {
+    if (new_size < vector_base::size())
+      vector_base::erase(vector_base::begin() + new_size, vector_base::end());
+    else if (new_size > vector_base::size())
+      vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);
+  }
+#endif
+  };
+}
+
+#endif // EIGEN_STDVECTOR_H
diff --git a/Eigen/src/StlSupport/details.h b/Eigen/src/StlSupport/details.h
new file mode 100644
index 0000000..d8debc7
--- /dev/null
+++ b/Eigen/src/StlSupport/details.h
@@ -0,0 +1,84 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STL_DETAILS_H
+#define EIGEN_STL_DETAILS_H
+
+#ifndef EIGEN_ALIGNED_ALLOCATOR
+  #define EIGEN_ALIGNED_ALLOCATOR Eigen::aligned_allocator
+#endif
+
+namespace Eigen {
+
+  // This one is needed to prevent reimplementing the whole std::vector.
+  template <class T>
+  class aligned_allocator_indirection : public EIGEN_ALIGNED_ALLOCATOR<T>
+  {
+  public:
+    typedef size_t    size_type;
+    typedef ptrdiff_t difference_type;
+    typedef T*        pointer;
+    typedef const T*  const_pointer;
+    typedef T&        reference;
+    typedef const T&  const_reference;
+    typedef T         value_type;
+
+    template<class U>
+    struct rebind
+    {
+      typedef aligned_allocator_indirection<U> other;
+    };
+
+    aligned_allocator_indirection() {}
+    aligned_allocator_indirection(const aligned_allocator_indirection& ) : EIGEN_ALIGNED_ALLOCATOR<T>() {}
+    aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<T>& ) {}
+    template<class U>
+    aligned_allocator_indirection(const aligned_allocator_indirection<U>& ) {}
+    template<class U>
+    aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<U>& ) {}
+    ~aligned_allocator_indirection() {}
+  };
+
+#ifdef _MSC_VER
+
+  // Sometimes MSVC detects, at compile time, that the argument x
+  // in std::vector::resize(size_t s, T x) won't be aligned and generates an error,
+  // even if this function is never called. Hence this little wrapper.
+#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) \
+  typename Eigen::internal::conditional< \
+    Eigen::internal::is_arithmetic<T>::value, \
+    T, \
+    Eigen::internal::workaround_msvc_stl_support<T> \
+  >::type
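+
+  // For instance, EIGEN_WORKAROUND_MSVC_STL_SUPPORT(Vector4f) resolves to
+  // internal::workaround_msvc_stl_support<Vector4f>, while arithmetic types
+  // (int, double, ...) pass through unchanged.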
+
+  namespace internal {
+  template<typename T> struct workaround_msvc_stl_support : public T
+  {
+    inline workaround_msvc_stl_support() : T() {}
+    inline workaround_msvc_stl_support(const T& other) : T(other) {}
+    inline operator T& () { return *static_cast<T*>(this); }
+    inline operator const T& () const { return *static_cast<const T*>(this); }
+    template<typename OtherT>
+    inline T& operator=(const OtherT& other)
+    { T::operator=(other); return *this; }
+    inline workaround_msvc_stl_support& operator=(const workaround_msvc_stl_support& other)
+    { T::operator=(other); return *this; }
+  };
+  }
+
+#else
+
+#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) T
+
+#endif
+
+}
+
+#endif // EIGEN_STL_DETAILS_H
diff --git a/Eigen/src/SuperLUSupport/CMakeLists.txt b/Eigen/src/SuperLUSupport/CMakeLists.txt
new file mode 100644
index 0000000..b28ebe5
--- /dev/null
+++ b/Eigen/src/SuperLUSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SuperLUSupport_SRCS "*.h")
+
+INSTALL(FILES 
+  ${Eigen_SuperLUSupport_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SuperLUSupport COMPONENT Devel
+  )
diff --git a/Eigen/src/SuperLUSupport/SuperLUSupport.h b/Eigen/src/SuperLUSupport/SuperLUSupport.h
new file mode 100644
index 0000000..bcb3557
--- /dev/null
+++ b/Eigen/src/SuperLUSupport/SuperLUSupport.h
@@ -0,0 +1,1026 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SUPERLUSUPPORT_H
+#define EIGEN_SUPERLUSUPPORT_H
+
+namespace Eigen { 
+
+#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE)		\
+    extern "C" {                                                                                          \
+      typedef struct { FLOATTYPE for_lu; FLOATTYPE total_needed; int expansions; } PREFIX##mem_usage_t;   \
+      extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *,                  \
+                                char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *,           \
+                                void *, int, SuperMatrix *, SuperMatrix *,                                \
+                                FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *,                       \
+                                PREFIX##mem_usage_t *, SuperLUStat_t *, int *);                           \
+    }                                                                                                     \
+    inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A,                                \
+         int *perm_c, int *perm_r, int *etree, char *equed,                                               \
+         FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L,                                                      \
+         SuperMatrix *U, void *work, int lwork,                                                           \
+         SuperMatrix *B, SuperMatrix *X,                                                                  \
+         FLOATTYPE *recip_pivot_growth,                                                                   \
+         FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr,                                              \
+         SuperLUStat_t *stats, int *info, KEYTYPE) {                                                      \
+    PREFIX##mem_usage_t mem_usage;                                                                        \
+    PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L,                                      \
+         U, work, lwork, B, X, recip_pivot_growth, rcond,                                                 \
+         ferr, berr, &mem_usage, stats, info);                                                            \
+    return mem_usage.for_lu; /* bytes used by the factor storage */                                       \
+  }
+
+DECL_GSSVX(s,float,float)
+DECL_GSSVX(c,float,std::complex<float>)
+DECL_GSSVX(d,double,double)
+DECL_GSSVX(z,double,std::complex<double>)
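+
+// Each DECL_GSSVX above declares the C prototype of one SuperLU driver
+// (e.g. dgssvx for double) and wraps it in an overload of SuperLU_gssvx
+// selected by the trailing KEYTYPE tag argument, so scalar-dependent call
+// sites can stay uniform.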
+
+#ifdef MILU_ALPHA
+#define EIGEN_SUPERLU_HAS_ILU
+#endif
+
+#ifdef EIGEN_SUPERLU_HAS_ILU
+
+// similarly for the incomplete factorization using gsisx
+#define DECL_GSISX(PREFIX,FLOATTYPE,KEYTYPE)                                                    \
+    extern "C" {                                                                                \
+      extern void PREFIX##gsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *,        \
+                         char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *,        \
+                         void *, int, SuperMatrix *, SuperMatrix *, FLOATTYPE *, FLOATTYPE *,   \
+                         PREFIX##mem_usage_t *, SuperLUStat_t *, int *);                        \
+    }                                                                                           \
+    inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A,                      \
+         int *perm_c, int *perm_r, int *etree, char *equed,                                     \
+         FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L,                                            \
+         SuperMatrix *U, void *work, int lwork,                                                 \
+         SuperMatrix *B, SuperMatrix *X,                                                        \
+         FLOATTYPE *recip_pivot_growth,                                                         \
+         FLOATTYPE *rcond,                                                                      \
+         SuperLUStat_t *stats, int *info, KEYTYPE) {                                            \
+    PREFIX##mem_usage_t mem_usage;                                                              \
+    PREFIX##gsisx(options, A, perm_c, perm_r, etree, equed, R, C, L,                            \
+         U, work, lwork, B, X, recip_pivot_growth, rcond,                                       \
+         &mem_usage, stats, info);                                                              \
+    return mem_usage.for_lu; /* bytes used by the factor storage */                             \
+  }
+
+DECL_GSISX(s,float,float)
+DECL_GSISX(c,float,std::complex<float>)
+DECL_GSISX(d,double,double)
+DECL_GSISX(z,double,std::complex<double>)
+
+#endif
+
+template<typename MatrixType>
+struct SluMatrixMapHelper;
+
+/** \internal
+  *
+  * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices
+  * and dense matrices. Supernodal and other fancy formats are not supported by this wrapper.
+  *
+  * This wrapper class mainly aims to avoid the need for dynamic allocation of the storage structure.
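+  *
+  * For instance (internal usage sketch), a compressed SparseMatrix \c A can be
+  * wrapped without copying its storage via \c SluMatrix::Map(A), or through the
+  * convenience function \c internal::asSluMatrix(A).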
+  */
+struct SluMatrix : SuperMatrix
+{
+  SluMatrix()
+  {
+    Store = &storage;
+  }
+
+  SluMatrix(const SluMatrix& other)
+    : SuperMatrix(other)
+  {
+    Store = &storage;
+    storage = other.storage;
+  }
+
+  SluMatrix& operator=(const SluMatrix& other)
+  {
+    SuperMatrix::operator=(static_cast<const SuperMatrix&>(other));
+    Store = &storage;
+    storage = other.storage;
+    return *this;
+  }
+
+  struct
+  {
+    union {int nnz;int lda;};
+    void *values;
+    int *innerInd;
+    int *outerInd;
+  } storage;
+
+  void setStorageType(Stype_t t)
+  {
+    Stype = t;
+    if (t==SLU_NC || t==SLU_NR || t==SLU_DN)
+      Store = &storage;
+    else
+    {
+      eigen_assert(false && "storage type not supported");
+      Store = 0;
+    }
+  }
+
+  template<typename Scalar>
+  void setScalarType()
+  {
+    if (internal::is_same<Scalar,float>::value)
+      Dtype = SLU_S;
+    else if (internal::is_same<Scalar,double>::value)
+      Dtype = SLU_D;
+    else if (internal::is_same<Scalar,std::complex<float> >::value)
+      Dtype = SLU_C;
+    else if (internal::is_same<Scalar,std::complex<double> >::value)
+      Dtype = SLU_Z;
+    else
+    {
+      eigen_assert(false && "Scalar type not supported by SuperLU");
+    }
+  }
+
+  template<typename MatrixType>
+  static SluMatrix Map(MatrixBase<MatrixType>& _mat)
+  {
+    MatrixType& mat(_mat.derived());
+    eigen_assert( ((MatrixType::Flags&RowMajorBit)!=RowMajorBit) && "row-major dense matrices are not supported by SuperLU");
+    SluMatrix res;
+    res.setStorageType(SLU_DN);
+    res.setScalarType<typename MatrixType::Scalar>();
+    res.Mtype     = SLU_GE;
+
+    res.nrow      = mat.rows();
+    res.ncol      = mat.cols();
+
+    res.storage.lda       = MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride();
+    res.storage.values    = (void*)(mat.data());
+    return res;
+  }
+
+  template<typename MatrixType>
+  static SluMatrix Map(SparseMatrixBase<MatrixType>& mat)
+  {
+    SluMatrix res;
+    if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+    {
+      res.setStorageType(SLU_NR);
+      res.nrow      = mat.cols();
+      res.ncol      = mat.rows();
+    }
+    else
+    {
+      res.setStorageType(SLU_NC);
+      res.nrow      = mat.rows();
+      res.ncol      = mat.cols();
+    }
+
+    res.Mtype       = SLU_GE;
+
+    res.storage.nnz       = mat.nonZeros();
+    res.storage.values    = mat.derived().valuePtr();
+    res.storage.innerInd  = mat.derived().innerIndexPtr();
+    res.storage.outerInd  = mat.derived().outerIndexPtr();
+
+    res.setScalarType<typename MatrixType::Scalar>();
+
+    // FIXME the following is not very accurate
+    if (MatrixType::Flags & Upper)
+      res.Mtype = SLU_TRU;
+    if (MatrixType::Flags & Lower)
+      res.Mtype = SLU_TRL;
+
+    eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
+
+    return res;
+  }
+};
+
+template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
+struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >
+{
+  typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
+  static void run(MatrixType& mat, SluMatrix& res)
+  {
+    eigen_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices are not supported by SuperLU");
+    res.setStorageType(SLU_DN);
+    res.setScalarType<Scalar>();
+    res.Mtype     = SLU_GE;
+
+    res.nrow      = mat.rows();
+    res.ncol      = mat.cols();
+
+    res.storage.lda       = mat.outerStride();
+    res.storage.values    = mat.data();
+  }
+};
+
+template<typename Derived>
+struct SluMatrixMapHelper<SparseMatrixBase<Derived> >
+{
+  typedef Derived MatrixType;
+  static void run(MatrixType& mat, SluMatrix& res)
+  {
+    if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)
+    {
+      res.setStorageType(SLU_NR);
+      res.nrow      = mat.cols();
+      res.ncol      = mat.rows();
+    }
+    else
+    {
+      res.setStorageType(SLU_NC);
+      res.nrow      = mat.rows();
+      res.ncol      = mat.cols();
+    }
+
+    res.Mtype       = SLU_GE;
+
+    res.storage.nnz       = mat.nonZeros();
+    res.storage.values    = mat.valuePtr();
+    res.storage.innerInd  = mat.innerIndexPtr();
+    res.storage.outerInd  = mat.outerIndexPtr();
+
+    res.setScalarType<typename MatrixType::Scalar>();
+
+    // FIXME the following is not very accurate
+    if (MatrixType::Flags & Upper)
+      res.Mtype = SLU_TRU;
+    if (MatrixType::Flags & Lower)
+      res.Mtype = SLU_TRL;
+
+    eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && "SelfAdjoint matrix shape not supported by SuperLU");
+  }
+};
+
+namespace internal {
+
+template<typename MatrixType>
+SluMatrix asSluMatrix(MatrixType& mat)
+{
+  return SluMatrix::Map(mat);
+}
+
+/** View a SuperLU matrix as an Eigen expression */
+template<typename Scalar, int Flags, typename Index>
+MappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat)
+{
+  eigen_assert(    ((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR)
+                || ((Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC) );
+
+  Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;
+
+  return MappedSparseMatrix<Scalar,Flags,Index>(
+    sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize],
+    sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) );
+}
+
+} // end namespace internal
+
+/** \ingroup SuperLUSupport_Module
+  * \class SuperLUBase
+  * \brief The base class for the direct and incomplete LU factorization of SuperLU
+  */
+template<typename _MatrixType, typename Derived>
+class SuperLUBase : internal::noncopyable
+{
+  public:
+    typedef _MatrixType MatrixType;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Scalar,Dynamic,1> Vector;
+    typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+    typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;    
+    typedef SparseMatrix<Scalar> LUMatrixType;
+
+  public:
+
+    SuperLUBase() {}
+
+    ~SuperLUBase()
+    {
+      clearFactors();
+    }
+    
+    Derived& derived() { return *static_cast<Derived*>(this); }
+    const Derived& derived() const { return *static_cast<const Derived*>(this); }
+    
+    inline Index rows() const { return m_matrix.rows(); }
+    inline Index cols() const { return m_matrix.cols(); }
+    
+    /** \returns a reference to the SuperLU options object used to configure the SuperLU algorithms. */
+    inline superlu_options_t& options() { return m_sluOptions; }
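+    // Illustrative: options() lets one tweak SuperLU before calling compute(), e.g.
+    //   solver.options().ColPerm = NATURAL;  // disable the fill-reducing ordering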
+    
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the factorization reports a numerical problem.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+
+    /** Computes the sparse LU decomposition of \a matrix */
+    void compute(const MatrixType& matrix)
+    {
+      derived().analyzePattern(matrix);
+      derived().factorize(matrix);
+    }
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<SuperLUBase, Rhs> solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "SuperLU is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<SuperLUBase, Rhs>(*this, b.derived());
+    }
+    
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<SuperLUBase, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "SuperLU is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "SuperLU::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::sparse_solve_retval<SuperLUBase, Rhs>(*this, b.derived());
+    }
+    
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      * 
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& /*matrix*/)
+    {
+      m_isInitialized = true;
+      m_info = Success;
+      m_analysisIsOk = true;
+      m_factorizationIsOk = false;
+    }
+    
+    template<typename Stream>
+    void dumpMemory(Stream& /*s*/)
+    {}
+    
+  protected:
+    
+    void initFactorization(const MatrixType& a)
+    {
+      set_default_options(&this->m_sluOptions);
+      
+      const int size = a.rows();
+      m_matrix = a;
+
+      m_sluA = internal::asSluMatrix(m_matrix);
+      clearFactors();
+
+      m_p.resize(size);
+      m_q.resize(size);
+      m_sluRscale.resize(size);
+      m_sluCscale.resize(size);
+      m_sluEtree.resize(size);
+
+      // set empty B and X
+      m_sluB.setStorageType(SLU_DN);
+      m_sluB.setScalarType<Scalar>();
+      m_sluB.Mtype          = SLU_GE;
+      m_sluB.storage.values = 0;
+      m_sluB.nrow           = 0;
+      m_sluB.ncol           = 0;
+      m_sluB.storage.lda    = size;
+      m_sluX                = m_sluB;
+      
+      m_extractedDataAreDirty = true;
+    }
+    
+    void init()
+    {
+      m_info = InvalidInput;
+      m_isInitialized = false;
+      m_sluL.Store = 0;
+      m_sluU.Store = 0;
+    }
+    
+    void extractData() const;
+
+    void clearFactors()
+    {
+      if(m_sluL.Store)
+        Destroy_SuperNode_Matrix(&m_sluL);
+      if(m_sluU.Store)
+        Destroy_CompCol_Matrix(&m_sluU);
+
+      m_sluL.Store = 0;
+      m_sluU.Store = 0;
+
+      memset(&m_sluL,0,sizeof m_sluL);
+      memset(&m_sluU,0,sizeof m_sluU);
+    }
+
+    // cached data to reduce reallocation, etc.
+    mutable LUMatrixType m_l;
+    mutable LUMatrixType m_u;
+    mutable IntColVectorType m_p;
+    mutable IntRowVectorType m_q;
+
+    mutable LUMatrixType m_matrix;  // copy of the factorized matrix
+    mutable SluMatrix m_sluA;
+    mutable SuperMatrix m_sluL, m_sluU;
+    mutable SluMatrix m_sluB, m_sluX;
+    mutable SuperLUStat_t m_sluStat;
+    mutable superlu_options_t m_sluOptions;
+    mutable std::vector<int> m_sluEtree;
+    mutable Matrix<RealScalar,Dynamic,1> m_sluRscale, m_sluCscale;
+    mutable Matrix<RealScalar,Dynamic,1> m_sluFerr, m_sluBerr;
+    mutable char m_sluEqued;
+
+    mutable ComputationInfo m_info;
+    bool m_isInitialized;
+    int m_factorizationIsOk;
+    int m_analysisIsOk;
+    mutable bool m_extractedDataAreDirty;
+    
+  private:
+    SuperLUBase(SuperLUBase& ) { }
+};
+
+
+/** \ingroup SuperLUSupport_Module
+  * \class SuperLU
+  * \brief A sparse direct LU factorization and solver based on the SuperLU library
+  *
+  * This class allows solving A.X = B sparse linear problems via a direct LU factorization
+  * using the SuperLU library. The sparse matrix A must be square and invertible. The vectors or matrices
+  * X and B can be either dense or sparse.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  *
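+  * A usage sketch (illustrative; assumes SuperLU is installed and linked,
+  * and that \c A and \c b have been filled in):
+  * \code
+  * SparseMatrix<double> A;                    // the square, invertible matrix
+  * SuperLU<SparseMatrix<double> > solver(A);  // analyze + factorize
+  * VectorXd x = solver.solve(b);              // b is the right-hand side
+  * \endcode
+  *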
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename _MatrixType>
+class SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> >
+{
+  public:
+    typedef SuperLUBase<_MatrixType,SuperLU> Base;
+    typedef _MatrixType MatrixType;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::RealScalar RealScalar;
+    typedef typename Base::Index Index;
+    typedef typename Base::IntRowVectorType IntRowVectorType;
+    typedef typename Base::IntColVectorType IntColVectorType;    
+    typedef typename Base::LUMatrixType LUMatrixType;
+    typedef TriangularView<LUMatrixType, Lower|UnitDiag>  LMatrixType;
+    typedef TriangularView<LUMatrixType,  Upper>           UMatrixType;
+
+  public:
+
+    SuperLU() : Base() { init(); }
+
+    SuperLU(const MatrixType& matrix) : Base()
+    {
+      init();
+      Base::compute(matrix);
+    }
+
+    ~SuperLU()
+    {
+    }
+    
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      * 
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& matrix)
+    {
+      m_info = InvalidInput;
+      m_isInitialized = false;
+      Base::analyzePattern(matrix);
+    }
+    
+    /** Performs a numeric decomposition of \a matrix
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& matrix);
+    
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal */
+    template<typename Rhs,typename Dest>
+    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
+    #endif // EIGEN_PARSED_BY_DOXYGEN
+    
+    inline const LMatrixType& matrixL() const
+    {
+      if (m_extractedDataAreDirty) this->extractData();
+      return m_l;
+    }
+
+    inline const UMatrixType& matrixU() const
+    {
+      if (m_extractedDataAreDirty) this->extractData();
+      return m_u;
+    }
+
+    inline const IntColVectorType& permutationP() const
+    {
+      if (m_extractedDataAreDirty) this->extractData();
+      return m_p;
+    }
+
+    inline const IntRowVectorType& permutationQ() const
+    {
+      if (m_extractedDataAreDirty) this->extractData();
+      return m_q;
+    }
+    
+    Scalar determinant() const;
+    
+  protected:
+    
+    using Base::m_matrix;
+    using Base::m_sluOptions;
+    using Base::m_sluA;
+    using Base::m_sluB;
+    using Base::m_sluX;
+    using Base::m_p;
+    using Base::m_q;
+    using Base::m_sluEtree;
+    using Base::m_sluEqued;
+    using Base::m_sluRscale;
+    using Base::m_sluCscale;
+    using Base::m_sluL;
+    using Base::m_sluU;
+    using Base::m_sluStat;
+    using Base::m_sluFerr;
+    using Base::m_sluBerr;
+    using Base::m_l;
+    using Base::m_u;
+    
+    using Base::m_analysisIsOk;
+    using Base::m_factorizationIsOk;
+    using Base::m_extractedDataAreDirty;
+    using Base::m_isInitialized;
+    using Base::m_info;
+    
+    void init()
+    {
+      Base::init();
+      
+      set_default_options(&this->m_sluOptions);
+      m_sluOptions.PrintStat        = NO;
+      m_sluOptions.ConditionNumber  = NO;
+      m_sluOptions.Trans            = NOTRANS;
+      m_sluOptions.ColPerm          = COLAMD;
+    }
+    
+    
+  private:
+    SuperLU(SuperLU& ) { }
+};
+
+template<typename MatrixType>
+void SuperLU<MatrixType>::factorize(const MatrixType& a)
+{
+  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+  if(!m_analysisIsOk)
+  {
+    m_info = InvalidInput;
+    return;
+  }
+  
+  this->initFactorization(a);
+  
+  m_sluOptions.ColPerm = COLAMD;
+  int info = 0;
+  RealScalar recip_pivot_growth, rcond;
+  RealScalar ferr, berr;
+
+  StatInit(&m_sluStat);
+  SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+                &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+                &m_sluL, &m_sluU,
+                NULL, 0,
+                &m_sluB, &m_sluX,
+                &recip_pivot_growth, &rcond,
+                &ferr, &berr,
+                &m_sluStat, &info, Scalar());
+  StatFree(&m_sluStat);
+
+  m_extractedDataAreDirty = true;
+
+  // FIXME how to better check for errors ???
+  m_info = info == 0 ? Success : NumericalIssue;
+  m_factorizationIsOk = true;
+}
+
+template<typename MatrixType>
+template<typename Rhs,typename Dest>
+void SuperLU<MatrixType>::_solve(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
+{
+  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+
+  const int size = m_matrix.rows();
+  const int rhsCols = b.cols();
+  eigen_assert(size==b.rows());
+
+  m_sluOptions.Trans = NOTRANS;
+  m_sluOptions.Fact = FACTORED;
+  m_sluOptions.IterRefine = NOREFINE;
+
+  m_sluFerr.resize(rhsCols);
+  m_sluBerr.resize(rhsCols);
+  m_sluB = SluMatrix::Map(b.const_cast_derived());
+  m_sluX = SluMatrix::Map(x.derived());
+  
+  typename Rhs::PlainObject b_cpy;
+  if(m_sluEqued!='N')
+  {
+    b_cpy = b;
+    m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());  
+  }
+
+  StatInit(&m_sluStat);
+  int info = 0;
+  RealScalar recip_pivot_growth, rcond;
+  SuperLU_gssvx(&m_sluOptions, &m_sluA,
+                m_q.data(), m_p.data(),
+                &m_sluEtree[0], &m_sluEqued,
+                &m_sluRscale[0], &m_sluCscale[0],
+                &m_sluL, &m_sluU,
+                NULL, 0,
+                &m_sluB, &m_sluX,
+                &recip_pivot_growth, &rcond,
+                &m_sluFerr[0], &m_sluBerr[0],
+                &m_sluStat, &info, Scalar());
+  StatFree(&m_sluStat);
+  m_info = info==0 ? Success : NumericalIssue;
+}
+
+// the code of this extractData() function has been adapted from the SuperLU's Matlab support code,
+//
+//  Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+//
+//  THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+//  EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+//
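+// It converts SuperLU's supernodal L factor (SCformat) and column-wise U
+// factor (NCformat) into plain compressed-column matrices m_l and m_u.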
+template<typename MatrixType, typename Derived>
+void SuperLUBase<MatrixType,Derived>::extractData() const
+{
+  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for extracting factors, you must first call either compute() or analyzePattern()/factorize()");
+  if (m_extractedDataAreDirty)
+  {
+    int         upper;
+    int         fsupc, istart, nsupr;
+    int         lastl = 0, lastu = 0;
+    SCformat    *Lstore = static_cast<SCformat*>(m_sluL.Store);
+    NCformat    *Ustore = static_cast<NCformat*>(m_sluU.Store);
+    Scalar      *SNptr;
+
+    const int size = m_matrix.rows();
+    m_l.resize(size,size);
+    m_l.resizeNonZeros(Lstore->nnz);
+    m_u.resize(size,size);
+    m_u.resizeNonZeros(Ustore->nnz);
+
+    int* Lcol = m_l.outerIndexPtr();
+    int* Lrow = m_l.innerIndexPtr();
+    Scalar* Lval = m_l.valuePtr();
+
+    int* Ucol = m_u.outerIndexPtr();
+    int* Urow = m_u.innerIndexPtr();
+    Scalar* Uval = m_u.valuePtr();
+
+    Lcol[0] = 0;
+    Ucol[0] = 0;
+
+    /* for each supernode */
+    for (int k = 0; k <= Lstore->nsuper; ++k)
+    {
+      fsupc   = L_FST_SUPC(k);
+      istart  = L_SUB_START(fsupc);
+      nsupr   = L_SUB_START(fsupc+1) - istart;
+      upper   = 1;
+
+      /* for each column in the supernode */
+      for (int j = fsupc; j < L_FST_SUPC(k+1); ++j)
+      {
+        SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)];
+
+        /* Extract U */
+        for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i)
+        {
+          Uval[lastu] = ((Scalar*)Ustore->nzval)[i];
+          /* Matlab doesn't like explicit zero. */
+          if (Uval[lastu] != 0.0)
+            Urow[lastu++] = U_SUB(i);
+        }
+        for (int i = 0; i < upper; ++i)
+        {
+          /* upper triangle in the supernode */
+          Uval[lastu] = SNptr[i];
+          /* Matlab doesn't like explicit zero. */
+          if (Uval[lastu] != 0.0)
+            Urow[lastu++] = L_SUB(istart+i);
+        }
+        Ucol[j+1] = lastu;
+
+        /* Extract L */
+        Lval[lastl] = 1.0; /* unit diagonal */
+        Lrow[lastl++] = L_SUB(istart + upper - 1);
+        for (int i = upper; i < nsupr; ++i)
+        {
+          Lval[lastl] = SNptr[i];
+          /* Matlab doesn't like explicit zero. */
+          if (Lval[lastl] != 0.0)
+            Lrow[lastl++] = L_SUB(istart+i);
+        }
+        Lcol[j+1] = lastl;
+
+        ++upper;
+      } /* for j ... */
+
+    } /* for k ... */
+
+    // squeeze the matrices :
+    m_l.resizeNonZeros(lastl);
+    m_u.resizeNonZeros(lastu);
+
+    m_extractedDataAreDirty = false;
+  }
+}
+
+template<typename MatrixType>
+typename SuperLU<MatrixType>::Scalar SuperLU<MatrixType>::determinant() const
+{
+  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for computing the determinant, you must first call either compute() or analyzePattern()/factorize()");
+  
+  if (m_extractedDataAreDirty)
+    this->extractData();
+
+  Scalar det = Scalar(1);
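+  // U is upper triangular, so the diagonal entry of column j, when stored,
+  // is the last element of that column; det accumulates the product of these
+  // entries (the equilibration scalings are compensated below).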
+  for (int j=0; j<m_u.cols(); ++j)
+  {
+    if (m_u.outerIndexPtr()[j+1]-m_u.outerIndexPtr()[j] > 0)
+    {
+      int lastId = m_u.outerIndexPtr()[j+1]-1;
+      eigen_assert(m_u.innerIndexPtr()[lastId]<=j);
+      if (m_u.innerIndexPtr()[lastId]==j)
+        det *= m_u.valuePtr()[lastId];
+    }
+  }
+  if(m_sluEqued!='N')
+    return det/m_sluRscale.prod()/m_sluCscale.prod();
+  else
+    return det;
+}
+
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+#define EIGEN_SUPERLU_HAS_ILU
+#endif
+
+#ifdef EIGEN_SUPERLU_HAS_ILU
+
+/** \ingroup SuperLUSupport_Module
+  * \class SuperILU
+  * \brief A sparse direct \b incomplete LU factorization and solver based on the SuperLU library
+  *
+  * This class allows one to compute an approximate solution of sparse linear problems A.X = B via an incomplete LU factorization
+  * using the SuperLU library. It is primarily intended to be used as a preconditioner for iterative linear solvers.
+  *
+  * \warning This class requires SuperLU 4 or later.
+  *
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  *
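+  * A minimal sketch of direct use as an approximate solver (in practice the
+  * incomplete factors are more often used as a preconditioner), assuming a
+  * SparseMatrix<double> A and a VectorXd b:
+  * \code
+  * SuperILU<SparseMatrix<double> > ilu(A);
+  * VectorXd x_approx = ilu.solve(b);
+  * \endcode
+  *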
+  * \sa \ref TutorialSparseDirectSolvers, class ConjugateGradient, class BiCGSTAB
+  */
+
+template<typename _MatrixType>
+class SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> >
+{
+  public:
+    typedef SuperLUBase<_MatrixType,SuperILU> Base;
+    typedef _MatrixType MatrixType;
+    typedef typename Base::Scalar Scalar;
+    typedef typename Base::RealScalar RealScalar;
+    typedef typename Base::Index Index;
+
+  public:
+
+    SuperILU() : Base() { init(); }
+
+    SuperILU(const MatrixType& matrix) : Base()
+    {
+      init();
+      Base::compute(matrix);
+    }
+
+    ~SuperILU()
+    {
+    }
+    
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      * 
+      * \sa factorize()
+      */
+    void analyzePattern(const MatrixType& matrix)
+    {
+      Base::analyzePattern(matrix);
+    }
+    
+    /** Performs a numeric decomposition of \a matrix.
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
+      *
+      * \sa analyzePattern()
+      */
+    void factorize(const MatrixType& matrix);
+    
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal */
+    template<typename Rhs,typename Dest>
+    void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;
+    #endif // EIGEN_PARSED_BY_DOXYGEN
+    
+  protected:
+    
+    using Base::m_matrix;
+    using Base::m_sluOptions;
+    using Base::m_sluA;
+    using Base::m_sluB;
+    using Base::m_sluX;
+    using Base::m_p;
+    using Base::m_q;
+    using Base::m_sluEtree;
+    using Base::m_sluEqued;
+    using Base::m_sluRscale;
+    using Base::m_sluCscale;
+    using Base::m_sluL;
+    using Base::m_sluU;
+    using Base::m_sluStat;
+    using Base::m_sluFerr;
+    using Base::m_sluBerr;
+    using Base::m_l;
+    using Base::m_u;
+    
+    using Base::m_analysisIsOk;
+    using Base::m_factorizationIsOk;
+    using Base::m_extractedDataAreDirty;
+    using Base::m_isInitialized;
+    using Base::m_info;
+
+    void init()
+    {
+      Base::init();
+      
+      ilu_set_default_options(&m_sluOptions);
+      m_sluOptions.PrintStat        = NO;
+      m_sluOptions.ConditionNumber  = NO;
+      m_sluOptions.Trans            = NOTRANS;
+      m_sluOptions.ColPerm          = MMD_AT_PLUS_A;
+      
+      // no attempt to preserve column sum
+      m_sluOptions.ILU_MILU = SILU;
+      // only basic ILU(k) support -- no direct control over memory consumption
+      // better to use ILU_DropRule = DROP_BASIC | DROP_AREA
+      // and set ILU_FillFactor to max memory growth
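+      // e.g. (not enabled here; values are illustrative only):
+      //   m_sluOptions.ILU_DropRule   = DROP_BASIC | DROP_AREA;
+      //   m_sluOptions.ILU_FillFactor = 10;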
+      m_sluOptions.ILU_DropRule = DROP_BASIC;
+      m_sluOptions.ILU_DropTol = NumTraits<Scalar>::dummy_precision()*10;
+    }
+    
+  private:
+    SuperILU(SuperILU& ) { }
+};
+
+template<typename MatrixType>
+void SuperILU<MatrixType>::factorize(const MatrixType& a)
+{
+  eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
+  if(!m_analysisIsOk)
+  {
+    m_info = InvalidInput;
+    return;
+  }
+  
+  this->initFactorization(a);
+
+  int info = 0;
+  RealScalar recip_pivot_growth, rcond;
+
+  StatInit(&m_sluStat);
+  SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],
+                &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],
+                &m_sluL, &m_sluU,
+                NULL, 0,
+                &m_sluB, &m_sluX,
+                &recip_pivot_growth, &rcond,
+                &m_sluStat, &info, Scalar());
+  StatFree(&m_sluStat);
+
+  // FIXME how to better check for errors ???
+  m_info = info == 0 ? Success : NumericalIssue;
+  m_factorizationIsOk = true;
+}
+
+template<typename MatrixType>
+template<typename Rhs,typename Dest>
+void SuperILU<MatrixType>::_solve(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const
+{
+  eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()");
+
+  const int size = m_matrix.rows();
+  const int rhsCols = b.cols();
+  eigen_assert(size==b.rows());
+
+  m_sluOptions.Trans = NOTRANS;
+  m_sluOptions.Fact = FACTORED;
+  m_sluOptions.IterRefine = NOREFINE;
+
+  m_sluFerr.resize(rhsCols);
+  m_sluBerr.resize(rhsCols);
+  m_sluB = SluMatrix::Map(b.const_cast_derived());
+  m_sluX = SluMatrix::Map(x.derived());
+
+  typename Rhs::PlainObject b_cpy;
+  if(m_sluEqued!='N')
+  {
+    b_cpy = b;
+    m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());  
+  }
+  
+  int info = 0;
+  RealScalar recip_pivot_growth, rcond;
+
+  StatInit(&m_sluStat);
+  SuperLU_gsisx(&m_sluOptions, &m_sluA,
+                m_q.data(), m_p.data(),
+                &m_sluEtree[0], &m_sluEqued,
+                &m_sluRscale[0], &m_sluCscale[0],
+                &m_sluL, &m_sluU,
+                NULL, 0,
+                &m_sluB, &m_sluX,
+                &recip_pivot_growth, &rcond,
+                &m_sluStat, &info, Scalar());
+  StatFree(&m_sluStat);
+
+  m_info = info==0 ? Success : NumericalIssue;
+}
+#endif // EIGEN_SUPERLU_HAS_ILU
+
+namespace internal {
+  
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
+  : solve_retval_base<SuperLUBase<_MatrixType,Derived>, Rhs>
+{
+  typedef SuperLUBase<_MatrixType,Derived> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec().derived()._solve(rhs(),dst);
+  }
+};
+
+template<typename _MatrixType, typename Derived, typename Rhs>
+struct sparse_solve_retval<SuperLUBase<_MatrixType,Derived>, Rhs>
+  : sparse_solve_retval_base<SuperLUBase<_MatrixType,Derived>, Rhs>
+{
+  typedef SuperLUBase<_MatrixType,Derived> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    this->defaultEvalTo(dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SUPERLUSUPPORT_H
diff --git a/Eigen/src/UmfPackSupport/CMakeLists.txt b/Eigen/src/UmfPackSupport/CMakeLists.txt
new file mode 100644
index 0000000..a57de00
--- /dev/null
+++ b/Eigen/src/UmfPackSupport/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_UmfPackSupport_SRCS "*.h")
+
+INSTALL(FILES 
+  ${Eigen_UmfPackSupport_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/UmfPackSupport COMPONENT Devel
+  )
diff --git a/Eigen/src/UmfPackSupport/UmfPackSupport.h b/Eigen/src/UmfPackSupport/UmfPackSupport.h
new file mode 100644
index 0000000..29c60c3
--- /dev/null
+++ b/Eigen/src/UmfPackSupport/UmfPackSupport.h
@@ -0,0 +1,474 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_UMFPACKSUPPORT_H
+#define EIGEN_UMFPACKSUPPORT_H
+
+namespace Eigen { 
+
+/* TODO extract L, extract U, compute det, etc... */
+
+// generic double/complex<double> wrapper functions:
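+//
+// The trailing dummy Scalar argument (double() or std::complex<double>())
+// selects the matching UMFPACK di_/zi_ routine through overload resolution,
+// e.g. umfpack_free_numeric(&m_numeric, Scalar()). The complex overloads pass
+// the interleaved data as a single real pointer with a null imaginary pointer,
+// i.e. UMFPACK's packed-complex convention.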
+
+inline void umfpack_free_numeric(void **Numeric, double)
+{ umfpack_di_free_numeric(Numeric); *Numeric = 0; }
+
+inline void umfpack_free_numeric(void **Numeric, std::complex<double>)
+{ umfpack_zi_free_numeric(Numeric); *Numeric = 0; }
+
+inline void umfpack_free_symbolic(void **Symbolic, double)
+{ umfpack_di_free_symbolic(Symbolic); *Symbolic = 0; }
+
+inline void umfpack_free_symbolic(void **Symbolic, std::complex<double>)
+{ umfpack_zi_free_symbolic(Symbolic); *Symbolic = 0; }
+
+inline int umfpack_symbolic(int n_row,int n_col,
+                            const int Ap[], const int Ai[], const double Ax[], void **Symbolic,
+                            const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+  return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info);
+}
+
+inline int umfpack_symbolic(int n_row,int n_col,
+                            const int Ap[], const int Ai[], const std::complex<double> Ax[], void **Symbolic,
+                            const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])
+{
+  return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Control,Info);
+}
+
+inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],
+                            void *Symbolic, void **Numeric,
+                            const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+  return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info);
+}
+
+inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<double> Ax[],
+                            void *Symbolic, void **Numeric,
+                            const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])
+{
+  return umfpack_zi_numeric(Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);
+}
+
+inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],
+                          double X[], const double B[], void *Numeric,
+                          const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+  return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info);
+}
+
+inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex<double> Ax[],
+                          std::complex<double> X[], const std::complex<double> B[], void *Numeric,
+                          const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])
+{
+  return umfpack_zi_solve(sys,Ap,Ai,&numext::real_ref(Ax[0]),0,&numext::real_ref(X[0]),0,&numext::real_ref(B[0]),0,Numeric,Control,Info);
+}
+
+inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)
+{
+  return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex<double>)
+{
+  return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);
+}
+
+inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[],
+                               int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric)
+{
+  return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric);
+}
+
+inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], int Up[], int Ui[], std::complex<double> Ux[],
+                               int P[], int Q[], std::complex<double> Dx[], int *do_recip, double Rs[], void *Numeric)
+{
+  double& lx0_real = numext::real_ref(Lx[0]);
+  double& ux0_real = numext::real_ref(Ux[0]);
+  double& dx0_real = numext::real_ref(Dx[0]);
+  return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q,
+                                Dx?&dx0_real:0,0,do_recip,Rs,Numeric);
+}
+
+inline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+{
+  return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info);
+}
+
+inline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])
+{
+  double& mx_real = numext::real_ref(*Mx);
+  return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);
+}
+
+namespace internal {
+  template<typename T> struct umfpack_helper_is_sparse_plain : false_type {};
+  template<typename Scalar, int Options, typename StorageIndex>
+  struct umfpack_helper_is_sparse_plain<SparseMatrix<Scalar,Options,StorageIndex> >
+    : true_type {};
+  template<typename Scalar, int Options, typename StorageIndex>
+  struct umfpack_helper_is_sparse_plain<MappedSparseMatrix<Scalar,Options,StorageIndex> >
+    : true_type {};
+}
+
+/** \ingroup UmfPackSupport_Module
+  * \brief A sparse LU factorization and solver based on UmfPack
+  *
+  * This class allows one to solve sparse linear problems A.X = B via an LU factorization
+  * using the UmfPack library. The sparse matrix A must be square and of full rank.
+  * The vectors or matrices X and B can be either dense or sparse.
+  *
+  * \warning The input matrix A should be in a \b compressed and \b column-major form.
+  * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
+  * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
+  *
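+  * A minimal usage sketch, assuming a compressed column-major
+  * SparseMatrix<double> A and a VectorXd b of matching size:
+  * \code
+  * UmfPackLU<SparseMatrix<double> > lu(A);
+  * VectorXd x = lu.solve(b);
+  * \endcode
+  *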
+  * \sa \ref TutorialSparseDirectSolvers
+  */
+template<typename _MatrixType>
+class UmfPackLU : internal::noncopyable
+{
+  public:
+    typedef _MatrixType MatrixType;
+    typedef typename MatrixType::Scalar Scalar;
+    typedef typename MatrixType::RealScalar RealScalar;
+    typedef typename MatrixType::Index Index;
+    typedef Matrix<Scalar,Dynamic,1> Vector;
+    typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
+    typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
+    typedef SparseMatrix<Scalar> LUMatrixType;
+    typedef SparseMatrix<Scalar,ColMajor,int> UmfpackMatrixType;
+
+  public:
+
+    UmfPackLU() { init(); }
+
+    UmfPackLU(const MatrixType& matrix)
+    {
+      init();
+      compute(matrix);
+    }
+
+    ~UmfPackLU()
+    {
+      if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
+      if(m_numeric)  umfpack_free_numeric(&m_numeric,Scalar());
+    }
+
+    inline Index rows() const { return m_copyMatrix.rows(); }
+    inline Index cols() const { return m_copyMatrix.cols(); }
+
+    /** \brief Reports whether previous computation was successful.
+      *
+      * \returns \c Success if computation was successful,
+      *          \c NumericalIssue if the factorization failed for numerical reasons.
+      */
+    ComputationInfo info() const
+    {
+      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+      return m_info;
+    }
+
+    inline const LUMatrixType& matrixL() const
+    {
+      if (m_extractedDataAreDirty) extractData();
+      return m_l;
+    }
+
+    inline const LUMatrixType& matrixU() const
+    {
+      if (m_extractedDataAreDirty) extractData();
+      return m_u;
+    }
+
+    inline const IntColVectorType& permutationP() const
+    {
+      if (m_extractedDataAreDirty) extractData();
+      return m_p;
+    }
+
+    inline const IntRowVectorType& permutationQ() const
+    {
+      if (m_extractedDataAreDirty) extractData();
+      return m_q;
+    }
+
+    /** Computes the sparse LU decomposition of \a matrix.
+     *  Note that the matrix should be column-major, and in compressed format for best performance.
+     *  \sa SparseMatrix::makeCompressed().
+     */
+    template<typename InputMatrixType>
+    void compute(const InputMatrixType& matrix)
+    {
+      if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
+      if(m_numeric)  umfpack_free_numeric(&m_numeric,Scalar());
+      grapInput(matrix.derived());
+      analyzePattern_impl();
+      factorize_impl();
+    }
+
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::solve_retval<UmfPackLU, Rhs> solve(const MatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "UmfPackLU is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "UmfPackLU::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::solve_retval<UmfPackLU, Rhs>(*this, b.derived());
+    }
+
+    /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
+      *
+      * \sa compute()
+      */
+    template<typename Rhs>
+    inline const internal::sparse_solve_retval<UmfPackLU, Rhs> solve(const SparseMatrixBase<Rhs>& b) const
+    {
+      eigen_assert(m_isInitialized && "UmfPackLU is not initialized.");
+      eigen_assert(rows()==b.rows()
+                && "UmfPackLU::solve(): invalid number of rows of the right hand side matrix b");
+      return internal::sparse_solve_retval<UmfPackLU, Rhs>(*this, b.derived());
+    }
+
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
+      *
+      * This function is particularly useful when solving several problems having the same structure.
+      *
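+      * A typical two-step use, assuming the sparsity pattern of A is shared by
+      * several matrices to factorize:
+      * \code
+      * UmfPackLU<SparseMatrix<double> > lu;
+      * lu.analyzePattern(A);  // symbolic analysis, once per pattern
+      * lu.factorize(A);       // numeric factorization, once per set of values
+      * VectorXd x = lu.solve(b);
+      * \endcode
+      *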
+      * \sa factorize(), compute()
+      */
+    template<typename InputMatrixType>
+    void analyzePattern(const InputMatrixType& matrix)
+    {
+      if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());
+      if(m_numeric)  umfpack_free_numeric(&m_numeric,Scalar());
+      
+      grapInput(matrix.derived());
+
+      analyzePattern_impl();
+    }
+
+    /** Performs a numeric decomposition of \a matrix.
+      *
+      * The given matrix must have the same sparsity pattern as the matrix on which the pattern analysis has been performed.
+      *
+      * \sa analyzePattern(), compute()
+      */
+    template<typename InputMatrixType>
+    void factorize(const InputMatrixType& matrix)
+    {
+      eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()");
+      if(m_numeric)
+        umfpack_free_numeric(&m_numeric,Scalar());
+
+      grapInput(matrix.derived());
+      
+      factorize_impl();
+    }
+
+    #ifndef EIGEN_PARSED_BY_DOXYGEN
+    /** \internal */
+    template<typename BDerived,typename XDerived>
+    bool _solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const;
+    #endif
+
+    Scalar determinant() const;
+
+    void extractData() const;
+
+  protected:
+
+    void init()
+    {
+      m_info                  = InvalidInput;
+      m_isInitialized         = false;
+      m_numeric               = 0;
+      m_symbolic              = 0;
+      m_outerIndexPtr         = 0;
+      m_innerIndexPtr         = 0;
+      m_valuePtr              = 0;
+      m_extractedDataAreDirty = true;
+    }
+    
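+    // Take hold of the input matrix: if it is already a compressed,
+    // column-major SparseMatrix with int indices, reference its arrays
+    // directly; otherwise copy it into m_copyMatrix first.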
+    template<typename InputMatrixType>
+    void grapInput_impl(const InputMatrixType& mat, internal::true_type)
+    {
+      m_copyMatrix.resize(mat.rows(), mat.cols());
+      if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::Index)!=sizeof(int) || !mat.isCompressed() )
+      {
+        // non supported input -> copy
+        m_copyMatrix = mat;
+        m_outerIndexPtr = m_copyMatrix.outerIndexPtr();
+        m_innerIndexPtr = m_copyMatrix.innerIndexPtr();
+        m_valuePtr      = m_copyMatrix.valuePtr();
+      }
+      else
+      {
+        m_outerIndexPtr = mat.outerIndexPtr();
+        m_innerIndexPtr = mat.innerIndexPtr();
+        m_valuePtr      = mat.valuePtr();
+      }
+    }
+    
+    template<typename InputMatrixType>
+    void grapInput_impl(const InputMatrixType& mat, internal::false_type)
+    {
+      m_copyMatrix = mat;
+      m_outerIndexPtr = m_copyMatrix.outerIndexPtr();
+      m_innerIndexPtr = m_copyMatrix.innerIndexPtr();
+      m_valuePtr      = m_copyMatrix.valuePtr();
+    }
+    
+    template<typename InputMatrixType>
+    void grapInput(const InputMatrixType& mat)
+    {
+      grapInput_impl(mat, internal::umfpack_helper_is_sparse_plain<InputMatrixType>());
+    }
+    
+    void analyzePattern_impl()
+    {
+      int errorCode = 0;
+      errorCode = umfpack_symbolic(m_copyMatrix.rows(), m_copyMatrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
+                                   &m_symbolic, 0, 0);
+
+      m_isInitialized = true;
+      m_info = errorCode ? InvalidInput : Success;
+      m_analysisIsOk = true;
+      m_factorizationIsOk = false;
+      m_extractedDataAreDirty = true;
+    }
+    
+    void factorize_impl()
+    {
+      int errorCode;
+      errorCode = umfpack_numeric(m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
+                                  m_symbolic, &m_numeric, 0, 0);
+
+      m_info = errorCode ? NumericalIssue : Success;
+      m_factorizationIsOk = true;
+      m_extractedDataAreDirty = true;
+    }
+
+    // cached data to reduce reallocation, etc.
+    mutable LUMatrixType m_l;
+    mutable LUMatrixType m_u;
+    mutable IntColVectorType m_p;
+    mutable IntRowVectorType m_q;
+
+    UmfpackMatrixType m_copyMatrix;
+    const Scalar* m_valuePtr;
+    const int* m_outerIndexPtr;
+    const int* m_innerIndexPtr;
+    void* m_numeric;
+    void* m_symbolic;
+
+    mutable ComputationInfo m_info;
+    bool m_isInitialized;
+    int m_factorizationIsOk;
+    int m_analysisIsOk;
+    mutable bool m_extractedDataAreDirty;
+    
+  private:
+    UmfPackLU(UmfPackLU& ) { }
+};
+
+
+template<typename MatrixType>
+void UmfPackLU<MatrixType>::extractData() const
+{
+  if (m_extractedDataAreDirty)
+  {
+    // get size of the data
+    int lnz, unz, rows, cols, nz_udiag;
+    umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());
+
+    // allocate data
+    m_l.resize(rows,(std::min)(rows,cols));
+    m_l.resizeNonZeros(lnz);
+
+    m_u.resize((std::min)(rows,cols),cols);
+    m_u.resizeNonZeros(unz);
+
+    m_p.resize(rows);
+    m_q.resize(cols);
+
+    // extract
+    umfpack_get_numeric(m_l.outerIndexPtr(), m_l.innerIndexPtr(), m_l.valuePtr(),
+                        m_u.outerIndexPtr(), m_u.innerIndexPtr(), m_u.valuePtr(),
+                        m_p.data(), m_q.data(), 0, 0, 0, m_numeric);
+
+    m_extractedDataAreDirty = false;
+  }
+}
+
+template<typename MatrixType>
+typename UmfPackLU<MatrixType>::Scalar UmfPackLU<MatrixType>::determinant() const
+{
+  Scalar det;
+  umfpack_get_determinant(&det, 0, m_numeric, 0);
+  return det;
+}
+
+template<typename MatrixType>
+template<typename BDerived,typename XDerived>
+bool UmfPackLU<MatrixType>::_solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const
+{
+  const int rhsCols = b.cols();
+  eigen_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major rhs yet");
+  eigen_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPackLU backend does not support non col-major result yet");
+  eigen_assert(b.derived().data() != x.derived().data() && "UmfPack does not support in-place solve");
+  
+  int errorCode;
+  for (int j=0; j<rhsCols; ++j)
+  {
+    errorCode = umfpack_solve(UMFPACK_A,
+        m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
+        &x.col(j).coeffRef(0), &b.const_cast_derived().col(j).coeffRef(0), m_numeric, 0, 0);
+    if (errorCode!=0)
+      return false;
+  }
+
+  return true;
+}
+
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<UmfPackLU<_MatrixType>, Rhs>
+  : solve_retval_base<UmfPackLU<_MatrixType>, Rhs>
+{
+  typedef UmfPackLU<_MatrixType> Dec;
+  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    dec()._solve(rhs(),dst);
+  }
+};
+
+template<typename _MatrixType, typename Rhs>
+struct sparse_solve_retval<UmfPackLU<_MatrixType>, Rhs>
+  : sparse_solve_retval_base<UmfPackLU<_MatrixType>, Rhs>
+{
+  typedef UmfPackLU<_MatrixType> Dec;
+  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
+
+  template<typename Dest> void evalTo(Dest& dst) const
+  {
+    this->defaultEvalTo(dst);
+  }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_UMFPACKSUPPORT_H
diff --git a/Eigen/src/misc/CMakeLists.txt b/Eigen/src/misc/CMakeLists.txt
new file mode 100644
index 0000000..a58ffb7
--- /dev/null
+++ b/Eigen/src/misc/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_misc_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_misc_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/misc COMPONENT Devel
+  )
diff --git a/Eigen/src/misc/Image.h b/Eigen/src/misc/Image.h
new file mode 100644
index 0000000..75c5f43
--- /dev/null
+++ b/Eigen/src/misc/Image.h
@@ -0,0 +1,84 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MISC_IMAGE_H
+#define EIGEN_MISC_IMAGE_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \class image_retval_base
+  *
+  */
+template<typename DecompositionType>
+struct traits<image_retval_base<DecompositionType> >
+{
+  typedef typename DecompositionType::MatrixType MatrixType;
+  typedef Matrix<
+    typename MatrixType::Scalar,
+    MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose
+                                   // dimension is the number of rows of the original matrix
+    Dynamic,                       // we don't know at compile time the dimension of the image (the rank)
+    MatrixType::Options,
+    MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix,
+    MatrixType::MaxColsAtCompileTime  // so it has the same number of rows and at most as many columns.
+  > ReturnType;
+};
+
+template<typename _DecompositionType> struct image_retval_base
+ : public ReturnByValue<image_retval_base<_DecompositionType> >
+{
+  typedef _DecompositionType DecompositionType;
+  typedef typename DecompositionType::MatrixType MatrixType;
+  typedef ReturnByValue<image_retval_base> Base;
+  typedef typename Base::Index Index;
+
+  image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix)
+    : m_dec(dec), m_rank(dec.rank()),
+      m_cols(m_rank == 0 ? 1 : m_rank),
+      m_originalMatrix(originalMatrix)
+  {}
+
+  inline Index rows() const { return m_dec.rows(); }
+  inline Index cols() const { return m_cols; }
+  inline Index rank() const { return m_rank; }
+  inline const DecompositionType& dec() const { return m_dec; }
+  inline const MatrixType& originalMatrix() const { return m_originalMatrix; }
+
+  template<typename Dest> inline void evalTo(Dest& dst) const
+  {
+    static_cast<const image_retval<DecompositionType>*>(this)->evalTo(dst);
+  }
+
+  protected:
+    const DecompositionType& m_dec;
+    Index m_rank, m_cols;
+    const MatrixType& m_originalMatrix;
+};
+
+} // end namespace internal
+
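+// Boilerplate shared by every image_retval<DecompositionType> specialization:
+// the required typedefs, base-class using-declarations, and forwarding
+// constructor, to be placed at the top of the specialization's body.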
+#define EIGEN_MAKE_IMAGE_HELPERS(DecompositionType) \
+  typedef typename DecompositionType::MatrixType MatrixType; \
+  typedef typename MatrixType::Scalar Scalar; \
+  typedef typename MatrixType::RealScalar RealScalar; \
+  typedef typename MatrixType::Index Index; \
+  typedef Eigen::internal::image_retval_base<DecompositionType> Base; \
+  using Base::dec; \
+  using Base::originalMatrix; \
+  using Base::rank; \
+  using Base::rows; \
+  using Base::cols; \
+  image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \
+    : Base(dec, originalMatrix) {}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MISC_IMAGE_H
diff --git a/Eigen/src/misc/Kernel.h b/Eigen/src/misc/Kernel.h
new file mode 100644
index 0000000..b9e1518
--- /dev/null
+++ b/Eigen/src/misc/Kernel.h
@@ -0,0 +1,81 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MISC_KERNEL_H
+#define EIGEN_MISC_KERNEL_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \class kernel_retval_base
+  *
+  */
+template<typename DecompositionType>
+struct traits<kernel_retval_base<DecompositionType> >
+{
+  typedef typename DecompositionType::MatrixType MatrixType;
+  typedef Matrix<
+    typename MatrixType::Scalar,
+    MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix"
+                                   // is the number of cols of the original matrix
+                                   // so that the product "matrix * kernel = zero" makes sense
+    Dynamic,                       // we don't know at compile-time the dimension of the kernel
+    MatrixType::Options,
+    MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter
+    MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space,
+                                     // whose dimension is the number of columns of the original matrix
+  > ReturnType;
+};
+
+template<typename _DecompositionType> struct kernel_retval_base
+ : public ReturnByValue<kernel_retval_base<_DecompositionType> >
+{
+  typedef _DecompositionType DecompositionType;
+  typedef ReturnByValue<kernel_retval_base> Base;
+  typedef typename Base::Index Index;
+
+  kernel_retval_base(const DecompositionType& dec)
+    : m_dec(dec),
+      m_rank(dec.rank()),
+      m_cols(m_rank==dec.cols() ? 1 : dec.cols() - m_rank)
+  {}
+
+  inline Index rows() const { return m_dec.cols(); }
+  inline Index cols() const { return m_cols; }
+  inline Index rank() const { return m_rank; }
+  inline const DecompositionType& dec() const { return m_dec; }
+
+  template<typename Dest> inline void evalTo(Dest& dst) const
+  {
+    static_cast<const kernel_retval<DecompositionType>*>(this)->evalTo(dst);
+  }
+
+  protected:
+    const DecompositionType& m_dec;
+    Index m_rank, m_cols;
+};
+
+} // end namespace internal
+
+#define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \
+  typedef typename DecompositionType::MatrixType MatrixType; \
+  typedef typename MatrixType::Scalar Scalar; \
+  typedef typename MatrixType::RealScalar RealScalar; \
+  typedef typename MatrixType::Index Index; \
+  typedef Eigen::internal::kernel_retval_base<DecompositionType> Base; \
+  using Base::dec; \
+  using Base::rank; \
+  using Base::rows; \
+  using Base::cols; \
+  kernel_retval(const DecompositionType& dec) : Base(dec) {}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MISC_KERNEL_H
diff --git a/Eigen/src/misc/Solve.h b/Eigen/src/misc/Solve.h
new file mode 100644
index 0000000..7f70d60
--- /dev/null
+++ b/Eigen/src/misc/Solve.h
@@ -0,0 +1,76 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MISC_SOLVE_H
+#define EIGEN_MISC_SOLVE_H
+
+namespace Eigen { 
+
+namespace internal {
+
+/** \class solve_retval_base
+  *
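+  * Helper for the expression-template solve mechanism: a decomposition's
+  * solve() member returns a solve_retval, and the assignment x = dec.solve(b)
+  * then calls evalTo(x) on the concrete solve_retval specialization.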
+  */
+template<typename DecompositionType, typename Rhs>
+struct traits<solve_retval_base<DecompositionType, Rhs> >
+{
+  typedef typename DecompositionType::MatrixType MatrixType;
+  typedef Matrix<typename Rhs::Scalar,
+                 MatrixType::ColsAtCompileTime,
+                 Rhs::ColsAtCompileTime,
+                 Rhs::PlainObject::Options,
+                 MatrixType::MaxColsAtCompileTime,
+                 Rhs::MaxColsAtCompileTime> ReturnType;
+};
+
+template<typename _DecompositionType, typename Rhs> struct solve_retval_base
+ : public ReturnByValue<solve_retval_base<_DecompositionType, Rhs> >
+{
+  typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned;
+  typedef _DecompositionType DecompositionType;
+  typedef ReturnByValue<solve_retval_base> Base;
+  typedef typename Base::Index Index;
+
+  solve_retval_base(const DecompositionType& dec, const Rhs& rhs)
+    : m_dec(dec), m_rhs(rhs)
+  {}
+
+  inline Index rows() const { return m_dec.cols(); }
+  inline Index cols() const { return m_rhs.cols(); }
+  inline const DecompositionType& dec() const { return m_dec; }
+  inline const RhsNestedCleaned& rhs() const { return m_rhs; }
+
+  template<typename Dest> inline void evalTo(Dest& dst) const
+  {
+    static_cast<const solve_retval<DecompositionType,Rhs>*>(this)->evalTo(dst);
+  }
+
+  protected:
+    const DecompositionType& m_dec;
+    typename Rhs::Nested m_rhs;
+};
+
+} // end namespace internal
+
+#define EIGEN_MAKE_SOLVE_HELPERS(DecompositionType,Rhs) \
+  typedef typename DecompositionType::MatrixType MatrixType; \
+  typedef typename MatrixType::Scalar Scalar; \
+  typedef typename MatrixType::RealScalar RealScalar; \
+  typedef typename MatrixType::Index Index; \
+  typedef Eigen::internal::solve_retval_base<DecompositionType,Rhs> Base; \
+  using Base::dec; \
+  using Base::rhs; \
+  using Base::rows; \
+  using Base::cols; \
+  solve_retval(const DecompositionType& dec, const Rhs& rhs) \
+    : Base(dec, rhs) {}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MISC_SOLVE_H
diff --git a/Eigen/src/misc/SparseSolve.h b/Eigen/src/misc/SparseSolve.h
new file mode 100644
index 0000000..244bb8e
--- /dev/null
+++ b/Eigen/src/misc/SparseSolve.h
@@ -0,0 +1,128 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_SOLVE_H
+#define EIGEN_SPARSE_SOLVE_H
+
+namespace Eigen { 
+
+namespace internal {
+
+template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval_base;
+template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval;
+  
+template<typename DecompositionType, typename Rhs>
+struct traits<sparse_solve_retval_base<DecompositionType, Rhs> >
+{
+  typedef typename DecompositionType::MatrixType MatrixType;
+  typedef SparseMatrix<typename Rhs::Scalar, Rhs::Options, typename Rhs::Index> ReturnType;
+};
+
+template<typename _DecompositionType, typename Rhs> struct sparse_solve_retval_base
+ : public ReturnByValue<sparse_solve_retval_base<_DecompositionType, Rhs> >
+{
+  typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned;
+  typedef _DecompositionType DecompositionType;
+  typedef ReturnByValue<sparse_solve_retval_base> Base;
+  typedef typename Base::Index Index;
+
+  sparse_solve_retval_base(const DecompositionType& dec, const Rhs& rhs)
+    : m_dec(dec), m_rhs(rhs)
+  {}
+
+  inline Index rows() const { return m_dec.cols(); }
+  inline Index cols() const { return m_rhs.cols(); }
+  inline const DecompositionType& dec() const { return m_dec; }
+  inline const RhsNestedCleaned& rhs() const { return m_rhs; }
+
+  template<typename Dest> inline void evalTo(Dest& dst) const
+  {
+    static_cast<const sparse_solve_retval<DecompositionType,Rhs>*>(this)->evalTo(dst);
+  }
+
+  protected:
+    template<typename DestScalar, int DestOptions, typename DestIndex>
+    inline void defaultEvalTo(SparseMatrix<DestScalar,DestOptions,DestIndex>& dst) const
+    {
+      // we process the sparse rhs in blocks of NbColsAtOnce columns, temporarily stored into a dense matrix.
+      static const int NbColsAtOnce = 4;
+      int rhsCols = m_rhs.cols();
+      int size = m_rhs.rows();
+      Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,rhsCols);
+      Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,rhsCols);
+      for(int k=0; k<rhsCols; k+=NbColsAtOnce)
+      {
+        int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
+        tmp.leftCols(actualCols) = m_rhs.middleCols(k,actualCols);
+        tmpX.leftCols(actualCols) = m_dec.solve(tmp.leftCols(actualCols));
+        dst.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
+      }
+    }
+    const DecompositionType& m_dec;
+    typename Rhs::Nested m_rhs;
+};
+
+#define EIGEN_MAKE_SPARSE_SOLVE_HELPERS(DecompositionType,Rhs) \
+  typedef typename DecompositionType::MatrixType MatrixType; \
+  typedef typename MatrixType::Scalar Scalar; \
+  typedef typename MatrixType::RealScalar RealScalar; \
+  typedef typename MatrixType::Index Index; \
+  typedef Eigen::internal::sparse_solve_retval_base<DecompositionType,Rhs> Base; \
+  using Base::dec; \
+  using Base::rhs; \
+  using Base::rows; \
+  using Base::cols; \
+  sparse_solve_retval(const DecompositionType& dec, const Rhs& rhs) \
+    : Base(dec, rhs) {}
+
+
+
+template<typename DecompositionType, typename Rhs, typename Guess> struct solve_retval_with_guess;
+
+template<typename DecompositionType, typename Rhs, typename Guess>
+struct traits<solve_retval_with_guess<DecompositionType, Rhs, Guess> >
+{
+  typedef typename DecompositionType::MatrixType MatrixType;
+  typedef Matrix<typename Rhs::Scalar,
+                 MatrixType::ColsAtCompileTime,
+                 Rhs::ColsAtCompileTime,
+                 Rhs::PlainObject::Options,
+                 MatrixType::MaxColsAtCompileTime,
+                 Rhs::MaxColsAtCompileTime> ReturnType;
+};
+
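+// Variant used by the iterative solvers: the destination is first seeded with
+// the user-supplied guess, then refined in place by _solveWithGuess().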
+template<typename DecompositionType, typename Rhs, typename Guess> struct solve_retval_with_guess
+ : public ReturnByValue<solve_retval_with_guess<DecompositionType, Rhs, Guess> >
+{
+  typedef typename DecompositionType::Index Index;
+
+  solve_retval_with_guess(const DecompositionType& dec, const Rhs& rhs, const Guess& guess)
+    : m_dec(dec), m_rhs(rhs), m_guess(guess)
+  {}
+
+  inline Index rows() const { return m_dec.cols(); }
+  inline Index cols() const { return m_rhs.cols(); }
+
+  template<typename Dest> inline void evalTo(Dest& dst) const
+  {
+    dst = m_guess;
+    m_dec._solveWithGuess(m_rhs,dst);
+  }
+
+  protected:
+    const DecompositionType& m_dec;
+    const typename Rhs::Nested m_rhs;
+    const typename Guess::Nested m_guess;
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_SOLVE_H
diff --git a/Eigen/src/misc/blas.h b/Eigen/src/misc/blas.h
new file mode 100644
index 0000000..6fce99e
--- /dev/null
+++ b/Eigen/src/misc/blas.h
@@ -0,0 +1,658 @@
+#ifndef BLAS_H
+#define BLAS_H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define BLASFUNC(FUNC) FUNC##_
+
+#ifdef __WIN64__
+typedef long long BLASLONG;
+typedef unsigned long long BLASULONG;
+#else
+typedef long BLASLONG;
+typedef unsigned long BLASULONG;
+#endif
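+
+/* All routines use the Fortran calling convention: every argument, including
+   scalars and sizes, is passed by pointer, and BLASFUNC appends the trailing
+   underscore of the common Fortran name mangling. A hypothetical dgemv call
+   (y = alpha*A*x + beta*y, with column-major double A[m*n], x[n], y[m]):
+
+     int m = 3, n = 3, inc = 1;
+     double alpha = 1.0, beta = 0.0;
+     BLASFUNC(dgemv)("N", &m, &n, &alpha, A, &m, x, &inc, &beta, y, &inc);
+*/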
+
+int    BLASFUNC(xerbla)(const char *, int *info, int);
+
+float  BLASFUNC(sdot)  (int *, float  *, int *, float  *, int *);
+float  BLASFUNC(sdsdot)(int *, float  *,        float  *, int *, float  *, int *);
+
+double BLASFUNC(dsdot) (int *, float  *, int *, float  *, int *);
+double BLASFUNC(ddot)  (int *, double *, int *, double *, int *);
+double BLASFUNC(qdot)  (int *, double *, int *, double *, int *);
+
+int  BLASFUNC(cdotuw)  (int *, float  *, int *, float  *, int *, float*);
+int  BLASFUNC(cdotcw)  (int *, float  *, int *, float  *, int *, float*);
+int  BLASFUNC(zdotuw)  (int *, double  *, int *, double  *, int *, double*);
+int  BLASFUNC(zdotcw)  (int *, double  *, int *, double  *, int *, double*);
+
+int    BLASFUNC(saxpy) (int *, float  *, float  *, int *, float  *, int *);
+int    BLASFUNC(daxpy) (int *, double *, double *, int *, double *, int *);
+int    BLASFUNC(qaxpy) (int *, double *, double *, int *, double *, int *);
+int    BLASFUNC(caxpy) (int *, float  *, float  *, int *, float  *, int *);
+int    BLASFUNC(zaxpy) (int *, double *, double *, int *, double *, int *);
+int    BLASFUNC(xaxpy) (int *, double *, double *, int *, double *, int *);
+int    BLASFUNC(caxpyc)(int *, float  *, float  *, int *, float  *, int *);
+int    BLASFUNC(zaxpyc)(int *, double *, double *, int *, double *, int *);
+int    BLASFUNC(xaxpyc)(int *, double *, double *, int *, double *, int *);
+
+int    BLASFUNC(scopy) (int *, float  *, int *, float  *, int *);
+int    BLASFUNC(dcopy) (int *, double *, int *, double *, int *);
+int    BLASFUNC(qcopy) (int *, double *, int *, double *, int *);
+int    BLASFUNC(ccopy) (int *, float  *, int *, float  *, int *);
+int    BLASFUNC(zcopy) (int *, double *, int *, double *, int *);
+int    BLASFUNC(xcopy) (int *, double *, int *, double *, int *);
+
+int    BLASFUNC(sswap) (int *, float  *, int *, float  *, int *);
+int    BLASFUNC(dswap) (int *, double *, int *, double *, int *);
+int    BLASFUNC(qswap) (int *, double *, int *, double *, int *);
+int    BLASFUNC(cswap) (int *, float  *, int *, float  *, int *);
+int    BLASFUNC(zswap) (int *, double *, int *, double *, int *);
+int    BLASFUNC(xswap) (int *, double *, int *, double *, int *);
+
+float  BLASFUNC(sasum) (int *, float  *, int *);
+float  BLASFUNC(scasum)(int *, float  *, int *);
+double BLASFUNC(dasum) (int *, double *, int *);
+double BLASFUNC(qasum) (int *, double *, int *);
+double BLASFUNC(dzasum)(int *, double *, int *);
+double BLASFUNC(qxasum)(int *, double *, int *);
+
+int    BLASFUNC(isamax)(int *, float  *, int *);
+int    BLASFUNC(idamax)(int *, double *, int *);
+int    BLASFUNC(iqamax)(int *, double *, int *);
+int    BLASFUNC(icamax)(int *, float  *, int *);
+int    BLASFUNC(izamax)(int *, double *, int *);
+int    BLASFUNC(ixamax)(int *, double *, int *);
+
+int    BLASFUNC(ismax) (int *, float  *, int *);
+int    BLASFUNC(idmax) (int *, double *, int *);
+int    BLASFUNC(iqmax) (int *, double *, int *);
+int    BLASFUNC(icmax) (int *, float  *, int *);
+int    BLASFUNC(izmax) (int *, double *, int *);
+int    BLASFUNC(ixmax) (int *, double *, int *);
+
+int    BLASFUNC(isamin)(int *, float  *, int *);
+int    BLASFUNC(idamin)(int *, double *, int *);
+int    BLASFUNC(iqamin)(int *, double *, int *);
+int    BLASFUNC(icamin)(int *, float  *, int *);
+int    BLASFUNC(izamin)(int *, double *, int *);
+int    BLASFUNC(ixamin)(int *, double *, int *);
+
+int    BLASFUNC(ismin)(int *, float  *, int *);
+int    BLASFUNC(idmin)(int *, double *, int *);
+int    BLASFUNC(iqmin)(int *, double *, int *);
+int    BLASFUNC(icmin)(int *, float  *, int *);
+int    BLASFUNC(izmin)(int *, double *, int *);
+int    BLASFUNC(ixmin)(int *, double *, int *);
+
+float  BLASFUNC(samax) (int *, float  *, int *);
+double BLASFUNC(damax) (int *, double *, int *);
+double BLASFUNC(qamax) (int *, double *, int *);
+float  BLASFUNC(scamax)(int *, float  *, int *);
+double BLASFUNC(dzamax)(int *, double *, int *);
+double BLASFUNC(qxamax)(int *, double *, int *);
+
+float  BLASFUNC(samin) (int *, float  *, int *);
+double BLASFUNC(damin) (int *, double *, int *);
+double BLASFUNC(qamin) (int *, double *, int *);
+float  BLASFUNC(scamin)(int *, float  *, int *);
+double BLASFUNC(dzamin)(int *, double *, int *);
+double BLASFUNC(qxamin)(int *, double *, int *);
+
+float  BLASFUNC(smax)  (int *, float  *, int *);
+double BLASFUNC(dmax)  (int *, double *, int *);
+double BLASFUNC(qmax)  (int *, double *, int *);
+float  BLASFUNC(scmax) (int *, float  *, int *);
+double BLASFUNC(dzmax) (int *, double *, int *);
+double BLASFUNC(qxmax) (int *, double *, int *);
+
+float  BLASFUNC(smin)  (int *, float  *, int *);
+double BLASFUNC(dmin)  (int *, double *, int *);
+double BLASFUNC(qmin)  (int *, double *, int *);
+float  BLASFUNC(scmin) (int *, float  *, int *);
+double BLASFUNC(dzmin) (int *, double *, int *);
+double BLASFUNC(qxmin) (int *, double *, int *);
+
+int    BLASFUNC(sscal) (int *,  float  *, float  *, int *);
+int    BLASFUNC(dscal) (int *,  double *, double *, int *);
+int    BLASFUNC(qscal) (int *,  double *, double *, int *);
+int    BLASFUNC(cscal) (int *,  float  *, float  *, int *);
+int    BLASFUNC(zscal) (int *,  double *, double *, int *);
+int    BLASFUNC(xscal) (int *,  double *, double *, int *);
+int    BLASFUNC(csscal)(int *,  float  *, float  *, int *);
+int    BLASFUNC(zdscal)(int *,  double *, double *, int *);
+int    BLASFUNC(xqscal)(int *,  double *, double *, int *);
+
+float  BLASFUNC(snrm2) (int *, float  *, int *);
+float  BLASFUNC(scnrm2)(int *, float  *, int *);
+
+double BLASFUNC(dnrm2) (int *, double *, int *);
+double BLASFUNC(qnrm2) (int *, double *, int *);
+double BLASFUNC(dznrm2)(int *, double *, int *);
+double BLASFUNC(qxnrm2)(int *, double *, int *);
+
+int    BLASFUNC(srot)  (int *, float  *, int *, float  *, int *, float  *, float  *);
+int    BLASFUNC(drot)  (int *, double *, int *, double *, int *, double *, double *);
+int    BLASFUNC(qrot)  (int *, double *, int *, double *, int *, double *, double *);
+int    BLASFUNC(csrot) (int *, float  *, int *, float  *, int *, float  *, float  *);
+int    BLASFUNC(zdrot) (int *, double *, int *, double *, int *, double *, double *);
+int    BLASFUNC(xqrot) (int *, double *, int *, double *, int *, double *, double *);
+
+int    BLASFUNC(srotg) (float  *, float  *, float  *, float  *);
+int    BLASFUNC(drotg) (double *, double *, double *, double *);
+int    BLASFUNC(qrotg) (double *, double *, double *, double *);
+int    BLASFUNC(crotg) (float  *, float  *, float  *, float  *);
+int    BLASFUNC(zrotg) (double *, double *, double *, double *);
+int    BLASFUNC(xrotg) (double *, double *, double *, double *);
+
+int    BLASFUNC(srotmg)(float  *, float  *, float  *, float  *, float  *);
+int    BLASFUNC(drotmg)(double *, double *, double *, double *, double *);
+
+int    BLASFUNC(srotm) (int *, float  *, int *, float  *, int *, float  *);
+int    BLASFUNC(drotm) (int *, double *, int *, double *, int *, double *);
+int    BLASFUNC(qrotm) (int *, double *, int *, double *, int *, double *);
+
+/* Level 2 routines */
+
+int BLASFUNC(sger)(int *,    int *, float *,  float *, int *,
+		   float *,  int *, float *,  int *);
+int BLASFUNC(dger)(int *,    int *, double *, double *, int *,
+		   double *, int *, double *, int *);
+int BLASFUNC(qger)(int *,    int *, double *, double *, int *,
+		   double *, int *, double *, int *);
+int BLASFUNC(cgeru)(int *,    int *, float *,  float *, int *,
+		    float *,  int *, float *,  int *);
+int BLASFUNC(cgerc)(int *,    int *, float *,  float *, int *,
+		    float *,  int *, float *,  int *);
+int BLASFUNC(zgeru)(int *,    int *, double *, double *, int *,
+		    double *, int *, double *, int *);
+int BLASFUNC(zgerc)(int *,    int *, double *, double *, int *,
+		    double *, int *, double *, int *);
+int BLASFUNC(xgeru)(int *,    int *, double *, double *, int *,
+		    double *, int *, double *, int *);
+int BLASFUNC(xgerc)(int *,    int *, double *, double *, int *,
+		    double *, int *, double *, int *);
+
+int BLASFUNC(sgemv)(char *, int *, int *, float  *, float  *, int *,
+		    float  *, int *, float  *, float  *, int *);
+int BLASFUNC(dgemv)(char *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+int BLASFUNC(qgemv)(char *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+int BLASFUNC(cgemv)(char *, int *, int *, float  *, float  *, int *,
+		    float  *, int *, float  *, float  *, int *);
+int BLASFUNC(zgemv)(char *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+int BLASFUNC(xgemv)(char *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+
+int BLASFUNC(strsv) (char *, char *, char *, int *, float  *, int *,
+		     float  *, int *);
+int BLASFUNC(dtrsv) (char *, char *, char *, int *, double *, int *,
+		     double *, int *);
+int BLASFUNC(qtrsv) (char *, char *, char *, int *, double *, int *,
+		     double *, int *);
+int BLASFUNC(ctrsv) (char *, char *, char *, int *, float  *, int *,
+		     float  *, int *);
+int BLASFUNC(ztrsv) (char *, char *, char *, int *, double *, int *,
+		     double *, int *);
+int BLASFUNC(xtrsv) (char *, char *, char *, int *, double *, int *,
+		     double *, int *);
+
+int BLASFUNC(stpsv) (char *, char *, char *, int *, float  *, float  *, int *);
+int BLASFUNC(dtpsv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(qtpsv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(ctpsv) (char *, char *, char *, int *, float  *, float  *, int *);
+int BLASFUNC(ztpsv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(xtpsv) (char *, char *, char *, int *, double *, double *, int *);
+
+int BLASFUNC(strmv) (char *, char *, char *, int *, float  *, int *,
+		     float  *, int *);
+int BLASFUNC(dtrmv) (char *, char *, char *, int *, double *, int *,
+		     double *, int *);
+int BLASFUNC(qtrmv) (char *, char *, char *, int *, double *, int *,
+		     double *, int *);
+int BLASFUNC(ctrmv) (char *, char *, char *, int *, float  *, int *,
+		     float  *, int *);
+int BLASFUNC(ztrmv) (char *, char *, char *, int *, double *, int *,
+		     double *, int *);
+int BLASFUNC(xtrmv) (char *, char *, char *, int *, double *, int *,
+		     double *, int *);
+
+int BLASFUNC(stpmv) (char *, char *, char *, int *, float  *, float  *, int *);
+int BLASFUNC(dtpmv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(qtpmv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(ctpmv) (char *, char *, char *, int *, float  *, float  *, int *);
+int BLASFUNC(ztpmv) (char *, char *, char *, int *, double *, double *, int *);
+int BLASFUNC(xtpmv) (char *, char *, char *, int *, double *, double *, int *);
+
+int BLASFUNC(stbmv) (char *, char *, char *, int *, int *, float  *, int *, float  *, int *);
+int BLASFUNC(dtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(qtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(ctbmv) (char *, char *, char *, int *, int *, float  *, int *, float  *, int *);
+int BLASFUNC(ztbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(xtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+
+int BLASFUNC(stbsv) (char *, char *, char *, int *, int *, float  *, int *, float  *, int *);
+int BLASFUNC(dtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(qtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(ctbsv) (char *, char *, char *, int *, int *, float  *, int *, float  *, int *);
+int BLASFUNC(ztbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+int BLASFUNC(xtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);
+
+int BLASFUNC(ssymv) (char *, int *, float  *, float *, int *,
+		     float  *, int *, float *, float *, int *);
+int BLASFUNC(dsymv) (char *, int *, double  *, double *, int *,
+		     double  *, int *, double *, double *, int *);
+int BLASFUNC(qsymv) (char *, int *, double  *, double *, int *,
+		     double  *, int *, double *, double *, int *);
+int BLASFUNC(csymv) (char *, int *, float  *, float *, int *,
+		     float  *, int *, float *, float *, int *);
+int BLASFUNC(zsymv) (char *, int *, double  *, double *, int *,
+		     double  *, int *, double *, double *, int *);
+int BLASFUNC(xsymv) (char *, int *, double  *, double *, int *,
+		     double  *, int *, double *, double *, int *);
+
+int BLASFUNC(sspmv) (char *, int *, float  *, float *,
+		     float  *, int *, float *, float *, int *);
+int BLASFUNC(dspmv) (char *, int *, double  *, double *,
+		     double  *, int *, double *, double *, int *);
+int BLASFUNC(qspmv) (char *, int *, double  *, double *,
+		     double  *, int *, double *, double *, int *);
+int BLASFUNC(cspmv) (char *, int *, float  *, float *,
+		     float  *, int *, float *, float *, int *);
+int BLASFUNC(zspmv) (char *, int *, double  *, double *,
+		     double  *, int *, double *, double *, int *);
+int BLASFUNC(xspmv) (char *, int *, double  *, double *,
+		     double  *, int *, double *, double *, int *);
+
+int BLASFUNC(ssyr) (char *, int *, float   *, float  *, int *,
+		    float  *, int *);
+int BLASFUNC(dsyr) (char *, int *, double  *, double *, int *,
+		    double *, int *);
+int BLASFUNC(qsyr) (char *, int *, double  *, double *, int *,
+		    double *, int *);
+int BLASFUNC(csyr) (char *, int *, float   *, float  *, int *,
+		    float  *, int *);
+int BLASFUNC(zsyr) (char *, int *, double  *, double *, int *,
+		    double *, int *);
+int BLASFUNC(xsyr) (char *, int *, double  *, double *, int *,
+		    double *, int *);
+
+int BLASFUNC(ssyr2) (char *, int *, float   *,
+		     float  *, int *, float  *, int *, float  *, int *);
+int BLASFUNC(dsyr2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *, int *);
+int BLASFUNC(qsyr2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *, int *);
+int BLASFUNC(csyr2) (char *, int *, float   *,
+		     float  *, int *, float  *, int *, float  *, int *);
+int BLASFUNC(zsyr2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *, int *);
+int BLASFUNC(xsyr2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *, int *);
+
+int BLASFUNC(sspr) (char *, int *, float   *, float  *, int *,
+		    float  *);
+int BLASFUNC(dspr) (char *, int *, double  *, double *, int *,
+		    double *);
+int BLASFUNC(qspr) (char *, int *, double  *, double *, int *,
+		    double *);
+int BLASFUNC(cspr) (char *, int *, float   *, float  *, int *,
+		    float  *);
+int BLASFUNC(zspr) (char *, int *, double  *, double *, int *,
+		    double *);
+int BLASFUNC(xspr) (char *, int *, double  *, double *, int *,
+		    double *);
+
+int BLASFUNC(sspr2) (char *, int *, float   *,
+		     float  *, int *, float  *, int *, float  *);
+int BLASFUNC(dspr2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *);
+int BLASFUNC(qspr2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *);
+int BLASFUNC(cspr2) (char *, int *, float   *,
+		     float  *, int *, float  *, int *, float  *);
+int BLASFUNC(zspr2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *);
+int BLASFUNC(xspr2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *);
+
+int BLASFUNC(cher) (char *, int *, float   *, float  *, int *,
+		    float  *, int *);
+int BLASFUNC(zher) (char *, int *, double  *, double *, int *,
+		    double *, int *);
+int BLASFUNC(xher) (char *, int *, double  *, double *, int *,
+		    double *, int *);
+
+int BLASFUNC(chpr) (char *, int *, float   *, float  *, int *, float  *);
+int BLASFUNC(zhpr) (char *, int *, double  *, double *, int *, double *);
+int BLASFUNC(xhpr) (char *, int *, double  *, double *, int *, double *);
+
+int BLASFUNC(cher2) (char *, int *, float   *,
+		     float  *, int *, float  *, int *, float  *, int *);
+int BLASFUNC(zher2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *, int *);
+int BLASFUNC(xher2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *, int *);
+
+int BLASFUNC(chpr2) (char *, int *, float   *,
+		     float  *, int *, float  *, int *, float  *);
+int BLASFUNC(zhpr2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *);
+int BLASFUNC(xhpr2) (char *, int *, double  *,
+		     double *, int *, double *, int *, double *);
+
+int BLASFUNC(chemv) (char *, int *, float  *, float *, int *,
+		     float  *, int *, float *, float *, int *);
+int BLASFUNC(zhemv) (char *, int *, double  *, double *, int *,
+		     double  *, int *, double *, double *, int *);
+int BLASFUNC(xhemv) (char *, int *, double  *, double *, int *,
+		     double  *, int *, double *, double *, int *);
+
+int BLASFUNC(chpmv) (char *, int *, float  *, float *,
+		     float  *, int *, float *, float *, int *);
+int BLASFUNC(zhpmv) (char *, int *, double  *, double *,
+		     double  *, int *, double *, double *, int *);
+int BLASFUNC(xhpmv) (char *, int *, double  *, double *,
+		     double  *, int *, double *, double *, int *);
+
+int BLASFUNC(snorm)(char *, int *, int *, float  *, int *);
+int BLASFUNC(dnorm)(char *, int *, int *, double *, int *);
+int BLASFUNC(cnorm)(char *, int *, int *, float  *, int *);
+int BLASFUNC(znorm)(char *, int *, int *, double *, int *);
+
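+/* Naming convention (informal note; the mapping below follows common BLAS
+   usage): the leading letter selects the scalar type -- s = float,
+   d = double, c = single-precision complex, z = double-precision complex --
+   and the q/x prefixes are the extended-precision real/complex variants
+   found in GotoBLAS-derived implementations. Complex arguments are declared
+   as pointers to the underlying real type. */
+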
+int BLASFUNC(sgbmv)(char *, int *, int *, int *, int *, float  *, float  *, int *,
+		    float  *, int *, float  *, float  *, int *);
+int BLASFUNC(dgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+int BLASFUNC(qgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+int BLASFUNC(cgbmv)(char *, int *, int *, int *, int *, float  *, float  *, int *,
+		    float  *, int *, float  *, float  *, int *);
+int BLASFUNC(zgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+int BLASFUNC(xgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+
+int BLASFUNC(ssbmv)(char *, int *, int *, float  *, float  *, int *,
+		    float  *, int *, float  *, float  *, int *);
+int BLASFUNC(dsbmv)(char *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+int BLASFUNC(qsbmv)(char *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+int BLASFUNC(csbmv)(char *, int *, int *, float  *, float  *, int *,
+		    float  *, int *, float  *, float  *, int *);
+int BLASFUNC(zsbmv)(char *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+int BLASFUNC(xsbmv)(char *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+
+int BLASFUNC(chbmv)(char *, int *, int *, float  *, float  *, int *,
+		    float  *, int *, float  *, float  *, int *);
+int BLASFUNC(zhbmv)(char *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+int BLASFUNC(xhbmv)(char *, int *, int *, double *, double *, int *,
+		    double *, int *, double *, double *, int *);
+
+/* Level 3 routines */
+
+int BLASFUNC(sgemm)(char *, char *, int *, int *, int *, float *,
+	   float  *, int *, float  *, int *, float  *, float  *, int *);
+int BLASFUNC(dgemm)(char *, char *, int *, int *, int *, double *,
+	   double *, int *, double *, int *, double *, double *, int *);
+int BLASFUNC(qgemm)(char *, char *, int *, int *, int *, double *,
+	   double *, int *, double *, int *, double *, double *, int *);
+int BLASFUNC(cgemm)(char *, char *, int *, int *, int *, float *,
+	   float  *, int *, float  *, int *, float  *, float  *, int *);
+int BLASFUNC(zgemm)(char *, char *, int *, int *, int *, double *,
+	   double *, int *, double *, int *, double *, double *, int *);
+int BLASFUNC(xgemm)(char *, char *, int *, int *, int *, double *,
+	   double *, int *, double *, int *, double *, double *, int *);
+
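+/* Usage sketch (illustrative only; assumes BLASFUNC(dgemm) resolves to the
+   platform's dgemm symbol): every argument is passed by address,
+   Fortran-style, and matrices are column-major. A plain
+   C = alpha*A*B + beta*C for square n-by-n operands reads:
+
+     int n = 4, lda = 4, ldb = 4, ldc = 4;
+     double alpha = 1.0, beta = 0.0;
+     double A[16], B[16], C[16];
+     BLASFUNC(dgemm)("N", "N", &n, &n, &n, &alpha,
+                     A, &lda, B, &ldb, &beta, C, &ldc);
+*/
+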
+int BLASFUNC(cgemm3m)(char *, char *, int *, int *, int *, float *,
+	   float  *, int *, float  *, int *, float  *, float  *, int *);
+int BLASFUNC(zgemm3m)(char *, char *, int *, int *, int *, double *,
+	   double *, int *, double *, int *, double *, double *, int *);
+int BLASFUNC(xgemm3m)(char *, char *, int *, int *, int *, double *,
+	   double *, int *, double *, int *, double *, double *, int *);
+
+int BLASFUNC(sge2mm)(char *, char *, char *, int *, int *,
+		     float *, float  *, int *, float  *, int *,
+		     float *, float  *, int *);
+int BLASFUNC(dge2mm)(char *, char *, char *, int *, int *,
+		     double *, double  *, int *, double  *, int *,
+		     double *, double  *, int *);
+int BLASFUNC(cge2mm)(char *, char *, char *, int *, int *,
+		     float *, float  *, int *, float  *, int *,
+		     float *, float  *, int *);
+int BLASFUNC(zge2mm)(char *, char *, char *, int *, int *,
+		     double *, double  *, int *, double  *, int *,
+		     double *, double  *, int *);
+
+int BLASFUNC(strsm)(char *, char *, char *, char *, int *, int *,
+	   float *,  float *, int *, float *, int *);
+int BLASFUNC(dtrsm)(char *, char *, char *, char *, int *, int *,
+	   double *,  double *, int *, double *, int *);
+int BLASFUNC(qtrsm)(char *, char *, char *, char *, int *, int *,
+	   double *,  double *, int *, double *, int *);
+int BLASFUNC(ctrsm)(char *, char *, char *, char *, int *, int *,
+	   float *,  float *, int *, float *, int *);
+int BLASFUNC(ztrsm)(char *, char *, char *, char *, int *, int *,
+	   double *,  double *, int *, double *, int *);
+int BLASFUNC(xtrsm)(char *, char *, char *, char *, int *, int *,
+	   double *,  double *, int *, double *, int *);
+
+int BLASFUNC(strmm)(char *, char *, char *, char *, int *, int *,
+	   float *,  float *, int *, float *, int *);
+int BLASFUNC(dtrmm)(char *, char *, char *, char *, int *, int *,
+	   double *,  double *, int *, double *, int *);
+int BLASFUNC(qtrmm)(char *, char *, char *, char *, int *, int *,
+	   double *,  double *, int *, double *, int *);
+int BLASFUNC(ctrmm)(char *, char *, char *, char *, int *, int *,
+	   float *,  float *, int *, float *, int *);
+int BLASFUNC(ztrmm)(char *, char *, char *, char *, int *, int *,
+	   double *,  double *, int *, double *, int *);
+int BLASFUNC(xtrmm)(char *, char *, char *, char *, int *, int *,
+	   double *,  double *, int *, double *, int *);
+
+int BLASFUNC(ssymm)(char *, char *, int *, int *, float  *, float  *, int *,
+	   float  *, int *, float  *, float  *, int *);
+int BLASFUNC(dsymm)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, int *, double *, double *, int *);
+int BLASFUNC(qsymm)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, int *, double *, double *, int *);
+int BLASFUNC(csymm)(char *, char *, int *, int *, float  *, float  *, int *,
+	   float  *, int *, float  *, float  *, int *);
+int BLASFUNC(zsymm)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, int *, double *, double *, int *);
+int BLASFUNC(xsymm)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, int *, double *, double *, int *);
+
+int BLASFUNC(csymm3m)(char *, char *, int *, int *, float  *, float  *, int *,
+	   float  *, int *, float  *, float  *, int *);
+int BLASFUNC(zsymm3m)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, int *, double *, double *, int *);
+int BLASFUNC(xsymm3m)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, int *, double *, double *, int *);
+
+int BLASFUNC(ssyrk)(char *, char *, int *, int *, float  *, float  *, int *,
+	   float  *, float  *, int *);
+int BLASFUNC(dsyrk)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, double *, int *);
+int BLASFUNC(qsyrk)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, double *, int *);
+int BLASFUNC(csyrk)(char *, char *, int *, int *, float  *, float  *, int *,
+	   float  *, float  *, int *);
+int BLASFUNC(zsyrk)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, double *, int *);
+int BLASFUNC(xsyrk)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, double *, int *);
+
+int BLASFUNC(ssyr2k)(char *, char *, int *, int *, float  *, float  *, int *,
+	   float *, int *, float  *, float  *, int *);
+int BLASFUNC(dsyr2k)(char *, char *, int *, int *, double *, double *, int *,
+	   double*, int *, double *, double *, int *);
+int BLASFUNC(qsyr2k)(char *, char *, int *, int *, double *, double *, int *,
+	   double*, int *, double *, double *, int *);
+int BLASFUNC(csyr2k)(char *, char *, int *, int *, float  *, float  *, int *,
+	   float *, int *, float  *, float  *, int *);
+int BLASFUNC(zsyr2k)(char *, char *, int *, int *, double *, double *, int *,
+	   double*, int *, double *, double *, int *);
+int BLASFUNC(xsyr2k)(char *, char *, int *, int *, double *, double *, int *,
+	   double*, int *, double *, double *, int *);
+
+int BLASFUNC(chemm)(char *, char *, int *, int *, float  *, float  *, int *,
+	   float  *, int *, float  *, float  *, int *);
+int BLASFUNC(zhemm)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, int *, double *, double *, int *);
+int BLASFUNC(xhemm)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, int *, double *, double *, int *);
+
+int BLASFUNC(chemm3m)(char *, char *, int *, int *, float  *, float  *, int *,
+	   float  *, int *, float  *, float  *, int *);
+int BLASFUNC(zhemm3m)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, int *, double *, double *, int *);
+int BLASFUNC(xhemm3m)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, int *, double *, double *, int *);
+
+int BLASFUNC(cherk)(char *, char *, int *, int *, float  *, float  *, int *,
+	   float  *, float  *, int *);
+int BLASFUNC(zherk)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, double *, int *);
+int BLASFUNC(xherk)(char *, char *, int *, int *, double *, double *, int *,
+	   double *, double *, int *);
+
+int BLASFUNC(cher2k)(char *, char *, int *, int *, float  *, float  *, int *,
+	   float *, int *, float  *, float  *, int *);
+int BLASFUNC(zher2k)(char *, char *, int *, int *, double *, double *, int *,
+	   double*, int *, double *, double *, int *);
+int BLASFUNC(xher2k)(char *, char *, int *, int *, double *, double *, int *,
+	   double*, int *, double *, double *, int *);
+int BLASFUNC(cher2m)(char *, char *, char *, int *, int *, float  *, float  *, int *,
+	   float *, int *, float  *, float  *, int *);
+int BLASFUNC(zher2m)(char *, char *, char *, int *, int *, double *, double *, int *,
+	   double*, int *, double *, double *, int *);
+int BLASFUNC(xher2m)(char *, char *, char *, int *, int *, double *, double *, int *,
+	   double*, int *, double *, double *, int *);
+
+int BLASFUNC(sgemt)(char *, int *, int *, float  *, float  *, int *,
+		    float  *, int *);
+int BLASFUNC(dgemt)(char *, int *, int *, double *, double *, int *,
+		    double *, int *);
+int BLASFUNC(cgemt)(char *, int *, int *, float  *, float  *, int *,
+		    float  *, int *);
+int BLASFUNC(zgemt)(char *, int *, int *, double *, double *, int *,
+		    double *, int *);
+
+int BLASFUNC(sgema)(char *, char *, int *, int *, float  *,
+		    float  *, int *, float *, float  *, int *, float *, int *);
+int BLASFUNC(dgema)(char *, char *, int *, int *, double *,
+		    double *, int *, double*, double *, int *, double*, int *);
+int BLASFUNC(cgema)(char *, char *, int *, int *, float  *,
+		    float  *, int *, float *, float  *, int *, float *, int *);
+int BLASFUNC(zgema)(char *, char *, int *, int *, double *,
+		    double *, int *, double*, double *, int *, double*, int *);
+
+int BLASFUNC(sgems)(char *, char *, int *, int *, float  *,
+		    float  *, int *, float *, float  *, int *, float *, int *);
+int BLASFUNC(dgems)(char *, char *, int *, int *, double *,
+		    double *, int *, double*, double *, int *, double*, int *);
+int BLASFUNC(cgems)(char *, char *, int *, int *, float  *,
+		    float  *, int *, float *, float  *, int *, float *, int *);
+int BLASFUNC(zgems)(char *, char *, int *, int *, double *,
+		    double *, int *, double*, double *, int *, double*, int *);
+
+int BLASFUNC(sgetf2)(int *, int *, float  *, int *, int *, int *);
+int BLASFUNC(dgetf2)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(qgetf2)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(cgetf2)(int *, int *, float  *, int *, int *, int *);
+int BLASFUNC(zgetf2)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(xgetf2)(int *, int *, double *, int *, int *, int *);
+
+int BLASFUNC(sgetrf)(int *, int *, float  *, int *, int *, int *);
+int BLASFUNC(dgetrf)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(qgetrf)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(cgetrf)(int *, int *, float  *, int *, int *, int *);
+int BLASFUNC(zgetrf)(int *, int *, double *, int *, int *, int *);
+int BLASFUNC(xgetrf)(int *, int *, double *, int *, int *, int *);
+
+int BLASFUNC(slaswp)(int *, float  *, int *, int *, int *, int *, int *);
+int BLASFUNC(dlaswp)(int *, double *, int *, int *, int *, int *, int *);
+int BLASFUNC(qlaswp)(int *, double *, int *, int *, int *, int *, int *);
+int BLASFUNC(claswp)(int *, float  *, int *, int *, int *, int *, int *);
+int BLASFUNC(zlaswp)(int *, double *, int *, int *, int *, int *, int *);
+int BLASFUNC(xlaswp)(int *, double *, int *, int *, int *, int *, int *);
+
+int BLASFUNC(sgetrs)(char *, int *, int *, float  *, int *, int *, float  *, int *, int *);
+int BLASFUNC(dgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
+int BLASFUNC(qgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
+int BLASFUNC(cgetrs)(char *, int *, int *, float  *, int *, int *, float  *, int *, int *);
+int BLASFUNC(zgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
+int BLASFUNC(xgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);
+
+int BLASFUNC(sgesv)(int *, int *, float  *, int *, int *, float *, int *, int *);
+int BLASFUNC(dgesv)(int *, int *, double *, int *, int *, double*, int *, int *);
+int BLASFUNC(qgesv)(int *, int *, double *, int *, int *, double*, int *, int *);
+int BLASFUNC(cgesv)(int *, int *, float  *, int *, int *, float *, int *, int *);
+int BLASFUNC(zgesv)(int *, int *, double *, int *, int *, double*, int *, int *);
+int BLASFUNC(xgesv)(int *, int *, double *, int *, int *, double*, int *, int *);
+
+int BLASFUNC(spotf2)(char *, int *, float  *, int *, int *);
+int BLASFUNC(dpotf2)(char *, int *, double *, int *, int *);
+int BLASFUNC(qpotf2)(char *, int *, double *, int *, int *);
+int BLASFUNC(cpotf2)(char *, int *, float  *, int *, int *);
+int BLASFUNC(zpotf2)(char *, int *, double *, int *, int *);
+int BLASFUNC(xpotf2)(char *, int *, double *, int *, int *);
+
+int BLASFUNC(spotrf)(char *, int *, float  *, int *, int *);
+int BLASFUNC(dpotrf)(char *, int *, double *, int *, int *);
+int BLASFUNC(qpotrf)(char *, int *, double *, int *, int *);
+int BLASFUNC(cpotrf)(char *, int *, float  *, int *, int *);
+int BLASFUNC(zpotrf)(char *, int *, double *, int *, int *);
+int BLASFUNC(xpotrf)(char *, int *, double *, int *, int *);
+
+int BLASFUNC(slauu2)(char *, int *, float  *, int *, int *);
+int BLASFUNC(dlauu2)(char *, int *, double *, int *, int *);
+int BLASFUNC(qlauu2)(char *, int *, double *, int *, int *);
+int BLASFUNC(clauu2)(char *, int *, float  *, int *, int *);
+int BLASFUNC(zlauu2)(char *, int *, double *, int *, int *);
+int BLASFUNC(xlauu2)(char *, int *, double *, int *, int *);
+
+int BLASFUNC(slauum)(char *, int *, float  *, int *, int *);
+int BLASFUNC(dlauum)(char *, int *, double *, int *, int *);
+int BLASFUNC(qlauum)(char *, int *, double *, int *, int *);
+int BLASFUNC(clauum)(char *, int *, float  *, int *, int *);
+int BLASFUNC(zlauum)(char *, int *, double *, int *, int *);
+int BLASFUNC(xlauum)(char *, int *, double *, int *, int *);
+
+int BLASFUNC(strti2)(char *, char *, int *, float  *, int *, int *);
+int BLASFUNC(dtrti2)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(qtrti2)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(ctrti2)(char *, char *, int *, float  *, int *, int *);
+int BLASFUNC(ztrti2)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(xtrti2)(char *, char *, int *, double *, int *, int *);
+
+int BLASFUNC(strtri)(char *, char *, int *, float  *, int *, int *);
+int BLASFUNC(dtrtri)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(qtrtri)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(ctrtri)(char *, char *, int *, float  *, int *, int *);
+int BLASFUNC(ztrtri)(char *, char *, int *, double *, int *, int *);
+int BLASFUNC(xtrtri)(char *, char *, int *, double *, int *, int *);
+
+int BLASFUNC(spotri)(char *, int *, float  *, int *, int *);
+int BLASFUNC(dpotri)(char *, int *, double *, int *, int *);
+int BLASFUNC(qpotri)(char *, int *, double *, int *, int *);
+int BLASFUNC(cpotri)(char *, int *, float  *, int *, int *);
+int BLASFUNC(zpotri)(char *, int *, double *, int *, int *);
+int BLASFUNC(xpotri)(char *, int *, double *, int *, int *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/Eigen/src/plugins/ArrayCwiseBinaryOps.h
new file mode 100644
index 0000000..1951286
--- /dev/null
+++ b/Eigen/src/plugins/ArrayCwiseBinaryOps.h
@@ -0,0 +1,253 @@
+/** \returns an expression of the coefficient-wise product of \c *this and \a other
+  *
+  * \sa MatrixBase::cwiseProduct
+  */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)
+operator*(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)(derived(), other.derived());
+}
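+
+// Usage sketch (illustrative, not part of this header): on arrays, operator*
+// is the coefficient-wise product, evaluated lazily:
+//   Eigen::ArrayXXf a(2,2), b(2,2);
+//   a << 1, 2, 3, 4;
+//   b << 5, 6, 7, 8;
+//   Eigen::ArrayXXf c = a * b;  // c(i,j) == a(i,j) * b(i,j)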
+
+/** \returns an expression of the coefficient-wise quotient of \c *this and \a other
+  *
+  * \sa MatrixBase::cwiseQuotient
+  */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>
+operator/(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise min of \c *this and \a other
+  *
+  * Example: \include Cwise_min.cpp
+  * Output: \verbinclude Cwise_min.out
+  *
+  * \sa max()
+  */
+EIGEN_MAKE_CWISE_BINARY_OP(min,internal::scalar_min_op)
+
+/** \returns an expression of the coefficient-wise min of \c *this and scalar \a other
+  *
+  * \sa max()
+  */
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived,
+                                        const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+min
+#else
+(min)
+#endif
+(const Scalar &other) const
+{
+  return (min)(Derived::PlainObject::Constant(rows(), cols(), other));
+}
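+
+// Usage sketch (illustrative, not part of this header): the scalar overloads
+// of min/max clamp every coefficient against the given bound:
+//   Eigen::ArrayXf a(4);
+//   a << -2, 0, 1, 5;
+//   Eigen::ArrayXf clamped = a.min(3.f).max(0.f);  // [0, 0, 1, 3]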
+
+/** \returns an expression of the coefficient-wise max of \c *this and \a other
+  *
+  * Example: \include Cwise_max.cpp
+  * Output: \verbinclude Cwise_max.out
+  *
+  * \sa min()
+  */
+EIGEN_MAKE_CWISE_BINARY_OP(max,internal::scalar_max_op)
+
+/** \returns an expression of the coefficient-wise max of \c *this and scalar \a other
+  *
+  * \sa min()
+  */
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived,
+                                        const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+max
+#else
+(max)
+#endif
+(const Scalar &other) const
+{
+  return (max)(Derived::PlainObject::Constant(rows(), cols(), other));
+}
+
+
+#define EIGEN_MAKE_CWISE_COMP_OP(OP, COMPARATOR) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived> \
+OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+{ \
+  return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived>(derived(), other.derived()); \
+}\
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> > Cmp ## COMPARATOR ## ReturnType; \
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject>, const Derived > RCmp ## COMPARATOR ## ReturnType; \
+EIGEN_STRONG_INLINE const Cmp ## COMPARATOR ## ReturnType \
+OP(const Scalar& s) const { \
+  return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \
+} \
+friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \
+OP(const Scalar& s, const Derived& d) { \
+  return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \
+}
+
+#define EIGEN_MAKE_CWISE_COMP_R_OP(OP, R_OP, RCOMPARATOR) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived> \
+OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+{ \
+  return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived>(other.derived(), derived()); \
+} \
+\
+inline const RCmp ## RCOMPARATOR ## ReturnType \
+OP(const Scalar& s) const { \
+  return Derived::PlainObject::Constant(rows(), cols(), s).R_OP(*this); \
+} \
+friend inline const Cmp ## RCOMPARATOR ## ReturnType \
+OP(const Scalar& s, const Derived& d) { \
+  return d.R_OP(Derived::PlainObject::Constant(d.rows(), d.cols(), s)); \
+}
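+
+// Usage sketch (illustrative, not part of this header): the two macros above
+// generate the coefficient-wise comparisons; each returns an expression of
+// booleans that can be reduced with all()/any():
+//   Eigen::ArrayXf a(3);
+//   a << 1, 2, 3;
+//   bool anyBig = (a > 2.5f).any();  // true
+//   bool allPos = (a > 0.f).all();   // true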
+
+
+/** \returns an expression of the coefficient-wise \< operator of *this and \a other
+  *
+  * Example: \include Cwise_less.cpp
+  * Output: \verbinclude Cwise_less.out
+  *
+  * \sa all(), any(), operator>(), operator<=()
+  */
+EIGEN_MAKE_CWISE_COMP_OP(operator<, LT)
+
+/** \returns an expression of the coefficient-wise \<= operator of *this and \a other
+  *
+  * Example: \include Cwise_less_equal.cpp
+  * Output: \verbinclude Cwise_less_equal.out
+  *
+  * \sa all(), any(), operator>=(), operator<()
+  */
+EIGEN_MAKE_CWISE_COMP_OP(operator<=, LE)
+
+/** \returns an expression of the coefficient-wise \> operator of *this and \a other
+  *
+  * Example: \include Cwise_greater.cpp
+  * Output: \verbinclude Cwise_greater.out
+  *
+  * \sa all(), any(), operator>=(), operator<()
+  */
+EIGEN_MAKE_CWISE_COMP_R_OP(operator>, operator<, LT)
+
+/** \returns an expression of the coefficient-wise \>= operator of *this and \a other
+  *
+  * Example: \include Cwise_greater_equal.cpp
+  * Output: \verbinclude Cwise_greater_equal.out
+  *
+  * \sa all(), any(), operator>(), operator<=()
+  */
+EIGEN_MAKE_CWISE_COMP_R_OP(operator>=, operator<=, LE)
+
+/** \returns an expression of the coefficient-wise == operator of *this and \a other
+  *
+  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+  * isMuchSmallerThan().
+  *
+  * Example: \include Cwise_equal_equal.cpp
+  * Output: \verbinclude Cwise_equal_equal.out
+  *
+  * \sa all(), any(), isApprox(), isMuchSmallerThan()
+  */
+EIGEN_MAKE_CWISE_COMP_OP(operator==, EQ)
+
+/** \returns an expression of the coefficient-wise != operator of *this and \a other
+  *
+  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+  * isMuchSmallerThan().
+  *
+  * Example: \include Cwise_not_equal.cpp
+  * Output: \verbinclude Cwise_not_equal.out
+  *
+  * \sa all(), any(), isApprox(), isMuchSmallerThan()
+  */
+EIGEN_MAKE_CWISE_COMP_OP(operator!=, NEQ)
+
+#undef EIGEN_MAKE_CWISE_COMP_OP
+#undef EIGEN_MAKE_CWISE_COMP_R_OP
+
+// scalar addition
+
+/** \returns an expression of \c *this with each coeff incremented by the constant \a scalar
+  *
+  * Example: \include Cwise_plus.cpp
+  * Output: \verbinclude Cwise_plus.out
+  *
+  * \sa operator+=(), operator-()
+  */
+inline const CwiseUnaryOp<internal::scalar_add_op<Scalar>, const Derived>
+operator+(const Scalar& scalar) const
+{
+  return CwiseUnaryOp<internal::scalar_add_op<Scalar>, const Derived>(derived(), internal::scalar_add_op<Scalar>(scalar));
+}
+
+friend inline const CwiseUnaryOp<internal::scalar_add_op<Scalar>, const Derived>
+operator+(const Scalar& scalar,const EIGEN_CURRENT_STORAGE_BASE_CLASS<Derived>& other)
+{
+  return other + scalar;
+}
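+
+// Usage sketch (illustrative, not part of this header): scalar addition
+// applies to every coefficient and works from either side:
+//   Eigen::ArrayXf a(3);
+//   a << 1, 2, 3;
+//   Eigen::ArrayXf b = a + 1.f;  // [2, 3, 4]
+//   Eigen::ArrayXf c = 1.f + a;  // same result, via the friend overload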
+
+/** \returns an expression of \c *this with each coeff decremented by the constant \a scalar
+  *
+  * Example: \include Cwise_minus.cpp
+  * Output: \verbinclude Cwise_minus.out
+  *
+  * \sa operator+(), operator-=()
+  */
+inline const CwiseUnaryOp<internal::scalar_add_op<Scalar>, const Derived>
+operator-(const Scalar& scalar) const
+{
+  return *this + (-scalar);
+}
+
+friend inline const CwiseUnaryOp<internal::scalar_add_op<Scalar>, const CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const Derived> >
+operator-(const Scalar& scalar,const EIGEN_CURRENT_STORAGE_BASE_CLASS<Derived>& other)
+{
+  return (-other) + scalar;
+}
+
+/** \returns an expression of the coefficient-wise && operator of *this and \a other
+  *
+  * \warning this operator is for expressions of bool only.
+  *
+  * Example: \include Cwise_boolean_and.cpp
+  * Output: \verbinclude Cwise_boolean_and.out
+  *
+  * \sa operator||(), select()
+  */
+template<typename OtherDerived>
+inline const CwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>
+operator&&(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),
+                      THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
+  return CwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>(derived(),other.derived());
+}
+
+/** \returns an expression of the coefficient-wise || operator of *this and \a other
+  *
+  * \warning this operator is for expressions of bool only.
+  *
+  * Example: \include Cwise_boolean_or.cpp
+  * Output: \verbinclude Cwise_boolean_or.out
+  *
+  * \sa operator&&(), select()
+  */
+template<typename OtherDerived>
+inline const CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>
+operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),
+                      THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
+  return CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>(derived(),other.derived());
+}
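+
+// Usage sketch (illustrative, not part of this header): && and || combine
+// boolean expressions coefficient-wise:
+//   Eigen::ArrayXf a(3);
+//   a << -1, 2, 3;
+//   bool anyInRange = ((a > 0.f) && (a < 2.5f)).any();  // true, a(1) == 2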
+
+
diff --git a/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/Eigen/src/plugins/ArrayCwiseUnaryOps.h
new file mode 100644
index 0000000..1c3ed3f
--- /dev/null
+++ b/Eigen/src/plugins/ArrayCwiseUnaryOps.h
@@ -0,0 +1,187 @@
+
+
+/** \returns an expression of the coefficient-wise absolute value of \c *this
+  *
+  * Example: \include Cwise_abs.cpp
+  * Output: \verbinclude Cwise_abs.out
+  *
+  * \sa abs2()
+  */
+EIGEN_STRONG_INLINE const CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived>
+abs() const
+{
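+  // derived() converts implicitly to the CwiseUnaryOp return type declared
+  // above; the absolute values are only computed when the expression is
+  // evaluated (the same pattern is used by the other unary ops below)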
+  return derived();
+}
+
+/** \returns an expression of the coefficient-wise squared absolute value of \c *this
+  *
+  * Example: \include Cwise_abs2.cpp
+  * Output: \verbinclude Cwise_abs2.out
+  *
+  * \sa abs(), square()
+  */
+EIGEN_STRONG_INLINE const CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived>
+abs2() const
+{
+  return derived();
+}
+
+/** \returns an expression of the coefficient-wise exponential of *this.
+  *
+  * Example: \include Cwise_exp.cpp
+  * Output: \verbinclude Cwise_exp.out
+  *
+  * \sa pow(), log(), sin(), cos()
+  */
+inline const CwiseUnaryOp<internal::scalar_exp_op<Scalar>, const Derived>
+exp() const
+{
+  return derived();
+}
+
+/** \returns an expression of the coefficient-wise logarithm of *this.
+  *
+  * Example: \include Cwise_log.cpp
+  * Output: \verbinclude Cwise_log.out
+  *
+  * \sa exp()
+  */
+inline const CwiseUnaryOp<internal::scalar_log_op<Scalar>, const Derived>
+log() const
+{
+  return derived();
+}
+
+/** \returns an expression of the coefficient-wise square root of *this.
+  *
+  * Example: \include Cwise_sqrt.cpp
+  * Output: \verbinclude Cwise_sqrt.out
+  *
+  * \sa pow(), square()
+  */
+inline const CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived>
+sqrt() const
+{
+  return derived();
+}
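+
+// Usage sketch (illustrative, not part of this header): unary ops chain into
+// a single lazily evaluated expression:
+//   Eigen::ArrayXf a(3);
+//   a << 1, 4, 9;
+//   Eigen::ArrayXf r = (a.sqrt() + 1.f).log();  // log(sqrt(a) + 1)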
+
+/** \returns an expression of the coefficient-wise cosine of *this.
+  *
+  * Example: \include Cwise_cos.cpp
+  * Output: \verbinclude Cwise_cos.out
+  *
+  * \sa sin(), acos()
+  */
+inline const CwiseUnaryOp<internal::scalar_cos_op<Scalar>, const Derived>
+cos() const
+{
+  return derived();
+}
+
+
+/** \returns an expression of the coefficient-wise sine of *this.
+  *
+  * Example: \include Cwise_sin.cpp
+  * Output: \verbinclude Cwise_sin.out
+  *
+  * \sa cos(), asin()
+  */
+inline const CwiseUnaryOp<internal::scalar_sin_op<Scalar>, const Derived>
+sin() const
+{
+  return derived();
+}
+
+/** \returns an expression of the coefficient-wise arc cosine of *this.
+  *
+  * Example: \include Cwise_acos.cpp
+  * Output: \verbinclude Cwise_acos.out
+  *
+  * \sa cos(), asin()
+  */
+inline const CwiseUnaryOp<internal::scalar_acos_op<Scalar>, const Derived>
+acos() const
+{
+  return derived();
+}
+
+/** \returns an expression of the coefficient-wise arc sine of *this.
+  *
+  * Example: \include Cwise_asin.cpp
+  * Output: \verbinclude Cwise_asin.out
+  *
+  * \sa sin(), acos()
+  */
+inline const CwiseUnaryOp<internal::scalar_asin_op<Scalar>, const Derived>
+asin() const
+{
+  return derived();
+}
+
+/** \returns an expression of the coefficient-wise tangent of *this.
+  *
+  * Example: \include Cwise_tan.cpp
+  * Output: \verbinclude Cwise_tan.out
+  *
+  * \sa cos(), sin()
+  */
+inline const CwiseUnaryOp<internal::scalar_tan_op<Scalar>, const Derived>
+tan() const
+{
+  return derived();
+}
+
+
+/** \returns an expression of the coefficient-wise power of *this to the given exponent.
+  *
+  * Example: \include Cwise_pow.cpp
+  * Output: \verbinclude Cwise_pow.out
+  *
+  * \sa exp(), log()
+  */
+inline const CwiseUnaryOp<internal::scalar_pow_op<Scalar>, const Derived>
+pow(const Scalar& exponent) const
+{
+  return CwiseUnaryOp<internal::scalar_pow_op<Scalar>, const Derived>
+          (derived(), internal::scalar_pow_op<Scalar>(exponent));
+}
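+
+// Usage sketch (illustrative, not part of this header):
+//   Eigen::ArrayXf a(3);
+//   a << 1, 2, 3;
+//   Eigen::ArrayXf cubes = a.pow(3);  // [1, 8, 27]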
+
+
+/** \returns an expression of the coefficient-wise inverse of *this.
+  *
+  * Example: \include Cwise_inverse.cpp
+  * Output: \verbinclude Cwise_inverse.out
+  *
+  * \sa operator/(), operator*()
+  */
+inline const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived>
+inverse() const
+{
+  return derived();
+}
+
+/** \returns an expression of the coefficient-wise square of *this.
+  *
+  * Example: \include Cwise_square.cpp
+  * Output: \verbinclude Cwise_square.out
+  *
+  * \sa operator/(), operator*(), abs2()
+  */
+inline const CwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived>
+square() const
+{
+  return derived();
+}
+
+/** \returns an expression of the coefficient-wise cube of *this.
+  *
+  * Example: \include Cwise_cube.cpp
+  * Output: \verbinclude Cwise_cube.out
+  *
+  * \sa square(), pow()
+  */
+inline const CwiseUnaryOp<internal::scalar_cube_op<Scalar>, const Derived>
+cube() const
+{
+  return derived();
+}
diff --git a/Eigen/src/plugins/BlockMethods.h b/Eigen/src/plugins/BlockMethods.h
new file mode 100644
index 0000000..2788251
--- /dev/null
+++ b/Eigen/src/plugins/BlockMethods.h
@@ -0,0 +1,935 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+/** \internal expression type of a column */
+typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ColXpr;
+typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ConstColXpr;
+/** \internal expression type of a row */
+typedef Block<Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowXpr;
+typedef const Block<const Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowXpr;
+/** \internal expression type of a block of whole columns */
+typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ColsBlockXpr;
+typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ConstColsBlockXpr;
+/** \internal expression type of a block of whole rows */
+typedef Block<Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowsBlockXpr;
+typedef const Block<const Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowsBlockXpr;
+/** \internal expression type of a block of whole columns */
+template<int N> struct NColsBlockXpr { typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; };
+template<int N> struct ConstNColsBlockXpr { typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; };
+/** \internal expression type of a block of whole rows */
+template<int N> struct NRowsBlockXpr { typedef Block<Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };
+template<int N> struct ConstNRowsBlockXpr { typedef const Block<const Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };
+
+typedef VectorBlock<Derived> SegmentReturnType;
+typedef const VectorBlock<const Derived> ConstSegmentReturnType;
+template<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };
+template<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };
+
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+/** \returns a dynamic-size expression of a block in *this.
+  *
+  * \param startRow the first row in the block
+  * \param startCol the first column in the block
+  * \param blockRows the number of rows in the block
+  * \param blockCols the number of columns in the block
+  *
+  * Example: \include MatrixBase_block_int_int_int_int.cpp
+  * Output: \verbinclude MatrixBase_block_int_int_int_int.out
+  *
+  * \note Even though the returned expression has dynamic size, in the case
+  * when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
+  * which means that evaluating it does not cause a dynamic memory allocation.
+  *
+  * \sa class Block, block(Index,Index)
+  */
+inline Block<Derived> block(Index startRow, Index startCol, Index blockRows, Index blockCols)
+{
+  return Block<Derived>(derived(), startRow, startCol, blockRows, blockCols);
+}
+
+/** This is the const version of block(Index,Index,Index,Index). */
+inline const Block<const Derived> block(Index startRow, Index startCol, Index blockRows, Index blockCols) const
+{
+  return Block<const Derived>(derived(), startRow, startCol, blockRows, blockCols);
+}
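+
+// Usage sketch (illustrative, not part of this header): block() is both a
+// readable and a writable view into the underlying matrix:
+//   Eigen::MatrixXd m = Eigen::MatrixXd::Zero(4, 4);
+//   m.block(1, 1, 2, 2).setOnes();              // writes the central 2x2 block
+//   Eigen::MatrixXd sub = m.block(0, 0, 2, 2);  // copies the top-left block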
+
+
+
+
+/** \returns a dynamic-size expression of a top-right corner of *this.
+  *
+  * \param cRows the number of rows in the corner
+  * \param cCols the number of columns in the corner
+  *
+  * Example: \include MatrixBase_topRightCorner_int_int.cpp
+  * Output: \verbinclude MatrixBase_topRightCorner_int_int.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+inline Block<Derived> topRightCorner(Index cRows, Index cCols)
+{
+  return Block<Derived>(derived(), 0, cols() - cCols, cRows, cCols);
+}
+
+/** This is the const version of topRightCorner(Index, Index).*/
+inline const Block<const Derived> topRightCorner(Index cRows, Index cCols) const
+{
+  return Block<const Derived>(derived(), 0, cols() - cCols, cRows, cCols);
+}
+
+/** \returns an expression of a fixed-size top-right corner of *this.
+  *
+  * \tparam CRows the number of rows in the corner
+  * \tparam CCols the number of columns in the corner
+  *
+  * Example: \include MatrixBase_template_int_int_topRightCorner.cpp
+  * Output: \verbinclude MatrixBase_template_int_int_topRightCorner.out
+  *
+  * \sa class Block, block<int,int>(Index,Index)
+  */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> topRightCorner()
+{
+  return Block<Derived, CRows, CCols>(derived(), 0, cols() - CCols);
+}
+
+/** This is the const version of topRightCorner<int, int>().*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> topRightCorner() const
+{
+  return Block<const Derived, CRows, CCols>(derived(), 0, cols() - CCols);
+}
+
+/** \returns an expression of a top-right corner of *this.
+  *
+  * \tparam CRows number of rows in corner as specified at compile-time
+  * \tparam CCols number of columns in corner as specified at compile-time
+  * \param  cRows number of rows in corner as specified at run-time
+  * \param  cCols number of columns in corner as specified at run-time
+  *
+  * This function is mainly useful for corners where the number of rows is specified at compile-time
+  * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
+  * information should not contradict. In other words, \a cRows should equal \a CRows unless
+  * \a CRows is \a Dynamic, and the same for the number of columns.
+  *
+  * Example: \include MatrixBase_template_int_int_topRightCorner_int_int.cpp
+  * Output: \verbinclude MatrixBase_template_int_int_topRightCorner_int_int.out
+  *
+  * \sa class Block
+  */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> topRightCorner(Index cRows, Index cCols)
+{
+  return Block<Derived, CRows, CCols>(derived(), 0, cols() - cCols, cRows, cCols);
+}
+
+/** This is the const version of topRightCorner<int, int>(Index, Index).*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> topRightCorner(Index cRows, Index cCols) const
+{
+  return Block<const Derived, CRows, CCols>(derived(), 0, cols() - cCols, cRows, cCols);
+}
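+
+// Usage sketch (illustrative, not part of this header): the fixed-size
+// variants encode the corner size in the type, the dynamic ones at run-time:
+//   Eigen::MatrixXd m = Eigen::MatrixXd::Random(5, 5);
+//   Eigen::MatrixXd c1 = m.topRightCorner(2, 3);               // dynamic size
+//   Eigen::Matrix<double, 2, 3> c2 = m.topRightCorner<2, 3>(); // fixed size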
+
+
+
+/** \returns a dynamic-size expression of a top-left corner of *this.
+  *
+  * \param cRows the number of rows in the corner
+  * \param cCols the number of columns in the corner
+  *
+  * Example: \include MatrixBase_topLeftCorner_int_int.cpp
+  * Output: \verbinclude MatrixBase_topLeftCorner_int_int.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+inline Block<Derived> topLeftCorner(Index cRows, Index cCols)
+{
+  return Block<Derived>(derived(), 0, 0, cRows, cCols);
+}
+
+/** This is the const version of topLeftCorner(Index, Index).*/
+inline const Block<const Derived> topLeftCorner(Index cRows, Index cCols) const
+{
+  return Block<const Derived>(derived(), 0, 0, cRows, cCols);
+}
+
+/** \returns an expression of a fixed-size top-left corner of *this.
+  *
+  * The template parameters CRows and CCols are the number of rows and columns in the corner.
+  *
+  * Example: \include MatrixBase_template_int_int_topLeftCorner.cpp
+  * Output: \verbinclude MatrixBase_template_int_int_topLeftCorner.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> topLeftCorner()
+{
+  return Block<Derived, CRows, CCols>(derived(), 0, 0);
+}
+
+/** This is the const version of topLeftCorner<int, int>().*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> topLeftCorner() const
+{
+  return Block<const Derived, CRows, CCols>(derived(), 0, 0);
+}
+
+/** \returns an expression of a top-left corner of *this.
+  *
+  * \tparam CRows number of rows in corner as specified at compile-time
+  * \tparam CCols number of columns in corner as specified at compile-time
+  * \param  cRows number of rows in corner as specified at run-time
+  * \param  cCols number of columns in corner as specified at run-time
+  *
+  * This function is mainly useful for corners where the number of rows is specified at compile-time
+  * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
+  * information should not contradict. In other words, \a cRows should equal \a CRows unless
+  * \a CRows is \a Dynamic, and the same for the number of columns.
+  *
+  * Example: \include MatrixBase_template_int_int_topLeftCorner_int_int.cpp
+  * Output: \verbinclude MatrixBase_template_int_int_topLeftCorner_int_int.out
+  *
+  * \sa class Block
+  */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> topLeftCorner(Index cRows, Index cCols)
+{
+  return Block<Derived, CRows, CCols>(derived(), 0, 0, cRows, cCols);
+}
+
+/** This is the const version of topLeftCorner<int, int>(Index, Index).*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> topLeftCorner(Index cRows, Index cCols) const
+{
+  return Block<const Derived, CRows, CCols>(derived(), 0, 0, cRows, cCols);
+}
+
+
+
+/** \returns a dynamic-size expression of a bottom-right corner of *this.
+  *
+  * \param cRows the number of rows in the corner
+  * \param cCols the number of columns in the corner
+  *
+  * Example: \include MatrixBase_bottomRightCorner_int_int.cpp
+  * Output: \verbinclude MatrixBase_bottomRightCorner_int_int.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+inline Block<Derived> bottomRightCorner(Index cRows, Index cCols)
+{
+  return Block<Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+}
+
+/** This is the const version of bottomRightCorner(Index, Index).*/
+inline const Block<const Derived> bottomRightCorner(Index cRows, Index cCols) const
+{
+  return Block<const Derived>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+}
+
+/** \returns an expression of a fixed-size bottom-right corner of *this.
+  *
+  * The template parameters CRows and CCols are the number of rows and columns in the corner.
+  *
+  * Example: \include MatrixBase_template_int_int_bottomRightCorner.cpp
+  * Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> bottomRightCorner()
+{
+  return Block<Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
+}
+
+/** This is the const version of bottomRightCorner<int, int>().*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> bottomRightCorner() const
+{
+  return Block<const Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
+}
+
+/** \returns an expression of a bottom-right corner of *this.
+  *
+  * \tparam CRows number of rows in corner as specified at compile-time
+  * \tparam CCols number of columns in corner as specified at compile-time
+  * \param  cRows number of rows in corner as specified at run-time
+  * \param  cCols number of columns in corner as specified at run-time
+  *
+  * This function is mainly useful for corners where the number of rows is specified at compile-time
+  * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
+  * information should not contradict. In other words, \a cRows should equal \a CRows unless
+  * \a CRows is \a Dynamic, and the same for the number of columns.
+  *
+  * Example: \include MatrixBase_template_int_int_bottomRightCorner_int_int.cpp
+  * Output: \verbinclude MatrixBase_template_int_int_bottomRightCorner_int_int.out
+  *
+  * \sa class Block
+  */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> bottomRightCorner(Index cRows, Index cCols)
+{
+  return Block<Derived, CRows, CCols>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+}
+
+/** This is the const version of bottomRightCorner<int, int>(Index, Index).*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> bottomRightCorner(Index cRows, Index cCols) const
+{
+  return Block<const Derived, CRows, CCols>(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
+}
+
+
+
+/** \returns a dynamic-size expression of a bottom-left corner of *this.
+  *
+  * \param cRows the number of rows in the corner
+  * \param cCols the number of columns in the corner
+  *
+  * Example: \include MatrixBase_bottomLeftCorner_int_int.cpp
+  * Output: \verbinclude MatrixBase_bottomLeftCorner_int_int.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+inline Block<Derived> bottomLeftCorner(Index cRows, Index cCols)
+{
+  return Block<Derived>(derived(), rows() - cRows, 0, cRows, cCols);
+}
+
+/** This is the const version of bottomLeftCorner(Index, Index).*/
+inline const Block<const Derived> bottomLeftCorner(Index cRows, Index cCols) const
+{
+  return Block<const Derived>(derived(), rows() - cRows, 0, cRows, cCols);
+}
+
+/** \returns an expression of a fixed-size bottom-left corner of *this.
+  *
+  * The template parameters CRows and CCols are the number of rows and columns in the corner.
+  *
+  * Example: \include MatrixBase_template_int_int_bottomLeftCorner.cpp
+  * Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> bottomLeftCorner()
+{
+  return Block<Derived, CRows, CCols>(derived(), rows() - CRows, 0);
+}
+
+/** This is the const version of bottomLeftCorner<int, int>().*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> bottomLeftCorner() const
+{
+  return Block<const Derived, CRows, CCols>(derived(), rows() - CRows, 0);
+}
+
+/** \returns an expression of a bottom-left corner of *this.
+  *
+  * \tparam CRows number of rows in corner as specified at compile-time
+  * \tparam CCols number of columns in corner as specified at compile-time
+  * \param  cRows number of rows in corner as specified at run-time
+  * \param  cCols number of columns in corner as specified at run-time
+  *
+  * This function is mainly useful for corners where the number of rows is specified at compile-time
+  * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
+  * information should not contradict. In other words, \a cRows should equal \a CRows unless
+  * \a CRows is \a Dynamic, and the same for the number of columns.
+  *
+  * Example: \include MatrixBase_template_int_int_bottomLeftCorner_int_int.cpp
+  * Output: \verbinclude MatrixBase_template_int_int_bottomLeftCorner_int_int.out
+  *
+  * \sa class Block
+  */
+template<int CRows, int CCols>
+inline Block<Derived, CRows, CCols> bottomLeftCorner(Index cRows, Index cCols)
+{
+  return Block<Derived, CRows, CCols>(derived(), rows() - cRows, 0, cRows, cCols);
+}
+
+/** This is the const version of bottomLeftCorner<int, int>(Index, Index).*/
+template<int CRows, int CCols>
+inline const Block<const Derived, CRows, CCols> bottomLeftCorner(Index cRows, Index cCols) const
+{
+  return Block<const Derived, CRows, CCols>(derived(), rows() - cRows, 0, cRows, cCols);
+}
+
+
+
+/** \returns a block consisting of the top rows of *this.
+  *
+  * \param n the number of rows in the block
+  *
+  * Example: \include MatrixBase_topRows_int.cpp
+  * Output: \verbinclude MatrixBase_topRows_int.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+inline RowsBlockXpr topRows(Index n)
+{
+  return RowsBlockXpr(derived(), 0, 0, n, cols());
+}
+
+/** This is the const version of topRows(Index).*/
+inline ConstRowsBlockXpr topRows(Index n) const
+{
+  return ConstRowsBlockXpr(derived(), 0, 0, n, cols());
+}
+
+/** \returns a block consisting of the top rows of *this.
+  *
+  * \tparam N the number of rows in the block as specified at compile-time
+  * \param n the number of rows in the block as specified at run-time
+  *
+  * The compile-time and run-time information should not contradict. In other words,
+  * \a n should equal \a N unless \a N is \a Dynamic.
+  *
+  * Example: \include MatrixBase_template_int_topRows.cpp
+  * Output: \verbinclude MatrixBase_template_int_topRows.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<int N>
+inline typename NRowsBlockXpr<N>::Type topRows(Index n = N)
+{
+  return typename NRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
+}
+
+/** This is the const version of topRows<int>().*/
+template<int N>
+inline typename ConstNRowsBlockXpr<N>::Type topRows(Index n = N) const
+{
+  return typename ConstNRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());
+}
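+
+// Usage sketch (illustrative, not part of this header):
+//   Eigen::MatrixXd m = Eigen::MatrixXd::Random(4, 3);
+//   Eigen::MatrixXd top = m.topRows(2);  // first two rows
+//   m.topRows<2>().setZero();            // fixed-size variant, writable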
+
+
+
+/** \returns a block consisting of the bottom rows of *this.
+  *
+  * \param n the number of rows in the block
+  *
+  * Example: \include MatrixBase_bottomRows_int.cpp
+  * Output: \verbinclude MatrixBase_bottomRows_int.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+inline RowsBlockXpr bottomRows(Index n)
+{
+  return RowsBlockXpr(derived(), rows() - n, 0, n, cols());
+}
+
+/** This is the const version of bottomRows(Index).*/
+inline ConstRowsBlockXpr bottomRows(Index n) const
+{
+  return ConstRowsBlockXpr(derived(), rows() - n, 0, n, cols());
+}
+
+/** \returns a block consisting of the bottom rows of *this.
+  *
+  * \tparam N the number of rows in the block as specified at compile-time
+  * \param n the number of rows in the block as specified at run-time
+  *
+  * The compile-time and run-time information should not contradict. In other words,
+  * \a n should equal \a N unless \a N is \a Dynamic.
+  *
+  * Example: \include MatrixBase_template_int_bottomRows.cpp
+  * Output: \verbinclude MatrixBase_template_int_bottomRows.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<int N>
+inline typename NRowsBlockXpr<N>::Type bottomRows(Index n = N)
+{
+  return typename NRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
+}
+
+/** This is the const version of bottomRows<int>().*/
+template<int N>
+inline typename ConstNRowsBlockXpr<N>::Type bottomRows(Index n = N) const
+{
+  return typename ConstNRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());
+}
+
+
+
+/** \returns a block consisting of a range of rows of *this.
+  *
+  * \param startRow the index of the first row in the block
+  * \param n the number of rows in the block
+  *
+  * Example: \include DenseBase_middleRows_int.cpp
+  * Output: \verbinclude DenseBase_middleRows_int.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+inline RowsBlockXpr middleRows(Index startRow, Index n)
+{
+  return RowsBlockXpr(derived(), startRow, 0, n, cols());
+}
+
+/** This is the const version of middleRows(Index,Index).*/
+inline ConstRowsBlockXpr middleRows(Index startRow, Index n) const
+{
+  return ConstRowsBlockXpr(derived(), startRow, 0, n, cols());
+}
+
+/** \returns a block consisting of a range of rows of *this.
+  *
+  * \tparam N the number of rows in the block as specified at compile-time
+  * \param startRow the index of the first row in the block
+  * \param n the number of rows in the block as specified at run-time
+  *
+  * The compile-time and run-time information should not contradict. In other words,
+  * \a n should equal \a N unless \a N is \a Dynamic.
+  *
+  * Example: \include DenseBase_template_int_middleRows.cpp
+  * Output: \verbinclude DenseBase_template_int_middleRows.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<int N>
+inline typename NRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N)
+{
+  return typename NRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
+}
+
+/** This is the const version of middleRows<int>().*/
+template<int N>
+inline typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N) const
+{
+  return typename ConstNRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());
+}
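+
+// Illustrative usage sketch (A is an assumed example): middleRows() selects
+// rows [startRow, startRow + n).
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 3);
+//   Eigen::MatrixXd m1 = A.middleRows(1, 2);   // rows 1 and 2
+//   Eigen::MatrixXd m2 = A.middleRows<2>(1);   // same rows, fixed count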
+
+
+
+/** \returns a block consisting of the left columns of *this.
+  *
+  * \param n the number of columns in the block
+  *
+  * Example: \include MatrixBase_leftCols_int.cpp
+  * Output: \verbinclude MatrixBase_leftCols_int.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+inline ColsBlockXpr leftCols(Index n)
+{
+  return ColsBlockXpr(derived(), 0, 0, rows(), n);
+}
+
+/** This is the const version of leftCols(Index).*/
+inline ConstColsBlockXpr leftCols(Index n) const
+{
+  return ConstColsBlockXpr(derived(), 0, 0, rows(), n);
+}
+
+/** \returns a block consisting of the left columns of *this.
+  *
+  * \tparam N the number of columns in the block as specified at compile-time
+  * \param n the number of columns in the block as specified at run-time
+  *
+  * The compile-time and run-time information should not contradict. In other words,
+  * \a n should equal \a N unless \a N is \a Dynamic.
+  *
+  * Example: \include MatrixBase_template_int_leftCols.cpp
+  * Output: \verbinclude MatrixBase_template_int_leftCols.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<int N>
+inline typename NColsBlockXpr<N>::Type leftCols(Index n = N)
+{
+  return typename NColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
+}
+
+/** This is the const version of leftCols<int>().*/
+template<int N>
+inline typename ConstNColsBlockXpr<N>::Type leftCols(Index n = N) const
+{
+  return typename ConstNColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);
+}
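+
+// Illustrative usage sketch (A is an assumed example): the column analogue of
+// topRows().
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 5);
+//   Eigen::MatrixXd l1 = A.leftCols(2);     // column count chosen at run-time
+//   Eigen::MatrixXd l2 = A.leftCols<2>();   // column count fixed at compile-time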
+
+
+
+/** \returns a block consisting of the right columns of *this.
+  *
+  * \param n the number of columns in the block
+  *
+  * Example: \include MatrixBase_rightCols_int.cpp
+  * Output: \verbinclude MatrixBase_rightCols_int.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+inline ColsBlockXpr rightCols(Index n)
+{
+  return ColsBlockXpr(derived(), 0, cols() - n, rows(), n);
+}
+
+/** This is the const version of rightCols(Index).*/
+inline ConstColsBlockXpr rightCols(Index n) const
+{
+  return ConstColsBlockXpr(derived(), 0, cols() - n, rows(), n);
+}
+
+/** \returns a block consisting of the right columns of *this.
+  *
+  * \tparam N the number of columns in the block as specified at compile-time
+  * \param n the number of columns in the block as specified at run-time
+  *
+  * The compile-time and run-time information should not contradict. In other words,
+  * \a n should equal \a N unless \a N is \a Dynamic.
+  *
+  * Example: \include MatrixBase_template_int_rightCols.cpp
+  * Output: \verbinclude MatrixBase_template_int_rightCols.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<int N>
+inline typename NColsBlockXpr<N>::Type rightCols(Index n = N)
+{
+  return typename NColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
+}
+
+/** This is the const version of rightCols<int>().*/
+template<int N>
+inline typename ConstNColsBlockXpr<N>::Type rightCols(Index n = N) const
+{
+  return typename ConstNColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);
+}
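+
+// Illustrative usage sketch (A is an assumed example): the last n columns,
+// assignable like any block expression.
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 5);
+//   A.rightCols(2).setOnes();               // assign through the expression
+//   Eigen::MatrixXd r = A.rightCols<2>();   // compile-time column count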
+
+
+
+/** \returns a block consisting of a range of columns of *this.
+  *
+  * \param startCol the index of the first column in the block
+  * \param numCols the number of columns in the block
+  *
+  * Example: \include DenseBase_middleCols_int.cpp
+  * Output: \verbinclude DenseBase_middleCols_int.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+inline ColsBlockXpr middleCols(Index startCol, Index numCols)
+{
+  return ColsBlockXpr(derived(), 0, startCol, rows(), numCols);
+}
+
+/** This is the const version of middleCols(Index,Index).*/
+inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const
+{
+  return ConstColsBlockXpr(derived(), 0, startCol, rows(), numCols);
+}
+
+/** \returns a block consisting of a range of columns of *this.
+  *
+  * \tparam N the number of columns in the block as specified at compile-time
+  * \param startCol the index of the first column in the block
+  * \param n the number of columns in the block as specified at run-time
+  *
+  * The compile-time and run-time information should not contradict. In other words,
+  * \a n should equal \a N unless \a N is \a Dynamic.
+  *
+  * Example: \include DenseBase_template_int_middleCols.cpp
+  * Output: \verbinclude DenseBase_template_int_middleCols.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<int N>
+inline typename NColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N)
+{
+  return typename NColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
+}
+
+/** This is the const version of middleCols<int>().*/
+template<int N>
+inline typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N) const
+{
+  return typename ConstNColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);
+}
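+
+// Illustrative usage sketch (A is an assumed example): middleCols() selects
+// columns [startCol, startCol + n).
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 6);
+//   Eigen::MatrixXd c1 = A.middleCols(2, 3);   // columns 2, 3 and 4
+//   Eigen::MatrixXd c2 = A.middleCols<3>(2);   // same columns, fixed count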
+
+
+
+/** \returns a fixed-size expression of a block in *this.
+  *
+  * The template parameters \a BlockRows and \a BlockCols are the number of
+  * rows and columns in the block.
+  *
+  * \param startRow the first row in the block
+  * \param startCol the first column in the block
+  *
+  * Example: \include MatrixBase_block_int_int.cpp
+  * Output: \verbinclude MatrixBase_block_int_int.out
+  *
+  * \note since block is a templated member, the keyword template has to be used
+  * if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<int BlockRows, int BlockCols>
+inline Block<Derived, BlockRows, BlockCols> block(Index startRow, Index startCol)
+{
+  return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
+}
+
+/** This is the const version of block<>(Index, Index). */
+template<int BlockRows, int BlockCols>
+inline const Block<const Derived, BlockRows, BlockCols> block(Index startRow, Index startCol) const
+{
+  return Block<const Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
+}
+
+/** \returns an expression of a block in *this.
+  *
+  * \tparam BlockRows number of rows in block as specified at compile-time
+  * \tparam BlockCols number of columns in block as specified at compile-time
+  * \param  startRow  the first row in the block
+  * \param  startCol  the first column in the block
+  * \param  blockRows number of rows in block as specified at run-time
+  * \param  blockCols number of columns in block as specified at run-time
+  *
+  * This function is mainly useful for blocks where the number of rows is specified at compile-time
+  * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time
+  * information should not contradict. In other words, \a blockRows should equal \a BlockRows unless
+  * \a BlockRows is \a Dynamic, and the same for the number of columns.
+  *
+  * Example: \include MatrixBase_template_int_int_block_int_int_int_int.cpp
+  * Output: \verbinclude MatrixBase_template_int_int_block_int_int_int_int.out
+  *
+  * \sa class Block, block(Index,Index,Index,Index)
+  */
+template<int BlockRows, int BlockCols>
+inline Block<Derived, BlockRows, BlockCols> block(Index startRow, Index startCol, 
+                                                  Index blockRows, Index blockCols)
+{
+  return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol, blockRows, blockCols);
+}
+
+/** This is the const version of block<>(Index, Index, Index, Index). */
+template<int BlockRows, int BlockCols>
+inline const Block<const Derived, BlockRows, BlockCols> block(Index startRow, Index startCol,
+                                                              Index blockRows, Index blockCols) const
+{
+  return Block<const Derived, BlockRows, BlockCols>(derived(), startRow, startCol, blockRows, blockCols);
+}
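+
+// Illustrative usage sketch (A is an assumed example): the fully fixed form
+// takes only the top-left corner, while the mixed form also takes run-time
+// sizes for the Dynamic dimension(s).
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 6);
+//   Eigen::Matrix2d f = A.block<2, 2>(1, 1);                    // fully fixed 2x2
+//   Eigen::MatrixXd g = A.block<2, Eigen::Dynamic>(0, 0, 2, 4); // 2 rows fixed, 4 cols at run-time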
+
+/** \returns an expression of the \a i-th column of *this. Note that the numbering starts at 0.
+  *
+  * Example: \include MatrixBase_col.cpp
+  * Output: \verbinclude MatrixBase_col.out
+  *
+  * \sa row(), class Block */
+inline ColXpr col(Index i)
+{
+  return ColXpr(derived(), i);
+}
+
+/** This is the const version of col(). */
+inline ConstColXpr col(Index i) const
+{
+  return ConstColXpr(derived(), i);
+}
+
+/** \returns an expression of the \a i-th row of *this. Note that the numbering starts at 0.
+  *
+  * Example: \include MatrixBase_row.cpp
+  * Output: \verbinclude MatrixBase_row.out
+  *
+  * \sa col(), class Block */
+inline RowXpr row(Index i)
+{
+  return RowXpr(derived(), i);
+}
+
+/** This is the const version of row(). */
+inline ConstRowXpr row(Index i) const
+{
+  return ConstRowXpr(derived(), i);
+}
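+
+// Illustrative usage sketch (A is an assumed example): row() and col() return
+// writable expressions.
+//
+//   Eigen::Matrix3d A = Eigen::Matrix3d::Identity();
+//   A.row(0) = 2.0 * A.row(1);       // assign to the first row
+//   Eigen::Vector3d c = A.col(2);    // copy the last column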
+
+/** \returns a dynamic-size expression of a segment (i.e. a vector block) in *this.
+  *
+  * \only_for_vectors
+  *
+  * \param start the first coefficient in the segment
+  * \param n the number of coefficients in the segment
+  *
+  * Example: \include MatrixBase_segment_int_int.cpp
+  * Output: \verbinclude MatrixBase_segment_int_int.out
+  *
+  * \note Even though the returned expression has dynamic size, in the case
+  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+  * which means that evaluating it does not cause a dynamic memory allocation.
+  *
+  * \sa class Block, segment(Index)
+  */
+inline SegmentReturnType segment(Index start, Index n)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return SegmentReturnType(derived(), start, n);
+}
+
+
+/** This is the const version of segment(Index,Index).*/
+inline ConstSegmentReturnType segment(Index start, Index n) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return ConstSegmentReturnType(derived(), start, n);
+}
+
+/** \returns a dynamic-size expression of the first coefficients of *this.
+  *
+  * \only_for_vectors
+  *
+  * \param n the number of coefficients in the segment
+  *
+  * Example: \include MatrixBase_start_int.cpp
+  * Output: \verbinclude MatrixBase_start_int.out
+  *
+  * \note Even though the returned expression has dynamic size, in the case
+  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+  * which means that evaluating it does not cause a dynamic memory allocation.
+  *
+  * \sa class Block, block(Index,Index)
+  */
+inline SegmentReturnType head(Index n)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return SegmentReturnType(derived(), 0, n);
+}
+
+/** This is the const version of head(Index).*/
+inline ConstSegmentReturnType head(Index n) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return ConstSegmentReturnType(derived(), 0, n);
+}
+
+/** \returns a dynamic-size expression of the last coefficients of *this.
+  *
+  * \only_for_vectors
+  *
+  * \param n the number of coefficients in the segment
+  *
+  * Example: \include MatrixBase_end_int.cpp
+  * Output: \verbinclude MatrixBase_end_int.out
+  *
+  * \note Even though the returned expression has dynamic size, in the case
+  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
+  * which means that evaluating it does not cause a dynamic memory allocation.
+  *
+  * \sa class Block, block(Index,Index)
+  */
+inline SegmentReturnType tail(Index n)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return SegmentReturnType(derived(), this->size() - n, n);
+}
+
+/** This is the const version of tail(Index).*/
+inline ConstSegmentReturnType tail(Index n) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return ConstSegmentReturnType(derived(), this->size() - n, n);
+}
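+
+// Illustrative usage sketch (v is an assumed example): head(), segment() and
+// tail() partition a vector.
+//
+//   Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(6, 0.0, 5.0);
+//   Eigen::VectorXd h = v.head(2);        // coefficients 0, 1
+//   Eigen::VectorXd s = v.segment(2, 2);  // coefficients 2, 3
+//   Eigen::VectorXd t = v.tail(2);        // coefficients 4, 5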
+
+/** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this.
+  *
+  * \only_for_vectors
+  *
+  * \tparam N the number of coefficients in the segment as specified at compile-time
+  * \param start the index of the first element in the segment
+  * \param n the number of coefficients in the segment as specified at run-time
+  *
+  * The compile-time and run-time information should not contradict. In other words,
+  * \a n should equal \a N unless \a N is \a Dynamic.
+  *
+  * Example: \include MatrixBase_template_int_segment.cpp
+  * Output: \verbinclude MatrixBase_template_int_segment.out
+  *
+  * \sa class Block
+  */
+template<int N>
+inline typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename FixedSegmentReturnType<N>::Type(derived(), start, n);
+}
+
+/** This is the const version of segment<int>(Index).*/
+template<int N>
+inline typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index n = N) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename ConstFixedSegmentReturnType<N>::Type(derived(), start, n);
+}
+
+/** \returns a fixed-size expression of the first coefficients of *this.
+  *
+  * \only_for_vectors
+  *
+  * \tparam N the number of coefficients in the segment as specified at compile-time
+  * \param  n the number of coefficients in the segment as specified at run-time
+  *
+  * The compile-time and run-time information should not contradict. In other words,
+  * \a n should equal \a N unless \a N is \a Dynamic.
+  *
+  * Example: \include MatrixBase_template_int_start.cpp
+  * Output: \verbinclude MatrixBase_template_int_start.out
+  *
+  * \sa class Block
+  */
+template<int N>
+inline typename FixedSegmentReturnType<N>::Type head(Index n = N)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename FixedSegmentReturnType<N>::Type(derived(), 0, n);
+}
+
+/** This is the const version of head<int>().*/
+template<int N>
+inline typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename ConstFixedSegmentReturnType<N>::Type(derived(), 0, n);
+}
+
+/** \returns a fixed-size expression of the last coefficients of *this.
+  *
+  * \only_for_vectors
+  *
+  * \tparam N the number of coefficients in the segment as specified at compile-time
+  * \param  n the number of coefficients in the segment as specified at run-time
+  *
+  * The compile-time and run-time information should not contradict. In other words,
+  * \a n should equal \a N unless \a N is \a Dynamic.
+  *
+  * Example: \include MatrixBase_template_int_end.cpp
+  * Output: \verbinclude MatrixBase_template_int_end.out
+  *
+  * \sa class Block
+  */
+template<int N>
+inline typename FixedSegmentReturnType<N>::Type tail(Index n = N)
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename FixedSegmentReturnType<N>::Type(derived(), size() - n, n);
+}
+
+/** This is the const version of tail<int>().*/
+template<int N>
+inline typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const
+{
+  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+  return typename ConstFixedSegmentReturnType<N>::Type(derived(), size() - n);
+}
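+
+// Illustrative usage sketch (v is an assumed example): the template forms fix
+// the segment length at compile-time, so the results fit in fixed-size vectors.
+//
+//   Eigen::VectorXd v = Eigen::VectorXd::Random(6);
+//   Eigen::Vector2d h = v.head<2>();
+//   Eigen::Vector2d s = v.segment<2>(2);
+//   Eigen::Vector2d t = v.tail<2>();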
diff --git a/Eigen/src/plugins/CMakeLists.txt b/Eigen/src/plugins/CMakeLists.txt
new file mode 100644
index 0000000..1a1d3ff
--- /dev/null
+++ b/Eigen/src/plugins/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_plugins_SRCS "*.h")
+
+INSTALL(FILES
+  ${Eigen_plugins_SRCS}
+  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/plugins COMPONENT Devel
+  )
diff --git a/Eigen/src/plugins/CommonCwiseBinaryOps.h b/Eigen/src/plugins/CommonCwiseBinaryOps.h
new file mode 100644
index 0000000..688d224
--- /dev/null
+++ b/Eigen/src/plugins/CommonCwiseBinaryOps.h
@@ -0,0 +1,46 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This file is a base class plugin containing common coefficient-wise functions.
+
+/** \returns an expression of the difference of \c *this and \a other
+  *
+  * \note If you want to subtract a given scalar from all coefficients, see Cwise::operator-().
+  *
+  * \sa class CwiseBinaryOp, operator-=()
+  */
+EIGEN_MAKE_CWISE_BINARY_OP(operator-,internal::scalar_difference_op)
+
+/** \returns an expression of the sum of \c *this and \a other
+  *
+  * \note If you want to add a given scalar to all coefficients, see Cwise::operator+().
+  *
+  * \sa class CwiseBinaryOp, operator+=()
+  */
+EIGEN_MAKE_CWISE_BINARY_OP(operator+,internal::scalar_sum_op)
+
+/** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other
+  *
+  * The template parameter \a CustomBinaryOp is the type of the functor
+  * of the custom operator (see class CwiseBinaryOp for an example)
+  *
+  * Here is an example illustrating the use of custom functors:
+  * \include class_CwiseBinaryOp.cpp
+  * Output: \verbinclude class_CwiseBinaryOp.out
+  *
+  * \sa class CwiseBinaryOp, operator+(), operator-(), cwiseProduct()
+  */
+template<typename CustomBinaryOp, typename OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>
+binaryExpr(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const
+{
+  return CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>(derived(), other.derived(), func);
+}
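+
+// Illustrative usage sketch (AbsDiff, x and y are assumed examples, not part
+// of Eigen): any functor with a suitable operator() works, including stateful ones.
+//
+//   struct AbsDiff {
+//     double operator()(double a, double b) const { return std::abs(a - b); }
+//   };
+//   Eigen::MatrixXd d = x.binaryExpr(y, AbsDiff());  // d(i,j) == |x(i,j) - y(i,j)|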
+
diff --git a/Eigen/src/plugins/CommonCwiseUnaryOps.h b/Eigen/src/plugins/CommonCwiseUnaryOps.h
new file mode 100644
index 0000000..08e931a
--- /dev/null
+++ b/Eigen/src/plugins/CommonCwiseUnaryOps.h
@@ -0,0 +1,172 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This file is a base class plugin containing common coefficient-wise functions.
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+/** \internal Represents a scalar multiple of an expression */
+typedef CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const Derived> ScalarMultipleReturnType;
+/** \internal Represents a quotient of an expression by a scalar*/
+typedef CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>, const Derived> ScalarQuotient1ReturnType;
+/** \internal the return type of conjugate() */
+typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+                    const CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>,
+                    const Derived&
+                  >::type ConjugateReturnType;
+/** \internal the return type of real() const */
+typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+                    const CwiseUnaryOp<internal::scalar_real_op<Scalar>, const Derived>,
+                    const Derived&
+                  >::type RealReturnType;
+/** \internal the return type of real() */
+typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+                    CwiseUnaryView<internal::scalar_real_ref_op<Scalar>, Derived>,
+                    Derived&
+                  >::type NonConstRealReturnType;
+/** \internal the return type of imag() const */
+typedef CwiseUnaryOp<internal::scalar_imag_op<Scalar>, const Derived> ImagReturnType;
+/** \internal the return type of imag() */
+typedef CwiseUnaryView<internal::scalar_imag_ref_op<Scalar>, Derived> NonConstImagReturnType;
+
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+/** \returns an expression of the opposite of \c *this
+  */
+inline const CwiseUnaryOp<internal::scalar_opposite_op<typename internal::traits<Derived>::Scalar>, const Derived>
+operator-() const { return derived(); }
+
+
+/** \returns an expression of \c *this scaled by the scalar factor \a scalar */
+inline const ScalarMultipleReturnType
+operator*(const Scalar& scalar) const
+{
+  return CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const Derived>
+    (derived(), internal::scalar_multiple_op<Scalar>(scalar));
+}
+
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+const ScalarMultipleReturnType operator*(const RealScalar& scalar) const;
+#endif
+
+/** \returns an expression of \c *this divided by the scalar value \a scalar */
+inline const CwiseUnaryOp<internal::scalar_quotient1_op<typename internal::traits<Derived>::Scalar>, const Derived>
+operator/(const Scalar& scalar) const
+{
+  return CwiseUnaryOp<internal::scalar_quotient1_op<Scalar>, const Derived>
+    (derived(), internal::scalar_quotient1_op<Scalar>(scalar));
+}
+
+/** Overload for the efficient multiplication of a real matrix by a complex scalar value */
+inline const CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,std::complex<Scalar> >, const Derived>
+operator*(const std::complex<Scalar>& scalar) const
+{
+  return CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,std::complex<Scalar> >, const Derived>
+    (*static_cast<const Derived*>(this), internal::scalar_multiple2_op<Scalar,std::complex<Scalar> >(scalar));
+}
+
+inline friend const ScalarMultipleReturnType
+operator*(const Scalar& scalar, const StorageBaseType& matrix)
+{ return matrix*scalar; }
+
+inline friend const CwiseUnaryOp<internal::scalar_multiple2_op<Scalar,std::complex<Scalar> >, const Derived>
+operator*(const std::complex<Scalar>& scalar, const StorageBaseType& matrix)
+{ return matrix*scalar; }
+
+/** \returns an expression of *this with the \a Scalar type cast to
+  * \a NewScalar.
+  *
+  * The template parameter \a NewType is the type we are casting the scalars to.
+  *
+  * \sa class CwiseUnaryOp
+  */
+template<typename NewType>
+typename internal::cast_return_type<Derived,const CwiseUnaryOp<internal::scalar_cast_op<typename internal::traits<Derived>::Scalar, NewType>, const Derived> >::type
+cast() const
+{
+  return derived();
+}
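+
+// Illustrative usage sketch (mi is an assumed example): cast() converts each
+// coefficient lazily, here from int to double.
+//
+//   Eigen::MatrixXi mi = Eigen::MatrixXi::Random(3, 3);
+//   Eigen::MatrixXd md = mi.cast<double>();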
+
+/** \returns an expression of the complex conjugate of \c *this.
+  *
+  * \sa adjoint() */
+inline ConjugateReturnType
+conjugate() const
+{
+  return ConjugateReturnType(derived());
+}
+
+/** \returns a read-only expression of the real part of \c *this.
+  *
+  * \sa imag() */
+inline RealReturnType
+real() const { return derived(); }
+
+/** \returns a read-only expression of the imaginary part of \c *this.
+  *
+  * \sa real() */
+inline const ImagReturnType
+imag() const { return derived(); }
+
+/** \brief Apply a unary operator coefficient-wise
+  * \param[in]  func  Functor implementing the unary operator
+  * \tparam  CustomUnaryOp Type of \a func  
+  * \returns An expression of a custom coefficient-wise unary operator \a func of *this
+  *
+  * The function \c ptr_fun() from the C++ standard library can be used to make functors out of normal functions.
+  *
+  * Example:
+  * \include class_CwiseUnaryOp_ptrfun.cpp
+  * Output: \verbinclude class_CwiseUnaryOp_ptrfun.out
+  *
+  * Genuine functors allow for more possibilities; for instance, they may contain state.
+  *
+  * Example:
+  * \include class_CwiseUnaryOp.cpp
+  * Output: \verbinclude class_CwiseUnaryOp.out
+  *
+  * \sa class CwiseUnaryOp, class CwiseBinaryOp
+  */
+template<typename CustomUnaryOp>
+inline const CwiseUnaryOp<CustomUnaryOp, const Derived>
+unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const
+{
+  return CwiseUnaryOp<CustomUnaryOp, const Derived>(derived(), func);
+}
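+
+// Illustrative usage sketch (Clamp01 and m are assumed examples, not part of
+// Eigen): a small stateless functor applied coefficient-wise.
+//
+//   struct Clamp01 {
+//     double operator()(double x) const { return std::min(1.0, std::max(0.0, x)); }
+//   };
+//   Eigen::MatrixXd clamped = m.unaryExpr(Clamp01());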
+
+/** \returns an expression of a custom coefficient-wise unary operator \a func of *this
+  *
+  * The template parameter \a CustomViewOp is the type of the functor
+  * of the custom unary view operator.
+  *
+  * Example:
+  * \include class_CwiseUnaryOp.cpp
+  * Output: \verbinclude class_CwiseUnaryOp.out
+  *
+  * \sa class CwiseUnaryOp, class CwiseBinaryOp
+  */
+template<typename CustomViewOp>
+inline const CwiseUnaryView<CustomViewOp, const Derived>
+unaryViewExpr(const CustomViewOp& func = CustomViewOp()) const
+{
+  return CwiseUnaryView<CustomViewOp, const Derived>(derived(), func);
+}
+
+/** \returns a non-const expression of the real part of \c *this.
+  *
+  * \sa imag() */
+inline NonConstRealReturnType
+real() { return derived(); }
+
+/** \returns a non-const expression of the imaginary part of \c *this.
+  *
+  * \sa real() */
+inline NonConstImagReturnType
+imag() { return derived(); }
diff --git a/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/Eigen/src/plugins/MatrixCwiseBinaryOps.h
new file mode 100644
index 0000000..c4a042b
--- /dev/null
+++ b/Eigen/src/plugins/MatrixCwiseBinaryOps.h
@@ -0,0 +1,143 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This file is a base class plugin containing matrix-specific coefficient-wise functions.
+
+/** \returns an expression of the Schur product (coefficient-wise product) of *this and \a other
+  *
+  * Example: \include MatrixBase_cwiseProduct.cpp
+  * Output: \verbinclude MatrixBase_cwiseProduct.out
+  *
+  * \sa class CwiseBinaryOp, cwiseAbs2()
+  */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)
+cwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return EIGEN_CWISE_PRODUCT_RETURN_TYPE(Derived,OtherDerived)(derived(), other.derived());
+}
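+
+// Illustrative usage sketch (a and b are assumed examples): unlike operator*,
+// cwiseProduct() multiplies matching coefficients instead of performing matrix
+// multiplication.
+//
+//   Eigen::Matrix2d a, b;
+//   a << 1, 2, 3, 4;
+//   b << 5, 6, 7, 8;
+//   Eigen::Matrix2d p = a.cwiseProduct(b);  // [[5, 12], [21, 32]]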
+
+/** \returns an expression of the coefficient-wise == operator of *this and \a other
+  *
+  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+  * isMuchSmallerThan().
+  *
+  * Example: \include MatrixBase_cwiseEqual.cpp
+  * Output: \verbinclude MatrixBase_cwiseEqual.out
+  *
+  * \sa cwiseNotEqual(), isApprox(), isMuchSmallerThan()
+  */
+template<typename OtherDerived>
+inline const CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>
+cwiseEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise != operator of *this and \a other
+  *
+  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+  * isMuchSmallerThan().
+  *
+  * Example: \include MatrixBase_cwiseNotEqual.cpp
+  * Output: \verbinclude MatrixBase_cwiseNotEqual.out
+  *
+  * \sa cwiseEqual(), isApprox(), isMuchSmallerThan()
+  */
+template<typename OtherDerived>
+inline const CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>
+cwiseNotEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
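+
+// Illustrative usage sketch (a and b are assumed examples): the result has
+// boolean coefficients, so it is typically reduced, e.g. counting exact
+// matches between integer matrices, where exact comparison is safe.
+//
+//   Eigen::MatrixXi a(2, 2), b(2, 2);
+//   a << 1, 2, 3, 4;
+//   b << 1, 0, 3, 0;
+//   Eigen::DenseIndex matches = a.cwiseEqual(b).count();  // == 2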
+
+/** \returns an expression of the coefficient-wise min of *this and \a other
+  *
+  * Example: \include MatrixBase_cwiseMin.cpp
+  * Output: \verbinclude MatrixBase_cwiseMin.out
+  *
+  * \sa class CwiseBinaryOp, cwiseMax()
+  */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const OtherDerived>
+cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise min of *this and scalar \a other
+  *
+  * \sa class CwiseBinaryOp, cwiseMin()
+  */
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const ConstantReturnType>
+cwiseMin(const Scalar &other) const
+{
+  return cwiseMin(Derived::Constant(rows(), cols(), other));
+}
+
+/** \returns an expression of the coefficient-wise max of *this and \a other
+  *
+  * Example: \include MatrixBase_cwiseMax.cpp
+  * Output: \verbinclude MatrixBase_cwiseMax.out
+  *
+  * \sa class CwiseBinaryOp, cwiseMin()
+  */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const OtherDerived>
+cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+/** \returns an expression of the coefficient-wise max of *this and scalar \a other
+  *
+  * \sa class CwiseBinaryOp, cwiseMax()
+  */
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const ConstantReturnType>
+cwiseMax(const Scalar &other) const
+{
+  return cwiseMax(Derived::Constant(rows(), cols(), other));
+}
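+
+// Illustrative usage sketch (m, lo and hi are assumed examples): the scalar
+// overloads compose into a coefficient-wise clamp.
+//
+//   Eigen::MatrixXd clamped = m.cwiseMax(lo).cwiseMin(hi);  // clamp into [lo, hi]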
+
+
+/** \returns an expression of the coefficient-wise quotient of *this and \a other
+  *
+  * Example: \include MatrixBase_cwiseQuotient.cpp
+  * Output: \verbinclude MatrixBase_cwiseQuotient.out
+  *
+  * \sa class CwiseBinaryOp, cwiseProduct(), cwiseInverse()
+  */
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>
+cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
+{
+  return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+}
+
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,internal::cmp_EQ>, const Derived, const ConstantReturnType> CwiseScalarEqualReturnType;
+
+/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
+  *
+  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+  * isMuchSmallerThan().
+  *
+  * \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
+  */
+inline const CwiseScalarEqualReturnType
+cwiseEqual(const Scalar& s) const
+{
+  return CwiseScalarEqualReturnType(derived(), Derived::Constant(rows(), cols(), s), internal::scalar_cmp_op<Scalar,internal::cmp_EQ>());
+}
diff --git a/Eigen/src/plugins/MatrixCwiseUnaryOps.h b/Eigen/src/plugins/MatrixCwiseUnaryOps.h
new file mode 100644
index 0000000..8de1093
--- /dev/null
+++ b/Eigen/src/plugins/MatrixCwiseUnaryOps.h
@@ -0,0 +1,52 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// This file is a base class plugin containing matrix-specific coefficient-wise functions.
+
+/** \returns an expression of the coefficient-wise absolute value of \c *this
+  *
+  * Example: \include MatrixBase_cwiseAbs.cpp
+  * Output: \verbinclude MatrixBase_cwiseAbs.out
+  *
+  * \sa cwiseAbs2()
+  */
+EIGEN_STRONG_INLINE const CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived>
+cwiseAbs() const { return derived(); }
+
+/** \returns an expression of the coefficient-wise squared absolute value of \c *this
+  *
+  * Example: \include MatrixBase_cwiseAbs2.cpp
+  * Output: \verbinclude MatrixBase_cwiseAbs2.out
+  *
+  * \sa cwiseAbs()
+  */
+EIGEN_STRONG_INLINE const CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived>
+cwiseAbs2() const { return derived(); }
+
+/** \returns an expression of the coefficient-wise square root of *this.
+  *
+  * Example: \include MatrixBase_cwiseSqrt.cpp
+  * Output: \verbinclude MatrixBase_cwiseSqrt.out
+  *
+  * \sa cwisePow(), cwiseSquare()
+  */
+inline const CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived>
+cwiseSqrt() const { return derived(); }
+
+/** \returns an expression of the coefficient-wise inverse of *this.
+  *
+  * Example: \include MatrixBase_cwiseInverse.cpp
+  * Output: \verbinclude MatrixBase_cwiseInverse.out
+  *
+  * \sa cwiseProduct()
+  */
+inline const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived>
+cwiseInverse() const { return derived(); }
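+
+// Illustrative usage sketch (v is an assumed example): these unary expressions
+// compose without evaluating temporaries.
+//
+//   Eigen::VectorXd v(3);
+//   v << -1.0, 4.0, -9.0;
+//   Eigen::VectorXd w = v.cwiseAbs().cwiseSqrt();  // 1, 2, 3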
+