Squashed 'third_party/eigen/' content from commit 61d72f6
Change-Id: Iccc90fa0b55ab44037f018046d2fcffd90d9d025
git-subtree-dir: third_party/eigen
git-subtree-split: 61d72f6383cfa842868c53e30e087b0258177257
diff --git a/unsupported/CMakeLists.txt b/unsupported/CMakeLists.txt
new file mode 100644
index 0000000..4fef40a
--- /dev/null
+++ b/unsupported/CMakeLists.txt
@@ -0,0 +1,7 @@
+add_subdirectory(Eigen)
+add_subdirectory(doc EXCLUDE_FROM_ALL)
+if(EIGEN_LEAVE_TEST_IN_ALL_TARGET)
+ add_subdirectory(test) # can't do EXCLUDE_FROM_ALL here, breaks CTest
+else()
+ add_subdirectory(test EXCLUDE_FROM_ALL)
+endif()
diff --git a/unsupported/Eigen/AdolcForward b/unsupported/Eigen/AdolcForward
new file mode 100644
index 0000000..2627dec
--- /dev/null
+++ b/unsupported/Eigen/AdolcForward
@@ -0,0 +1,156 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ADOLC_FORWARD
+#define EIGEN_ADOLC_FORWARD
+
+//--------------------------------------------------------------------------------
+//
+// This file provides support for adolc's adouble type in forward mode.
+// ADOL-C is a C++ automatic differentiation library,
+// see https://projects.coin-or.org/ADOL-C for more information.
+//
+// Note that the maximal number of directions is controlled by
+// the preprocessor token NUMBER_DIRECTIONS. The default is 2.
+//
+//--------------------------------------------------------------------------------
+
+#define ADOLC_TAPELESS
+#ifndef NUMBER_DIRECTIONS
+# define NUMBER_DIRECTIONS 2
+#endif
+#include <adolc/adouble.h>
+
+// adolc defines some very stupid macros:
+#if defined(malloc)
+# undef malloc
+#endif
+
+#if defined(calloc)
+# undef calloc
+#endif
+
+#if defined(realloc)
+# undef realloc
+#endif
+
+#include <Eigen/Core>
+
+namespace Eigen {
+
+/**
+ * \defgroup AdolcForward_Module Adolc forward module
+ * This module provides support for adolc's adouble type in forward mode.
+ * ADOL-C is a C++ automatic differentiation library,
+ * see https://projects.coin-or.org/ADOL-C for more information.
+ * It mainly consists of:
+ * - a struct Eigen::NumTraits<adtl::adouble> specialization
+ * - overloads of the internal::* math functions for the adtl::adouble type.
+ *
+ * Note that the maximal number of directions is controlled by
+ * the preprocessor token NUMBER_DIRECTIONS. The default is 2.
+ *
+ * \code
+ * #include <unsupported/Eigen/AdolcForward>
+ * \endcode
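+ *
+ * A minimal usage sketch of the AdolcForwardJacobian wrapper defined below
+ * (the functor is hypothetical; any functor exposing the same typedefs and a
+ * scalar-generic operator() should work):
+ * \code
+ * struct MyFunctor {
+ *   typedef Eigen::Vector2d InputType;
+ *   typedef Eigen::Vector2d ValueType;
+ *   typedef Eigen::Matrix2d JacobianType;
+ *   template<typename T>
+ *   void operator()(const Eigen::Matrix<T,2,1>& x, Eigen::Matrix<T,2,1>* v) const {
+ *     (*v)(0) = x(0) * x(1); // f1(x) = x0*x1
+ *     (*v)(1) = x(0) + x(1); // f2(x) = x0+x1
+ *   }
+ * };
+ *
+ * Eigen::AdolcForwardJacobian<MyFunctor> f;
+ * Eigen::Vector2d x(1,2), v;
+ * Eigen::Matrix2d jac;
+ * f(x, &v, &jac); // v = f(x), jac = df/dx evaluated at x
+ * \endcode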
+ */
+ //@{
+
+} // namespace Eigen
+
+// Eigen requires a few additional functions which must be defined in the
+// same namespace as the custom scalar type itself
+namespace adtl {
+
+inline const adouble& conj(const adouble& x) { return x; }
+inline const adouble& real(const adouble& x) { return x; }
+inline adouble imag(const adouble&) { return 0.; }
+inline adouble abs(const adouble& x) { return fabs(x); }
+inline adouble abs2(const adouble& x) { return x*x; }
+
+}
+
+namespace Eigen {
+
+template<> struct NumTraits<adtl::adouble>
+ : NumTraits<double>
+{
+ typedef adtl::adouble Real;
+ typedef adtl::adouble NonInteger;
+ typedef adtl::adouble Nested;
+ enum {
+ IsComplex = 0,
+ IsInteger = 0,
+ IsSigned = 1,
+ RequireInitialization = 1,
+ ReadCost = 1,
+ AddCost = 1,
+ MulCost = 1
+ };
+};
+
+template<typename Functor> class AdolcForwardJacobian : public Functor
+{
+ typedef adtl::adouble ActiveScalar;
+public:
+
+ AdolcForwardJacobian() : Functor() {}
+ AdolcForwardJacobian(const Functor& f) : Functor(f) {}
+
+ // forward constructors
+ template<typename T0>
+ AdolcForwardJacobian(const T0& a0) : Functor(a0) {}
+ template<typename T0, typename T1>
+ AdolcForwardJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {}
+ template<typename T0, typename T1, typename T2>
+  AdolcForwardJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {}
+
+ typedef typename Functor::InputType InputType;
+ typedef typename Functor::ValueType ValueType;
+ typedef typename Functor::JacobianType JacobianType;
+
+ typedef Matrix<ActiveScalar, InputType::SizeAtCompileTime, 1> ActiveInput;
+ typedef Matrix<ActiveScalar, ValueType::SizeAtCompileTime, 1> ActiveValue;
+
+ void operator() (const InputType& x, ValueType* v, JacobianType* _jac) const
+ {
+ eigen_assert(v!=0);
+ if (!_jac)
+ {
+ Functor::operator()(x, v);
+ return;
+ }
+
+ JacobianType& jac = *_jac;
+
+ ActiveInput ax = x.template cast<ActiveScalar>();
+ ActiveValue av(jac.rows());
+
+ for (int j=0; j<jac.cols(); j++)
+ for (int i=0; i<jac.cols(); i++)
+ ax[i].setADValue(j, i==j ? 1 : 0);
+
+ Functor::operator()(ax, &av);
+
+ for (int i=0; i<jac.rows(); i++)
+ {
+ (*v)[i] = av[i].getValue();
+ for (int j=0; j<jac.cols(); j++)
+ jac.coeffRef(i,j) = av[i].getADValue(j);
+ }
+ }
+protected:
+
+};
+
+//@}
+
+}
+
+#endif // EIGEN_ADOLC_FORWARD
diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3
new file mode 100644
index 0000000..7b45e6c
--- /dev/null
+++ b/unsupported/Eigen/AlignedVector3
@@ -0,0 +1,190 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ALIGNED_VECTOR3
+#define EIGEN_ALIGNED_VECTOR3
+
+#include <Eigen/Geometry>
+
+namespace Eigen {
+
+/**
+ * \defgroup AlignedVector3_Module Aligned vector3 module
+ *
+ * \code
+ * #include <unsupported/Eigen/AlignedVector3>
+ * \endcode
+ */
+ //@{
+
+
+/** \class AlignedVector3
+ *
+ * \brief A vectorization friendly 3D vector
+ *
+ * This class represents a 3D vector internally using a 4D vector
+ * such that vectorization can be seamlessly enabled. Of course,
+ * the same result can be achieved by directly using a 4D vector.
+ * This class makes this process simpler.
+ *
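+ * A short usage sketch (values are illustrative):
+ * \code
+ * Eigen::AlignedVector3<float> a(1.f, 2.f, 3.f), b(4.f, 5.f, 6.f);
+ * Eigen::AlignedVector3<float> c = a.cross(b); // vectorizable 3D cross product
+ * float d = a.dot(b);                          // vectorizable dot product
+ * \endcode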
+ */
+// TODO specialize Cwise
+template<typename _Scalar> class AlignedVector3;
+
+namespace internal {
+template<typename _Scalar> struct traits<AlignedVector3<_Scalar> >
+ : traits<Matrix<_Scalar,3,1,0,4,1> >
+{
+};
+}
+
+template<typename _Scalar> class AlignedVector3
+ : public MatrixBase<AlignedVector3<_Scalar> >
+{
+ typedef Matrix<_Scalar,4,1> CoeffType;
+ CoeffType m_coeffs;
+ public:
+
+ typedef MatrixBase<AlignedVector3<_Scalar> > Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(AlignedVector3)
+ using Base::operator*;
+
+ inline Index rows() const { return 3; }
+ inline Index cols() const { return 1; }
+
+ inline const Scalar& coeff(Index row, Index col) const
+ { return m_coeffs.coeff(row, col); }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ { return m_coeffs.coeffRef(row, col); }
+
+ inline const Scalar& coeff(Index index) const
+ { return m_coeffs.coeff(index); }
+
+ inline Scalar& coeffRef(Index index)
+ { return m_coeffs.coeffRef(index);}
+
+
+ inline AlignedVector3(const Scalar& x, const Scalar& y, const Scalar& z)
+ : m_coeffs(x, y, z, Scalar(0))
+ {}
+
+ inline AlignedVector3(const AlignedVector3& other)
+ : Base(), m_coeffs(other.m_coeffs)
+ {}
+
+ template<typename XprType, int Size=XprType::SizeAtCompileTime>
+ struct generic_assign_selector {};
+
+ template<typename XprType> struct generic_assign_selector<XprType,4>
+ {
+ inline static void run(AlignedVector3& dest, const XprType& src)
+ {
+ dest.m_coeffs = src;
+ }
+ };
+
+ template<typename XprType> struct generic_assign_selector<XprType,3>
+ {
+ inline static void run(AlignedVector3& dest, const XprType& src)
+ {
+ dest.m_coeffs.template head<3>() = src;
+ dest.m_coeffs.w() = Scalar(0);
+ }
+ };
+
+ template<typename Derived>
+ inline explicit AlignedVector3(const MatrixBase<Derived>& other)
+ {
+ generic_assign_selector<Derived>::run(*this,other.derived());
+ }
+
+ inline AlignedVector3& operator=(const AlignedVector3& other)
+ { m_coeffs = other.m_coeffs; return *this; }
+
+
+ inline AlignedVector3 operator+(const AlignedVector3& other) const
+ { return AlignedVector3(m_coeffs + other.m_coeffs); }
+
+ inline AlignedVector3& operator+=(const AlignedVector3& other)
+ { m_coeffs += other.m_coeffs; return *this; }
+
+ inline AlignedVector3 operator-(const AlignedVector3& other) const
+ { return AlignedVector3(m_coeffs - other.m_coeffs); }
+
+  inline AlignedVector3& operator-=(const AlignedVector3& other)
+ { m_coeffs -= other.m_coeffs; return *this; }
+
+ inline AlignedVector3 operator*(const Scalar& s) const
+ { return AlignedVector3(m_coeffs * s); }
+
+ inline friend AlignedVector3 operator*(const Scalar& s,const AlignedVector3& vec)
+ { return AlignedVector3(s * vec.m_coeffs); }
+
+ inline AlignedVector3& operator*=(const Scalar& s)
+ { m_coeffs *= s; return *this; }
+
+ inline AlignedVector3 operator/(const Scalar& s) const
+ { return AlignedVector3(m_coeffs / s); }
+
+ inline AlignedVector3& operator/=(const Scalar& s)
+ { m_coeffs /= s; return *this; }
+
+ inline Scalar dot(const AlignedVector3& other) const
+ {
+ eigen_assert(m_coeffs.w()==Scalar(0));
+ eigen_assert(other.m_coeffs.w()==Scalar(0));
+ return m_coeffs.dot(other.m_coeffs);
+ }
+
+ inline void normalize()
+ {
+ m_coeffs /= norm();
+ }
+
+  inline AlignedVector3 normalized() const
+ {
+ return AlignedVector3(m_coeffs / norm());
+ }
+
+ inline Scalar sum() const
+ {
+ eigen_assert(m_coeffs.w()==Scalar(0));
+ return m_coeffs.sum();
+ }
+
+ inline Scalar squaredNorm() const
+ {
+ eigen_assert(m_coeffs.w()==Scalar(0));
+ return m_coeffs.squaredNorm();
+ }
+
+ inline Scalar norm() const
+ {
+ using std::sqrt;
+ return sqrt(squaredNorm());
+ }
+
+ inline AlignedVector3 cross(const AlignedVector3& other) const
+ {
+ return AlignedVector3(m_coeffs.cross3(other.m_coeffs));
+ }
+
+ template<typename Derived>
+ inline bool isApprox(const MatrixBase<Derived>& other, RealScalar eps=NumTraits<Scalar>::dummy_precision()) const
+ {
+ return m_coeffs.template head<3>().isApprox(other,eps);
+ }
+};
+
+//@}
+
+}
+
+#endif // EIGEN_ALIGNED_VECTOR3
diff --git a/unsupported/Eigen/ArpackSupport b/unsupported/Eigen/ArpackSupport
new file mode 100644
index 0000000..37a2799
--- /dev/null
+++ b/unsupported/Eigen/ArpackSupport
@@ -0,0 +1,31 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ARPACKSUPPORT_MODULE_H
+#define EIGEN_ARPACKSUPPORT_MODULE_H
+
+#include <Eigen/Core>
+
+#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+
+/** \defgroup ArpackSupport_Module Arpack support module
+ *
+ * This module provides a wrapper to ARPACK, a library for solving large-scale sparse eigenvalue problems.
+ *
+ * \code
+ * #include <unsupported/Eigen/ArpackSupport>
+ * \endcode
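+ *
+ * A usage sketch; the class name comes from the included header, but the
+ * constructor signature shown here is an assumption -- check
+ * ArpackSelfAdjointEigenSolver.h for the exact API:
+ * \code
+ * Eigen::SparseMatrix<double> A; // selfadjoint, filled elsewhere
+ * Eigen::ArpackGeneralizedSelfAdjointEigenSolver<Eigen::SparseMatrix<double> >
+ *     eigs(A, 3, "SM"); // the 3 eigenvalues of smallest magnitude
+ * Eigen::VectorXd evals = eigs.eigenvalues();
+ * \endcode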
+ */
+
+#include <Eigen/SparseCholesky>
+#include "src/Eigenvalues/ArpackSelfAdjointEigenSolver.h"
+
+#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+
+#endif // EIGEN_ARPACKSUPPORT_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/AutoDiff b/unsupported/Eigen/AutoDiff
new file mode 100644
index 0000000..abf5b7d
--- /dev/null
+++ b/unsupported/Eigen/AutoDiff
@@ -0,0 +1,40 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_AUTODIFF_MODULE
+#define EIGEN_AUTODIFF_MODULE
+
+namespace Eigen {
+
+/**
+ * \defgroup AutoDiff_Module Auto Diff module
+ *
+ * This module features forward automatic differentiation via a simple
+ * templated scalar type wrapper AutoDiffScalar.
+ *
+ * Warning: this should NOT be confused with numerical differentiation, which
+ * is a different method and has its own module in Eigen: \ref NumericalDiff_Module.
+ *
+ * \code
+ * #include <unsupported/Eigen/AutoDiff>
+ * \endcode
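+ *
+ * A minimal sketch of AutoDiffScalar (values are illustrative):
+ * \code
+ * typedef Eigen::AutoDiffScalar<Eigen::Vector2d> AD;
+ * AD x(1.0, 2, 0); // value 1.0, 2 derivative directions, seeded as direction 0
+ * AD y(2.0, 2, 1); // value 2.0, seeded as direction 1
+ * AD z = x * y + sin(x);
+ * // z.value() is the function value; z.derivatives() is the gradient (df/dx, df/dy)
+ * \endcode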
+ */
+//@{
+
+}
+
+#include "src/AutoDiff/AutoDiffScalar.h"
+// #include "src/AutoDiff/AutoDiffVector.h"
+#include "src/AutoDiff/AutoDiffJacobian.h"
+
+namespace Eigen {
+//@}
+}
+
+#endif // EIGEN_AUTODIFF_MODULE
diff --git a/unsupported/Eigen/BVH b/unsupported/Eigen/BVH
new file mode 100644
index 0000000..0161a54
--- /dev/null
+++ b/unsupported/Eigen/BVH
@@ -0,0 +1,95 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Ilya Baran <ibaran@mit.edu>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BVH_MODULE_H
+#define EIGEN_BVH_MODULE_H
+
+#include <Eigen/Core>
+#include <Eigen/Geometry>
+#include <Eigen/StdVector>
+#include <algorithm>
+#include <queue>
+
+namespace Eigen {
+
+/**
+ * \defgroup BVH_Module BVH module
+ * \brief This module provides generic bounding volume hierarchy algorithms
+ * and reference tree implementations.
+ *
+ *
+ * \code
+ * #include <unsupported/Eigen/BVH>
+ * \endcode
+ *
+ * A bounding volume hierarchy (BVH) can accelerate many geometric queries. This module provides a generic implementation
+ * of the two basic algorithms over a BVH: intersection of a query object against all objects in the hierarchy and minimization
+ * of a function over the objects in the hierarchy. It also provides intersection and minimization over a cartesian product of
+ * two BVH's. A BVH accelerates intersection by using the fact that if a query object does not intersect a volume, then it cannot
+ * intersect any object contained in that volume. Similarly, a BVH accelerates minimization because the minimum of a function
+ * over a volume is no greater than the minimum of a function over any object contained in it.
+ *
+ * Some sample queries that can be written in terms of intersection are:
+ * - Determine all points where a ray intersects a triangle mesh
+ * - Given a set of points, determine which are contained in a query sphere
+ * - Given a set of spheres, determine which contain the query point
+ * - Given a set of disks, determine if any is completely contained in a query rectangle (represent each 2D disk as a point \f$(x,y,r)\f$
+ * in 3D and represent the rectangle as a pyramid based on the original rectangle and shrinking in the \f$r\f$ direction)
+ * - Given a set of points, count how many pairs are \f$d\pm\epsilon\f$ apart (done by looking at the cartesian product of the set
+ * of points with itself)
+ *
+ * Some sample queries that can be written in terms of function minimization over a set of objects are:
+ * - Find the intersection between a ray and a triangle mesh closest to the ray origin (function is infinite off the ray)
+ * - Given a polyline and a query point, determine the closest point on the polyline to the query
+ * - Find the diameter of a point cloud (done by looking at the cartesian product and using negative distance as the function)
+ * - Determine how far two meshes are from colliding (this is also a cartesian product query)
+ *
+ * This implementation decouples the basic algorithms both from the type of hierarchy (and the types of the bounding volumes) and
+ * from the particulars of the query. To enable abstraction from the BVH, the BVH is required to implement a generic mechanism
+ * for traversal. To abstract from the query, the query is responsible for keeping track of results.
+ *
+ * To be used in the algorithms, a hierarchy must implement the following traversal mechanism (see KdBVH for a sample implementation): \code
+ typedef Volume //the type of bounding volume
+ typedef Object //the type of object in the hierarchy
+ typedef Index //a reference to a node in the hierarchy--typically an int or a pointer
+ typedef VolumeIterator //an iterator type over node children--returns Index
+ typedef ObjectIterator //an iterator over object (leaf) children--returns const Object &
+ Index getRootIndex() const //returns the index of the hierarchy root
+ const Volume &getVolume(Index index) const //returns the bounding volume of the node at given index
+ void getChildren(Index index, VolumeIterator &outVBegin, VolumeIterator &outVEnd,
+ ObjectIterator &outOBegin, ObjectIterator &outOEnd) const
+ //getChildren takes a node index and makes [outVBegin, outVEnd) range over its node children
+ //and [outOBegin, outOEnd) range over its object children
+ \endcode
+ *
+ * To use the hierarchy, call BVIntersect or BVMinimize, passing it a BVH (or two, for cartesian product) and a minimizer or intersector.
+ * For an intersection query on a single BVH, the intersector encapsulates the query and must provide two functions:
+ * \code
+ bool intersectVolume(const Volume &volume) //returns true if the query intersects the volume
+ bool intersectObject(const Object &object) //returns true if the intersection search should terminate immediately
+ \endcode
+ * The guarantee that BVIntersect provides is that intersectObject will be called on every object whose bounding volume
+ * intersects the query (but possibly on other objects too) unless the search is terminated prematurely. It is the
+ * responsibility of the intersectObject function to keep track of the results in whatever manner is appropriate.
+ * The cartesian product intersection and the BVMinimize queries are similar--see their individual documentation.
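+ *
+ * For a minimization query on a single BVH, the minimizer plays the analogous role. A sketch of the expected
+ * interface, mirroring the intersector above (see the BVMinimize documentation for the exact requirements):
+ * \code
+  typedef double Scalar //the type of the function's values
+  Scalar minimumOnVolume(const Volume &volume) //returns a lower bound of the function over the volume
+  Scalar minimumOnObject(const Object &object) //returns the minimum of the function over the object
+ \endcode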
+ *
+ * The following is a simple but complete example for how to use the BVH to accelerate the search for a closest red-blue point pair:
+ * \include BVH_Example.cpp
+ * Output: \verbinclude BVH_Example.out
+ */
+}
+
+//@{
+
+#include "src/BVH/BVAlgorithms.h"
+#include "src/BVH/KdBVH.h"
+
+//@}
+
+#endif // EIGEN_BVH_MODULE_H
diff --git a/unsupported/Eigen/CMakeLists.txt b/unsupported/Eigen/CMakeLists.txt
new file mode 100644
index 0000000..e1fbf97
--- /dev/null
+++ b/unsupported/Eigen/CMakeLists.txt
@@ -0,0 +1,11 @@
+set(Eigen_HEADERS AdolcForward AlignedVector3 ArpackSupport AutoDiff BVH FFT IterativeSolvers KroneckerProduct LevenbergMarquardt
+ MatrixFunctions MoreVectorization MPRealSupport NonLinearOptimization NumericalDiff OpenGLSupport Polynomials
+ Skyline SparseExtra Splines
+ )
+
+install(FILES
+ ${Eigen_HEADERS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen COMPONENT Devel
+ )
+
+add_subdirectory(src)
diff --git a/unsupported/Eigen/FFT b/unsupported/Eigen/FFT
new file mode 100644
index 0000000..2c45b39
--- /dev/null
+++ b/unsupported/Eigen/FFT
@@ -0,0 +1,418 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Mark Borgerding mark a borgerding net
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_FFT_H
+#define EIGEN_FFT_H
+
+#include <complex>
+#include <vector>
+#include <map>
+#include <Eigen/Core>
+
+
+/**
+ * \defgroup FFT_Module Fast Fourier Transform module
+ *
+ * \code
+ * #include <unsupported/Eigen/FFT>
+ * \endcode
+ *
+ * This module provides Fast Fourier transformation, with a configurable backend
+ * implementation.
+ *
+ * The default implementation is based on kissfft. It is a small, free, and
+ * reasonably efficient default.
+ *
+ * There are currently two optional implementation backends:
+ *
+ * - fftw (http://www.fftw.org) : faster, GPL -- incompatible with Eigen in LGPL form, bigger code size.
+ * - MKL (http://en.wikipedia.org/wiki/Math_Kernel_Library) : fastest, commercial -- may be incompatible with Eigen in GPL form.
+ *
+ * \section FFTDesign Design
+ *
+ * The following design decisions were made concerning scaling and
+ * half-spectrum for real FFT.
+ *
+ * The intent is to facilitate generic programming and ease migrating code
+ * from Matlab/octave.
+ * We think the default behavior of Eigen/FFT should favor correctness and
+ * generality over speed. Of course, the caller should be able to opt out of this
+ * behavior and get the speed increase if they want it.
+ *
+ * 1) %Scaling:
+ * Other libraries (FFTW, IMKL, KISSFFT) do not perform scaling, so there
+ * is a constant gain incurred after the forward & inverse transforms, i.e.
+ * IFFT(FFT(x)) = Kx; this is done to avoid a vector-by-scalar multiply.
+ * The downside is that algorithms that worked correctly in Matlab/octave
+ * don't behave the same way once implemented in C++.
+ *
+ * How Eigen/FFT differs: invertible scaling is performed so IFFT( FFT(x) ) = x.
+ *
+ * 2) Real FFT half-spectrum
+ * Other libraries use only half the frequency spectrum (plus one extra
+ * sample for the Nyquist bin) for a real FFT, the other half is the
+ * conjugate-symmetric of the first half. This saves them a copy and some
+ * memory. The downside is the caller needs to have special logic for the
+ * number of bins in complex vs real.
+ *
+ * How Eigen/FFT differs: The full spectrum is returned from the forward
+ * transform. This facilitates generic template programming by obviating
+ * separate specializations for real vs complex. On the inverse
+ * transform, only half the spectrum is actually used if the output type is real.
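+ *
+ * A minimal usage sketch (input values are illustrative):
+ * \code
+ * std::vector<float> timevec(32, 1.f);
+ * std::vector<std::complex<float> > freqvec;
+ *
+ * Eigen::FFT<float> fft;
+ * fft.fwd(freqvec, timevec); // forward transform
+ * fft.inv(timevec, freqvec); // scaled so that IFFT(FFT(x)) = x
+ * \endcode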
+ */
+
+
+#ifdef EIGEN_FFTW_DEFAULT
+// FFTW: faster, GPL -- incompatible with Eigen in LGPL form, bigger code size
+# include <fftw3.h>
+# include "src/FFT/ei_fftw_impl.h"
+ namespace Eigen {
+ //template <typename T> typedef struct internal::fftw_impl default_fft_impl; this does not work
+ template <typename T> struct default_fft_impl : public internal::fftw_impl<T> {};
+ }
+#elif defined EIGEN_MKL_DEFAULT
+// TODO
+// intel Math Kernel Library: fastest, commercial -- may be incompatible with Eigen in GPL form
+# include "src/FFT/ei_imklfft_impl.h"
+ namespace Eigen {
+ template <typename T> struct default_fft_impl : public internal::imklfft_impl {};
+ }
+#else
+// internal::kissfft_impl: small, free, reasonably efficient default, derived from kissfft
+//
+# include "src/FFT/ei_kissfft_impl.h"
+ namespace Eigen {
+ template <typename T>
+ struct default_fft_impl : public internal::kissfft_impl<T> {};
+ }
+#endif
+
+namespace Eigen {
+
+
+//
+template<typename T_SrcMat,typename T_FftIfc> struct fft_fwd_proxy;
+template<typename T_SrcMat,typename T_FftIfc> struct fft_inv_proxy;
+
+namespace internal {
+template<typename T_SrcMat,typename T_FftIfc>
+struct traits< fft_fwd_proxy<T_SrcMat,T_FftIfc> >
+{
+ typedef typename T_SrcMat::PlainObject ReturnType;
+};
+template<typename T_SrcMat,typename T_FftIfc>
+struct traits< fft_inv_proxy<T_SrcMat,T_FftIfc> >
+{
+ typedef typename T_SrcMat::PlainObject ReturnType;
+};
+}
+
+template<typename T_SrcMat,typename T_FftIfc>
+struct fft_fwd_proxy
+ : public ReturnByValue<fft_fwd_proxy<T_SrcMat,T_FftIfc> >
+{
+ typedef DenseIndex Index;
+
+ fft_fwd_proxy(const T_SrcMat& src,T_FftIfc & fft, Index nfft) : m_src(src),m_ifc(fft), m_nfft(nfft) {}
+
+ template<typename T_DestMat> void evalTo(T_DestMat& dst) const;
+
+ Index rows() const { return m_src.rows(); }
+ Index cols() const { return m_src.cols(); }
+protected:
+ const T_SrcMat & m_src;
+ T_FftIfc & m_ifc;
+ Index m_nfft;
+private:
+ fft_fwd_proxy& operator=(const fft_fwd_proxy&);
+};
+
+template<typename T_SrcMat,typename T_FftIfc>
+struct fft_inv_proxy
+ : public ReturnByValue<fft_inv_proxy<T_SrcMat,T_FftIfc> >
+{
+ typedef DenseIndex Index;
+
+ fft_inv_proxy(const T_SrcMat& src,T_FftIfc & fft, Index nfft) : m_src(src),m_ifc(fft), m_nfft(nfft) {}
+
+ template<typename T_DestMat> void evalTo(T_DestMat& dst) const;
+
+ Index rows() const { return m_src.rows(); }
+ Index cols() const { return m_src.cols(); }
+protected:
+ const T_SrcMat & m_src;
+ T_FftIfc & m_ifc;
+ Index m_nfft;
+private:
+ fft_inv_proxy& operator=(const fft_inv_proxy&);
+};
+
+
+template <typename T_Scalar,
+ typename T_Impl=default_fft_impl<T_Scalar> >
+class FFT
+{
+ public:
+ typedef T_Impl impl_type;
+ typedef DenseIndex Index;
+ typedef typename impl_type::Scalar Scalar;
+ typedef typename impl_type::Complex Complex;
+
+ enum Flag {
+ Default=0, // goof proof
+ Unscaled=1,
+ HalfSpectrum=2,
+ // SomeOtherSpeedOptimization=4
+ Speedy=32767
+ };
+
+ FFT( const impl_type & impl=impl_type() , Flag flags=Default ) :m_impl(impl),m_flag(flags) { }
+
+ inline
+ bool HasFlag(Flag f) const { return (m_flag & (int)f) == f;}
+
+ inline
+ void SetFlag(Flag f) { m_flag |= (int)f;}
+
+ inline
+ void ClearFlag(Flag f) { m_flag &= (~(int)f);}
+
+ inline
+ void fwd( Complex * dst, const Scalar * src, Index nfft)
+ {
+ m_impl.fwd(dst,src,static_cast<int>(nfft));
+ if ( HasFlag(HalfSpectrum) == false)
+ ReflectSpectrum(dst,nfft);
+ }
+
+ inline
+ void fwd( Complex * dst, const Complex * src, Index nfft)
+ {
+ m_impl.fwd(dst,src,static_cast<int>(nfft));
+ }
+
+ /*
+ inline
+ void fwd2(Complex * dst, const Complex * src, int n0,int n1)
+ {
+ m_impl.fwd2(dst,src,n0,n1);
+ }
+ */
+
+ template <typename _Input>
+ inline
+ void fwd( std::vector<Complex> & dst, const std::vector<_Input> & src)
+ {
+ if ( NumTraits<_Input>::IsComplex == 0 && HasFlag(HalfSpectrum) )
+ dst.resize( (src.size()>>1)+1); // half the bins + Nyquist bin
+ else
+ dst.resize(src.size());
+ fwd(&dst[0],&src[0],src.size());
+ }
+
+ template<typename InputDerived, typename ComplexDerived>
+ inline
+ void fwd( MatrixBase<ComplexDerived> & dst, const MatrixBase<InputDerived> & src, Index nfft=-1)
+ {
+ typedef typename ComplexDerived::Scalar dst_type;
+ typedef typename InputDerived::Scalar src_type;
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(InputDerived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(ComplexDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(ComplexDerived,InputDerived) // size at compile-time
+ EIGEN_STATIC_ASSERT((internal::is_same<dst_type, Complex>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ EIGEN_STATIC_ASSERT(int(InputDerived::Flags)&int(ComplexDerived::Flags)&DirectAccessBit,
+ THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES)
+
+ if (nfft<1)
+ nfft = src.size();
+
+ if ( NumTraits< src_type >::IsComplex == 0 && HasFlag(HalfSpectrum) )
+ dst.derived().resize( (nfft>>1)+1);
+ else
+ dst.derived().resize(nfft);
+
+ if ( src.innerStride() != 1 || src.size() < nfft ) {
+ Matrix<src_type,1,Dynamic> tmp;
+ if (src.size()<nfft) {
+ tmp.setZero(nfft);
+ tmp.block(0,0,src.size(),1 ) = src;
+ }else{
+ tmp = src;
+ }
+ fwd( &dst[0],&tmp[0],nfft );
+ }else{
+ fwd( &dst[0],&src[0],nfft );
+ }
+ }
+
+ template<typename InputDerived>
+ inline
+ fft_fwd_proxy< MatrixBase<InputDerived>, FFT<T_Scalar,T_Impl> >
+ fwd( const MatrixBase<InputDerived> & src, Index nfft=-1)
+ {
+ return fft_fwd_proxy< MatrixBase<InputDerived> ,FFT<T_Scalar,T_Impl> >( src, *this,nfft );
+ }
+
+ template<typename InputDerived>
+ inline
+ fft_inv_proxy< MatrixBase<InputDerived>, FFT<T_Scalar,T_Impl> >
+ inv( const MatrixBase<InputDerived> & src, Index nfft=-1)
+ {
+ return fft_inv_proxy< MatrixBase<InputDerived> ,FFT<T_Scalar,T_Impl> >( src, *this,nfft );
+ }
+
+ inline
+ void inv( Complex * dst, const Complex * src, Index nfft)
+ {
+ m_impl.inv( dst,src,static_cast<int>(nfft) );
+ if ( HasFlag( Unscaled ) == false)
+ scale(dst,Scalar(1./nfft),nfft); // scale the time series
+ }
+
+ inline
+ void inv( Scalar * dst, const Complex * src, Index nfft)
+ {
+ m_impl.inv( dst,src,static_cast<int>(nfft) );
+ if ( HasFlag( Unscaled ) == false)
+ scale(dst,Scalar(1./nfft),nfft); // scale the time series
+ }
+
+ template<typename OutputDerived, typename ComplexDerived>
+ inline
+ void inv( MatrixBase<OutputDerived> & dst, const MatrixBase<ComplexDerived> & src, Index nfft=-1)
+ {
+ typedef typename ComplexDerived::Scalar src_type;
+ typedef typename OutputDerived::Scalar dst_type;
+ const bool realfft= (NumTraits<dst_type>::IsComplex == 0);
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OutputDerived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(ComplexDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(ComplexDerived,OutputDerived) // size at compile-time
+ EIGEN_STATIC_ASSERT((internal::is_same<src_type, Complex>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+ EIGEN_STATIC_ASSERT(int(OutputDerived::Flags)&int(ComplexDerived::Flags)&DirectAccessBit,
+ THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES)
+
+ if (nfft<1) { //automatic FFT size determination
+ if ( realfft && HasFlag(HalfSpectrum) )
+ nfft = 2*(src.size()-1); //assume even fft size
+ else
+ nfft = src.size();
+ }
+ dst.derived().resize( nfft );
+
+ // check for nfft that does not fit the input data size
+ Index resize_input= ( realfft && HasFlag(HalfSpectrum) )
+ ? ( (nfft/2+1) - src.size() )
+ : ( nfft - src.size() );
+
+ if ( src.innerStride() != 1 || resize_input ) {
+ // if the vector is strided, then we need to copy it to a packed temporary
+ Matrix<src_type,1,Dynamic> tmp;
+ if ( resize_input ) {
+ size_t ncopy = (std::min)(src.size(),src.size() + resize_input);
+ tmp.setZero(src.size() + resize_input);
+ if ( realfft && HasFlag(HalfSpectrum) ) {
+ // pad at the Nyquist bin
+ tmp.head(ncopy) = src.head(ncopy);
+ tmp(ncopy-1) = real(tmp(ncopy-1)); // enforce real-only Nyquist bin
+ }else{
+ size_t nhead,ntail;
+ nhead = 1+ncopy/2-1; // range [0:pi)
+ ntail = ncopy/2-1; // range (-pi:0)
+ tmp.head(nhead) = src.head(nhead);
+ tmp.tail(ntail) = src.tail(ntail);
+ if (resize_input<0) { //shrinking -- create the Nyquist bin as the average of the two bins that fold into it
+ tmp(nhead) = ( src(nfft/2) + src( src.size() - nfft/2 ) )*src_type(.5);
+ }else{ // expanding -- split the old Nyquist bin into two halves
+ tmp(nhead) = src(nhead) * src_type(.5);
+ tmp(tmp.size()-nhead) = tmp(nhead);
+ }
+ }
+ }else{
+ tmp = src;
+ }
+ inv( &dst[0],&tmp[0], nfft);
+ }else{
+ inv( &dst[0],&src[0], nfft);
+ }
+ }
+
+ template <typename _Output>
+ inline
+ void inv( std::vector<_Output> & dst, const std::vector<Complex> & src,Index nfft=-1)
+ {
+ if (nfft<1)
+ nfft = ( NumTraits<_Output>::IsComplex == 0 && HasFlag(HalfSpectrum) ) ? 2*(src.size()-1) : src.size();
+ dst.resize( nfft );
+ inv( &dst[0],&src[0],nfft);
+ }
+
+
+ /*
+ // TODO: multi-dimensional FFTs
+ inline
+ void inv2(Complex * dst, const Complex * src, int n0,int n1)
+ {
+ m_impl.inv2(dst,src,n0,n1);
+ if ( HasFlag( Unscaled ) == false)
+ scale(dst,1./(n0*n1),n0*n1);
+ }
+ */
+
+ inline
+ impl_type & impl() {return m_impl;}
+ private:
+
+ template <typename T_Data>
+ inline
+ void scale(T_Data * x,Scalar s,Index nx)
+ {
+#if 1
+ for (int k=0;k<nx;++k)
+ *x++ *= s;
+#else
+ if ( ((ptrdiff_t)x) & 15 )
+ Matrix<T_Data, Dynamic, 1>::Map(x,nx) *= s;
+ else
+ Matrix<T_Data, Dynamic, 1>::MapAligned(x,nx) *= s;
+ //Matrix<T_Data, Dynamic, Dynamic>::Map(x,nx) * s;
+#endif
+ }
+
+ inline
+ void ReflectSpectrum(Complex * freq, Index nfft)
+ {
+ // create the implicit right-half spectrum (conjugate-mirror of the left-half)
+ Index nhbins=(nfft>>1)+1;
+ for (Index k=nhbins;k < nfft; ++k )
+ freq[k] = conj(freq[nfft-k]);
+ }
+
+ impl_type m_impl;
+ int m_flag;
+};
+
+template<typename T_SrcMat,typename T_FftIfc>
+template<typename T_DestMat> inline
+void fft_fwd_proxy<T_SrcMat,T_FftIfc>::evalTo(T_DestMat& dst) const
+{
+ m_ifc.fwd( dst, m_src, m_nfft);
+}
+
+template<typename T_SrcMat,typename T_FftIfc>
+template<typename T_DestMat> inline
+void fft_inv_proxy<T_SrcMat,T_FftIfc>::evalTo(T_DestMat& dst) const
+{
+ m_ifc.inv( dst, m_src, m_nfft);
+}
+
+}
+#endif
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/IterativeSolvers b/unsupported/Eigen/IterativeSolvers
new file mode 100644
index 0000000..aa15403
--- /dev/null
+++ b/unsupported/Eigen/IterativeSolvers
@@ -0,0 +1,45 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ITERATIVE_SOLVERS_MODULE_H
+#define EIGEN_ITERATIVE_SOLVERS_MODULE_H
+
+#include <Eigen/Sparse>
+
+/**
+ * \defgroup IterativeSolvers_Module Iterative solvers module
+ * This module aims to provide various iterative linear and non-linear solver algorithms.
+ * It currently provides:
+ * - a constrained conjugate gradient
+ * - a Householder GMRES implementation
+ * \code
+ * #include <unsupported/Eigen/IterativeSolvers>
+ * \endcode
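+ *
+ * A minimal GMRES usage sketch (assuming a square sparse system filled elsewhere):
+ * \code
+ * Eigen::SparseMatrix<double> A;
+ * Eigen::VectorXd b, x;
+ * // ... fill A and b ...
+ * Eigen::GMRES<Eigen::SparseMatrix<double> > solver(A);
+ * x = solver.solve(b);
+ * \endcode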
+ */
+//@{
+
+#include "../../Eigen/src/misc/Solve.h"
+#include "../../Eigen/src/misc/SparseSolve.h"
+
+#ifndef EIGEN_MPL2_ONLY
+#include "src/IterativeSolvers/IterationController.h"
+#include "src/IterativeSolvers/ConstrainedConjGrad.h"
+#endif
+
+#include "src/IterativeSolvers/IncompleteLU.h"
+#include "../../Eigen/Jacobi"
+#include "../../Eigen/Householder"
+#include "src/IterativeSolvers/GMRES.h"
+#include "src/IterativeSolvers/IncompleteCholesky.h"
+//#include "src/IterativeSolvers/SSORPreconditioner.h"
+#include "src/IterativeSolvers/MINRES.h"
+
+//@}
+
+#endif // EIGEN_ITERATIVE_SOLVERS_MODULE_H
diff --git a/unsupported/Eigen/KroneckerProduct b/unsupported/Eigen/KroneckerProduct
new file mode 100644
index 0000000..c932c06
--- /dev/null
+++ b/unsupported/Eigen/KroneckerProduct
@@ -0,0 +1,34 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_KRONECKER_PRODUCT_MODULE_H
+#define EIGEN_KRONECKER_PRODUCT_MODULE_H
+
+#include "../../Eigen/Core"
+
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
+namespace Eigen {
+
+/**
+ * \defgroup KroneckerProduct_Module KroneckerProduct module
+ *
+ * This module contains an experimental Kronecker product implementation.
+ *
+ * \code
+ * #include <unsupported/Eigen/KroneckerProduct>
+ * \endcode
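+ *
+ * A small usage sketch:
+ * \code
+ * Eigen::MatrixXd A(2,2), B(3,3); // filled elsewhere
+ * Eigen::MatrixXd C = Eigen::kroneckerProduct(A, B); // 6x6 result
+ * \endcode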
+ */
+
+} // namespace Eigen
+
+#include "src/KroneckerProduct/KroneckerTensorProduct.h"
+
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_KRONECKER_PRODUCT_MODULE_H
diff --git a/unsupported/Eigen/LevenbergMarquardt b/unsupported/Eigen/LevenbergMarquardt
new file mode 100644
index 0000000..0fe2680
--- /dev/null
+++ b/unsupported/Eigen/LevenbergMarquardt
@@ -0,0 +1,45 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LEVENBERGMARQUARDT_MODULE
+#define EIGEN_LEVENBERGMARQUARDT_MODULE
+
+// #include <vector>
+
+#include <Eigen/Core>
+#include <Eigen/Jacobi>
+#include <Eigen/QR>
+#include <unsupported/Eigen/NumericalDiff>
+
+#include <Eigen/SparseQR>
+
+/**
+ * \defgroup LevenbergMarquardt_Module Levenberg-Marquardt module
+ *
+ * \code
+ * #include <unsupported/Eigen/LevenbergMarquardt>
+ * \endcode
+ *
+ *
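+ * A minimal usage sketch. MyFunctor is hypothetical; it must expose the
+ * functor interface expected by the solver (inputs()/values(), an operator()
+ * computing the residuals, and df() computing the Jacobian):
+ * \code
+ * MyFunctor functor;
+ * Eigen::VectorXd x = Eigen::VectorXd::Zero(functor.inputs());
+ * Eigen::LevenbergMarquardt<MyFunctor> lm(functor);
+ * lm.minimize(x); // x now holds the parameters at the (local) minimum
+ * \endcode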
+ */
+
+#include "Eigen/SparseCore"
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+#include "src/LevenbergMarquardt/LMqrsolv.h"
+#include "src/LevenbergMarquardt/LMcovar.h"
+#include "src/LevenbergMarquardt/LMpar.h"
+
+#endif
+
+#include "src/LevenbergMarquardt/LevenbergMarquardt.h"
+#include "src/LevenbergMarquardt/LMonestep.h"
+
+
+#endif // EIGEN_LEVENBERGMARQUARDT_MODULE
diff --git a/unsupported/Eigen/MPRealSupport b/unsupported/Eigen/MPRealSupport
new file mode 100644
index 0000000..d4b0364
--- /dev/null
+++ b/unsupported/Eigen/MPRealSupport
@@ -0,0 +1,203 @@
+// This file is part of a joint effort between Eigen, a lightweight C++ template library
+// for linear algebra, and MPFR C++, a C++ interface to the MPFR library (http://www.holoborodko.com/pavel/)
+//
+// Copyright (C) 2010-2012 Pavel Holoborodko <pavel@holoborodko.com>
+// Copyright (C) 2010 Konstantin Holoborodko <konstantin@holoborodko.com>
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MPREALSUPPORT_MODULE_H
+#define EIGEN_MPREALSUPPORT_MODULE_H
+
+#include <Eigen/Core>
+#include <mpreal.h>
+
+namespace Eigen {
+
+/**
+ * \defgroup MPRealSupport_Module MPFRC++ Support module
+ * \code
+ * #include <unsupported/Eigen/MPRealSupport>
+ * \endcode
+ *
+ * This module provides support for multi-precision floating-point numbers
+ * via the <a href="http://www.holoborodko.com/pavel/mpfr">MPFR C++</a>
+ * library which itself is built upon <a href="http://www.mpfr.org/">MPFR</a>/<a href="http://gmplib.org/">GMP</a>.
+ *
+ * You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder.
+ *
+ * Here is an example:
+ *
+\code
+#include <iostream>
+#include <Eigen/MPRealSupport>
+#include <Eigen/LU>
+using namespace mpfr;
+using namespace Eigen;
+int main()
+{
+ // set precision to 256 bits (double has only 53 bits)
+ mpreal::set_default_prec(256);
+ // Declare matrix and vector types with multi-precision scalar type
+ typedef Matrix<mpreal,Dynamic,Dynamic> MatrixXmp;
+ typedef Matrix<mpreal,Dynamic,1> VectorXmp;
+
+ MatrixXmp A = MatrixXmp::Random(100,100);
+ VectorXmp b = VectorXmp::Random(100);
+
+ // Solve Ax=b using LU
+ VectorXmp x = A.lu().solve(b);
+ std::cout << "relative error: " << (A*x - b).norm() / b.norm() << std::endl;
+ return 0;
+}
+\endcode
+ *
+ */
+
+ template<> struct NumTraits<mpfr::mpreal>
+ : GenericNumTraits<mpfr::mpreal>
+ {
+ enum {
+ IsInteger = 0,
+ IsSigned = 1,
+ IsComplex = 0,
+ RequireInitialization = 1,
+ ReadCost = 10,
+ AddCost = 10,
+ MulCost = 40
+ };
+
+ typedef mpfr::mpreal Real;
+ typedef mpfr::mpreal NonInteger;
+
+ inline static Real highest (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::maxval(Precision); }
+ inline static Real lowest (long Precision = mpfr::mpreal::get_default_prec()) { return -mpfr::maxval(Precision); }
+
+ // Constants
+ inline static Real Pi (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_pi(Precision); }
+ inline static Real Euler (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_euler(Precision); }
+ inline static Real Log2 (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_log2(Precision); }
+ inline static Real Catalan (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_catalan(Precision); }
+
+ inline static Real epsilon (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(Precision); }
+ inline static Real epsilon (const Real& x) { return mpfr::machine_epsilon(x); }
+
+ inline static Real dummy_precision()
+ {
+ unsigned int weak_prec = ((mpfr::mpreal::get_default_prec()-1) * 90) / 100;
+ return mpfr::machine_epsilon(weak_prec);
+ }
+ };
+
+ namespace internal {
+
+ template<> inline mpfr::mpreal random<mpfr::mpreal>()
+ {
+ return mpfr::random();
+ }
+
+ template<> inline mpfr::mpreal random<mpfr::mpreal>(const mpfr::mpreal& a, const mpfr::mpreal& b)
+ {
+ return a + (b-a) * random<mpfr::mpreal>();
+ }
+
+ inline bool isMuchSmallerThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps)
+ {
+ return mpfr::abs(a) <= mpfr::abs(b) * eps;
+ }
+
+ inline bool isApprox(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps)
+ {
+ return mpfr::isEqualFuzzy(a,b,eps);
+ }
+
+ inline bool isApproxOrLessThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps)
+ {
+ return a <= b || mpfr::isEqualFuzzy(a,b,eps);
+ }
+
+ template<> inline long double cast<mpfr::mpreal,long double>(const mpfr::mpreal& x)
+ { return x.toLDouble(); }
+
+ template<> inline double cast<mpfr::mpreal,double>(const mpfr::mpreal& x)
+ { return x.toDouble(); }
+
+ template<> inline long cast<mpfr::mpreal,long>(const mpfr::mpreal& x)
+ { return x.toLong(); }
+
+ template<> inline int cast<mpfr::mpreal,int>(const mpfr::mpreal& x)
+ { return int(x.toLong()); }
+
+ // Specialize GEBP kernel and traits for mpreal (no need for peeling, nor complicated stuff)
+  // This also permits us to call mpfr's routines directly and avoid the many temporaries produced by mpreal
+ template<>
+ class gebp_traits<mpfr::mpreal, mpfr::mpreal, false, false>
+ {
+ public:
+ typedef mpfr::mpreal ResScalar;
+ enum {
+ nr = 2, // must be 2 for proper packing...
+ mr = 1,
+ WorkSpaceFactor = nr,
+ LhsProgress = 1,
+ RhsProgress = 1
+ };
+ };
+
+ template<typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+ struct gebp_kernel<mpfr::mpreal,mpfr::mpreal,Index,mr,nr,ConjugateLhs,ConjugateRhs>
+ {
+ typedef mpfr::mpreal mpreal;
+
+ EIGEN_DONT_INLINE
+ void operator()(mpreal* res, Index resStride, const mpreal* blockA, const mpreal* blockB, Index rows, Index depth, Index cols, mpreal alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, mpreal* /*unpackedB*/ = 0)
+ {
+ mpreal acc1, acc2, tmp;
+
+ if(strideA==-1) strideA = depth;
+ if(strideB==-1) strideB = depth;
+
+ for(Index j=0; j<cols; j+=nr)
+ {
+ Index actual_nr = (std::min<Index>)(nr,cols-j);
+ mpreal *C1 = res + j*resStride;
+ mpreal *C2 = res + (j+1)*resStride;
+ for(Index i=0; i<rows; i++)
+ {
+ mpreal *B = const_cast<mpreal*>(blockB) + j*strideB + offsetB*actual_nr;
+ mpreal *A = const_cast<mpreal*>(blockA) + i*strideA + offsetA;
+ acc1 = 0;
+ acc2 = 0;
+ for(Index k=0; k<depth; k++)
+ {
+ mpfr_mul(tmp.mpfr_ptr(), A[k].mpfr_ptr(), B[0].mpfr_ptr(), mpreal::get_default_rnd());
+ mpfr_add(acc1.mpfr_ptr(), acc1.mpfr_ptr(), tmp.mpfr_ptr(), mpreal::get_default_rnd());
+
+ if(actual_nr==2) {
+ mpfr_mul(tmp.mpfr_ptr(), A[k].mpfr_ptr(), B[1].mpfr_ptr(), mpreal::get_default_rnd());
+ mpfr_add(acc2.mpfr_ptr(), acc2.mpfr_ptr(), tmp.mpfr_ptr(), mpreal::get_default_rnd());
+ }
+
+ B+=actual_nr;
+ }
+
+ mpfr_mul(acc1.mpfr_ptr(), acc1.mpfr_ptr(), alpha.mpfr_ptr(), mpreal::get_default_rnd());
+ mpfr_add(C1[i].mpfr_ptr(), C1[i].mpfr_ptr(), acc1.mpfr_ptr(), mpreal::get_default_rnd());
+
+ if(actual_nr==2) {
+ mpfr_mul(acc2.mpfr_ptr(), acc2.mpfr_ptr(), alpha.mpfr_ptr(), mpreal::get_default_rnd());
+ mpfr_add(C2[i].mpfr_ptr(), C2[i].mpfr_ptr(), acc2.mpfr_ptr(), mpreal::get_default_rnd());
+ }
+ }
+ }
+ }
+ };
+
+ } // end namespace internal
+}
+
+#endif // EIGEN_MPREALSUPPORT_MODULE_H
diff --git a/unsupported/Eigen/MatrixFunctions b/unsupported/Eigen/MatrixFunctions
new file mode 100644
index 0000000..0991817
--- /dev/null
+++ b/unsupported/Eigen/MatrixFunctions
@@ -0,0 +1,447 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_FUNCTIONS
+#define EIGEN_MATRIX_FUNCTIONS
+
+#include <cfloat>
+#include <list>
+#include <functional>
+#include <iterator>
+
+#include <Eigen/Core>
+#include <Eigen/LU>
+#include <Eigen/Eigenvalues>
+
+/**
+ * \defgroup MatrixFunctions_Module Matrix functions module
+ * \brief This module aims to provide various methods for the computation of
+ * matrix functions.
+ *
+ * To use this module, add
+ * \code
+ * #include <unsupported/Eigen/MatrixFunctions>
+ * \endcode
+ * at the start of your source file.
+ *
+ * This module defines the following MatrixBase methods.
+ * - \ref matrixbase_cos "MatrixBase::cos()", for computing the matrix cosine
+ * - \ref matrixbase_cosh "MatrixBase::cosh()", for computing the matrix hyperbolic cosine
+ * - \ref matrixbase_exp "MatrixBase::exp()", for computing the matrix exponential
+ * - \ref matrixbase_log "MatrixBase::log()", for computing the matrix logarithm
+ * - \ref matrixbase_pow "MatrixBase::pow()", for computing the matrix power
+ * - \ref matrixbase_matrixfunction "MatrixBase::matrixFunction()", for computing general matrix functions
+ * - \ref matrixbase_sin "MatrixBase::sin()", for computing the matrix sine
+ * - \ref matrixbase_sinh "MatrixBase::sinh()", for computing the matrix hyperbolic sine
+ * - \ref matrixbase_sqrt "MatrixBase::sqrt()", for computing the matrix square root
+ *
+ * These methods are the main entry points to this module.
+ *
+ * %Matrix functions are defined as follows. Suppose that \f$ f \f$
+ * is an entire function (that is, a function on the complex plane
+ * that is everywhere complex differentiable). Then its Taylor
+ * series
+ * \f[ f(0) + f'(0) x + \frac{f''(0)}{2} x^2 + \frac{f'''(0)}{3!} x^3 + \cdots \f]
+ * converges to \f$ f(x) \f$. In this case, we can define the matrix
+ * function by the same series:
+ * \f[ f(M) = f(0) + f'(0) M + \frac{f''(0)}{2} M^2 + \frac{f'''(0)}{3!} M^3 + \cdots \f]
+ *
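+ * For example, the matrix exponential defined by this series can be evaluated
+ * as follows (a short sketch; note that these are genuine matrix functions,
+ * not coefficient-wise operations):
+ * \code
+ * Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
+ * Eigen::MatrixXd expA = A.exp(); // matrix exponential of A
+ * \endcode
+ *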
+ */
+
+#include "src/MatrixFunctions/MatrixExponential.h"
+#include "src/MatrixFunctions/MatrixFunction.h"
+#include "src/MatrixFunctions/MatrixSquareRoot.h"
+#include "src/MatrixFunctions/MatrixLogarithm.h"
+#include "src/MatrixFunctions/MatrixPower.h"
+
+
+/**
+\page matrixbaseextra_page
+\ingroup MatrixFunctions_Module
+
+\section matrixbaseextra MatrixBase methods defined in the MatrixFunctions module
+
+The remainder of the page documents the following MatrixBase methods
+which are defined in the MatrixFunctions module.
+
+
+
+\subsection matrixbase_cos MatrixBase::cos()
+
+Compute the matrix cosine.
+
+\code
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cos() const
+\endcode
+
+\param[in] M a square matrix.
+\returns expression representing \f$ \cos(M) \f$.
+
+This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos().
+
+\sa \ref matrixbase_sin "sin()" for an example.
+
+
+
+\subsection matrixbase_cosh MatrixBase::cosh()
+
+Compute the matrix hyperbolic cosine.
+
+\code
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cosh() const
+\endcode
+
+\param[in] M a square matrix.
+\returns expression representing \f$ \cosh(M) \f$
+
+This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cosh().
+
+\sa \ref matrixbase_sinh "sinh()" for an example.
+
+
+
+\subsection matrixbase_exp MatrixBase::exp()
+
+Compute the matrix exponential.
+
+\code
+const MatrixExponentialReturnValue<Derived> MatrixBase<Derived>::exp() const
+\endcode
+
+\param[in] M matrix whose exponential is to be computed.
+\returns expression representing the matrix exponential of \p M.
+
+The matrix exponential of \f$ M \f$ is defined by
+\f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. \f]
+The matrix exponential can be used to solve linear ordinary
+differential equations: the solution of \f$ y' = My \f$ with the
+initial condition \f$ y(0) = y_0 \f$ is given by
+\f$ y(t) = \exp(Mt) y_0 \f$.
+
+The cost of the computation is approximately \f$ 20 n^3 \f$ for
+matrices of size \f$ n \f$. The number 20 depends weakly on the
+norm of the matrix.
+
+The matrix exponential is computed using the scaling-and-squaring
+method combined with Padé approximation. The matrix is first
+rescaled, then the exponential of the reduced matrix is computed
+with a Padé approximant, and then the rescaling is undone by repeated
+squaring. The degree of the Padé approximant is chosen such
+that the approximation error is less than the round-off
+error. However, errors may accumulate during the squaring phase.
+
+Details of the algorithm can be found in: Nicholas J. Higham, "The
+scaling and squaring method for the matrix exponential revisited,"
+<em>SIAM J. %Matrix Anal. Applic.</em>, <b>26</b>:1179–1193,
+2005.
+
+Example: The following program checks that
+\f[ \exp \left[ \begin{array}{ccc}
+ 0 & \frac14\pi & 0 \\
+ -\frac14\pi & 0 & 0 \\
+ 0 & 0 & 0
+ \end{array} \right] = \left[ \begin{array}{ccc}
+ \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+ 0 & 0 & 1
+ \end{array} \right]. \f]
+This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
+the z-axis.
+
+\include MatrixExponential.cpp
+Output: \verbinclude MatrixExponential.out
+
+\note \p M has to be a matrix of \c float, \c double, <tt>long double</tt>,
+\c complex<float>, \c complex<double>, or \c complex<long double>.
+
+
+\subsection matrixbase_log MatrixBase::log()
+
+Compute the matrix logarithm.
+
+\code
+const MatrixLogarithmReturnValue<Derived> MatrixBase<Derived>::log() const
+\endcode
+
+\param[in] M invertible matrix whose logarithm is to be computed.
+\returns expression representing the matrix logarithm of \p M.
+
+The matrix logarithm of \f$ M \f$ is a matrix \f$ X \f$ such that
+\f$ \exp(X) = M \f$ where exp denotes the matrix exponential. As for
+the scalar logarithm, the equation \f$ \exp(X) = M \f$ may have
+multiple solutions; this function returns a matrix whose eigenvalues
+have imaginary part in the interval \f$ (-\pi,\pi] \f$.
+
+In the real case, the matrix \f$ M \f$ should be invertible and
+it should have no eigenvalues which are real and negative (pairs of
+complex conjugate eigenvalues are allowed). In the complex case, it
+only needs to be invertible.
+
+This function computes the matrix logarithm using the Schur-Parlett
+algorithm as implemented by MatrixBase::matrixFunction(). The
+logarithm of an atomic block is computed by MatrixLogarithmAtomic,
+which uses direct computation for 1-by-1 and 2-by-2 blocks and an
+inverse scaling-and-squaring algorithm for bigger blocks, with the
+square roots computed by MatrixBase::sqrt().
+
+Details of the algorithm can be found in Section 11.6.2 of:
+Nicholas J. Higham,
+<em>Functions of Matrices: Theory and Computation</em>,
+SIAM 2008. ISBN 978-0-898716-46-7.
+
+Example: The following program checks that
+\f[ \log \left[ \begin{array}{ccc}
+ \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+ 0 & 0 & 1
+ \end{array} \right] = \left[ \begin{array}{ccc}
+ 0 & \frac14\pi & 0 \\
+ -\frac14\pi & 0 & 0 \\
+ 0 & 0 & 0
+ \end{array} \right]. \f]
+This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
+the z-axis. This is the inverse of the example used in the
+documentation of \ref matrixbase_exp "exp()".
+
+\include MatrixLogarithm.cpp
+Output: \verbinclude MatrixLogarithm.out
+
+\note \p M has to be a matrix of \c float, \c double, <tt>long
+double</tt>, \c complex<float>, \c complex<double>, or \c complex<long
+double>.
+
+\sa MatrixBase::exp(), MatrixBase::matrixFunction(),
+ class MatrixLogarithmAtomic, MatrixBase::sqrt().
+
+
+\subsection matrixbase_pow MatrixBase::pow()
+
+Compute the matrix raised to arbitrary real power.
+
+\code
+const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(RealScalar p) const
+\endcode
+
+\param[in] M base of the matrix power, should be a square matrix.
+\param[in] p exponent of the matrix power, should be real.
+
+The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$,
+where exp denotes the matrix exponential, and log denotes the matrix
+logarithm.
+
+The matrix \f$ M \f$ should meet the conditions to be an argument of
+matrix logarithm. If \p p is not of the real scalar type of \p M, it
+is cast into the real scalar type of \p M.
+
+This function computes the matrix power using the Schur-Padé
+algorithm as implemented by class MatrixPower. The exponent is split
+into integral part and fractional part, where the fractional part is
+in the interval \f$ (-1, 1) \f$. The main diagonal and the first
+super-diagonal are directly computed.
+
+Details of the algorithm can be found in: Nicholas J. Higham and
+Lijing Lin, "A Schur-Padé algorithm for fractional powers of a
+matrix," <em>SIAM J. %Matrix Anal. Applic.</em>,
+<b>32(3)</b>:1056–1078, 2011.
+
+Example: The following program checks that
+\f[ \left[ \begin{array}{ccc}
+ \cos1 & -\sin1 & 0 \\
+ \sin1 & \cos1 & 0 \\
+ 0 & 0 & 1
+ \end{array} \right]^{\frac14\pi} = \left[ \begin{array}{ccc}
+ \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+ 0 & 0 & 1
+ \end{array} \right]. \f]
+This corresponds to raising a rotation of 1 radian around the z-axis to the
+power \f$ \frac14\pi \f$, i.e. a rotation of \f$ \frac14\pi \f$ radians.
+
+\include MatrixPower.cpp
+Output: \verbinclude MatrixPower.out
+
+MatrixBase::pow() is user-friendly. However, there are some
+circumstances under which you should use class MatrixPower directly.
+MatrixPower caches the Schur decomposition, so it is better suited for
+computing several powers of the same matrix.
+
+Example:
+\include MatrixPower_optimal.cpp
+Output: \verbinclude MatrixPower_optimal.out
+
+\note \p M has to be a matrix of \c float, \c double, <tt>long
+double</tt>, \c complex<float>, \c complex<double>, or \c complex<long
+double>.
+
+\sa MatrixBase::exp(), MatrixBase::log(), class MatrixPower.
+
+
+\subsection matrixbase_matrixfunction MatrixBase::matrixFunction()
+
+Compute a matrix function.
+
+\code
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::matrixFunction(typename internal::stem_function<typename internal::traits<Derived>::Scalar>::type f) const
+\endcode
+
+\param[in] M argument of matrix function, should be a square matrix.
+\param[in] f an entire function; \c f(x,n) should compute the n-th
+derivative of f at x.
+\returns expression representing \p f applied to \p M.
+
+Suppose that \p M is a matrix whose entries have type \c Scalar.
+Then, the second argument, \p f, should be a function with prototype
+\code
+ComplexScalar f(ComplexScalar, int)
+\endcode
+where \c ComplexScalar = \c std::complex<Scalar> if \c Scalar is
+real (e.g., \c float or \c double) and \c ComplexScalar =
+\c Scalar if \c Scalar is complex. The return value of \c f(x,n)
+should be \f$ f^{(n)}(x) \f$, the n-th derivative of f at x.
+
+This routine uses the algorithm described in:
+Philip Davies and Nicholas J. Higham,
+"A Schur-Parlett algorithm for computing matrix functions",
+<em>SIAM J. %Matrix Anal. Applic.</em>, <b>25</b>:464–485, 2003.
+
+The actual work is done by the MatrixFunction class.
+
+Example: The following program checks that
+\f[ \exp \left[ \begin{array}{ccc}
+ 0 & \frac14\pi & 0 \\
+ -\frac14\pi & 0 & 0 \\
+ 0 & 0 & 0
+ \end{array} \right] = \left[ \begin{array}{ccc}
+ \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+ 0 & 0 & 1
+ \end{array} \right]. \f]
+This corresponds to a rotation of \f$ \frac14\pi \f$ radians around
+the z-axis. This is the same example as used in the documentation
+of \ref matrixbase_exp "exp()".
+
+\include MatrixFunction.cpp
+Output: \verbinclude MatrixFunction.out
+
+Note that the function \c expfn is defined for complex numbers
+\c x, even though the matrix \c A is over the reals. Instead of
+\c expfn, we could also have used StdStemFunctions::exp:
+\code
+A.matrixFunction(StdStemFunctions<std::complex<double> >::exp, &B);
+\endcode
+
+
+
+\subsection matrixbase_sin MatrixBase::sin()
+
+Compute the matrix sine.
+
+\code
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sin() const
+\endcode
+
+\param[in] M a square matrix.
+\returns expression representing \f$ \sin(M) \f$.
+
+This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin().
+
+Example: \include MatrixSine.cpp
+Output: \verbinclude MatrixSine.out
+
+
+
+\subsection matrixbase_sinh MatrixBase::sinh()
+
+Compute the matrix hyperbolic sine.
+
+\code
+MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sinh() const
+\endcode
+
+\param[in] M a square matrix.
+\returns expression representing \f$ \sinh(M) \f$
+
+This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sinh().
+
+Example: \include MatrixSinh.cpp
+Output: \verbinclude MatrixSinh.out
+
+
+\subsection matrixbase_sqrt MatrixBase::sqrt()
+
+Compute the matrix square root.
+
+\code
+const MatrixSquareRootReturnValue<Derived> MatrixBase<Derived>::sqrt() const
+\endcode
+
+\param[in] M invertible matrix whose square root is to be computed.
+\returns expression representing the matrix square root of \p M.
+
+The matrix square root of \f$ M \f$ is the matrix \f$ M^{1/2} \f$
+whose square is the original matrix; so if \f$ S = M^{1/2} \f$ then
+\f$ S^2 = M \f$.
+
+In the <b>real case</b>, the matrix \f$ M \f$ should be invertible and
+it should have no eigenvalues which are real and negative (pairs of
+complex conjugate eigenvalues are allowed). In that case, the matrix
+has a square root which is also real, and this is the square root
+computed by this function.
+
+The matrix square root is computed by first reducing the matrix to
+quasi-triangular form with the real Schur decomposition. The square
+root of the quasi-triangular matrix can then be computed directly. The
+cost is approximately \f$ 25 n^3 \f$ real flops for the real Schur
+decomposition and \f$ 3\frac13 n^3 \f$ real flops for the remainder
+(though the computation time in practice is likely more than this
+indicates).
+
+Details of the algorithm can be found in: Nicholas J. Higham,
+"Computing real square roots of a real matrix", <em>Linear Algebra
+Appl.</em>, 88/89:405–430, 1987.
+
+If the matrix is <b>positive-definite symmetric</b>, then the square
+root is also positive-definite symmetric. In this case, it is best to
+use SelfAdjointEigenSolver::operatorSqrt() to compute it.
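+
+For instance (a minimal sketch, assuming \c M is positive-definite symmetric):
+\code
+SelfAdjointEigenSolver<MatrixXd> es(M);
+MatrixXd S = es.operatorSqrt();   // then S*S is (numerically) M
+\endcode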
+
+In the <b>complex case</b>, the matrix \f$ M \f$ should be invertible;
+this is a restriction of the algorithm. The square root computed by
+this algorithm is the one whose eigenvalues have an argument in the
+interval \f$ (-\frac12\pi, \frac12\pi] \f$. This is the usual branch
+cut.
+
+The computation is the same as in the real case, except that the
+complex Schur decomposition is used to reduce the matrix to a
+triangular matrix. The theoretical cost is the same. Details are in:
+Åke Björck and Sven Hammarling, "A Schur method for the
+square root of a matrix", <em>Linear Algebra Appl.</em>,
+52/53:127–140, 1983.
+
+Example: The following program checks that the square root of
+\f[ \left[ \begin{array}{cc}
+ \cos(\frac13\pi) & -\sin(\frac13\pi) \\
+ \sin(\frac13\pi) & \cos(\frac13\pi)
+ \end{array} \right], \f]
+corresponding to a rotation over 60 degrees, is a rotation over 30 degrees:
+\f[ \left[ \begin{array}{cc}
+ \cos(\frac16\pi) & -\sin(\frac16\pi) \\
+ \sin(\frac16\pi) & \cos(\frac16\pi)
+ \end{array} \right]. \f]
+
+\include MatrixSquareRoot.cpp
+Output: \verbinclude MatrixSquareRoot.out
+
+\sa class RealSchur, class ComplexSchur, class MatrixSquareRoot,
+ SelfAdjointEigenSolver::operatorSqrt().
+
+*/
+
+#endif // EIGEN_MATRIX_FUNCTIONS
+
diff --git a/unsupported/Eigen/MoreVectorization b/unsupported/Eigen/MoreVectorization
new file mode 100644
index 0000000..470e724
--- /dev/null
+++ b/unsupported/Eigen/MoreVectorization
@@ -0,0 +1,24 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MOREVECTORIZATION_MODULE_H
+#define EIGEN_MOREVECTORIZATION_MODULE_H
+
+#include <Eigen/Core>
+
+namespace Eigen {
+
+/**
+ * \defgroup MoreVectorization More vectorization module
+ */
+
+}
+
+#include "src/MoreVectorization/MathFunctions.h"
+
+#endif // EIGEN_MOREVECTORIZATION_MODULE_H
diff --git a/unsupported/Eigen/NonLinearOptimization b/unsupported/Eigen/NonLinearOptimization
new file mode 100644
index 0000000..600ab4c
--- /dev/null
+++ b/unsupported/Eigen/NonLinearOptimization
@@ -0,0 +1,134 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_NONLINEAROPTIMIZATION_MODULE
+#define EIGEN_NONLINEAROPTIMIZATION_MODULE
+
+#include <vector>
+
+#include <Eigen/Core>
+#include <Eigen/Jacobi>
+#include <Eigen/QR>
+#include <unsupported/Eigen/NumericalDiff>
+
+/**
+ * \defgroup NonLinearOptimization_Module Non linear optimization module
+ *
+ * \code
+ * #include <unsupported/Eigen/NonLinearOptimization>
+ * \endcode
+ *
+ * This module provides implementations of two important algorithms in non-linear
+ * optimization. In both cases, we consider a system of non-linear functions. Of
+ * course, they also work (and even work very well) if those functions happen to
+ * be linear, but in that case other methods tailored to the linear case are
+ * usually a better choice.
+ *
+ * One algorithm finds an extremum of such a system (the Levenberg-Marquardt
+ * algorithm); the other finds a zero of the system (the Powell hybrid
+ * "dogleg" method).
+ *
+ * This code is a port of minpack (http://en.wikipedia.org/wiki/MINPACK).
+ * Minpack is a famous, old, robust and well-renowned package, written in
+ * Fortran. Its implementations have been carefully tuned, tested, and used
+ * for several decades.
+ *
+ * The original Fortran code was automatically translated to C using f2c
+ * (http://en.wikipedia.org/wiki/F2c), then to C++, and then cleaned up by
+ * several different authors. The last of those cleanups was our starting
+ * point: http://devernay.free.fr/hacks/cminpack.html
+ *
+ * Finally, we ported this code to Eigen, creating classes and an API
+ * coherent with Eigen. When possible, we switched to Eigen implementations,
+ * e.g. for most of the linear algebra (vectors, matrices, stable norms).
+ *
+ * Doing so, we were very careful to keep checking the tests set up at the
+ * very beginning, which ensure that the same results are found.
+ *
+ * \section Tests Tests
+ *
+ * The tests are placed in the file unsupported/test/NonLinearOptimization.cpp.
+ *
+ * There are two kinds of tests. The first come from examples bundled with
+ * cminpack; they guarantee we get the same results as the original algorithms
+ * (the value of 'x', the number of evaluations of the function and, where
+ * applicable, the number of evaluations of the jacobian).
+ *
+ * The other tests were added at the very beginning of the process and check
+ * the Levenberg-Marquardt results against the reference data at
+ * http://www.itl.nist.gov/div898/strd/nls/nls_main.shtml. Since then, we have
+ * carefully checked that the same results are obtained whenever the code is
+ * modified. Please note that we do not always get the exact same decimals as
+ * the reference: it uses 128-bit floats, while we run the tests with the C
+ * type 'double', which is 64 bits on most platforms (x86 and amd64, at least).
+ * We have run those tests against several other implementations of
+ * Levenberg-Marquardt, and (c)minpack performs VERY well compared to them,
+ * both in accuracy and speed.
+ *
+ * The documentation for running the tests is on the wiki
+ * http://eigen.tuxfamily.org/index.php?title=Tests
+ *
+ * \section API API: overview of methods
+ *
+ * Both algorithms can either use the jacobian (provided by the user) or compute
+ * an approximation of it themselves (using Eigen's \ref NumericalDiff_Module).
+ * The part of the API covering the latter uses 'NumericalDiff' in the method
+ * names (example: LevenbergMarquardt.minimizeNumericalDiff()).
+ *
+ * The methods LevenbergMarquardt.lmder1()/lmdif1()/lmstr1() and
+ * HybridNonLinearSolver.hybrj1()/hybrd1() are methods specific to the original
+ * minpack package that you probably should NOT use, unless you are porting code
+ * that previously used minpack. They merely define a 'simple' API with default
+ * values for some parameters.
+ *
+ * All algorithms are provided through two APIs:
+ * - one where the user initializes the algorithm and then calls '*OneStep()' as
+ * often as desired: this way the caller has full control over the steps;
+ * - one where the user just calls a method (optimize() or solve()) which
+ * handles the whole loop: init + iterate until a stop condition is met. These
+ * are provided for convenience.
+ *
+ * As an example, the method LevenbergMarquardt::minimize() is
+ * implemented as follows:
+ * \code
+ * Status LevenbergMarquardt<FunctorType,Scalar>::minimize(FVectorType &x, const int mode)
+ * {
+ * Status status = minimizeInit(x, mode);
+ * do {
+ * status = minimizeOneStep(x, mode);
+ * } while (status==Running);
+ * return status;
+ * }
+ * \endcode
+ *
+ * \section examples Examples
+ *
+ * The easiest way to understand how to use this module is by looking at the many examples in the file
+ * unsupported/test/NonLinearOptimization.cpp.
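+ *
+ * As a minimal, self-contained sketch (the functor layout follows the pattern
+ * used in those tests; the names are illustrative):
+ * \code
+ * struct my_functor
+ * {
+ *   typedef double Scalar;
+ *   enum { InputsAtCompileTime = Dynamic, ValuesAtCompileTime = Dynamic };
+ *   typedef VectorXd InputType;
+ *   typedef VectorXd ValueType;
+ *   typedef MatrixXd JacobianType;
+ *
+ *   int inputs() const { return 2; }
+ *   int values() const { return 2; }
+ *
+ *   // computes the vector of errors 'fvec' at the point 'x'
+ *   int operator()(const VectorXd &x, VectorXd &fvec) const
+ *   {
+ *     fvec(0) = x(0)*x(0) + x(1) - 11.0;
+ *     fvec(1) = x(0) + x(1)*x(1) - 7.0;
+ *     return 0;
+ *   }
+ * };
+ *
+ * my_functor functor;
+ * VectorXd x(2); x << 1.0, 1.0;
+ * NumericalDiff<my_functor> numDiff(functor);
+ * LevenbergMarquardt<NumericalDiff<my_functor> > lm(numDiff);
+ * lm.minimize(x);   // on success, x solves the system in the least-squares sense
+ * \endcode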
+ */
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+#include "src/NonLinearOptimization/qrsolv.h"
+#include "src/NonLinearOptimization/r1updt.h"
+#include "src/NonLinearOptimization/r1mpyq.h"
+#include "src/NonLinearOptimization/rwupdt.h"
+#include "src/NonLinearOptimization/fdjac1.h"
+#include "src/NonLinearOptimization/lmpar.h"
+#include "src/NonLinearOptimization/dogleg.h"
+#include "src/NonLinearOptimization/covar.h"
+
+#include "src/NonLinearOptimization/chkder.h"
+
+#endif
+
+#include "src/NonLinearOptimization/HybridNonLinearSolver.h"
+#include "src/NonLinearOptimization/LevenbergMarquardt.h"
+
+
+#endif // EIGEN_NONLINEAROPTIMIZATION_MODULE
diff --git a/unsupported/Eigen/NumericalDiff b/unsupported/Eigen/NumericalDiff
new file mode 100644
index 0000000..433334c
--- /dev/null
+++ b/unsupported/Eigen/NumericalDiff
@@ -0,0 +1,56 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_NUMERICALDIFF_MODULE
+#define EIGEN_NUMERICALDIFF_MODULE
+
+#include <Eigen/Core>
+
+namespace Eigen {
+
+/**
+ * \defgroup NumericalDiff_Module Numerical differentiation module
+ *
+ * \code
+ * #include <unsupported/Eigen/NumericalDiff>
+ * \endcode
+ *
+ * See http://en.wikipedia.org/wiki/Numerical_differentiation
+ *
+ * Warning: this should NOT be confused with automatic differentiation, which
+ * is a different method and has its own module in Eigen: \ref
+ * AutoDiff_Module.
+ *
+ * Currently only "Forward" and "Central" schemes are implemented. These are
+ * basic methods; more elaborate ways of computing such approximations exist,
+ * implemented in both proprietary and free software, and usually requiring
+ * linking to an external library. It is easy to write a functor using such
+ * software, and that purpose is quite orthogonal to what we want to achieve
+ * with Eigen.
+ *
+ * This is why we do not provide wrappers for every numerical differentiation
+ * package out there, but rather stick with these basic schemes, which are
+ * still useful for testing.
+ *
+ * Also, the \ref NonLinearOptimization_Module needs this module in order to
+ * provide full feature compatibility with the original (c)minpack package.
+ *
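+ * A minimal usage sketch (assuming a functor exposing the usual
+ * Scalar/InputType/ValueType/JacobianType typedefs together with operator(),
+ * inputs() and values(), as in the non-linear optimization tests):
+ * \code
+ * NumericalDiff<MyFunctor> numDiff(functor);     // Forward scheme by default
+ * MatrixXd jac(functor.values(), functor.inputs());
+ * numDiff.df(x, jac);                            // approximates the jacobian at x
+ * \endcode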
+ */
+}
+
+//@{
+
+#include "src/NumericalDiff/NumericalDiff.h"
+
+//@}
+
+
+#endif // EIGEN_NUMERICALDIFF_MODULE
diff --git a/unsupported/Eigen/OpenGLSupport b/unsupported/Eigen/OpenGLSupport
new file mode 100644
index 0000000..e276944
--- /dev/null
+++ b/unsupported/Eigen/OpenGLSupport
@@ -0,0 +1,322 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_OPENGL_MODULE
+#define EIGEN_OPENGL_MODULE
+
+#include <Eigen/Geometry>
+
+#if defined(__APPLE_CC__)
+ #include <OpenGL/gl.h>
+#else
+ #include <GL/gl.h>
+#endif
+
+namespace Eigen {
+
+/**
+ * \defgroup OpenGLSupport_Module OpenGL Support module
+ *
+ * This module provides wrapper functions for a couple of OpenGL functions
+ * which simplify the way Eigen objects are passed to OpenGL.
+ * Here is an example:
+ *
+ * \code
+ * // You need to add path_to_eigen/unsupported to your include path.
+ * #include <Eigen/OpenGLSupport>
+ * // ...
+ * Vector3f x, y;
+ * Matrix3f rot;
+ *
+ * glVertex(y + rot * x);
+ *
+ * Quaternionf q;
+ * glRotate(q);
+ *
+ * // ...
+ * \endcode
+ *
+ */
+//@{
+
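+// The helper macros below generate, for a given OpenGL function FUNC (e.g.,
+// glVertex), a generic dispatcher FUNC(const DenseBase<Derived>&): the generic
+// implementation first evaluates the expression into a plain column-major
+// object, and the per-type specializations then forward the data pointer to
+// the matching C entry point (e.g., a Vector3f ends up in glVertex3fv).
+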
+#define EIGEN_GL_FUNC_DECLARATION(FUNC) \
+namespace internal { \
+ template< typename XprType, \
+ typename Scalar = typename XprType::Scalar, \
+ int Rows = XprType::RowsAtCompileTime, \
+ int Cols = XprType::ColsAtCompileTime, \
+ bool IsGLCompatible = bool(XprType::Flags&LinearAccessBit) \
+ && bool(XprType::Flags&DirectAccessBit) \
+ && (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \
+ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \
+ \
+ template<typename XprType, typename Scalar, int Rows, int Cols> \
+ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)<XprType,Scalar,Rows,Cols,false> { \
+ inline static void run(const XprType& p) { \
+ EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)<typename plain_matrix_type_column_major<XprType>::type>::run(p); } \
+ }; \
+} \
+ \
+template<typename Derived> inline void FUNC(const Eigen::DenseBase<Derived>& p) { \
+ EIGEN_CAT(EIGEN_CAT(internal::gl_,FUNC),_impl)<Derived>::run(p.derived()); \
+}
+
+
+#define EIGEN_GL_FUNC_SPECIALIZATION_MAT(FUNC,SCALAR,ROWS,COLS,SUFFIX) \
+namespace internal { \
+ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)<XprType, SCALAR, ROWS, COLS, true> { \
+ inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \
+ }; \
+}
+
+
+#define EIGEN_GL_FUNC_SPECIALIZATION_VEC(FUNC,SCALAR,SIZE,SUFFIX) \
+namespace internal { \
+ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)<XprType, SCALAR, SIZE, 1, true> { \
+ inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \
+ }; \
+ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)<XprType, SCALAR, 1, SIZE, true> { \
+ inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \
+ }; \
+}
+
+
+EIGEN_GL_FUNC_DECLARATION (glVertex)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 2,2iv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 2,2sv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 2,2fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 2,2dv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 3,3iv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 3,3sv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 3,3fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 3,3dv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 4,4iv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 4,4sv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 4,4fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 4,4dv)
+
+EIGEN_GL_FUNC_DECLARATION (glTexCoord)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 2,2iv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 2,2sv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 2,2fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 2,2dv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 3,3iv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 3,3sv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 3,3fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 3,3dv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 4,4iv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 4,4sv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 4,4fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 4,4dv)
+
+EIGEN_GL_FUNC_DECLARATION (glColor)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 2,2iv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 2,2sv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 2,2fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 2,2dv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 3,3iv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 3,3sv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 3,3fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 3,3dv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 4,4iv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 4,4sv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 4,4fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 4,4dv)
+
+EIGEN_GL_FUNC_DECLARATION (glNormal)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,int, 3,3iv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,short, 3,3sv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,float, 3,3fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,double, 3,3dv)
+
+inline void glScale2fv(const float* v) { glScalef(v[0], v[1], 1.f); }
+inline void glScale2dv(const double* v) { glScaled(v[0], v[1], 1.0); }
+inline void glScale3fv(const float* v) { glScalef(v[0], v[1], v[2]); }
+inline void glScale3dv(const double* v) { glScaled(v[0], v[1], v[2]); }
+
+EIGEN_GL_FUNC_DECLARATION (glScale)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,float, 2,2fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,double, 2,2dv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,float, 3,3fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,double, 3,3dv)
+
+template<typename Scalar> void glScale(const UniformScaling<Scalar>& s) { glScale(Matrix<Scalar,3,1>::Constant(s.factor())); }
+
+inline void glTranslate2fv(const float* v) { glTranslatef(v[0], v[1], 0.f); }
+inline void glTranslate2dv(const double* v) { glTranslated(v[0], v[1], 0.0); }
+inline void glTranslate3fv(const float* v) { glTranslatef(v[0], v[1], v[2]); }
+inline void glTranslate3dv(const double* v) { glTranslated(v[0], v[1], v[2]); }
+
+EIGEN_GL_FUNC_DECLARATION (glTranslate)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,float, 2,2fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,double, 2,2dv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,float, 3,3fv)
+EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,double, 3,3dv)
+
+template<typename Scalar> void glTranslate(const Translation<Scalar,2>& t) { glTranslate(t.vector()); }
+template<typename Scalar> void glTranslate(const Translation<Scalar,3>& t) { glTranslate(t.vector()); }
+
+EIGEN_GL_FUNC_DECLARATION (glMultMatrix)
+EIGEN_GL_FUNC_SPECIALIZATION_MAT(glMultMatrix,float, 4,4,f)
+EIGEN_GL_FUNC_SPECIALIZATION_MAT(glMultMatrix,double, 4,4,d)
+
+template<typename Scalar> void glMultMatrix(const Transform<Scalar,3,Affine>& t) { glMultMatrix(t.matrix()); }
+template<typename Scalar> void glMultMatrix(const Transform<Scalar,3,Projective>& t) { glMultMatrix(t.matrix()); }
+template<typename Scalar> void glMultMatrix(const Transform<Scalar,3,AffineCompact>& t) { glMultMatrix(Transform<Scalar,3,Affine>(t).matrix()); }
+
+EIGEN_GL_FUNC_DECLARATION (glLoadMatrix)
+EIGEN_GL_FUNC_SPECIALIZATION_MAT(glLoadMatrix,float, 4,4,f)
+EIGEN_GL_FUNC_SPECIALIZATION_MAT(glLoadMatrix,double, 4,4,d)
+
+template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,Affine>& t) { glLoadMatrix(t.matrix()); }
+template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,Projective>& t) { glLoadMatrix(t.matrix()); }
+template<typename Scalar> void glLoadMatrix(const Transform<Scalar,3,AffineCompact>& t) { glLoadMatrix(Transform<Scalar,3,Affine>(t).matrix()); }
+
+inline void glRotate(const Rotation2D<float>& rot)
+{
+ glRotatef(rot.angle()*180.f/float(M_PI), 0.f, 0.f, 1.f);
+}
+inline void glRotate(const Rotation2D<double>& rot)
+{
+ glRotated(rot.angle()*180.0/M_PI, 0.0, 0.0, 1.0);
+}
+
+template<typename Derived> void glRotate(const RotationBase<Derived,3>& rot)
+{
+ Transform<typename Derived::Scalar,3,Projective> tr(rot);
+ glMultMatrix(tr.matrix());
+}
+
+#define EIGEN_GL_MAKE_CONST_const const
+#define EIGEN_GL_MAKE_CONST__
+#define EIGEN_GL_EVAL(X) X
+
+#define EIGEN_GL_FUNC1_DECLARATION(FUNC,ARG1,CONST) \
+namespace internal { \
+ template< typename XprType, \
+ typename Scalar = typename XprType::Scalar, \
+ int Rows = XprType::RowsAtCompileTime, \
+ int Cols = XprType::ColsAtCompileTime, \
+ bool IsGLCompatible = bool(XprType::Flags&LinearAccessBit) \
+ && bool(XprType::Flags&DirectAccessBit) \
+ && (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \
+ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \
+ \
+ template<typename XprType, typename Scalar, int Rows, int Cols> \
+ struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)<XprType,Scalar,Rows,Cols,false> { \
+ inline static void run(ARG1 a,EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { \
+ EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)<typename plain_matrix_type_column_major<XprType>::type>::run(a,p); } \
+ }; \
+} \
+ \
+template<typename Derived> inline void FUNC(ARG1 a,EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) Eigen::DenseBase<Derived>& p) { \
+ EIGEN_CAT(EIGEN_CAT(internal::gl_,FUNC),_impl)<Derived>::run(a,p.derived()); \
+}
+
+
+#define EIGEN_GL_FUNC1_SPECIALIZATION_MAT(FUNC,ARG1,CONST,SCALAR,ROWS,COLS,SUFFIX) \
+namespace internal { \
+ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)<XprType, SCALAR, ROWS, COLS, true> { \
+ inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \
+ }; \
+}
+
+
+#define EIGEN_GL_FUNC1_SPECIALIZATION_VEC(FUNC,ARG1,CONST,SCALAR,SIZE,SUFFIX) \
+namespace internal { \
+ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)<XprType, SCALAR, SIZE, 1, true> { \
+ inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \
+ }; \
+ template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)<XprType, SCALAR, 1, SIZE, true> { \
+ inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \
+ }; \
+}
+
+EIGEN_GL_FUNC1_DECLARATION (glGet,GLenum,_)
+EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,float, 4,4,Floatv)
+EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,double, 4,4,Doublev)
+
+// glUniform API
+
+#ifdef GL_VERSION_2_0
+
+inline void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); }
+inline void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); }
+
+inline void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); }
+inline void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); }
+
+inline void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); }
+inline void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); }
+
+inline void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); }
+inline void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); }
+inline void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); }
+
+
+EIGEN_GL_FUNC1_DECLARATION (glUniform,GLint,const)
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 2,2fv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 2,2iv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 3,3fv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 3,3iv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 4,4fv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 4,4iv_ei)
+
+EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,2,Matrix2fv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,3,Matrix3fv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,4,Matrix4fv_ei)
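+
+// Usage sketch ('loc' is assumed to be a valid uniform location of the
+// currently bound GLSL program):
+//   Matrix4f m = Matrix4f::Identity();
+//   glUniform(loc, m);                 // dispatches to glUniformMatrix4fv
+//   Vector3f v(1.f, 0.f, 0.f);
+//   glUniform(loc, v);                 // dispatches to glUniform3fv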
+
+#endif
+
+#ifdef GL_VERSION_2_1
+
+static void glUniformMatrix2x3fv_ei(GLint loc, const float* v) { glUniformMatrix2x3fv(loc,1,false,v); }
+static void glUniformMatrix3x2fv_ei(GLint loc, const float* v) { glUniformMatrix3x2fv(loc,1,false,v); }
+static void glUniformMatrix2x4fv_ei(GLint loc, const float* v) { glUniformMatrix2x4fv(loc,1,false,v); }
+static void glUniformMatrix4x2fv_ei(GLint loc, const float* v) { glUniformMatrix4x2fv(loc,1,false,v); }
+static void glUniformMatrix3x4fv_ei(GLint loc, const float* v) { glUniformMatrix3x4fv(loc,1,false,v); }
+static void glUniformMatrix4x3fv_ei(GLint loc, const float* v) { glUniformMatrix4x3fv(loc,1,false,v); }
+
+EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,3,Matrix2x3fv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,2,Matrix3x2fv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,4,Matrix2x4fv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,2,Matrix4x2fv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,4,Matrix3x4fv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,3,Matrix4x3fv_ei)
+
+#endif
+
+#ifdef GL_VERSION_3_0
+
+inline void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); }
+inline void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); }
+inline void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); }
+
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 2,2uiv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 3,3uiv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 4,4uiv_ei)
+
+#endif
+
+#ifdef GL_ARB_gpu_shader_fp64
+inline void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); }
+inline void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); }
+inline void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); }
+
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 2,2dv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 3,3dv_ei)
+EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 4,4dv_ei)
+#endif
+
+
+//@}
+
+}
+
+#endif // EIGEN_OPENGL_MODULE
diff --git a/unsupported/Eigen/Polynomials b/unsupported/Eigen/Polynomials
new file mode 100644
index 0000000..cece563
--- /dev/null
+++ b/unsupported/Eigen/Polynomials
@@ -0,0 +1,138 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_POLYNOMIALS_MODULE_H
+#define EIGEN_POLYNOMIALS_MODULE_H
+
+#include <Eigen/Core>
+
+#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+
+#include <Eigen/Eigenvalues>
+
+// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module
+#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2)
+ #ifndef EIGEN_HIDE_HEAVY_CODE
+ #define EIGEN_HIDE_HEAVY_CODE
+ #endif
+#elif defined EIGEN_HIDE_HEAVY_CODE
+ #undef EIGEN_HIDE_HEAVY_CODE
+#endif
+
+/**
+ * \defgroup Polynomials_Module Polynomials module
+ * \brief This module provides a QR based polynomial solver.
+ *
+ * To use this module, add
+ * \code
+ * #include <unsupported/Eigen/Polynomials>
+ * \endcode
+ * at the start of your source file.
+ */
+
+#include "src/Polynomials/PolynomialUtils.h"
+#include "src/Polynomials/Companion.h"
+#include "src/Polynomials/PolynomialSolver.h"
+
+/**
+ \page polynomials Polynomials defines functions for dealing with polynomials
+ and a QR based polynomial solver.
+ \ingroup Polynomials_Module
+
+ The remainder of the page first documents the functions for evaluating and
+ computing polynomials and for computing estimates about them, and then the
+ QR based polynomial solver.
+
+ \section polynomialUtils convenient functions to deal with polynomials
+ \subsection roots_to_monicPolynomial
+ The function
+ \code
+ void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly )
+ \endcode
+ computes the coefficients \f$ a_i \f$ of
+
+ \f$ p(x) = a_0 + a_{1}x + ... + a_{n-1}x^{n-1} + x^n \f$
+
+ where \f$ p \f$ is known through its roots i.e. \f$ p(x) = (x-r_1)(x-r_2)...(x-r_n) \f$.
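+
+ For instance (a minimal sketch):
+ \code
+ Eigen::Vector3d roots(1.,2.,3.);            // p(x) = (x-1)(x-2)(x-3)
+ Eigen::Vector4d poly;
+ Eigen::roots_to_monicPolynomial( roots, poly );
+ // poly is now [-6, 11, -6, 1], i.e. p(x) = -6 + 11x - 6x^2 + x^3
+ \endcode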
+
+ \subsection poly_eval
+ The function
+ \code
+ T poly_eval( const Polynomials& poly, const T& x )
+ \endcode
+ evaluates a polynomial at a given point using a stabilized Horner method.
+
+ The following code first computes the coefficients, in the monomial basis, of the monic polynomial that has the provided roots;
+ it then evaluates the computed polynomial, using a stabilized Horner method.
+
+ \include PolynomialUtils1.cpp
+ Output: \verbinclude PolynomialUtils1.out
+
+ \subsection Cauchy Cauchy bounds
+ The function
+ \code
+ Real cauchy_max_bound( const Polynomial& poly )
+ \endcode
+ provides a maximum bound (the Cauchy bound \f$C(p)\f$) for the absolute value of a root of the given polynomial, i.e.
+ \f$ \forall r_i \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$,
+ \f$ |r_i| \le C(p) = \sum_{k=0}^{d} \left | \frac{a_k}{a_d} \right | \f$.
+ The leading coefficient of \f$ p \f$ should be non-zero: \f$a_d \neq 0\f$.
+
+
+ The function
+ \code
+ Real cauchy_min_bound( const Polynomial& poly )
+ \endcode
+ provides a minimum bound (the Cauchy bound \f$c(p)\f$) for the absolute value of a non-zero root of the given polynomial, i.e.
+ \f$ \forall r_i \neq 0 \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$,
+ \f$ |r_i| \ge c(p) = \left( \sum_{k=0}^{d} \left | \frac{a_k}{a_0} \right | \right)^{-1} \f$.
+
+
+
+
+ \section QR QR polynomial solver class
+ Computes the complex roots of a polynomial by computing the eigenvalues of the associated companion matrix with the QR algorithm.
+
+ The roots of \f$ p(x) = a_0 + a_1 x + a_2 x^2 + a_{3} x^3 + x^4 \f$ are the eigenvalues of
+ \f$
+ \left [
+ \begin{array}{cccc}
+ 0 & 0 & 0 & a_0 \\
+ 1 & 0 & 0 & a_1 \\
+ 0 & 1 & 0 & a_2 \\
+ 0 & 0 & 1 & a_3
+ \end{array} \right ]
+ \f$
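+
+ A minimal call sequence (sketch):
+ \code
+ Eigen::Vector4d coeffs(-6.,11.,-6.,1.);     // p(x) = -6 + 11x - 6x^2 + x^3
+ Eigen::PolynomialSolver<double,3> psolve( coeffs );
+ std::cout << "roots: " << psolve.roots().transpose() << std::endl;
+ \endcode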
+
+ However, the QR algorithm is not guaranteed to converge when there are several eigenvalues with the same modulus.
+
+ Therefore, the current polynomial solver is guaranteed to provide a correct result only when the complex roots \f$r_1,r_2,...,r_d\f$ have distinct moduli, i.e.,
+
+ \f$ \forall i,j \in [1;d],~ \| r_i \| \neq \| r_j \| \f$.
+
+ With 32-bit (float) floating-point types this problem shows up frequently.
+ However, for 64-bit (double) floating-point types and small polynomial degrees (<20),
+ correct accuracy is almost always reached even in these cases.
+
+ \include PolynomialSolver1.cpp
+
+ In the above example:
+
+ -# a simple use of the polynomial solver is shown;
+ -# the accuracy problem with the QR algorithm is presented: a polynomial with nearly conjugate roots is provided to the solver.
+ Those roots have almost the same modulus, so the QR algorithm fails to converge: the accuracy
+ of the last root is poor;
+ -# a simple way to circumvent the problem is shown: use doubles instead of floats.
+
+ Output: \verbinclude PolynomialSolver1.out
+*/
+
+#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+
+#endif // EIGEN_POLYNOMIALS_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/SVD b/unsupported/Eigen/SVD
new file mode 100644
index 0000000..7cc0592
--- /dev/null
+++ b/unsupported/Eigen/SVD
@@ -0,0 +1,39 @@
+#ifndef EIGEN_SVD_MODULE_H
+#define EIGEN_SVD_MODULE_H
+
+#include <Eigen/QR>
+#include <Eigen/Householder>
+#include <Eigen/Jacobi>
+
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
+/** \defgroup SVD_Module SVD module
+ *
+ * This module provides SVD decomposition for matrices (both real and complex).
+ * This decomposition is accessible via the following MatrixBase method:
+ * - MatrixBase::jacobiSvd()
+ *
+ * \code
+ * #include <Eigen/SVD>
+ * \endcode
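+ *
+ * A minimal usage sketch:
+ * \code
+ * MatrixXf m = MatrixXf::Random(3,2);
+ * JacobiSVD<MatrixXf> svd(m, ComputeThinU | ComputeThinV);
+ * std::cout << svd.singularValues() << std::endl;
+ * \endcode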
+ */
+
+#include "../../Eigen/src/misc/Solve.h"
+#include "../../Eigen/src/SVD/UpperBidiagonalization.h"
+#include "src/SVD/SVDBase.h"
+#include "src/SVD/JacobiSVD.h"
+#include "src/SVD/BDCSVD.h"
+#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT)
+#include "../../Eigen/src/SVD/JacobiSVD_MKL.h"
+#endif
+
+#ifdef EIGEN2_SUPPORT
+#include "../../Eigen/src/Eigen2Support/SVD.h"
+#endif
+
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_SVD_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/Skyline b/unsupported/Eigen/Skyline
new file mode 100644
index 0000000..71a68cb
--- /dev/null
+++ b/unsupported/Eigen/Skyline
@@ -0,0 +1,39 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SKYLINE_MODULE_H
+#define EIGEN_SKYLINE_MODULE_H
+
+
+#include "Eigen/Core"
+
+#include "Eigen/src/Core/util/DisableStupidWarnings.h"
+
+#include <map>
+#include <cstdlib>
+#include <cstring>
+#include <algorithm>
+
+/**
+ * \defgroup Skyline_Module Skyline module
+ *
+ * This module provides an experimental skyline matrix storage scheme
+ * (SkylineMatrix) together with a dedicated in-place LU decomposition
+ * (SkylineInplaceLU).
+ */
+
+#include "src/Skyline/SkylineUtil.h"
+#include "src/Skyline/SkylineMatrixBase.h"
+#include "src/Skyline/SkylineStorage.h"
+#include "src/Skyline/SkylineMatrix.h"
+#include "src/Skyline/SkylineInplaceLU.h"
+#include "src/Skyline/SkylineProduct.h"
+
+#include "Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_SKYLINE_MODULE_H
diff --git a/unsupported/Eigen/SparseExtra b/unsupported/Eigen/SparseExtra
new file mode 100644
index 0000000..b559790
--- /dev/null
+++ b/unsupported/Eigen/SparseExtra
@@ -0,0 +1,56 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_EXTRA_MODULE_H
+#define EIGEN_SPARSE_EXTRA_MODULE_H
+
+#include "../../Eigen/Sparse"
+
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
+#include <vector>
+#include <map>
+#include <cstdlib>
+#include <cstring>
+#include <algorithm>
+#include <fstream>
+#include <sstream>
+
+#ifdef EIGEN_GOOGLEHASH_SUPPORT
+ #include <google/dense_hash_map>
+#endif
+
+/**
+ * \defgroup SparseExtra_Module SparseExtra module
+ *
+ * This module contains some experimental features extending the sparse module.
+ *
+ * \code
+ * #include <Eigen/SparseExtra>
+ * \endcode
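+ *
+ * For instance, RandomSetter allows one to fill a sparse matrix with
+ * random-order writes (a minimal sketch):
+ * \code
+ * SparseMatrix<double> m(1000,1000);
+ * {
+ *   RandomSetter<SparseMatrix<double> > setter(m);
+ *   setter(3,7) = 1.5;   // writes in arbitrary order are cheap here
+ * }                      // on destruction, the setter flushes back into m
+ * \endcode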
+ */
+
+
+#include "../../Eigen/src/misc/Solve.h"
+#include "../../Eigen/src/misc/SparseSolve.h"
+
+#include "src/SparseExtra/DynamicSparseMatrix.h"
+#include "src/SparseExtra/BlockOfDynamicSparseMatrix.h"
+#include "src/SparseExtra/RandomSetter.h"
+
+#include "src/SparseExtra/MarketIO.h"
+
+#if !defined(_WIN32)
+#include <dirent.h>
+#include "src/SparseExtra/MatrixMarketIterator.h"
+#endif
+
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_SPARSE_EXTRA_MODULE_H
diff --git a/unsupported/Eigen/Splines b/unsupported/Eigen/Splines
new file mode 100644
index 0000000..322e6b9
--- /dev/null
+++ b/unsupported/Eigen/Splines
@@ -0,0 +1,31 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010-2011 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPLINES_MODULE_H
+#define EIGEN_SPLINES_MODULE_H
+
+namespace Eigen
+{
+/**
+ * \defgroup Splines_Module Spline and spline fitting module
+ *
+ * This module provides a simple multi-dimensional spline class, together
+ * with basic functionality to fit a spline to a set of points.
+ *
+ * \code
+ * #include <unsupported/Eigen/Splines>
+ * \endcode
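+ *
+ * A minimal fitting sketch (2-D points stored one per column, cubic fit):
+ * \code
+ * typedef Spline<double,2> Spline2d;
+ * MatrixXd pts(2,5);
+ * pts << 0,1,2,3,4,
+ *        0,1,0,1,0;
+ * Spline2d s = SplineFitting<Spline2d>::Interpolate(pts, 3);
+ * Vector2d p = s(0.5);   // evaluate the spline at parameter u = 0.5
+ * \endcode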
+ */
+}
+
+#include "src/Splines/SplineFwd.h"
+#include "src/Splines/Spline.h"
+#include "src/Splines/SplineFitting.h"
+
+#endif // EIGEN_SPLINES_MODULE_H
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h b/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
new file mode 100644
index 0000000..1a61e33
--- /dev/null
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
@@ -0,0 +1,83 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_AUTODIFF_JACOBIAN_H
+#define EIGEN_AUTODIFF_JACOBIAN_H
+
+namespace Eigen
+{
+
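+// Computes the jacobian of a wrapped functor by evaluating it on
+// AutoDiffScalar inputs. Usage sketch (assuming MyFunctor provides the
+// InputType/ValueType/JacobianType typedefs expected below):
+//   AutoDiffJacobian<MyFunctor> adjac(f);
+//   adjac(x, &v, &jac);   // fills v = f(x) and jac = df/dx at x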
+template<typename Functor> class AutoDiffJacobian : public Functor
+{
+public:
+ AutoDiffJacobian() : Functor() {}
+ AutoDiffJacobian(const Functor& f) : Functor(f) {}
+
+ // forward constructors
+ template<typename T0>
+ AutoDiffJacobian(const T0& a0) : Functor(a0) {}
+ template<typename T0, typename T1>
+ AutoDiffJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {}
+ template<typename T0, typename T1, typename T2>
+ AutoDiffJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {}
+
+ enum {
+ InputsAtCompileTime = Functor::InputsAtCompileTime,
+ ValuesAtCompileTime = Functor::ValuesAtCompileTime
+ };
+
+ typedef typename Functor::InputType InputType;
+ typedef typename Functor::ValueType ValueType;
+ typedef typename Functor::JacobianType JacobianType;
+ typedef typename JacobianType::Scalar Scalar;
+ typedef typename JacobianType::Index Index;
+
+ typedef Matrix<Scalar,InputsAtCompileTime,1> DerivativeType;
+ typedef AutoDiffScalar<DerivativeType> ActiveScalar;
+
+
+ typedef Matrix<ActiveScalar, InputsAtCompileTime, 1> ActiveInput;
+ typedef Matrix<ActiveScalar, ValuesAtCompileTime, 1> ActiveValue;
+
+ void operator() (const InputType& x, ValueType* v, JacobianType* _jac=0) const
+ {
+ eigen_assert(v!=0);
+ if (!_jac)
+ {
+ Functor::operator()(x, v);
+ return;
+ }
+
+ JacobianType& jac = *_jac;
+
+ ActiveInput ax = x.template cast<ActiveScalar>();
+ ActiveValue av(jac.rows());
+
+ if(InputsAtCompileTime==Dynamic)
+ for (Index j=0; j<jac.rows(); j++)
+ av[j].derivatives().resize(this->inputs());
+
+ for (Index i=0; i<jac.cols(); i++)
+ ax[i].derivatives() = DerivativeType::Unit(this->inputs(),i);
+
+ Functor::operator()(ax, &av);
+
+ for (Index i=0; i<jac.rows(); i++)
+ {
+ (*v)[i] = av[i].value();
+ jac.row(i) = av[i].derivatives();
+ }
+ }
+protected:
+
+};
+
+}
+
+#endif // EIGEN_AUTODIFF_JACOBIAN_H
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
new file mode 100644
index 0000000..8d42e69
--- /dev/null
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
@@ -0,0 +1,642 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_AUTODIFF_SCALAR_H
+#define EIGEN_AUTODIFF_SCALAR_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename A, typename B>
+struct make_coherent_impl {
+ static void run(A&, B&) {}
+};
+
+// resize a to match b if a.size()==0, and conversely.
+template<typename A, typename B>
+void make_coherent(const A& a, const B&b)
+{
+ make_coherent_impl<A,B>::run(a.const_cast_derived(), b.const_cast_derived());
+}
+
+template<typename _DerType, bool Enable> struct auto_diff_special_op;
+
+} // end namespace internal
+
+/** \class AutoDiffScalar
+ * \brief A scalar type replacement with automatic differentiation capability
+ *
+ * \param _DerType the vector type used to store/represent the derivatives. The base scalar type
+ * as well as the number of derivatives to compute are determined from this type.
+ * Typical choices include, e.g., \c Vector4f for 4 derivatives, or \c VectorXf
+ * if the number of derivatives is not known at compile time, and/or, the number
+ * of derivatives is large.
+ * Note that _DerType can also be a reference (e.g., \c VectorXf&) to wrap an
+ * existing vector into an AutoDiffScalar.
+ * Finally, _DerType can also be any Eigen compatible expression.
+ *
+ * This class represents a scalar value while tracking its respective derivatives using Eigen's expression
+ * template mechanism.
+ *
+ * It supports the following list of global math functions:
+ * - std::abs, std::sqrt, std::pow, std::exp, std::log, std::sin, std::cos,
+ * - internal::abs, internal::sqrt, numext::pow, internal::exp, internal::log, internal::sin, internal::cos,
+ * - internal::conj, internal::real, internal::imag, numext::abs2.
+ *
+ * AutoDiffScalar can be used as the scalar type of an Eigen::Matrix object. However,
+ * in that case, the expression template mechanism only occurs at the top Matrix level,
+ * while derivatives are computed right away.
+ *
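+ * A minimal usage sketch, tracking a single derivative:
+ * \code
+ * typedef Eigen::AutoDiffScalar<Eigen::Matrix<double,1,1> > AD;
+ * AD x(2.0, 1, 0);          // value 2, 1 derivative, x is variable #0
+ * AD y = x * x + 3.0 * x;   // y.value() == 10, y.derivatives()(0) == 7
+ * \endcode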
+ */
+
+template<typename _DerType>
+class AutoDiffScalar
+ : public internal::auto_diff_special_op
+ <_DerType, !internal::is_same<typename internal::traits<typename internal::remove_all<_DerType>::type>::Scalar,
+ typename NumTraits<typename internal::traits<typename internal::remove_all<_DerType>::type>::Scalar>::Real>::value>
+{
+ public:
+ typedef internal::auto_diff_special_op
+ <_DerType, !internal::is_same<typename internal::traits<typename internal::remove_all<_DerType>::type>::Scalar,
+ typename NumTraits<typename internal::traits<typename internal::remove_all<_DerType>::type>::Scalar>::Real>::value> Base;
+ typedef typename internal::remove_all<_DerType>::type DerType;
+ typedef typename internal::traits<DerType>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real Real;
+
+ using Base::operator+;
+ using Base::operator*;
+
+ /** Default constructor without any initialization. */
+ AutoDiffScalar() {}
+
+ /** Constructs an active scalar from its \a value,
+ and initializes the \a nbDer derivatives such that it corresponds to the \a derNumber -th variable */
+ AutoDiffScalar(const Scalar& value, int nbDer, int derNumber)
+ : m_value(value), m_derivatives(DerType::Zero(nbDer))
+ {
+ m_derivatives.coeffRef(derNumber) = Scalar(1);
+ }
+
+ /** Conversion from a scalar constant to an active scalar.
+ * The derivatives are set to zero. */
+ /*explicit*/ AutoDiffScalar(const Real& value)
+ : m_value(value)
+ {
+ if(m_derivatives.size()>0)
+ m_derivatives.setZero();
+ }
+
+ /** Constructs an active scalar from its \a value and derivatives \a der */
+ AutoDiffScalar(const Scalar& value, const DerType& der)
+ : m_value(value), m_derivatives(der)
+ {}
+
+ template<typename OtherDerType>
+ AutoDiffScalar(const AutoDiffScalar<OtherDerType>& other)
+ : m_value(other.value()), m_derivatives(other.derivatives())
+ {}
+
+ friend std::ostream & operator << (std::ostream & s, const AutoDiffScalar& a)
+ {
+ return s << a.value();
+ }
+
+ AutoDiffScalar(const AutoDiffScalar& other)
+ : m_value(other.value()), m_derivatives(other.derivatives())
+ {}
+
+ template<typename OtherDerType>
+ inline AutoDiffScalar& operator=(const AutoDiffScalar<OtherDerType>& other)
+ {
+ m_value = other.value();
+ m_derivatives = other.derivatives();
+ return *this;
+ }
+
+ inline AutoDiffScalar& operator=(const AutoDiffScalar& other)
+ {
+ m_value = other.value();
+ m_derivatives = other.derivatives();
+ return *this;
+ }
+
+// inline operator const Scalar& () const { return m_value; }
+// inline operator Scalar& () { return m_value; }
+
+ inline const Scalar& value() const { return m_value; }
+ inline Scalar& value() { return m_value; }
+
+ inline const DerType& derivatives() const { return m_derivatives; }
+ inline DerType& derivatives() { return m_derivatives; }
+
+ inline bool operator< (const Scalar& other) const { return m_value < other; }
+ inline bool operator<=(const Scalar& other) const { return m_value <= other; }
+ inline bool operator> (const Scalar& other) const { return m_value > other; }
+ inline bool operator>=(const Scalar& other) const { return m_value >= other; }
+ inline bool operator==(const Scalar& other) const { return m_value == other; }
+ inline bool operator!=(const Scalar& other) const { return m_value != other; }
+
+ friend inline bool operator< (const Scalar& a, const AutoDiffScalar& b) { return a < b.value(); }
+ friend inline bool operator<=(const Scalar& a, const AutoDiffScalar& b) { return a <= b.value(); }
+ friend inline bool operator> (const Scalar& a, const AutoDiffScalar& b) { return a > b.value(); }
+ friend inline bool operator>=(const Scalar& a, const AutoDiffScalar& b) { return a >= b.value(); }
+ friend inline bool operator==(const Scalar& a, const AutoDiffScalar& b) { return a == b.value(); }
+ friend inline bool operator!=(const Scalar& a, const AutoDiffScalar& b) { return a != b.value(); }
+
+ template<typename OtherDerType> inline bool operator< (const AutoDiffScalar<OtherDerType>& b) const { return m_value < b.value(); }
+ template<typename OtherDerType> inline bool operator<=(const AutoDiffScalar<OtherDerType>& b) const { return m_value <= b.value(); }
+ template<typename OtherDerType> inline bool operator> (const AutoDiffScalar<OtherDerType>& b) const { return m_value > b.value(); }
+ template<typename OtherDerType> inline bool operator>=(const AutoDiffScalar<OtherDerType>& b) const { return m_value >= b.value(); }
+ template<typename OtherDerType> inline bool operator==(const AutoDiffScalar<OtherDerType>& b) const { return m_value == b.value(); }
+ template<typename OtherDerType> inline bool operator!=(const AutoDiffScalar<OtherDerType>& b) const { return m_value != b.value(); }
+
+ inline const AutoDiffScalar<DerType&> operator+(const Scalar& other) const
+ {
+ return AutoDiffScalar<DerType&>(m_value + other, m_derivatives);
+ }
+
+ friend inline const AutoDiffScalar<DerType&> operator+(const Scalar& a, const AutoDiffScalar& b)
+ {
+ return AutoDiffScalar<DerType&>(a + b.value(), b.derivatives());
+ }
+
+// inline const AutoDiffScalar<DerType&> operator+(const Real& other) const
+// {
+// return AutoDiffScalar<DerType&>(m_value + other, m_derivatives);
+// }
+
+// friend inline const AutoDiffScalar<DerType&> operator+(const Real& a, const AutoDiffScalar& b)
+// {
+// return AutoDiffScalar<DerType&>(a + b.value(), b.derivatives());
+// }
+
+ inline AutoDiffScalar& operator+=(const Scalar& other)
+ {
+ value() += other;
+ return *this;
+ }
+
+ template<typename OtherDerType>
+ inline const AutoDiffScalar<CwiseBinaryOp<internal::scalar_sum_op<Scalar>,const DerType,const typename internal::remove_all<OtherDerType>::type> >
+ operator+(const AutoDiffScalar<OtherDerType>& other) const
+ {
+ internal::make_coherent(m_derivatives, other.derivatives());
+ return AutoDiffScalar<CwiseBinaryOp<internal::scalar_sum_op<Scalar>,const DerType,const typename internal::remove_all<OtherDerType>::type> >(
+ m_value + other.value(),
+ m_derivatives + other.derivatives());
+ }
+
+ template<typename OtherDerType>
+ inline AutoDiffScalar&
+ operator+=(const AutoDiffScalar<OtherDerType>& other)
+ {
+ (*this) = (*this) + other;
+ return *this;
+ }
+
+ inline const AutoDiffScalar<DerType&> operator-(const Scalar& b) const
+ {
+ return AutoDiffScalar<DerType&>(m_value - b, m_derivatives);
+ }
+
+ friend inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const DerType> >
+ operator-(const Scalar& a, const AutoDiffScalar& b)
+ {
+ return AutoDiffScalar<CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const DerType> >
+ (a - b.value(), -b.derivatives());
+ }
+
+ inline AutoDiffScalar& operator-=(const Scalar& other)
+ {
+ value() -= other;
+ return *this;
+ }
+
+ template<typename OtherDerType>
+ inline const AutoDiffScalar<CwiseBinaryOp<internal::scalar_difference_op<Scalar>, const DerType,const typename internal::remove_all<OtherDerType>::type> >
+ operator-(const AutoDiffScalar<OtherDerType>& other) const
+ {
+ internal::make_coherent(m_derivatives, other.derivatives());
+ return AutoDiffScalar<CwiseBinaryOp<internal::scalar_difference_op<Scalar>, const DerType,const typename internal::remove_all<OtherDerType>::type> >(
+ m_value - other.value(),
+ m_derivatives - other.derivatives());
+ }
+
+ template<typename OtherDerType>
+ inline AutoDiffScalar&
+ operator-=(const AutoDiffScalar<OtherDerType>& other)
+ {
+ *this = *this - other;
+ return *this;
+ }
+
+ inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const DerType> >
+ operator-() const
+ {
+ return AutoDiffScalar<CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const DerType> >(
+ -m_value,
+ -m_derivatives);
+ }
+
+ inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >
+ operator*(const Scalar& other) const
+ {
+ return AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >(
+ m_value * other,
+ (m_derivatives * other));
+ }
+
+ friend inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >
+ operator*(const Scalar& other, const AutoDiffScalar& a)
+ {
+ return AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >(
+ a.value() * other,
+ a.derivatives() * other);
+ }
+
+// inline const AutoDiffScalar<typename CwiseUnaryOp<internal::scalar_multiple_op<Real>, DerType>::Type >
+// operator*(const Real& other) const
+// {
+// return AutoDiffScalar<typename CwiseUnaryOp<internal::scalar_multiple_op<Real>, DerType>::Type >(
+// m_value * other,
+// (m_derivatives * other));
+// }
+//
+// friend inline const AutoDiffScalar<typename CwiseUnaryOp<internal::scalar_multiple_op<Real>, DerType>::Type >
+// operator*(const Real& other, const AutoDiffScalar& a)
+// {
+// return AutoDiffScalar<typename CwiseUnaryOp<internal::scalar_multiple_op<Real>, DerType>::Type >(
+// a.value() * other,
+// a.derivatives() * other);
+// }
+
+ inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >
+ operator/(const Scalar& other) const
+ {
+ return AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >(
+ m_value / other,
+ (m_derivatives * (Scalar(1)/other)));
+ }
+
+ friend inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >
+ operator/(const Scalar& other, const AutoDiffScalar& a)
+ {
+ return AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType> >(
+ other / a.value(),
+ a.derivatives() * (Scalar(-other) / (a.value()*a.value())));
+ }
+
+// inline const AutoDiffScalar<typename CwiseUnaryOp<internal::scalar_multiple_op<Real>, DerType>::Type >
+// operator/(const Real& other) const
+// {
+// return AutoDiffScalar<typename CwiseUnaryOp<internal::scalar_multiple_op<Real>, DerType>::Type >(
+// m_value / other,
+// (m_derivatives * (Real(1)/other)));
+// }
+//
+// friend inline const AutoDiffScalar<typename CwiseUnaryOp<internal::scalar_multiple_op<Real>, DerType>::Type >
+// operator/(const Real& other, const AutoDiffScalar& a)
+// {
+// return AutoDiffScalar<typename CwiseUnaryOp<internal::scalar_multiple_op<Real>, DerType>::Type >(
+// other / a.value(),
+// a.derivatives() * (-Real(1)/other));
+// }
+
+ template<typename OtherDerType>
+ inline const AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>,
+ const CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
+ const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType>,
+ const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const typename internal::remove_all<OtherDerType>::type > > > >
+ operator/(const AutoDiffScalar<OtherDerType>& other) const
+ {
+ internal::make_coherent(m_derivatives, other.derivatives());
+ return AutoDiffScalar<CwiseUnaryOp<internal::scalar_multiple_op<Scalar>,
+ const CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
+ const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType>,
+ const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const typename internal::remove_all<OtherDerType>::type > > > >(
+ m_value / other.value(),
+ ((m_derivatives * other.value()) - (m_value * other.derivatives()))
+ * (Scalar(1)/(other.value()*other.value())));
+ }
+
+ template<typename OtherDerType>
+ inline const AutoDiffScalar<CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
+ const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType>,
+ const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const typename internal::remove_all<OtherDerType>::type> > >
+ operator*(const AutoDiffScalar<OtherDerType>& other) const
+ {
+ internal::make_coherent(m_derivatives, other.derivatives());
+ return AutoDiffScalar<const CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
+ const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const DerType>,
+ const CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const typename internal::remove_all<OtherDerType>::type > > >(
+ m_value * other.value(),
+ (m_derivatives * other.value()) + (m_value * other.derivatives()));
+ }
+
+ inline AutoDiffScalar& operator*=(const Scalar& other)
+ {
+ *this = *this * other;
+ return *this;
+ }
+
+ template<typename OtherDerType>
+ inline AutoDiffScalar& operator*=(const AutoDiffScalar<OtherDerType>& other)
+ {
+ *this = *this * other;
+ return *this;
+ }
+
+ inline AutoDiffScalar& operator/=(const Scalar& other)
+ {
+ *this = *this / other;
+ return *this;
+ }
+
+ template<typename OtherDerType>
+ inline AutoDiffScalar& operator/=(const AutoDiffScalar<OtherDerType>& other)
+ {
+ *this = *this / other;
+ return *this;
+ }
+
+ protected:
+ Scalar m_value;
+ DerType m_derivatives;
+
+};
+
+namespace internal {
+
+template<typename _DerType>
+struct auto_diff_special_op<_DerType, true>
+// : auto_diff_scalar_op<_DerType, typename NumTraits<Scalar>::Real,
+// is_same<Scalar,typename NumTraits<Scalar>::Real>::value>
+{
+ typedef typename remove_all<_DerType>::type DerType;
+ typedef typename traits<DerType>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real Real;
+
+// typedef auto_diff_scalar_op<_DerType, typename NumTraits<Scalar>::Real,
+// is_same<Scalar,typename NumTraits<Scalar>::Real>::value> Base;
+
+// using Base::operator+;
+// using Base::operator+=;
+// using Base::operator-;
+// using Base::operator-=;
+// using Base::operator*;
+// using Base::operator*=;
+
+ const AutoDiffScalar<_DerType>& derived() const { return *static_cast<const AutoDiffScalar<_DerType>*>(this); }
+ AutoDiffScalar<_DerType>& derived() { return *static_cast<AutoDiffScalar<_DerType>*>(this); }
+
+
+ inline const AutoDiffScalar<DerType&> operator+(const Real& other) const
+ {
+ return AutoDiffScalar<DerType&>(derived().value() + other, derived().derivatives());
+ }
+
+ friend inline const AutoDiffScalar<DerType&> operator+(const Real& a, const AutoDiffScalar<_DerType>& b)
+ {
+ return AutoDiffScalar<DerType&>(a + b.value(), b.derivatives());
+ }
+
+ inline AutoDiffScalar<_DerType>& operator+=(const Real& other)
+ {
+ derived().value() += other;
+ return derived();
+ }
+
+
+ inline const AutoDiffScalar<typename CwiseUnaryOp<scalar_multiple2_op<Scalar,Real>, DerType>::Type >
+ operator*(const Real& other) const
+ {
+ return AutoDiffScalar<typename CwiseUnaryOp<scalar_multiple2_op<Scalar,Real>, DerType>::Type >(
+ derived().value() * other,
+ derived().derivatives() * other);
+ }
+
+ friend inline const AutoDiffScalar<typename CwiseUnaryOp<scalar_multiple2_op<Scalar,Real>, DerType>::Type >
+ operator*(const Real& other, const AutoDiffScalar<_DerType>& a)
+ {
+ return AutoDiffScalar<typename CwiseUnaryOp<scalar_multiple2_op<Scalar,Real>, DerType>::Type >(
+ a.value() * other,
+ a.derivatives() * other);
+ }
+
+ inline AutoDiffScalar<_DerType>& operator*=(const Scalar& other)
+ {
+ *this = *this * other;
+ return derived();
+ }
+};
+
+template<typename _DerType>
+struct auto_diff_special_op<_DerType, false>
+{
+ void operator*() const;
+ void operator-() const;
+ void operator+() const;
+};
+
+template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols, typename B>
+struct make_coherent_impl<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>, B> {
+ typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> A;
+ static void run(A& a, B& b) {
+ if((A_Rows==Dynamic || A_Cols==Dynamic) && (a.size()==0))
+ {
+ a.resize(b.size());
+ a.setZero();
+ }
+ }
+};
+
+template<typename A, typename B_Scalar, int B_Rows, int B_Cols, int B_Options, int B_MaxRows, int B_MaxCols>
+struct make_coherent_impl<A, Matrix<B_Scalar, B_Rows, B_Cols, B_Options, B_MaxRows, B_MaxCols> > {
+ typedef Matrix<B_Scalar, B_Rows, B_Cols, B_Options, B_MaxRows, B_MaxCols> B;
+ static void run(A& a, B& b) {
+ if((B_Rows==Dynamic || B_Cols==Dynamic) && (b.size()==0))
+ {
+ b.resize(a.size());
+ b.setZero();
+ }
+ }
+};
+
+template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols,
+ typename B_Scalar, int B_Rows, int B_Cols, int B_Options, int B_MaxRows, int B_MaxCols>
+struct make_coherent_impl<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>,
+ Matrix<B_Scalar, B_Rows, B_Cols, B_Options, B_MaxRows, B_MaxCols> > {
+ typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> A;
+ typedef Matrix<B_Scalar, B_Rows, B_Cols, B_Options, B_MaxRows, B_MaxCols> B;
+ static void run(A& a, B& b) {
+ if((A_Rows==Dynamic || A_Cols==Dynamic) && (a.size()==0))
+ {
+ a.resize(b.size());
+ a.setZero();
+ }
+ else if((B_Rows==Dynamic || B_Cols==Dynamic) && (b.size()==0))
+ {
+ b.resize(a.size());
+ b.setZero();
+ }
+ }
+};
+
+template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols>
+struct scalar_product_traits<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>,A_Scalar>
+{
+ enum { Defined = 1 };
+ typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
+};
+
+template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols>
+struct scalar_product_traits<A_Scalar, Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> >
+{
+ enum { Defined = 1 };
+ typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
+};
+
+template<typename DerType>
+struct scalar_product_traits<AutoDiffScalar<DerType>,typename DerType::Scalar>
+{
+ enum { Defined = 1 };
+ typedef AutoDiffScalar<DerType> ReturnType;
+};
+
+template<typename DerType>
+struct scalar_product_traits<typename DerType::Scalar,AutoDiffScalar<DerType> >
+{
+ enum { Defined = 1 };
+ typedef AutoDiffScalar<DerType> ReturnType;
+};
+
+} // end namespace internal
+
+#define EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(FUNC,CODE) \
+ template<typename DerType> \
+ inline const Eigen::AutoDiffScalar<Eigen::CwiseUnaryOp<Eigen::internal::scalar_multiple_op<typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar>, const typename Eigen::internal::remove_all<DerType>::type> > \
+ FUNC(const Eigen::AutoDiffScalar<DerType>& x) { \
+ using namespace Eigen; \
+ typedef typename Eigen::internal::traits<typename Eigen::internal::remove_all<DerType>::type>::Scalar Scalar; \
+ typedef AutoDiffScalar<CwiseUnaryOp<Eigen::internal::scalar_multiple_op<Scalar>, const typename Eigen::internal::remove_all<DerType>::type> > ReturnType; \
+ CODE; \
+ }
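+
+// Each invocation of this macro below generates a free function implementing the
+// chain rule: f(x).value() == f(x.value()) and
+// f(x).derivatives() == f'(x.value()) * x.derivatives().
+// A minimal illustrative sketch (the 1-D seed vector is an assumption, not part of this file):
+//   AutoDiffScalar<Matrix<double,1,1> > x(4.0, Matrix<double,1,1>::Ones());
+//   // sqrt(x).value() == 2.0 and sqrt(x).derivatives()(0) == 0.25, i.e. 0.5/sqrt(4.0)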
+
+template<typename DerType>
+inline const AutoDiffScalar<DerType>& conj(const AutoDiffScalar<DerType>& x) { return x; }
+template<typename DerType>
+inline const AutoDiffScalar<DerType>& real(const AutoDiffScalar<DerType>& x) { return x; }
+template<typename DerType>
+inline typename DerType::Scalar imag(const AutoDiffScalar<DerType>&) { return 0.; }
+template<typename DerType, typename T>
+inline AutoDiffScalar<DerType> (min)(const AutoDiffScalar<DerType>& x, const T& y) { return (x <= y ? x : y); }
+template<typename DerType, typename T>
+inline AutoDiffScalar<DerType> (max)(const AutoDiffScalar<DerType>& x, const T& y) { return (x >= y ? x : y); }
+template<typename DerType, typename T>
+inline AutoDiffScalar<DerType> (min)(const T& x, const AutoDiffScalar<DerType>& y) { return (x < y ? x : y); }
+template<typename DerType, typename T>
+inline AutoDiffScalar<DerType> (max)(const T& x, const AutoDiffScalar<DerType>& y) { return (x > y ? x : y); }
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs,
+ using std::abs;
+ return ReturnType(abs(x.value()), x.derivatives() * (x.value()<0 ? -1 : 1) );)
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs2,
+ using numext::abs2;
+ return ReturnType(abs2(x.value()), x.derivatives() * (Scalar(2)*x.value()));)
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sqrt,
+ using std::sqrt;
+ Scalar sqrtx = sqrt(x.value());
+ return ReturnType(sqrtx,x.derivatives() * (Scalar(0.5) / sqrtx));)
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(cos,
+ using std::cos;
+ using std::sin;
+ return ReturnType(cos(x.value()), x.derivatives() * (-sin(x.value())));)
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sin,
+ using std::sin;
+ using std::cos;
+ return ReturnType(sin(x.value()),x.derivatives() * cos(x.value()));)
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(exp,
+ using std::exp;
+ Scalar expx = exp(x.value());
+ return ReturnType(expx,x.derivatives() * expx);)
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(log,
+ using std::log;
+ return ReturnType(log(x.value()),x.derivatives() * (Scalar(1)/x.value()));)
+
+template<typename DerType>
+inline const Eigen::AutoDiffScalar<Eigen::CwiseUnaryOp<Eigen::internal::scalar_multiple_op<typename Eigen::internal::traits<DerType>::Scalar>, const DerType> >
+pow(const Eigen::AutoDiffScalar<DerType>& x, typename Eigen::internal::traits<DerType>::Scalar y)
+{
+ using namespace Eigen;
+ typedef typename Eigen::internal::traits<DerType>::Scalar Scalar;
+ return AutoDiffScalar<CwiseUnaryOp<Eigen::internal::scalar_multiple_op<Scalar>, const DerType> >(
+ std::pow(x.value(),y),
+ x.derivatives() * (y * std::pow(x.value(),y-1)));
+}
+
+
+template<typename DerTypeA,typename DerTypeB>
+inline const AutoDiffScalar<Matrix<typename internal::traits<DerTypeA>::Scalar,Dynamic,1> >
+atan2(const AutoDiffScalar<DerTypeA>& a, const AutoDiffScalar<DerTypeB>& b)
+{
+ using std::atan2;
+ using std::max;
+ typedef typename internal::traits<DerTypeA>::Scalar Scalar;
+ typedef AutoDiffScalar<Matrix<Scalar,Dynamic,1> > PlainADS;
+ PlainADS ret;
+ ret.value() = atan2(a.value(), b.value());
+
+ Scalar tmp2 = a.value() * a.value();
+ Scalar tmp3 = b.value() * b.value();
+ Scalar tmp4 = tmp3/(tmp2+tmp3);
+
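+ // d(atan2(a,b)) = (a' * b - a * b') / (a^2 + b^2); tmp4 == 0 only when b == 0,
+ // in which case the update below is skipped and the derivatives are left untouched.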
+ if (tmp4!=0)
+ ret.derivatives() = (a.derivatives() * b.value() - a.value() * b.derivatives()) / (tmp2+tmp3);
+
+ return ret;
+}
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(tan,
+ using std::tan;
+ using std::cos;
+ return ReturnType(tan(x.value()),x.derivatives() * (Scalar(1)/numext::abs2(cos(x.value()))));)
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(asin,
+ using std::sqrt;
+ using std::asin;
+ return ReturnType(asin(x.value()),x.derivatives() * (Scalar(1)/sqrt(1-numext::abs2(x.value()))));)
+
+EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(acos,
+ using std::sqrt;
+ using std::acos;
+ return ReturnType(acos(x.value()),x.derivatives() * (Scalar(-1)/sqrt(1-numext::abs2(x.value()))));)
+
+#undef EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY
+
+template<typename DerType> struct NumTraits<AutoDiffScalar<DerType> >
+ : NumTraits< typename NumTraits<typename DerType::Scalar>::Real >
+{
+ typedef AutoDiffScalar<Matrix<typename NumTraits<typename DerType::Scalar>::Real,DerType::RowsAtCompileTime,DerType::ColsAtCompileTime> > Real;
+ typedef AutoDiffScalar<DerType> NonInteger;
+ typedef AutoDiffScalar<DerType>& Nested;
+ enum{
+ RequireInitialization = 1
+ };
+};
+
+}
+
+#endif // EIGEN_AUTODIFF_SCALAR_H
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h b/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h
new file mode 100644
index 0000000..8c2d048
--- /dev/null
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h
@@ -0,0 +1,220 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_AUTODIFF_VECTOR_H
+#define EIGEN_AUTODIFF_VECTOR_H
+
+namespace Eigen {
+
+/* \class AutoDiffScalar
+ * \brief A scalar type replacement with automatic differentiation capability
+ *
+ * \param DerType the vector type used to store/represent the derivatives (e.g. Vector3f)
+ *
+ * This class represents a scalar value while tracking its respective derivatives.
+ *
+ * It supports the following list of global math functions:
+ * - std::abs, std::sqrt, std::pow, std::exp, std::log, std::sin, std::cos,
+ * - internal::abs, internal::sqrt, numext::pow, internal::exp, internal::log, internal::sin, internal::cos,
+ * - internal::conj, internal::real, internal::imag, numext::abs2.
+ *
+ * AutoDiffScalar can be used as the scalar type of an Eigen::Matrix object. However,
+ * in that case, the expression template mechanism only occurs at the top Matrix level,
+ * while derivatives are computed right away.
+ *
+ */
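+// A minimal usage sketch of AutoDiffVector (the 2-D types and identity seed are
+// illustrative assumptions):
+//   typedef AutoDiffVector<Vector2d, Matrix2d> ADVec;
+//   ADVec v(Vector2d(1.0, 2.0), Matrix2d::Identity()); // seed: d(values)/d(inputs)
+//   ADVec w = v + v; // w.values() == (2, 4), w.jacobian() == 2 * Identity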
+template<typename ValueType, typename JacobianType>
+class AutoDiffVector
+{
+ public:
+ //typedef typename internal::traits<ValueType>::Scalar Scalar;
+ typedef typename internal::traits<ValueType>::Scalar BaseScalar;
+ typedef AutoDiffScalar<Matrix<BaseScalar,JacobianType::RowsAtCompileTime,1> > ActiveScalar;
+ typedef ActiveScalar Scalar;
+ typedef AutoDiffScalar<typename JacobianType::ColXpr> CoeffType;
+ typedef typename JacobianType::Index Index;
+
+ inline AutoDiffVector() {}
+
+ inline AutoDiffVector(const ValueType& values)
+ : m_values(values)
+ {
+ m_jacobian.setZero();
+ }
+
+
+ CoeffType operator[] (Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); }
+ const CoeffType operator[] (Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); }
+
+ CoeffType operator() (Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); }
+ const CoeffType operator() (Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); }
+
+ CoeffType coeffRef(Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); }
+ const CoeffType coeffRef(Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); }
+
+ Index size() const { return m_values.size(); }
+
+ // FIXME here we could return an expression of the sum
+ Scalar sum() const { return Scalar(m_values.sum(), m_jacobian.rowwise().sum()); }
+
+
+ inline AutoDiffVector(const ValueType& values, const JacobianType& jac)
+ : m_values(values), m_jacobian(jac)
+ {}
+
+ template<typename OtherValueType, typename OtherJacobianType>
+ inline AutoDiffVector(const AutoDiffVector<OtherValueType, OtherJacobianType>& other)
+ : m_values(other.values()), m_jacobian(other.jacobian())
+ {}
+
+ inline AutoDiffVector(const AutoDiffVector& other)
+ : m_values(other.values()), m_jacobian(other.jacobian())
+ {}
+
+ template<typename OtherValueType, typename OtherJacobianType>
+ inline AutoDiffVector& operator=(const AutoDiffVector<OtherValueType, OtherJacobianType>& other)
+ {
+ m_values = other.values();
+ m_jacobian = other.jacobian();
+ return *this;
+ }
+
+ inline AutoDiffVector& operator=(const AutoDiffVector& other)
+ {
+ m_values = other.values();
+ m_jacobian = other.jacobian();
+ return *this;
+ }
+
+ inline const ValueType& values() const { return m_values; }
+ inline ValueType& values() { return m_values; }
+
+ inline const JacobianType& jacobian() const { return m_jacobian; }
+ inline JacobianType& jacobian() { return m_jacobian; }
+
+ template<typename OtherValueType,typename OtherJacobianType>
+ inline const AutoDiffVector<
+ typename MakeCwiseBinaryOp<internal::scalar_sum_op<BaseScalar>,ValueType,OtherValueType>::Type,
+ typename MakeCwiseBinaryOp<internal::scalar_sum_op<BaseScalar>,JacobianType,OtherJacobianType>::Type >
+ operator+(const AutoDiffVector<OtherValueType,OtherJacobianType>& other) const
+ {
+ return AutoDiffVector<
+ typename MakeCwiseBinaryOp<internal::scalar_sum_op<BaseScalar>,ValueType,OtherValueType>::Type,
+ typename MakeCwiseBinaryOp<internal::scalar_sum_op<BaseScalar>,JacobianType,OtherJacobianType>::Type >(
+ m_values + other.values(),
+ m_jacobian + other.jacobian());
+ }
+
+ template<typename OtherValueType, typename OtherJacobianType>
+ inline AutoDiffVector&
+ operator+=(const AutoDiffVector<OtherValueType,OtherJacobianType>& other)
+ {
+ m_values += other.values();
+ m_jacobian += other.jacobian();
+ return *this;
+ }
+
+ template<typename OtherValueType,typename OtherJacobianType>
+ inline const AutoDiffVector<
+ typename MakeCwiseBinaryOp<internal::scalar_difference_op<Scalar>,ValueType,OtherValueType>::Type,
+ typename MakeCwiseBinaryOp<internal::scalar_difference_op<Scalar>,JacobianType,OtherJacobianType>::Type >
+ operator-(const AutoDiffVector<OtherValueType,OtherJacobianType>& other) const
+ {
+ return AutoDiffVector<
+ typename MakeCwiseBinaryOp<internal::scalar_difference_op<Scalar>,ValueType,OtherValueType>::Type,
+ typename MakeCwiseBinaryOp<internal::scalar_difference_op<Scalar>,JacobianType,OtherJacobianType>::Type >(
+ m_values - other.values(),
+ m_jacobian - other.jacobian());
+ }
+
+ template<typename OtherValueType, typename OtherJacobianType>
+ inline AutoDiffVector&
+ operator-=(const AutoDiffVector<OtherValueType,OtherJacobianType>& other)
+ {
+ m_values -= other.values();
+ m_jacobian -= other.jacobian();
+ return *this;
+ }
+
+ inline const AutoDiffVector<
+ typename MakeCwiseUnaryOp<internal::scalar_opposite_op<Scalar>, ValueType>::Type,
+ typename MakeCwiseUnaryOp<internal::scalar_opposite_op<Scalar>, JacobianType>::Type >
+ operator-() const
+ {
+ return AutoDiffVector<
+ typename MakeCwiseUnaryOp<internal::scalar_opposite_op<Scalar>, ValueType>::Type,
+ typename MakeCwiseUnaryOp<internal::scalar_opposite_op<Scalar>, JacobianType>::Type >(
+ -m_values,
+ -m_jacobian);
+ }
+
+ inline const AutoDiffVector<
+ typename MakeCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, ValueType>::Type,
+ typename MakeCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, JacobianType>::Type>
+ operator*(const BaseScalar& other) const
+ {
+ return AutoDiffVector<
+ typename MakeCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, ValueType>::Type,
+ typename MakeCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, JacobianType>::Type >(
+ m_values * other,
+ m_jacobian * other);
+ }
+
+ friend inline const AutoDiffVector<
+ typename MakeCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, ValueType>::Type,
+ typename MakeCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, JacobianType>::Type >
+ operator*(const Scalar& other, const AutoDiffVector& v)
+ {
+ return AutoDiffVector<
+ typename MakeCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, ValueType>::Type,
+ typename MakeCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, JacobianType>::Type >(
+ v.values() * other,
+ v.jacobian() * other);
+ }
+
+// template<typename OtherValueType,typename OtherJacobianType>
+// inline const AutoDiffVector<
+// CwiseBinaryOp<internal::scalar_multiple_op<Scalar>, ValueType, OtherValueType>
+// CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
+// CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, JacobianType>,
+// CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, OtherJacobianType> > >
+// operator*(const AutoDiffVector<OtherValueType,OtherJacobianType>& other) const
+// {
+// return AutoDiffVector<
+// CwiseBinaryOp<internal::scalar_multiple_op<Scalar>, ValueType, OtherValueType>
+// CwiseBinaryOp<internal::scalar_sum_op<Scalar>,
+// CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, JacobianType>,
+// CwiseUnaryOp<internal::scalar_multiple_op<Scalar>, OtherJacobianType> > >(
+// m_values.cwise() * other.values(),
+// (m_jacobian * other.values()) + (m_values * other.jacobian()));
+// }
+
+ inline AutoDiffVector& operator*=(const Scalar& other)
+ {
+ m_values *= other;
+ m_jacobian *= other;
+ return *this;
+ }
+
+ template<typename OtherValueType,typename OtherJacobianType>
+ inline AutoDiffVector& operator*=(const AutoDiffVector<OtherValueType,OtherJacobianType>& other)
+ {
+ *this = *this * other;
+ return *this;
+ }
+
+ protected:
+ ValueType m_values;
+ JacobianType m_jacobian;
+
+};
+
+}
+
+#endif // EIGEN_AUTODIFF_VECTOR_H
diff --git a/unsupported/Eigen/src/AutoDiff/CMakeLists.txt b/unsupported/Eigen/src/AutoDiff/CMakeLists.txt
new file mode 100644
index 0000000..ad91fd9
--- /dev/null
+++ b/unsupported/Eigen/src/AutoDiff/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_AutoDiff_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_AutoDiff_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/AutoDiff COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/BVH/BVAlgorithms.h b/unsupported/Eigen/src/BVH/BVAlgorithms.h
new file mode 100644
index 0000000..994c8af
--- /dev/null
+++ b/unsupported/Eigen/src/BVH/BVAlgorithms.h
@@ -0,0 +1,293 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Ilya Baran <ibaran@mit.edu>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BVALGORITHMS_H
+#define EIGEN_BVALGORITHMS_H
+
+namespace Eigen {
+
+namespace internal {
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename BVH, typename Intersector>
+bool intersect_helper(const BVH &tree, Intersector &intersector, typename BVH::Index root)
+{
+ typedef typename BVH::Index Index;
+ typedef typename BVH::VolumeIterator VolIter;
+ typedef typename BVH::ObjectIterator ObjIter;
+
+ VolIter vBegin = VolIter(), vEnd = VolIter();
+ ObjIter oBegin = ObjIter(), oEnd = ObjIter();
+
+ std::vector<Index> todo(1, root);
+
+ while(!todo.empty()) {
+ tree.getChildren(todo.back(), vBegin, vEnd, oBegin, oEnd);
+ todo.pop_back();
+
+ for(; vBegin != vEnd; ++vBegin) //go through child volumes
+ if(intersector.intersectVolume(tree.getVolume(*vBegin)))
+ todo.push_back(*vBegin);
+
+ for(; oBegin != oEnd; ++oBegin) //go through child objects
+ if(intersector.intersectObject(*oBegin))
+ return true; //intersector said to stop query
+ }
+ return false;
+}
+#endif //not EIGEN_PARSED_BY_DOXYGEN
+
+template<typename Volume1, typename Object1, typename Object2, typename Intersector>
+struct intersector_helper1
+{
+ intersector_helper1(const Object2 &inStored, Intersector &in) : stored(inStored), intersector(in) {}
+ bool intersectVolume(const Volume1 &vol) { return intersector.intersectVolumeObject(vol, stored); }
+ bool intersectObject(const Object1 &obj) { return intersector.intersectObjectObject(obj, stored); }
+ Object2 stored;
+ Intersector &intersector;
+private:
+ intersector_helper1& operator=(const intersector_helper1&);
+};
+
+template<typename Volume2, typename Object2, typename Object1, typename Intersector>
+struct intersector_helper2
+{
+ intersector_helper2(const Object1 &inStored, Intersector &in) : stored(inStored), intersector(in) {}
+ bool intersectVolume(const Volume2 &vol) { return intersector.intersectObjectVolume(stored, vol); }
+ bool intersectObject(const Object2 &obj) { return intersector.intersectObjectObject(stored, obj); }
+ Object1 stored;
+ Intersector &intersector;
+private:
+ intersector_helper2& operator=(const intersector_helper2&);
+};
+
+} // end namespace internal
+
+/** Given a BVH, runs the query encapsulated by \a intersector.
+ * The Intersector type must provide the following members: \code
+ bool intersectVolume(const BVH::Volume &volume) //returns true if volume intersects the query
+ bool intersectObject(const BVH::Object &object) //returns true if the search should terminate immediately
+ \endcode
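+ * For illustration only, a minimal intersector sketch; the point-query types are
+ * assumptions (e.g. a KdBVH<double, 3, Vector3d>, so Volume = AlignedBox3d): \code
+ struct PointIntersector {
+ PointIntersector(const Vector3d &p) : point(p), found(false) {}
+ bool intersectVolume(const AlignedBox3d &vol) { return vol.contains(point); } //descend into boxes containing the point
+ bool intersectObject(const Vector3d &obj) { return found = (obj == point); } //true terminates the query
+ Vector3d point;
+ bool found;
+ };
+ \endcode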
+ */
+template<typename BVH, typename Intersector>
+void BVIntersect(const BVH &tree, Intersector &intersector)
+{
+ internal::intersect_helper(tree, intersector, tree.getRootIndex());
+}
+
+/** Given two BVH's, runs the query on their Cartesian product encapsulated by \a intersector.
+ * The Intersector type must provide the following members: \code
+ bool intersectVolumeVolume(const BVH1::Volume &v1, const BVH2::Volume &v2) //returns true if product of volumes intersects the query
+ bool intersectVolumeObject(const BVH1::Volume &v1, const BVH2::Object &o2) //returns true if the volume-object product intersects the query
+ bool intersectObjectVolume(const BVH1::Object &o1, const BVH2::Volume &v2) //returns true if the volume-object product intersects the query
+ bool intersectObjectObject(const BVH1::Object &o1, const BVH2::Object &o2) //returns true if the search should terminate immediately
+ \endcode
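+ * For illustration only, a sketch of a tandem intersector that stops as soon as the
+ * two trees share an object; the types are assumptions (both trees storing Vector3d
+ * objects in AlignedBox3d volumes): \code
+ struct SharedObjectIntersector {
+ SharedObjectIntersector() : found(false) {}
+ bool intersectVolumeVolume(const AlignedBox3d &v1, const AlignedBox3d &v2)
+ { return v1.intersects(v2); } //only descend into overlapping box pairs
+ bool intersectVolumeObject(const AlignedBox3d &v1, const Vector3d &o2)
+ { return v1.contains(o2); }
+ bool intersectObjectVolume(const Vector3d &o1, const AlignedBox3d &v2)
+ { return v2.contains(o1); }
+ bool intersectObjectObject(const Vector3d &o1, const Vector3d &o2)
+ { return found = (o1 == o2); } //true terminates the query
+ bool found;
+ };
+ \endcode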
+ */
+template<typename BVH1, typename BVH2, typename Intersector>
+void BVIntersect(const BVH1 &tree1, const BVH2 &tree2, Intersector &intersector) //TODO: tandem descent when it makes sense
+{
+ typedef typename BVH1::Index Index1;
+ typedef typename BVH2::Index Index2;
+ typedef internal::intersector_helper1<typename BVH1::Volume, typename BVH1::Object, typename BVH2::Object, Intersector> Helper1;
+ typedef internal::intersector_helper2<typename BVH2::Volume, typename BVH2::Object, typename BVH1::Object, Intersector> Helper2;
+ typedef typename BVH1::VolumeIterator VolIter1;
+ typedef typename BVH1::ObjectIterator ObjIter1;
+ typedef typename BVH2::VolumeIterator VolIter2;
+ typedef typename BVH2::ObjectIterator ObjIter2;
+
+ VolIter1 vBegin1 = VolIter1(), vEnd1 = VolIter1();
+ ObjIter1 oBegin1 = ObjIter1(), oEnd1 = ObjIter1();
+ VolIter2 vBegin2 = VolIter2(), vEnd2 = VolIter2(), vCur2 = VolIter2();
+ ObjIter2 oBegin2 = ObjIter2(), oEnd2 = ObjIter2(), oCur2 = ObjIter2();
+
+ std::vector<std::pair<Index1, Index2> > todo(1, std::make_pair(tree1.getRootIndex(), tree2.getRootIndex()));
+
+ while(!todo.empty()) {
+ tree1.getChildren(todo.back().first, vBegin1, vEnd1, oBegin1, oEnd1);
+ tree2.getChildren(todo.back().second, vBegin2, vEnd2, oBegin2, oEnd2);
+ todo.pop_back();
+
+ for(; vBegin1 != vEnd1; ++vBegin1) { //go through child volumes of first tree
+ const typename BVH1::Volume &vol1 = tree1.getVolume(*vBegin1);
+ for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
+ if(intersector.intersectVolumeVolume(vol1, tree2.getVolume(*vCur2)))
+ todo.push_back(std::make_pair(*vBegin1, *vCur2));
+ }
+
+ for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
+ Helper1 helper(*oCur2, intersector);
+ if(internal::intersect_helper(tree1, helper, *vBegin1))
+ return; //intersector said to stop query
+ }
+ }
+
+ for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree
+ for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
+ Helper2 helper(*oBegin1, intersector);
+ if(internal::intersect_helper(tree2, helper, *vCur2))
+ return; //intersector said to stop query
+ }
+
+ for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
+ if(intersector.intersectObjectObject(*oBegin1, *oCur2))
+ return; //intersector said to stop query
+ }
+ }
+ }
+}
+
+namespace internal {
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename BVH, typename Minimizer>
+typename Minimizer::Scalar minimize_helper(const BVH &tree, Minimizer &minimizer, typename BVH::Index root, typename Minimizer::Scalar minimum)
+{
+ typedef typename Minimizer::Scalar Scalar;
+ typedef typename BVH::Index Index;
+ typedef std::pair<Scalar, Index> QueueElement; //first element is priority
+ typedef typename BVH::VolumeIterator VolIter;
+ typedef typename BVH::ObjectIterator ObjIter;
+
+ VolIter vBegin = VolIter(), vEnd = VolIter();
+ ObjIter oBegin = ObjIter(), oEnd = ObjIter();
+ std::priority_queue<QueueElement, std::vector<QueueElement>, std::greater<QueueElement> > todo; //smallest is at the top
+
+ todo.push(std::make_pair(Scalar(), root));
+
+ while(!todo.empty()) {
+ tree.getChildren(todo.top().second, vBegin, vEnd, oBegin, oEnd);
+ todo.pop();
+
+ for(; oBegin != oEnd; ++oBegin) //go through child objects
+ minimum = (std::min)(minimum, minimizer.minimumOnObject(*oBegin));
+
+ for(; vBegin != vEnd; ++vBegin) { //go through child volumes
+ Scalar val = minimizer.minimumOnVolume(tree.getVolume(*vBegin));
+ if(val < minimum)
+ todo.push(std::make_pair(val, *vBegin));
+ }
+ }
+
+ return minimum;
+}
+#endif //not EIGEN_PARSED_BY_DOXYGEN
+
+
+template<typename Volume1, typename Object1, typename Object2, typename Minimizer>
+struct minimizer_helper1
+{
+ typedef typename Minimizer::Scalar Scalar;
+ minimizer_helper1(const Object2 &inStored, Minimizer &m) : stored(inStored), minimizer(m) {}
+ Scalar minimumOnVolume(const Volume1 &vol) { return minimizer.minimumOnVolumeObject(vol, stored); }
+ Scalar minimumOnObject(const Object1 &obj) { return minimizer.minimumOnObjectObject(obj, stored); }
+ Object2 stored;
+ Minimizer &minimizer;
+private:
+ minimizer_helper1& operator=(const minimizer_helper1&);
+};
+
+template<typename Volume2, typename Object2, typename Object1, typename Minimizer>
+struct minimizer_helper2
+{
+ typedef typename Minimizer::Scalar Scalar;
+ minimizer_helper2(const Object1 &inStored, Minimizer &m) : stored(inStored), minimizer(m) {}
+ Scalar minimumOnVolume(const Volume2 &vol) { return minimizer.minimumOnObjectVolume(stored, vol); }
+ Scalar minimumOnObject(const Object2 &obj) { return minimizer.minimumOnObjectObject(stored, obj); }
+ Object1 stored;
+ Minimizer &minimizer;
+private:
+ minimizer_helper2& operator=(const minimizer_helper2&);
+};
+
+} // end namespace internal
+
+/** Given a BVH, runs the query encapsulated by \a minimizer.
+ * \returns the minimum value.
+ * The Minimizer type must provide the following members: \code
+ typedef Scalar //the numeric type of what is being minimized--not necessarily the Scalar type of the BVH (if it has one)
+ Scalar minimumOnVolume(const BVH::Volume &volume)
+ Scalar minimumOnObject(const BVH::Object &object)
+ \endcode
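+ * For illustration only, a minimal minimizer sketch computing the squared distance
+ * from a fixed point to a point set; the types are assumptions (Volume = AlignedBox3d,
+ * Object = Vector3d): \code
+ struct PointDistanceMinimizer {
+ typedef double Scalar;
+ PointDistanceMinimizer(const Vector3d &p) : point(p) {}
+ Scalar minimumOnVolume(const AlignedBox3d &vol) { return vol.squaredExteriorDistance(point); } //lower bound
+ Scalar minimumOnObject(const Vector3d &obj) { return (obj - point).squaredNorm(); } //exact value
+ Vector3d point;
+ };
+ \endcode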
+ */
+template<typename BVH, typename Minimizer>
+typename Minimizer::Scalar BVMinimize(const BVH &tree, Minimizer &minimizer)
+{
+ return internal::minimize_helper(tree, minimizer, tree.getRootIndex(), (std::numeric_limits<typename Minimizer::Scalar>::max)());
+}
+
+/** Given two BVH's, runs the query on their Cartesian product encapsulated by \a minimizer.
+ * \returns the minimum value.
+ * The Minimizer type must provide the following members: \code
+ typedef Scalar //the numeric type of what is being minimized--not necessarily the Scalar type of the BVH (if it has one)
+ Scalar minimumOnVolumeVolume(const BVH1::Volume &v1, const BVH2::Volume &v2)
+ Scalar minimumOnVolumeObject(const BVH1::Volume &v1, const BVH2::Object &o2)
+ Scalar minimumOnObjectVolume(const BVH1::Object &o1, const BVH2::Volume &v2)
+ Scalar minimumOnObjectObject(const BVH1::Object &o1, const BVH2::Object &o2)
+ \endcode
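+ * For illustration only, a sketch of a tandem minimizer computing the squared distance
+ * between two point sets; the types are assumptions (both trees storing Vector3d
+ * objects in AlignedBox3d volumes): \code
+ struct SetDistanceMinimizer {
+ typedef double Scalar;
+ Scalar minimumOnVolumeVolume(const AlignedBox3d &v1, const AlignedBox3d &v2)
+ { return v1.squaredExteriorDistance(v2); } //lower bound for the pair of subtrees
+ Scalar minimumOnVolumeObject(const AlignedBox3d &v1, const Vector3d &o2)
+ { return v1.squaredExteriorDistance(o2); }
+ Scalar minimumOnObjectVolume(const Vector3d &o1, const AlignedBox3d &v2)
+ { return v2.squaredExteriorDistance(o1); }
+ Scalar minimumOnObjectObject(const Vector3d &o1, const Vector3d &o2)
+ { return (o1 - o2).squaredNorm(); } //exact value
+ };
+ \endcode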
+ */
+template<typename BVH1, typename BVH2, typename Minimizer>
+typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Minimizer &minimizer)
+{
+ typedef typename Minimizer::Scalar Scalar;
+ typedef typename BVH1::Index Index1;
+ typedef typename BVH2::Index Index2;
+ typedef internal::minimizer_helper1<typename BVH1::Volume, typename BVH1::Object, typename BVH2::Object, Minimizer> Helper1;
+ typedef internal::minimizer_helper2<typename BVH2::Volume, typename BVH2::Object, typename BVH1::Object, Minimizer> Helper2;
+ typedef std::pair<Scalar, std::pair<Index1, Index2> > QueueElement; //first element is priority
+ typedef typename BVH1::VolumeIterator VolIter1;
+ typedef typename BVH1::ObjectIterator ObjIter1;
+ typedef typename BVH2::VolumeIterator VolIter2;
+ typedef typename BVH2::ObjectIterator ObjIter2;
+
+ VolIter1 vBegin1 = VolIter1(), vEnd1 = VolIter1();
+ ObjIter1 oBegin1 = ObjIter1(), oEnd1 = ObjIter1();
+ VolIter2 vBegin2 = VolIter2(), vEnd2 = VolIter2(), vCur2 = VolIter2();
+ ObjIter2 oBegin2 = ObjIter2(), oEnd2 = ObjIter2(), oCur2 = ObjIter2();
+ std::priority_queue<QueueElement, std::vector<QueueElement>, std::greater<QueueElement> > todo; //smallest is at the top
+
+ Scalar minimum = (std::numeric_limits<Scalar>::max)();
+ todo.push(std::make_pair(Scalar(), std::make_pair(tree1.getRootIndex(), tree2.getRootIndex())));
+
+ while(!todo.empty()) {
+ tree1.getChildren(todo.top().second.first, vBegin1, vEnd1, oBegin1, oEnd1);
+ tree2.getChildren(todo.top().second.second, vBegin2, vEnd2, oBegin2, oEnd2);
+ todo.pop();
+
+ for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree
+ for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
+ minimum = (std::min)(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2));
+ }
+
+ for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
+ Helper2 helper(*oBegin1, minimizer);
+ minimum = (std::min)(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum));
+ }
+ }
+
+ for(; vBegin1 != vEnd1; ++vBegin1) { //go through child volumes of first tree
+ const typename BVH1::Volume &vol1 = tree1.getVolume(*vBegin1);
+
+ for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
+ Helper1 helper(*oCur2, minimizer);
+ minimum = (std::min)(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum));
+ }
+
+ for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
+ Scalar val = minimizer.minimumOnVolumeVolume(vol1, tree2.getVolume(*vCur2));
+ if(val < minimum)
+ todo.push(std::make_pair(val, std::make_pair(*vBegin1, *vCur2)));
+ }
+ }
+ }
+ return minimum;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_BVALGORITHMS_H
diff --git a/unsupported/Eigen/src/BVH/CMakeLists.txt b/unsupported/Eigen/src/BVH/CMakeLists.txt
new file mode 100644
index 0000000..b377d86
--- /dev/null
+++ b/unsupported/Eigen/src/BVH/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_BVH_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_BVH_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/BVH COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/BVH/KdBVH.h b/unsupported/Eigen/src/BVH/KdBVH.h
new file mode 100644
index 0000000..1b8d758
--- /dev/null
+++ b/unsupported/Eigen/src/BVH/KdBVH.h
@@ -0,0 +1,222 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Ilya Baran <ibaran@mit.edu>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef KDBVH_H_INCLUDED
+#define KDBVH_H_INCLUDED
+
+namespace Eigen {
+
+namespace internal {
+
+//internal pair class for the BVH--used instead of std::pair because of alignment
+template<typename Scalar, int Dim>
+struct vector_int_pair
+{
+EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar, Dim)
+ typedef Matrix<Scalar, Dim, 1> VectorType;
+
+ vector_int_pair(const VectorType &v, int i) : first(v), second(i) {}
+
+ VectorType first;
+ int second;
+};
+
+//these templates help the tree initializer get the bounding boxes either from a provided
+//iterator range or using bounding_box in a unified way
+template<typename ObjectList, typename VolumeList, typename BoxIter>
+struct get_boxes_helper {
+ void operator()(const ObjectList &objects, BoxIter boxBegin, BoxIter boxEnd, VolumeList &outBoxes)
+ {
+ outBoxes.insert(outBoxes.end(), boxBegin, boxEnd);
+ eigen_assert(outBoxes.size() == objects.size());
+ }
+};
+
+template<typename ObjectList, typename VolumeList>
+struct get_boxes_helper<ObjectList, VolumeList, int> {
+ void operator()(const ObjectList &objects, int, int, VolumeList &outBoxes)
+ {
+ outBoxes.reserve(objects.size());
+ for(int i = 0; i < (int)objects.size(); ++i)
+ outBoxes.push_back(bounding_box(objects[i]));
+ }
+};
+
+} // end namespace internal
+
+
+/** \class KdBVH
+ * \brief A simple bounding volume hierarchy based on AlignedBox
+ *
+ * \param _Scalar The underlying scalar type of the bounding boxes
+ * \param _Dim The dimension of the space in which the hierarchy lives
+ * \param _Object The object type that lives in the hierarchy. It must have value semantics. Either bounding_box(_Object) must
+ * be defined and return an AlignedBox<_Scalar, _Dim> or bounding boxes must be provided to the tree initializer.
+ *
+ * This class provides a simple (as opposed to optimized) implementation of a bounding volume hierarchy analogous to a Kd-tree.
+ * Given a sequence of objects, it computes their bounding boxes, constructs a Kd-tree of their centers
+ * and builds a BVH with the structure of that Kd-tree. When the elements of the tree are too expensive to be copied around,
+ * it is useful for _Object to be a pointer.
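+ *
+ * A minimal usage sketch (the point objects and the free function bounding_box() that
+ * the tree initializer relies on are assumptions, not part of this file): \code
+ namespace Eigen { AlignedBox3d bounding_box(const Vector3d &v) { return AlignedBox3d(v, v); } }
+ std::vector<Eigen::Vector3d> points; // filled elsewhere
+ Eigen::KdBVH<double, 3, Eigen::Vector3d> tree(points.begin(), points.end());
+ \endcode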
+ */
+template<typename _Scalar, int _Dim, typename _Object> class KdBVH
+{
+public:
+ enum { Dim = _Dim };
+ typedef _Object Object;
+ typedef std::vector<Object, aligned_allocator<Object> > ObjectList;
+ typedef _Scalar Scalar;
+ typedef AlignedBox<Scalar, Dim> Volume;
+ typedef std::vector<Volume, aligned_allocator<Volume> > VolumeList;
+ typedef int Index;
+ typedef const int *VolumeIterator; //the iterators are just pointers into the tree's vectors
+ typedef const Object *ObjectIterator;
+
+ KdBVH() {}
+
+ /** Given an iterator range over \a Object references, constructs the BVH. Requires that bounding_box(Object) return a Volume. */
+ template<typename Iter> KdBVH(Iter begin, Iter end) { init(begin, end, 0, 0); } //int is recognized by init as not being an iterator type
+
+ /** Given an iterator range over \a Object references and an iterator range over their bounding boxes, constructs the BVH */
+ template<typename OIter, typename BIter> KdBVH(OIter begin, OIter end, BIter boxBegin, BIter boxEnd) { init(begin, end, boxBegin, boxEnd); }
+
+ /** Given an iterator range over \a Object references, constructs the BVH, overwriting whatever is in there currently.
+ * Requires that bounding_box(Object) return a Volume. */
+ template<typename Iter> void init(Iter begin, Iter end) { init(begin, end, 0, 0); }
+
+ /** Given an iterator range over \a Object references and an iterator range over their bounding boxes,
+ * constructs the BVH, overwriting whatever is in there currently. */
+ template<typename OIter, typename BIter> void init(OIter begin, OIter end, BIter boxBegin, BIter boxEnd)
+ {
+ objects.clear();
+ boxes.clear();
+ children.clear();
+
+ objects.insert(objects.end(), begin, end);
+ int n = static_cast<int>(objects.size());
+
+ if(n < 2)
+ return; //if we have at most one object, we don't need any internal nodes
+
+ VolumeList objBoxes;
+ VIPairList objCenters;
+
+ //compute the bounding boxes depending on BIter type
+ internal::get_boxes_helper<ObjectList, VolumeList, BIter>()(objects, boxBegin, boxEnd, objBoxes);
+
+ objCenters.reserve(n);
+ boxes.reserve(n - 1);
+ children.reserve(2 * n - 2);
+
+ for(int i = 0; i < n; ++i)
+ objCenters.push_back(VIPair(objBoxes[i].center(), i));
+
+ build(objCenters, 0, n, objBoxes, 0); //the recursive part of the algorithm
+
+ ObjectList tmp(n);
+ tmp.swap(objects);
+ for(int i = 0; i < n; ++i)
+ objects[i] = tmp[objCenters[i].second];
+ }
+
+ /** \returns the index of the root of the hierarchy */
+ inline Index getRootIndex() const { return (int)boxes.size() - 1; }
+
+ /** Given an \a index of a node, on exit, \a outVBegin and \a outVEnd range over the indices of the volume children of the node
+ * and \a outOBegin and \a outOEnd range over the object children of the node */
+ EIGEN_STRONG_INLINE void getChildren(Index index, VolumeIterator &outVBegin, VolumeIterator &outVEnd,
+ ObjectIterator &outOBegin, ObjectIterator &outOEnd) const
+ { //inlining this function should open lots of optimization opportunities to the compiler
+ if(index < 0) {
+ outVBegin = outVEnd;
+ if(!objects.empty())
+ outOBegin = &(objects[0]);
+ outOEnd = outOBegin + objects.size(); //output all objects--necessary when the tree has only one object
+ return;
+ }
+
+ int numBoxes = static_cast<int>(boxes.size());
+
+ int idx = index * 2;
+ if(children[idx + 1] < numBoxes) { //second index is always bigger
+ outVBegin = &(children[idx]);
+ outVEnd = outVBegin + 2;
+ outOBegin = outOEnd;
+ }
+ else if(children[idx] >= numBoxes) { //if both children are objects
+ outVBegin = outVEnd;
+ outOBegin = &(objects[children[idx] - numBoxes]);
+ outOEnd = outOBegin + 2;
+ } else { //if the first child is a volume and the second is an object
+ outVBegin = &(children[idx]);
+ outVEnd = outVBegin + 1;
+ outOBegin = &(objects[children[idx + 1] - numBoxes]);
+ outOEnd = outOBegin + 1;
+ }
+ }
+
+ /** \returns the bounding box of the node at \a index */
+ inline const Volume &getVolume(Index index) const
+ {
+ return boxes[index];
+ }
+
+private:
+ typedef internal::vector_int_pair<Scalar, Dim> VIPair;
+ typedef std::vector<VIPair, aligned_allocator<VIPair> > VIPairList;
+ typedef Matrix<Scalar, Dim, 1> VectorType;
+ struct VectorComparator //compares vectors, or, more specifically, VIPairs along a particular dimension
+ {
+ VectorComparator(int inDim) : dim(inDim) {}
+ inline bool operator()(const VIPair &v1, const VIPair &v2) const { return v1.first[dim] < v2.first[dim]; }
+ int dim;
+ };
+
+ //Build the part of the tree between objects[from] and objects[to] (not including objects[to]).
+ //This routine partitions the objCenters in [from, to) along the dimension dim, recursively constructs
+ //the two halves, and adds their parent node. TODO: a cache-friendlier layout
+ void build(VIPairList &objCenters, int from, int to, const VolumeList &objBoxes, int dim)
+ {
+ eigen_assert(to - from > 1);
+ if(to - from == 2) {
+ boxes.push_back(objBoxes[objCenters[from].second].merged(objBoxes[objCenters[from + 1].second]));
+ children.push_back(from + (int)objects.size() - 1); //there are objects.size() - 1 tree nodes
+ children.push_back(from + (int)objects.size());
+ }
+ else if(to - from == 3) {
+ int mid = from + 2;
+ std::nth_element(objCenters.begin() + from, objCenters.begin() + mid,
+ objCenters.begin() + to, VectorComparator(dim)); //partition
+ build(objCenters, from, mid, objBoxes, (dim + 1) % Dim);
+ int idx1 = (int)boxes.size() - 1;
+ boxes.push_back(boxes[idx1].merged(objBoxes[objCenters[mid].second]));
+ children.push_back(idx1);
+ children.push_back(mid + (int)objects.size() - 1);
+ }
+ else {
+ int mid = from + (to - from) / 2;
+ std::nth_element(objCenters.begin() + from, objCenters.begin() + mid,
+ objCenters.begin() + to, VectorComparator(dim)); //partition
+ build(objCenters, from, mid, objBoxes, (dim + 1) % Dim);
+ int idx1 = (int)boxes.size() - 1;
+ build(objCenters, mid, to, objBoxes, (dim + 1) % Dim);
+ int idx2 = (int)boxes.size() - 1;
+ boxes.push_back(boxes[idx1].merged(boxes[idx2]));
+ children.push_back(idx1);
+ children.push_back(idx2);
+ }
+ }
+
+ std::vector<int> children; //children of x are children[2x] and children[2x+1], indices bigger than boxes.size() index into objects.
+ VolumeList boxes;
+ ObjectList objects;
+};
+
+} // end namespace Eigen
+
+#endif //KDBVH_H_INCLUDED
diff --git a/unsupported/Eigen/src/CMakeLists.txt b/unsupported/Eigen/src/CMakeLists.txt
new file mode 100644
index 0000000..125c43f
--- /dev/null
+++ b/unsupported/Eigen/src/CMakeLists.txt
@@ -0,0 +1,14 @@
+ADD_SUBDIRECTORY(AutoDiff)
+ADD_SUBDIRECTORY(BVH)
+ADD_SUBDIRECTORY(FFT)
+ADD_SUBDIRECTORY(IterativeSolvers)
+ADD_SUBDIRECTORY(KroneckerProduct)
+ADD_SUBDIRECTORY(LevenbergMarquardt)
+ADD_SUBDIRECTORY(MatrixFunctions)
+ADD_SUBDIRECTORY(MoreVectorization)
+ADD_SUBDIRECTORY(NonLinearOptimization)
+ADD_SUBDIRECTORY(NumericalDiff)
+ADD_SUBDIRECTORY(Polynomials)
+ADD_SUBDIRECTORY(Skyline)
+ADD_SUBDIRECTORY(SparseExtra)
+ADD_SUBDIRECTORY(Splines)
diff --git a/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
new file mode 100644
index 0000000..3b6a69a
--- /dev/null
+++ b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
@@ -0,0 +1,805 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 David Harmon <dharmon@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H
+#define EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H
+
+#include <Eigen/Dense>
+
+namespace Eigen {
+
+namespace internal {
+ template<typename Scalar, typename RealScalar> struct arpack_wrapper;
+ template<typename MatrixSolver, typename MatrixType, typename Scalar, bool BisSPD> struct OP;
+}
+
+
+
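+// A minimal usage sketch (assumes the ARPACK library is linked; the sparse matrix and
+// parameters are illustrative):
+//
+//   Eigen::SparseMatrix<double> A; // selfadjoint, filled elsewhere
+//   Eigen::ArpackGeneralizedSelfAdjointEigenSolver<Eigen::SparseMatrix<double> >
+//       eigs(A, 3, "SA"); // the 3 smallest-algebraic eigenvalues
+//   if (eigs.info() == Eigen::Success)
+//     std::cout << eigs.eigenvalues() << std::endl;
+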
+template<typename MatrixType, typename MatrixSolver=SimplicialLLT<MatrixType>, bool BisSPD=false>
+class ArpackGeneralizedSelfAdjointEigenSolver
+{
+public:
+ //typedef typename MatrixSolver::MatrixType MatrixType;
+
+ /** \brief Scalar type for matrices of type \p MatrixType. */
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+
+ /** \brief Real scalar type for \p MatrixType.
+ *
+ * This is just \c Scalar if #Scalar is real (e.g., \c float or
+ * \c double), and the type of the real part of \c Scalar if #Scalar is
+ * complex.
+ */
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** \brief Type for vector of eigenvalues as returned by eigenvalues().
+ *
+ * This is a column vector with entries of type #RealScalar.
+ * The length of the vector is \p nbrEigenvalues.
+ */
+ typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType;
+
+ /** \brief Default constructor.
+ *
+ * The default constructor is for cases in which the user intends to
+ * perform decompositions via compute().
+ *
+ */
+ ArpackGeneralizedSelfAdjointEigenSolver()
+ : m_eivec(),
+ m_eivalues(),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_nbrConverged(0),
+ m_nbrIterations(0)
+ { }
+
+ /** \brief Constructor; computes generalized eigenvalues of given matrix with respect to another matrix.
+ *
+ * \param[in] A Self-adjoint matrix whose eigenvalues / eigenvectors will be
+ * computed. By default, the upper triangular part is used, but can be changed
+ * through the template parameter.
+ * \param[in] B Self-adjoint matrix for the generalized eigenvalue problem.
+ * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute.
+ * Must be less than the size of the input matrix, or an error is returned.
+ * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with
+ * respective meanings to find the largest magnitude, smallest magnitude,
+ * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this
+ * value can contain a floating point value in string form, in which case the
+ * eigenvalues closest to this value will be found.
+ * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+ * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which
+ * means machine precision.
+ *
+ * This constructor calls compute(const MatrixType&, const MatrixType&, Index, string, int, RealScalar)
+ * to compute the eigenvalues of the matrix \p A with respect to \p B. The eigenvectors are computed if
+ * \p options equals #ComputeEigenvectors.
+ *
+ */
+ ArpackGeneralizedSelfAdjointEigenSolver(const MatrixType& A, const MatrixType& B,
+ Index nbrEigenvalues, std::string eigs_sigma="LM",
+ int options=ComputeEigenvectors, RealScalar tol=0.0)
+ : m_eivec(),
+ m_eivalues(),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_nbrConverged(0),
+ m_nbrIterations(0)
+ {
+ compute(A, B, nbrEigenvalues, eigs_sigma, options, tol);
+ }
+
+ /** \brief Constructor; computes eigenvalues of given matrix.
+ *
+ * \param[in] A Self-adjoint matrix whose eigenvalues / eigenvectors will be
+ * computed. By default, the upper triangular part is used, but can be changed
+ * through the template parameter.
+ * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute.
+ * Must be less than the size of the input matrix, or an error is returned.
+ * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with
+ * respective meanings to find the largest magnitude, smallest magnitude,
+ * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this
+ * value can contain a floating point value in string form, in which case the
+ * eigenvalues closest to this value will be found.
+ * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+ * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which
+ * means machine precision.
+ *
+ * This constructor calls compute(const MatrixType&, Index, string, int, RealScalar)
+ * to compute the eigenvalues of the matrix \p A. The eigenvectors are computed if
+ * \p options equals #ComputeEigenvectors.
+ *
+ */
+
+ ArpackGeneralizedSelfAdjointEigenSolver(const MatrixType& A,
+ Index nbrEigenvalues, std::string eigs_sigma="LM",
+ int options=ComputeEigenvectors, RealScalar tol=0.0)
+ : m_eivec(),
+ m_eivalues(),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_nbrConverged(0),
+ m_nbrIterations(0)
+ {
+ compute(A, nbrEigenvalues, eigs_sigma, options, tol);
+ }
+
+
+ /** \brief Computes generalized eigenvalues / eigenvectors of given matrix using the external ARPACK library.
+ *
+ * \param[in] A Selfadjoint matrix whose eigendecomposition is to be computed.
+ * \param[in] B Selfadjoint matrix for generalized eigenvalues.
+ * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute.
+ * Must be less than the size of the input matrix, or an error is returned.
+ * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with
+ * respective meanings to find the largest magnitude, smallest magnitude,
+ * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this
+ * value can contain a floating point value in string form, in which case the
+ * eigenvalues closest to this value will be found.
+ * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+ * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which
+ * means machine precision.
+ *
+ * \returns Reference to \c *this
+ *
+ * This function computes the generalized eigenvalues of \p A with respect to \p B using ARPACK. The eigenvalues()
+ * function can be used to retrieve them. If \p options equals #ComputeEigenvectors,
+ * then the eigenvectors are also computed and can be retrieved by
+ * calling eigenvectors().
+ *
+ */
+ ArpackGeneralizedSelfAdjointEigenSolver& compute(const MatrixType& A, const MatrixType& B,
+ Index nbrEigenvalues, std::string eigs_sigma="LM",
+ int options=ComputeEigenvectors, RealScalar tol=0.0);
+
+ /** \brief Computes eigenvalues / eigenvectors of given matrix using the external ARPACK library.
+ *
+ * \param[in] A Selfadjoint matrix whose eigendecomposition is to be computed.
+ * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute.
+ * Must be less than the size of the input matrix, or an error is returned.
+ * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with
+ * respective meanings to find the largest magnitude, smallest magnitude,
+ * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this
+ * value can contain a floating point value in string form, in which case the
+ * eigenvalues closest to this value will be found.
+ * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+ * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which
+ * means machine precision.
+ *
+ * \returns Reference to \c *this
+ *
+ * This function computes the eigenvalues of \p A using ARPACK. The eigenvalues()
+ * function can be used to retrieve them. If \p options equals #ComputeEigenvectors,
+ * then the eigenvectors are also computed and can be retrieved by
+ * calling eigenvectors().
+ *
+ */
+ ArpackGeneralizedSelfAdjointEigenSolver& compute(const MatrixType& A,
+ Index nbrEigenvalues, std::string eigs_sigma="LM",
+ int options=ComputeEigenvectors, RealScalar tol=0.0);
+
+
+ /** \brief Returns the eigenvectors of given matrix.
+ *
+ * \returns A const reference to the matrix whose columns are the eigenvectors.
+ *
+ * \pre The eigenvectors have been computed before.
+ *
+ * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
+ * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
+ * eigenvectors are normalized to have (Euclidean) norm equal to one. If
+ * this object was used to solve the eigenproblem for the selfadjoint
+ * matrix \f$ A \f$, then the matrix returned by this function is the
+ * matrix \f$ V \f$ in the eigendecomposition \f$ A V = V D \f$.
+ * For the generalized eigenproblem, the matrix returned is the solution \f$ V \f$ of \f$ A V = B V D \f$.
+ *
+ * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
+ *
+ * \sa eigenvalues()
+ */
+ const Matrix<Scalar, Dynamic, Dynamic>& eigenvectors() const
+ {
+ eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ return m_eivec;
+ }
+
+ /** \brief Returns the eigenvalues of given matrix.
+ *
+ * \returns A const reference to the column vector containing the eigenvalues.
+ *
+ * \pre The eigenvalues have been computed before.
+ *
+ * The eigenvalues are repeated according to their algebraic multiplicity,
+ * so there are as many eigenvalues as requested via \p nbrEigenvalues. The eigenvalues
+ * are sorted in increasing order.
+ *
+ * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
+ *
+ * \sa eigenvectors(), MatrixBase::eigenvalues()
+ */
+ const Matrix<Scalar, Dynamic, 1>& eigenvalues() const
+ {
+ eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized.");
+ return m_eivalues;
+ }
+
+ /** \brief Computes the positive-definite square root of the matrix.
+ *
+ * \returns the positive-definite square root of the matrix
+ *
+ * \pre The eigenvalues and eigenvectors of a positive-definite matrix
+ * have been computed before.
+ *
+ * The square root of a positive-definite matrix \f$ A \f$ is the
+ * positive-definite matrix whose square equals \f$ A \f$. This function
+ * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
+ * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
+ *
+ * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
+ *
+ * \sa operatorInverseSqrt(),
+ * \ref MatrixFunctions_Module "MatrixFunctions Module"
+ */
+ Matrix<Scalar, Dynamic, Dynamic> operatorSqrt() const
+ {
+ eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
+ }
+
+ /** \brief Computes the inverse square root of the matrix.
+ *
+ * \returns the inverse positive-definite square root of the matrix
+ *
+ * \pre The eigenvalues and eigenvectors of a positive-definite matrix
+ * have been computed before.
+ *
+ * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
+ * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
+ * cheaper than first computing the square root with operatorSqrt() and
+ * then its inverse with MatrixBase::inverse().
+ *
+ * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
+ *
+ * \sa operatorSqrt(), MatrixBase::inverse(),
+ * \ref MatrixFunctions_Module "MatrixFunctions Module"
+ */
+ Matrix<Scalar, Dynamic, Dynamic> operatorInverseSqrt() const
+ {
+ eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
+ }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized.");
+ return m_info;
+ }
+
+ size_t getNbrConvergedEigenValues() const
+ { return m_nbrConverged; }
+
+ size_t getNbrIterations() const
+ { return m_nbrIterations; }
+
+protected:
+ Matrix<Scalar, Dynamic, Dynamic> m_eivec;
+ Matrix<Scalar, Dynamic, 1> m_eivalues;
+ ComputationInfo m_info;
+ bool m_isInitialized;
+ bool m_eigenvectorsOk;
+
+ size_t m_nbrConverged;
+ size_t m_nbrIterations;
+};
+
+
+
+
+
+template<typename MatrixType, typename MatrixSolver, bool BisSPD>
+ArpackGeneralizedSelfAdjointEigenSolver<MatrixType, MatrixSolver, BisSPD>&
+ ArpackGeneralizedSelfAdjointEigenSolver<MatrixType, MatrixSolver, BisSPD>
+::compute(const MatrixType& A, Index nbrEigenvalues,
+ std::string eigs_sigma, int options, RealScalar tol)
+{
+ MatrixType B(0,0);
+ compute(A, B, nbrEigenvalues, eigs_sigma, options, tol);
+
+ return *this;
+}
+
+
+template<typename MatrixType, typename MatrixSolver, bool BisSPD>
+ArpackGeneralizedSelfAdjointEigenSolver<MatrixType, MatrixSolver, BisSPD>&
+ ArpackGeneralizedSelfAdjointEigenSolver<MatrixType, MatrixSolver, BisSPD>
+::compute(const MatrixType& A, const MatrixType& B, Index nbrEigenvalues,
+ std::string eigs_sigma, int options, RealScalar tol)
+{
+ eigen_assert(A.cols() == A.rows());
+ eigen_assert(B.cols() == B.rows());
+ eigen_assert(B.rows() == 0 || A.cols() == B.rows());
+ eigen_assert((options &~ (EigVecMask | GenEigMask)) == 0
+ && (options & EigVecMask) != EigVecMask
+ && "invalid option parameter");
+
+ bool isBempty = (B.rows() == 0) || (B.cols() == 0);
+
+ // For clarity, all parameters match their ARPACK name
+ //
+ // Always 0 on the first call
+ //
+ int ido = 0;
+
+ int n = (int)A.cols();
+
+ // User options: "LA", "SA", "SM", "LM", "BE"
+ //
+ char whch[3] = "LM";
+
+ // Specifies the shift if iparam[6] = { 3, 4, 5 }, not used if iparam[6] = { 1, 2 }
+ //
+ RealScalar sigma = 0.0;
+
+ if (eigs_sigma.length() >= 2 && isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1]))
+ {
+ eigs_sigma[0] = toupper(eigs_sigma[0]);
+ eigs_sigma[1] = toupper(eigs_sigma[1]);
+
+ // In the following special case we're going to invert the problem, since solving
+ // for the largest magnitude is much faster,
+ // i.e., if 'SM' is specified, we're going to really use 'LM', the default
+ //
+ if (eigs_sigma.substr(0,2) != "SM")
+ {
+ whch[0] = eigs_sigma[0];
+ whch[1] = eigs_sigma[1];
+ }
+ }
+ else
+ {
+ eigen_assert(false && "Specifying clustered eigenvalues is not yet supported!");
+
+ // If it's not scalar values, then the user may be explicitly
+ // specifying the sigma value to cluster the evs around
+ //
+ sigma = atof(eigs_sigma.c_str());
+
+ // If atof fails, it returns 0.0, which is a fine default
+ //
+ }
+
+ // "I" means normal eigenvalue problem, "G" means generalized
+ //
+ char bmat[2] = "I";
+ if (eigs_sigma.substr(0,2) == "SM" || !(isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1])) || (!isBempty && !BisSPD))
+ bmat[0] = 'G';
+
+ // Now we determine the mode to use
+ //
+ int mode = (bmat[0] == 'G') + 1;
+ if (eigs_sigma.substr(0,2) == "SM" || !(isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1])))
+ {
+ // We're going to use shift-and-invert mode, and basically find
+ // the largest eigenvalues of the inverse operator
+ //
+ mode = 3;
+ }
+
+ // The user-specified number of eigenvalues/vectors to compute
+ //
+ int nev = (int)nbrEigenvalues;
+
+ // Allocate space for ARPACK to store the residual
+ //
+ Scalar *resid = new Scalar[n];
+
+ // Number of Lanczos vectors, must satisfy nev < ncv <= n
+ // Note that this indicates that nev != n, and we cannot compute
+ // all eigenvalues of a matrix
+ //
+ int ncv = std::min(std::max(2*nev, 20), n);
+
+ // The working n x ncv matrix, also store the final eigenvectors (if computed)
+ //
+ Scalar *v = new Scalar[n*ncv];
+ int ldv = n;
+
+ // Working space
+ //
+ Scalar *workd = new Scalar[3*n];
+ int lworkl = ncv*ncv+8*ncv; // Must be at least this length
+ Scalar *workl = new Scalar[lworkl];
+
+ int *iparam= new int[11];
+ iparam[0] = 1; // 1 means we let ARPACK perform the shifts, 0 means we'd have to do it
+  iparam[2] = std::max(300, (int)std::ceil(2.0*n/std::max(ncv,1))); // maximum number of Arnoldi update iterations
+ iparam[6] = mode; // The mode, 1 is standard ev problem, 2 for generalized ev, 3 for shift-and-invert
+
+  // Used during reverse communication to indicate where the work arrays start
+ //
+ int *ipntr = new int[11];
+
+  // Error codes are returned in here; an initial value of 0 indicates a random initial
+  // residual vector is used, any other value means resid contains the initial residual
+  // vector, possibly from a previous run
+ //
+ int info = 0;
+
+ Scalar scale = 1.0;
+ //if (!isBempty)
+ //{
+ //Scalar scale = B.norm() / std::sqrt(n);
+ //scale = std::pow(2, std::floor(std::log(scale+1)));
+ ////M /= scale;
+ //for (size_t i=0; i<(size_t)B.outerSize(); i++)
+ // for (typename MatrixType::InnerIterator it(B, i); it; ++it)
+ // it.valueRef() /= scale;
+ //}
+
+ MatrixSolver OP;
+ if (mode == 1 || mode == 2)
+ {
+ if (!isBempty)
+ OP.compute(B);
+ }
+ else if (mode == 3)
+ {
+ if (sigma == 0.0)
+ {
+ OP.compute(A);
+ }
+ else
+ {
+ // Note: We will never enter here because sigma must be 0.0
+ //
+ if (isBempty)
+ {
+ MatrixType AminusSigmaB(A);
+ for (Index i=0; i<A.rows(); ++i)
+ AminusSigmaB.coeffRef(i,i) -= sigma;
+
+ OP.compute(AminusSigmaB);
+ }
+ else
+ {
+ MatrixType AminusSigmaB = A - sigma * B;
+ OP.compute(AminusSigmaB);
+ }
+ }
+ }
+
+ if (!(mode == 1 && isBempty) && !(mode == 2 && isBempty) && OP.info() != Success)
+    std::cerr << "Error factoring matrix" << std::endl;
+
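+  // ARPACK reverse-communication loop: each call to saupd returns a request
+  // code in ido that must be serviced before calling again (see the dsaupd
+  // documentation):
+  //   ido == -1 : compute out = OP * in (first call, no B * in available)
+  //   ido ==  1 : compute out = OP * in (B * in already available at ipntr[2])
+  //   ido ==  2 : compute out = B * in
+  //   ido == 99 : iteration is done
+  //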
+ do
+ {
+ internal::arpack_wrapper<Scalar, RealScalar>::saupd(&ido, bmat, &n, whch, &nev, &tol, resid,
+ &ncv, v, &ldv, iparam, ipntr, workd, workl,
+ &lworkl, &info);
+
+ if (ido == -1 || ido == 1)
+ {
+ Scalar *in = workd + ipntr[0] - 1;
+ Scalar *out = workd + ipntr[1] - 1;
+
+ if (ido == 1 && mode != 2)
+ {
+ Scalar *out2 = workd + ipntr[2] - 1;
+ if (isBempty || mode == 1)
+ Matrix<Scalar, Dynamic, 1>::Map(out2, n) = Matrix<Scalar, Dynamic, 1>::Map(in, n);
+ else
+ Matrix<Scalar, Dynamic, 1>::Map(out2, n) = B * Matrix<Scalar, Dynamic, 1>::Map(in, n);
+
+ in = workd + ipntr[2] - 1;
+ }
+
+ if (mode == 1)
+ {
+ if (isBempty)
+ {
+ // OP = A
+ //
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = A * Matrix<Scalar, Dynamic, 1>::Map(in, n);
+ }
+ else
+ {
+ // OP = L^{-1}AL^{-T}
+ //
+ internal::OP<MatrixSolver, MatrixType, Scalar, BisSPD>::applyOP(OP, A, n, in, out);
+ }
+ }
+ else if (mode == 2)
+ {
+ if (ido == 1)
+ Matrix<Scalar, Dynamic, 1>::Map(in, n) = A * Matrix<Scalar, Dynamic, 1>::Map(in, n);
+
+ // OP = B^{-1} A
+ //
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.solve(Matrix<Scalar, Dynamic, 1>::Map(in, n));
+ }
+ else if (mode == 3)
+ {
+      // OP = (A-\sigma B)^{-1} B (\sigma may be 0, and B may be the identity)
+      // The product B * in is already computed and stored at in if ido == 1
+ //
+ if (ido == 1 || isBempty)
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.solve(Matrix<Scalar, Dynamic, 1>::Map(in, n));
+ else
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.solve(B * Matrix<Scalar, Dynamic, 1>::Map(in, n));
+ }
+ }
+ else if (ido == 2)
+ {
+ Scalar *in = workd + ipntr[0] - 1;
+ Scalar *out = workd + ipntr[1] - 1;
+
+ if (isBempty || mode == 1)
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = Matrix<Scalar, Dynamic, 1>::Map(in, n);
+ else
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = B * Matrix<Scalar, Dynamic, 1>::Map(in, n);
+ }
+ } while (ido != 99);
+
+ if (info == 1)
+ m_info = NoConvergence;
+ else if (info == 3)
+ m_info = NumericalIssue;
+ else if (info < 0)
+ m_info = InvalidInput;
+ else if (info != 0)
+ eigen_assert(false && "Unknown ARPACK return value!");
+ else
+ {
+ // Do we compute eigenvectors or not?
+ //
+ int rvec = (options & ComputeEigenvectors) == ComputeEigenvectors;
+
+ // "A" means "All", use "S" to choose specific eigenvalues (not yet supported in ARPACK))
+ //
+ char howmny[2] = "A";
+
+ // if howmny == "S", specifies the eigenvalues to compute (not implemented in ARPACK)
+ //
+ int *select = new int[ncv];
+
+ // Final eigenvalues
+ //
+ m_eivalues.resize(nev, 1);
+
+ internal::arpack_wrapper<Scalar, RealScalar>::seupd(&rvec, howmny, select, m_eivalues.data(), v, &ldv,
+ &sigma, bmat, &n, whch, &nev, &tol, resid, &ncv,
+ v, &ldv, iparam, ipntr, workd, workl, &lworkl, &info);
+
+ if (info == -14)
+ m_info = NoConvergence;
+ else if (info != 0)
+ m_info = InvalidInput;
+ else
+ {
+ if (rvec)
+ {
+ m_eivec.resize(A.rows(), nev);
+ for (int i=0; i<nev; i++)
+ for (int j=0; j<n; j++)
+ m_eivec(j,i) = v[i*n+j] / scale;
+
+ if (mode == 1 && !isBempty && BisSPD)
+ internal::OP<MatrixSolver, MatrixType, Scalar, BisSPD>::project(OP, n, nev, m_eivec.data());
+
+ m_eigenvectorsOk = true;
+ }
+
+ m_nbrIterations = iparam[2];
+ m_nbrConverged = iparam[4];
+
+ m_info = Success;
+ }
+
+    delete[] select;
+ }
+
+  delete[] v;
+  delete[] iparam;
+  delete[] ipntr;
+  delete[] workd;
+  delete[] workl;
+  delete[] resid;
+
+ m_isInitialized = true;
+
+ return *this;
+}
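+
+// A minimal usage sketch (illustrative only, under the assumption that the
+// default-style template arguments shown here are acceptable): with a sparse
+// symmetric matrix and SimplicialLLT as the inner factorization, the solver
+// can be driven as follows; the matrix contents are assumed to be filled elsewhere.
+//
+//   #include <unsupported/Eigen/ArpackSupport>
+//   typedef Eigen::SparseMatrix<double> SpMat;
+//   SpMat A = ...; // symmetric, filled elsewhere
+//   Eigen::ArpackGeneralizedSelfAdjointEigenSolver<SpMat, Eigen::SimplicialLLT<SpMat> > eigs;
+//   eigs.compute(A, 3, "SM", Eigen::ComputeEigenvectors); // 3 smallest eigenvalues
+//   if (eigs.info() == Eigen::Success)
+//     std::cout << eigs.eigenvalues().transpose() << std::endl;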
+
+
+// Single precision
+//
+extern "C" void ssaupd_(int *ido, char *bmat, int *n, char *which,
+ int *nev, float *tol, float *resid, int *ncv,
+ float *v, int *ldv, int *iparam, int *ipntr,
+ float *workd, float *workl, int *lworkl,
+ int *info);
+
+extern "C" void sseupd_(int *rvec, char *All, int *select, float *d,
+ float *z, int *ldz, float *sigma,
+ char *bmat, int *n, char *which, int *nev,
+ float *tol, float *resid, int *ncv, float *v,
+ int *ldv, int *iparam, int *ipntr, float *workd,
+ float *workl, int *lworkl, int *ierr);
+
+// Double precision
+//
+extern "C" void dsaupd_(int *ido, char *bmat, int *n, char *which,
+ int *nev, double *tol, double *resid, int *ncv,
+ double *v, int *ldv, int *iparam, int *ipntr,
+ double *workd, double *workl, int *lworkl,
+ int *info);
+
+extern "C" void dseupd_(int *rvec, char *All, int *select, double *d,
+ double *z, int *ldz, double *sigma,
+ char *bmat, int *n, char *which, int *nev,
+ double *tol, double *resid, int *ncv, double *v,
+ int *ldv, int *iparam, int *ipntr, double *workd,
+ double *workl, int *lworkl, int *ierr);
+
+
+namespace internal {
+
+template<typename Scalar, typename RealScalar> struct arpack_wrapper
+{
+ static inline void saupd(int *ido, char *bmat, int *n, char *which,
+ int *nev, RealScalar *tol, Scalar *resid, int *ncv,
+ Scalar *v, int *ldv, int *iparam, int *ipntr,
+ Scalar *workd, Scalar *workl, int *lworkl, int *info)
+ {
+ EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
+ }
+
+ static inline void seupd(int *rvec, char *All, int *select, Scalar *d,
+ Scalar *z, int *ldz, RealScalar *sigma,
+ char *bmat, int *n, char *which, int *nev,
+ RealScalar *tol, Scalar *resid, int *ncv, Scalar *v,
+ int *ldv, int *iparam, int *ipntr, Scalar *workd,
+ Scalar *workl, int *lworkl, int *ierr)
+ {
+ EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
+ }
+};
+
+template <> struct arpack_wrapper<float, float>
+{
+ static inline void saupd(int *ido, char *bmat, int *n, char *which,
+ int *nev, float *tol, float *resid, int *ncv,
+ float *v, int *ldv, int *iparam, int *ipntr,
+ float *workd, float *workl, int *lworkl, int *info)
+ {
+ ssaupd_(ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info);
+ }
+
+ static inline void seupd(int *rvec, char *All, int *select, float *d,
+ float *z, int *ldz, float *sigma,
+ char *bmat, int *n, char *which, int *nev,
+ float *tol, float *resid, int *ncv, float *v,
+ int *ldv, int *iparam, int *ipntr, float *workd,
+ float *workl, int *lworkl, int *ierr)
+ {
+ sseupd_(rvec, All, select, d, z, ldz, sigma, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr,
+ workd, workl, lworkl, ierr);
+ }
+};
+
+template <> struct arpack_wrapper<double, double>
+{
+ static inline void saupd(int *ido, char *bmat, int *n, char *which,
+ int *nev, double *tol, double *resid, int *ncv,
+ double *v, int *ldv, int *iparam, int *ipntr,
+ double *workd, double *workl, int *lworkl, int *info)
+ {
+ dsaupd_(ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info);
+ }
+
+ static inline void seupd(int *rvec, char *All, int *select, double *d,
+ double *z, int *ldz, double *sigma,
+ char *bmat, int *n, char *which, int *nev,
+ double *tol, double *resid, int *ncv, double *v,
+ int *ldv, int *iparam, int *ipntr, double *workd,
+ double *workl, int *lworkl, int *ierr)
+ {
+    dseupd_(rvec, All, select, d, z, ldz, sigma, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr,
+ workd, workl, lworkl, ierr);
+ }
+};
+
+
+template<typename MatrixSolver, typename MatrixType, typename Scalar, bool BisSPD>
+struct OP
+{
+ static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out);
+ static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs);
+};
+
+template<typename MatrixSolver, typename MatrixType, typename Scalar>
+struct OP<MatrixSolver, MatrixType, Scalar, true>
+{
+ static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out)
+{
+ // OP = L^{-1} A L^{-T} (B = LL^T)
+ //
+ // First solve L^T out = in
+ //
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.matrixU().solve(Matrix<Scalar, Dynamic, 1>::Map(in, n));
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.permutationPinv() * Matrix<Scalar, Dynamic, 1>::Map(out, n);
+
+ // Then compute out = A out
+ //
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = A * Matrix<Scalar, Dynamic, 1>::Map(out, n);
+
+ // Then solve L out = out
+ //
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.permutationP() * Matrix<Scalar, Dynamic, 1>::Map(out, n);
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.matrixL().solve(Matrix<Scalar, Dynamic, 1>::Map(out, n));
+}
+
+ static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs)
+{
+ // Solve L^T out = in
+ //
+ Matrix<Scalar, Dynamic, Dynamic>::Map(vecs, n, k) = OP.matrixU().solve(Matrix<Scalar, Dynamic, Dynamic>::Map(vecs, n, k));
+ Matrix<Scalar, Dynamic, Dynamic>::Map(vecs, n, k) = OP.permutationPinv() * Matrix<Scalar, Dynamic, Dynamic>::Map(vecs, n, k);
+}
+
+};
+
+template<typename MatrixSolver, typename MatrixType, typename Scalar>
+struct OP<MatrixSolver, MatrixType, Scalar, false>
+{
+ static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out)
+{
+ eigen_assert(false && "Should never be in here...");
+}
+
+ static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs)
+{
+ eigen_assert(false && "Should never be in here...");
+}
+
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_ARPACKSELFADJOINTEIGENSOLVER_H
+
diff --git a/unsupported/Eigen/src/FFT/CMakeLists.txt b/unsupported/Eigen/src/FFT/CMakeLists.txt
new file mode 100644
index 0000000..edcffcb
--- /dev/null
+++ b/unsupported/Eigen/src/FFT/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_FFT_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_FFT_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/FFT COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/FFT/ei_fftw_impl.h b/unsupported/Eigen/src/FFT/ei_fftw_impl.h
new file mode 100644
index 0000000..d49aa17
--- /dev/null
+++ b/unsupported/Eigen/src/FFT/ei_fftw_impl.h
@@ -0,0 +1,261 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Mark Borgerding mark a borgerding net
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+namespace Eigen {
+
+namespace internal {
+
+ // FFTW uses non-const arguments
+ // so we must use ugly const_cast calls for all the args it uses
+ //
+ // This should be safe as long as
+ // 1. we use FFTW_ESTIMATE for all our planning
+ // see the FFTW docs section 4.3.2 "Planner Flags"
+ // 2. fftw_complex is compatible with std::complex
+ // This assumes std::complex<T> layout is array of size 2 with real,imag
+ template <typename T>
+ inline
+ T * fftw_cast(const T* p)
+ {
+ return const_cast<T*>( p);
+ }
+
+ inline
+ fftw_complex * fftw_cast( const std::complex<double> * p)
+ {
+ return const_cast<fftw_complex*>( reinterpret_cast<const fftw_complex*>(p) );
+ }
+
+ inline
+ fftwf_complex * fftw_cast( const std::complex<float> * p)
+ {
+ return const_cast<fftwf_complex*>( reinterpret_cast<const fftwf_complex*>(p) );
+ }
+
+ inline
+ fftwl_complex * fftw_cast( const std::complex<long double> * p)
+ {
+ return const_cast<fftwl_complex*>( reinterpret_cast<const fftwl_complex*>(p) );
+ }
+
+ template <typename T>
+ struct fftw_plan {};
+
+ template <>
+ struct fftw_plan<float>
+ {
+ typedef float scalar_type;
+ typedef fftwf_complex complex_type;
+ fftwf_plan m_plan;
+ fftw_plan() :m_plan(NULL) {}
+ ~fftw_plan() {if (m_plan) fftwf_destroy_plan(m_plan);}
+
+ inline
+ void fwd(complex_type * dst,complex_type * src,int nfft) {
+ if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwf_execute_dft( m_plan, src,dst);
+ }
+ inline
+ void inv(complex_type * dst,complex_type * src,int nfft) {
+ if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwf_execute_dft( m_plan, src,dst);
+ }
+ inline
+ void fwd(complex_type * dst,scalar_type * src,int nfft) {
+ if (m_plan==NULL) m_plan = fftwf_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwf_execute_dft_r2c( m_plan,src,dst);
+ }
+ inline
+ void inv(scalar_type * dst,complex_type * src,int nfft) {
+ if (m_plan==NULL)
+ m_plan = fftwf_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwf_execute_dft_c2r( m_plan, src,dst);
+ }
+
+ inline
+ void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
+ if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwf_execute_dft( m_plan, src,dst);
+ }
+ inline
+ void inv2( complex_type * dst,complex_type * src,int n0,int n1) {
+ if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwf_execute_dft( m_plan, src,dst);
+ }
+
+ };
+ template <>
+ struct fftw_plan<double>
+ {
+ typedef double scalar_type;
+ typedef fftw_complex complex_type;
+ ::fftw_plan m_plan;
+ fftw_plan() :m_plan(NULL) {}
+ ~fftw_plan() {if (m_plan) fftw_destroy_plan(m_plan);}
+
+ inline
+ void fwd(complex_type * dst,complex_type * src,int nfft) {
+ if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftw_execute_dft( m_plan, src,dst);
+ }
+ inline
+ void inv(complex_type * dst,complex_type * src,int nfft) {
+ if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftw_execute_dft( m_plan, src,dst);
+ }
+ inline
+ void fwd(complex_type * dst,scalar_type * src,int nfft) {
+ if (m_plan==NULL) m_plan = fftw_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftw_execute_dft_r2c( m_plan,src,dst);
+ }
+ inline
+ void inv(scalar_type * dst,complex_type * src,int nfft) {
+ if (m_plan==NULL)
+ m_plan = fftw_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftw_execute_dft_c2r( m_plan, src,dst);
+ }
+ inline
+ void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
+ if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftw_execute_dft( m_plan, src,dst);
+ }
+ inline
+ void inv2( complex_type * dst,complex_type * src,int n0,int n1) {
+ if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftw_execute_dft( m_plan, src,dst);
+ }
+ };
+ template <>
+ struct fftw_plan<long double>
+ {
+ typedef long double scalar_type;
+ typedef fftwl_complex complex_type;
+ fftwl_plan m_plan;
+ fftw_plan() :m_plan(NULL) {}
+ ~fftw_plan() {if (m_plan) fftwl_destroy_plan(m_plan);}
+
+ inline
+ void fwd(complex_type * dst,complex_type * src,int nfft) {
+ if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwl_execute_dft( m_plan, src,dst);
+ }
+ inline
+ void inv(complex_type * dst,complex_type * src,int nfft) {
+ if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwl_execute_dft( m_plan, src,dst);
+ }
+ inline
+ void fwd(complex_type * dst,scalar_type * src,int nfft) {
+ if (m_plan==NULL) m_plan = fftwl_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwl_execute_dft_r2c( m_plan,src,dst);
+ }
+ inline
+ void inv(scalar_type * dst,complex_type * src,int nfft) {
+ if (m_plan==NULL)
+ m_plan = fftwl_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwl_execute_dft_c2r( m_plan, src,dst);
+ }
+ inline
+ void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
+ if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwl_execute_dft( m_plan, src,dst);
+ }
+ inline
+ void inv2( complex_type * dst,complex_type * src,int n0,int n1) {
+ if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
+ fftwl_execute_dft( m_plan, src,dst);
+ }
+ };
+
+ template <typename _Scalar>
+ struct fftw_impl
+ {
+ typedef _Scalar Scalar;
+ typedef std::complex<Scalar> Complex;
+
+ inline
+ void clear()
+ {
+ m_plans.clear();
+ }
+
+ // complex-to-complex forward FFT
+ inline
+ void fwd( Complex * dst,const Complex *src,int nfft)
+ {
+ get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src),nfft );
+ }
+
+ // real-to-complex forward FFT
+ inline
+ void fwd( Complex * dst,const Scalar * src,int nfft)
+ {
+ get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src) ,nfft);
+ }
+
+ // 2-d complex-to-complex
+ inline
+ void fwd2(Complex * dst, const Complex * src, int n0,int n1)
+ {
+ get_plan(n0,n1,false,dst,src).fwd2(fftw_cast(dst), fftw_cast(src) ,n0,n1);
+ }
+
+ // inverse complex-to-complex
+ inline
+ void inv(Complex * dst,const Complex *src,int nfft)
+ {
+ get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft );
+ }
+
+ // half-complex to scalar
+ inline
+ void inv( Scalar * dst,const Complex * src,int nfft)
+ {
+ get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft );
+ }
+
+ // 2-d complex-to-complex
+ inline
+ void inv2(Complex * dst, const Complex * src, int n0,int n1)
+ {
+ get_plan(n0,n1,true,dst,src).inv2(fftw_cast(dst), fftw_cast(src) ,n0,n1);
+ }
+
+
+ protected:
+ typedef fftw_plan<Scalar> PlanData;
+
+ typedef std::map<int64_t,PlanData> PlanMap;
+
+ PlanMap m_plans;
+
+ inline
+ PlanData & get_plan(int nfft,bool inverse,void * dst,const void * src)
+ {
+ bool inplace = (dst==src);
+ bool aligned = ( (reinterpret_cast<size_t>(src)&15) | (reinterpret_cast<size_t>(dst)&15) ) == 0;
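+      // Pack the plan properties into a unique map key; the trailing shift leaves
+      // the lowest bit 0 for 1-D plans (the 2-D overload below sets it to 1)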
+ int64_t key = ( (nfft<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1;
+ return m_plans[key];
+ }
+
+ inline
+ PlanData & get_plan(int n0,int n1,bool inverse,void * dst,const void * src)
+ {
+ bool inplace = (dst==src);
+ bool aligned = ( (reinterpret_cast<size_t>(src)&15) | (reinterpret_cast<size_t>(dst)&15) ) == 0;
+ int64_t key = ( ( (((int64_t)n0) << 30)|(n1<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1 ) + 1;
+ return m_plans[key];
+ }
+ };
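+
+  // A minimal usage sketch (an illustration, not part of this header): this
+  // backend is selected by the Eigen::FFT front-end when EIGEN_FFTW_DEFAULT is
+  // defined before including the module header.
+  //
+  //   #define EIGEN_FFTW_DEFAULT
+  //   #include <unsupported/Eigen/FFT>
+  //   Eigen::FFT<double> fft;
+  //   std::vector<double> time(1024, 0.0);          // fill with samples
+  //   std::vector<std::complex<double> > freq;
+  //   fft.fwd(freq, time);  // real-to-complex forward transform
+  //   fft.inv(time, freq);  // inverse transform back to the time domain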
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/src/FFT/ei_kissfft_impl.h b/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
new file mode 100644
index 0000000..be51b4e
--- /dev/null
+++ b/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
@@ -0,0 +1,420 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Mark Borgerding mark a borgerding net
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+namespace Eigen {
+
+namespace internal {
+
+  // This FFT implementation was derived from kissfft http://sourceforge.net/projects/kissfft
+ // Copyright 2003-2009 Mark Borgerding
+
+template <typename _Scalar>
+struct kiss_cpx_fft
+{
+ typedef _Scalar Scalar;
+ typedef std::complex<Scalar> Complex;
+ std::vector<Complex> m_twiddles;
+ std::vector<int> m_stageRadix;
+ std::vector<int> m_stageRemainder;
+ std::vector<Complex> m_scratchBuf;
+ bool m_inverse;
+
+ inline
+ void make_twiddles(int nfft,bool inverse)
+ {
+ using std::acos;
+ m_inverse = inverse;
+ m_twiddles.resize(nfft);
+ Scalar phinc = (inverse?2:-2)* acos( (Scalar) -1) / nfft;
+ for (int i=0;i<nfft;++i)
+ m_twiddles[i] = exp( Complex(0,i*phinc) );
+ }
+
+ void factorize(int nfft)
+ {
+ //start factoring out 4's, then 2's, then 3,5,7,9,...
+ int n= nfft;
+ int p=4;
+ do {
+ while (n % p) {
+ switch (p) {
+ case 4: p = 2; break;
+ case 2: p = 3; break;
+ default: p += 2; break;
+ }
+ if (p*p>n)
+ p=n;// impossible to have a factor > sqrt(n)
+ }
+ n /= p;
+ m_stageRadix.push_back(p);
+ m_stageRemainder.push_back(n);
+ if ( p > 5 )
+ m_scratchBuf.resize(p); // scratchbuf will be needed in bfly_generic
+ }while(n>1);
+ }
+
+ template <typename _Src>
+ inline
+ void work( int stage,Complex * xout, const _Src * xin, size_t fstride,size_t in_stride)
+ {
+ int p = m_stageRadix[stage];
+ int m = m_stageRemainder[stage];
+ Complex * Fout_beg = xout;
+ Complex * Fout_end = xout + p*m;
+
+ if (m>1) {
+ do{
+ // recursive call:
+ // DFT of size m*p performed by doing
+ // p instances of smaller DFTs of size m,
+ // each one takes a decimated version of the input
+ work(stage+1, xout , xin, fstride*p,in_stride);
+ xin += fstride*in_stride;
+ }while( (xout += m) != Fout_end );
+ }else{
+ do{
+ *xout = *xin;
+ xin += fstride*in_stride;
+ }while(++xout != Fout_end );
+ }
+ xout=Fout_beg;
+
+ // recombine the p smaller DFTs
+ switch (p) {
+ case 2: bfly2(xout,fstride,m); break;
+ case 3: bfly3(xout,fstride,m); break;
+ case 4: bfly4(xout,fstride,m); break;
+ case 5: bfly5(xout,fstride,m); break;
+ default: bfly_generic(xout,fstride,m,p); break;
+ }
+ }
+
+ inline
+ void bfly2( Complex * Fout, const size_t fstride, int m)
+ {
+ for (int k=0;k<m;++k) {
+ Complex t = Fout[m+k] * m_twiddles[k*fstride];
+ Fout[m+k] = Fout[k] - t;
+ Fout[k] += t;
+ }
+ }
+
+ inline
+ void bfly4( Complex * Fout, const size_t fstride, const size_t m)
+ {
+ Complex scratch[6];
+ int negative_if_inverse = m_inverse * -2 +1;
+ for (size_t k=0;k<m;++k) {
+ scratch[0] = Fout[k+m] * m_twiddles[k*fstride];
+ scratch[1] = Fout[k+2*m] * m_twiddles[k*fstride*2];
+ scratch[2] = Fout[k+3*m] * m_twiddles[k*fstride*3];
+ scratch[5] = Fout[k] - scratch[1];
+
+ Fout[k] += scratch[1];
+ scratch[3] = scratch[0] + scratch[2];
+ scratch[4] = scratch[0] - scratch[2];
+ scratch[4] = Complex( scratch[4].imag()*negative_if_inverse , -scratch[4].real()* negative_if_inverse );
+
+ Fout[k+2*m] = Fout[k] - scratch[3];
+ Fout[k] += scratch[3];
+ Fout[k+m] = scratch[5] + scratch[4];
+ Fout[k+3*m] = scratch[5] - scratch[4];
+ }
+ }
+
+ inline
+ void bfly3( Complex * Fout, const size_t fstride, const size_t m)
+ {
+ size_t k=m;
+ const size_t m2 = 2*m;
+ Complex *tw1,*tw2;
+ Complex scratch[5];
+ Complex epi3;
+ epi3 = m_twiddles[fstride*m];
+
+ tw1=tw2=&m_twiddles[0];
+
+ do{
+ scratch[1]=Fout[m] * *tw1;
+ scratch[2]=Fout[m2] * *tw2;
+
+ scratch[3]=scratch[1]+scratch[2];
+ scratch[0]=scratch[1]-scratch[2];
+ tw1 += fstride;
+ tw2 += fstride*2;
+ Fout[m] = Complex( Fout->real() - Scalar(.5)*scratch[3].real() , Fout->imag() - Scalar(.5)*scratch[3].imag() );
+ scratch[0] *= epi3.imag();
+ *Fout += scratch[3];
+ Fout[m2] = Complex( Fout[m].real() + scratch[0].imag() , Fout[m].imag() - scratch[0].real() );
+ Fout[m] += Complex( -scratch[0].imag(),scratch[0].real() );
+ ++Fout;
+ }while(--k);
+ }
+
+ inline
+ void bfly5( Complex * Fout, const size_t fstride, const size_t m)
+ {
+ Complex *Fout0,*Fout1,*Fout2,*Fout3,*Fout4;
+ size_t u;
+ Complex scratch[13];
+ Complex * twiddles = &m_twiddles[0];
+ Complex *tw;
+ Complex ya,yb;
+ ya = twiddles[fstride*m];
+ yb = twiddles[fstride*2*m];
+
+ Fout0=Fout;
+ Fout1=Fout0+m;
+ Fout2=Fout0+2*m;
+ Fout3=Fout0+3*m;
+ Fout4=Fout0+4*m;
+
+ tw=twiddles;
+ for ( u=0; u<m; ++u ) {
+ scratch[0] = *Fout0;
+
+ scratch[1] = *Fout1 * tw[u*fstride];
+ scratch[2] = *Fout2 * tw[2*u*fstride];
+ scratch[3] = *Fout3 * tw[3*u*fstride];
+ scratch[4] = *Fout4 * tw[4*u*fstride];
+
+ scratch[7] = scratch[1] + scratch[4];
+ scratch[10] = scratch[1] - scratch[4];
+ scratch[8] = scratch[2] + scratch[3];
+ scratch[9] = scratch[2] - scratch[3];
+
+ *Fout0 += scratch[7];
+ *Fout0 += scratch[8];
+
+ scratch[5] = scratch[0] + Complex(
+ (scratch[7].real()*ya.real() ) + (scratch[8].real() *yb.real() ),
+ (scratch[7].imag()*ya.real()) + (scratch[8].imag()*yb.real())
+ );
+
+ scratch[6] = Complex(
+ (scratch[10].imag()*ya.imag()) + (scratch[9].imag()*yb.imag()),
+ -(scratch[10].real()*ya.imag()) - (scratch[9].real()*yb.imag())
+ );
+
+ *Fout1 = scratch[5] - scratch[6];
+ *Fout4 = scratch[5] + scratch[6];
+
+ scratch[11] = scratch[0] +
+ Complex(
+ (scratch[7].real()*yb.real()) + (scratch[8].real()*ya.real()),
+ (scratch[7].imag()*yb.real()) + (scratch[8].imag()*ya.real())
+ );
+
+ scratch[12] = Complex(
+ -(scratch[10].imag()*yb.imag()) + (scratch[9].imag()*ya.imag()),
+ (scratch[10].real()*yb.imag()) - (scratch[9].real()*ya.imag())
+ );
+
+ *Fout2=scratch[11]+scratch[12];
+ *Fout3=scratch[11]-scratch[12];
+
+ ++Fout0;++Fout1;++Fout2;++Fout3;++Fout4;
+ }
+ }
+
+ /* perform the butterfly for one stage of a mixed radix FFT */
+ inline
+ void bfly_generic(
+ Complex * Fout,
+ const size_t fstride,
+ int m,
+ int p
+ )
+ {
+ int u,k,q1,q;
+ Complex * twiddles = &m_twiddles[0];
+ Complex t;
+ int Norig = static_cast<int>(m_twiddles.size());
+ Complex * scratchbuf = &m_scratchBuf[0];
+
+ for ( u=0; u<m; ++u ) {
+ k=u;
+ for ( q1=0 ; q1<p ; ++q1 ) {
+ scratchbuf[q1] = Fout[ k ];
+ k += m;
+ }
+
+ k=u;
+ for ( q1=0 ; q1<p ; ++q1 ) {
+ int twidx=0;
+ Fout[ k ] = scratchbuf[0];
+ for (q=1;q<p;++q ) {
+ twidx += static_cast<int>(fstride) * k;
+ if (twidx>=Norig) twidx-=Norig;
+ t=scratchbuf[q] * twiddles[twidx];
+ Fout[ k ] += t;
+ }
+ k += m;
+ }
+ }
+ }
+};
+
+template <typename _Scalar>
+struct kissfft_impl
+{
+ typedef _Scalar Scalar;
+ typedef std::complex<Scalar> Complex;
+
+ void clear()
+ {
+ m_plans.clear();
+ m_realTwiddles.clear();
+ }
+
+ inline
+ void fwd( Complex * dst,const Complex *src,int nfft)
+ {
+ get_plan(nfft,false).work(0, dst, src, 1,1);
+ }
+
+ inline
+ void fwd2( Complex * dst,const Complex *src,int n0,int n1)
+ {
+ EIGEN_UNUSED_VARIABLE(dst);
+ EIGEN_UNUSED_VARIABLE(src);
+ EIGEN_UNUSED_VARIABLE(n0);
+ EIGEN_UNUSED_VARIABLE(n1);
+ }
+
+ inline
+ void inv2( Complex * dst,const Complex *src,int n0,int n1)
+ {
+ EIGEN_UNUSED_VARIABLE(dst);
+ EIGEN_UNUSED_VARIABLE(src);
+ EIGEN_UNUSED_VARIABLE(n0);
+ EIGEN_UNUSED_VARIABLE(n1);
+ }
+
+ // real-to-complex forward FFT
+ // perform two FFTs of src even and src odd
+ // then twiddle to recombine them into the half-spectrum format
+ // then fill in the conjugate symmetric half
+ inline
+ void fwd( Complex * dst,const Scalar * src,int nfft)
+ {
+ if ( nfft&3 ) {
+      // fall back to the generic complex FFT when nfft is not a multiple of 4
+ m_tmpBuf1.resize(nfft);
+ get_plan(nfft,false).work(0, &m_tmpBuf1[0], src, 1,1);
+ std::copy(m_tmpBuf1.begin(),m_tmpBuf1.begin()+(nfft>>1)+1,dst );
+ }else{
+ int ncfft = nfft>>1;
+ int ncfft2 = nfft>>2;
+ Complex * rtw = real_twiddles(ncfft2);
+
+ // use optimized mode for even real
+ fwd( dst, reinterpret_cast<const Complex*> (src), ncfft);
+ Complex dc = dst[0].real() + dst[0].imag();
+ Complex nyquist = dst[0].real() - dst[0].imag();
+ int k;
+ for ( k=1;k <= ncfft2 ; ++k ) {
+ Complex fpk = dst[k];
+ Complex fpnk = conj(dst[ncfft-k]);
+ Complex f1k = fpk + fpnk;
+ Complex f2k = fpk - fpnk;
+ Complex tw= f2k * rtw[k-1];
+ dst[k] = (f1k + tw) * Scalar(.5);
+ dst[ncfft-k] = conj(f1k -tw)*Scalar(.5);
+ }
+ dst[0] = dc;
+ dst[ncfft] = nyquist;
+ }
+ }
+
+ // inverse complex-to-complex
+ inline
+ void inv(Complex * dst,const Complex *src,int nfft)
+ {
+ get_plan(nfft,true).work(0, dst, src, 1,1);
+ }
+
+ // half-complex to scalar
+ inline
+ void inv( Scalar * dst,const Complex * src,int nfft)
+ {
+ if (nfft&3) {
+ m_tmpBuf1.resize(nfft);
+ m_tmpBuf2.resize(nfft);
+ std::copy(src,src+(nfft>>1)+1,m_tmpBuf1.begin() );
+ for (int k=1;k<(nfft>>1)+1;++k)
+ m_tmpBuf1[nfft-k] = conj(m_tmpBuf1[k]);
+ inv(&m_tmpBuf2[0],&m_tmpBuf1[0],nfft);
+ for (int k=0;k<nfft;++k)
+ dst[k] = m_tmpBuf2[k].real();
+ }else{
+ // optimized version for multiple of 4
+ int ncfft = nfft>>1;
+ int ncfft2 = nfft>>2;
+ Complex * rtw = real_twiddles(ncfft2);
+ m_tmpBuf1.resize(ncfft);
+ m_tmpBuf1[0] = Complex( src[0].real() + src[ncfft].real(), src[0].real() - src[ncfft].real() );
+ for (int k = 1; k <= ncfft / 2; ++k) {
+ Complex fk = src[k];
+ Complex fnkc = conj(src[ncfft-k]);
+ Complex fek = fk + fnkc;
+ Complex tmp = fk - fnkc;
+ Complex fok = tmp * conj(rtw[k-1]);
+ m_tmpBuf1[k] = fek + fok;
+ m_tmpBuf1[ncfft-k] = conj(fek - fok);
+ }
+ get_plan(ncfft,true).work(0, reinterpret_cast<Complex*>(dst), &m_tmpBuf1[0], 1,1);
+ }
+ }
+
+ protected:
+ typedef kiss_cpx_fft<Scalar> PlanData;
+ typedef std::map<int,PlanData> PlanMap;
+
+ PlanMap m_plans;
+ std::map<int, std::vector<Complex> > m_realTwiddles;
+ std::vector<Complex> m_tmpBuf1;
+ std::vector<Complex> m_tmpBuf2;
+
+ inline
+ int PlanKey(int nfft, bool isinverse) const { return (nfft<<1) | int(isinverse); }
+
+ inline
+ PlanData & get_plan(int nfft, bool inverse)
+ {
+ // TODO look for PlanKey(nfft, ! inverse) and conjugate the twiddles
+ PlanData & pd = m_plans[ PlanKey(nfft,inverse) ];
+ if ( pd.m_twiddles.size() == 0 ) {
+ pd.make_twiddles(nfft,inverse);
+ pd.factorize(nfft);
+ }
+ return pd;
+ }
+
+ inline
+ Complex * real_twiddles(int ncfft2)
+ {
+ using std::acos;
+ std::vector<Complex> & twidref = m_realTwiddles[ncfft2];// creates new if not there
+ if ( (int)twidref.size() != ncfft2 ) {
+ twidref.resize(ncfft2);
+ int ncfft= ncfft2<<1;
+ Scalar pi = acos( Scalar(-1) );
+ for (int k=1;k<=ncfft2;++k)
+ twidref[k-1] = exp( Complex(0,-pi * (Scalar(k) / ncfft + Scalar(.5)) ) );
+ }
+ return &twidref[0];
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt b/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt
new file mode 100644
index 0000000..7986afc
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_IterativeSolvers_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_IterativeSolvers_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/IterativeSolvers COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
new file mode 100644
index 0000000..dc0093e
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
@@ -0,0 +1,189 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+
+/* NOTE The functions of this file have been adapted from the GMM++ library */
+
+//========================================================================
+//
+// Copyright (C) 2002-2007 Yves Renard
+//
+// This file is a part of GETFEM++
+//
+// Getfem++ is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation; version 2.1 of the License.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+// You should have received a copy of the GNU Lesser General Public
+// License along with this program; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
+// USA.
+//
+//========================================================================
+
+#include "../../../../Eigen/src/Core/util/NonMPL2.h"
+
+#ifndef EIGEN_CONSTRAINEDCG_H
+#define EIGEN_CONSTRAINEDCG_H
+
+#include <Eigen/Core>
+
+namespace Eigen {
+
+namespace internal {
+
+/** \ingroup IterativeSolvers_Module
+ * Compute the pseudo inverse of the non-square matrix C such that
+ * \f$ CINV = (C * C^T)^{-1} * C \f$ based on a conjugate gradient method.
+ *
+ * This function is internally used by constrained_cg.
+ */
+template <typename CMatrix, typename CINVMatrix>
+void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
+{
+  // Possible optimization: keep a copy of the row and precompute C * trans(C).
+ typedef typename CMatrix::Scalar Scalar;
+ typedef typename CMatrix::Index Index;
+ // FIXME use sparse vectors ?
+ typedef Matrix<Scalar,Dynamic,1> TmpVec;
+
+ Index rows = C.rows(), cols = C.cols();
+
+ TmpVec d(rows), e(rows), l(cols), p(rows), q(rows), r(rows);
+ Scalar rho, rho_1, alpha;
+ d.setZero();
+
+ typedef Triplet<double> T;
+ std::vector<T> tripletList;
+
+ for (Index i = 0; i < rows; ++i)
+ {
+ d[i] = 1.0;
+ rho = 1.0;
+ e.setZero();
+ r = d;
+ p = d;
+
+ while (rho >= 1e-38)
+ { /* conjugate gradient to compute e */
+ /* which is the i-th row of inv(C * trans(C)) */
+ l = C.transpose() * p;
+ q = C * l;
+ alpha = rho / p.dot(q);
+ e += alpha * p;
+ r += -alpha * q;
+ rho_1 = rho;
+ rho = r.dot(r);
+ p = (rho/rho_1) * p + r;
+ }
+
+ l = C.transpose() * e; // l is the i-th row of CINV
+ // FIXME add a generic "prune/filter" expression for both dense and sparse object to sparse
+ for (Index j=0; j<l.size(); ++j)
+      if (std::abs(l[j])>1e-15)  // keep only the significant entries of the row
+ tripletList.push_back(T(i,j,l(j)));
+
+
+ d[i] = 0.0;
+ }
+ CINV.setFromTriplets(tripletList.begin(), tripletList.end());
+}
+
+
+
+/** \ingroup IterativeSolvers_Module
+ * Constrained conjugate gradient
+ *
+ * Computes the minimum of \f$ \frac{1}{2}(Ax \cdot x) - b \cdot x \f$ under the constraint \f$ Cx \le f \f$
+ */
+template<typename TMatrix, typename CMatrix,
+ typename VectorX, typename VectorB, typename VectorF>
+void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
+ const VectorB& b, const VectorF& f, IterationController &iter)
+{
+ using std::sqrt;
+ typedef typename TMatrix::Scalar Scalar;
+ typedef typename TMatrix::Index Index;
+ typedef Matrix<Scalar,Dynamic,1> TmpVec;
+
+ Scalar rho = 1.0, rho_1, lambda, gamma;
+ Index xSize = x.size();
+ TmpVec p(xSize), q(xSize), q2(xSize),
+ r(xSize), old_z(xSize), z(xSize),
+ memox(xSize);
+ std::vector<bool> satured(C.rows());
+ p.setZero();
+ iter.setRhsNorm(sqrt(b.dot(b))); // gael vect_sp(PS, b, b)
+ if (iter.rhsNorm() == 0.0) iter.setRhsNorm(1.0);
+
+ SparseMatrix<Scalar,RowMajor> CINV(C.rows(), C.cols());
+ pseudo_inverse(C, CINV);
+
+ while(true)
+ {
+ // computation of residual
+ old_z = z;
+ memox = x;
+ r = b;
+ r += A * -x;
+ z = r;
+ bool transition = false;
+ for (Index i = 0; i < C.rows(); ++i)
+ {
+ Scalar al = C.row(i).dot(x) - f.coeff(i);
+ if (al >= -1.0E-15)
+ {
+ if (!satured[i])
+ {
+ satured[i] = true;
+ transition = true;
+ }
+ Scalar bb = CINV.row(i).dot(z);
+ if (bb > 0.0)
+ // FIXME: we should allow that: z += -bb * C.row(i);
+ for (typename CMatrix::InnerIterator it(C,i); it; ++it)
+ z.coeffRef(it.index()) -= bb*it.value();
+ }
+ else
+ satured[i] = false;
+ }
+
+ // descent direction
+ rho_1 = rho;
+ rho = r.dot(z);
+
+ if (iter.finished(rho)) break;
+
+ if (iter.noiseLevel() > 0 && transition) std::cerr << "CCG: transition\n";
+ if (transition || iter.first()) gamma = 0.0;
+ else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1);
+ p = z + gamma*p;
+
+ ++iter;
+    // one-dimensional optimization along p
+ q = A * p;
+ lambda = rho / q.dot(p);
+ for (Index i = 0; i < C.rows(); ++i)
+ {
+ if (!satured[i])
+ {
+ Scalar bb = C.row(i).dot(p) - f[i];
+ if (bb > 0.0)
+ lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb);
+ }
+ }
+ x += lambda * p;
+ memox -= x;
+ }
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CONSTRAINEDCG_H
diff --git a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
new file mode 100644
index 0000000..9fcc8a8
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
@@ -0,0 +1,542 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DGMRES_H
+#define EIGEN_DGMRES_H
+
+#include <Eigen/Eigenvalues>
+
+namespace Eigen {
+
+template< typename _MatrixType,
+ typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class DGMRES;
+
+namespace internal {
+
+template< typename _MatrixType, typename _Preconditioner>
+struct traits<DGMRES<_MatrixType,_Preconditioner> >
+{
+ typedef _MatrixType MatrixType;
+ typedef _Preconditioner Preconditioner;
+};
+
+/** \brief Computes a permutation vector to have a sorted sequence
+ * \param vec The vector to reorder.
+ * \param perm gives the sorted sequence on output. Must be initialized with 0..n-1
+ * \param ncut Put the ncut smallest elements at the end of the vector
+ * WARNING This is an expensive sort; it should be used only
+ * for small vectors
+ * TODO Use modified QuickSplit or std::nth_element to get the smallest values
+ */
+template <typename VectorType, typename IndexType>
+void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::Scalar& ncut)
+{
+ eigen_assert(vec.size() == perm.size());
+ typedef typename IndexType::Scalar Index;
+ typedef typename VectorType::Scalar Scalar;
+ bool flag;
+ for (Index k = 0; k < ncut; k++)
+ {
+ flag = false;
+    for (Index j = 0; j < vec.size()-1; j++)
+    {
+      if ( vec(perm(j)) < vec(perm(j+1)) )
+      {
+        std::swap(perm(j),perm(j+1));
+        flag = true;
+      }
+    }
+    if (!flag) break; // No swap in a full pass: the sequence is sorted
+ }
+}
+
+}
+/**
+ * \ingroup IterativeLinearSolvers_Module
+ * \brief A Restarted GMRES with deflation.
+ * This class implements a modification of the GMRES solver for
+ * sparse linear systems. The basis is built with modified
+ * Gram-Schmidt. At each restart, a few approximated eigenvectors
+ * corresponding to the smallest eigenvalues are used to build a
+ * preconditioner for the next cycle. This preconditioner
+ * for deflation can be combined with any other preconditioner,
+ * the IncompleteLUT for instance. The preconditioner is applied
+ * to the right of the matrix and the combination is multiplicative.
+ *
+ * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
+ * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+ * Typical usage :
+ * \code
+ * SparseMatrix<double> A;
+ * VectorXd x, b;
+ * //Fill A and b ...
+ * DGMRES<SparseMatrix<double> > solver;
+ * solver.set_restart(30); // Set restarting value
+ * solver.setEigenv(1); // Set the number of eigenvalues to deflate
+ * solver.compute(A);
+ * x = solver.solve(b);
+ * \endcode
+ *
+ * References :
+ * [1] D. NUENTSA WAKAM and F. PACULL, Memory Efficient Hybrid
+ * Algebraic Solvers for Linear Systems Arising from Compressible
+ * Flows, Computers and Fluids, In Press,
+ * http://dx.doi.org/10.1016/j.compfluid.2012.03.023
+ * [2] K. Burrage and J. Erhel, On the performance of various
+ * adaptive preconditioned GMRES strategies, Numer. Linear Algebra Appl., 5(1998), 101-121.
+ * [3] J. Erhel, K. Burrage and B. Pohl, Restarted GMRES
+ * preconditioned by deflation,J. Computational and Applied
+ * Mathematics, 69(1996), 303-318.
+ *
+ */
+template< typename _MatrixType, typename _Preconditioner>
+class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
+{
+ typedef IterativeSolverBase<DGMRES> Base;
+ using Base::mp_matrix;
+ using Base::m_error;
+ using Base::m_iterations;
+ using Base::m_info;
+ using Base::m_isInitialized;
+ using Base::m_tolerance;
+ public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef _Preconditioner Preconditioner;
+ typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+ typedef Matrix<RealScalar,Dynamic,Dynamic> DenseRealMatrix;
+ typedef Matrix<Scalar,Dynamic,1> DenseVector;
+ typedef Matrix<RealScalar,Dynamic,1> DenseRealVector;
+ typedef Matrix<std::complex<RealScalar>, Dynamic, 1> ComplexVector;
+
+
+ /** Default constructor. */
+ DGMRES() : Base(),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {}
+
+ /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+ *
+ * This constructor is a shortcut for the default constructor followed
+ * by a call to compute().
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ DGMRES(const MatrixType& A) : Base(A),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false)
+ {}
+
+ ~DGMRES() {}
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+ * \a x0 as an initial solution.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs,typename Guess>
+ inline const internal::solve_retval_with_guess<DGMRES, Rhs, Guess>
+ solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+ {
+ eigen_assert(m_isInitialized && "DGMRES is not initialized.");
+ eigen_assert(Base::rows()==b.rows()
+ && "DGMRES::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval_with_guess
+ <DGMRES, Rhs, Guess>(*this, b.derived(), x0);
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solveWithGuess(const Rhs& b, Dest& x) const
+ {
+ bool failed = false;
+ for(int j=0; j<b.cols(); ++j)
+ {
+ m_iterations = Base::maxIterations();
+ m_error = Base::m_tolerance;
+
+ typename Dest::ColXpr xj(x,j);
+ dgmres(*mp_matrix, b.col(j), xj, Base::m_preconditioner);
+ }
+ m_info = failed ? NumericalIssue
+ : m_error <= Base::m_tolerance ? Success
+ : NoConvergence;
+ m_isInitialized = true;
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x = b;
+ _solveWithGuess(b,x);
+ }
+ /**
+ * Get the restart value
+ */
+ int restart() { return m_restart; }
+
+ /**
+ * Set the restart value (default is 30)
+ */
+ void set_restart(const int restart) { m_restart=restart; }
+
+ /**
+ * Set the number of eigenvalues to deflate at each restart
+ */
+ void setEigenv(const int neig)
+ {
+ m_neig = neig;
+ if (neig+1 > m_maxNeig) m_maxNeig = neig+1; // To allow for complex conjugates
+ }
+
+ /**
+     * Get the size of the deflation subspace
+ */
+ int deflSize() {return m_r; }
+
+ /**
+ * Set the maximum size of the deflation subspace
+ */
+ void setMaxEigenv(const int maxNeig) { m_maxNeig = maxNeig; }
+
+ protected:
+ // DGMRES algorithm
+ template<typename Rhs, typename Dest>
+ void dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x, const Preconditioner& precond) const;
+ // Perform one cycle of GMRES
+ template<typename Dest>
+ int dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const;
+ // Compute data to use for deflation
+ int dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, Index& neig) const;
+ // Apply deflation to a vector
+ template<typename RhsType, typename DestType>
+ int dgmresApplyDeflation(const RhsType& In, DestType& Out) const;
+ ComplexVector schurValues(const ComplexSchur<DenseMatrix>& schurofH) const;
+ ComplexVector schurValues(const RealSchur<DenseMatrix>& schurofH) const;
+ // Init data for deflation
+ void dgmresInitDeflation(Index& rows) const;
+ mutable DenseMatrix m_V; // Krylov basis vectors
+ mutable DenseMatrix m_H; // Hessenberg matrix
+    mutable DenseMatrix m_Hes; // Initial Hessenberg matrix without Givens rotations applied
+ mutable Index m_restart; // Maximum size of the Krylov subspace
+ mutable DenseMatrix m_U; // Vectors that form the basis of the invariant subspace
+ mutable DenseMatrix m_MU; // matrix operator applied to m_U (for next cycles)
+ mutable DenseMatrix m_T; /* T=U^T*M^{-1}*A*U */
+ mutable PartialPivLU<DenseMatrix> m_luT; // LU factorization of m_T
+ mutable int m_neig; //Number of eigenvalues to extract at each restart
+ mutable int m_r; // Current number of deflated eigenvalues, size of m_U
+ mutable int m_maxNeig; // Maximum number of eigenvalues to deflate
+ mutable RealScalar m_lambdaN; //Modulus of the largest eigenvalue of A
+ mutable bool m_isDeflAllocated;
+ mutable bool m_isDeflInitialized;
+
+ //Adaptive strategy
+ mutable RealScalar m_smv; // Smaller multiple of the remaining number of steps allowed
+ mutable bool m_force; // Force the use of deflation at each restart
+
+};
+/**
+ * \brief Perform several cycles of restarted GMRES with modified Gram Schmidt,
+ *
+ * A right preconditioner is used combined with deflation.
+ *
+ */
+template< typename _MatrixType, typename _Preconditioner>
+template<typename Rhs, typename Dest>
+void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x,
+ const Preconditioner& precond) const
+{
+ //Initialization
+ int n = mat.rows();
+ DenseVector r0(n);
+ int nbIts = 0;
+ m_H.resize(m_restart+1, m_restart);
+ m_Hes.resize(m_restart, m_restart);
+ m_V.resize(n,m_restart+1);
+  //Initial residual vector and initial norm
+ x = precond.solve(x);
+ r0 = rhs - mat * x;
+ RealScalar beta = r0.norm();
+ RealScalar normRhs = rhs.norm();
+ m_error = beta/normRhs;
+ if(m_error < m_tolerance)
+ m_info = Success;
+ else
+ m_info = NoConvergence;
+
+ // Iterative process
+ while (nbIts < m_iterations && m_info == NoConvergence)
+ {
+ dgmresCycle(mat, precond, x, r0, beta, normRhs, nbIts);
+
+ // Compute the new residual vector for the restart
+ if (nbIts < m_iterations && m_info == NoConvergence)
+ r0 = rhs - mat * x;
+ }
+}
+
+/**
+ * \brief Perform one restart cycle of DGMRES
+ * \param mat The coefficient matrix
+ * \param precond The preconditioner
+ * \param x the new approximated solution
+ * \param r0 The initial residual vector
+ * \param beta The norm of the residual computed so far
+ * \param normRhs The norm of the right hand side vector
+ * \param nbIts The number of iterations
+ */
+template< typename _MatrixType, typename _Preconditioner>
+template<typename Dest>
+int DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const
+{
+ //Initialization
+ DenseVector g(m_restart+1); // Right hand side of the least square problem
+ g.setZero();
+ g(0) = Scalar(beta);
+ m_V.col(0) = r0/beta;
+ m_info = NoConvergence;
+  std::vector<JacobiRotation<Scalar> > gr(m_restart); // Givens rotations
+ int it = 0; // Number of inner iterations
+ int n = mat.rows();
+ DenseVector tv1(n), tv2(n); //Temporary vectors
+ while (m_info == NoConvergence && it < m_restart && nbIts < m_iterations)
+ {
+ // Apply preconditioner(s) at right
+ if (m_isDeflInitialized )
+ {
+ dgmresApplyDeflation(m_V.col(it), tv1); // Deflation
+ tv2 = precond.solve(tv1);
+ }
+ else
+ {
+ tv2 = precond.solve(m_V.col(it)); // User's selected preconditioner
+ }
+ tv1 = mat * tv2;
+
+    // Orthogonalize it against the previous basis vectors using modified Gram-Schmidt
+ Scalar coef;
+ for (int i = 0; i <= it; ++i)
+ {
+ coef = tv1.dot(m_V.col(i));
+ tv1 = tv1 - coef * m_V.col(i);
+ m_H(i,it) = coef;
+ m_Hes(i,it) = coef;
+ }
+ // Normalize the vector
+ coef = tv1.norm();
+ m_V.col(it+1) = tv1/coef;
+ m_H(it+1, it) = coef;
+// m_Hes(it+1,it) = coef;
+
+ // FIXME Check for happy breakdown
+
+ // Update Hessenberg matrix with Givens rotations
+ for (int i = 1; i <= it; ++i)
+ {
+ m_H.col(it).applyOnTheLeft(i-1,i,gr[i-1].adjoint());
+ }
+ // Compute the new plane rotation
+ gr[it].makeGivens(m_H(it, it), m_H(it+1,it));
+ // Apply the new rotation
+ m_H.col(it).applyOnTheLeft(it,it+1,gr[it].adjoint());
+ g.applyOnTheLeft(it,it+1, gr[it].adjoint());
+
+ beta = std::abs(g(it+1));
+ m_error = beta/normRhs;
+    // std::cerr << nbIts << " Relative Residual Norm " << m_error << std::endl; // debug trace, disabled
+ it++; nbIts++;
+
+ if (m_error < m_tolerance)
+ {
+ // The method has converged
+ m_info = Success;
+ break;
+ }
+ }
+
+ // Compute the new coefficients by solving the least square problem
+// it++;
+ //FIXME Check first if the matrix is singular ... zero diagonal
+ DenseVector nrs(m_restart);
+ nrs = m_H.topLeftCorner(it,it).template triangularView<Upper>().solve(g.head(it));
+
+ // Form the new solution
+ if (m_isDeflInitialized)
+ {
+ tv1 = m_V.leftCols(it) * nrs;
+ dgmresApplyDeflation(tv1, tv2);
+ x = x + precond.solve(tv2);
+ }
+ else
+ x = x + precond.solve(m_V.leftCols(it) * nrs);
+
+ // Go for a new cycle and compute data for deflation
+ if(nbIts < m_iterations && m_info == NoConvergence && m_neig > 0 && (m_r+m_neig) < m_maxNeig)
+ dgmresComputeDeflationData(mat, precond, it, m_neig);
+ return 0;
+
+}
+
+
+template< typename _MatrixType, typename _Preconditioner>
+void DGMRES<_MatrixType, _Preconditioner>::dgmresInitDeflation(Index& rows) const
+{
+ m_U.resize(rows, m_maxNeig);
+ m_MU.resize(rows, m_maxNeig);
+ m_T.resize(m_maxNeig, m_maxNeig);
+ m_lambdaN = 0.0;
+ m_isDeflAllocated = true;
+}
+
+template< typename _MatrixType, typename _Preconditioner>
+inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const ComplexSchur<DenseMatrix>& schurofH) const
+{
+ return schurofH.matrixT().diagonal();
+}
+
+template< typename _MatrixType, typename _Preconditioner>
+inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const RealSchur<DenseMatrix>& schurofH) const
+{
+ typedef typename MatrixType::Index Index;
+ const DenseMatrix& T = schurofH.matrixT();
+ Index it = T.rows();
+ ComplexVector eig(it);
+ Index j = 0;
+  while (j < it-1)
+  {
+    if (T(j+1,j) == Scalar(0))
+    {
+      eig(j) = std::complex<RealScalar>(T(j,j),RealScalar(0));
+      j++;
+    }
+    else
+    {
+      eig(j) = std::complex<RealScalar>(T(j,j),T(j+1,j));
+      eig(j+1) = std::complex<RealScalar>(T(j,j+1),T(j+1,j+1));
+      j += 2; // skip the 2x2 diagonal block holding the complex-conjugate pair
+    }
+  }
+  if (j < it) eig(j) = std::complex<RealScalar>(T(j,j),RealScalar(0));
+ return eig;
+}
+
+template< typename _MatrixType, typename _Preconditioner>
+int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, Index& neig) const
+{
+ // First, find the Schur form of the Hessenberg matrix H
+ typename internal::conditional<NumTraits<Scalar>::IsComplex, ComplexSchur<DenseMatrix>, RealSchur<DenseMatrix> >::type schurofH;
+ bool computeU = true;
+ DenseMatrix matrixQ(it,it);
+ matrixQ.setIdentity();
+ schurofH.computeFromHessenberg(m_Hes.topLeftCorner(it,it), matrixQ, computeU);
+
+ ComplexVector eig(it);
+ Matrix<Index,Dynamic,1>perm(it);
+ eig = this->schurValues(schurofH);
+
+ // Reorder the absolute values of Schur values
+ DenseRealVector modulEig(it);
+ for (int j=0; j<it; ++j) modulEig(j) = std::abs(eig(j));
+ perm.setLinSpaced(it,0,it-1);
+ internal::sortWithPermutation(modulEig, perm, neig);
+
+ if (!m_lambdaN)
+ {
+ m_lambdaN = (std::max)(modulEig.maxCoeff(), m_lambdaN);
+ }
+ //Count the real number of extracted eigenvalues (with complex conjugates)
+ int nbrEig = 0;
+ while (nbrEig < neig)
+ {
+ if(eig(perm(it-nbrEig-1)).imag() == RealScalar(0)) nbrEig++;
+ else nbrEig += 2;
+ }
+ // Extract the Schur vectors corresponding to the smallest Ritz values
+ DenseMatrix Sr(it, nbrEig);
+ Sr.setZero();
+ for (int j = 0; j < nbrEig; j++)
+ {
+ Sr.col(j) = schurofH.matrixU().col(perm(it-j-1));
+ }
+
+ // Form the Schur vectors of the initial matrix using the Krylov basis
+ DenseMatrix X;
+ X = m_V.leftCols(it) * Sr;
+ if (m_r)
+ {
+ // Orthogonalize X against m_U using modified Gram-Schmidt
+ for (int j = 0; j < nbrEig; j++)
+ for (int k =0; k < m_r; k++)
+ X.col(j) = X.col(j) - (m_U.col(k).dot(X.col(j)))*m_U.col(k);
+ }
+
+ // Compute m_MX = A * M^-1 * X
+ Index m = m_V.rows();
+ if (!m_isDeflAllocated)
+ dgmresInitDeflation(m);
+ DenseMatrix MX(m, nbrEig);
+ DenseVector tv1(m);
+ for (int j = 0; j < nbrEig; j++)
+ {
+ tv1 = mat * X.col(j);
+ MX.col(j) = precond.solve(tv1);
+ }
+
+ //Update m_T = [U'MU U'MX; X'MU X'MX]
+ m_T.block(m_r, m_r, nbrEig, nbrEig) = X.transpose() * MX;
+ if(m_r)
+ {
+ m_T.block(0, m_r, m_r, nbrEig) = m_U.leftCols(m_r).transpose() * MX;
+ m_T.block(m_r, 0, nbrEig, m_r) = X.transpose() * m_MU.leftCols(m_r);
+ }
+
+ // Save X into m_U and m_MX in m_MU
+ for (int j = 0; j < nbrEig; j++) m_U.col(m_r+j) = X.col(j);
+ for (int j = 0; j < nbrEig; j++) m_MU.col(m_r+j) = MX.col(j);
+ // Increase the size of the invariant subspace
+ m_r += nbrEig;
+
+ // Factorize m_T into m_luT
+ m_luT.compute(m_T.topLeftCorner(m_r, m_r));
+
+ //FIXME CHeck if the factorization was correctly done (nonsingular matrix)
+ m_isDeflInitialized = true;
+ return 0;
+}
+template<typename _MatrixType, typename _Preconditioner>
+template<typename RhsType, typename DestType>
+int DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, DestType &y) const
+{
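+  // Applies the deflation preconditioner of Erhel et al. [3]:
+  //   y = M^{-1} x  with  M^{-1} = I + U (lambdaN * T^{-1} - I) U^T,
+  // which shifts the deflated eigenvalues near the largest modulus lambdaN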
+ DenseVector x1 = m_U.leftCols(m_r).transpose() * x;
+ y = x + m_U.leftCols(m_r) * ( m_lambdaN * m_luT.solve(x1) - x1);
+ return 0;
+}
+
+namespace internal {
+
+ template<typename _MatrixType, typename _Preconditioner, typename Rhs>
+struct solve_retval<DGMRES<_MatrixType, _Preconditioner>, Rhs>
+ : solve_retval_base<DGMRES<_MatrixType, _Preconditioner>, Rhs>
+{
+ typedef DGMRES<_MatrixType, _Preconditioner> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+} // end namespace internal
+
+} // end namespace Eigen
+#endif
diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
new file mode 100644
index 0000000..7ba13af
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
@@ -0,0 +1,370 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012, 2014 Kolja Brix <brix@igpm.rwth-aachen.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_GMRES_H
+#define EIGEN_GMRES_H
+
+namespace Eigen {
+
+namespace internal {
+
+/**
+ * Generalized Minimal Residual Algorithm based on the
+ * Arnoldi algorithm implemented with Householder reflections.
+ *
+ * Parameters:
+ * \param mat matrix of linear system of equations
+ * \param Rhs right hand side vector of linear system of equations
+ * \param x on input: initial guess, on output: solution
+ * \param precond preconditioner used
+ * \param iters on input: maximum number of iterations to perform
+ * on output: number of iterations performed
+ * \param restart number of iterations for a restart
+ * \param tol_error on input: residual tolerance
+ * on output: residual achieved
+ *
+ * \sa IterativeMethods::bicgstab()
+ *
+ *
+ * For references, please see:
+ *
+ * Saad, Y. and Schultz, M. H.
+ * GMRES: A Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems.
+ * SIAM J.Sci.Stat.Comp. 7, 1986, pp. 856 - 869.
+ *
+ * Saad, Y.
+ * Iterative Methods for Sparse Linear Systems.
+ * Society for Industrial and Applied Mathematics, Philadelphia, 2003.
+ *
+ * Walker, H. F.
+ * Implementations of the GMRES method.
+ * Comput.Phys.Comm. 53, 1989, pp. 311 - 320.
+ *
+ * Walker, H. F.
+ * Implementation of the GMRES Method using Householder Transformations.
+ * SIAM J.Sci.Stat.Comp. 9, 1988, pp. 152 - 163.
+ *
+ */
+template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond,
+ int &iters, const int &restart, typename Dest::RealScalar & tol_error) {
+
+ using std::sqrt;
+ using std::abs;
+
+ typedef typename Dest::RealScalar RealScalar;
+ typedef typename Dest::Scalar Scalar;
+ typedef Matrix < Scalar, Dynamic, 1 > VectorType;
+ typedef Matrix < Scalar, Dynamic, Dynamic > FMatrixType;
+
+ RealScalar tol = tol_error;
+ const int maxIters = iters;
+ iters = 0;
+
+ const int m = mat.rows();
+
+ VectorType p0 = rhs - mat*x;
+ VectorType r0 = precond.solve(p0);
+
+ // is initial guess already good enough?
+  if(r0.norm() < tol) {
+ return true;
+ }
+
+ VectorType w = VectorType::Zero(restart + 1);
+
+ FMatrixType H = FMatrixType::Zero(m, restart + 1); // Hessenberg matrix
+ VectorType tau = VectorType::Zero(restart + 1);
+ std::vector < JacobiRotation < Scalar > > G(restart);
+
+ // generate first Householder vector
+ VectorType e(m-1);
+ RealScalar beta;
+ r0.makeHouseholder(e, tau.coeffRef(0), beta);
+ w(0)=(Scalar) beta;
+ H.bottomLeftCorner(m - 1, 1) = e;
+
+ for (int k = 1; k <= restart; ++k) {
+
+ ++iters;
+
+ VectorType v = VectorType::Unit(m, k - 1), workspace(m);
+
+ // apply Householder reflections H_{1} ... H_{k-1} to v
+ for (int i = k - 1; i >= 0; --i) {
+ v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
+ }
+
+ // apply matrix M to v: v = mat * v;
+ VectorType t=mat*v;
+ v=precond.solve(t);
+
+ // apply Householder reflections H_{k-1} ... H_{1} to v
+ for (int i = 0; i < k; ++i) {
+ v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
+ }
+
+ if (v.tail(m - k).norm() != 0.0) {
+
+ if (k <= restart) {
+
+ // generate new Householder vector
+ VectorType e(m - k - 1);
+ RealScalar beta;
+ v.tail(m - k).makeHouseholder(e, tau.coeffRef(k), beta);
+ H.col(k).tail(m - k - 1) = e;
+
+ // apply Householder reflection H_{k} to v
+ v.tail(m - k).applyHouseholderOnTheLeft(H.col(k).tail(m - k - 1), tau.coeffRef(k), workspace.data());
+
+ }
+ }
+
+ if (k > 1) {
+ for (int i = 0; i < k - 1; ++i) {
+ // apply old Givens rotations to v
+ v.applyOnTheLeft(i, i + 1, G[i].adjoint());
+ }
+ }
+
+ if (k<m && v(k) != (Scalar) 0) {
+ // determine next Givens rotation
+ G[k - 1].makeGivens(v(k - 1), v(k));
+
+ // apply Givens rotation to v and w
+ v.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+ w.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
+
+ }
+
+ // insert coefficients into upper matrix triangle
+ H.col(k - 1).head(k) = v.head(k);
+
+ bool stop=(k==m || abs(w(k)) < tol || iters == maxIters);
+
+ if (stop || k == restart) {
+
+ // solve upper triangular system
+ VectorType y = w.head(k);
+ H.topLeftCorner(k, k).template triangularView < Eigen::Upper > ().solveInPlace(y);
+
+ // use Horner-like scheme to calculate solution vector
+ VectorType x_new = y(k - 1) * VectorType::Unit(m, k - 1);
+
+ // apply Householder reflection H_{k} to x_new
+ x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data());
+
+ for (int i = k - 2; i >= 0; --i) {
+ x_new += y(i) * VectorType::Unit(m, i);
+ // apply Householder reflection H_{i} to x_new
+ x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
+ }
+
+ x += x_new;
+
+ if (stop) {
+ return true;
+ } else {
+ k=0;
+
+      // reset data for a restart: r0 = precond.solve(rhs - mat * x),
+      // mirroring the initialization of r0 above
+      VectorType p0 = rhs - mat * x;
+      r0 = precond.solve(p0);
+ w = VectorType::Zero(restart + 1);
+ H = FMatrixType::Zero(m, restart + 1);
+ tau = VectorType::Zero(restart + 1);
+
+ // generate first Householder vector
+ RealScalar beta;
+ r0.makeHouseholder(e, tau.coeffRef(0), beta);
+ w(0)=(Scalar) beta;
+ H.bottomLeftCorner(m - 1, 1) = e;
+
+ }
+
+ }
+
+
+
+ }
+
+ return false;
+
+}
+
+}
+
+template< typename _MatrixType,
+ typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class GMRES;
+
+namespace internal {
+
+template< typename _MatrixType, typename _Preconditioner>
+struct traits<GMRES<_MatrixType,_Preconditioner> >
+{
+ typedef _MatrixType MatrixType;
+ typedef _Preconditioner Preconditioner;
+};
+
+}
+
+/** \ingroup IterativeLinearSolvers_Module
+ * \brief A GMRES solver for sparse square problems
+ *
+ * This class allows solving sparse linear problems of the form A.x = b with a generalized minimal
+ * residual method. The vectors x and b can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
+ * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+ *
+ * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+ * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
+ * and NumTraits<Scalar>::epsilon() for the tolerance.
+ *
+ * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
+ * \code
+ * int n = 10000;
+ * VectorXd x(n), b(n);
+ * SparseMatrix<double> A(n,n);
+ * // fill A and b
+ * GMRES<SparseMatrix<double> > solver(A);
+ * x = solver.solve(b);
+ * std::cout << "#iterations: " << solver.iterations() << std::endl;
+ * std::cout << "estimated error: " << solver.error() << std::endl;
+ * // update b, and solve again
+ * x = solver.solve(b);
+ * \endcode
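+ *
+ * The restart length (30 by default) can be tuned as well; for example:
+ * \code
+ * solver.set_restart(50); // a longer Krylov basis uses more memory but may converge in fewer iterations
+ * \endcode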
+ *
+ * By default the iterations start with x=0 as an initial guess of the solution.
+ * One can control the start using the solveWithGuess() method.
+ *
+ * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ */
+template< typename _MatrixType, typename _Preconditioner>
+class GMRES : public IterativeSolverBase<GMRES<_MatrixType,_Preconditioner> >
+{
+ typedef IterativeSolverBase<GMRES> Base;
+ using Base::mp_matrix;
+ using Base::m_error;
+ using Base::m_iterations;
+ using Base::m_info;
+ using Base::m_isInitialized;
+
+private:
+ int m_restart;
+
+public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef _Preconditioner Preconditioner;
+
+public:
+
+ /** Default constructor. */
+ GMRES() : Base(), m_restart(30) {}
+
+ /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+ *
+ * This constructor is a shortcut for the default constructor followed
+ * by a call to compute().
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ GMRES(const MatrixType& A) : Base(A), m_restart(30) {}
+
+ ~GMRES() {}
+
+  /** Get the number of iterations after which a restart is performed.
+ */
+ int get_restart() { return m_restart; }
+
+  /** Set the number of iterations after which a restart is performed.
+    * \param restart number of iterations for a restart; default is 30.
+ */
+ void set_restart(const int restart) { m_restart=restart; }
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+    * and \a x0 as an initial solution.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs,typename Guess>
+ inline const internal::solve_retval_with_guess<GMRES, Rhs, Guess>
+ solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+ {
+ eigen_assert(m_isInitialized && "GMRES is not initialized.");
+ eigen_assert(Base::rows()==b.rows()
+ && "GMRES::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval_with_guess
+ <GMRES, Rhs, Guess>(*this, b.derived(), x0);
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solveWithGuess(const Rhs& b, Dest& x) const
+ {
+ bool failed = false;
+ for(int j=0; j<b.cols(); ++j)
+ {
+ m_iterations = Base::maxIterations();
+ m_error = Base::m_tolerance;
+
+ typename Dest::ColXpr xj(x,j);
+ if(!internal::gmres(*mp_matrix, b.col(j), xj, Base::m_preconditioner, m_iterations, m_restart, m_error))
+ failed = true;
+ }
+ m_info = failed ? NumericalIssue
+ : m_error <= Base::m_tolerance ? Success
+ : NoConvergence;
+ m_isInitialized = true;
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x = b;
+ if(x.squaredNorm() == 0) return; // Check Zero right hand side
+ _solveWithGuess(b,x);
+ }
+
+protected:
+
+};
+
+
+namespace internal {
+
+ template<typename _MatrixType, typename _Preconditioner, typename Rhs>
+struct solve_retval<GMRES<_MatrixType, _Preconditioner>, Rhs>
+ : solve_retval_base<GMRES<_MatrixType, _Preconditioner>, Rhs>
+{
+ typedef GMRES<_MatrixType, _Preconditioner> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_GMRES_H
diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
new file mode 100644
index 0000000..661c1f2
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
@@ -0,0 +1,278 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_INCOMPLETE_CHOLESKY_H
+#define EIGEN_INCOMPLETE_CHOLESKY_H
+#include "Eigen/src/IterativeLinearSolvers/IncompleteLUT.h"
+#include <Eigen/OrderingMethods>
+#include <list>
+
+namespace Eigen {
+/**
+ * \brief Modified Incomplete Cholesky with dual threshold
+ *
+ * References : C.-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with
+ *              Limited Memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999
+ *
+ * \tparam Scalar the scalar type of the input matrices
+ * \tparam _UpLo The triangular part of the matrix to reference. It can be Lower or Upper. Default is Lower.
+ * \tparam _OrderingType The fill-reducing ordering to use. Default is NaturalOrdering<int>.
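+ *
+ * A minimal usage sketch as a standalone approximate solver (assuming a symmetric
+ * positive definite SparseMatrix<double> A and a VectorXd b of matching size):
+ * \code
+ * IncompleteCholesky<double> ichol(A);
+ * VectorXd x = ichol.solve(b); // applies the incomplete factors to b
+ * \endcode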
+ */
+
+template <typename Scalar, int _UpLo = Lower, typename _OrderingType = NaturalOrdering<int> >
+class IncompleteCholesky : internal::noncopyable
+{
+ public:
+ typedef SparseMatrix<Scalar,ColMajor> MatrixType;
+ typedef _OrderingType OrderingType;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+ typedef Matrix<Scalar,Dynamic,1> ScalarType;
+ typedef Matrix<Index,Dynamic, 1> IndexType;
+ typedef std::vector<std::list<Index> > VectorList;
+ enum { UpLo = _UpLo };
+ public:
+ IncompleteCholesky() : m_shift(1),m_factorizationIsOk(false) {}
+ IncompleteCholesky(const MatrixType& matrix) : m_shift(1),m_factorizationIsOk(false)
+ {
+ compute(matrix);
+ }
+
+ Index rows() const { return m_L.rows(); }
+
+ Index cols() const { return m_L.cols(); }
+
+
+ /** \brief Reports whether previous computation was successful.
+ *
+      * \returns \c Success if computation was successful,
+ * \c NumericalIssue if the matrix appears to be negative.
+ */
+ ComputationInfo info() const
+ {
+      eigen_assert(m_isInitialized && "IncompleteCholesky is not initialized.");
+ return m_info;
+ }
+
+ /**
+ * \brief Set the initial shift parameter
+ */
+ void setShift( Scalar shift) { m_shift = shift; }
+
+ /**
+ * \brief Computes the fill reducing permutation vector.
+ */
+ template<typename MatrixType>
+ void analyzePattern(const MatrixType& mat)
+ {
+ OrderingType ord;
+ ord(mat.template selfadjointView<UpLo>(), m_perm);
+ m_analysisIsOk = true;
+ }
+
+ template<typename MatrixType>
+ void factorize(const MatrixType& amat);
+
+ template<typename MatrixType>
+ void compute (const MatrixType& matrix)
+ {
+ analyzePattern(matrix);
+ factorize(matrix);
+ }
+
+ template<typename Rhs, typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ eigen_assert(m_factorizationIsOk && "factorize() should be called first");
+ if (m_perm.rows() == b.rows())
+ x = m_perm.inverse() * b;
+ else
+ x = b;
+ x = m_scal.asDiagonal() * x;
+ x = m_L.template triangularView<UnitLower>().solve(x);
+ x = m_L.adjoint().template triangularView<Upper>().solve(x);
+ if (m_perm.rows() == b.rows())
+ x = m_perm * x;
+ x = m_scal.asDiagonal() * x;
+ }
+ template<typename Rhs> inline const internal::solve_retval<IncompleteCholesky, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+      eigen_assert(m_factorizationIsOk && "IncompleteCholesky did not succeed");
+      eigen_assert(m_isInitialized && "IncompleteCholesky is not initialized.");
+ eigen_assert(cols()==b.rows()
+ && "IncompleteLLT::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<IncompleteCholesky, Rhs>(*this, b.derived());
+ }
+ protected:
+ SparseMatrix<Scalar,ColMajor> m_L; // The lower part stored in CSC
+ ScalarType m_scal; // The vector for scaling the matrix
+ Scalar m_shift; //The initial shift parameter
+ bool m_analysisIsOk;
+ bool m_factorizationIsOk;
+ bool m_isInitialized;
+ ComputationInfo m_info;
+ PermutationType m_perm;
+
+ private:
+ template <typename IdxType, typename SclType>
+ inline void updateList(const IdxType& colPtr, IdxType& rowIdx, SclType& vals, const Index& col, const Index& jk, IndexType& firstElt, VectorList& listCol);
+};
+
+template<typename Scalar, int _UpLo, typename OrderingType>
+template<typename _MatrixType>
+void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat)
+{
+ using std::sqrt;
+ using std::min;
+ eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
+
+  // Dropping strategy: keep only the p largest elements per column, where p is the number of elements in the corresponding column of the original matrix. Other strategies may be added later.
+
+ // Apply the fill-reducing permutation computed in analyzePattern()
+ if (m_perm.rows() == mat.rows() ) // To detect the null permutation
+ m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>().twistedBy(m_perm);
+ else
+ m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>();
+
+ Index n = m_L.cols();
+ Index nnz = m_L.nonZeros();
+ Map<ScalarType> vals(m_L.valuePtr(), nnz); //values
+ Map<IndexType> rowIdx(m_L.innerIndexPtr(), nnz); //Row indices
+  Map<IndexType> colPtr( m_L.outerIndexPtr(), n+1); // Pointer to the beginning of each column
+ IndexType firstElt(n-1); // for each j, points to the next entry in vals that will be used in the factorization
+ VectorList listCol(n); // listCol(j) is a linked list of columns to update column j
+  ScalarType curCol(n); // Stores the nonzero values of the current column
+ IndexType irow(n); // Row indices of nonzero elements in each column
+
+
+ // Computes the scaling factors
+ m_scal.resize(n);
+ for (int j = 0; j < n; j++)
+ {
+ m_scal(j) = m_L.col(j).norm();
+ m_scal(j) = sqrt(m_scal(j));
+ }
+ // Scale and compute the shift for the matrix
+ Scalar mindiag = vals[0];
+ for (int j = 0; j < n; j++){
+ for (int k = colPtr[j]; k < colPtr[j+1]; k++)
+ vals[k] /= (m_scal(j) * m_scal(rowIdx[k]));
+ mindiag = (min)(vals[colPtr[j]], mindiag);
+ }
+
+ if(mindiag < Scalar(0.)) m_shift = m_shift - mindiag;
+ // Apply the shift to the diagonal elements of the matrix
+ for (int j = 0; j < n; j++)
+ vals[colPtr[j]] += m_shift;
+ // jki version of the Cholesky factorization
+ for (int j=0; j < n; ++j)
+ {
+ //Left-looking factorize the column j
+ // First, load the jth column into curCol
+ Scalar diag = vals[colPtr[j]]; // It is assumed that only the lower part is stored
+ curCol.setZero();
+ irow.setLinSpaced(n,0,n-1);
+ for (int i = colPtr[j] + 1; i < colPtr[j+1]; i++)
+ {
+ curCol(rowIdx[i]) = vals[i];
+ irow(rowIdx[i]) = rowIdx[i];
+ }
+ std::list<int>::iterator k;
+ // Browse all previous columns that will update column j
+ for(k = listCol[j].begin(); k != listCol[j].end(); k++)
+ {
+ int jk = firstElt(*k); // First element to use in the column
+ jk += 1;
+ for (int i = jk; i < colPtr[*k+1]; i++)
+ {
+ curCol(rowIdx[i]) -= vals[i] * vals[jk] ;
+ }
+ updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol);
+ }
+
+ // Scale the current column
+ if(RealScalar(diag) <= 0)
+ {
+      std::cerr << "\nNegative diagonal encountered during incomplete factorization at column " << j << "\n";
+ m_info = NumericalIssue;
+ return;
+ }
+ RealScalar rdiag = sqrt(RealScalar(diag));
+ vals[colPtr[j]] = rdiag;
+ for (int i = j+1; i < n; i++)
+ {
+ //Scale
+ curCol(i) /= rdiag;
+ //Update the remaining diagonals with curCol
+ vals[colPtr[i]] -= curCol(i) * curCol(i);
+ }
+ // Select the largest p elements
+ // p is the original number of elements in the column (without the diagonal)
+ int p = colPtr[j+1] - colPtr[j] - 1 ;
+ internal::QuickSplit(curCol, irow, p);
+ // Insert the largest p elements in the matrix
+ int cpt = 0;
+ for (int i = colPtr[j]+1; i < colPtr[j+1]; i++)
+ {
+ vals[i] = curCol(cpt);
+ rowIdx[i] = irow(cpt);
+ cpt ++;
+ }
+    // Move the smallest remaining row index right after the diagonal element
+ Index jk = colPtr(j)+1;
+ updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol);
+ }
+ m_factorizationIsOk = true;
+ m_isInitialized = true;
+ m_info = Success;
+}
+
+template<typename Scalar, int _UpLo, typename OrderingType>
+template <typename IdxType, typename SclType>
+inline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(const IdxType& colPtr, IdxType& rowIdx, SclType& vals, const Index& col, const Index& jk, IndexType& firstElt, VectorList& listCol)
+{
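+  // Bring the smallest remaining row index of column 'col' to position jk, so that the
+  // next sweep resumes there, and register 'col' in the linked list of the column
+  // (given by that row index) that it will update next.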
+ if (jk < colPtr(col+1) )
+ {
+ Index p = colPtr(col+1) - jk;
+ Index minpos;
+ rowIdx.segment(jk,p).minCoeff(&minpos);
+ minpos += jk;
+ if (rowIdx(minpos) != rowIdx(jk))
+ {
+ //Swap
+ std::swap(rowIdx(jk),rowIdx(minpos));
+ std::swap(vals(jk),vals(minpos));
+ }
+ firstElt(col) = jk;
+ listCol[rowIdx(jk)].push_back(col);
+ }
+}
+namespace internal {
+
+template<typename _Scalar, int _UpLo, typename OrderingType, typename Rhs>
+struct solve_retval<IncompleteCholesky<_Scalar, _UpLo, OrderingType>, Rhs>
+ : solve_retval_base<IncompleteCholesky<_Scalar, _UpLo, OrderingType>, Rhs>
+{
+ typedef IncompleteCholesky<_Scalar, _UpLo, OrderingType> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif
diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
new file mode 100644
index 0000000..67e7801
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h
@@ -0,0 +1,113 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_INCOMPLETE_LU_H
+#define EIGEN_INCOMPLETE_LU_H
+
+namespace Eigen {
+
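+/** \brief A basic incomplete LU (ILU(0)-style) factorization preconditioner.
+  *
+  * The factors are computed on the sparsity pattern of the input matrix (no fill-in).
+  * A minimal usage sketch (assuming a SparseMatrix<double> A and a VectorXd b):
+  * \code
+  * IncompleteLU<double> ilu(A);
+  * VectorXd x = ilu.solve(b); // approximate solve with the incomplete factors
+  * \endcode
+  */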
+template <typename _Scalar>
+class IncompleteLU
+{
+ typedef _Scalar Scalar;
+ typedef Matrix<Scalar,Dynamic,1> Vector;
+ typedef typename Vector::Index Index;
+ typedef SparseMatrix<Scalar,RowMajor> FactorType;
+
+ public:
+ typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+
+ IncompleteLU() : m_isInitialized(false) {}
+
+ template<typename MatrixType>
+ IncompleteLU(const MatrixType& mat) : m_isInitialized(false)
+ {
+ compute(mat);
+ }
+
+ Index rows() const { return m_lu.rows(); }
+ Index cols() const { return m_lu.cols(); }
+
+ template<typename MatrixType>
+ IncompleteLU& compute(const MatrixType& mat)
+ {
+ m_lu = mat;
+ int size = mat.cols();
+ Vector diag(size);
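+      // IKJ-variant ILU(0): each row i is updated in place using the previously
+      // factored rows, without introducing any fill-in outside the pattern of mat.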
+ for(int i=0; i<size; ++i)
+ {
+ typename FactorType::InnerIterator k_it(m_lu,i);
+ for(; k_it && k_it.index()<i; ++k_it)
+ {
+ int k = k_it.index();
+ k_it.valueRef() /= diag(k);
+
+ typename FactorType::InnerIterator j_it(k_it);
+ typename FactorType::InnerIterator kj_it(m_lu, k);
+ while(kj_it && kj_it.index()<=k) ++kj_it;
+ for(++j_it; j_it; )
+ {
+ if(kj_it.index()==j_it.index())
+ {
+ j_it.valueRef() -= k_it.value() * kj_it.value();
+ ++j_it;
+ ++kj_it;
+ }
+ else if(kj_it.index()<j_it.index()) ++kj_it;
+ else ++j_it;
+ }
+ }
+ if(k_it && k_it.index()==i) diag(i) = k_it.value();
+ else diag(i) = 1;
+ }
+ m_isInitialized = true;
+ return *this;
+ }
+
+ template<typename Rhs, typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x = m_lu.template triangularView<UnitLower>().solve(b);
+ x = m_lu.template triangularView<Upper>().solve(x);
+ }
+
+ template<typename Rhs> inline const internal::solve_retval<IncompleteLU, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_isInitialized && "IncompleteLU is not initialized.");
+ eigen_assert(cols()==b.rows()
+ && "IncompleteLU::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<IncompleteLU, Rhs>(*this, b.derived());
+ }
+
+ protected:
+ FactorType m_lu;
+ bool m_isInitialized;
+};
+
+namespace internal {
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<IncompleteLU<_MatrixType>, Rhs>
+ : solve_retval_base<IncompleteLU<_MatrixType>, Rhs>
+{
+ typedef IncompleteLU<_MatrixType> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_INCOMPLETE_LU_H
diff --git a/unsupported/Eigen/src/IterativeSolvers/IterationController.h b/unsupported/Eigen/src/IterativeSolvers/IterationController.h
new file mode 100644
index 0000000..c9c1a4b
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/IterationController.h
@@ -0,0 +1,154 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+
+/* NOTE The class IterationController has been adapted from the iteration
+ * class of the GMM++ and ITL libraries.
+ */
+
+//=======================================================================
+// Copyright (C) 1997-2001
+// Authors: Andrew Lumsdaine <lums@osl.iu.edu>
+// Lie-Quan Lee <llee@osl.iu.edu>
+//
+// This file is part of the Iterative Template Library
+//
+// You should have received a copy of the License Agreement for the
+// Iterative Template Library along with the software; see the
+// file LICENSE.
+//
+// Permission to modify the code and to distribute modified code is
+// granted, provided the text of this NOTICE is retained, a notice that
+// the code was modified is included with the above COPYRIGHT NOTICE and
+// with the COPYRIGHT NOTICE in the LICENSE file, and that the LICENSE
+// file is distributed with the modified code.
+//
+// LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED.
+// By way of example, but not limitation, Licensor MAKES NO
+// REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY
+// PARTICULAR PURPOSE OR THAT THE USE OF THE LICENSED SOFTWARE COMPONENTS
+// OR DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS
+// OR OTHER RIGHTS.
+//=======================================================================
+
+//========================================================================
+//
+// Copyright (C) 2002-2007 Yves Renard
+//
+// This file is a part of GETFEM++
+//
+// Getfem++ is free software; you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation; version 2.1 of the License.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+// You should have received a copy of the GNU Lesser General Public
+// License along with this program; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
+// USA.
+//
+//========================================================================
+
+#include "../../../../Eigen/src/Core/util/NonMPL2.h"
+
+#ifndef EIGEN_ITERATION_CONTROLLER_H
+#define EIGEN_ITERATION_CONTROLLER_H
+
+namespace Eigen {
+
+/** \ingroup IterativeSolvers_Module
+ * \class IterationController
+ *
+ * \brief Controls the iterations of the iterative solvers
+ *
+ * This class has been adapted from the iteration class of GMM++ and ITL libraries.
+ *
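+  * A minimal usage sketch (the loop shown is illustrative; b and res stand for the
+  * right hand side and the current residual of the underlying solver):
+  * \code
+  * IterationController iter(1e-8, 1, 500); // tolerance, noise level, max iterations
+  * iter.setRhsNorm(b.norm());
+  * while (!iter.finished(res.norm())) {
+  *   // ... perform one iteration of the underlying solver, updating res ...
+  *   ++iter;
+  * }
+  * \endcode
+  *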
+ */
+class IterationController
+{
+ protected :
+ double m_rhsn; ///< Right hand side norm
+ size_t m_maxiter; ///< Max. number of iterations
+ int m_noise; ///< if noise > 0 iterations are printed
+ double m_resmax; ///< maximum residual
+ double m_resminreach, m_resadd;
+ size_t m_nit; ///< iteration number
+ double m_res; ///< last computed residual
+ bool m_written;
+ void (*m_callback)(const IterationController&);
+ public :
+
+ void init()
+ {
+ m_nit = 0; m_res = 0.0; m_written = false;
+ m_resminreach = 1E50; m_resadd = 0.0;
+ m_callback = 0;
+ }
+
+ IterationController(double r = 1.0E-8, int noi = 0, size_t mit = size_t(-1))
+ : m_rhsn(1.0), m_maxiter(mit), m_noise(noi), m_resmax(r) { init(); }
+
+ void operator ++(int) { m_nit++; m_written = false; m_resadd += m_res; }
+ void operator ++() { (*this)++; }
+
+ bool first() { return m_nit == 0; }
+
+    /* get/set the "noisiness" (verbosity) of the solvers */
+ int noiseLevel() const { return m_noise; }
+ void setNoiseLevel(int n) { m_noise = n; }
+ void reduceNoiseLevel() { if (m_noise > 0) m_noise--; }
+
+ double maxResidual() const { return m_resmax; }
+ void setMaxResidual(double r) { m_resmax = r; }
+
+ double residual() const { return m_res; }
+
+ /* change the user-definable callback, called after each iteration */
+ void setCallback(void (*t)(const IterationController&))
+ {
+ m_callback = t;
+ }
+
+ size_t iteration() const { return m_nit; }
+ void setIteration(size_t i) { m_nit = i; }
+
+    size_t maxIterations() const { return m_maxiter; }
+ void setMaxIterations(size_t i) { m_maxiter = i; }
+
+ double rhsNorm() const { return m_rhsn; }
+ void setRhsNorm(double r) { m_rhsn = r; }
+
+ bool converged() const { return m_res <= m_rhsn * m_resmax; }
+ bool converged(double nr)
+ {
+ using std::abs;
+ m_res = abs(nr);
+ m_resminreach = (std::min)(m_resminreach, m_res);
+ return converged();
+ }
+ template<typename VectorType> bool converged(const VectorType &v)
+ { return converged(v.squaredNorm()); }
+
+ bool finished(double nr)
+ {
+ if (m_callback) m_callback(*this);
+ if (m_noise > 0 && !m_written)
+ {
+ converged(nr);
+ m_written = true;
+ }
+ return (m_nit >= m_maxiter || converged(nr));
+ }
+ template <typename VectorType>
+ bool finished(const MatrixBase<VectorType> &v)
+ { return finished(double(v.squaredNorm())); }
+
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_ITERATION_CONTROLLER_H
diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
new file mode 100644
index 0000000..30f26aa
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
@@ -0,0 +1,310 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_MINRES_H_
+#define EIGEN_MINRES_H_
+
+
+namespace Eigen {
+
+ namespace internal {
+
+ /** \internal Low-level MINRES algorithm
+ * \param mat The matrix A
+ * \param rhs The right hand side vector b
+         * \param x On input the initial solution, on output the computed solution.
+ * \param precond A right preconditioner being able to efficiently solve for an
+ * approximation of Ax=b (regardless of b)
+ * \param iters On input the max number of iteration, on output the number of performed iterations.
+ * \param tol_error On input the tolerance error, on output an estimation of the relative error.
+ */
+ template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+ EIGEN_DONT_INLINE
+ void minres(const MatrixType& mat, const Rhs& rhs, Dest& x,
+ const Preconditioner& precond, int& iters,
+ typename Dest::RealScalar& tol_error)
+ {
+ using std::sqrt;
+ typedef typename Dest::RealScalar RealScalar;
+ typedef typename Dest::Scalar Scalar;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+
+ // Check for zero rhs
+ const RealScalar rhsNorm2(rhs.squaredNorm());
+ if(rhsNorm2 == 0)
+ {
+ x.setZero();
+ iters = 0;
+ tol_error = 0;
+ return;
+ }
+
+ // initialize
+ const int maxIters(iters); // initialize maxIters to iters
+ const int N(mat.cols()); // the size of the matrix
+ const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2)
+
+ // Initialize preconditioned Lanczos
+ VectorType v_old(N); // will be initialized inside loop
+ VectorType v( VectorType::Zero(N) ); //initialize v
+ VectorType v_new(rhs-mat*x); //initialize v_new
+ RealScalar residualNorm2(v_new.squaredNorm());
+ VectorType w(N); // will be initialized inside loop
+ VectorType w_new(precond.solve(v_new)); // initialize w_new
+// RealScalar beta; // will be initialized inside loop
+ RealScalar beta_new2(v_new.dot(w_new));
+ eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+ RealScalar beta_new(sqrt(beta_new2));
+ const RealScalar beta_one(beta_new);
+ v_new /= beta_new;
+ w_new /= beta_new;
+ // Initialize other variables
+ RealScalar c(1.0); // the cosine of the Givens rotation
+ RealScalar c_old(1.0);
+ RealScalar s(0.0); // the sine of the Givens rotation
+ RealScalar s_old(0.0); // the sine of the Givens rotation
+ VectorType p_oold(N); // will be initialized in loop
+ VectorType p_old(VectorType::Zero(N)); // initialize p_old=0
+ VectorType p(p_old); // initialize p=0
+ RealScalar eta(1.0);
+
+ iters = 0; // reset iters
+ while ( iters < maxIters )
+ {
+ // Preconditioned Lanczos
+ /* Note that there are 4 variants on the Lanczos algorithm. These are
+ * described in Paige, C. C. (1972). Computational variants of
+ * the Lanczos method for the eigenproblem. IMA Journal of Applied
+ * Mathematics, 10(3), 373–381. The current implementation corresponds
+ * to the case A(2,7) in the paper. It also corresponds to
+ * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear
+ * Systems, 2003 p.173. For the preconditioned version see
+ * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987).
+ */
+ const RealScalar beta(beta_new);
+ v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter
+// const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT
+ v = v_new; // update
+ w = w_new; // update
+// const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT
+ v_new.noalias() = mat*w - beta*v_old; // compute v_new
+ const RealScalar alpha = v_new.dot(w);
+ v_new -= alpha*v; // overwrite v_new
+ w_new = precond.solve(v_new); // overwrite w_new
+ beta_new2 = v_new.dot(w_new); // compute beta_new
+ eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+ beta_new = sqrt(beta_new2); // compute beta_new
+ v_new /= beta_new; // overwrite v_new for next iteration
+ w_new /= beta_new; // overwrite w_new for next iteration
+
+ // Givens rotation
+ const RealScalar r2 =s*alpha+c*c_old*beta; // s, s_old, c and c_old are still from previous iteration
+ const RealScalar r3 =s_old*beta; // s, s_old, c and c_old are still from previous iteration
+ const RealScalar r1_hat=c*alpha-c_old*s*beta;
+ const RealScalar r1 =sqrt( std::pow(r1_hat,2) + std::pow(beta_new,2) );
+ c_old = c; // store for next iteration
+ s_old = s; // store for next iteration
+ c=r1_hat/r1; // new cosine
+ s=beta_new/r1; // new sine
+
+ // Update solution
+ p_oold = p_old;
+// const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT
+ p_old = p;
+ p.noalias()=(w-r2*p_old-r3*p_oold) /r1; // IS NOALIAS REQUIRED?
+ x += beta_one*c*eta*p;
+
+ /* Update the squared residual. Note that this is the estimated residual.
+ The real residual |Ax-b|^2 may be slightly larger */
+ residualNorm2 *= s*s;
+
+ if ( residualNorm2 < threshold2)
+ {
+ break;
+ }
+
+ eta=-s*eta; // update eta
+ iters++; // increment iteration number (for output purposes)
+ }
+
+ /* Compute error. Note that this is the estimated error. The real
+ error |Ax-b|/|b| may be slightly larger */
+ tol_error = std::sqrt(residualNorm2 / rhsNorm2);
+ }
+
+ }
+
+ template< typename _MatrixType, int _UpLo=Lower,
+ typename _Preconditioner = IdentityPreconditioner>
+// typename _Preconditioner = IdentityPreconditioner<typename _MatrixType::Scalar> > // preconditioner must be positive definite
+ class MINRES;
+
+ namespace internal {
+
+ template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+ struct traits<MINRES<_MatrixType,_UpLo,_Preconditioner> >
+ {
+ typedef _MatrixType MatrixType;
+ typedef _Preconditioner Preconditioner;
+ };
+
+ }
+
+ /** \ingroup IterativeLinearSolvers_Module
+ * \brief A minimal residual solver for sparse symmetric problems
+ *
+  * This class allows solving sparse linear problems of the form A.x = b using the MINRES algorithm
+ * of Paige and Saunders (1975). The sparse matrix A must be symmetric (possibly indefinite).
+ * The vectors x and b can be either dense or sparse.
+ *
+  * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+  * \tparam _Preconditioner the type of the preconditioner. Default is IdentityPreconditioner
+ *
+ * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+ * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
+ * and NumTraits<Scalar>::epsilon() for the tolerance.
+ *
+  * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
+ * \code
+ * int n = 10000;
+ * VectorXd x(n), b(n);
+ * SparseMatrix<double> A(n,n);
+ * // fill A and b
+ * MINRES<SparseMatrix<double> > mr;
+ * mr.compute(A);
+ * x = mr.solve(b);
+ * std::cout << "#iterations: " << mr.iterations() << std::endl;
+ * std::cout << "estimated error: " << mr.error() << std::endl;
+ * // update b, and solve again
+ * x = mr.solve(b);
+ * \endcode
+ *
+ * By default the iterations start with x=0 as an initial guess of the solution.
+ * One can control the start using the solveWithGuess() method.
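+  * When both triangular parts of the matrix are stored, Lower|Upper can be passed
+  * as _UpLo so that the full matrix is used directly instead of a self-adjoint view.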
+ *
+ * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ */
+ template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+ class MINRES : public IterativeSolverBase<MINRES<_MatrixType,_UpLo,_Preconditioner> >
+ {
+
+ typedef IterativeSolverBase<MINRES> Base;
+ using Base::mp_matrix;
+ using Base::m_error;
+ using Base::m_iterations;
+ using Base::m_info;
+ using Base::m_isInitialized;
+ public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef _Preconditioner Preconditioner;
+
+ enum {UpLo = _UpLo};
+
+ public:
+
+ /** Default constructor. */
+ MINRES() : Base() {}
+
+ /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+ *
+ * This constructor is a shortcut for the default constructor followed
+ * by a call to compute().
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ MINRES(const MatrixType& A) : Base(A) {}
+
+ /** Destructor. */
+ ~MINRES(){}
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+          * and \a x0 as an initial solution.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs,typename Guess>
+ inline const internal::solve_retval_with_guess<MINRES, Rhs, Guess>
+ solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+ {
+ eigen_assert(m_isInitialized && "MINRES is not initialized.");
+ eigen_assert(Base::rows()==b.rows()
+ && "MINRES::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval_with_guess
+ <MINRES, Rhs, Guess>(*this, b.derived(), x0);
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solveWithGuess(const Rhs& b, Dest& x) const
+ {
+ typedef typename internal::conditional<UpLo==(Lower|Upper),
+ const MatrixType&,
+ SparseSelfAdjointView<const MatrixType, UpLo>
+ >::type MatrixWrapperType;
+
+ m_iterations = Base::maxIterations();
+ m_error = Base::m_tolerance;
+
+ for(int j=0; j<b.cols(); ++j)
+ {
+ m_iterations = Base::maxIterations();
+ m_error = Base::m_tolerance;
+
+ typename Dest::ColXpr xj(x,j);
+ internal::minres(MatrixWrapperType(*mp_matrix), b.col(j), xj,
+ Base::m_preconditioner, m_iterations, m_error);
+ }
+
+ m_isInitialized = true;
+ m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x.setZero();
+ _solveWithGuess(b,x);
+ }
+
+ protected:
+
+ };
+
+ namespace internal {
+
+ template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
+ struct solve_retval<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+ : solve_retval_base<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+ {
+ typedef MINRES<_MatrixType,_UpLo,_Preconditioner> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+ };
+
+ } // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MINRES_H
+
diff --git a/unsupported/Eigen/src/IterativeSolvers/Scaling.h b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
new file mode 100644
index 0000000..4fd4392
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
@@ -0,0 +1,185 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ITERSCALING_H
+#define EIGEN_ITERSCALING_H
+/**
+ * \ingroup IterativeSolvers_Module
+ * \brief Iterative scaling algorithm to equilibrate the row and column norms of a matrix
+ *
+ * This class can be used as a preprocessing tool to accelerate the convergence of iterative methods.
+ *
+ * This feature is also useful to limit the amount of pivoting during an LU/ILU factorization.
+ * The scaling strategy presented here preserves the symmetry of the problem.
+ * NOTE It is assumed that the matrix does not have any empty row or column.
+ *
+ * Example with key steps
+ * \code
+ * VectorXd x(n), b(n);
+ * SparseMatrix<double> A;
+ * // fill A and b;
+ * IterScaling<SparseMatrix<double> > scal;
+ * // Compute the left and right scaling vectors. The matrix is equilibrated at output
+ * scal.computeRef(A);
+ * // Scale the right hand side
+ * b = scal.LeftScaling().cwiseProduct(b);
+ * // Now, solve the equilibrated linear system with any available solver
+ *
+ * // Scale back the computed solution
+ * x = scal.RightScaling().cwiseProduct(x);
+ * \endcode
+ *
+ * \tparam _MatrixType the type of the matrix. It should be a real square sparse matrix.
+ *
+ * References : D. Ruiz and B. Ucar, A Symmetry Preserving Algorithm for Matrix Scaling, INRIA Research report RR-7552
+ *
+ * \sa \ref IncompleteLUT
+ */
+namespace Eigen {
+using std::abs;
+template<typename _MatrixType>
+class IterScaling
+{
+ public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+
+ public:
+ IterScaling() { init(); }
+
+ IterScaling(const MatrixType& matrix)
+ {
+ init();
+ compute(matrix);
+ }
+
+ ~IterScaling() { }
+
+ /**
+ * Compute the left and right diagonal matrices to scale the input matrix @p mat
+ *
+ * FIXME This algorithm will be modified such that the diagonal elements are permuted on the diagonal.
+ *
+ * \sa LeftScaling() RightScaling()
+ */
+ void compute (const MatrixType& mat)
+ {
+ int m = mat.rows();
+ int n = mat.cols();
+      eigen_assert((m>0 && m == n) && "Please give a square, non-empty matrix");
+ m_left.resize(m);
+ m_right.resize(n);
+ m_left.setOnes();
+ m_right.setOnes();
+ m_matrix = mat;
+ VectorXd Dr, Dc, DrRes, DcRes; // Temporary Left and right scaling vectors
+ Dr.resize(m); Dc.resize(n);
+ DrRes.resize(m); DcRes.resize(n);
+ double EpsRow = 1.0, EpsCol = 1.0;
+ int its = 0;
+ do
+ { // Iterate until the infinite norm of each row and column is approximately 1
+ // Get the maximum value in each row and column
+ Dr.setZero(); Dc.setZero();
+ for (int k=0; k<m_matrix.outerSize(); ++k)
+ {
+ for (typename MatrixType::InnerIterator it(m_matrix, k); it; ++it)
+ {
+ if ( Dr(it.row()) < abs(it.value()) )
+ Dr(it.row()) = abs(it.value());
+
+ if ( Dc(it.col()) < abs(it.value()) )
+ Dc(it.col()) = abs(it.value());
+ }
+ }
+ for (int i = 0; i < m; ++i)
+ {
+ Dr(i) = std::sqrt(Dr(i));
+ Dc(i) = std::sqrt(Dc(i));
+ }
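+        // Taking square roots splits each correction between the row and column
+        // scalings, which is what lets the iteration preserve symmetry (Ruiz & Ucar).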
+ // Save the scaling factors
+ for (int i = 0; i < m; ++i)
+ {
+ m_left(i) /= Dr(i);
+ m_right(i) /= Dc(i);
+ }
+ // Scale the rows and the columns of the matrix
+ DrRes.setZero(); DcRes.setZero();
+ for (int k=0; k<m_matrix.outerSize(); ++k)
+ {
+ for (typename MatrixType::InnerIterator it(m_matrix, k); it; ++it)
+ {
+ it.valueRef() = it.value()/( Dr(it.row()) * Dc(it.col()) );
+ // Accumulate the norms of the row and column vectors
+ if ( DrRes(it.row()) < abs(it.value()) )
+ DrRes(it.row()) = abs(it.value());
+
+ if ( DcRes(it.col()) < abs(it.value()) )
+ DcRes(it.col()) = abs(it.value());
+ }
+ }
+ DrRes.array() = (1-DrRes.array()).abs();
+ EpsRow = DrRes.maxCoeff();
+ DcRes.array() = (1-DcRes.array()).abs();
+ EpsCol = DcRes.maxCoeff();
+ its++;
+ }while ( (EpsRow >m_tol || EpsCol > m_tol) && (its < m_maxits) );
+ m_isInitialized = true;
+ }
+  /** Compute the left and right scaling vectors; on output,
+   * the input matrix is scaled in place with the computed vectors.
+ *
+ * \sa compute()
+ */
+ void computeRef (MatrixType& mat)
+ {
+ compute (mat);
+ mat = m_matrix;
+ }
+ /** Get the vector to scale the rows of the matrix
+ */
+ VectorXd& LeftScaling()
+ {
+ return m_left;
+ }
+
+ /** Get the vector to scale the columns of the matrix
+ */
+ VectorXd& RightScaling()
+ {
+ return m_right;
+ }
+
+ /** Set the tolerance for the convergence of the iterative scaling algorithm
+ */
+ void setTolerance(double tol)
+ {
+ m_tol = tol;
+ }
+
+ protected:
+
+ void init()
+ {
+ m_tol = 1e-10;
+ m_maxits = 5;
+ m_isInitialized = false;
+ }
+
+ MatrixType m_matrix;
+ mutable ComputationInfo m_info;
+ bool m_isInitialized;
+ VectorXd m_left; // Left scaling vector
+    VectorXd m_right; // Right scaling vector
+ double m_tol;
+ int m_maxits; // Maximum number of iterations allowed
+};
+}
+#endif
diff --git a/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt b/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt
new file mode 100644
index 0000000..4daefeb
--- /dev/null
+++ b/unsupported/Eigen/src/KroneckerProduct/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_KroneckerProduct_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_KroneckerProduct_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/KroneckerProduct COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
new file mode 100644
index 0000000..532896c
--- /dev/null
+++ b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
@@ -0,0 +1,244 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Kolja Brix <brix@igpm.rwth-aachen.de>
+// Copyright (C) 2011 Andreas Platen <andiplaten@gmx.de>
+// Copyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef KRONECKER_TENSOR_PRODUCT_H
+#define KRONECKER_TENSOR_PRODUCT_H
+
+namespace Eigen {
+
+template<typename Scalar, int Options, typename Index> class SparseMatrix;
+
+/*!
+ * \brief Kronecker tensor product helper class for dense matrices
+ *
+ * This class is the return value of kroneckerProduct(MatrixBase,
+ * MatrixBase). Use the function rather than construct this class
+ * directly to avoid specifying template parameters.
+ *
+ * \tparam Lhs Type of the left-hand side, a matrix expression.
+ * \tparam Rhs  Type of the right-hand side, a matrix expression.
+ */
+template<typename Lhs, typename Rhs>
+class KroneckerProduct : public ReturnByValue<KroneckerProduct<Lhs,Rhs> >
+{
+ private:
+ typedef ReturnByValue<KroneckerProduct> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::Index Index;
+
+ public:
+ /*! \brief Constructor. */
+ KroneckerProduct(const Lhs& A, const Rhs& B)
+ : m_A(A), m_B(B)
+ {}
+
+ /*! \brief Evaluate the Kronecker tensor product. */
+ template<typename Dest> void evalTo(Dest& dst) const;
+
+ inline Index rows() const { return m_A.rows() * m_B.rows(); }
+ inline Index cols() const { return m_A.cols() * m_B.cols(); }
+
+ Scalar coeff(Index row, Index col) const
+ {
+ return m_A.coeff(row / m_B.rows(), col / m_B.cols()) *
+ m_B.coeff(row % m_B.rows(), col % m_B.cols());
+ }
+
+ Scalar coeff(Index i) const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(KroneckerProduct);
+      return m_A.coeff(i / m_B.size()) * m_B.coeff(i % m_B.size());
+ }
+
+ private:
+ typename Lhs::Nested m_A;
+ typename Rhs::Nested m_B;
+};
+
+/*!
+ * \brief Kronecker tensor product helper class for sparse matrices
+ *
+ * If at least one of the operands is a sparse matrix expression,
+ * then this class is returned and evaluates into a sparse matrix.
+ *
+ * This class is the return value of kroneckerProduct(EigenBase,
+ * EigenBase). Use the function rather than construct this class
+ * directly to avoid specifying template parameters.
+ *
+ * \tparam Lhs Type of the left-hand side, a matrix expression.
+ * \tparam Rhs  Type of the right-hand side, a matrix expression.
+ */
+template<typename Lhs, typename Rhs>
+class KroneckerProductSparse : public EigenBase<KroneckerProductSparse<Lhs,Rhs> >
+{
+ private:
+ typedef typename internal::traits<KroneckerProductSparse>::Index Index;
+
+ public:
+ /*! \brief Constructor. */
+ KroneckerProductSparse(const Lhs& A, const Rhs& B)
+ : m_A(A), m_B(B)
+ {}
+
+ /*! \brief Evaluate the Kronecker tensor product. */
+ template<typename Dest> void evalTo(Dest& dst) const;
+
+ inline Index rows() const { return m_A.rows() * m_B.rows(); }
+ inline Index cols() const { return m_A.cols() * m_B.cols(); }
+
+ template<typename Scalar, int Options, typename Index>
+ operator SparseMatrix<Scalar, Options, Index>()
+ {
+ SparseMatrix<Scalar, Options, Index> result;
+ evalTo(result.derived());
+ return result;
+ }
+
+ private:
+ typename Lhs::Nested m_A;
+ typename Rhs::Nested m_B;
+};
+
+template<typename Lhs, typename Rhs>
+template<typename Dest>
+void KroneckerProduct<Lhs,Rhs>::evalTo(Dest& dst) const
+{
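+  // Fill dst block-wise: the (i,j) block of size Br x Bc equals m_A(i,j) * m_B.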
+ const int BlockRows = Rhs::RowsAtCompileTime,
+ BlockCols = Rhs::ColsAtCompileTime;
+ const Index Br = m_B.rows(),
+ Bc = m_B.cols();
+ for (Index i=0; i < m_A.rows(); ++i)
+ for (Index j=0; j < m_A.cols(); ++j)
+ Block<Dest,BlockRows,BlockCols>(dst,i*Br,j*Bc,Br,Bc) = m_A.coeff(i,j) * m_B;
+}
+
+template<typename Lhs, typename Rhs>
+template<typename Dest>
+void KroneckerProductSparse<Lhs,Rhs>::evalTo(Dest& dst) const
+{
+ const Index Br = m_B.rows(),
+ Bc = m_B.cols();
+ dst.resize(rows(),cols());
+ dst.resizeNonZeros(0);
+ dst.reserve(m_A.nonZeros() * m_B.nonZeros());
+
+ for (Index kA=0; kA < m_A.outerSize(); ++kA)
+ {
+ for (Index kB=0; kB < m_B.outerSize(); ++kB)
+ {
+ for (typename Lhs::InnerIterator itA(m_A,kA); itA; ++itA)
+ {
+ for (typename Rhs::InnerIterator itB(m_B,kB); itB; ++itB)
+ {
+ const Index i = itA.row() * Br + itB.row(),
+ j = itA.col() * Bc + itB.col();
+ dst.insert(i,j) = itA.value() * itB.value();
+ }
+ }
+ }
+ }
+}
+
+namespace internal {
+
+template<typename _Lhs, typename _Rhs>
+struct traits<KroneckerProduct<_Lhs,_Rhs> >
+{
+ typedef typename remove_all<_Lhs>::type Lhs;
+ typedef typename remove_all<_Rhs>::type Rhs;
+ typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+
+ enum {
+ Rows = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
+ Cols = size_at_compile_time<traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime>::ret,
+ MaxRows = size_at_compile_time<traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime>::ret,
+ MaxCols = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret,
+ CoeffReadCost = Lhs::CoeffReadCost + Rhs::CoeffReadCost + NumTraits<Scalar>::MulCost
+ };
+
+ typedef Matrix<Scalar,Rows,Cols> ReturnType;
+};
+
+template<typename _Lhs, typename _Rhs>
+struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
+{
+ typedef MatrixXpr XprKind;
+ typedef typename remove_all<_Lhs>::type Lhs;
+ typedef typename remove_all<_Rhs>::type Rhs;
+ typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+ typedef typename promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename Lhs::Index, typename Rhs::Index>::type Index;
+
+ enum {
+ LhsFlags = Lhs::Flags,
+ RhsFlags = Rhs::Flags,
+
+ RowsAtCompileTime = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
+ ColsAtCompileTime = size_at_compile_time<traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime>::ret,
+ MaxRowsAtCompileTime = size_at_compile_time<traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime>::ret,
+ MaxColsAtCompileTime = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret,
+
+ EvalToRowMajor = (LhsFlags & RhsFlags & RowMajorBit),
+ RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
+
+ Flags = ((LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
+ | EvalBeforeNestingBit | EvalBeforeAssigningBit,
+ CoeffReadCost = Dynamic
+ };
+};
+
+} // end namespace internal
+
+/*!
+ * \ingroup KroneckerProduct_Module
+ *
+ * Computes Kronecker tensor product of two dense matrices
+ *
+ * \warning If you want to replace a matrix by its Kronecker product
+ * with some matrix, do \b NOT do this:
+ * \code
+ * A = kroneckerProduct(A,B); // bug!!! caused by aliasing effect
+ * \endcode
+ * instead, use eval() to work around this:
+ * \code
+ * A = kroneckerProduct(A,B).eval();
+ * \endcode
+ *
+ * \param a Dense matrix a
+ * \param b Dense matrix b
+ * \return Kronecker tensor product of a and b
+ */
+template<typename A, typename B>
+KroneckerProduct<A,B> kroneckerProduct(const MatrixBase<A>& a, const MatrixBase<B>& b)
+{
+ return KroneckerProduct<A, B>(a.derived(), b.derived());
+}
+
+/*!
+ * \ingroup KroneckerProduct_Module
+ *
+ * Computes Kronecker tensor product of two matrices, at least one of
+ * which is sparse
+ *
+ * \param a Dense/sparse matrix a
+ * \param b Dense/sparse matrix b
+ * \return Kronecker tensor product of a and b, stored in a sparse
+ * matrix
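+ *
+ * A minimal usage sketch (assuming SparseMatrix<double> matrices A and B):
+ * \code
+ * SparseMatrix<double> C = kroneckerProduct(A, B);
+ * \endcode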
+ */
+template<typename A, typename B>
+KroneckerProductSparse<A,B> kroneckerProduct(const EigenBase<A>& a, const EigenBase<B>& b)
+{
+ return KroneckerProductSparse<A,B>(a.derived(), b.derived());
+}
+
+} // end namespace Eigen
+
+#endif // KRONECKER_TENSOR_PRODUCT_H
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt b/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt
new file mode 100644
index 0000000..d969085
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_LevenbergMarquardt_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_LevenbergMarquardt_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/LevenbergMarquardt COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt b/unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt
new file mode 100644
index 0000000..ae7984d
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt
@@ -0,0 +1,52 @@
+Minpack Copyright Notice (1999) University of Chicago. All rights reserved
+
+Redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the
+following conditions are met:
+
+1. Redistributions of source code must retain the above
+copyright notice, this list of conditions and the following
+disclaimer.
+
+2. Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following
+disclaimer in the documentation and/or other materials
+provided with the distribution.
+
+3. The end-user documentation included with the
+redistribution, if any, must include the following
+acknowledgment:
+
+ "This product includes software developed by the
+ University of Chicago, as Operator of Argonne National
+ Laboratory.
+
+Alternately, this acknowledgment may appear in the software
+itself, if and wherever such third-party acknowledgments
+normally appear.
+
+4. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+BE CORRECTED.
+
+5. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+(INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+POSSIBILITY OF SUCH LOSS OR DAMAGES.
+
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h b/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h
new file mode 100644
index 0000000..32d3ad5
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h
@@ -0,0 +1,85 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This code initially comes from MINPACK whose original authors are:
+// Copyright Jorge More - Argonne National Laboratory
+// Copyright Burt Garbow - Argonne National Laboratory
+// Copyright Ken Hillstrom - Argonne National Laboratory
+//
+// This Source Code Form is subject to the terms of the Minpack license
+// (a BSD-like license) described in the accompanying CopyrightMINPACK.txt file.
+
+#ifndef EIGEN_LMCOVAR_H
+#define EIGEN_LMCOVAR_H
+
+namespace Eigen {
+
+namespace internal {
+
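+/** \internal Computes, in place, the covariance matrix of a least-squares solution
+  * from the upper triangular QR factor \a r and the column permutation \a ipvt
+  * (adapted from MINPACK's covar routine). Diagonal entries of \a r whose magnitude
+  * falls below \a tol * |r(0,0)| are treated as singular.
+  */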
+template <typename Scalar>
+void covar(
+ Matrix< Scalar, Dynamic, Dynamic > &r,
+ const VectorXi& ipvt,
+ Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon()) )
+{
+ using std::abs;
+ typedef DenseIndex Index;
+ /* Local variables */
+ Index i, j, k, l, ii, jj;
+ bool sing;
+ Scalar temp;
+
+ /* Function Body */
+ const Index n = r.cols();
+ const Scalar tolr = tol * abs(r(0,0));
+ Matrix< Scalar, Dynamic, 1 > wa(n);
+ eigen_assert(ipvt.size()==n);
+
+ /* form the inverse of r in the full upper triangle of r. */
+ l = -1;
+ for (k = 0; k < n; ++k)
+ if (abs(r(k,k)) > tolr) {
+ r(k,k) = 1. / r(k,k);
+ for (j = 0; j <= k-1; ++j) {
+ temp = r(k,k) * r(j,k);
+ r(j,k) = 0.;
+ r.col(k).head(j+1) -= r.col(j).head(j+1) * temp;
+ }
+ l = k;
+ }
+
+ /* form the full upper triangle of the inverse of (r transpose)*r */
+ /* in the full upper triangle of r. */
+ for (k = 0; k <= l; ++k) {
+ for (j = 0; j <= k-1; ++j)
+ r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k);
+ r.col(k).head(k+1) *= r(k,k);
+ }
+
+ /* form the full lower triangle of the covariance matrix */
+ /* in the strict lower triangle of r and in wa. */
+ for (j = 0; j < n; ++j) {
+ jj = ipvt[j];
+ sing = j > l;
+ for (i = 0; i <= j; ++i) {
+ if (sing)
+ r(i,j) = 0.;
+ ii = ipvt[i];
+ if (ii > jj)
+ r(ii,jj) = r(i,j);
+ if (ii < jj)
+ r(jj,ii) = r(i,j);
+ }
+ wa[jj] = r(j,j);
+ }
+
+ /* symmetrize the covariance matrix in r. */
+ r.topLeftCorner(n,n).template triangularView<StrictlyUpper>() = r.topLeftCorner(n,n).transpose();
+ r.diagonal() = wa;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_LMCOVAR_H
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h b/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h
new file mode 100644
index 0000000..25b32ec
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h
@@ -0,0 +1,202 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+//
+// This code initially comes from MINPACK whose original authors are:
+// Copyright Jorge More - Argonne National Laboratory
+// Copyright Burt Garbow - Argonne National Laboratory
+// Copyright Ken Hillstrom - Argonne National Laboratory
+//
+// This Source Code Form is subject to the terms of the Minpack license
+// (a BSD-like license) described in the accompanying CopyrightMINPACK.txt file.
+
+#ifndef EIGEN_LMONESTEP_H
+#define EIGEN_LMONESTEP_H
+
+namespace Eigen {
+
+template<typename FunctorType>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType>::minimizeOneStep(FVectorType &x)
+{
+ using std::abs;
+ using std::sqrt;
+ RealScalar temp, temp1,temp2;
+ RealScalar ratio;
+ RealScalar pnorm, xnorm, fnorm1, actred, dirder, prered;
+ eigen_assert(x.size()==n); // check the caller is not cheating us
+
+ temp = 0.0; xnorm = 0.0;
+ /* calculate the jacobian matrix. */
+ Index df_ret = m_functor.df(x, m_fjac);
+ if (df_ret<0)
+ return LevenbergMarquardtSpace::UserAsked;
+ if (df_ret>0)
+ // numerical diff, we evaluated the function df_ret times
+ m_nfev += df_ret;
+ else m_njev++;
+
+ /* compute the qr factorization of the jacobian. */
+ for (int j = 0; j < x.size(); ++j)
+ m_wa2(j) = m_fjac.col(j).blueNorm();
+ QRSolver qrfac(m_fjac);
+ if(qrfac.info() != Success) {
+ m_info = NumericalIssue;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+ }
+ // Make a copy of the first factor with the associated permutation
+ m_rfactor = qrfac.matrixR();
+ m_permutation = (qrfac.colsPermutation());
+
+ /* on the first iteration and if external scaling is not used, scale according */
+ /* to the norms of the columns of the initial jacobian. */
+ if (m_iter == 1) {
+ if (!m_useExternalScaling)
+ for (Index j = 0; j < n; ++j)
+ m_diag[j] = (m_wa2[j]==0.)? 1. : m_wa2[j];
+
+ /* on the first iteration, calculate the norm of the scaled x */
+ /* and initialize the step bound m_delta. */
+ xnorm = m_diag.cwiseProduct(x).stableNorm();
+ m_delta = m_factor * xnorm;
+ if (m_delta == 0.)
+ m_delta = m_factor;
+ }
+
+ /* form (q transpose)*m_fvec and store the first n components in */
+ /* m_qtf. */
+ m_wa4 = m_fvec;
+ m_wa4 = qrfac.matrixQ().adjoint() * m_fvec;
+ m_qtf = m_wa4.head(n);
+
+ /* compute the norm of the scaled gradient. */
+ m_gnorm = 0.;
+ if (m_fnorm != 0.)
+ for (Index j = 0; j < n; ++j)
+ if (m_wa2[m_permutation.indices()[j]] != 0.)
+ m_gnorm = (std::max)(m_gnorm, abs( m_rfactor.col(j).head(j+1).dot(m_qtf.head(j+1)/m_fnorm) / m_wa2[m_permutation.indices()[j]]));
+
+ /* test for convergence of the gradient norm. */
+ if (m_gnorm <= m_gtol) {
+ m_info = Success;
+ return LevenbergMarquardtSpace::CosinusTooSmall;
+ }
+
+ /* rescale if necessary. */
+ if (!m_useExternalScaling)
+ m_diag = m_diag.cwiseMax(m_wa2);
+
+ do {
+ /* determine the levenberg-marquardt parameter. */
+ internal::lmpar2(qrfac, m_diag, m_qtf, m_delta, m_par, m_wa1);
+
+ /* store the direction p and x + p. calculate the norm of p. */
+ m_wa1 = -m_wa1;
+ m_wa2 = x + m_wa1;
+ pnorm = m_diag.cwiseProduct(m_wa1).stableNorm();
+
+ /* on the first iteration, adjust the initial step bound. */
+ if (m_iter == 1)
+ m_delta = (std::min)(m_delta,pnorm);
+
+ /* evaluate the function at x + p and calculate its norm. */
+ if ( m_functor(m_wa2, m_wa4) < 0)
+ return LevenbergMarquardtSpace::UserAsked;
+ ++m_nfev;
+ fnorm1 = m_wa4.stableNorm();
+
+ /* compute the scaled actual reduction. */
+ actred = -1.;
+ if (Scalar(.1) * fnorm1 < m_fnorm)
+ actred = 1. - numext::abs2(fnorm1 / m_fnorm);
+
+ /* compute the scaled predicted reduction and */
+ /* the scaled directional derivative. */
+ m_wa3 = m_rfactor.template triangularView<Upper>() * (m_permutation.inverse() *m_wa1);
+ temp1 = numext::abs2(m_wa3.stableNorm() / m_fnorm);
+ temp2 = numext::abs2(sqrt(m_par) * pnorm / m_fnorm);
+ prered = temp1 + temp2 / Scalar(.5);
+ dirder = -(temp1 + temp2);
+
+ /* compute the ratio of the actual to the predicted */
+ /* reduction. */
+ ratio = 0.;
+ if (prered != 0.)
+ ratio = actred / prered;
+
+ /* update the step bound. */
+ if (ratio <= Scalar(.25)) {
+ if (actred >= 0.)
+ temp = RealScalar(.5);
+ if (actred < 0.)
+ temp = RealScalar(.5) * dirder / (dirder + RealScalar(.5) * actred);
+ if (RealScalar(.1) * fnorm1 >= m_fnorm || temp < RealScalar(.1))
+ temp = RealScalar(.1);
+ /* Computing MIN */
+ m_delta = temp * (std::min)(m_delta, pnorm / RealScalar(.1));
+ m_par /= temp;
+ } else if (!(m_par != 0. && ratio < RealScalar(.75))) {
+ m_delta = pnorm / RealScalar(.5);
+ m_par = RealScalar(.5) * m_par;
+ }
+
+ /* test for successful iteration. */
+ if (ratio >= RealScalar(1e-4)) {
+ /* successful iteration. update x, m_fvec, and their norms. */
+ x = m_wa2;
+ m_wa2 = m_diag.cwiseProduct(x);
+ m_fvec = m_wa4;
+ xnorm = m_wa2.stableNorm();
+ m_fnorm = fnorm1;
+ ++m_iter;
+ }
+
+ /* tests for convergence. */
+ if (abs(actred) <= m_ftol && prered <= m_ftol && Scalar(.5) * ratio <= 1. && m_delta <= m_xtol * xnorm)
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
+ }
+ if (abs(actred) <= m_ftol && prered <= m_ftol && Scalar(.5) * ratio <= 1.)
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::RelativeReductionTooSmall;
+ }
+ if (m_delta <= m_xtol * xnorm)
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::RelativeErrorTooSmall;
+ }
+
+ /* tests for termination and stringent tolerances. */
+ if (m_nfev >= m_maxfev)
+ {
+ m_info = NoConvergence;
+ return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
+ }
+ if (abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::FtolTooSmall;
+ }
+ if (m_delta <= NumTraits<Scalar>::epsilon() * xnorm)
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::XtolTooSmall;
+ }
+ if (m_gnorm <= NumTraits<Scalar>::epsilon())
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::GtolTooSmall;
+ }
+
+ } while (ratio < Scalar(1e-4));
+
+ return LevenbergMarquardtSpace::Running;
+}
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_LMONESTEP_H
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h b/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h
new file mode 100644
index 0000000..9532042
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h
@@ -0,0 +1,160 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This code initially comes from MINPACK whose original authors are:
+// Copyright Jorge More - Argonne National Laboratory
+// Copyright Burt Garbow - Argonne National Laboratory
+// Copyright Ken Hillstrom - Argonne National Laboratory
+//
+// This Source Code Form is subject to the terms of the Minpack license
+// (a BSD-like license) described in the accompanying CopyrightMINPACK.txt file.
+
+#ifndef EIGEN_LMPAR_H
+#define EIGEN_LMPAR_H
+
+namespace Eigen {
+
+namespace internal {
+
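+ // Determines the Levenberg-Marquardt parameter `par' such that the
+ // solution x of the damped normal equations
+ //   (J^T J + par * D^T D) x = J^T b
+ // approximately satisfies the trust-region constraint |D x| ~= m_delta
+ // (MINPACK's lmpar). qr holds a rank-revealing QR factorization of the
+ // Jacobian J, diag the scaling D, and qtb the first n entries of Q^T b.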
+ template <typename QRSolver, typename VectorType>
+ void lmpar2(
+ const QRSolver &qr,
+ const VectorType &diag,
+ const VectorType &qtb,
+ typename VectorType::Scalar m_delta,
+ typename VectorType::Scalar &par,
+ VectorType &x)
+
+ {
+ using std::sqrt;
+ using std::abs;
+ typedef typename QRSolver::MatrixType MatrixType;
+ typedef typename QRSolver::Scalar Scalar;
+ typedef typename QRSolver::Index Index;
+
+ /* Local variables */
+ Index j;
+ Scalar fp;
+ Scalar parc, parl;
+ Index iter;
+ Scalar temp, paru;
+ Scalar gnorm;
+ Scalar dxnorm;
+
+ // Make a copy of the triangular factor.
+ // This copy is modified during the call to qrsolv
+ MatrixType s;
+ s = qr.matrixR();
+
+ /* Function Body */
+ const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
+ const Index n = qr.matrixR().cols();
+ eigen_assert(n==diag.size());
+ eigen_assert(n==qtb.size());
+
+ VectorType wa1, wa2;
+
+ /* compute and store in x the gauss-newton direction. if the */
+ /* jacobian is rank-deficient, obtain a least squares solution. */
+
+ // const Index rank = qr.nonzeroPivots(); // exactly double(0.)
+ const Index rank = qr.rank(); // use a threshold
+ wa1 = qtb;
+ wa1.tail(n-rank).setZero();
+ //FIXME There is no solve in place for sparse triangularView
+ wa1.head(rank) = s.topLeftCorner(rank,rank).template triangularView<Upper>().solve(qtb.head(rank));
+
+ x = qr.colsPermutation()*wa1;
+
+ /* initialize the iteration counter. */
+ /* evaluate the function at the origin, and test */
+ /* for acceptance of the gauss-newton direction. */
+ iter = 0;
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ fp = dxnorm - m_delta;
+ if (fp <= Scalar(0.1) * m_delta) {
+ par = 0;
+ return;
+ }
+
+ /* if the jacobian is not rank deficient, the newton */
+ /* step provides a lower bound, parl, for the zero of */
+ /* the function. otherwise set this bound to zero. */
+ parl = 0.;
+ if (rank==n) {
+ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm;
+ s.topLeftCorner(n,n).transpose().template triangularView<Lower>().solveInPlace(wa1);
+ temp = wa1.blueNorm();
+ parl = fp / m_delta / temp / temp;
+ }
+
+ /* calculate an upper bound, paru, for the zero of the function. */
+ for (j = 0; j < n; ++j)
+ wa1[j] = s.col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)];
+
+ gnorm = wa1.stableNorm();
+ paru = gnorm / m_delta;
+ if (paru == 0.)
+ paru = dwarf / (std::min)(m_delta,Scalar(0.1));
+
+ /* if the input par lies outside of the interval (parl,paru), */
+ /* set par to the closer endpoint. */
+ par = (std::max)(par,parl);
+ par = (std::min)(par,paru);
+ if (par == 0.)
+ par = gnorm / dxnorm;
+
+ /* beginning of an iteration. */
+ while (true) {
+ ++iter;
+
+ /* evaluate the function at the current value of par. */
+ if (par == 0.)
+ par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
+ wa1 = sqrt(par)* diag;
+
+ VectorType sdiag(n);
+ lmqrsolv(s, qr.colsPermutation(), wa1, qtb, x, sdiag);
+
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ temp = fp;
+ fp = dxnorm - m_delta;
+
+ /* if the function is small enough, accept the current value */
+ /* of par. also test for the exceptional cases where parl */
+ /* is zero or the number of iterations has reached 10. */
+ if (abs(fp) <= Scalar(0.1) * m_delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10)
+ break;
+
+ /* compute the newton correction. */
+ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm);
+ // we could almost use this here, but the diagonal is outside qr, in sdiag[]
+ for (j = 0; j < n; ++j) {
+ wa1[j] /= sdiag[j];
+ temp = wa1[j];
+ for (Index i = j+1; i < n; ++i)
+ wa1[i] -= s.coeff(i,j) * temp;
+ }
+ temp = wa1.blueNorm();
+ parc = fp / m_delta / temp / temp;
+
+ /* depending on the sign of the function, update parl or paru. */
+ if (fp > 0.)
+ parl = (std::max)(parl,par);
+ if (fp < 0.)
+ paru = (std::min)(paru,par);
+
+ /* compute an improved estimate for par. */
+ par = (std::max)(parl,par+parc);
+ }
+ if (iter == 0)
+ par = 0.;
+ return;
+ }
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_LMPAR_H
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
new file mode 100644
index 0000000..f5290de
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
@@ -0,0 +1,189 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+// Copyright (C) 2012 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+//
+// This code initially comes from MINPACK whose original authors are:
+// Copyright Jorge More - Argonne National Laboratory
+// Copyright Burt Garbow - Argonne National Laboratory
+// Copyright Ken Hillstrom - Argonne National Laboratory
+//
+// This Source Code Form is subject to the terms of the Minpack license
+// (a BSD-like license) described in the accompanying CopyrightMINPACK.txt file.
+
+#ifndef EIGEN_LMQRSOLV_H
+#define EIGEN_LMQRSOLV_H
+
+namespace Eigen {
+
+namespace internal {
+
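+// MINPACK's qrsolv: given the factor R of a QR factorization with column
+// permutation iPerm, a diagonal matrix D (diag) and Q^T*b (qtb), computes
+// the least-squares solution x of the augmented system [A; D] x = [b; 0]
+// by eliminating the rows of D with Givens rotations. On exit, sdiag holds
+// the diagonal of the updated triangular factor S.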
+template <typename Scalar,int Rows, int Cols, typename Index>
+void lmqrsolv(
+ Matrix<Scalar,Rows,Cols> &s,
+ const PermutationMatrix<Dynamic,Dynamic,Index> &iPerm,
+ const Matrix<Scalar,Dynamic,1> &diag,
+ const Matrix<Scalar,Dynamic,1> &qtb,
+ Matrix<Scalar,Dynamic,1> &x,
+ Matrix<Scalar,Dynamic,1> &sdiag)
+{
+
+ /* Local variables */
+ Index i, j, k, l;
+ Scalar temp;
+ Index n = s.cols();
+ Matrix<Scalar,Dynamic,1> wa(n);
+ JacobiRotation<Scalar> givens;
+
+ /* Function Body */
+ // the following will only change the lower triangular part of s, including
+ // the diagonal, though the diagonal is restored afterward
+
+ /* copy r and (q transpose)*b to preserve input and initialize s. */
+ /* in particular, save the diagonal elements of r in x. */
+ x = s.diagonal();
+ wa = qtb;
+
+ s.topLeftCorner(n,n).template triangularView<StrictlyLower>() = s.topLeftCorner(n,n).transpose();
+ /* eliminate the diagonal matrix d using a givens rotation. */
+ for (j = 0; j < n; ++j) {
+
+ /* prepare the row of d to be eliminated, locating the */
+ /* diagonal element using p from the qr factorization. */
+ l = iPerm.indices()(j);
+ if (diag[l] == 0.)
+ break;
+ sdiag.tail(n-j).setZero();
+ sdiag[j] = diag[l];
+
+ /* the transformations to eliminate the row of d */
+ /* modify only a single element of (q transpose)*b */
+ /* beyond the first n, which is initially zero. */
+ Scalar qtbpj = 0.;
+ for (k = j; k < n; ++k) {
+ /* determine a givens rotation which eliminates the */
+ /* appropriate element in the current row of d. */
+ givens.makeGivens(-s(k,k), sdiag[k]);
+
+ /* compute the modified diagonal element of r and */
+ /* the modified element of ((q transpose)*b,0). */
+ s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k];
+ temp = givens.c() * wa[k] + givens.s() * qtbpj;
+ qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
+ wa[k] = temp;
+
+ /* accumulate the transformation in the row of s. */
+ for (i = k+1; i<n; ++i) {
+ temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
+ sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
+ s(i,k) = temp;
+ }
+ }
+ }
+
+ /* solve the triangular system for z. if the system is */
+ /* singular, then obtain a least squares solution. */
+ Index nsing;
+ for(nsing=0; nsing<n && sdiag[nsing]!=0; nsing++) {}
+
+ wa.tail(n-nsing).setZero();
+ s.topLeftCorner(nsing, nsing).transpose().template triangularView<Upper>().solveInPlace(wa.head(nsing));
+
+ // restore
+ sdiag = s.diagonal();
+ s.diagonal() = x;
+
+ /* permute the components of z back to components of x. */
+ x = iPerm * wa;
+}
+
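+// Sparse variant of the dense routine above: the triangular factor is
+// copied into a row-major sparse matrix and traversed with InnerIterators,
+// so the Givens updates only touch stored nonzeros.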
+template <typename Scalar, int _Options, typename Index>
+void lmqrsolv(
+ SparseMatrix<Scalar,_Options,Index> &s,
+ const PermutationMatrix<Dynamic,Dynamic> &iPerm,
+ const Matrix<Scalar,Dynamic,1> &diag,
+ const Matrix<Scalar,Dynamic,1> &qtb,
+ Matrix<Scalar,Dynamic,1> &x,
+ Matrix<Scalar,Dynamic,1> &sdiag)
+{
+ /* Local variables */
+ typedef SparseMatrix<Scalar,RowMajor,Index> FactorType;
+ Index i, j, k, l;
+ Scalar temp;
+ Index n = s.cols();
+ Matrix<Scalar,Dynamic,1> wa(n);
+ JacobiRotation<Scalar> givens;
+
+ /* Function Body */
+ // the following will only change the lower triangular part of s, including
+ // the diagonal, though the diagonal is restored afterward
+
+ /* copy r and (q transpose)*b to preserve input and initialize R. */
+ wa = qtb;
+ FactorType R(s);
+ // Eliminate the diagonal matrix d using a givens rotation
+ for (j = 0; j < n; ++j)
+ {
+ // Prepare the row of d to be eliminated, locating the
+ // diagonal element using p from the qr factorization
+ l = iPerm.indices()(j);
+ if (diag(l) == Scalar(0))
+ break;
+ sdiag.tail(n-j).setZero();
+ sdiag[j] = diag[l];
+ // the transformations to eliminate the row of d
+ // modify only a single element of (q transpose)*b
+ // beyond the first n, which is initially zero.
+
+ Scalar qtbpj = 0;
+ // Browse the nonzero elements of row j of the upper triangular s
+ for (k = j; k < n; ++k)
+ {
+ typename FactorType::InnerIterator itk(R,k);
+ for (; itk; ++itk){
+ if (itk.index() < k) continue;
+ else break;
+ }
+ //At this point, we have the diagonal element R(k,k)
+ // Determine a givens rotation which eliminates
+ // the appropriate element in the current row of d
+ givens.makeGivens(-itk.value(), sdiag(k));
+
+ // Compute the modified diagonal element of r and
+ // the modified element of ((q transpose)*b,0).
+ itk.valueRef() = givens.c() * itk.value() + givens.s() * sdiag(k);
+ temp = givens.c() * wa(k) + givens.s() * qtbpj;
+ qtbpj = -givens.s() * wa(k) + givens.c() * qtbpj;
+ wa(k) = temp;
+
+ // Accumulate the transformation in the remaining k row/column of R
+ for (++itk; itk; ++itk)
+ {
+ i = itk.index();
+ temp = givens.c() * itk.value() + givens.s() * sdiag(i);
+ sdiag(i) = -givens.s() * itk.value() + givens.c() * sdiag(i);
+ itk.valueRef() = temp;
+ }
+ }
+ }
+
+ // Solve the triangular system for z. If the system is
+ // singular, then obtain a least squares solution
+ Index nsing;
+ for(nsing = 0; nsing<n && sdiag(nsing) !=0; nsing++) {}
+
+ wa.tail(n-nsing).setZero();
+ //FIXME There is no solve in place for sparse triangularView
+ wa.head(nsing) = R.topLeftCorner(nsing,nsing).template triangularView<Upper>().solve(wa.head(nsing));
+
+ sdiag = R.diagonal();
+ // Permute the components of z back to components of x
+ x = iPerm * wa;
+}
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_LMQRSOLV_H
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
new file mode 100644
index 0000000..51dd1d3
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
@@ -0,0 +1,377 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+// Copyright (C) 2012 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+//
+// The algorithm of this class initially comes from MINPACK whose original authors are:
+// Copyright Jorge More - Argonne National Laboratory
+// Copyright Burt Garbow - Argonne National Laboratory
+// Copyright Ken Hillstrom - Argonne National Laboratory
+//
+// This Source Code Form is subject to the terms of the Minpack license
+// (a BSD-like license) described in the accompanying CopyrightMINPACK.txt file.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LEVENBERGMARQUARDT_H
+#define EIGEN_LEVENBERGMARQUARDT_H
+
+
+namespace Eigen {
+namespace LevenbergMarquardtSpace {
+ enum Status {
+ NotStarted = -2,
+ Running = -1,
+ ImproperInputParameters = 0,
+ RelativeReductionTooSmall = 1,
+ RelativeErrorTooSmall = 2,
+ RelativeErrorAndReductionTooSmall = 3,
+ CosinusTooSmall = 4,
+ TooManyFunctionEvaluation = 5,
+ FtolTooSmall = 6,
+ XtolTooSmall = 7,
+ GtolTooSmall = 8,
+ UserAsked = 9
+ };
+}
+
+template <typename _Scalar, int NX=Dynamic, int NY=Dynamic>
+struct DenseFunctor
+{
+ typedef _Scalar Scalar;
+ enum {
+ InputsAtCompileTime = NX,
+ ValuesAtCompileTime = NY
+ };
+ typedef Matrix<Scalar,InputsAtCompileTime,1> InputType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,1> ValueType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,InputsAtCompileTime> JacobianType;
+ typedef ColPivHouseholderQR<JacobianType> QRSolver;
+ const int m_inputs, m_values;
+
+ DenseFunctor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {}
+ DenseFunctor(int inputs, int values) : m_inputs(inputs), m_values(values) {}
+
+ int inputs() const { return m_inputs; }
+ int values() const { return m_values; }
+
+ //int operator()(const InputType &x, ValueType& fvec) { }
+ // should be defined in derived classes
+
+ //int df(const InputType &x, JacobianType& fjac) { }
+ // should be defined in derived classes
+};
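+
+// An illustrative sketch of a user-side functor (names are hypothetical,
+// not part of the library): fitting y = a * exp(b * t) to m samples.
+//
+//   struct ExpFitFunctor : DenseFunctor<double>
+//   {
+//     Eigen::VectorXd t, y;
+//     ExpFitFunctor(const Eigen::VectorXd &t_, const Eigen::VectorXd &y_)
+//       : DenseFunctor<double>(2, int(t_.size())), t(t_), y(y_) {}
+//     int operator()(const InputType &x, ValueType &fvec) const {
+//       fvec = (x(0) * (x(1) * t.array()).exp() - y.array()).matrix();
+//       return 0; // a negative return value aborts the minimization
+//     }
+//     int df(const InputType &x, JacobianType &fjac) const {
+//       Eigen::ArrayXd e = (x(1) * t.array()).exp();
+//       fjac.col(0) = e.matrix();
+//       fjac.col(1) = (x(0) * t.array() * e).matrix();
+//       return 0;
+//     }
+//   };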
+
+template <typename _Scalar, typename _Index>
+struct SparseFunctor
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Matrix<Scalar,Dynamic,1> InputType;
+ typedef Matrix<Scalar,Dynamic,1> ValueType;
+ typedef SparseMatrix<Scalar, ColMajor, Index> JacobianType;
+ typedef SparseQR<JacobianType, COLAMDOrdering<int> > QRSolver;
+ enum {
+ InputsAtCompileTime = Dynamic,
+ ValuesAtCompileTime = Dynamic
+ };
+
+ SparseFunctor(int inputs, int values) : m_inputs(inputs), m_values(values) {}
+
+ int inputs() const { return m_inputs; }
+ int values() const { return m_values; }
+
+ const int m_inputs, m_values;
+ //int operator()(const InputType &x, ValueType& fvec) { }
+ // to be defined in the functor
+
+ //int df(const InputType &x, JacobianType& fjac) { }
+ // to be defined in the functor if no automatic differentiation
+
+};
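+
+// For SparseFunctor, df() is expected to fill a sparse Jacobian (e.g. via
+// setFromTriplets()), which is then factorized by the SparseQR solver with
+// COLAMD ordering typedef'd as QRSolver above.
+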
+namespace internal {
+template <typename QRSolver, typename VectorType>
+void lmpar2(const QRSolver &qr, const VectorType &diag, const VectorType &qtb,
+ typename VectorType::Scalar m_delta, typename VectorType::Scalar &par,
+ VectorType &x);
+ }
+/**
+ * \ingroup NonLinearOptimization_Module
+ * \brief Performs non-linear optimization over a non-linear function,
+ * using a variant of the Levenberg-Marquardt algorithm.
+ *
+ * See Wikipedia for more information:
+ * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
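+ *
+ * A minimal usage sketch, assuming a user-defined functor \c MyFunctor
+ * modeled on DenseFunctor that implements \c operator() and \c df():
+ * \code
+ * MyFunctor functor(n, m);                       // n unknowns, m residuals
+ * Eigen::LevenbergMarquardt<MyFunctor> lm(functor);
+ * Eigen::VectorXd x = Eigen::VectorXd::Zero(n);  // initial guess
+ * LevenbergMarquardtSpace::Status status = lm.minimize(x);
+ * \endcode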
+ */
+template<typename _FunctorType>
+class LevenbergMarquardt : internal::no_assignment_operator
+{
+ public:
+ typedef _FunctorType FunctorType;
+ typedef typename FunctorType::QRSolver QRSolver;
+ typedef typename FunctorType::JacobianType JacobianType;
+ typedef typename JacobianType::Scalar Scalar;
+ typedef typename JacobianType::RealScalar RealScalar;
+ typedef typename JacobianType::Index Index;
+ typedef typename QRSolver::Index PermIndex;
+ typedef Matrix<Scalar,Dynamic,1> FVectorType;
+ typedef PermutationMatrix<Dynamic,Dynamic> PermutationType;
+ public:
+ LevenbergMarquardt(FunctorType& functor)
+ : m_functor(functor),m_nfev(0),m_njev(0),m_fnorm(0.0),m_gnorm(0),
+ m_isInitialized(false),m_info(InvalidInput)
+ {
+ resetParameters();
+ m_useExternalScaling=false;
+ }
+
+ LevenbergMarquardtSpace::Status minimize(FVectorType &x);
+ LevenbergMarquardtSpace::Status minimizeInit(FVectorType &x);
+ LevenbergMarquardtSpace::Status minimizeOneStep(FVectorType &x);
+ LevenbergMarquardtSpace::Status lmder1(
+ FVectorType &x,
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ );
+ static LevenbergMarquardtSpace::Status lmdif1(
+ FunctorType &functor,
+ FVectorType &x,
+ Index *nfev,
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ );
+
+ /** Sets the default parameters */
+ void resetParameters()
+ {
+ m_factor = 100.;
+ m_maxfev = 400;
+ m_ftol = std::sqrt(NumTraits<RealScalar>::epsilon());
+ m_xtol = std::sqrt(NumTraits<RealScalar>::epsilon());
+ m_gtol = 0. ;
+ m_epsfcn = 0. ;
+ }
+
+ /** Sets the tolerance for the norm of the solution vector */
+ void setXtol(RealScalar xtol) { m_xtol = xtol; }
+
+ /** Sets the tolerance for the norm of the vector function */
+ void setFtol(RealScalar ftol) { m_ftol = ftol; }
+
+ /** Sets the tolerance for the norm of the gradient of the error vector */
+ void setGtol(RealScalar gtol) { m_gtol = gtol; }
+
+ /** Sets the step bound for the diagonal shift */
+ void setFactor(RealScalar factor) { m_factor = factor; }
+
+ /** Sets the error precision */
+ void setEpsilon (RealScalar epsfcn) { m_epsfcn = epsfcn; }
+
+ /** Sets the maximum number of function evaluations */
+ void setMaxfev(Index maxfev) {m_maxfev = maxfev; }
+
+ /** Use external scaling. If set to true, the caller must provide a positive diagonal via diag() */
+ void setExternalScaling(bool value) {m_useExternalScaling = value; }
+
+ /** \returns a reference to the diagonal of the jacobian */
+ FVectorType& diag() {return m_diag; }
+
+ /** \returns the number of iterations performed */
+ Index iterations() { return m_iter; }
+
+ /** \returns the number of function evaluations */
+ Index nfev() { return m_nfev; }
+
+ /** \returns the number of Jacobian evaluations */
+ Index njev() { return m_njev; }
+
+ /** \returns the norm of current vector function */
+ RealScalar fnorm() {return m_fnorm; }
+
+ /** \returns the norm of the gradient of the error */
+ RealScalar gnorm() {return m_gnorm; }
+
+ /** \returns the LevenbergMarquardt parameter */
+ RealScalar lm_param(void) { return m_par; }
+
+ /** \returns a reference to the current vector function
+ */
+ FVectorType& fvec() {return m_fvec; }
+
+ /** \returns a reference to the matrix where the current Jacobian matrix is stored
+ */
+ JacobianType& jacobian() {return m_fjac; }
+
+ /** \returns a reference to the triangular matrix R from the QR of the jacobian matrix.
+ * \sa jacobian()
+ */
+ JacobianType& matrixR() {return m_rfactor; }
+
+ /** the permutation used in the QR factorization
+ */
+ PermutationType permutation() {return m_permutation; }
+
+ /**
+ * \brief Reports whether the minimization was successful
+ * \returns \c Success if the minimization was successful,
+ * \c NumericalIssue if a numerical problem arose during the
+ * minimization process, for example during the QR factorization,
+ * \c NoConvergence if the minimization did not converge after
+ * the maximum number of function evaluations allowed,
+ * \c InvalidInput if the input matrix is invalid
+ */
+ ComputationInfo info() const
+ {
+ return m_info;
+ }
+ private:
+ JacobianType m_fjac;
+ JacobianType m_rfactor; // The triangular matrix R from the QR of the jacobian matrix m_fjac
+ FunctorType &m_functor;
+ FVectorType m_fvec, m_qtf, m_diag;
+ Index n;
+ Index m;
+ Index m_nfev;
+ Index m_njev;
+ RealScalar m_fnorm; // Norm of the current vector function
+ RealScalar m_gnorm; //Norm of the gradient of the error
+ RealScalar m_factor; // Step bound for the diagonal shift
+ Index m_maxfev; // Maximum number of function evaluations
+ RealScalar m_ftol; // Tolerance in the norm of the vector function
+ RealScalar m_xtol; // Tolerance in the norm of the solution vector
+ RealScalar m_gtol; // Tolerance on the norm of the gradient of the error
+ RealScalar m_epsfcn; // Error precision
+ Index m_iter; // Number of iterations performed
+ RealScalar m_delta;
+ bool m_useExternalScaling;
+ PermutationType m_permutation;
+ FVectorType m_wa1, m_wa2, m_wa3, m_wa4; //Temporary vectors
+ RealScalar m_par;
+ bool m_isInitialized; // Check whether the minimization step has been called
+ ComputationInfo m_info;
+};
+
+template<typename FunctorType>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType>::minimize(FVectorType &x)
+{
+ LevenbergMarquardtSpace::Status status = minimizeInit(x);
+ if (status==LevenbergMarquardtSpace::ImproperInputParameters) {
+ m_isInitialized = true;
+ return status;
+ }
+ do {
+ status = minimizeOneStep(x);
+ } while (status==LevenbergMarquardtSpace::Running);
+ m_isInitialized = true;
+ return status;
+}
+
+template<typename FunctorType>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType>::minimizeInit(FVectorType &x)
+{
+ n = x.size();
+ m = m_functor.values();
+
+ m_wa1.resize(n); m_wa2.resize(n); m_wa3.resize(n);
+ m_wa4.resize(m);
+ m_fvec.resize(m);
+ //FIXME Sparse Case : Allocate space for the jacobian
+ m_fjac.resize(m, n);
+// m_fjac.reserve(VectorXi::Constant(n,5)); // FIXME Find a better alternative
+ if (!m_useExternalScaling)
+ m_diag.resize(n);
+ eigen_assert( (!m_useExternalScaling || m_diag.size()==n) && "When m_useExternalScaling is set, the caller must provide a valid 'm_diag'");
+ m_qtf.resize(n);
+
+ /* Function Body */
+ m_nfev = 0;
+ m_njev = 0;
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || m < n || m_ftol < 0. || m_xtol < 0. || m_gtol < 0. || m_maxfev <= 0 || m_factor <= 0.){
+ m_info = InvalidInput;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+ }
+
+ if (m_useExternalScaling)
+ for (Index j = 0; j < n; ++j)
+ if (m_diag[j] <= 0.)
+ {
+ m_info = InvalidInput;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+ }
+
+ /* evaluate the function at the starting point */
+ /* and calculate its norm. */
+ m_nfev = 1;
+ if ( m_functor(x, m_fvec) < 0)
+ return LevenbergMarquardtSpace::UserAsked;
+ m_fnorm = m_fvec.stableNorm();
+
+ /* initialize levenberg-marquardt parameter and iteration counter. */
+ m_par = 0.;
+ m_iter = 1;
+
+ return LevenbergMarquardtSpace::NotStarted;
+}
+
+template<typename FunctorType>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType>::lmder1(
+ FVectorType &x,
+ const Scalar tol
+ )
+{
+ n = x.size();
+ m = m_functor.values();
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || m < n || tol < 0.)
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+
+ resetParameters();
+ m_ftol = tol;
+ m_xtol = tol;
+ m_maxfev = 100*(n+1);
+
+ return minimize(x);
+}
+
+
+template<typename FunctorType>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType>::lmdif1(
+ FunctorType &functor,
+ FVectorType &x,
+ Index *nfev,
+ const Scalar tol
+ )
+{
+ Index n = x.size();
+ Index m = functor.values();
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || m < n || tol < 0.)
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+
+ NumericalDiff<FunctorType> numDiff(functor);
+ // embedded LevenbergMarquardt
+ LevenbergMarquardt<NumericalDiff<FunctorType> > lm(numDiff);
+ lm.setFtol(tol);
+ lm.setXtol(tol);
+ lm.setMaxfev(200*(n+1));
+
+ LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x));
+ if (nfev)
+ *nfev = lm.nfev();
+ return info;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_LEVENBERGMARQUARDT_H
diff --git a/unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt b/unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt
new file mode 100644
index 0000000..cdde64d
--- /dev/null
+++ b/unsupported/Eigen/src/MatrixFunctions/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_MatrixFunctions_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_MatrixFunctions_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/MatrixFunctions COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
new file mode 100644
index 0000000..6825a78
--- /dev/null
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -0,0 +1,451 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009, 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_EXPONENTIAL
+#define EIGEN_MATRIX_EXPONENTIAL
+
+#include "StemFunction.h"
+
+namespace Eigen {
+
+/** \ingroup MatrixFunctions_Module
+ * \brief Class for computing the matrix exponential.
+ * \tparam MatrixType type of the argument of the exponential,
+ * expected to be an instantiation of the Matrix class template.
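+ *
+ * A direct-use sketch (most code reaches this class through
+ * MatrixBase::exp() instead):
+ * \code
+ * Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3), expA(3,3);
+ * MatrixExponential<Eigen::MatrixXd> me(A);
+ * me.compute(expA);
+ * \endcode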
+ */
+template <typename MatrixType>
+class MatrixExponential {
+
+ public:
+
+ /** \brief Constructor.
+ *
+ * The class stores a reference to \p M, so it should not be
+ * changed (or destroyed) before compute() is called.
+ *
+ * \param[in] M matrix whose exponential is to be computed.
+ */
+ MatrixExponential(const MatrixType &M);
+
+ /** \brief Computes the matrix exponential.
+ *
+ * \param[out] result the matrix exponential of \p M in the constructor.
+ */
+ template <typename ResultType>
+ void compute(ResultType &result);
+
+ private:
+
+ // Prevent copying
+ MatrixExponential(const MatrixExponential&);
+ MatrixExponential& operator=(const MatrixExponential&);
+
+ /** \brief Compute the (3,3)-Padé approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ *
+ * \param[in] A Argument of matrix exponential
+ */
+ void pade3(const MatrixType &A);
+
+ /** \brief Compute the (5,5)-Padé approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ *
+ * \param[in] A Argument of matrix exponential
+ */
+ void pade5(const MatrixType &A);
+
+ /** \brief Compute the (7,7)-Padé approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ *
+ * \param[in] A Argument of matrix exponential
+ */
+ void pade7(const MatrixType &A);
+
+ /** \brief Compute the (9,9)-Padé approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ *
+ * \param[in] A Argument of matrix exponential
+ */
+ void pade9(const MatrixType &A);
+
+ /** \brief Compute the (13,13)-Padé approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ *
+ * \param[in] A Argument of matrix exponential
+ */
+ void pade13(const MatrixType &A);
+
+ /** \brief Compute the (17,17)-Padé approximant to the exponential.
+ *
+ * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé
+ * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$.
+ *
+ * This function is used only if your long double has double-double or quadruple precision.
+ *
+ * \param[in] A Argument of matrix exponential
+ */
+ void pade17(const MatrixType &A);
+
+ /** \brief Compute Padé approximant to the exponential.
+ *
+ * Computes \c m_U, \c m_V and \c m_squarings such that
+ * \f$ (V+U)(V-U)^{-1} \f$ is a Padé of
+ * \f$ \exp(2^{-\mbox{squarings}}M) \f$ around \f$ M = 0 \f$. The
+ * degree of the Padé approximant and the value of
+ * squarings are chosen such that the approximation error is no
+ * more than the round-off error.
+ *
+ * The argument of this function should correspond with the (real
+ * part of) the entries of \c m_M. It is used to select the
+ * correct implementation using overloading.
+ */
+ void computeUV(double);
+
+ /** \brief Compute Padé approximant to the exponential.
+ *
+ * \sa computeUV(double);
+ */
+ void computeUV(float);
+
+ /** \brief Compute Padé approximant to the exponential.
+ *
+ * \sa computeUV(double);
+ */
+ void computeUV(long double);
+
+ typedef typename internal::traits<MatrixType>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename std::complex<RealScalar> ComplexScalar;
+
+ /** \brief Reference to matrix whose exponential is to be computed. */
+ typename internal::nested<MatrixType>::type m_M;
+
+ /** \brief Odd-degree terms in numerator of Padé approximant. */
+ MatrixType m_U;
+
+ /** \brief Even-degree terms in numerator of Padé approximant. */
+ MatrixType m_V;
+
+ /** \brief Used for temporary storage. */
+ MatrixType m_tmp1;
+
+ /** \brief Used for temporary storage. */
+ MatrixType m_tmp2;
+
+ /** \brief Identity matrix of the same size as \c m_M. */
+ MatrixType m_Id;
+
+ /** \brief Number of squarings required in the last step. */
+ int m_squarings;
+
+ /** \brief L1 norm of m_M. */
+ RealScalar m_l1norm;
+};
+
+template <typename MatrixType>
+MatrixExponential<MatrixType>::MatrixExponential(const MatrixType &M) :
+ m_M(M),
+ m_U(M.rows(),M.cols()),
+ m_V(M.rows(),M.cols()),
+ m_tmp1(M.rows(),M.cols()),
+ m_tmp2(M.rows(),M.cols()),
+ m_Id(MatrixType::Identity(M.rows(), M.cols())),
+ m_squarings(0),
+ m_l1norm(M.cwiseAbs().colwise().sum().maxCoeff())
+{
+ /* empty body */
+}
+
+template <typename MatrixType>
+template <typename ResultType>
+void MatrixExponential<MatrixType>::compute(ResultType &result)
+{
+#if LDBL_MANT_DIG > 112 // rarely happens
+ if(sizeof(RealScalar) > 14) {
+ result = m_M.matrixFunction(StdStemFunctions<ComplexScalar>::exp);
+ return;
+ }
+#endif
+ computeUV(RealScalar());
+ m_tmp1 = m_U + m_V; // numerator of Pade approximant
+ m_tmp2 = -m_U + m_V; // denominator of Pade approximant
+ result = m_tmp2.partialPivLu().solve(m_tmp1);
+ for (int i=0; i<m_squarings; i++)
+ result *= result; // undo scaling by repeated squaring
+}
+
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade3(const MatrixType &A)
+{
+ const RealScalar b[] = {120., 60., 12., 1.};
+ m_tmp1.noalias() = A * A;
+ m_tmp2 = b[3]*m_tmp1 + b[1]*m_Id;
+ m_U.noalias() = A * m_tmp2;
+ m_V = b[2]*m_tmp1 + b[0]*m_Id;
+}
+
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade5(const MatrixType &A)
+{
+ const RealScalar b[] = {30240., 15120., 3360., 420., 30., 1.};
+ MatrixType A2 = A * A;
+ m_tmp1.noalias() = A2 * A2;
+ m_tmp2 = b[5]*m_tmp1 + b[3]*A2 + b[1]*m_Id;
+ m_U.noalias() = A * m_tmp2;
+ m_V = b[4]*m_tmp1 + b[2]*A2 + b[0]*m_Id;
+}
+
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade7(const MatrixType &A)
+{
+ const RealScalar b[] = {17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.};
+ MatrixType A2 = A * A;
+ MatrixType A4 = A2 * A2;
+ m_tmp1.noalias() = A4 * A2;
+ m_tmp2 = b[7]*m_tmp1 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
+ m_U.noalias() = A * m_tmp2;
+ m_V = b[6]*m_tmp1 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
+}
+
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade9(const MatrixType &A)
+{
+ const RealScalar b[] = {17643225600., 8821612800., 2075673600., 302702400., 30270240.,
+ 2162160., 110880., 3960., 90., 1.};
+ MatrixType A2 = A * A;
+ MatrixType A4 = A2 * A2;
+ MatrixType A6 = A4 * A2;
+ m_tmp1.noalias() = A6 * A2;
+ m_tmp2 = b[9]*m_tmp1 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
+ m_U.noalias() = A * m_tmp2;
+ m_V = b[8]*m_tmp1 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
+}
+
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade13(const MatrixType &A)
+{
+ const RealScalar b[] = {64764752532480000., 32382376266240000., 7771770303897600.,
+ 1187353796428800., 129060195264000., 10559470521600., 670442572800.,
+ 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.};
+ MatrixType A2 = A * A;
+ MatrixType A4 = A2 * A2;
+ m_tmp1.noalias() = A4 * A2;
+ m_V = b[13]*m_tmp1 + b[11]*A4 + b[9]*A2; // used for temporary storage
+ m_tmp2.noalias() = m_tmp1 * m_V;
+ m_tmp2 += b[7]*m_tmp1 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
+ m_U.noalias() = A * m_tmp2;
+ m_tmp2 = b[12]*m_tmp1 + b[10]*A4 + b[8]*A2;
+ m_V.noalias() = m_tmp1 * m_tmp2;
+ m_V += b[6]*m_tmp1 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
+}
+
+#if LDBL_MANT_DIG > 64
+template <typename MatrixType>
+EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade17(const MatrixType &A)
+{
+ const RealScalar b[] = {830034394580628357120000.L, 415017197290314178560000.L,
+ 100610229646136770560000.L, 15720348382208870400000.L,
+ 1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L,
+ 595373117923584000.L, 27563570274240000.L, 1060137318240000.L,
+ 33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L,
+ 46512.L, 306.L, 1.L};
+ MatrixType A2 = A * A;
+ MatrixType A4 = A2 * A2;
+ MatrixType A6 = A4 * A2;
+ m_tmp1.noalias() = A4 * A4;
+ m_V = b[17]*m_tmp1 + b[15]*A6 + b[13]*A4 + b[11]*A2; // used for temporary storage
+ m_tmp2.noalias() = m_tmp1 * m_V;
+ m_tmp2 += b[9]*m_tmp1 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*m_Id;
+ m_U.noalias() = A * m_tmp2;
+ m_tmp2 = b[16]*m_tmp1 + b[14]*A6 + b[12]*A4 + b[10]*A2;
+ m_V.noalias() = m_tmp1 * m_tmp2;
+ m_V += b[8]*m_tmp1 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*m_Id;
+}
+#endif
+
+template <typename MatrixType>
+void MatrixExponential<MatrixType>::computeUV(float)
+{
+ using std::frexp;
+ using std::pow;
+ if (m_l1norm < 4.258730016922831e-001) {
+ pade3(m_M);
+ } else if (m_l1norm < 1.880152677804762e+000) {
+ pade5(m_M);
+ } else {
+ const float maxnorm = 3.925724783138660f;
+ frexp(m_l1norm / maxnorm, &m_squarings);
+ if (m_squarings < 0) m_squarings = 0;
+ MatrixType A = m_M / pow(Scalar(2), m_squarings);
+ pade7(A);
+ }
+}
+
+template <typename MatrixType>
+void MatrixExponential<MatrixType>::computeUV(double)
+{
+ using std::frexp;
+ using std::pow;
+ if (m_l1norm < 1.495585217958292e-002) {
+ pade3(m_M);
+ } else if (m_l1norm < 2.539398330063230e-001) {
+ pade5(m_M);
+ } else if (m_l1norm < 9.504178996162932e-001) {
+ pade7(m_M);
+ } else if (m_l1norm < 2.097847961257068e+000) {
+ pade9(m_M);
+ } else {
+ const double maxnorm = 5.371920351148152;
+ frexp(m_l1norm / maxnorm, &m_squarings);
+ if (m_squarings < 0) m_squarings = 0;
+ MatrixType A = m_M / pow(Scalar(2), m_squarings);
+ pade13(A);
+ }
+}
+
+template <typename MatrixType>
+void MatrixExponential<MatrixType>::computeUV(long double)
+{
+ using std::frexp;
+ using std::pow;
+#if LDBL_MANT_DIG == 53 // double precision
+ computeUV(double());
+#elif LDBL_MANT_DIG <= 64 // extended precision
+ if (m_l1norm < 4.1968497232266989671e-003L) {
+ pade3(m_M);
+ } else if (m_l1norm < 1.1848116734693823091e-001L) {
+ pade5(m_M);
+ } else if (m_l1norm < 5.5170388480686700274e-001L) {
+ pade7(m_M);
+ } else if (m_l1norm < 1.3759868875587845383e+000L) {
+ pade9(m_M);
+ } else {
+ const long double maxnorm = 4.0246098906697353063L;
+ frexp(m_l1norm / maxnorm, &m_squarings);
+ if (m_squarings < 0) m_squarings = 0;
+ MatrixType A = m_M / pow(Scalar(2), m_squarings);
+ pade13(A);
+ }
+#elif LDBL_MANT_DIG <= 106 // double-double
+ if (m_l1norm < 3.2787892205607026992947488108213e-005L) {
+ pade3(m_M);
+ } else if (m_l1norm < 6.4467025060072760084130906076332e-003L) {
+ pade5(m_M);
+ } else if (m_l1norm < 6.8988028496595374751374122881143e-002L) {
+ pade7(m_M);
+ } else if (m_l1norm < 2.7339737518502231741495857201670e-001L) {
+ pade9(m_M);
+ } else if (m_l1norm < 1.3203382096514474905666448850278e+000L) {
+ pade13(m_M);
+ } else {
+ const long double maxnorm = 3.2579440895405400856599663723517L;
+ frexp(m_l1norm / maxnorm, &m_squarings);
+ if (m_squarings < 0) m_squarings = 0;
+ MatrixType A = m_M / pow(Scalar(2), m_squarings);
+ pade17(A);
+ }
+#elif LDBL_MANT_DIG <= 112 // quadruple precision
+ if (m_l1norm < 1.639394610288918690547467954466970e-005L) {
+ pade3(m_M);
+ } else if (m_l1norm < 4.253237712165275566025884344433009e-003L) {
+ pade5(m_M);
+ } else if (m_l1norm < 5.125804063165764409885122032933142e-002L) {
+ pade7(m_M);
+ } else if (m_l1norm < 2.170000765161155195453205651889853e-001L) {
+ pade9(m_M);
+ } else if (m_l1norm < 1.125358383453143065081397882891878e+000L) {
+ pade13(m_M);
+ } else {
+ const long double maxnorm = 2.884233277829519311757165057717815L;
+ frexp(m_l1norm / maxnorm, &m_squarings);
+ if (m_squarings < 0) m_squarings = 0;
+ MatrixType A = m_M / pow(Scalar(2), m_squarings);
+ pade17(A);
+ }
+#else
+ // this case should be handled in compute()
+ eigen_assert(false && "Bug in MatrixExponential");
+#endif // LDBL_MANT_DIG
+}
+
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix exponential of some matrix (expression).
+ *
+ * \tparam Derived Type of the argument to the matrix exponential.
+ *
+ * This class holds the argument to the matrix exponential until it
+ * is assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * MatrixBase::exp() and most of the time this is the only way it is
+ * used.
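+ *
+ * A usage sketch (assuming the MatrixFunctions module header is included):
+ * \code
+ * Eigen::Matrix2d A;
+ * A << 0, 1,
+ *      -1, 0;                   // generator of a planar rotation
+ * Eigen::Matrix2d R = A.exp();  // rotation by one radian
+ * \endcode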
+ */
+template<typename Derived> struct MatrixExponentialReturnValue
+: public ReturnByValue<MatrixExponentialReturnValue<Derived> >
+{
+ typedef typename Derived::Index Index;
+ public:
+ /** \brief Constructor.
+ *
+ * \param[in] src %Matrix (expression) forming the argument of the
+ * matrix exponential.
+ */
+ MatrixExponentialReturnValue(const Derived& src) : m_src(src) { }
+
+ /** \brief Compute the matrix exponential.
+ *
+ * \param[out] result the matrix exponential of \p src in the
+ * constructor.
+ */
+ template <typename ResultType>
+ inline void evalTo(ResultType& result) const
+ {
+ const typename Derived::PlainObject srcEvaluated = m_src.eval();
+ MatrixExponential<typename Derived::PlainObject> me(srcEvaluated);
+ me.compute(result);
+ }
+
+ Index rows() const { return m_src.rows(); }
+ Index cols() const { return m_src.cols(); }
+
+ protected:
+ const Derived& m_src;
+ private:
+ MatrixExponentialReturnValue& operator=(const MatrixExponentialReturnValue&);
+};
+
+namespace internal {
+template<typename Derived>
+struct traits<MatrixExponentialReturnValue<Derived> >
+{
+ typedef typename Derived::PlainObject ReturnType;
+};
+}
+
+template <typename Derived>
+const MatrixExponentialReturnValue<Derived> MatrixBase<Derived>::exp() const
+{
+ eigen_assert(rows() == cols());
+ return MatrixExponentialReturnValue<Derived>(derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_EXPONENTIAL
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
new file mode 100644
index 0000000..7d42664
--- /dev/null
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -0,0 +1,591 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_FUNCTION
+#define EIGEN_MATRIX_FUNCTION
+
+#include "StemFunction.h"
+#include "MatrixFunctionAtomic.h"
+
+
+namespace Eigen {
+
+/** \ingroup MatrixFunctions_Module
+ * \brief Class for computing matrix functions.
+ * \tparam MatrixType type of the argument of the matrix function,
+ * expected to be an instantiation of the Matrix class template.
+ * \tparam AtomicType type for computing matrix function of atomic blocks.
+ * \tparam IsComplex used internally to select correct specialization.
+ *
+ * This class implements the Schur-Parlett algorithm for computing matrix functions. The spectrum of the
+ * matrix is divided into clusters of eigenvalues that lie close together. This class delegates the
+ * computation of the matrix function for every block corresponding to these clusters to an object of type
+ * \p AtomicType and uses these results to compute the matrix function of the whole matrix. The class
+ * \p AtomicType should have a \p compute() member function for computing the matrix function of a block.
+ *
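+ * A typical entry point is MatrixBase::matrixFunction(); a sketch, assuming
+ * the MatrixFunctions module header is included:
+ * \code
+ * Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
+ * Eigen::MatrixXd sinA = A.matrixFunction(Eigen::StdStemFunctions<std::complex<double> >::sin);
+ * \endcode
+ *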
+ * \sa class MatrixFunctionAtomic, class MatrixLogarithmAtomic
+ */
+template <typename MatrixType,
+ typename AtomicType,
+ int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
+class MatrixFunction
+{
+ public:
+
+ /** \brief Constructor.
+ *
+ * \param[in] A argument of matrix function, should be a square matrix.
+ * \param[in] atomic class for computing matrix function of atomic blocks.
+ *
+ * The class stores references to \p A and \p atomic, so they should not be
+ * changed (or destroyed) before compute() is called.
+ */
+ MatrixFunction(const MatrixType& A, AtomicType& atomic);
+
+ /** \brief Compute the matrix function.
+ *
+ * \param[out] result the function \p f applied to \p A, as
+ * specified in the constructor.
+ *
+ * See MatrixBase::matrixFunction() for details on how this computation
+ * is implemented.
+ */
+ template <typename ResultType>
+ void compute(ResultType &result);
+};
+
+
+/** \internal \ingroup MatrixFunctions_Module
+ * \brief Partial specialization of MatrixFunction for real matrices
+ */
+template <typename MatrixType, typename AtomicType>
+class MatrixFunction<MatrixType, AtomicType, 0>
+{
+ private:
+
+ typedef internal::traits<MatrixType> Traits;
+ typedef typename Traits::Scalar Scalar;
+ static const int Rows = Traits::RowsAtCompileTime;
+ static const int Cols = Traits::ColsAtCompileTime;
+ static const int Options = MatrixType::Options;
+ static const int MaxRows = Traits::MaxRowsAtCompileTime;
+ static const int MaxCols = Traits::MaxColsAtCompileTime;
+
+ typedef std::complex<Scalar> ComplexScalar;
+ typedef Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols> ComplexMatrix;
+
+ public:
+
+ /** \brief Constructor.
+ *
+ * \param[in] A argument of matrix function, should be a square matrix.
+ * \param[in] atomic class for computing matrix function of atomic blocks.
+ */
+ MatrixFunction(const MatrixType& A, AtomicType& atomic) : m_A(A), m_atomic(atomic) { }
+
+ /** \brief Compute the matrix function.
+ *
+ * \param[out] result the function \p f applied to \p A, as
+ * specified in the constructor.
+ *
+ * This function converts the real matrix \c A to a complex matrix,
+ * uses MatrixFunction<MatrixType,1> and then converts the result back to
+ * a real matrix.
+ */
+ template <typename ResultType>
+ void compute(ResultType& result)
+ {
+ ComplexMatrix CA = m_A.template cast<ComplexScalar>();
+ ComplexMatrix Cresult;
+ MatrixFunction<ComplexMatrix, AtomicType> mf(CA, m_atomic);
+ mf.compute(Cresult);
+ result = Cresult.real();
+ }
+
+ private:
+ typename internal::nested<MatrixType>::type m_A; /**< \brief Reference to argument of matrix function. */
+ AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */
+
+ MatrixFunction& operator=(const MatrixFunction&);
+};
+
+
+/** \internal \ingroup MatrixFunctions_Module
+ * \brief Partial specialization of MatrixFunction for complex matrices
+ */
+template <typename MatrixType, typename AtomicType>
+class MatrixFunction<MatrixType, AtomicType, 1>
+{
+ private:
+
+ typedef internal::traits<MatrixType> Traits;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
+ static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
+ static const int Options = MatrixType::Options;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef Matrix<Scalar, Traits::RowsAtCompileTime, 1> VectorType;
+ typedef Matrix<Index, Traits::RowsAtCompileTime, 1> IntVectorType;
+ typedef Matrix<Index, Dynamic, 1> DynamicIntVectorType;
+ typedef std::list<Scalar> Cluster;
+ typedef std::list<Cluster> ListOfClusters;
+ typedef Matrix<Scalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+
+ public:
+
+ MatrixFunction(const MatrixType& A, AtomicType& atomic);
+ template <typename ResultType> void compute(ResultType& result);
+
+ private:
+
+ void computeSchurDecomposition();
+ void partitionEigenvalues();
+ typename ListOfClusters::iterator findCluster(Scalar key);
+ void computeClusterSize();
+ void computeBlockStart();
+ void constructPermutation();
+ void permuteSchur();
+ void swapEntriesInSchur(Index index);
+ void computeBlockAtomic();
+ Block<MatrixType> block(MatrixType& A, Index i, Index j);
+ void computeOffDiagonal();
+ DynMatrixType solveTriangularSylvester(const DynMatrixType& A, const DynMatrixType& B, const DynMatrixType& C);
+
+ typename internal::nested<MatrixType>::type m_A; /**< \brief Reference to argument of matrix function. */
+ AtomicType& m_atomic; /**< \brief Class for computing matrix function of atomic blocks. */
+ MatrixType m_T; /**< \brief Triangular part of Schur decomposition */
+ MatrixType m_U; /**< \brief Unitary part of Schur decomposition */
+ MatrixType m_fT; /**< \brief %Matrix function applied to #m_T */
+ ListOfClusters m_clusters; /**< \brief Partition of eigenvalues into clusters of ei'vals "close" to each other */
+ DynamicIntVectorType m_eivalToCluster; /**< \brief m_eivalToCluster[i] = j means i-th ei'val is in j-th cluster */
+ DynamicIntVectorType m_clusterSize; /**< \brief Number of eigenvalues in each clusters */
+ DynamicIntVectorType m_blockStart; /**< \brief Row index at which block corresponding to i-th cluster starts */
+ IntVectorType m_permutation; /**< \brief Permutation which groups ei'vals in the same cluster together */
+
+ /** \brief Maximum distance allowed between eigenvalues to be considered "close".
+ *
+ * This is morally a \c static \c const \c Scalar, but only
+ * integers can be static constant class members in C++. The
+ * separation constant is set to 0.1, a value taken from the
+ * paper by Davies and Higham. */
+ static const RealScalar separation() { return static_cast<RealScalar>(0.1); }
+
+ MatrixFunction& operator=(const MatrixFunction&);
+};
+
+/** \brief Constructor.
+ *
+ * \param[in] A argument of matrix function, should be a square matrix.
+ * \param[in] atomic class for computing matrix function of atomic blocks.
+ */
+template <typename MatrixType, typename AtomicType>
+MatrixFunction<MatrixType,AtomicType,1>::MatrixFunction(const MatrixType& A, AtomicType& atomic)
+ : m_A(A), m_atomic(atomic)
+{
+ /* empty body */
+}
+
+/** \brief Compute the matrix function.
+ *
+ * \param[out] result the function \p f applied to \p A, as
+ * specified in the constructor.
+ */
+template <typename MatrixType, typename AtomicType>
+template <typename ResultType>
+void MatrixFunction<MatrixType,AtomicType,1>::compute(ResultType& result)
+{
+ computeSchurDecomposition();
+ partitionEigenvalues();
+ computeClusterSize();
+ computeBlockStart();
+ constructPermutation();
+ permuteSchur();
+ computeBlockAtomic();
+ computeOffDiagonal();
+ result = m_U * (m_fT.template triangularView<Upper>() * m_U.adjoint());
+}
+
+/** \brief Store the Schur decomposition of #m_A in #m_T and #m_U */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::computeSchurDecomposition()
+{
+ const ComplexSchur<MatrixType> schurOfA(m_A);
+ m_T = schurOfA.matrixT();
+ m_U = schurOfA.matrixU();
+}
+
+/** \brief Partition eigenvalues in clusters of ei'vals close to each other
+ *
+ * This function computes #m_clusters. This is a partition of the
+ * eigenvalues of #m_T in clusters, such that
+ * # Any eigenvalue in a certain cluster is at most separation() away
+ * from another eigenvalue in the same cluster.
+ * # The distance between two eigenvalues in different clusters is
+ * more than separation().
+ * The implementation follows Algorithm 4.1 in the paper of Davies
+ * and Higham.
+ */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::partitionEigenvalues()
+{
+ using std::abs;
+ const Index rows = m_T.rows();
+ VectorType diag = m_T.diagonal(); // contains eigenvalues of A
+
+ for (Index i=0; i<rows; ++i) {
+ // Find set containing diag(i), adding a new set if necessary
+ typename ListOfClusters::iterator qi = findCluster(diag(i));
+ if (qi == m_clusters.end()) {
+ Cluster l;
+ l.push_back(diag(i));
+ m_clusters.push_back(l);
+ qi = m_clusters.end();
+ --qi;
+ }
+
+ // Look for other element to add to the set
+ for (Index j=i+1; j<rows; ++j) {
+ if (abs(diag(j) - diag(i)) <= separation() && std::find(qi->begin(), qi->end(), diag(j)) == qi->end()) {
+ typename ListOfClusters::iterator qj = findCluster(diag(j));
+ if (qj == m_clusters.end()) {
+ qi->push_back(diag(j));
+ } else {
+ qi->insert(qi->end(), qj->begin(), qj->end());
+ m_clusters.erase(qj);
+ }
+ }
+ }
+ }
+}
+
+/** \brief Find cluster in #m_clusters containing some value
+ * \param[in] key Value to find
+ * \returns Iterator to cluster containing \c key, or
+ * \c m_clusters.end() if no cluster in m_clusters contains \c key.
+ */
+template <typename MatrixType, typename AtomicType>
+typename MatrixFunction<MatrixType,AtomicType,1>::ListOfClusters::iterator MatrixFunction<MatrixType,AtomicType,1>::findCluster(Scalar key)
+{
+ typename Cluster::iterator j;
+ for (typename ListOfClusters::iterator i = m_clusters.begin(); i != m_clusters.end(); ++i) {
+ j = std::find(i->begin(), i->end(), key);
+ if (j != i->end())
+ return i;
+ }
+ return m_clusters.end();
+}
+
+/** \brief Compute #m_clusterSize and #m_eivalToCluster using #m_clusters */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::computeClusterSize()
+{
+ const Index rows = m_T.rows();
+ VectorType diag = m_T.diagonal();
+ const Index numClusters = static_cast<Index>(m_clusters.size());
+
+ m_clusterSize.setZero(numClusters);
+ m_eivalToCluster.resize(rows);
+ Index clusterIndex = 0;
+ for (typename ListOfClusters::const_iterator cluster = m_clusters.begin(); cluster != m_clusters.end(); ++cluster) {
+ for (Index i = 0; i < diag.rows(); ++i) {
+ if (std::find(cluster->begin(), cluster->end(), diag(i)) != cluster->end()) {
+ ++m_clusterSize[clusterIndex];
+ m_eivalToCluster[i] = clusterIndex;
+ }
+ }
+ ++clusterIndex;
+ }
+}
+
+/** \brief Compute #m_blockStart using #m_clusterSize */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::computeBlockStart()
+{
+ m_blockStart.resize(m_clusterSize.rows());
+ m_blockStart(0) = 0;
+ for (Index i = 1; i < m_clusterSize.rows(); i++) {
+ m_blockStart(i) = m_blockStart(i-1) + m_clusterSize(i-1);
+ }
+}
+
+/** \brief Compute #m_permutation using #m_eivalToCluster and #m_blockStart */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::constructPermutation()
+{
+ DynamicIntVectorType indexNextEntry = m_blockStart;
+ m_permutation.resize(m_T.rows());
+ for (Index i = 0; i < m_T.rows(); i++) {
+ Index cluster = m_eivalToCluster[i];
+ m_permutation[i] = indexNextEntry[cluster];
+ ++indexNextEntry[cluster];
+ }
+}
+
+/** \brief Permute Schur decomposition in #m_U and #m_T according to #m_permutation */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::permuteSchur()
+{
+ IntVectorType p = m_permutation;
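+  // Selection-sort-like pass: for each position i, locate the entry destined
+  // for i and bubble it into place with adjacent swaps; swapEntriesInSchur()
+  // keeps #m_T upper triangular and updates #m_U accordingly.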
+ for (Index i = 0; i < p.rows() - 1; i++) {
+ Index j;
+ for (j = i; j < p.rows(); j++) {
+ if (p(j) == i) break;
+ }
+ eigen_assert(p(j) == i);
+ for (Index k = j-1; k >= i; k--) {
+ swapEntriesInSchur(k);
+ std::swap(p.coeffRef(k), p.coeffRef(k+1));
+ }
+ }
+}
+
+/** \brief Swap the diagonal entries \a index and \a index+1 in the Schur decomposition #m_U and #m_T, via a Givens rotation that preserves triangularity */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::swapEntriesInSchur(Index index)
+{
+ JacobiRotation<Scalar> rotation;
+ rotation.makeGivens(m_T(index, index+1), m_T(index+1, index+1) - m_T(index, index));
+ m_T.applyOnTheLeft(index, index+1, rotation.adjoint());
+ m_T.applyOnTheRight(index, index+1, rotation);
+ m_U.applyOnTheRight(index, index+1, rotation);
+}
+
+/** \brief Compute block diagonal part of #m_fT.
+ *
+ * This routine computes the matrix function applied to the block diagonal part of #m_T, with the blocking
+ * given by #m_blockStart. The matrix function of each diagonal block is computed by #m_atomic. The
+ * off-diagonal parts of #m_fT are set to zero.
+ */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::computeBlockAtomic()
+{
+ m_fT.resize(m_T.rows(), m_T.cols());
+ m_fT.setZero();
+ for (Index i = 0; i < m_clusterSize.rows(); ++i) {
+ block(m_fT, i, i) = m_atomic.compute(block(m_T, i, i));
+ }
+}
+
+/** \brief Return block of matrix according to blocking given by #m_blockStart */
+template <typename MatrixType, typename AtomicType>
+Block<MatrixType> MatrixFunction<MatrixType,AtomicType,1>::block(MatrixType& A, Index i, Index j)
+{
+ return A.block(m_blockStart(i), m_blockStart(j), m_clusterSize(i), m_clusterSize(j));
+}
+
+/** \brief Compute part of #m_fT above block diagonal.
+ *
+ * This routine assumes that the block diagonal part of #m_fT (which
+ * equals the matrix function applied to the diagonal blocks of #m_T)
+ * has already been computed, and computes the part above the block
+ * diagonal. The part below the block diagonal is zero, because #m_T
+ * is upper triangular.
+ */
+template <typename MatrixType, typename AtomicType>
+void MatrixFunction<MatrixType,AtomicType,1>::computeOffDiagonal()
+{
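+  // Block Parlett recurrence: each block F_ij = f(T)_ij above the block
+  // diagonal satisfies the Sylvester equation
+  //   T_ii F_ij - F_ij T_jj = F_ii T_ij - T_ij F_jj
+  //                           + sum_{i<k<j} (F_ik T_kj - T_ik F_kj),
+  // solved below one block superdiagonal at a time.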
+ for (Index diagIndex = 1; diagIndex < m_clusterSize.rows(); diagIndex++) {
+ for (Index blockIndex = 0; blockIndex < m_clusterSize.rows() - diagIndex; blockIndex++) {
+ // compute (blockIndex, blockIndex+diagIndex) block
+ DynMatrixType A = block(m_T, blockIndex, blockIndex);
+ DynMatrixType B = -block(m_T, blockIndex+diagIndex, blockIndex+diagIndex);
+ DynMatrixType C = block(m_fT, blockIndex, blockIndex) * block(m_T, blockIndex, blockIndex+diagIndex);
+ C -= block(m_T, blockIndex, blockIndex+diagIndex) * block(m_fT, blockIndex+diagIndex, blockIndex+diagIndex);
+ for (Index k = blockIndex + 1; k < blockIndex + diagIndex; k++) {
+ C += block(m_fT, blockIndex, k) * block(m_T, k, blockIndex+diagIndex);
+ C -= block(m_T, blockIndex, k) * block(m_fT, k, blockIndex+diagIndex);
+ }
+ block(m_fT, blockIndex, blockIndex+diagIndex) = solveTriangularSylvester(A, B, C);
+ }
+ }
+}
+
+/** \brief Solve a triangular Sylvester equation AX + XB = C
+ *
+ * \param[in] A the matrix A; should be square and upper triangular
+ * \param[in] B the matrix B; should be square and upper triangular
+ * \param[in]  C  the matrix C; should have the correct size.
+ *
+ * \returns the solution X.
+ *
+ * If A is m-by-m and B is n-by-n, then both C and X are m-by-n.
+ * The (i,j)-th component of the Sylvester equation is
+ * \f[
+ * \sum_{k=i}^m A_{ik} X_{kj} + \sum_{k=1}^j X_{ik} B_{kj} = C_{ij}.
+ * \f]
+ * This can be re-arranged to yield:
+ * \f[
+ * X_{ij} = \frac{1}{A_{ii} + B_{jj}} \Bigl( C_{ij}
+ * - \sum_{k=i+1}^m A_{ik} X_{kj} - \sum_{k=1}^{j-1} X_{ik} B_{kj} \Bigr).
+ * \f]
+ * It is assumed that A and B are such that the denominator
+ * \f$ A_{ii} + B_{jj} \f$ is never zero (otherwise the Sylvester
+ * equation does not have a unique
+ * solution). In that case, these equations can be evaluated in the
+ * order \f$ i=m,\ldots,1 \f$ and \f$ j=1,\ldots,n \f$.
+ */
+template <typename MatrixType, typename AtomicType>
+typename MatrixFunction<MatrixType,AtomicType,1>::DynMatrixType MatrixFunction<MatrixType,AtomicType,1>::solveTriangularSylvester(
+ const DynMatrixType& A,
+ const DynMatrixType& B,
+ const DynMatrixType& C)
+{
+ eigen_assert(A.rows() == A.cols());
+ eigen_assert(A.isUpperTriangular());
+ eigen_assert(B.rows() == B.cols());
+ eigen_assert(B.isUpperTriangular());
+ eigen_assert(C.rows() == A.rows());
+ eigen_assert(C.cols() == B.rows());
+
+ Index m = A.rows();
+ Index n = B.rows();
+ DynMatrixType X(m, n);
+
+ for (Index i = m - 1; i >= 0; --i) {
+ for (Index j = 0; j < n; ++j) {
+
+ // Compute AX = \sum_{k=i+1}^m A_{ik} X_{kj}
+ Scalar AX;
+ if (i == m - 1) {
+ AX = 0;
+ } else {
+ Matrix<Scalar,1,1> AXmatrix = A.row(i).tail(m-1-i) * X.col(j).tail(m-1-i);
+ AX = AXmatrix(0,0);
+ }
+
+ // Compute XB = \sum_{k=1}^{j-1} X_{ik} B_{kj}
+ Scalar XB;
+ if (j == 0) {
+ XB = 0;
+ } else {
+ Matrix<Scalar,1,1> XBmatrix = X.row(i).head(j) * B.col(j).head(j);
+ XB = XBmatrix(0,0);
+ }
+
+ X(i,j) = (C(i,j) - AX - XB) / (A(i,i) + B(j,j));
+ }
+ }
+ return X;
+}
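+// For example, in the scalar case (m = n = 1) the recurrence above reduces to
+// X(0,0) = C(0,0) / (A(0,0) + B(0,0)); this is also the first value produced
+// by the double loop (i = m-1, j = 0) in the general case.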
+
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix function of some matrix (expression).
+ *
+ * \tparam Derived Type of the argument to the matrix function.
+ *
+ * This class holds the argument to the matrix function until it is
+ * assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * MatrixBase::matrixFunction() and related functions, and most of the
+ * time this is the only way it is used.
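+ *
+ * A minimal usage sketch (the stem function \c expfn below is a
+ * user-supplied example, not part of Eigen; it returns the n-th
+ * derivative of exp, which is exp itself):
+ * \code
+ * #include <unsupported/Eigen/MatrixFunctions>
+ * #include <iostream>
+ *
+ * std::complex<double> expfn(std::complex<double> x, int) { return std::exp(x); }
+ *
+ * int main() {
+ *   Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
+ *   std::cout << A.matrixFunction(expfn) << std::endl; // same result as A.exp()
+ * }
+ * \endcode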
+ */
+template<typename Derived> class MatrixFunctionReturnValue
+: public ReturnByValue<MatrixFunctionReturnValue<Derived> >
+{
+ public:
+
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
+ typedef typename internal::stem_function<Scalar>::type StemFunction;
+
+ /** \brief Constructor.
+ *
+ * \param[in] A %Matrix (expression) forming the argument of the
+ * matrix function.
+ * \param[in] f Stem function for matrix function under consideration.
+ */
+ MatrixFunctionReturnValue(const Derived& A, StemFunction f) : m_A(A), m_f(f) { }
+
+ /** \brief Compute the matrix function.
+ *
+ * \param[out] result \p f applied to \p A, where \p f and \p A
+ * are as in the constructor.
+ */
+ template <typename ResultType>
+ inline void evalTo(ResultType& result) const
+ {
+ typedef typename Derived::PlainObject PlainObject;
+ typedef internal::traits<PlainObject> Traits;
+ static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
+ static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
+ static const int Options = PlainObject::Options;
+ typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+ typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+ typedef MatrixFunctionAtomic<DynMatrixType> AtomicType;
+ AtomicType atomic(m_f);
+
+ const PlainObject Aevaluated = m_A.eval();
+ MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
+ mf.compute(result);
+ }
+
+ Index rows() const { return m_A.rows(); }
+ Index cols() const { return m_A.cols(); }
+
+ private:
+ typename internal::nested<Derived>::type m_A;
+ StemFunction *m_f;
+
+ MatrixFunctionReturnValue& operator=(const MatrixFunctionReturnValue&);
+};
+
+namespace internal {
+template<typename Derived>
+struct traits<MatrixFunctionReturnValue<Derived> >
+{
+ typedef typename Derived::PlainObject ReturnType;
+};
+}
+
+
+/********** MatrixBase methods **********/
+
+
+template <typename Derived>
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::matrixFunction(typename internal::stem_function<typename internal::traits<Derived>::Scalar>::type f) const
+{
+ eigen_assert(rows() == cols());
+ return MatrixFunctionReturnValue<Derived>(derived(), f);
+}
+
+template <typename Derived>
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sin() const
+{
+ eigen_assert(rows() == cols());
+ typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
+ return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::sin);
+}
+
+template <typename Derived>
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cos() const
+{
+ eigen_assert(rows() == cols());
+ typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
+ return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::cos);
+}
+
+template <typename Derived>
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sinh() const
+{
+ eigen_assert(rows() == cols());
+ typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
+ return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::sinh);
+}
+
+template <typename Derived>
+const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cosh() const
+{
+ eigen_assert(rows() == cols());
+ typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
+ return MatrixFunctionReturnValue<Derived>(derived(), StdStemFunctions<ComplexScalar>::cosh);
+}
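+// Illustrative check for the trigonometric variants above (a sketch, not part
+// of the library; any square matrix A works):
+//
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
+//   Eigen::MatrixXd S = A.sin(), C = A.cos();
+//   // S*S + C*C is close to MatrixXd::Identity(3,3), as in the scalar identity.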
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_FUNCTION
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
new file mode 100644
index 0000000..efe332c
--- /dev/null
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunctionAtomic.h
@@ -0,0 +1,131 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_FUNCTION_ATOMIC
+#define EIGEN_MATRIX_FUNCTION_ATOMIC
+
+namespace Eigen {
+
+/** \ingroup MatrixFunctions_Module
+ * \class MatrixFunctionAtomic
+ * \brief Helper class for computing matrix functions of atomic matrices.
+ *
+ * \internal
+ * Here, an atomic matrix is a triangular matrix whose diagonal
+ * entries are close to each other.
+ */
+template <typename MatrixType>
+class MatrixFunctionAtomic
+{
+ public:
+
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef typename internal::stem_function<Scalar>::type StemFunction;
+ typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+
+ /** \brief Constructor
+ * \param[in] f matrix function to compute.
+ */
+ MatrixFunctionAtomic(StemFunction f) : m_f(f) { }
+
+ /** \brief Compute matrix function of atomic matrix
+ * \param[in] A argument of matrix function, should be upper triangular and atomic
+ * \returns f(A), the matrix function evaluated at the given matrix
+ */
+ MatrixType compute(const MatrixType& A);
+
+ private:
+
+ // Prevent copying
+ MatrixFunctionAtomic(const MatrixFunctionAtomic&);
+ MatrixFunctionAtomic& operator=(const MatrixFunctionAtomic&);
+
+ void computeMu();
+ bool taylorConverged(Index s, const MatrixType& F, const MatrixType& Fincr, const MatrixType& P);
+
+ /** \brief Pointer to scalar function */
+ StemFunction* m_f;
+
+    /** \brief Size of the matrix whose function is being computed */
+ Index m_Arows;
+
+ /** \brief Mean of eigenvalues */
+ Scalar m_avgEival;
+
+ /** \brief Argument shifted by mean of eigenvalues */
+ MatrixType m_Ashifted;
+
+ /** \brief Constant used to determine whether Taylor series has converged */
+ RealScalar m_mu;
+};
+
+template <typename MatrixType>
+MatrixType MatrixFunctionAtomic<MatrixType>::compute(const MatrixType& A)
+{
+ // TODO: Use that A is upper triangular
+ m_Arows = A.rows();
+ m_avgEival = A.trace() / Scalar(RealScalar(m_Arows));
+ m_Ashifted = A - m_avgEival * MatrixType::Identity(m_Arows, m_Arows);
+ computeMu();
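+  // Taylor expansion of f about the mean eigenvalue mu = m_avgEival:
+  //   f(A) = sum_{s>=0} f^(s)(mu)/s! * (A - mu*I)^s,
+  // where P below accumulates the terms (A - mu*I)^s / s!.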
+ MatrixType F = m_f(m_avgEival, 0) * MatrixType::Identity(m_Arows, m_Arows);
+ MatrixType P = m_Ashifted;
+ MatrixType Fincr;
+ for (Index s = 1; s < 1.1 * m_Arows + 10; s++) { // upper limit is fairly arbitrary
+ Fincr = m_f(m_avgEival, static_cast<int>(s)) * P;
+ F += Fincr;
+ P = Scalar(RealScalar(1.0/(s + 1))) * P * m_Ashifted;
+ if (taylorConverged(s, F, Fincr, P)) {
+ return F;
+ }
+ }
+ eigen_assert("Taylor series does not converge" && 0);
+ return F;
+}
+
+/** \brief Compute \c m_mu. */
+template <typename MatrixType>
+void MatrixFunctionAtomic<MatrixType>::computeMu()
+{
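+  // Solve (I - m_Ashifted) e = ones and take the max-norm of e; this value
+  // feeds the truncation test in taylorConverged().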
+ const MatrixType N = MatrixType::Identity(m_Arows, m_Arows) - m_Ashifted;
+ VectorType e = VectorType::Ones(m_Arows);
+ N.template triangularView<Upper>().solveInPlace(e);
+ m_mu = e.cwiseAbs().maxCoeff();
+}
+
+/** \brief Determine whether Taylor series has converged */
+template <typename MatrixType>
+bool MatrixFunctionAtomic<MatrixType>::taylorConverged(Index s, const MatrixType& F,
+ const MatrixType& Fincr, const MatrixType& P)
+{
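+  // Truncation test in the spirit of Davies & Higham: once the increment is
+  // negligible, bound the series tail by m_mu * delta * ||P||, where delta
+  // bounds max_i |f^(s+r)(eival_i)| / r! over r < n, and accept when the
+  // bound falls below epsilon * ||F||.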
+ const Index n = F.rows();
+ const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff();
+ const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff();
+ if (Fincr_norm < NumTraits<Scalar>::epsilon() * F_norm) {
+ RealScalar delta = 0;
+ RealScalar rfactorial = 1;
+ for (Index r = 0; r < n; r++) {
+ RealScalar mx = 0;
+ for (Index i = 0; i < n; i++)
+ mx = (std::max)(mx, std::abs(m_f(m_Ashifted(i, i) + m_avgEival, static_cast<int>(s+r))));
+ if (r != 0)
+ rfactorial *= RealScalar(r);
+ delta = (std::max)(delta, mx / rfactorial);
+ }
+ const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
+ if (m_mu * delta * P_norm < NumTraits<Scalar>::epsilon() * F_norm)
+ return true;
+ }
+ return false;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_FUNCTION_ATOMIC
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
new file mode 100644
index 0000000..c744fc0
--- /dev/null
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
@@ -0,0 +1,486 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_LOGARITHM
+#define EIGEN_MATRIX_LOGARITHM
+
+#ifndef M_PI
+#define M_PI 3.141592653589793238462643383279503L
+#endif
+
+namespace Eigen {
+
+/** \ingroup MatrixFunctions_Module
+ * \class MatrixLogarithmAtomic
+ * \brief Helper class for computing matrix logarithm of atomic matrices.
+ *
+ * \internal
+ * Here, an atomic matrix is a triangular matrix whose diagonal
+ * entries are close to each other.
+ *
+ * \sa class MatrixFunctionAtomic, MatrixBase::log()
+ */
+template <typename MatrixType>
+class MatrixLogarithmAtomic
+{
+public:
+
+ typedef typename MatrixType::Scalar Scalar;
+ // typedef typename MatrixType::Index Index;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ // typedef typename internal::stem_function<Scalar>::type StemFunction;
+ // typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;
+
+ /** \brief Constructor. */
+ MatrixLogarithmAtomic() { }
+
+ /** \brief Compute matrix logarithm of atomic matrix
+ * \param[in] A argument of matrix logarithm, should be upper triangular and atomic
+ * \returns The logarithm of \p A.
+ */
+ MatrixType compute(const MatrixType& A);
+
+private:
+
+ void compute2x2(const MatrixType& A, MatrixType& result);
+ void computeBig(const MatrixType& A, MatrixType& result);
+ int getPadeDegree(float normTminusI);
+ int getPadeDegree(double normTminusI);
+ int getPadeDegree(long double normTminusI);
+ void computePade(MatrixType& result, const MatrixType& T, int degree);
+ void computePade3(MatrixType& result, const MatrixType& T);
+ void computePade4(MatrixType& result, const MatrixType& T);
+ void computePade5(MatrixType& result, const MatrixType& T);
+ void computePade6(MatrixType& result, const MatrixType& T);
+ void computePade7(MatrixType& result, const MatrixType& T);
+ void computePade8(MatrixType& result, const MatrixType& T);
+ void computePade9(MatrixType& result, const MatrixType& T);
+ void computePade10(MatrixType& result, const MatrixType& T);
+ void computePade11(MatrixType& result, const MatrixType& T);
+
+ static const int minPadeDegree = 3;
+ static const int maxPadeDegree = std::numeric_limits<RealScalar>::digits<= 24? 5: // single precision
+ std::numeric_limits<RealScalar>::digits<= 53? 7: // double precision
+ std::numeric_limits<RealScalar>::digits<= 64? 8: // extended precision
+ std::numeric_limits<RealScalar>::digits<=106? 10: // double-double
+ 11; // quadruple precision
+
+ // Prevent copying
+ MatrixLogarithmAtomic(const MatrixLogarithmAtomic&);
+ MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&);
+};
+
+/** \brief Compute logarithm of triangular matrix with clustered eigenvalues. */
+template <typename MatrixType>
+MatrixType MatrixLogarithmAtomic<MatrixType>::compute(const MatrixType& A)
+{
+ using std::log;
+ MatrixType result(A.rows(), A.rows());
+ if (A.rows() == 1)
+ result(0,0) = log(A(0,0));
+ else if (A.rows() == 2)
+ compute2x2(A, result);
+ else
+ computeBig(A, result);
+ return result;
+}
+
+/** \brief Compute logarithm of 2x2 triangular matrix. */
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixType& result)
+{
+ using std::abs;
+ using std::ceil;
+ using std::imag;
+ using std::log;
+
+ Scalar logA00 = log(A(0,0));
+ Scalar logA11 = log(A(1,1));
+
+ result(0,0) = logA00;
+ result(1,0) = Scalar(0);
+ result(1,1) = logA11;
+
+ if (A(0,0) == A(1,1)) {
+ result(0,1) = A(0,1) / A(0,0);
+ } else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) {
+ result(0,1) = A(0,1) * (logA11 - logA00) / (A(1,1) - A(0,0));
+ } else {
+ // computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
+ int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI)));
+ Scalar y = A(1,1) - A(0,0), x = A(1,1) + A(0,0);
+ result(0,1) = A(0,1) * (Scalar(2) * numext::atanh2(y,x) + Scalar(0,2*M_PI*unwindingNumber)) / y;
+ }
+}
+
+/** \brief Compute logarithm of a triangular matrix with size > 2.
+ * \details This uses an inverse scaling-and-squaring algorithm. */
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computeBig(const MatrixType& A, MatrixType& result)
+{
+ using std::pow;
+ int numberOfSquareRoots = 0;
+ int numberOfExtraSquareRoots = 0;
+ int degree;
+ MatrixType T = A, sqrtT;
+ const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1: // single precision
+ maxPadeDegree<= 7? 2.6429608311114350e-1: // double precision
+ maxPadeDegree<= 8? 2.32777776523703892094e-1L: // extended precision
+ maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L: // double-double
+ 1.1880960220216759245467951592883642e-1L; // quadruple precision
+
+ while (true) {
+ RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff();
+ if (normTminusI < maxNormForPade) {
+ degree = getPadeDegree(normTminusI);
+ int degree2 = getPadeDegree(normTminusI / RealScalar(2));
+ if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1))
+ break;
+ ++numberOfExtraSquareRoots;
+ }
+ MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
+ T = sqrtT.template triangularView<Upper>();
+ ++numberOfSquareRoots;
+ }
+
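+  // T now equals A^(1/2^numberOfSquareRoots); undo the square roots via
+  // log(A) = 2^numberOfSquareRoots * log(T).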
+ computePade(result, T, degree);
+ result *= pow(RealScalar(2), numberOfSquareRoots);
+}
+
+/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */
+template <typename MatrixType>
+int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(float normTminusI)
+{
+ const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1,
+ 5.3149729967117310e-1 };
+ int degree = 3;
+ for (; degree <= maxPadeDegree; ++degree)
+ if (normTminusI <= maxNormForPade[degree - minPadeDegree])
+ break;
+ return degree;
+}
+
+/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */
+template <typename MatrixType>
+int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(double normTminusI)
+{
+ const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
+ 1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
+ int degree = 3;
+ for (; degree <= maxPadeDegree; ++degree)
+ if (normTminusI <= maxNormForPade[degree - minPadeDegree])
+ break;
+ return degree;
+}
+
+/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double) */
+template <typename MatrixType>
+int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
+{
+#if LDBL_MANT_DIG == 53 // double precision
+ const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L,
+ 1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L };
+#elif LDBL_MANT_DIG <= 64 // extended precision
+ const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L,
+ 5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L,
+ 2.32777776523703892094e-1L };
+#elif LDBL_MANT_DIG <= 106 // double-double
+ const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */,
+ 9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L,
+ 1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L,
+ 4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L,
+ 1.05026503471351080481093652651105e-1L };
+#else // quadruple precision
+ const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */,
+ 5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L,
+ 8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L,
+ 3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L,
+ 8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L };
+#endif
+ int degree = 3;
+ for (; degree <= maxPadeDegree; ++degree)
+ if (normTminusI <= maxNormForPade[degree - minPadeDegree])
+ break;
+ return degree;
+}
+
+/* \brief Compute Pade approximation to matrix logarithm */
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade(MatrixType& result, const MatrixType& T, int degree)
+{
+ switch (degree) {
+ case 3: computePade3(result, T); break;
+ case 4: computePade4(result, T); break;
+ case 5: computePade5(result, T); break;
+ case 6: computePade6(result, T); break;
+ case 7: computePade7(result, T); break;
+ case 8: computePade8(result, T); break;
+ case 9: computePade9(result, T); break;
+ case 10: computePade10(result, T); break;
+ case 11: computePade11(result, T); break;
+ default: assert(false); // should never happen
+ }
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade3(MatrixType& result, const MatrixType& T)
+{
+ const int degree = 3;
+ const RealScalar nodes[] = { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L,
+ 0.8872983346207416885179265399782400L };
+ const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,
+ 0.2777777777777777777777777777777778L };
+ eigen_assert(degree <= maxPadeDegree);
+ MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+ result.setZero(T.rows(), T.rows());
+ for (int k = 0; k < degree; ++k)
+ result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+ .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade4(MatrixType& result, const MatrixType& T)
+{
+ const int degree = 4;
+ const RealScalar nodes[] = { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L,
+ 0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L };
+ const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,
+ 0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L };
+ eigen_assert(degree <= maxPadeDegree);
+ MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+ result.setZero(T.rows(), T.rows());
+ for (int k = 0; k < degree; ++k)
+ result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+ .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade5(MatrixType& result, const MatrixType& T)
+{
+ const int degree = 5;
+ const RealScalar nodes[] = { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L,
+ 0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L,
+ 0.9530899229693319963988134391496965L };
+ const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,
+ 0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
+ 0.1184634425280945437571320203599587L };
+ eigen_assert(degree <= maxPadeDegree);
+ MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+ result.setZero(T.rows(), T.rows());
+ for (int k = 0; k < degree; ++k)
+ result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+ .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade6(MatrixType& result, const MatrixType& T)
+{
+ const int degree = 6;
+ const RealScalar nodes[] = { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L,
+ 0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L,
+ 0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L };
+ const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,
+ 0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
+ 0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
+ eigen_assert(degree <= maxPadeDegree);
+ MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+ result.setZero(T.rows(), T.rows());
+ for (int k = 0; k < degree; ++k)
+ result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+ .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade7(MatrixType& result, const MatrixType& T)
+{
+ const int degree = 7;
+ const RealScalar nodes[] = { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L,
+ 0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L,
+ 0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L,
+ 0.9745539561713792622630948420239256L };
+ const RealScalar weights[] = { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L,
+ 0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
+ 0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
+ 0.0647424830844348466353057163395410L };
+ eigen_assert(degree <= maxPadeDegree);
+ MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+ result.setZero(T.rows(), T.rows());
+ for (int k = 0; k < degree; ++k)
+ result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+ .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade8(MatrixType& result, const MatrixType& T)
+{
+ const int degree = 8;
+ const RealScalar nodes[] = { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L,
+ 0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L,
+ 0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L,
+ 0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L };
+ const RealScalar weights[] = { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L,
+ 0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
+ 0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
+ 0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L };
+ eigen_assert(degree <= maxPadeDegree);
+ MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+ result.setZero(T.rows(), T.rows());
+ for (int k = 0; k < degree; ++k)
+ result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+ .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade9(MatrixType& result, const MatrixType& T)
+{
+ const int degree = 9;
+ const RealScalar nodes[] = { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L,
+ 0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L,
+ 0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L,
+ 0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L,
+ 0.9840801197538130449177881014518364L };
+ const RealScalar weights[] = { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L,
+ 0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L,
+ 0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
+ 0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
+ 0.0406371941807872059859460790552618L };
+ eigen_assert(degree <= maxPadeDegree);
+ MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+ result.setZero(T.rows(), T.rows());
+ for (int k = 0; k < degree; ++k)
+ result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+ .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade10(MatrixType& result, const MatrixType& T)
+{
+ const int degree = 10;
+ const RealScalar nodes[] = { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L,
+ 0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L,
+ 0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L,
+ 0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L,
+ 0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L };
+ const RealScalar weights[] = { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L,
+ 0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L,
+ 0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
+ 0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
+ 0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L };
+ eigen_assert(degree <= maxPadeDegree);
+ MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+ result.setZero(T.rows(), T.rows());
+ for (int k = 0; k < degree; ++k)
+ result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+ .template triangularView<Upper>().solve(TminusI);
+}
+
+template <typename MatrixType>
+void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const MatrixType& T)
+{
+ const int degree = 11;
+ const RealScalar nodes[] = { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L,
+ 0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L,
+ 0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L,
+ 0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L,
+ 0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L,
+ 0.9891143290730284964019690005614287L };
+ const RealScalar weights[] = { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L,
+ 0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L,
+ 0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L,
+ 0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
+ 0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
+ 0.0278342835580868332413768602212743L };
+ eigen_assert(degree <= maxPadeDegree);
+ MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
+ result.setZero(T.rows(), T.rows());
+ for (int k = 0; k < degree; ++k)
+ result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
+ .template triangularView<Upper>().solve(TminusI);
+}
+
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix logarithm of some matrix (expression).
+ *
+ * \tparam Derived  Type of the argument to the matrix logarithm.
+ *
+ * This class holds the argument to the matrix logarithm until it is
+ * assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * MatrixBase::log() and most of the time this is the only way it
+ * is used.
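+ *
+ * A minimal usage sketch (the input is made symmetric positive definite so
+ * that no eigenvalue lies on the closed negative real axis, where the
+ * principal logarithm is undefined):
+ * \code
+ * #include <unsupported/Eigen/MatrixFunctions>
+ * Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
+ * A = A * A.transpose() + Eigen::MatrixXd::Identity(3,3);
+ * Eigen::MatrixXd logA = A.log();
+ * \endcode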
+ */
+template<typename Derived> class MatrixLogarithmReturnValue
+: public ReturnByValue<MatrixLogarithmReturnValue<Derived> >
+{
+public:
+
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
+
+ /** \brief Constructor.
+ *
+ * \param[in] A %Matrix (expression) forming the argument of the matrix logarithm.
+ */
+ MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }
+
+ /** \brief Compute the matrix logarithm.
+ *
+   * \param[out] result Logarithm of \p A, where \p A is as specified in the constructor.
+ */
+ template <typename ResultType>
+ inline void evalTo(ResultType& result) const
+ {
+ typedef typename Derived::PlainObject PlainObject;
+ typedef internal::traits<PlainObject> Traits;
+ static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
+ static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
+ static const int Options = PlainObject::Options;
+ typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
+ typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+ typedef MatrixLogarithmAtomic<DynMatrixType> AtomicType;
+ AtomicType atomic;
+
+ const PlainObject Aevaluated = m_A.eval();
+ MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
+ mf.compute(result);
+ }
+
+ Index rows() const { return m_A.rows(); }
+ Index cols() const { return m_A.cols(); }
+
+private:
+ typename internal::nested<Derived>::type m_A;
+
+ MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&);
+};
+
+namespace internal {
+ template<typename Derived>
+ struct traits<MatrixLogarithmReturnValue<Derived> >
+ {
+ typedef typename Derived::PlainObject ReturnType;
+ };
+}
+
+
+/********** MatrixBase method **********/
+
+
+template <typename Derived>
+const MatrixLogarithmReturnValue<Derived> MatrixBase<Derived>::log() const
+{
+ eigen_assert(rows() == cols());
+ return MatrixLogarithmReturnValue<Derived>(derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_LOGARITHM
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
new file mode 100644
index 0000000..78a307e
--- /dev/null
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
@@ -0,0 +1,508 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012, 2013 Chen-Pang He <jdh8@ms63.hinet.net>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_POWER
+#define EIGEN_MATRIX_POWER
+
+namespace Eigen {
+
+template<typename MatrixType> class MatrixPower;
+
+template<typename MatrixType>
+class MatrixPowerRetval : public ReturnByValue< MatrixPowerRetval<MatrixType> >
+{
+ public:
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ MatrixPowerRetval(MatrixPower<MatrixType>& pow, RealScalar p) : m_pow(pow), m_p(p)
+ { }
+
+ template<typename ResultType>
+ inline void evalTo(ResultType& res) const
+ { m_pow.compute(res, m_p); }
+
+ Index rows() const { return m_pow.rows(); }
+ Index cols() const { return m_pow.cols(); }
+
+ private:
+ MatrixPower<MatrixType>& m_pow;
+ const RealScalar m_p;
+ MatrixPowerRetval& operator=(const MatrixPowerRetval&);
+};
+
+template<typename MatrixType>
+class MatrixPowerAtomic
+{
+ private:
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef std::complex<RealScalar> ComplexScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Array<Scalar, RowsAtCompileTime, 1, ColMajor, MaxRowsAtCompileTime> ArrayType;
+
+ const MatrixType& m_A;
+ RealScalar m_p;
+
+ void computePade(int degree, const MatrixType& IminusT, MatrixType& res) const;
+ void compute2x2(MatrixType& res, RealScalar p) const;
+ void computeBig(MatrixType& res) const;
+ static int getPadeDegree(float normIminusT);
+ static int getPadeDegree(double normIminusT);
+ static int getPadeDegree(long double normIminusT);
+ static ComplexScalar computeSuperDiag(const ComplexScalar&, const ComplexScalar&, RealScalar p);
+ static RealScalar computeSuperDiag(RealScalar, RealScalar, RealScalar p);
+
+ public:
+ MatrixPowerAtomic(const MatrixType& T, RealScalar p);
+ void compute(MatrixType& res) const;
+};
+
+template<typename MatrixType>
+MatrixPowerAtomic<MatrixType>::MatrixPowerAtomic(const MatrixType& T, RealScalar p) :
+ m_A(T), m_p(p)
+{ eigen_assert(T.rows() == T.cols()); }
+
+template<typename MatrixType>
+void MatrixPowerAtomic<MatrixType>::compute(MatrixType& res) const
+{
+ res.resizeLike(m_A);
+ switch (m_A.rows()) {
+ case 0:
+ break;
+ case 1:
+ res(0,0) = std::pow(m_A(0,0), m_p);
+ break;
+ case 2:
+ compute2x2(res, m_p);
+ break;
+ default:
+ computeBig(res);
+ }
+}
+
+template<typename MatrixType>
+void MatrixPowerAtomic<MatrixType>::computePade(int degree, const MatrixType& IminusT, MatrixType& res) const
+{
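+  // Bottom-up evaluation of the [degree/degree] Pade approximant of
+  // (I - IminusT)^p, written as a continued fraction (cf. Higham & Lin,
+  // "A Schur-Pade Algorithm for Fractional Powers of a Matrix").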
+ int i = degree<<1;
+ res = (m_p-degree) / ((i-1)<<1) * IminusT;
+ for (--i; i; --i) {
+ res = (MatrixType::Identity(IminusT.rows(), IminusT.cols()) + res).template triangularView<Upper>()
+ .solve((i==1 ? -m_p : i&1 ? (-m_p-(i>>1))/(i<<1) : (m_p-(i>>1))/((i-1)<<1)) * IminusT).eval();
+ }
+ res += MatrixType::Identity(IminusT.rows(), IminusT.cols());
+}
+
+// This function assumes that res has the correct size (see bug 614)
+template<typename MatrixType>
+void MatrixPowerAtomic<MatrixType>::compute2x2(MatrixType& res, RealScalar p) const
+{
+ using std::abs;
+ using std::pow;
+
+ res.coeffRef(0,0) = pow(m_A.coeff(0,0), p);
+
+ for (Index i=1; i < m_A.cols(); ++i) {
+ res.coeffRef(i,i) = pow(m_A.coeff(i,i), p);
+ if (m_A.coeff(i-1,i-1) == m_A.coeff(i,i))
+ res.coeffRef(i-1,i) = p * pow(m_A.coeff(i,i), p-1);
+ else if (2*abs(m_A.coeff(i-1,i-1)) < abs(m_A.coeff(i,i)) || 2*abs(m_A.coeff(i,i)) < abs(m_A.coeff(i-1,i-1)))
+ res.coeffRef(i-1,i) = (res.coeff(i,i)-res.coeff(i-1,i-1)) / (m_A.coeff(i,i)-m_A.coeff(i-1,i-1));
+ else
+ res.coeffRef(i-1,i) = computeSuperDiag(m_A.coeff(i,i), m_A.coeff(i-1,i-1), p);
+ res.coeffRef(i-1,i) *= m_A.coeff(i-1,i);
+ }
+}
+
+template<typename MatrixType>
+void MatrixPowerAtomic<MatrixType>::computeBig(MatrixType& res) const
+{
+ const int digits = std::numeric_limits<RealScalar>::digits;
+  const RealScalar maxNormForPade = digits <= 24? 4.3386528e-1f: // single precision
+ digits <= 53? 2.789358995219730e-1: // double precision
+ digits <= 64? 2.4471944416607995472e-1L: // extended precision
+ digits <= 106? 1.1016843812851143391275867258512e-1L: // double-double
+ 9.134603732914548552537150753385375e-2L; // quadruple precision
+ MatrixType IminusT, sqrtT, T = m_A.template triangularView<Upper>();
+ RealScalar normIminusT;
+ int degree, degree2, numberOfSquareRoots = 0;
+ bool hasExtraSquareRoot = false;
+
+ /* FIXME
+ * For singular T, norm(I - T) >= 1 but maxNormForPade < 1, leads to infinite
+ * loop. We should move 0 eigenvalues to bottom right corner. We need not
+ * worry about tiny values (e.g. 1e-300) because they will reach 1 if
+   * repeatedly square-rooted.
+ *
+ * If the 0 eigenvalues are semisimple, they can form a 0 matrix at the
+ * bottom right corner.
+ *
+ * [ T A ]^p [ T^p (T^-1 T^p A) ]
+ * [ ] = [ ]
+ * [ 0 0 ] [ 0 0 ]
+ */
+ for (Index i=0; i < m_A.cols(); ++i)
+ eigen_assert(m_A(i,i) != RealScalar(0));
+
+ while (true) {
+ IminusT = MatrixType::Identity(m_A.rows(), m_A.cols()) - T;
+ normIminusT = IminusT.cwiseAbs().colwise().sum().maxCoeff();
+ if (normIminusT < maxNormForPade) {
+ degree = getPadeDegree(normIminusT);
+ degree2 = getPadeDegree(normIminusT/2);
+ if (degree - degree2 <= 1 || hasExtraSquareRoot)
+ break;
+ hasExtraSquareRoot = true;
+ }
+ MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
+ T = sqrtT.template triangularView<Upper>();
+ ++numberOfSquareRoots;
+ }
+ computePade(degree, IminusT, res);
+
+ for (; numberOfSquareRoots; --numberOfSquareRoots) {
+ compute2x2(res, std::ldexp(m_p, -numberOfSquareRoots));
+ res = res.template triangularView<Upper>() * res;
+ }
+ compute2x2(res, m_p);
+}
+
+template<typename MatrixType>
+inline int MatrixPowerAtomic<MatrixType>::getPadeDegree(float normIminusT)
+{
+ const float maxNormForPade[] = { 2.8064004e-1f /* degree = 3 */ , 4.3386528e-1f };
+ int degree = 3;
+ for (; degree <= 4; ++degree)
+ if (normIminusT <= maxNormForPade[degree - 3])
+ break;
+ return degree;
+}
+
+template<typename MatrixType>
+inline int MatrixPowerAtomic<MatrixType>::getPadeDegree(double normIminusT)
+{
+ const double maxNormForPade[] = { 1.884160592658218e-2 /* degree = 3 */ , 6.038881904059573e-2, 1.239917516308172e-1,
+ 1.999045567181744e-1, 2.789358995219730e-1 };
+ int degree = 3;
+ for (; degree <= 7; ++degree)
+ if (normIminusT <= maxNormForPade[degree - 3])
+ break;
+ return degree;
+}
+
+template<typename MatrixType>
+inline int MatrixPowerAtomic<MatrixType>::getPadeDegree(long double normIminusT)
+{
+#if LDBL_MANT_DIG == 53
+ const int maxPadeDegree = 7;
+ const double maxNormForPade[] = { 1.884160592658218e-2L /* degree = 3 */ , 6.038881904059573e-2L, 1.239917516308172e-1L,
+ 1.999045567181744e-1L, 2.789358995219730e-1L };
+#elif LDBL_MANT_DIG <= 64
+ const int maxPadeDegree = 8;
+ const double maxNormForPade[] = { 6.3854693117491799460e-3L /* degree = 3 */ , 2.6394893435456973676e-2L,
+ 6.4216043030404063729e-2L, 1.1701165502926694307e-1L, 1.7904284231268670284e-1L, 2.4471944416607995472e-1L };
+#elif LDBL_MANT_DIG <= 106
+ const int maxPadeDegree = 10;
+ const double maxNormForPade[] = { 1.0007161601787493236741409687186e-4L /* degree = 3 */ ,
+ 1.0007161601787493236741409687186e-3L, 4.7069769360887572939882574746264e-3L, 1.3220386624169159689406653101695e-2L,
+ 2.8063482381631737920612944054906e-2L, 4.9625993951953473052385361085058e-2L, 7.7367040706027886224557538328171e-2L,
+ 1.1016843812851143391275867258512e-1L };
+#else
+ const int maxPadeDegree = 10;
+ const double maxNormForPade[] = { 5.524506147036624377378713555116378e-5L /* degree = 3 */ ,
+ 6.640600568157479679823602193345995e-4L, 3.227716520106894279249709728084626e-3L,
+ 9.619593944683432960546978734646284e-3L, 2.134595382433742403911124458161147e-2L,
+ 3.908166513900489428442993794761185e-2L, 6.266780814639442865832535460550138e-2L,
+ 9.134603732914548552537150753385375e-2L };
+#endif
+ int degree = 3;
+ for (; degree <= maxPadeDegree; ++degree)
+ if (normIminusT <= maxNormForPade[degree - 3])
+ break;
+ return degree;
+}
+
+template<typename MatrixType>
+inline typename MatrixPowerAtomic<MatrixType>::ComplexScalar
+MatrixPowerAtomic<MatrixType>::computeSuperDiag(const ComplexScalar& curr, const ComplexScalar& prev, RealScalar p)
+{
+ ComplexScalar logCurr = std::log(curr);
+ ComplexScalar logPrev = std::log(prev);
+ int unwindingNumber = std::ceil((numext::imag(logCurr - logPrev) - M_PI) / (2*M_PI));
+ ComplexScalar w = numext::atanh2(curr - prev, curr + prev) + ComplexScalar(0, M_PI*unwindingNumber);
+ return RealScalar(2) * std::exp(RealScalar(0.5) * p * (logCurr + logPrev)) * std::sinh(p * w) / (curr - prev);
+}
+
+template<typename MatrixType>
+inline typename MatrixPowerAtomic<MatrixType>::RealScalar
+MatrixPowerAtomic<MatrixType>::computeSuperDiag(RealScalar curr, RealScalar prev, RealScalar p)
+{
+ RealScalar w = numext::atanh2(curr - prev, curr + prev);
+ return 2 * std::exp(p * (std::log(curr) + std::log(prev)) / 2) * std::sinh(p * w) / (curr - prev);
+}
+
+/**
+ * \ingroup MatrixFunctions_Module
+ *
+ * \brief Class for computing matrix powers.
+ *
+ * \tparam MatrixType type of the base, expected to be an instantiation
+ * of the Matrix class template.
+ *
+ * This class is capable of computing real/complex matrices raised to
+ * an arbitrary real power. It caches the result of the Schur
+ * decomposition once a non-integral power has been calculated.
+ * Therefore, if you want to compute multiple (>= 2) matrix powers
+ * of the same matrix, using the class directly is more efficient than
+ * calling MatrixBase::pow().
+ *
+ * Example:
+ * \include MatrixPower_optimal.cpp
+ * Output: \verbinclude MatrixPower_optimal.out
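+ *
+ * The reuse pattern, sketched (assuming \c A is a square MatrixXd and \c b a
+ * vector of matching size):
+ * \code
+ * MatrixPower<MatrixXd> Apow(A);
+ * VectorXd x = Apow(0.25) * b; // Schur decomposition computed and cached here
+ * VectorXd y = Apow(0.75) * b; // reuses the cached decomposition
+ * \endcode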
+ */
+template<typename MatrixType>
+class MatrixPower
+{
+ private:
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ public:
+ /**
+ * \brief Constructor.
+ *
+ * \param[in] A the base of the matrix power.
+ *
+ * The class stores a reference to A, so it should not be changed
+ * (or destroyed) before evaluation.
+ */
+ explicit MatrixPower(const MatrixType& A) : m_A(A), m_conditionNumber(0)
+ { eigen_assert(A.rows() == A.cols()); }
+
+ /**
+ * \brief Returns the matrix power.
+ *
+ * \param[in] p exponent, a real scalar.
+ * \return The expression \f$ A^p \f$, where A is specified in the
+ * constructor.
+ */
+ const MatrixPowerRetval<MatrixType> operator()(RealScalar p)
+ { return MatrixPowerRetval<MatrixType>(*this, p); }
+
+ /**
+ * \brief Compute the matrix power.
+ *
+ * \param[in] p exponent, a real scalar.
+ * \param[out] res \f$ A^p \f$ where A is specified in the
+ * constructor.
+ */
+ template<typename ResultType>
+ void compute(ResultType& res, RealScalar p);
+
+ Index rows() const { return m_A.rows(); }
+ Index cols() const { return m_A.cols(); }
+
+ private:
+ typedef std::complex<RealScalar> ComplexScalar;
+ typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, MatrixType::Options,
+ MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrix;
+
+ typename MatrixType::Nested m_A;
+ MatrixType m_tmp;
+ ComplexMatrix m_T, m_U, m_fT;
+ RealScalar m_conditionNumber;
+
+ RealScalar modfAndInit(RealScalar, RealScalar*);
+
+ template<typename ResultType>
+ void computeIntPower(ResultType&, RealScalar);
+
+ template<typename ResultType>
+ void computeFracPower(ResultType&, RealScalar);
+
+ template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+ static void revertSchur(
+ Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
+ const ComplexMatrix& T,
+ const ComplexMatrix& U);
+
+ template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+ static void revertSchur(
+ Matrix<RealScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
+ const ComplexMatrix& T,
+ const ComplexMatrix& U);
+};
+
+template<typename MatrixType>
+template<typename ResultType>
+void MatrixPower<MatrixType>::compute(ResultType& res, RealScalar p)
+{
+ switch (cols()) {
+ case 0:
+ break;
+ case 1:
+ res(0,0) = std::pow(m_A.coeff(0,0), p);
+ break;
+ default:
+ RealScalar intpart, x = modfAndInit(p, &intpart);
+ computeIntPower(res, intpart);
+ computeFracPower(res, x);
+ }
+}
+
+template<typename MatrixType>
+typename MatrixPower<MatrixType>::RealScalar
+MatrixPower<MatrixType>::modfAndInit(RealScalar x, RealScalar* intpart)
+{
+ typedef Array<RealScalar, RowsAtCompileTime, 1, ColMajor, MaxRowsAtCompileTime> RealArray;
+
+ *intpart = std::floor(x);
+ RealScalar res = x - *intpart;
+
+ if (!m_conditionNumber && res) {
+ const ComplexSchur<MatrixType> schurOfA(m_A);
+ m_T = schurOfA.matrixT();
+ m_U = schurOfA.matrixU();
+
+ const RealArray absTdiag = m_T.diagonal().array().abs();
+ m_conditionNumber = absTdiag.maxCoeff() / absTdiag.minCoeff();
+ }
+
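+  // Heuristic: when the fractional part is large relative to the conditioning
+  // of the fractional-power stage, shift the split to res-1 (compensated by
+  // incrementing intpart) to improve accuracy.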
+ if (res>RealScalar(0.5) && res>(1-res)*std::pow(m_conditionNumber, res)) {
+ --res;
+ ++*intpart;
+ }
+ return res;
+}
+
+template<typename MatrixType>
+template<typename ResultType>
+void MatrixPower<MatrixType>::computeIntPower(ResultType& res, RealScalar p)
+{
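+  // Binary exponentiation (square-and-multiply): pp holds the remaining
+  // exponent, and fmod(pp, 2) >= 1 tests its current lowest bit.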
+ RealScalar pp = std::abs(p);
+
+ if (p<0) m_tmp = m_A.inverse();
+ else m_tmp = m_A;
+
+ res = MatrixType::Identity(rows(), cols());
+ while (pp >= 1) {
+ if (std::fmod(pp, 2) >= 1)
+ res = m_tmp * res;
+ m_tmp *= m_tmp;
+ pp /= 2;
+ }
+}
+
+template<typename MatrixType>
+template<typename ResultType>
+void MatrixPower<MatrixType>::computeFracPower(ResultType& res, RealScalar p)
+{
+ if (p) {
+ eigen_assert(m_conditionNumber);
+ MatrixPowerAtomic<ComplexMatrix>(m_T, p).compute(m_fT);
+ revertSchur(m_tmp, m_fT, m_U);
+ res = m_tmp * res;
+ }
+}
+
+template<typename MatrixType>
+template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+inline void MatrixPower<MatrixType>::revertSchur(
+ Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
+ const ComplexMatrix& T,
+ const ComplexMatrix& U)
+{ res.noalias() = U * (T.template triangularView<Upper>() * U.adjoint()); }
+
+template<typename MatrixType>
+template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+inline void MatrixPower<MatrixType>::revertSchur(
+ Matrix<RealScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
+ const ComplexMatrix& T,
+ const ComplexMatrix& U)
+{ res.noalias() = (U * (T.template triangularView<Upper>() * U.adjoint())).real(); }
+
+/**
+ * \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix power of some matrix (expression).
+ *
+ * \tparam Derived type of the base, a matrix (expression).
+ *
+ * This class holds the arguments to the matrix power until it is
+ * assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * MatrixBase::pow() and related functions and most of the
+ * time this is the only way it is used.
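+ *
+ * A minimal usage sketch (the shift by 4*I is only to keep all eigenvalues
+ * away from the closed negative real axis, which non-integral powers require):
+ * \code
+ * MatrixXd A = MatrixXd::Random(3,3);
+ * MatrixXd B = A + 4 * MatrixXd::Identity(3,3);
+ * MatrixXd C = B.pow(0.5); // C*C is close to B
+ * \endcode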
+ */
+template<typename Derived>
+class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue<Derived> >
+{
+ public:
+ typedef typename Derived::PlainObject PlainObject;
+ typedef typename Derived::RealScalar RealScalar;
+ typedef typename Derived::Index Index;
+
+ /**
+ * \brief Constructor.
+ *
+ * \param[in] A %Matrix (expression), the base of the matrix power.
+ * \param[in] p scalar, the exponent of the matrix power.
+ */
+ MatrixPowerReturnValue(const Derived& A, RealScalar p) : m_A(A), m_p(p)
+ { }
+
+ /**
+ * \brief Compute the matrix power.
+ *
+     * \param[out] res \f$ A^p \f$ where \p A and \p p are as in the
+ * constructor.
+ */
+ template<typename ResultType>
+ inline void evalTo(ResultType& res) const
+ { MatrixPower<PlainObject>(m_A.eval()).compute(res, m_p); }
+
+ Index rows() const { return m_A.rows(); }
+ Index cols() const { return m_A.cols(); }
+
+ private:
+ const Derived& m_A;
+ const RealScalar m_p;
+ MatrixPowerReturnValue& operator=(const MatrixPowerReturnValue&);
+};
+
+namespace internal {
+
+template<typename MatrixPowerType>
+struct traits< MatrixPowerRetval<MatrixPowerType> >
+{ typedef typename MatrixPowerType::PlainObject ReturnType; };
+
+template<typename Derived>
+struct traits< MatrixPowerReturnValue<Derived> >
+{ typedef typename Derived::PlainObject ReturnType; };
+
+}
+
+template<typename Derived>
+const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(const RealScalar& p) const
+{ return MatrixPowerReturnValue<Derived>(derived(), p); }
+
+} // namespace Eigen
+
+#endif // EIGEN_MATRIX_POWER
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
new file mode 100644
index 0000000..b48ea9d
--- /dev/null
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
@@ -0,0 +1,466 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_SQUARE_ROOT
+#define EIGEN_MATRIX_SQUARE_ROOT
+
+namespace Eigen {
+
+/** \ingroup MatrixFunctions_Module
+ * \brief Class for computing matrix square roots of upper quasi-triangular matrices.
+ * \tparam MatrixType type of the argument of the matrix square root,
+ * expected to be an instantiation of the Matrix class template.
+ *
+ * This class computes the square root of the upper quasi-triangular
+ * matrix stored in the upper Hessenberg part of the matrix passed to
+ * the constructor.
+ *
+ * \sa MatrixSquareRoot, MatrixSquareRootTriangular
+ */
+template <typename MatrixType>
+class MatrixSquareRootQuasiTriangular
+{
+ public:
+
+ /** \brief Constructor.
+ *
+ * \param[in] A upper quasi-triangular matrix whose square root
+ * is to be computed.
+ *
+ * The class stores a reference to \p A, so it should not be
+ * changed (or destroyed) before compute() is called.
+ */
+ MatrixSquareRootQuasiTriangular(const MatrixType& A)
+ : m_A(A)
+ {
+ eigen_assert(A.rows() == A.cols());
+ }
+
+ /** \brief Compute the matrix square root
+ *
+ * \param[out] result square root of \p A, as specified in the constructor.
+ *
+ * Only the upper Hessenberg part of \p result is updated, the
+ * rest is not touched. See MatrixBase::sqrt() for details on
+ * how this computation is implemented.
+ */
+ template <typename ResultType> void compute(ResultType &result);
+
+ private:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+
+ void computeDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T);
+ void computeOffDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T);
+ void compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i);
+ void compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
+ typename MatrixType::Index i, typename MatrixType::Index j);
+ void compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
+ typename MatrixType::Index i, typename MatrixType::Index j);
+ void compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
+ typename MatrixType::Index i, typename MatrixType::Index j);
+ void compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
+ typename MatrixType::Index i, typename MatrixType::Index j);
+
+ template <typename SmallMatrixType>
+ static void solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A,
+ const SmallMatrixType& B, const SmallMatrixType& C);
+
+ const MatrixType& m_A;
+};
+
+template <typename MatrixType>
+template <typename ResultType>
+void MatrixSquareRootQuasiTriangular<MatrixType>::compute(ResultType &result)
+{
+ result.resize(m_A.rows(), m_A.cols());
+ computeDiagonalPartOfSqrt(result, m_A);
+ computeOffDiagonalPartOfSqrt(result, m_A);
+}
+
+// pre: T is quasi-upper-triangular and sqrtT is a zero matrix of the same size
+// post: the diagonal blocks of sqrtT are the square roots of the diagonal blocks of T
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>::computeDiagonalPartOfSqrt(MatrixType& sqrtT,
+ const MatrixType& T)
+{
+ using std::sqrt;
+ const Index size = m_A.rows();
+ for (Index i = 0; i < size; i++) {
+ if (i == size - 1 || T.coeff(i+1, i) == 0) {
+ eigen_assert(T(i,i) >= 0);
+ sqrtT.coeffRef(i,i) = sqrt(T.coeff(i,i));
+ }
+ else {
+ compute2x2diagonalBlock(sqrtT, T, i);
+ ++i;
+ }
+ }
+}
+
+// pre: T is quasi-upper-triangular and diagonal blocks of sqrtT are square root of diagonal blocks of T.
+// post: sqrtT is the square root of T.
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>::computeOffDiagonalPartOfSqrt(MatrixType& sqrtT,
+ const MatrixType& T)
+{
+ const Index size = m_A.rows();
+ for (Index j = 1; j < size; j++) {
+ if (T.coeff(j, j-1) != 0) // if T(j-1:j, j-1:j) is a 2-by-2 block
+ continue;
+ for (Index i = j-1; i >= 0; i--) {
+ if (i > 0 && T.coeff(i, i-1) != 0) // if T(i-1:i, i-1:i) is a 2-by-2 block
+ continue;
+ bool iBlockIs2x2 = (i < size - 1) && (T.coeff(i+1, i) != 0);
+ bool jBlockIs2x2 = (j < size - 1) && (T.coeff(j+1, j) != 0);
+ if (iBlockIs2x2 && jBlockIs2x2)
+ compute2x2offDiagonalBlock(sqrtT, T, i, j);
+ else if (iBlockIs2x2 && !jBlockIs2x2)
+ compute2x1offDiagonalBlock(sqrtT, T, i, j);
+ else if (!iBlockIs2x2 && jBlockIs2x2)
+ compute1x2offDiagonalBlock(sqrtT, T, i, j);
+ else if (!iBlockIs2x2 && !jBlockIs2x2)
+ compute1x1offDiagonalBlock(sqrtT, T, i, j);
+ }
+ }
+}
+
+// pre: T.block(i,i,2,2) has complex conjugate eigenvalues
+// post: sqrtT.block(i,i,2,2) is square root of T.block(i,i,2,2)
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+ ::compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i)
+{
+ // TODO: This case (2-by-2 blocks with complex conjugate eigenvalues) is probably hidden somewhere
+ // in EigenSolver. If we expose it, we could call it directly from here.
+ Matrix<Scalar,2,2> block = T.template block<2,2>(i,i);
+ EigenSolver<Matrix<Scalar,2,2> > es(block);
+ sqrtT.template block<2,2>(i,i)
+ = (es.eigenvectors() * es.eigenvalues().cwiseSqrt().asDiagonal() * es.eigenvectors().inverse()).real();
+}
+
+// pre: block structure of T is such that (i,j) is a 1x1 block,
+// all blocks of sqrtT to left of and below (i,j) are correct
+// post: sqrtT(i,j) has the correct value
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+ ::compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
+ typename MatrixType::Index i, typename MatrixType::Index j)
+{
+ Scalar tmp = (sqrtT.row(i).segment(i+1,j-i-1) * sqrtT.col(j).segment(i+1,j-i-1)).value();
+ sqrtT.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (sqrtT.coeff(i,i) + sqrtT.coeff(j,j));
+}
+
+// similar to compute1x1offDiagonalBlock()
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+ ::compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
+ typename MatrixType::Index i, typename MatrixType::Index j)
+{
+ Matrix<Scalar,1,2> rhs = T.template block<1,2>(i,j);
+ if (j-i > 1)
+ rhs -= sqrtT.block(i, i+1, 1, j-i-1) * sqrtT.block(i+1, j, j-i-1, 2);
+ Matrix<Scalar,2,2> A = sqrtT.coeff(i,i) * Matrix<Scalar,2,2>::Identity();
+ A += sqrtT.template block<2,2>(j,j).transpose();
+ sqrtT.template block<1,2>(i,j).transpose() = A.fullPivLu().solve(rhs.transpose());
+}
+
+// similar to compute1x1offDiagonalBlock()
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+ ::compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
+ typename MatrixType::Index i, typename MatrixType::Index j)
+{
+ Matrix<Scalar,2,1> rhs = T.template block<2,1>(i,j);
+ if (j-i > 2)
+ rhs -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 1);
+ Matrix<Scalar,2,2> A = sqrtT.coeff(j,j) * Matrix<Scalar,2,2>::Identity();
+ A += sqrtT.template block<2,2>(i,i);
+ sqrtT.template block<2,1>(i,j) = A.fullPivLu().solve(rhs);
+}
+
+// similar to compute1x1offDiagonalBlock()
+template <typename MatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+ ::compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
+ typename MatrixType::Index i, typename MatrixType::Index j)
+{
+ Matrix<Scalar,2,2> A = sqrtT.template block<2,2>(i,i);
+ Matrix<Scalar,2,2> B = sqrtT.template block<2,2>(j,j);
+ Matrix<Scalar,2,2> C = T.template block<2,2>(i,j);
+ if (j-i > 2)
+ C -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 2);
+ Matrix<Scalar,2,2> X;
+ solveAuxiliaryEquation(X, A, B, C);
+ sqrtT.template block<2,2>(i,j) = X;
+}
+
+// solves the equation A X + X B = C where all matrices are 2-by-2
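+// With X and C vectorized row by row, the coefficient matrix assembled below
+// equals (A kron I) + (I kron B^T), so the system solved is
+//   ((A kron I) + (I kron B^T)) vec(X) = vec(C).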
+template <typename MatrixType>
+template <typename SmallMatrixType>
+void MatrixSquareRootQuasiTriangular<MatrixType>
+ ::solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A,
+ const SmallMatrixType& B, const SmallMatrixType& C)
+{
+ EIGEN_STATIC_ASSERT((internal::is_same<SmallMatrixType, Matrix<Scalar,2,2> >::value),
+ EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT);
+
+ Matrix<Scalar,4,4> coeffMatrix = Matrix<Scalar,4,4>::Zero();
+ coeffMatrix.coeffRef(0,0) = A.coeff(0,0) + B.coeff(0,0);
+ coeffMatrix.coeffRef(1,1) = A.coeff(0,0) + B.coeff(1,1);
+ coeffMatrix.coeffRef(2,2) = A.coeff(1,1) + B.coeff(0,0);
+ coeffMatrix.coeffRef(3,3) = A.coeff(1,1) + B.coeff(1,1);
+ coeffMatrix.coeffRef(0,1) = B.coeff(1,0);
+ coeffMatrix.coeffRef(0,2) = A.coeff(0,1);
+ coeffMatrix.coeffRef(1,0) = B.coeff(0,1);
+ coeffMatrix.coeffRef(1,3) = A.coeff(0,1);
+ coeffMatrix.coeffRef(2,0) = A.coeff(1,0);
+ coeffMatrix.coeffRef(2,3) = B.coeff(1,0);
+ coeffMatrix.coeffRef(3,1) = A.coeff(1,0);
+ coeffMatrix.coeffRef(3,2) = B.coeff(0,1);
+
+ Matrix<Scalar,4,1> rhs;
+ rhs.coeffRef(0) = C.coeff(0,0);
+ rhs.coeffRef(1) = C.coeff(0,1);
+ rhs.coeffRef(2) = C.coeff(1,0);
+ rhs.coeffRef(3) = C.coeff(1,1);
+
+ Matrix<Scalar,4,1> result;
+ result = coeffMatrix.fullPivLu().solve(rhs);
+
+ X.coeffRef(0,0) = result.coeff(0);
+ X.coeffRef(0,1) = result.coeff(1);
+ X.coeffRef(1,0) = result.coeff(2);
+ X.coeffRef(1,1) = result.coeff(3);
+}
+
+
+/** \ingroup MatrixFunctions_Module
+ * \brief Class for computing matrix square roots of upper triangular matrices.
+ * \tparam MatrixType type of the argument of the matrix square root,
+ * expected to be an instantiation of the Matrix class template.
+ *
+ * This class computes the square root of the upper triangular matrix
+ * stored in the upper triangular part (including the diagonal) of
+ * the matrix passed to the constructor.
+ *
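+ * A minimal sketch of the intended use, assuming \c A is a square
+ * \c MatrixXcd and taking \c T from its complex Schur decomposition:
+ * \code
+ * ComplexSchur<MatrixXcd> schur(A);
+ * MatrixXcd sqrtT;
+ * MatrixSquareRootTriangular<MatrixXcd>(schur.matrixT()).compute(sqrtT);
+ * \endcode
+ *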
+ * \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular
+ */
+template <typename MatrixType>
+class MatrixSquareRootTriangular
+{
+ public:
+ MatrixSquareRootTriangular(const MatrixType& A)
+ : m_A(A)
+ {
+ eigen_assert(A.rows() == A.cols());
+ }
+
+ /** \brief Compute the matrix square root
+ *
+ * \param[out] result square root of \p A, as specified in the constructor.
+ *
+ * Only the upper triangular part (including the diagonal) of
+ * \p result is updated, the rest is not touched. See
+ * MatrixBase::sqrt() for details on how this computation is
+ * implemented.
+ */
+ template <typename ResultType> void compute(ResultType &result);
+
+ private:
+ const MatrixType& m_A;
+};
+
+template <typename MatrixType>
+template <typename ResultType>
+void MatrixSquareRootTriangular<MatrixType>::compute(ResultType &result)
+{
+ using std::sqrt;
+
+ // Compute square root of m_A and store it in upper triangular part of result
+ // This uses the fact that the square root of a triangular matrix can be computed directly.
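+ // The recurrence implemented below is the standard one:
+ //   result(i,i) = sqrt(m_A(i,i))
+ //   result(i,j) = (m_A(i,j) - sum_{k=i+1..j-1} result(i,k)*result(k,j))
+ //                 / (result(i,i) + result(j,j))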
+ result.resize(m_A.rows(), m_A.cols());
+ typedef typename MatrixType::Index Index;
+ for (Index i = 0; i < m_A.rows(); i++) {
+ result.coeffRef(i,i) = sqrt(m_A.coeff(i,i));
+ }
+ for (Index j = 1; j < m_A.cols(); j++) {
+ for (Index i = j-1; i >= 0; i--) {
+ typedef typename MatrixType::Scalar Scalar;
+ // if i = j-1, then segment has length 0 so tmp = 0
+ Scalar tmp = (result.row(i).segment(i+1,j-i-1) * result.col(j).segment(i+1,j-i-1)).value();
+ // denominator may be zero if original matrix is singular
+ result.coeffRef(i,j) = (m_A.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j));
+ }
+ }
+}
+
+
+/** \ingroup MatrixFunctions_Module
+ * \brief Class for computing matrix square roots of general matrices.
+ * \tparam MatrixType type of the argument of the matrix square root,
+ * expected to be an instantiation of the Matrix class template.
+ *
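+ * A minimal usage sketch (assuming a square \c MatrixXd \c A that has a
+ * real square root):
+ * \code
+ * MatrixSquareRoot<MatrixXd> msr(A);
+ * MatrixXd S;
+ * msr.compute(S); // S * S is (approximately) A
+ * \endcode
+ *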
+ * \sa MatrixSquareRootTriangular, MatrixSquareRootQuasiTriangular, MatrixBase::sqrt()
+ */
+template <typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
+class MatrixSquareRoot
+{
+ public:
+
+ /** \brief Constructor.
+ *
+ * \param[in] A matrix whose square root is to be computed.
+ *
+ * The class stores a reference to \p A, so it should not be
+ * changed (or destroyed) before compute() is called.
+ */
+ MatrixSquareRoot(const MatrixType& A);
+
+ /** \brief Compute the matrix square root
+ *
+ * \param[out] result square root of \p A, as specified in the constructor.
+ *
+ * See MatrixBase::sqrt() for details on how this computation is
+ * implemented.
+ */
+ template <typename ResultType> void compute(ResultType &result);
+};
+
+
+// ********** Partial specialization for real matrices **********
+
+template <typename MatrixType>
+class MatrixSquareRoot<MatrixType, 0>
+{
+ public:
+
+ MatrixSquareRoot(const MatrixType& A)
+ : m_A(A)
+ {
+ eigen_assert(A.rows() == A.cols());
+ }
+
+ template <typename ResultType> void compute(ResultType &result)
+ {
+ // Compute Schur decomposition of m_A
+ const RealSchur<MatrixType> schurOfA(m_A);
+ const MatrixType& T = schurOfA.matrixT();
+ const MatrixType& U = schurOfA.matrixU();
+
+ // Compute square root of T
+ MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.cols());
+ MatrixSquareRootQuasiTriangular<MatrixType>(T).compute(sqrtT);
+
+ // Compute square root of m_A
+ result = U * sqrtT * U.adjoint();
+ }
+
+ private:
+ const MatrixType& m_A;
+};
+
+
+// ********** Partial specialization for complex matrices **********
+
+template <typename MatrixType>
+class MatrixSquareRoot<MatrixType, 1>
+{
+ public:
+
+ MatrixSquareRoot(const MatrixType& A)
+ : m_A(A)
+ {
+ eigen_assert(A.rows() == A.cols());
+ }
+
+ template <typename ResultType> void compute(ResultType &result)
+ {
+ // Compute Schur decomposition of m_A
+ const ComplexSchur<MatrixType> schurOfA(m_A);
+ const MatrixType& T = schurOfA.matrixT();
+ const MatrixType& U = schurOfA.matrixU();
+
+ // Compute square root of T
+ MatrixType sqrtT;
+ MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
+
+ // Compute square root of m_A
+ result = U * (sqrtT.template triangularView<Upper>() * U.adjoint());
+ }
+
+ private:
+ const MatrixType& m_A;
+};
+
+
+/** \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix square root of some matrix (expression).
+ *
+ * \tparam Derived Type of the argument to the matrix square root.
+ *
+ * This class holds the argument to the matrix square root until it
+ * is assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * MatrixBase::sqrt() and most of the time this is the only way it is
+ * used.
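+ *
+ * A minimal usage sketch (the proxy is evaluated when assigned; a random
+ * complex matrix is almost surely invertible, so its square root exists):
+ * \code
+ * MatrixXcd A = MatrixXcd::Random(3,3);
+ * MatrixXcd S = A.sqrt(); // S * S is (approximately) A
+ * \endcode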
+ */
+template<typename Derived> class MatrixSquareRootReturnValue
+: public ReturnByValue<MatrixSquareRootReturnValue<Derived> >
+{
+ typedef typename Derived::Index Index;
+ public:
+ /** \brief Constructor.
+ *
+ * \param[in] src %Matrix (expression) forming the argument of the
+ * matrix square root.
+ */
+ MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { }
+
+ /** \brief Compute the matrix square root.
+ *
+ * \param[out] result the matrix square root of \p src as given in
+ * the constructor.
+ */
+ template <typename ResultType>
+ inline void evalTo(ResultType& result) const
+ {
+ const typename Derived::PlainObject srcEvaluated = m_src.eval();
+ MatrixSquareRoot<typename Derived::PlainObject> me(srcEvaluated);
+ me.compute(result);
+ }
+
+ Index rows() const { return m_src.rows(); }
+ Index cols() const { return m_src.cols(); }
+
+ protected:
+ const Derived& m_src;
+ private:
+ MatrixSquareRootReturnValue& operator=(const MatrixSquareRootReturnValue&);
+};
+
+namespace internal {
+template<typename Derived>
+struct traits<MatrixSquareRootReturnValue<Derived> >
+{
+ typedef typename Derived::PlainObject ReturnType;
+};
+}
+
+template <typename Derived>
+const MatrixSquareRootReturnValue<Derived> MatrixBase<Derived>::sqrt() const
+{
+ eigen_assert(rows() == cols());
+ return MatrixSquareRootReturnValue<Derived>(derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_MATRIX_SQUARE_ROOT
diff --git a/unsupported/Eigen/src/MatrixFunctions/StemFunction.h b/unsupported/Eigen/src/MatrixFunctions/StemFunction.h
new file mode 100644
index 0000000..724e55c
--- /dev/null
+++ b/unsupported/Eigen/src/MatrixFunctions/StemFunction.h
@@ -0,0 +1,105 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_STEM_FUNCTION
+#define EIGEN_STEM_FUNCTION
+
+namespace Eigen {
+
+/** \ingroup MatrixFunctions_Module
+ * \brief Stem functions corresponding to standard mathematical functions.
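+ *
+ * Each member evaluates the n-th derivative of the corresponding
+ * function at \c x; for example (a sketch):
+ * \code
+ * double d = StdStemFunctions<double>::cos(x, 1); // yields -sin(x)
+ * \endcode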
+ */
+template <typename Scalar>
+class StdStemFunctions
+{
+ public:
+
+ /** \brief The exponential function (and its derivatives). */
+ static Scalar exp(Scalar x, int)
+ {
+ return std::exp(x);
+ }
+
+ /** \brief Cosine (and its derivatives). */
+ static Scalar cos(Scalar x, int n)
+ {
+ Scalar res;
+ switch (n % 4) {
+ case 0:
+ res = std::cos(x);
+ break;
+ case 1:
+ res = -std::sin(x);
+ break;
+ case 2:
+ res = -std::cos(x);
+ break;
+ case 3:
+ res = std::sin(x);
+ break;
+ }
+ return res;
+ }
+
+ /** \brief Sine (and its derivatives). */
+ static Scalar sin(Scalar x, int n)
+ {
+ Scalar res;
+ switch (n % 4) {
+ case 0:
+ res = std::sin(x);
+ break;
+ case 1:
+ res = std::cos(x);
+ break;
+ case 2:
+ res = -std::sin(x);
+ break;
+ case 3:
+ res = -std::cos(x);
+ break;
+ }
+ return res;
+ }
+
+ /** \brief Hyperbolic cosine (and its derivatives). */
+ static Scalar cosh(Scalar x, int n)
+ {
+ Scalar res;
+ switch (n % 2) {
+ case 0:
+ res = std::cosh(x);
+ break;
+ case 1:
+ res = std::sinh(x);
+ break;
+ }
+ return res;
+ }
+
+ /** \brief Hyperbolic sine (and its derivatives). */
+ static Scalar sinh(Scalar x, int n)
+ {
+ Scalar res;
+ switch (n % 2) {
+ case 0:
+ res = std::sinh(x);
+ break;
+ case 1:
+ res = std::cosh(x);
+ break;
+ }
+ return res;
+ }
+
+}; // end of class StdStemFunctions
+
+} // end namespace Eigen
+
+#endif // EIGEN_STEM_FUNCTION
diff --git a/unsupported/Eigen/src/MoreVectorization/CMakeLists.txt b/unsupported/Eigen/src/MoreVectorization/CMakeLists.txt
new file mode 100644
index 0000000..1b887cc
--- /dev/null
+++ b/unsupported/Eigen/src/MoreVectorization/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_MoreVectorization_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_MoreVectorization_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/MoreVectorization COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/MoreVectorization/MathFunctions.h b/unsupported/Eigen/src/MoreVectorization/MathFunctions.h
new file mode 100644
index 0000000..63cb28d
--- /dev/null
+++ b/unsupported/Eigen/src/MoreVectorization/MathFunctions.h
@@ -0,0 +1,95 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Rohit Garg <rpg.314@gmail.com>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H
+#define EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal \returns the arcsin of \a a (coeff-wise) */
+template<typename Packet> inline static Packet pasin(Packet a) { return std::asin(a); }
+
+#ifdef EIGEN_VECTORIZE_SSE
+
+template<> EIGEN_DONT_INLINE Packet4f pasin(Packet4f x)
+{
+ _EIGEN_DECLARE_CONST_Packet4f(half, 0.5);
+ _EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5);
+ _EIGEN_DECLARE_CONST_Packet4f(3half, 1.5);
+
+ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
+
+ _EIGEN_DECLARE_CONST_Packet4f(pi, 3.141592654);
+ _EIGEN_DECLARE_CONST_Packet4f(pi_over_2, 3.141592654*0.5);
+
+ _EIGEN_DECLARE_CONST_Packet4f(asin1, 4.2163199048E-2);
+ _EIGEN_DECLARE_CONST_Packet4f(asin2, 2.4181311049E-2);
+ _EIGEN_DECLARE_CONST_Packet4f(asin3, 4.5470025998E-2);
+ _EIGEN_DECLARE_CONST_Packet4f(asin4, 7.4953002686E-2);
+ _EIGEN_DECLARE_CONST_Packet4f(asin5, 1.6666752422E-1);
+
+ Packet4f a = pabs(x); // absolute value of the input
+
+ Packet4f sign_bit = _mm_and_ps(x, p4f_sign_mask); // extract the sign bit
+
+ Packet4f z1, z2; // results of the two branches
+
+ // Compute the two branches of the asin expansion; both are evaluated
+ // and the results are merged at the end. Start by comparing with 0.5
+ // to build the selection mask.
+ Packet4f branch_mask = _mm_cmpgt_ps(a, p4f_half);
+
+ // the branch for values > 0.5
+
+ {
+//the core series expansion
+ z1=pmadd(p4f_minus_half,a,p4f_half);
+ Packet4f x1=psqrt(z1);
+ Packet4f s1=pmadd(p4f_asin1, z1, p4f_asin2);
+ Packet4f s2=pmadd(s1, z1, p4f_asin3);
+ Packet4f s3=pmadd(s2,z1, p4f_asin4);
+ Packet4f s4=pmadd(s3,z1, p4f_asin5);
+ Packet4f temp=pmul(s4,z1);//not really a madd but a mul by z so that the next term can be a madd
+ z1=pmadd(temp,x1,x1);
+ z1=padd(z1,z1);
+ z1=psub(p4f_pi_over_2,z1);
+ }
+
+ {
+//the core series expansion
+ Packet4f x2=a;
+ z2=pmul(x2,x2);
+ Packet4f s1=pmadd(p4f_asin1, z2, p4f_asin2);
+ Packet4f s2=pmadd(s1, z2, p4f_asin3);
+ Packet4f s3=pmadd(s2,z2, p4f_asin4);
+ Packet4f s4=pmadd(s3,z2, p4f_asin5);
+ Packet4f temp=pmul(s4,z2);//not really a madd but a mul by z so that the next term can be a madd
+ z2=pmadd(temp,x2,x2);
+ }
+
+/* select the correct result from the two branch evaluations */
+ z1 = _mm_and_ps(branch_mask, z1);
+ z2 = _mm_andnot_ps(branch_mask, z2);
+ Packet4f z = _mm_or_ps(z1,z2);
+
+/* update the sign */
+ return _mm_xor_ps(z, sign_bit);
+}
+
+#endif // EIGEN_VECTORIZE_SSE
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H
diff --git a/unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt b/unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt
new file mode 100644
index 0000000..9322dda
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_NonLinearOptimization_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_NonLinearOptimization_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/NonLinearOptimization COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
new file mode 100644
index 0000000..b8ba6dd
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
@@ -0,0 +1,601 @@
+// -*- coding: utf-8
+// vim: set fileencoding=utf-8
+
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_HYBRIDNONLINEARSOLVER_H
+#define EIGEN_HYBRIDNONLINEARSOLVER_H
+
+namespace Eigen {
+
+namespace HybridNonLinearSolverSpace {
+ enum Status {
+ Running = -1,
+ ImproperInputParameters = 0,
+ RelativeErrorTooSmall = 1,
+ TooManyFunctionEvaluation = 2,
+ TolTooSmall = 3,
+ NotMakingProgressJacobian = 4,
+ NotMakingProgressIterations = 5,
+ UserAsked = 6
+ };
+}
+
+/**
+ * \ingroup NonLinearOptimization_Module
+ * \brief Finds a zero of a system of n
+ * nonlinear functions in n variables by a modification of the Powell
+ * hybrid method ("dogleg").
+ *
+ * The user must provide a subroutine which calculates the
+ * functions. The Jacobian is either provided by the user, or approximated
+ * using a forward-difference method.
+ *
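+ * A minimal usage sketch, assuming a hypothetical functor \c MyFunctor
+ * with members <tt>int operator()(const VectorXd &x, VectorXd &fvec)</tt>
+ * (the functions) and <tt>int df(const VectorXd &x, MatrixXd &fjac)</tt>
+ * (the Jacobian), both returning a non-negative value on success:
+ * \code
+ * MyFunctor functor;
+ * HybridNonLinearSolver<MyFunctor> solver(functor);
+ * VectorXd x = VectorXd::Zero(n); // n: problem size; some starting guess
+ * HybridNonLinearSolverSpace::Status status = solver.hybrj1(x);
+ * \endcode
+ *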
+ */
+template<typename FunctorType, typename Scalar=double>
+class HybridNonLinearSolver
+{
+public:
+ typedef DenseIndex Index;
+
+ HybridNonLinearSolver(FunctorType &_functor)
+ : functor(_functor) { nfev=njev=iter = 0; fnorm= 0.; useExternalScaling=false;}
+
+ struct Parameters {
+ Parameters()
+ : factor(Scalar(100.))
+ , maxfev(1000)
+ , xtol(std::sqrt(NumTraits<Scalar>::epsilon()))
+ , nb_of_subdiagonals(-1)
+ , nb_of_superdiagonals(-1)
+ , epsfcn(Scalar(0.)) {}
+ Scalar factor;
+ Index maxfev; // maximum number of function evaluations
+ Scalar xtol;
+ Index nb_of_subdiagonals; // negative value means: use n-1 (full bandwidth)
+ Index nb_of_superdiagonals; // negative value means: use n-1 (full bandwidth)
+ Scalar epsfcn;
+ };
+ typedef Matrix< Scalar, Dynamic, 1 > FVectorType;
+ typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType;
+ /* TODO: if eigen provides a triangular storage, use it here */
+ typedef Matrix< Scalar, Dynamic, Dynamic > UpperTriangularType;
+
+ HybridNonLinearSolverSpace::Status hybrj1(
+ FVectorType &x,
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ );
+
+ HybridNonLinearSolverSpace::Status solveInit(FVectorType &x);
+ HybridNonLinearSolverSpace::Status solveOneStep(FVectorType &x);
+ HybridNonLinearSolverSpace::Status solve(FVectorType &x);
+
+ HybridNonLinearSolverSpace::Status hybrd1(
+ FVectorType &x,
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ );
+
+ HybridNonLinearSolverSpace::Status solveNumericalDiffInit(FVectorType &x);
+ HybridNonLinearSolverSpace::Status solveNumericalDiffOneStep(FVectorType &x);
+ HybridNonLinearSolverSpace::Status solveNumericalDiff(FVectorType &x);
+
+ void resetParameters(void) { parameters = Parameters(); }
+ Parameters parameters;
+ FVectorType fvec, qtf, diag;
+ JacobianType fjac;
+ UpperTriangularType R;
+ Index nfev;
+ Index njev;
+ Index iter;
+ Scalar fnorm;
+ bool useExternalScaling;
+private:
+ FunctorType &functor;
+ Index n;
+ Scalar sum;
+ bool sing;
+ Scalar temp;
+ Scalar delta;
+ bool jeval;
+ Index ncsuc;
+ Scalar ratio;
+ Scalar pnorm, xnorm, fnorm1;
+ Index nslow1, nslow2;
+ Index ncfail;
+ Scalar actred, prered;
+ FVectorType wa1, wa2, wa3, wa4;
+
+ HybridNonLinearSolver& operator=(const HybridNonLinearSolver&);
+};
+
+
+
+template<typename FunctorType, typename Scalar>
+HybridNonLinearSolverSpace::Status
+HybridNonLinearSolver<FunctorType,Scalar>::hybrj1(
+ FVectorType &x,
+ const Scalar tol
+ )
+{
+ n = x.size();
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || tol < 0.)
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
+
+ resetParameters();
+ parameters.maxfev = 100*(n+1);
+ parameters.xtol = tol;
+ diag.setConstant(n, 1.);
+ useExternalScaling = true;
+ return solve(x);
+}
+
+template<typename FunctorType, typename Scalar>
+HybridNonLinearSolverSpace::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solveInit(FVectorType &x)
+{
+ n = x.size();
+
+ wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n);
+ fvec.resize(n);
+ qtf.resize(n);
+ fjac.resize(n, n);
+ if (!useExternalScaling)
+ diag.resize(n);
+ eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'");
+
+ /* Function Body */
+ nfev = 0;
+ njev = 0;
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0. )
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
+ if (useExternalScaling)
+ for (Index j = 0; j < n; ++j)
+ if (diag[j] <= 0.)
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
+
+ /* evaluate the function at the starting point */
+ /* and calculate its norm. */
+ nfev = 1;
+ if ( functor(x, fvec) < 0)
+ return HybridNonLinearSolverSpace::UserAsked;
+ fnorm = fvec.stableNorm();
+
+ /* initialize iteration counter and monitors. */
+ iter = 1;
+ ncsuc = 0;
+ ncfail = 0;
+ nslow1 = 0;
+ nslow2 = 0;
+
+ return HybridNonLinearSolverSpace::Running;
+}
+
+template<typename FunctorType, typename Scalar>
+HybridNonLinearSolverSpace::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
+{
+ using std::abs;
+
+ eigen_assert(x.size()==n); // check the caller is not cheating us
+
+ Index j;
+ std::vector<JacobiRotation<Scalar> > v_givens(n), w_givens(n);
+
+ jeval = true;
+
+ /* calculate the jacobian matrix. */
+ if ( functor.df(x, fjac) < 0)
+ return HybridNonLinearSolverSpace::UserAsked;
+ ++njev;
+
+ wa2 = fjac.colwise().blueNorm();
+
+ /* on the first iteration and if external scaling is not used, scale according */
+ /* to the norms of the columns of the initial jacobian. */
+ if (iter == 1) {
+ if (!useExternalScaling)
+ for (j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.) ? 1. : wa2[j];
+
+ /* on the first iteration, calculate the norm of the scaled x */
+ /* and initialize the step bound delta. */
+ xnorm = diag.cwiseProduct(x).stableNorm();
+ delta = parameters.factor * xnorm;
+ if (delta == 0.)
+ delta = parameters.factor;
+ }
+
+ /* compute the qr factorization of the jacobian. */
+ HouseholderQR<JacobianType> qrfac(fjac); // no pivoting:
+
+ /* copy the triangular factor of the qr factorization into r. */
+ R = qrfac.matrixQR();
+
+ /* accumulate the orthogonal factor in fjac. */
+ fjac = qrfac.householderQ();
+
+ /* form (q transpose)*fvec and store in qtf. */
+ qtf = fjac.transpose() * fvec;
+
+ /* rescale if necessary. */
+ if (!useExternalScaling)
+ diag = diag.cwiseMax(wa2);
+
+ while (true) {
+ /* determine the direction p. */
+ internal::dogleg<Scalar>(R, diag, qtf, delta, wa1);
+
+ /* store the direction p and x + p. calculate the norm of p. */
+ wa1 = -wa1;
+ wa2 = x + wa1;
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
+
+ /* on the first iteration, adjust the initial step bound. */
+ if (iter == 1)
+ delta = (std::min)(delta,pnorm);
+
+ /* evaluate the function at x + p and calculate its norm. */
+ if ( functor(wa2, wa4) < 0)
+ return HybridNonLinearSolverSpace::UserAsked;
+ ++nfev;
+ fnorm1 = wa4.stableNorm();
+
+ /* compute the scaled actual reduction. */
+ actred = -1.;
+ if (fnorm1 < fnorm) /* Computing 2nd power */
+ actred = 1. - numext::abs2(fnorm1 / fnorm);
+
+ /* compute the scaled predicted reduction. */
+ wa3 = R.template triangularView<Upper>()*wa1 + qtf;
+ temp = wa3.stableNorm();
+ prered = 0.;
+ if (temp < fnorm) /* Computing 2nd power */
+ prered = 1. - numext::abs2(temp / fnorm);
+
+ /* compute the ratio of the actual to the predicted reduction. */
+ ratio = 0.;
+ if (prered > 0.)
+ ratio = actred / prered;
+
+ /* update the step bound. */
+ if (ratio < Scalar(.1)) {
+ ncsuc = 0;
+ ++ncfail;
+ delta = Scalar(.5) * delta;
+ } else {
+ ncfail = 0;
+ ++ncsuc;
+ if (ratio >= Scalar(.5) || ncsuc > 1)
+ delta = (std::max)(delta, pnorm / Scalar(.5));
+ if (abs(ratio - 1.) <= Scalar(.1)) {
+ delta = pnorm / Scalar(.5);
+ }
+ }
+
+ /* test for successful iteration. */
+ if (ratio >= Scalar(1e-4)) {
+ /* successful iteration. update x, fvec, and their norms. */
+ x = wa2;
+ wa2 = diag.cwiseProduct(x);
+ fvec = wa4;
+ xnorm = wa2.stableNorm();
+ fnorm = fnorm1;
+ ++iter;
+ }
+
+ /* determine the progress of the iteration. */
+ ++nslow1;
+ if (actred >= Scalar(.001))
+ nslow1 = 0;
+ if (jeval)
+ ++nslow2;
+ if (actred >= Scalar(.1))
+ nslow2 = 0;
+
+ /* test for convergence. */
+ if (delta <= parameters.xtol * xnorm || fnorm == 0.)
+ return HybridNonLinearSolverSpace::RelativeErrorTooSmall;
+
+ /* tests for termination and stringent tolerances. */
+ if (nfev >= parameters.maxfev)
+ return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
+ if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
+ return HybridNonLinearSolverSpace::TolTooSmall;
+ if (nslow2 == 5)
+ return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
+ if (nslow1 == 10)
+ return HybridNonLinearSolverSpace::NotMakingProgressIterations;
+
+ /* criterion for recalculating jacobian. */
+ if (ncfail == 2)
+ break; // leave inner loop and go for the next outer loop iteration
+
+ /* calculate the rank one modification to the jacobian */
+ /* and update qtf if necessary. */
+ wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm );
+ wa2 = fjac.transpose() * wa4;
+ if (ratio >= Scalar(1e-4))
+ qtf = wa2;
+ wa2 = (wa2-wa3)/pnorm;
+
+ /* compute the qr factorization of the updated jacobian. */
+ internal::r1updt<Scalar>(R, wa1, v_givens, w_givens, wa2, wa3, &sing);
+ internal::r1mpyq<Scalar>(n, n, fjac.data(), v_givens, w_givens);
+ internal::r1mpyq<Scalar>(1, n, qtf.data(), v_givens, w_givens);
+
+ jeval = false;
+ }
+ return HybridNonLinearSolverSpace::Running;
+}
+
+template<typename FunctorType, typename Scalar>
+HybridNonLinearSolverSpace::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solve(FVectorType &x)
+{
+ HybridNonLinearSolverSpace::Status status = solveInit(x);
+ if (status==HybridNonLinearSolverSpace::ImproperInputParameters)
+ return status;
+ while (status==HybridNonLinearSolverSpace::Running)
+ status = solveOneStep(x);
+ return status;
+}
+
+
+
+template<typename FunctorType, typename Scalar>
+HybridNonLinearSolverSpace::Status
+HybridNonLinearSolver<FunctorType,Scalar>::hybrd1(
+ FVectorType &x,
+ const Scalar tol
+ )
+{
+ n = x.size();
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || tol < 0.)
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
+
+ resetParameters();
+ parameters.maxfev = 200*(n+1);
+ parameters.xtol = tol;
+
+ diag.setConstant(n, 1.);
+ useExternalScaling = true;
+ return solveNumericalDiff(x);
+}
+
+template<typename FunctorType, typename Scalar>
+HybridNonLinearSolverSpace::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(FVectorType &x)
+{
+ n = x.size();
+
+ if (parameters.nb_of_subdiagonals<0) parameters.nb_of_subdiagonals= n-1;
+ if (parameters.nb_of_superdiagonals<0) parameters.nb_of_superdiagonals= n-1;
+
+ wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n);
+ qtf.resize(n);
+ fjac.resize(n, n);
+ fvec.resize(n);
+ if (!useExternalScaling)
+ diag.resize(n);
+ eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'");
+
+ /* Function Body */
+ nfev = 0;
+ njev = 0;
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.nb_of_subdiagonals< 0 || parameters.nb_of_superdiagonals< 0 || parameters.factor <= 0. )
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
+ if (useExternalScaling)
+ for (Index j = 0; j < n; ++j)
+ if (diag[j] <= 0.)
+ return HybridNonLinearSolverSpace::ImproperInputParameters;
+
+ /* evaluate the function at the starting point */
+ /* and calculate its norm. */
+ nfev = 1;
+ if ( functor(x, fvec) < 0)
+ return HybridNonLinearSolverSpace::UserAsked;
+ fnorm = fvec.stableNorm();
+
+ /* initialize iteration counter and monitors. */
+ iter = 1;
+ ncsuc = 0;
+ ncfail = 0;
+ nslow1 = 0;
+ nslow2 = 0;
+
+ return HybridNonLinearSolverSpace::Running;
+}
+
+template<typename FunctorType, typename Scalar>
+HybridNonLinearSolverSpace::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType &x)
+{
+ using std::sqrt;
+ using std::abs;
+
+ eigen_assert(x.size()==n); // check the caller is not cheating us
+
+ Index j;
+ std::vector<JacobiRotation<Scalar> > v_givens(n), w_givens(n);
+
+ jeval = true;
+ if (parameters.nb_of_subdiagonals<0) parameters.nb_of_subdiagonals= n-1;
+ if (parameters.nb_of_superdiagonals<0) parameters.nb_of_superdiagonals= n-1;
+
+ /* calculate the jacobian matrix. */
+ if (internal::fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0)
+ return HybridNonLinearSolverSpace::UserAsked;
+ nfev += (std::min)(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n);
+
+ wa2 = fjac.colwise().blueNorm();
+
+ /* on the first iteration and if external scaling is not used, scale according */
+ /* to the norms of the columns of the initial jacobian. */
+ if (iter == 1) {
+ if (!useExternalScaling)
+ for (j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.) ? 1. : wa2[j];
+
+ /* on the first iteration, calculate the norm of the scaled x */
+ /* and initialize the step bound delta. */
+ xnorm = diag.cwiseProduct(x).stableNorm();
+ delta = parameters.factor * xnorm;
+ if (delta == 0.)
+ delta = parameters.factor;
+ }
+
+ /* compute the qr factorization of the jacobian. */
+ HouseholderQR<JacobianType> qrfac(fjac); // no pivoting:
+
+ /* copy the triangular factor of the qr factorization into r. */
+ R = qrfac.matrixQR();
+
+ /* accumulate the orthogonal factor in fjac. */
+ fjac = qrfac.householderQ();
+
+ /* form (q transpose)*fvec and store in qtf. */
+ qtf = fjac.transpose() * fvec;
+
+ /* rescale if necessary. */
+ if (!useExternalScaling)
+ diag = diag.cwiseMax(wa2);
+
+ while (true) {
+ /* determine the direction p. */
+ internal::dogleg<Scalar>(R, diag, qtf, delta, wa1);
+
+ /* store the direction p and x + p. calculate the norm of p. */
+ wa1 = -wa1;
+ wa2 = x + wa1;
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
+
+ /* on the first iteration, adjust the initial step bound. */
+ if (iter == 1)
+ delta = (std::min)(delta,pnorm);
+
+ /* evaluate the function at x + p and calculate its norm. */
+ if ( functor(wa2, wa4) < 0)
+ return HybridNonLinearSolverSpace::UserAsked;
+ ++nfev;
+ fnorm1 = wa4.stableNorm();
+
+ /* compute the scaled actual reduction. */
+ actred = -1.;
+ if (fnorm1 < fnorm) /* Computing 2nd power */
+ actred = 1. - numext::abs2(fnorm1 / fnorm);
+
+ /* compute the scaled predicted reduction. */
+ wa3 = R.template triangularView<Upper>()*wa1 + qtf;
+ temp = wa3.stableNorm();
+ prered = 0.;
+ if (temp < fnorm) /* Computing 2nd power */
+ prered = 1. - numext::abs2(temp / fnorm);
+
+ /* compute the ratio of the actual to the predicted reduction. */
+ ratio = 0.;
+ if (prered > 0.)
+ ratio = actred / prered;
+
+ /* update the step bound. */
+ if (ratio < Scalar(.1)) {
+ ncsuc = 0;
+ ++ncfail;
+ delta = Scalar(.5) * delta;
+ } else {
+ ncfail = 0;
+ ++ncsuc;
+ if (ratio >= Scalar(.5) || ncsuc > 1)
+ delta = (std::max)(delta, pnorm / Scalar(.5));
+ if (abs(ratio - 1.) <= Scalar(.1)) {
+ delta = pnorm / Scalar(.5);
+ }
+ }
+
+ /* test for successful iteration. */
+ if (ratio >= Scalar(1e-4)) {
+ /* successful iteration. update x, fvec, and their norms. */
+ x = wa2;
+ wa2 = diag.cwiseProduct(x);
+ fvec = wa4;
+ xnorm = wa2.stableNorm();
+ fnorm = fnorm1;
+ ++iter;
+ }
+
+ /* determine the progress of the iteration. */
+ ++nslow1;
+ if (actred >= Scalar(.001))
+ nslow1 = 0;
+ if (jeval)
+ ++nslow2;
+ if (actred >= Scalar(.1))
+ nslow2 = 0;
+
+ /* test for convergence. */
+ if (delta <= parameters.xtol * xnorm || fnorm == 0.)
+ return HybridNonLinearSolverSpace::RelativeErrorTooSmall;
+
+ /* tests for termination and stringent tolerances. */
+ if (nfev >= parameters.maxfev)
+ return HybridNonLinearSolverSpace::TooManyFunctionEvaluation;
+ if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits<Scalar>::epsilon() * xnorm)
+ return HybridNonLinearSolverSpace::TolTooSmall;
+ if (nslow2 == 5)
+ return HybridNonLinearSolverSpace::NotMakingProgressJacobian;
+ if (nslow1 == 10)
+ return HybridNonLinearSolverSpace::NotMakingProgressIterations;
+
+ /* criterion for recalculating jacobian. */
+ if (ncfail == 2)
+ break; // leave inner loop and go for the next outer loop iteration
+
+ /* calculate the rank one modification to the jacobian */
+ /* and update qtf if necessary. */
+ wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm );
+ wa2 = fjac.transpose() * wa4;
+ if (ratio >= Scalar(1e-4))
+ qtf = wa2;
+ wa2 = (wa2-wa3)/pnorm;
+
+ /* compute the qr factorization of the updated jacobian. */
+ internal::r1updt<Scalar>(R, wa1, v_givens, w_givens, wa2, wa3, &sing);
+ internal::r1mpyq<Scalar>(n, n, fjac.data(), v_givens, w_givens);
+ internal::r1mpyq<Scalar>(1, n, qtf.data(), v_givens, w_givens);
+
+ jeval = false;
+ }
+ return HybridNonLinearSolverSpace::Running;
+}
+
+template<typename FunctorType, typename Scalar>
+HybridNonLinearSolverSpace::Status
+HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(FVectorType &x)
+{
+ HybridNonLinearSolverSpace::Status status = solveNumericalDiffInit(x);
+ if (status==HybridNonLinearSolverSpace::ImproperInputParameters)
+ return status;
+ while (status==HybridNonLinearSolverSpace::Running)
+ status = solveNumericalDiffOneStep(x);
+ return status;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_HYBRIDNONLINEARSOLVER_H
+
+//vim: ai ts=4 sts=4 et sw=4
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
new file mode 100644
index 0000000..bfeb26f
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -0,0 +1,650 @@
+// -*- coding: utf-8
+// vim: set fileencoding=utf-8
+
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LEVENBERGMARQUARDT__H
+#define EIGEN_LEVENBERGMARQUARDT__H
+
+namespace Eigen {
+
+namespace LevenbergMarquardtSpace {
+ enum Status {
+ NotStarted = -2,
+ Running = -1,
+ ImproperInputParameters = 0,
+ RelativeReductionTooSmall = 1,
+ RelativeErrorTooSmall = 2,
+ RelativeErrorAndReductionTooSmall = 3,
+ CosinusTooSmall = 4,
+ TooManyFunctionEvaluation = 5,
+ FtolTooSmall = 6,
+ XtolTooSmall = 7,
+ GtolTooSmall = 8,
+ UserAsked = 9
+ };
+}
+
+
+
+/**
+ * \ingroup NonLinearOptimization_Module
+ * \brief Performs nonlinear optimization over a nonlinear function,
+ * using a variant of the Levenberg-Marquardt algorithm.
+ *
+ * See Wikipedia for more information:
+ * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
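+ *
+ * A minimal usage sketch, assuming a hypothetical least-squares functor
+ * \c MyFunctor with members <tt>int operator()(const VectorXd &x, VectorXd &fvec)</tt>
+ * (the residuals), <tt>int df(const VectorXd &x, MatrixXd &fjac)</tt>
+ * (the Jacobian) and <tt>int values()</tt> (the number of residuals):
+ * \code
+ * MyFunctor functor;
+ * LevenbergMarquardt<MyFunctor> lm(functor);
+ * VectorXd x = VectorXd::Zero(n); // n: number of unknowns; some starting guess
+ * LevenbergMarquardtSpace::Status status = lm.lmder1(x);
+ * \endcode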
+ */
+template<typename FunctorType, typename Scalar=double>
+class LevenbergMarquardt
+{
+public:
+ LevenbergMarquardt(FunctorType &_functor)
+ : functor(_functor) { nfev = njev = iter = 0; fnorm = gnorm = 0.; useExternalScaling=false; }
+
+ typedef DenseIndex Index;
+
+ struct Parameters {
+ Parameters()
+ : factor(Scalar(100.))
+ , maxfev(400)
+ , ftol(std::sqrt(NumTraits<Scalar>::epsilon()))
+ , xtol(std::sqrt(NumTraits<Scalar>::epsilon()))
+ , gtol(Scalar(0.))
+ , epsfcn(Scalar(0.)) {}
+ Scalar factor;
+ Index maxfev; // maximum number of function evaluations
+ Scalar ftol;
+ Scalar xtol;
+ Scalar gtol;
+ Scalar epsfcn;
+ };
+
+ typedef Matrix< Scalar, Dynamic, 1 > FVectorType;
+ typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType;
+
+ LevenbergMarquardtSpace::Status lmder1(
+ FVectorType &x,
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ );
+
+ LevenbergMarquardtSpace::Status minimize(FVectorType &x);
+ LevenbergMarquardtSpace::Status minimizeInit(FVectorType &x);
+ LevenbergMarquardtSpace::Status minimizeOneStep(FVectorType &x);
+
+ static LevenbergMarquardtSpace::Status lmdif1(
+ FunctorType &functor,
+ FVectorType &x,
+ Index *nfev,
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ );
+
+ LevenbergMarquardtSpace::Status lmstr1(
+ FVectorType &x,
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ );
+
+ LevenbergMarquardtSpace::Status minimizeOptimumStorage(FVectorType &x);
+ LevenbergMarquardtSpace::Status minimizeOptimumStorageInit(FVectorType &x);
+ LevenbergMarquardtSpace::Status minimizeOptimumStorageOneStep(FVectorType &x);
+
+ void resetParameters(void) { parameters = Parameters(); }
+
+ Parameters parameters;
+ FVectorType fvec, qtf, diag;
+ JacobianType fjac;
+ PermutationMatrix<Dynamic,Dynamic> permutation;
+ Index nfev;
+ Index njev;
+ Index iter;
+ Scalar fnorm, gnorm;
+ bool useExternalScaling;
+
+ Scalar lm_param(void) { return par; }
+private:
+ FunctorType &functor;
+ Index n;
+ Index m;
+ FVectorType wa1, wa2, wa3, wa4;
+
+ Scalar par, sum;
+ Scalar temp, temp1, temp2;
+ Scalar delta;
+ Scalar ratio;
+ Scalar pnorm, xnorm, fnorm1, actred, dirder, prered;
+
+ LevenbergMarquardt& operator=(const LevenbergMarquardt&);
+};
+
+template<typename FunctorType, typename Scalar>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType,Scalar>::lmder1(
+ FVectorType &x,
+ const Scalar tol
+ )
+{
+ n = x.size();
+ m = functor.values();
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || m < n || tol < 0.)
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+
+ resetParameters();
+ parameters.ftol = tol;
+ parameters.xtol = tol;
+ parameters.maxfev = 100*(n+1);
+
+ return minimize(x);
+}
+
+
+template<typename FunctorType, typename Scalar>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimize(FVectorType &x)
+{
+ LevenbergMarquardtSpace::Status status = minimizeInit(x);
+ if (status==LevenbergMarquardtSpace::ImproperInputParameters)
+ return status;
+ do {
+ status = minimizeOneStep(x);
+ } while (status==LevenbergMarquardtSpace::Running);
+ return status;
+}
+
+template<typename FunctorType, typename Scalar>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(FVectorType &x)
+{
+ n = x.size();
+ m = functor.values();
+
+ wa1.resize(n); wa2.resize(n); wa3.resize(n);
+ wa4.resize(m);
+ fvec.resize(m);
+ fjac.resize(m, n);
+ if (!useExternalScaling)
+ diag.resize(n);
+ eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'");
+ qtf.resize(n);
+
+ /* Function Body */
+ nfev = 0;
+ njev = 0;
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+
+ if (useExternalScaling)
+ for (Index j = 0; j < n; ++j)
+ if (diag[j] <= 0.)
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+
+ /* evaluate the function at the starting point */
+ /* and calculate its norm. */
+ nfev = 1;
+ if ( functor(x, fvec) < 0)
+ return LevenbergMarquardtSpace::UserAsked;
+ fnorm = fvec.stableNorm();
+
+ /* initialize levenberg-marquardt parameter and iteration counter. */
+ par = 0.;
+ iter = 1;
+
+ return LevenbergMarquardtSpace::NotStarted;
+}
+
+template<typename FunctorType, typename Scalar>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
+{
+ using std::abs;
+ using std::sqrt;
+
+ eigen_assert(x.size()==n); // check the caller is not cheating us
+
+ /* calculate the jacobian matrix. */
+ Index df_ret = functor.df(x, fjac);
+ if (df_ret<0)
+ return LevenbergMarquardtSpace::UserAsked;
+ if (df_ret>0)
+ // numerical diff, we evaluated the function df_ret times
+ nfev += df_ret;
+ else njev++;
+
+ /* compute the qr factorization of the jacobian. */
+ wa2 = fjac.colwise().blueNorm();
+ ColPivHouseholderQR<JacobianType> qrfac(fjac);
+ fjac = qrfac.matrixQR();
+ permutation = qrfac.colsPermutation();
+
+ /* on the first iteration and if external scaling is not used, scale according */
+ /* to the norms of the columns of the initial jacobian. */
+ if (iter == 1) {
+ if (!useExternalScaling)
+ for (Index j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.)? 1. : wa2[j];
+
+ /* on the first iteration, calculate the norm of the scaled x */
+ /* and initialize the step bound delta. */
+ xnorm = diag.cwiseProduct(x).stableNorm();
+ delta = parameters.factor * xnorm;
+ if (delta == 0.)
+ delta = parameters.factor;
+ }
+
+ /* form (q transpose)*fvec and store the first n components in */
+ /* qtf. */
+ wa4 = fvec;
+ wa4.applyOnTheLeft(qrfac.householderQ().adjoint());
+ qtf = wa4.head(n);
+
+ /* compute the norm of the scaled gradient. */
+ gnorm = 0.;
+ if (fnorm != 0.)
+ for (Index j = 0; j < n; ++j)
+ if (wa2[permutation.indices()[j]] != 0.)
+ gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
+
+ /* test for convergence of the gradient norm. */
+ if (gnorm <= parameters.gtol)
+ return LevenbergMarquardtSpace::CosinusTooSmall;
+
+ /* rescale if necessary. */
+ if (!useExternalScaling)
+ diag = diag.cwiseMax(wa2);
+
+ do {
+
+ /* determine the levenberg-marquardt parameter. */
+ internal::lmpar2<Scalar>(qrfac, diag, qtf, delta, par, wa1);
+
+ /* store the direction p and x + p. calculate the norm of p. */
+ wa1 = -wa1;
+ wa2 = x + wa1;
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
+
+ /* on the first iteration, adjust the initial step bound. */
+ if (iter == 1)
+ delta = (std::min)(delta,pnorm);
+
+ /* evaluate the function at x + p and calculate its norm. */
+ if ( functor(wa2, wa4) < 0)
+ return LevenbergMarquardtSpace::UserAsked;
+ ++nfev;
+ fnorm1 = wa4.stableNorm();
+
+ /* compute the scaled actual reduction. */
+ actred = -1.;
+ if (Scalar(.1) * fnorm1 < fnorm)
+ actred = 1. - numext::abs2(fnorm1 / fnorm);
+
+ /* compute the scaled predicted reduction and */
+ /* the scaled directional derivative. */
+ wa3 = fjac.template triangularView<Upper>() * (qrfac.colsPermutation().inverse() *wa1);
+ temp1 = numext::abs2(wa3.stableNorm() / fnorm);
+ temp2 = numext::abs2(sqrt(par) * pnorm / fnorm);
+ prered = temp1 + temp2 / Scalar(.5);
+ dirder = -(temp1 + temp2);
+
+ /* compute the ratio of the actual to the predicted */
+ /* reduction. */
+ ratio = 0.;
+ if (prered != 0.)
+ ratio = actred / prered;
+
+ /* update the step bound. */
+ if (ratio <= Scalar(.25)) {
+ if (actred >= 0.)
+ temp = Scalar(.5);
+ if (actred < 0.)
+ temp = Scalar(.5) * dirder / (dirder + Scalar(.5) * actred);
+ if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
+ temp = Scalar(.1);
+ /* Computing MIN */
+ delta = temp * (std::min)(delta, pnorm / Scalar(.1));
+ par /= temp;
+ } else if (!(par != 0. && ratio < Scalar(.75))) {
+ delta = pnorm / Scalar(.5);
+ par = Scalar(.5) * par;
+ }
+
+ /* test for successful iteration. */
+ if (ratio >= Scalar(1e-4)) {
+ /* successful iteration. update x, fvec, and their norms. */
+ x = wa2;
+ wa2 = diag.cwiseProduct(x);
+ fvec = wa4;
+ xnorm = wa2.stableNorm();
+ fnorm = fnorm1;
+ ++iter;
+ }
+
+ /* tests for convergence. */
+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
+ return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
+ return LevenbergMarquardtSpace::RelativeReductionTooSmall;
+ if (delta <= parameters.xtol * xnorm)
+ return LevenbergMarquardtSpace::RelativeErrorTooSmall;
+
+ /* tests for termination and stringent tolerances. */
+ if (nfev >= parameters.maxfev)
+ return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
+ if (abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
+ return LevenbergMarquardtSpace::FtolTooSmall;
+ if (delta <= NumTraits<Scalar>::epsilon() * xnorm)
+ return LevenbergMarquardtSpace::XtolTooSmall;
+ if (gnorm <= NumTraits<Scalar>::epsilon())
+ return LevenbergMarquardtSpace::GtolTooSmall;
+
+ } while (ratio < Scalar(1e-4));
+
+ return LevenbergMarquardtSpace::Running;
+}
+
+template<typename FunctorType, typename Scalar>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType,Scalar>::lmstr1(
+ FVectorType &x,
+ const Scalar tol
+ )
+{
+ n = x.size();
+ m = functor.values();
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || m < n || tol < 0.)
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+
+ resetParameters();
+ parameters.ftol = tol;
+ parameters.xtol = tol;
+ parameters.maxfev = 100*(n+1);
+
+ return minimizeOptimumStorage(x);
+}
+
+template<typename FunctorType, typename Scalar>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(FVectorType &x)
+{
+ n = x.size();
+ m = functor.values();
+
+ wa1.resize(n); wa2.resize(n); wa3.resize(n);
+ wa4.resize(m);
+ fvec.resize(m);
+ // Only R is stored in fjac. Q is only used to compute 'qtf', which is
+ // Q.transpose()*rhs. qtf is updated using Givens rotations instead of
+ // storing Q explicitly.
+ // The purpose is to use only an n-by-n matrix instead of an m-by-n one
+ // here, so that we can handle cases where m >> n:
+ fjac.resize(n, n);
+ if (!useExternalScaling)
+ diag.resize(n);
+ eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'");
+ qtf.resize(n);
+
+ /* Function Body */
+ nfev = 0;
+ njev = 0;
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+
+ if (useExternalScaling)
+ for (Index j = 0; j < n; ++j)
+ if (diag[j] <= 0.)
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+
+ /* evaluate the function at the starting point */
+ /* and calculate its norm. */
+ nfev = 1;
+ if ( functor(x, fvec) < 0)
+ return LevenbergMarquardtSpace::UserAsked;
+ fnorm = fvec.stableNorm();
+
+ /* initialize levenberg-marquardt parameter and iteration counter. */
+ par = 0.;
+ iter = 1;
+
+ return LevenbergMarquardtSpace::NotStarted;
+}
+
+
+template<typename FunctorType, typename Scalar>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorType &x)
+{
+ using std::abs;
+ using std::sqrt;
+
+ eigen_assert(x.size()==n); // check the caller is not cheating us
+
+ Index i, j;
+ bool sing;
+
+ /* compute the qr factorization of the jacobian matrix */
+ /* calculated one row at a time, while simultaneously */
+ /* forming (q transpose)*fvec and storing the first */
+ /* n components in qtf. */
+ qtf.fill(0.);
+ fjac.fill(0.);
+ Index rownb = 2;
+ for (i = 0; i < m; ++i) {
+ if (functor.df(x, wa3, rownb) < 0) return LevenbergMarquardtSpace::UserAsked;
+ internal::rwupdt<Scalar>(fjac, wa3, qtf, fvec[i]);
+ ++rownb;
+ }
+ ++njev;
+
+ /* if the jacobian is rank deficient, call qrfac to */
+ /* reorder its columns and update the components of qtf. */
+ sing = false;
+ for (j = 0; j < n; ++j) {
+ if (fjac(j,j) == 0.)
+ sing = true;
+ wa2[j] = fjac.col(j).head(j).stableNorm();
+ }
+ permutation.setIdentity(n);
+ if (sing) {
+ wa2 = fjac.colwise().blueNorm();
+ // TODO We have no unit test covering this code path, do not modify
+ // until it is carefully tested
+ ColPivHouseholderQR<JacobianType> qrfac(fjac);
+ fjac = qrfac.matrixQR();
+ wa1 = fjac.diagonal();
+ fjac.diagonal() = qrfac.hCoeffs();
+ permutation = qrfac.colsPermutation();
+ // TODO : avoid this:
+ for(Index ii=0; ii< fjac.cols(); ii++) fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors
+
+ for (j = 0; j < n; ++j) {
+ if (fjac(j,j) != 0.) {
+ sum = 0.;
+ for (i = j; i < n; ++i)
+ sum += fjac(i,j) * qtf[i];
+ temp = -sum / fjac(j,j);
+ for (i = j; i < n; ++i)
+ qtf[i] += fjac(i,j) * temp;
+ }
+ fjac(j,j) = wa1[j];
+ }
+ }
+
+ /* on the first iteration and if external scaling is not used, scale according */
+ /* to the norms of the columns of the initial jacobian. */
+ if (iter == 1) {
+ if (!useExternalScaling)
+ for (j = 0; j < n; ++j)
+ diag[j] = (wa2[j]==0.)? 1. : wa2[j];
+
+ /* on the first iteration, calculate the norm of the scaled x */
+ /* and initialize the step bound delta. */
+ xnorm = diag.cwiseProduct(x).stableNorm();
+ delta = parameters.factor * xnorm;
+ if (delta == 0.)
+ delta = parameters.factor;
+ }
+
+ /* compute the norm of the scaled gradient. */
+ gnorm = 0.;
+ if (fnorm != 0.)
+ for (j = 0; j < n; ++j)
+ if (wa2[permutation.indices()[j]] != 0.)
+ gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
+
+ /* test for convergence of the gradient norm. */
+ if (gnorm <= parameters.gtol)
+ return LevenbergMarquardtSpace::CosinusTooSmall;
+
+ /* rescale if necessary. */
+ if (!useExternalScaling)
+ diag = diag.cwiseMax(wa2);
+
+ do {
+
+ /* determine the levenberg-marquardt parameter. */
+ internal::lmpar<Scalar>(fjac, permutation.indices(), diag, qtf, delta, par, wa1);
+
+ /* store the direction p and x + p. calculate the norm of p. */
+ wa1 = -wa1;
+ wa2 = x + wa1;
+ pnorm = diag.cwiseProduct(wa1).stableNorm();
+
+ /* on the first iteration, adjust the initial step bound. */
+ if (iter == 1)
+ delta = (std::min)(delta,pnorm);
+
+ /* evaluate the function at x + p and calculate its norm. */
+ if ( functor(wa2, wa4) < 0)
+ return LevenbergMarquardtSpace::UserAsked;
+ ++nfev;
+ fnorm1 = wa4.stableNorm();
+
+ /* compute the scaled actual reduction. */
+ actred = -1.;
+ if (Scalar(.1) * fnorm1 < fnorm)
+ actred = 1. - numext::abs2(fnorm1 / fnorm);
+
+ /* compute the scaled predicted reduction and */
+ /* the scaled directional derivative. */
+ wa3 = fjac.topLeftCorner(n,n).template triangularView<Upper>() * (permutation.inverse() * wa1);
+ temp1 = numext::abs2(wa3.stableNorm() / fnorm);
+ temp2 = numext::abs2(sqrt(par) * pnorm / fnorm);
+ prered = temp1 + temp2 / Scalar(.5);
+ dirder = -(temp1 + temp2);
+
+ /* compute the ratio of the actual to the predicted */
+ /* reduction. */
+ ratio = 0.;
+ if (prered != 0.)
+ ratio = actred / prered;
+
+ /* update the step bound. */
+ if (ratio <= Scalar(.25)) {
+ if (actred >= 0.)
+ temp = Scalar(.5);
+ if (actred < 0.)
+ temp = Scalar(.5) * dirder / (dirder + Scalar(.5) * actred);
+ if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1))
+ temp = Scalar(.1);
+ /* Computing MIN */
+ delta = temp * (std::min)(delta, pnorm / Scalar(.1));
+ par /= temp;
+ } else if (!(par != 0. && ratio < Scalar(.75))) {
+ delta = pnorm / Scalar(.5);
+ par = Scalar(.5) * par;
+ }
+
+ /* test for successful iteration. */
+ if (ratio >= Scalar(1e-4)) {
+ /* successful iteration. update x, fvec, and their norms. */
+ x = wa2;
+ wa2 = diag.cwiseProduct(x);
+ fvec = wa4;
+ xnorm = wa2.stableNorm();
+ fnorm = fnorm1;
+ ++iter;
+ }
+
+ /* tests for convergence. */
+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
+ return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
+ return LevenbergMarquardtSpace::RelativeReductionTooSmall;
+ if (delta <= parameters.xtol * xnorm)
+ return LevenbergMarquardtSpace::RelativeErrorTooSmall;
+
+ /* tests for termination and stringent tolerances. */
+ if (nfev >= parameters.maxfev)
+ return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
+ if (abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
+ return LevenbergMarquardtSpace::FtolTooSmall;
+ if (delta <= NumTraits<Scalar>::epsilon() * xnorm)
+ return LevenbergMarquardtSpace::XtolTooSmall;
+ if (gnorm <= NumTraits<Scalar>::epsilon())
+ return LevenbergMarquardtSpace::GtolTooSmall;
+
+ } while (ratio < Scalar(1e-4));
+
+ return LevenbergMarquardtSpace::Running;
+}
+
+template<typename FunctorType, typename Scalar>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(FVectorType &x)
+{
+ LevenbergMarquardtSpace::Status status = minimizeOptimumStorageInit(x);
+ if (status==LevenbergMarquardtSpace::ImproperInputParameters)
+ return status;
+ do {
+ status = minimizeOptimumStorageOneStep(x);
+ } while (status==LevenbergMarquardtSpace::Running);
+ return status;
+}
+
+template<typename FunctorType, typename Scalar>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType,Scalar>::lmdif1(
+ FunctorType &functor,
+ FVectorType &x,
+ Index *nfev,
+ const Scalar tol
+ )
+{
+ Index n = x.size();
+ Index m = functor.values();
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || m < n || tol < 0.)
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+
+ NumericalDiff<FunctorType> numDiff(functor);
+ // embedded LevenbergMarquardt
+ LevenbergMarquardt<NumericalDiff<FunctorType>, Scalar > lm(numDiff);
+ lm.parameters.ftol = tol;
+ lm.parameters.xtol = tol;
+ lm.parameters.maxfev = 200*(n+1);
+
+ LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x));
+ if (nfev)
+ *nfev = lm.nfev;
+ return info;
+}
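+
+// A minimal usage sketch of lmdif1 (a sketch only, not part of this module:
+// `my_functor` is a hypothetical example following the functor convention
+// this module expects, i.e. the Scalar/InputType/ValueType/JacobianType
+// typedefs, an int operator() filling fvec, and inputs()/values()):
+//
+//   struct my_functor {
+//     typedef double Scalar;
+//     enum { InputsAtCompileTime = Dynamic, ValuesAtCompileTime = Dynamic };
+//     typedef VectorXd InputType;
+//     typedef VectorXd ValueType;
+//     typedef MatrixXd JacobianType;
+//     int operator()(const VectorXd &x, VectorXd &fvec) const {
+//       // one residual per equation; a negative return value aborts
+//       fvec(0) = x(0) - 1.;
+//       fvec(1) = x(1) - 2.;
+//       return 0;
+//     }
+//     int inputs() const { return 2; }
+//     int values() const { return 2; }
+//   };
+//
+//   my_functor functor;
+//   VectorXd x(2); x << 10., 10.;
+//   DenseIndex nfev;
+//   LevenbergMarquardtSpace::Status info =
+//       LevenbergMarquardt<my_functor>::lmdif1(functor, x, &nfev, 1.e-10);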
+
+} // end namespace Eigen
+
+#endif // EIGEN_LEVENBERGMARQUARDT__H
+
+//vim: ai ts=4 sts=4 et sw=4
diff --git a/unsupported/Eigen/src/NonLinearOptimization/chkder.h b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
new file mode 100644
index 0000000..db8ff7d
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
@@ -0,0 +1,66 @@
+#define chkder_log10e 0.43429448190325182765
+#define chkder_factor 100.
+
+namespace Eigen {
+
+namespace internal {
+
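+// chkder mirrors the MINPACK routine of the same name: it checks a
+// user-supplied Jacobian fjac against finite differences of the functor
+// itself. The intended two-pass protocol is sketched below (variable names
+// are placeholders only):
+//
+//   // pass 1 (mode = 1): compute a suitably perturbed point xp from x
+//   internal::chkder(x, fvec, fjac, xp, fvecp, 1, err);
+//   // ... evaluate the functor at x (giving fvec and fjac) and at xp
+//   // (giving fvecp) ...
+//   // pass 2 (mode = 2): fill err; err[i] close to 1 means the i-th
+//   // gradient looks correct, close to 0 means it looks wrong
+//   internal::chkder(x, fvec, fjac, xp, fvecp, 2, err);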
+template<typename Scalar>
+void chkder(
+ const Matrix< Scalar, Dynamic, 1 > &x,
+ const Matrix< Scalar, Dynamic, 1 > &fvec,
+ const Matrix< Scalar, Dynamic, Dynamic > &fjac,
+ Matrix< Scalar, Dynamic, 1 > &xp,
+ const Matrix< Scalar, Dynamic, 1 > &fvecp,
+ int mode,
+ Matrix< Scalar, Dynamic, 1 > &err
+ )
+{
+ using std::sqrt;
+ using std::abs;
+ using std::log;
+
+ typedef DenseIndex Index;
+
+ const Scalar eps = sqrt(NumTraits<Scalar>::epsilon());
+ const Scalar epsf = chkder_factor * NumTraits<Scalar>::epsilon();
+ const Scalar epslog = chkder_log10e * log(eps);
+ Scalar temp;
+
+ const Index m = fvec.size(), n = x.size();
+
+ if (mode != 2) {
+ /* mode = 1. */
+ xp.resize(n);
+ for (Index j = 0; j < n; ++j) {
+ temp = eps * abs(x[j]);
+ if (temp == 0.)
+ temp = eps;
+ xp[j] = x[j] + temp;
+ }
+ }
+ else {
+ /* mode = 2. */
+ err.setZero(m);
+ for (Index j = 0; j < n; ++j) {
+ temp = abs(x[j]);
+ if (temp == 0.)
+ temp = 1.;
+ err += temp * fjac.col(j);
+ }
+ for (Index i = 0; i < m; ++i) {
+ temp = 1.;
+ if (fvec[i] != 0. && fvecp[i] != 0. && abs(fvecp[i] - fvec[i]) >= epsf * abs(fvec[i]))
+ temp = eps * abs((fvecp[i] - fvec[i]) / eps - err[i]) / (abs(fvec[i]) + abs(fvecp[i]));
+ err[i] = 1.;
+ if (temp > NumTraits<Scalar>::epsilon() && temp < eps)
+ err[i] = (chkder_log10e * log(temp) - epslog) / epslog;
+ if (temp >= eps)
+ err[i] = 0.;
+ }
+ }
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
diff --git a/unsupported/Eigen/src/NonLinearOptimization/covar.h b/unsupported/Eigen/src/NonLinearOptimization/covar.h
new file mode 100644
index 0000000..68260d1
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/covar.h
@@ -0,0 +1,70 @@
+namespace Eigen {
+
+namespace internal {
+
+template <typename Scalar>
+void covar(
+ Matrix< Scalar, Dynamic, Dynamic > &r,
+ const VectorXi &ipvt,
+ Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon()) )
+{
+ using std::abs;
+ typedef DenseIndex Index;
+
+ /* Local variables */
+ Index i, j, k, l, ii, jj;
+ bool sing;
+ Scalar temp;
+
+ /* Function Body */
+ const Index n = r.cols();
+ const Scalar tolr = tol * abs(r(0,0));
+ Matrix< Scalar, Dynamic, 1 > wa(n);
+ eigen_assert(ipvt.size()==n);
+
+ /* form the inverse of r in the full upper triangle of r. */
+ l = -1;
+ for (k = 0; k < n; ++k)
+ if (abs(r(k,k)) > tolr) {
+ r(k,k) = 1. / r(k,k);
+ for (j = 0; j <= k-1; ++j) {
+ temp = r(k,k) * r(j,k);
+ r(j,k) = 0.;
+ r.col(k).head(j+1) -= r.col(j).head(j+1) * temp;
+ }
+ l = k;
+ }
+
+ /* form the full upper triangle of the inverse of (r transpose)*r */
+ /* in the full upper triangle of r. */
+ for (k = 0; k <= l; ++k) {
+ for (j = 0; j <= k-1; ++j)
+ r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k);
+ r.col(k).head(k+1) *= r(k,k);
+ }
+
+ /* form the full lower triangle of the covariance matrix */
+ /* in the strict lower triangle of r and in wa. */
+ for (j = 0; j < n; ++j) {
+ jj = ipvt[j];
+ sing = j > l;
+ for (i = 0; i <= j; ++i) {
+ if (sing)
+ r(i,j) = 0.;
+ ii = ipvt[i];
+ if (ii > jj)
+ r(ii,jj) = r(i,j);
+ if (ii < jj)
+ r(jj,ii) = r(i,j);
+ }
+ wa[jj] = r(j,j);
+ }
+
+ /* symmetrize the covariance matrix in r. */
+ r.topLeftCorner(n,n).template triangularView<StrictlyUpper>() = r.topLeftCorner(n,n).transpose();
+ r.diagonal() = wa;
+}
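+
+// Usage sketch (assuming `lm` is a LevenbergMarquardt instance whose
+// minimization has already run, so that lm.fjac holds the QR factor and
+// lm.permutation the column permutation):
+//
+//   internal::covar(lm.fjac, lm.permutation.indices());
+//   // lm.fjac now holds the covariance of the parameters, up to the usual
+//   // scaling factor fnorm^2 / (m - n), with m equations and n unknowns.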
+
+} // end namespace internal
+
+} // end namespace Eigen
diff --git a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
new file mode 100644
index 0000000..80c5d27
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
@@ -0,0 +1,107 @@
+namespace Eigen {
+
+namespace internal {
+
+template <typename Scalar>
+void dogleg(
+ const Matrix< Scalar, Dynamic, Dynamic > &qrfac,
+ const Matrix< Scalar, Dynamic, 1 > &diag,
+ const Matrix< Scalar, Dynamic, 1 > &qtb,
+ Scalar delta,
+ Matrix< Scalar, Dynamic, 1 > &x)
+{
+ using std::abs;
+ using std::sqrt;
+
+ typedef DenseIndex Index;
+
+ /* Local variables */
+ Index i, j;
+ Scalar sum, temp, alpha, bnorm;
+ Scalar gnorm, qnorm;
+ Scalar sgnorm;
+
+ /* Function Body */
+ const Scalar epsmch = NumTraits<Scalar>::epsilon();
+ const Index n = qrfac.cols();
+ eigen_assert(n==qtb.size());
+ eigen_assert(n==x.size());
+ eigen_assert(n==diag.size());
+ Matrix< Scalar, Dynamic, 1 > wa1(n), wa2(n);
+
+ /* first, calculate the gauss-newton direction. */
+ for (j = n-1; j >=0; --j) {
+ temp = qrfac(j,j);
+ if (temp == 0.) {
+ temp = epsmch * qrfac.col(j).head(j+1).maxCoeff();
+ if (temp == 0.)
+ temp = epsmch;
+ }
+ if (j==n-1)
+ x[j] = qtb[j] / temp;
+ else
+ x[j] = (qtb[j] - qrfac.row(j).tail(n-j-1).dot(x.tail(n-j-1))) / temp;
+ }
+
+ /* test whether the gauss-newton direction is acceptable. */
+ qnorm = diag.cwiseProduct(x).stableNorm();
+ if (qnorm <= delta)
+ return;
+
+ // TODO : this path is not tested by Eigen unit tests
+
+ /* the gauss-newton direction is not acceptable. */
+ /* next, calculate the scaled gradient direction. */
+
+ wa1.fill(0.);
+ for (j = 0; j < n; ++j) {
+ wa1.tail(n-j) += qrfac.row(j).tail(n-j) * qtb[j];
+ wa1[j] /= diag[j];
+ }
+
+ /* calculate the norm of the scaled gradient and test for */
+ /* the special case in which the scaled gradient is zero. */
+ gnorm = wa1.stableNorm();
+ sgnorm = 0.;
+ alpha = delta / qnorm;
+ if (gnorm == 0.)
+ goto algo_end;
+
+ /* calculate the point along the scaled gradient */
+ /* at which the quadratic is minimized. */
+ wa1.array() /= (diag*gnorm).array();
+ // TODO : once unit tests cover this part, use:
+ // wa2 = qrfac.template triangularView<Upper>() * wa1;
+ for (j = 0; j < n; ++j) {
+ sum = 0.;
+ for (i = j; i < n; ++i) {
+ sum += qrfac(j,i) * wa1[i];
+ }
+ wa2[j] = sum;
+ }
+ temp = wa2.stableNorm();
+ sgnorm = gnorm / temp / temp;
+
+ /* test whether the scaled gradient direction is acceptable. */
+ alpha = 0.;
+ if (sgnorm >= delta)
+ goto algo_end;
+
+ /* the scaled gradient direction is not acceptable. */
+ /* finally, calculate the point along the dogleg */
+ /* at which the quadratic is minimized. */
+ bnorm = qtb.stableNorm();
+ temp = bnorm / gnorm * (bnorm / qnorm) * (sgnorm / delta);
+ temp = temp - delta / qnorm * numext::abs2(sgnorm / delta) + sqrt(numext::abs2(temp - delta / qnorm) + (1.-numext::abs2(delta / qnorm)) * (1.-numext::abs2(sgnorm / delta)));
+ alpha = delta / qnorm * (1. - numext::abs2(sgnorm / delta)) / temp;
+algo_end:
+
+ /* form appropriate convex combination of the gauss-newton */
+ /* direction and the scaled gradient direction. */
+ temp = (1.-alpha) * (std::min)(sgnorm,delta);
+ x = temp * wa1 + alpha * x;
+}
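+
+// In summary (hedged restatement of the code above): the returned step is the
+// convex combination
+//
+//   x = (1 - alpha) * min(sgnorm, delta) * (scaled gradient direction)
+//     + alpha * (gauss-newton direction)
+//
+// where alpha is chosen so that the step norm matches the trust region radius
+// delta whenever the full gauss-newton step does not fit inside it.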
+
+} // end namespace internal
+
+} // end namespace Eigen
diff --git a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
new file mode 100644
index 0000000..bb7cf26
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
@@ -0,0 +1,79 @@
+namespace Eigen {
+
+namespace internal {
+
+template<typename FunctorType, typename Scalar>
+DenseIndex fdjac1(
+ const FunctorType &Functor,
+ Matrix< Scalar, Dynamic, 1 > &x,
+ Matrix< Scalar, Dynamic, 1 > &fvec,
+ Matrix< Scalar, Dynamic, Dynamic > &fjac,
+ DenseIndex ml, DenseIndex mu,
+ Scalar epsfcn)
+{
+ using std::sqrt;
+ using std::abs;
+
+ typedef DenseIndex Index;
+
+ /* Local variables */
+ Scalar h;
+ Index j, k;
+ Scalar eps, temp;
+ Index msum;
+ int iflag;
+ Index start, length;
+
+ /* Function Body */
+ const Scalar epsmch = NumTraits<Scalar>::epsilon();
+ const Index n = x.size();
+ eigen_assert(fvec.size()==n);
+ Matrix< Scalar, Dynamic, 1 > wa1(n);
+ Matrix< Scalar, Dynamic, 1 > wa2(n);
+
+ eps = sqrt((std::max)(epsfcn,epsmch));
+ msum = ml + mu + 1;
+ if (msum >= n) {
+ /* computation of dense approximate jacobian. */
+ for (j = 0; j < n; ++j) {
+ temp = x[j];
+ h = eps * abs(temp);
+ if (h == 0.)
+ h = eps;
+ x[j] = temp + h;
+ iflag = Functor(x, wa1);
+ if (iflag < 0)
+ return iflag;
+ x[j] = temp;
+ fjac.col(j) = (wa1-fvec)/h;
+ }
+
+ }else {
+ /* computation of banded approximate jacobian. */
+ for (k = 0; k < msum; ++k) {
+ for (j = k; (msum<0) ? (j>n): (j<n); j += msum) {
+ wa2[j] = x[j];
+ h = eps * abs(wa2[j]);
+ if (h == 0.) h = eps;
+ x[j] = wa2[j] + h;
+ }
+ iflag = Functor(x, wa1);
+ if (iflag < 0)
+ return iflag;
+ for (j = k; (msum<0) ? (j>n): (j<n); j += msum) {
+ x[j] = wa2[j];
+ h = eps * abs(wa2[j]);
+ if (h == 0.) h = eps;
+ fjac.col(j).setZero();
+ start = std::max<Index>(0,j-mu);
+ length = (std::min)(n-1, j+ml) - start + 1;
+ fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h;
+ }
+ }
+ }
+ return 0;
+}
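+
+// Note: fdjac1 mirrors MINPACK's routine of the same name. With
+// ml + mu + 1 >= n it builds a dense forward-difference jacobian, one functor
+// evaluation per column; otherwise it exploits the assumed band structure
+// (ml sub-diagonals, mu super-diagonals) and needs only ml + mu + 1
+// evaluations, e.g. for a tridiagonal system:
+//
+//   internal::fdjac1(functor, x, fvec, fjac, 1, 1, 0.);  // ml = mu = 1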
+
+} // end namespace internal
+
+} // end namespace Eigen
diff --git a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
new file mode 100644
index 0000000..4c17d4c
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
@@ -0,0 +1,298 @@
+namespace Eigen {
+
+namespace internal {
+
+template <typename Scalar>
+void lmpar(
+ Matrix< Scalar, Dynamic, Dynamic > &r,
+ const VectorXi &ipvt,
+ const Matrix< Scalar, Dynamic, 1 > &diag,
+ const Matrix< Scalar, Dynamic, 1 > &qtb,
+ Scalar delta,
+ Scalar &par,
+ Matrix< Scalar, Dynamic, 1 > &x)
+{
+ using std::abs;
+ using std::sqrt;
+ typedef DenseIndex Index;
+
+ /* Local variables */
+ Index i, j, l;
+ Scalar fp;
+ Scalar parc, parl;
+ Index iter;
+ Scalar temp, paru;
+ Scalar gnorm;
+ Scalar dxnorm;
+
+
+ /* Function Body */
+ const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
+ const Index n = r.cols();
+ eigen_assert(n==diag.size());
+ eigen_assert(n==qtb.size());
+ eigen_assert(n==x.size());
+
+ Matrix< Scalar, Dynamic, 1 > wa1, wa2;
+
+ /* compute and store in x the gauss-newton direction. if the */
+ /* jacobian is rank-deficient, obtain a least squares solution. */
+ Index nsing = n-1;
+ wa1 = qtb;
+ for (j = 0; j < n; ++j) {
+ if (r(j,j) == 0. && nsing == n-1)
+ nsing = j - 1;
+ if (nsing < n-1)
+ wa1[j] = 0.;
+ }
+ for (j = nsing; j>=0; --j) {
+ wa1[j] /= r(j,j);
+ temp = wa1[j];
+ for (i = 0; i < j ; ++i)
+ wa1[i] -= r(i,j) * temp;
+ }
+
+ for (j = 0; j < n; ++j)
+ x[ipvt[j]] = wa1[j];
+
+ /* initialize the iteration counter. */
+ /* evaluate the function at the origin, and test */
+ /* for acceptance of the gauss-newton direction. */
+ iter = 0;
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ fp = dxnorm - delta;
+ if (fp <= Scalar(0.1) * delta) {
+ par = 0;
+ return;
+ }
+
+ /* if the jacobian is not rank deficient, the newton */
+ /* step provides a lower bound, parl, for the zero of */
+ /* the function. otherwise set this bound to zero. */
+ parl = 0.;
+ if (nsing >= n-1) {
+ for (j = 0; j < n; ++j) {
+ l = ipvt[j];
+ wa1[j] = diag[l] * (wa2[l] / dxnorm);
+ }
+ // it's actually a triangularView.solveInPlace(), though done in a
+ // roundabout way:
+ for (j = 0; j < n; ++j) {
+ Scalar sum = 0.;
+ for (i = 0; i < j; ++i)
+ sum += r(i,j) * wa1[i];
+ wa1[j] = (wa1[j] - sum) / r(j,j);
+ }
+ temp = wa1.blueNorm();
+ parl = fp / delta / temp / temp;
+ }
+
+ /* calculate an upper bound, paru, for the zero of the function. */
+ for (j = 0; j < n; ++j)
+ wa1[j] = r.col(j).head(j+1).dot(qtb.head(j+1)) / diag[ipvt[j]];
+
+ gnorm = wa1.stableNorm();
+ paru = gnorm / delta;
+ if (paru == 0.)
+ paru = dwarf / (std::min)(delta,Scalar(0.1));
+
+ /* if the input par lies outside of the interval (parl,paru), */
+ /* set par to the closer endpoint. */
+ par = (std::max)(par,parl);
+ par = (std::min)(par,paru);
+ if (par == 0.)
+ par = gnorm / dxnorm;
+
+ /* beginning of an iteration. */
+ while (true) {
+ ++iter;
+
+ /* evaluate the function at the current value of par. */
+ if (par == 0.)
+ par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
+ wa1 = sqrt(par)* diag;
+
+ Matrix< Scalar, Dynamic, 1 > sdiag(n);
+ qrsolv<Scalar>(r, ipvt, wa1, qtb, x, sdiag);
+
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ temp = fp;
+ fp = dxnorm - delta;
+
+ /* if the function is small enough, accept the current value */
+ /* of par. also test for the exceptional cases where parl */
+ /* is zero or the number of iterations has reached 10. */
+ if (abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10)
+ break;
+
+ /* compute the newton correction. */
+ for (j = 0; j < n; ++j) {
+ l = ipvt[j];
+ wa1[j] = diag[l] * (wa2[l] / dxnorm);
+ }
+ for (j = 0; j < n; ++j) {
+ wa1[j] /= sdiag[j];
+ temp = wa1[j];
+ for (i = j+1; i < n; ++i)
+ wa1[i] -= r(i,j) * temp;
+ }
+ temp = wa1.blueNorm();
+ parc = fp / delta / temp / temp;
+
+ /* depending on the sign of the function, update parl or paru. */
+ if (fp > 0.)
+ parl = (std::max)(parl,par);
+ if (fp < 0.)
+ paru = (std::min)(paru,par);
+
+ /* compute an improved estimate for par. */
+ /* Computing MAX */
+ par = (std::max)(parl,par+parc);
+
+ /* end of an iteration. */
+ }
+
+ /* termination. */
+ if (iter == 0)
+ par = 0.;
+ return;
+}
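+
+// In summary (hedged restatement of the algorithm above): lmpar computes the
+// Levenberg-Marquardt parameter `par` as an approximate root of the scalar
+// secular equation
+//
+//   phi(par) = || D * x(par) || - delta = 0,  where
+//   (A^T * A + par * D^T * D) * x(par) = A^T * b,
+//
+// using a safeguarded Newton iteration on par bracketed by [parl, paru];
+// following MINPACK, at most 10 iterations are performed and convergence is
+// declared as soon as |phi(par)| <= 0.1 * delta.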
+
+template <typename Scalar>
+void lmpar2(
+ const ColPivHouseholderQR<Matrix< Scalar, Dynamic, Dynamic> > &qr,
+ const Matrix< Scalar, Dynamic, 1 > &diag,
+ const Matrix< Scalar, Dynamic, 1 > &qtb,
+ Scalar delta,
+ Scalar &par,
+ Matrix< Scalar, Dynamic, 1 > &x)
+{
+ using std::sqrt;
+ using std::abs;
+ typedef DenseIndex Index;
+
+ /* Local variables */
+ Index j;
+ Scalar fp;
+ Scalar parc, parl;
+ Index iter;
+ Scalar temp, paru;
+ Scalar gnorm;
+ Scalar dxnorm;
+
+
+ /* Function Body */
+ const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
+ const Index n = qr.matrixQR().cols();
+ eigen_assert(n==diag.size());
+ eigen_assert(n==qtb.size());
+
+ Matrix< Scalar, Dynamic, 1 > wa1, wa2;
+
+ /* compute and store in x the gauss-newton direction. if the */
+ /* jacobian is rank-deficient, obtain a least squares solution. */
+
+// const Index rank = qr.nonzeroPivots(); // exactly double(0.)
+ const Index rank = qr.rank(); // use a threshold
+ wa1 = qtb;
+ wa1.tail(n-rank).setZero();
+ qr.matrixQR().topLeftCorner(rank, rank).template triangularView<Upper>().solveInPlace(wa1.head(rank));
+
+ x = qr.colsPermutation()*wa1;
+
+ /* initialize the iteration counter. */
+ /* evaluate the function at the origin, and test */
+ /* for acceptance of the gauss-newton direction. */
+ iter = 0;
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ fp = dxnorm - delta;
+ if (fp <= Scalar(0.1) * delta) {
+ par = 0;
+ return;
+ }
+
+ /* if the jacobian is not rank deficient, the newton */
+ /* step provides a lower bound, parl, for the zero of */
+ /* the function. otherwise set this bound to zero. */
+ parl = 0.;
+ if (rank==n) {
+ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm;
+ qr.matrixQR().topLeftCorner(n, n).transpose().template triangularView<Lower>().solveInPlace(wa1);
+ temp = wa1.blueNorm();
+ parl = fp / delta / temp / temp;
+ }
+
+ /* calculate an upper bound, paru, for the zero of the function. */
+ for (j = 0; j < n; ++j)
+ wa1[j] = qr.matrixQR().col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)];
+
+ gnorm = wa1.stableNorm();
+ paru = gnorm / delta;
+ if (paru == 0.)
+ paru = dwarf / (std::min)(delta,Scalar(0.1));
+
+ /* if the input par lies outside of the interval (parl,paru), */
+ /* set par to the closer endpoint. */
+ par = (std::max)(par,parl);
+ par = (std::min)(par,paru);
+ if (par == 0.)
+ par = gnorm / dxnorm;
+
+ /* beginning of an iteration. */
+ Matrix< Scalar, Dynamic, Dynamic > s = qr.matrixQR();
+ while (true) {
+ ++iter;
+
+ /* evaluate the function at the current value of par. */
+ if (par == 0.)
+ par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
+ wa1 = sqrt(par)* diag;
+
+ Matrix< Scalar, Dynamic, 1 > sdiag(n);
+ qrsolv<Scalar>(s, qr.colsPermutation().indices(), wa1, qtb, x, sdiag);
+
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ temp = fp;
+ fp = dxnorm - delta;
+
+ /* if the function is small enough, accept the current value */
+ /* of par. also test for the exceptional cases where parl */
+ /* is zero or the number of iterations has reached 10. */
+ if (abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10)
+ break;
+
+ /* compute the newton correction. */
+ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm);
+ // we could almost use this here, but the diagonal is outside qr, in sdiag[]
+ // qr.matrixQR().topLeftCorner(n, n).transpose().template triangularView<Lower>().solveInPlace(wa1);
+ for (j = 0; j < n; ++j) {
+ wa1[j] /= sdiag[j];
+ temp = wa1[j];
+ for (Index i = j+1; i < n; ++i)
+ wa1[i] -= s(i,j) * temp;
+ }
+ temp = wa1.blueNorm();
+ parc = fp / delta / temp / temp;
+
+ /* depending on the sign of the function, update parl or paru. */
+ if (fp > 0.)
+ parl = (std::max)(parl,par);
+ if (fp < 0.)
+ paru = (std::min)(paru,par);
+
+ /* compute an improved estimate for par. */
+ par = (std::max)(parl,par+parc);
+ }
+ if (iter == 0)
+ par = 0.;
+ return;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
diff --git a/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
new file mode 100644
index 0000000..feafd62
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h
@@ -0,0 +1,91 @@
+namespace Eigen {
+
+namespace internal {
+
+// TODO : once qrsolv2 is removed, use ColPivHouseholderQR or PermutationMatrix instead of ipvt
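+// Given the QR factorization A*P = Q*R of some matrix A (R passed in `s`, the
+// permutation P in `ipvt`, and Q^T*b in `qtb`), qrsolv solves, in the least
+// squares sense, the augmented system
+//
+//   minimize || A*x - b ||^2 + || D*x ||^2
+//
+// where D is the diagonal matrix with entries `diag`. On output, x holds the
+// solution and sdiag the diagonal of the upper triangular factor S such that
+// P^T * (A^T*A + D^T*D) * P = S^T*S. (Summary adapted from the MINPACK
+// documentation of the routine of the same name.)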
+template <typename Scalar>
+void qrsolv(
+ Matrix< Scalar, Dynamic, Dynamic > &s,
+ // TODO : use a PermutationMatrix once lmpar is no more:
+ const VectorXi &ipvt,
+ const Matrix< Scalar, Dynamic, 1 > &diag,
+ const Matrix< Scalar, Dynamic, 1 > &qtb,
+ Matrix< Scalar, Dynamic, 1 > &x,
+ Matrix< Scalar, Dynamic, 1 > &sdiag)
+{
+ typedef DenseIndex Index;
+
+ /* Local variables */
+ Index i, j, k, l;
+ Scalar temp;
+ Index n = s.cols();
+ Matrix< Scalar, Dynamic, 1 > wa(n);
+ JacobiRotation<Scalar> givens;
+
+ /* Function Body */
+ // the following will only change the lower triangular part of s, including
+ // the diagonal, though the diagonal is restored afterward
+
+ /* copy r and (q transpose)*b to preserve input and initialize s. */
+ /* in particular, save the diagonal elements of r in x. */
+ x = s.diagonal();
+ wa = qtb;
+
+ s.topLeftCorner(n,n).template triangularView<StrictlyLower>() = s.topLeftCorner(n,n).transpose();
+
+ /* eliminate the diagonal matrix d using a givens rotation. */
+ for (j = 0; j < n; ++j) {
+
+ /* prepare the row of d to be eliminated, locating the */
+ /* diagonal element using p from the qr factorization. */
+ l = ipvt[j];
+ if (diag[l] == 0.)
+ break;
+ sdiag.tail(n-j).setZero();
+ sdiag[j] = diag[l];
+
+ /* the transformations to eliminate the row of d */
+ /* modify only a single element of (q transpose)*b */
+ /* beyond the first n, which is initially zero. */
+ Scalar qtbpj = 0.;
+ for (k = j; k < n; ++k) {
+ /* determine a givens rotation which eliminates the */
+ /* appropriate element in the current row of d. */
+ givens.makeGivens(-s(k,k), sdiag[k]);
+
+ /* compute the modified diagonal element of r and */
+ /* the modified element of ((q transpose)*b,0). */
+ s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k];
+ temp = givens.c() * wa[k] + givens.s() * qtbpj;
+ qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
+ wa[k] = temp;
+
+ /* accumulate the transformation in the row of s. */
+ for (i = k+1; i<n; ++i) {
+ temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
+ sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
+ s(i,k) = temp;
+ }
+ }
+ }
+
+ /* solve the triangular system for z. if the system is */
+ /* singular, then obtain a least squares solution. */
+ Index nsing;
+ for(nsing=0; nsing<n && sdiag[nsing]!=0; nsing++) {}
+
+ wa.tail(n-nsing).setZero();
+ s.topLeftCorner(nsing, nsing).transpose().template triangularView<Upper>().solveInPlace(wa.head(nsing));
+
+ // restore
+ sdiag = s.diagonal();
+ s.diagonal() = x;
+
+ /* permute the components of z back to components of x. */
+ for (j = 0; j < n; ++j) x[ipvt[j]] = wa[j];
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h b/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
new file mode 100644
index 0000000..36ff700
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h
@@ -0,0 +1,30 @@
+namespace Eigen {
+
+namespace internal {
+
+// TODO : move this to GivensQR once there's such a thing in Eigen
+
+template <typename Scalar>
+void r1mpyq(DenseIndex m, DenseIndex n, Scalar *a, const std::vector<JacobiRotation<Scalar> > &v_givens, const std::vector<JacobiRotation<Scalar> > &w_givens)
+{
+ typedef DenseIndex Index;
+
+ /* apply the first set of givens rotations to a. */
+ for (Index j = n-2; j>=0; --j)
+ for (Index i = 0; i<m; ++i) {
+ Scalar temp = v_givens[j].c() * a[i+m*j] - v_givens[j].s() * a[i+m*(n-1)];
+ a[i+m*(n-1)] = v_givens[j].s() * a[i+m*j] + v_givens[j].c() * a[i+m*(n-1)];
+ a[i+m*j] = temp;
+ }
+ /* apply the second set of givens rotations to a. */
+ for (Index j = 0; j<n-1; ++j)
+ for (Index i = 0; i<m; ++i) {
+ Scalar temp = w_givens[j].c() * a[i+m*j] + w_givens[j].s() * a[i+m*(n-1)];
+ a[i+m*(n-1)] = -w_givens[j].s() * a[i+m*j] + w_givens[j].c() * a[i+m*(n-1)];
+ a[i+m*j] = temp;
+ }
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
new file mode 100644
index 0000000..f287660
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
@@ -0,0 +1,99 @@
+namespace Eigen {
+
+namespace internal {
+
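+// Hedged summary, adapted from the MINPACK documentation of the routine of
+// the same name: given a triangular matrix s and vectors u and v, r1updt
+// determines sequences of givens rotations (returned in v_givens and
+// w_givens) that restore the rank-1 update s + u*v^T to triangular form;
+// *sing is set to true if the updated factor has a zero diagonal entry. The
+// rotations can later be applied to the orthogonal factor via r1mpyq.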
+template <typename Scalar>
+void r1updt(
+ Matrix< Scalar, Dynamic, Dynamic > &s,
+ const Matrix< Scalar, Dynamic, 1> &u,
+ std::vector<JacobiRotation<Scalar> > &v_givens,
+ std::vector<JacobiRotation<Scalar> > &w_givens,
+ Matrix< Scalar, Dynamic, 1> &v,
+ Matrix< Scalar, Dynamic, 1> &w,
+ bool *sing)
+{
+ typedef DenseIndex Index;
+ const JacobiRotation<Scalar> IdentityRotation = JacobiRotation<Scalar>(1,0);
+
+ /* Local variables */
+ const Index m = s.rows();
+ const Index n = s.cols();
+ Index i, j=1;
+ Scalar temp;
+ JacobiRotation<Scalar> givens;
+
+ // r1updt had a broader use case, but we don't use it here. And, more
+ // importantly, we cannot test it.
+ eigen_assert(m==n);
+ eigen_assert(u.size()==m);
+ eigen_assert(v.size()==n);
+ eigen_assert(w.size()==n);
+
+ /* move the nontrivial part of the last column of s into w. */
+ w[n-1] = s(n-1,n-1);
+
+ /* rotate the vector v into a multiple of the n-th unit vector */
+ /* in such a way that a spike is introduced into w. */
+ for (j=n-2; j>=0; --j) {
+ w[j] = 0.;
+ if (v[j] != 0.) {
+ /* determine a givens rotation which eliminates the */
+ /* j-th element of v. */
+ givens.makeGivens(-v[n-1], v[j]);
+
+ /* apply the transformation to v and store the information */
+ /* necessary to recover the givens rotation. */
+ v[n-1] = givens.s() * v[j] + givens.c() * v[n-1];
+ v_givens[j] = givens;
+
+ /* apply the transformation to s and extend the spike in w. */
+ for (i = j; i < m; ++i) {
+ temp = givens.c() * s(j,i) - givens.s() * w[i];
+ w[i] = givens.s() * s(j,i) + givens.c() * w[i];
+ s(j,i) = temp;
+ }
+ } else
+ v_givens[j] = IdentityRotation;
+ }
+
+ /* add the spike from the rank 1 update to w. */
+ w += v[n-1] * u;
+
+ /* eliminate the spike. */
+ *sing = false;
+ for (j = 0; j < n-1; ++j) {
+ if (w[j] != 0.) {
+ /* determine a givens rotation which eliminates the */
+ /* j-th element of the spike. */
+ givens.makeGivens(-s(j,j), w[j]);
+
+ /* apply the transformation to s and reduce the spike in w. */
+ for (i = j; i < m; ++i) {
+ temp = givens.c() * s(j,i) + givens.s() * w[i];
+ w[i] = -givens.s() * s(j,i) + givens.c() * w[i];
+ s(j,i) = temp;
+ }
+
+ /* store the information necessary to recover the */
+ /* givens rotation. */
+ w_givens[j] = givens;
+ } else
+ w_givens[j] = IdentityRotation;
+
+ /* test for zero diagonal elements in the output s. */
+ if (s(j,j) == 0.) {
+ *sing = true;
+ }
+ }
+ /* move w back into the last column of the output s. */
+ s(n-1,n-1) = w[n-1];
+
+ if (s(j,j) == 0.) {
+ *sing = true;
+ }
+ return;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
diff --git a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
new file mode 100644
index 0000000..6ebf856
--- /dev/null
+++ b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
@@ -0,0 +1,49 @@
+namespace Eigen {
+
+namespace internal {
+
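+// Hedged summary, adapted from the MINPACK documentation of the routine of
+// the same name: given the upper triangular factor r of a QR factorization,
+// rwupdt updates r, b and alpha for the matrix obtained by appending the row
+// w, using one givens rotation per column.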
+template <typename Scalar>
+void rwupdt(
+ Matrix< Scalar, Dynamic, Dynamic > &r,
+ const Matrix< Scalar, Dynamic, 1> &w,
+ Matrix< Scalar, Dynamic, 1> &b,
+ Scalar alpha)
+{
+ typedef DenseIndex Index;
+
+ const Index n = r.cols();
+ eigen_assert(r.rows()>=n);
+ std::vector<JacobiRotation<Scalar> > givens(n);
+
+ /* Local variables */
+ Scalar temp, rowj;
+
+ /* Function Body */
+ for (Index j = 0; j < n; ++j) {
+ rowj = w[j];
+
+ /* apply the previous transformations to */
+ /* r(i,j), i=0,1,...,j-1, and to w(j). */
+ for (Index i = 0; i < j; ++i) {
+ temp = givens[i].c() * r(i,j) + givens[i].s() * rowj;
+ rowj = -givens[i].s() * r(i,j) + givens[i].c() * rowj;
+ r(i,j) = temp;
+ }
+
+ /* determine a givens rotation which eliminates w(j). */
+ givens[j].makeGivens(-r(j,j), rowj);
+
+ if (rowj == 0.)
+ continue; // givens[j] is identity
+
+ /* apply the current transformation to r(j,j), b(j), and alpha. */
+ r(j,j) = givens[j].c() * r(j,j) + givens[j].s() * rowj;
+ temp = givens[j].c() * b[j] + givens[j].s() * alpha;
+ alpha = -givens[j].s() * b[j] + givens[j].c() * alpha;
+ b[j] = temp;
+ }
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
diff --git a/unsupported/Eigen/src/NumericalDiff/CMakeLists.txt b/unsupported/Eigen/src/NumericalDiff/CMakeLists.txt
new file mode 100644
index 0000000..1199aca
--- /dev/null
+++ b/unsupported/Eigen/src/NumericalDiff/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_NumericalDiff_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_NumericalDiff_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/NumericalDiff COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
new file mode 100644
index 0000000..ea5d8bc
--- /dev/null
+++ b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
@@ -0,0 +1,130 @@
+// -*- coding: utf-8
+// vim: set fileencoding=utf-8
+
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_NUMERICAL_DIFF_H
+#define EIGEN_NUMERICAL_DIFF_H
+
+namespace Eigen {
+
+enum NumericalDiffMode {
+ Forward,
+ Central
+};
+
+
+/**
+ * This class allows you to add a method df() to your functor, which will
+ * use numerical differentiation to compute an approximation of the
+ * derivative for the functor. Of course, if you have an analytical form
+ * for the derivative, you should rather implement df() yourself.
+ *
+ * More information on
+ * http://en.wikipedia.org/wiki/Numerical_differentiation
+ *
+ * Currently only the "Forward" and "Central" schemes are implemented.
+ * A usage sketch follows the class definition below.
+ */
+template<typename _Functor, NumericalDiffMode mode=Forward>
+class NumericalDiff : public _Functor
+{
+public:
+ typedef _Functor Functor;
+ typedef typename Functor::Scalar Scalar;
+ typedef typename Functor::InputType InputType;
+ typedef typename Functor::ValueType ValueType;
+ typedef typename Functor::JacobianType JacobianType;
+
+ NumericalDiff(Scalar _epsfcn=0.) : Functor(), epsfcn(_epsfcn) {}
+ NumericalDiff(const Functor& f, Scalar _epsfcn=0.) : Functor(f), epsfcn(_epsfcn) {}
+
+ // forward constructors
+ template<typename T0>
+ NumericalDiff(const T0& a0) : Functor(a0), epsfcn(0) {}
+ template<typename T0, typename T1>
+ NumericalDiff(const T0& a0, const T1& a1) : Functor(a0, a1), epsfcn(0) {}
+ template<typename T0, typename T1, typename T2>
+ NumericalDiff(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2), epsfcn(0) {}
+
+ enum {
+ InputsAtCompileTime = Functor::InputsAtCompileTime,
+ ValuesAtCompileTime = Functor::ValuesAtCompileTime
+ };
+
+ /**
+ * \returns the number of functor evaluations performed
+ */
+ int df(const InputType& _x, JacobianType &jac) const
+ {
+ using std::sqrt;
+ using std::abs;
+ /* Local variables */
+ Scalar h;
+ int nfev=0;
+ const typename InputType::Index n = _x.size();
+ const Scalar eps = sqrt(((std::max)(epsfcn,NumTraits<Scalar>::epsilon() )));
+ ValueType val1, val2;
+ InputType x = _x;
+ // TODO : we should do this only if the size is not already known
+ val1.resize(Functor::values());
+ val2.resize(Functor::values());
+
+ // initialization
+ switch(mode) {
+ case Forward:
+ // compute f(x)
+ Functor::operator()(x, val1); nfev++;
+ break;
+ case Central:
+ // do nothing
+ break;
+ default:
+ eigen_assert(false);
+ };
+
+ // Function Body
+ for (int j = 0; j < n; ++j) {
+ h = eps * abs(x[j]);
+ if (h == 0.) {
+ h = eps;
+ }
+ switch(mode) {
+ case Forward:
+ x[j] += h;
+ Functor::operator()(x, val2);
+ nfev++;
+ x[j] = _x[j];
+ jac.col(j) = (val2-val1)/h;
+ break;
+ case Central:
+ x[j] += h;
+ Functor::operator()(x, val2); nfev++;
+ x[j] -= 2*h;
+ Functor::operator()(x, val1); nfev++;
+ x[j] = _x[j];
+ jac.col(j) = (val2-val1)/(2*h);
+ break;
+ default:
+ eigen_assert(false);
+ };
+ }
+ return nfev;
+ }
+private:
+ Scalar epsfcn;
+
+ NumericalDiff& operator=(const NumericalDiff&);
+};
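+
+// A minimal usage sketch (`my_functor` is a hypothetical type following the
+// functor convention used by the nonlinear optimization module: the
+// Scalar/InputType/ValueType/JacobianType typedefs plus operator(),
+// inputs() and values()):
+//
+//   my_functor functor;
+//   NumericalDiff<my_functor> fwd(functor);           // Forward scheme
+//   NumericalDiff<my_functor, Central> ctr(functor);  // Central scheme
+//   VectorXd x(2); x << 1., 2.;
+//   MatrixXd jac(2, 2);
+//   int nfev = fwd.df(x, jac);  // jac now holds the approximate jacobian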
+
+} // end namespace Eigen
+
+//vim: ai ts=4 sts=4 et sw=4
+#endif // EIGEN_NUMERICAL_DIFF_H
+
diff --git a/unsupported/Eigen/src/Polynomials/CMakeLists.txt b/unsupported/Eigen/src/Polynomials/CMakeLists.txt
new file mode 100644
index 0000000..51f13f3
--- /dev/null
+++ b/unsupported/Eigen/src/Polynomials/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Polynomials_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Polynomials_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/Polynomials COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/Polynomials/Companion.h b/unsupported/Eigen/src/Polynomials/Companion.h
new file mode 100644
index 0000000..b515c29
--- /dev/null
+++ b/unsupported/Eigen/src/Polynomials/Companion.h
@@ -0,0 +1,276 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Manuel Yguel <manuel.yguel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPANION_H
+#define EIGEN_COMPANION_H
+
+// This file requires the user to include
+// * Eigen/Core
+// * Eigen/src/PolynomialSolver.h
+
+namespace Eigen {
+
+namespace internal {
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+template <typename T>
+T radix(){ return 2; }
+
+template <typename T>
+T radix2(){ return radix<T>()*radix<T>(); }
+
+template<int Size>
+struct decrement_if_fixed_size
+{
+ enum {
+ ret = (Size == Dynamic) ? Dynamic : Size-1 };
+};
+
+#endif
+
+template< typename _Scalar, int _Deg >
+class companion
+{
+ public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg)
+
+ enum {
+ Deg = _Deg,
+ Deg_1=decrement_if_fixed_size<Deg>::ret
+ };
+
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef Matrix<Scalar, Deg, 1> RightColumn;
+ //typedef DiagonalMatrix< Scalar, Deg_1, Deg_1 > BottomLeftDiagonal;
+ typedef Matrix<Scalar, Deg_1, 1> BottomLeftDiagonal;
+
+ typedef Matrix<Scalar, Deg, Deg> DenseCompanionMatrixType;
+ typedef Matrix< Scalar, _Deg, Deg_1 > LeftBlock;
+ typedef Matrix< Scalar, Deg_1, Deg_1 > BottomLeftBlock;
+ typedef Matrix< Scalar, 1, Deg_1 > LeftBlockFirstRow;
+
+ typedef DenseIndex Index;
+
+ public:
+ EIGEN_STRONG_INLINE const _Scalar operator()(Index row, Index col ) const
+ {
+ if( m_bl_diag.rows() > col )
+ {
+ if( 0 < row ){ return m_bl_diag[col]; }
+ else{ return 0; }
+ }
+ else{ return m_monic[row]; }
+ }
+
+ public:
+ template<typename VectorType>
+ void setPolynomial( const VectorType& poly )
+ {
+ const Index deg = poly.size()-1;
+ m_monic = -1/poly[deg] * poly.head(deg);
+ //m_bl_diag.setIdentity( deg-1 );
+ m_bl_diag.setOnes(deg-1);
+ }
+
+ template<typename VectorType>
+ companion( const VectorType& poly ){
+ setPolynomial( poly ); }
+
+ public:
+ DenseCompanionMatrixType denseMatrix() const
+ {
+ const Index deg = m_monic.size();
+ const Index deg_1 = deg-1;
+ DenseCompanionMatrixType companion(deg,deg);
+ companion <<
+ ( LeftBlock(deg,deg_1)
+ << LeftBlockFirstRow::Zero(1,deg_1),
+ BottomLeftBlock::Identity(deg-1,deg-1)*m_bl_diag.asDiagonal() ).finished()
+ , m_monic;
+ return companion;
+ }
+
+
+
+ protected:
+ /** Helper function for the balancing algorithm.
+ * \returns true if the row and the column, having colNorm and rowNorm
+ * as norms, are balanced, false otherwise.
+ * colB and rowB are respectively the multipliers for
+ * the column and the row in order to balance them.
+ * */
+ bool balanced( Scalar colNorm, Scalar rowNorm,
+ bool& isBalanced, Scalar& colB, Scalar& rowB );
+
+ /** Helper function for the balancing algorithm.
+ * \returns true if the row and the column, having colNorm and rowNorm
+ * as norms, are balanced, false otherwise.
+ * colB and rowB are respectively the multipliers for
+ * the column and the row in order to balance them.
+ * */
+ bool balancedR( Scalar colNorm, Scalar rowNorm,
+ bool& isBalanced, Scalar& colB, Scalar& rowB );
+
+ public:
+ /**
+ * Balancing algorithm from B. N. PARLETT and C. REINSCH (1969)
+ * "Balancing a matrix for calculation of eigenvalues and eigenvectors"
+ * adapted to the case of companion matrices.
+ * A matrix with non-zero rows and non-zero columns is balanced
+ * with respect to a certain norm if the i-th row and the i-th column
+ * have the same norm for all i.
+ */
+ void balance();
+
+ protected:
+ RightColumn m_monic;
+ BottomLeftDiagonal m_bl_diag;
+};
+
+
+
+template< typename _Scalar, int _Deg >
+inline
+bool companion<_Scalar,_Deg>::balanced( Scalar colNorm, Scalar rowNorm,
+ bool& isBalanced, Scalar& colB, Scalar& rowB )
+{
+ if( Scalar(0) == colNorm || Scalar(0) == rowNorm ){ return true; }
+ else
+ {
+ //To find the balancing coefficients, if the radix is 2,
+ //one finds \f$ \sigma \f$ such that
+ // \f$ 2^{2\sigma-1} < rowNorm / colNorm \le 2^{2\sigma+1} \f$
+ // then the balancing coefficient for the row is \f$ 1/2^{\sigma} \f$
+ // and the balancing coefficient for the column is \f$ 2^{\sigma} \f$
+ rowB = rowNorm / radix<Scalar>();
+ colB = Scalar(1);
+ const Scalar s = colNorm + rowNorm;
+
+ while (colNorm < rowB)
+ {
+ colB *= radix<Scalar>();
+ colNorm *= radix2<Scalar>();
+ }
+
+ rowB = rowNorm * radix<Scalar>();
+
+ while (colNorm >= rowB)
+ {
+ colB /= radix<Scalar>();
+ colNorm /= radix2<Scalar>();
+ }
+
+ //This line is used to avoid insubstantial balancing
+ if ((rowNorm + colNorm) < Scalar(0.95) * s * colB)
+ {
+ isBalanced = false;
+ rowB = Scalar(1) / colB;
+ return false;
+ }
+ else{
+ return true; }
+ }
+}
+
+template< typename _Scalar, int _Deg >
+inline
+bool companion<_Scalar,_Deg>::balancedR( Scalar colNorm, Scalar rowNorm,
+ bool& isBalanced, Scalar& colB, Scalar& rowB )
+{
+ if( Scalar(0) == colNorm || Scalar(0) == rowNorm ){ return true; }
+ else
+ {
+ /**
+ * Set the norm of the column and the row to the geometric mean
+ * of the row and column norm
+ */
+ const _Scalar q = colNorm/rowNorm;
+ if( !isApprox( q, _Scalar(1) ) )
+ {
+ using std::sqrt;
+ rowB = sqrt( colNorm/rowNorm );
+ colB = Scalar(1)/rowB;
+
+ isBalanced = false;
+ return false;
+ }
+ else{
+ return true; }
+ }
+}
+
+
+template< typename _Scalar, int _Deg >
+void companion<_Scalar,_Deg>::balance()
+{
+ using std::abs;
+ EIGEN_STATIC_ASSERT( Deg == Dynamic || 1 < Deg, YOU_MADE_A_PROGRAMMING_MISTAKE );
+ const Index deg = m_monic.size();
+ const Index deg_1 = deg-1;
+
+ bool hasConverged=false;
+ while( !hasConverged )
+ {
+ hasConverged = true;
+ Scalar colNorm,rowNorm;
+ Scalar colB,rowB;
+
+ //First row, first column excluding the diagonal
+ //==============================================
+ colNorm = abs(m_bl_diag[0]);
+ rowNorm = abs(m_monic[0]);
+
+ //Compute balancing of the row and the column
+ if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) )
+ {
+ m_bl_diag[0] *= colB;
+ m_monic[0] *= rowB;
+ }
+
+ //Middle rows and columns excluding the diagonal
+ //==============================================
+ for( Index i=1; i<deg_1; ++i )
+ {
+ // column norm, excluding the diagonal
+ colNorm = abs(m_bl_diag[i]);
+
+ // row norm, excluding the diagonal
+ rowNorm = abs(m_bl_diag[i-1]) + abs(m_monic[i]);
+
+ //Compute balancing of the row and the column
+ if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) )
+ {
+ m_bl_diag[i] *= colB;
+ m_bl_diag[i-1] *= rowB;
+ m_monic[i] *= rowB;
+ }
+ }
+
+ //Last row, last column excluding the diagonal
+ //============================================
+ const Index ebl = m_bl_diag.size()-1;
+ VectorBlock<RightColumn,Deg_1> headMonic( m_monic, 0, deg_1 );
+ colNorm = headMonic.array().abs().sum();
+ rowNorm = abs( m_bl_diag[ebl] );
+
+ //Compute balancing of the row and the column
+ if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) )
+ {
+ headMonic *= colB;
+ m_bl_diag[ebl] *= rowB;
+ }
+ }
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPANION_H
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
new file mode 100644
index 0000000..cd5c04b
--- /dev/null
+++ b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
@@ -0,0 +1,389 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Manuel Yguel <manuel.yguel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_POLYNOMIAL_SOLVER_H
+#define EIGEN_POLYNOMIAL_SOLVER_H
+
+namespace Eigen {
+
+/** \ingroup Polynomials_Module
+ * \class PolynomialSolverBase.
+ *
+ * \brief Defined to be inherited by polynomial solvers: it provides
+ * convenient methods such as
+ * - real roots,
+ * - greatest, smallest complex roots,
+ * - real roots with greatest, smallest absolute real value,
+ * - greatest, smallest real roots.
+ *
+ * It stores the set of roots as a vector of complex numbers.
+ *
+ */
+template< typename _Scalar, int _Deg >
+class PolynomialSolverBase
+{
+ public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg)
+
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef std::complex<RealScalar> RootType;
+ typedef Matrix<RootType,_Deg,1> RootsType;
+
+ typedef DenseIndex Index;
+
+ protected:
+ template< typename OtherPolynomial >
+ inline void setPolynomial( const OtherPolynomial& poly ){
+ m_roots.resize(poly.size()); }
+
+ public:
+ template< typename OtherPolynomial >
+ inline PolynomialSolverBase( const OtherPolynomial& poly ){
+ setPolynomial( poly ); }
+
+ inline PolynomialSolverBase(){}
+
+ public:
+ /** \returns the complex roots of the polynomial */
+ inline const RootsType& roots() const { return m_roots; }
+
+ public:
+ /** Clears and fills the back insertion sequence with the real roots of the polynomial,
+ * i.e. the real parts of the complex roots whose imaginary part has an
+ * absolute value smaller than absImaginaryThreshold.
+ * absImaginaryThreshold takes the dummy_precision associated
+ * with the _Scalar template parameter of the PolynomialSolver class as the default value.
+ *
+ * \param[out] bi_seq : the back insertion sequence (stl concept)
+ * \param[in] absImaginaryThreshold : the maximum bound of the imaginary part of a complex
+ * number that is considered as real.
+ * */
+ template<typename Stl_back_insertion_sequence>
+ inline void realRoots( Stl_back_insertion_sequence& bi_seq,
+ const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
+ {
+ using std::abs;
+ bi_seq.clear();
+ for(Index i=0; i<m_roots.size(); ++i )
+ {
+ if( abs( m_roots[i].imag() ) < absImaginaryThreshold ){
+ bi_seq.push_back( m_roots[i].real() ); }
+ }
+ }
+
+ protected:
+ template<typename squaredNormBinaryPredicate>
+ inline const RootType& selectComplexRoot_withRespectToNorm( squaredNormBinaryPredicate& pred ) const
+ {
+ Index res=0;
+ RealScalar norm2 = numext::abs2( m_roots[0] );
+ for( Index i=1; i<m_roots.size(); ++i )
+ {
+ const RealScalar currNorm2 = numext::abs2( m_roots[i] );
+ if( pred( currNorm2, norm2 ) ){
+ res=i; norm2=currNorm2; }
+ }
+ return m_roots[res];
+ }
+
+ public:
+ /**
+ * \returns the complex root with greatest norm.
+ */
+ inline const RootType& greatestRoot() const
+ {
+ std::greater<Scalar> greater;
+ return selectComplexRoot_withRespectToNorm( greater );
+ }
+
+ /**
+ * \returns the complex root with smallest norm.
+ */
+ inline const RootType& smallestRoot() const
+ {
+ std::less<Scalar> less;
+ return selectComplexRoot_withRespectToNorm( less );
+ }
+
+ protected:
+ template<typename squaredRealPartBinaryPredicate>
+ inline const RealScalar& selectRealRoot_withRespectToAbsRealPart(
+ squaredRealPartBinaryPredicate& pred,
+ bool& hasArealRoot,
+ const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
+ {
+ using std::abs;
+ hasArealRoot = false;
+ Index res=0;
+ RealScalar abs2(0);
+
+ for( Index i=0; i<m_roots.size(); ++i )
+ {
+ if( abs( m_roots[i].imag() ) < absImaginaryThreshold )
+ {
+ if( !hasArealRoot )
+ {
+ hasArealRoot = true;
+ res = i;
+ abs2 = m_roots[i].real() * m_roots[i].real();
+ }
+ else
+ {
+ const RealScalar currAbs2 = m_roots[i].real() * m_roots[i].real();
+ if( pred( currAbs2, abs2 ) )
+ {
+ abs2 = currAbs2;
+ res = i;
+ }
+ }
+ }
+ else
+ {
+ if( abs( m_roots[i].imag() ) < abs( m_roots[res].imag() ) ){
+ res = i; }
+ }
+ }
+ return numext::real_ref(m_roots[res]);
+ }
+
+
+ template<typename RealPartBinaryPredicate>
+ inline const RealScalar& selectRealRoot_withRespectToRealPart(
+ RealPartBinaryPredicate& pred,
+ bool& hasArealRoot,
+ const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
+ {
+ using std::abs;
+ hasArealRoot = false;
+ Index res=0;
+ RealScalar val(0);
+
+ for( Index i=0; i<m_roots.size(); ++i )
+ {
+ if( abs( m_roots[i].imag() ) < absImaginaryThreshold )
+ {
+ if( !hasArealRoot )
+ {
+ hasArealRoot = true;
+ res = i;
+ val = m_roots[i].real();
+ }
+ else
+ {
+ const RealScalar curr = m_roots[i].real();
+ if( pred( curr, val ) )
+ {
+ val = curr;
+ res = i;
+ }
+ }
+ }
+ else
+ {
+ if( abs( m_roots[i].imag() ) < abs( m_roots[res].imag() ) ){
+ res = i; }
+ }
+ }
+ return numext::real_ref(m_roots[res]);
+ }
+
+ public:
+ /**
+ * \returns a real root with greatest absolute magnitude.
+ * A real root is defined as the real part of a complex root whose absolute imaginary
+ * part is smaller than absImaginaryThreshold.
+ * absImaginaryThreshold takes the dummy_precision associated
+ * with the _Scalar template parameter of the PolynomialSolver class as the default value.
+ * If no real root is found the boolean hasArealRoot is set to false and the real part of
+ * the root with smallest absolute imaginary part is returned instead.
+ *
+ * \param[out] hasArealRoot : boolean true if a real root is found according to the
+ * absImaginaryThreshold criterion, false otherwise.
+ * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide
+ * whether or not a root is real.
+ */
+ inline const RealScalar& absGreatestRealRoot(
+ bool& hasArealRoot,
+ const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
+ {
+ std::greater<Scalar> greater;
+ return selectRealRoot_withRespectToAbsRealPart( greater, hasArealRoot, absImaginaryThreshold );
+ }
+
+
+ /**
+ * \returns a real root with smallest absolute magnitude.
+ * A real root is defined as the real part of a complex root whose absolute imaginary
+ * part is smaller than absImaginaryThreshold.
+ * absImaginaryThreshold takes the dummy_precision associated
+ * with the _Scalar template parameter of the PolynomialSolver class as the default value.
+ * If no real root is found the boolean hasArealRoot is set to false and the real part of
+ * the root with smallest absolute imaginary part is returned instead.
+ *
+ * \param[out] hasArealRoot : boolean true if a real root is found according to the
+ * absImaginaryThreshold criterion, false otherwise.
+ * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide
+ * whether or not a root is real.
+ */
+ inline const RealScalar& absSmallestRealRoot(
+ bool& hasArealRoot,
+ const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
+ {
+ std::less<Scalar> less;
+ return selectRealRoot_withRespectToAbsRealPart( less, hasArealRoot, absImaginaryThreshold );
+ }
+
+
+ /**
+ * \returns the real root with greatest value.
+ * A real root is defined as the real part of a complex root whose absolute imaginary
+ * part is smaller than absImaginaryThreshold.
+ * absImaginaryThreshold takes the dummy_precision associated
+ * with the _Scalar template parameter of the PolynomialSolver class as the default value.
+ * If no real root is found the boolean hasArealRoot is set to false and the real part of
+ * the root with smallest absolute imaginary part is returned instead.
+ *
+ * \param[out] hasArealRoot : boolean true if a real root is found according to the
+ * absImaginaryThreshold criterion, false otherwise.
+ * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide
+ * whether or not a root is real.
+ */
+ inline const RealScalar& greatestRealRoot(
+ bool& hasArealRoot,
+ const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
+ {
+ std::greater<Scalar> greater;
+ return selectRealRoot_withRespectToRealPart( greater, hasArealRoot, absImaginaryThreshold );
+ }
+
+
+ /**
+ * \returns the real root with smallest value.
+ * A real root is defined as the real part of a complex root whose absolute imaginary
+ * part is smaller than absImaginaryThreshold.
+ * absImaginaryThreshold takes the dummy_precision associated
+ * with the _Scalar template parameter of the PolynomialSolver class as the default value.
+ * If no real root is found the boolean hasArealRoot is set to false and the real part of
+ * the root with smallest absolute imaginary part is returned instead.
+ *
+ * \param[out] hasArealRoot : boolean true if a real root is found according to the
+ * absImaginaryThreshold criterion, false otherwise.
+ * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide
+ * whether or not a root is real.
+ */
+ inline const RealScalar& smallestRealRoot(
+ bool& hasArealRoot,
+ const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
+ {
+ std::less<Scalar> less;
+ return selectRealRoot_withRespectToRealPart( less, hasArealRoot, absImaginaryThreshold );
+ }
+
+ protected:
+ RootsType m_roots;
+};
+
+#define EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( BASE ) \
+ typedef typename BASE::Scalar Scalar; \
+ typedef typename BASE::RealScalar RealScalar; \
+ typedef typename BASE::RootType RootType; \
+ typedef typename BASE::RootsType RootsType;
+
+
+
+/** \ingroup Polynomials_Module
+ *
+ * \class PolynomialSolver
+ *
+ * \brief A polynomial solver
+ *
+ * Computes the complex roots of a real polynomial.
+ *
+ * \param _Scalar the scalar type, i.e., the type of the polynomial coefficients
+ * \param _Deg the degree of the polynomial, can be a compile time value or Dynamic.
+ * Notice that the number of polynomial coefficients is _Deg+1.
+ *
+ * This class implements a polynomial solver and provides convenient methods such as
+ * - real roots,
+ * - greatest, smallest complex roots,
+ * - real roots with greatest, smallest absolute real value.
+ * - greatest, smallest real roots.
+ *
+ * WARNING: this polynomial solver is experimental, part of the unsupported Eigen modules.
+ *
+ *
+ * Currently a QR algorithm is used to compute the eigenvalues of the companion matrix of
+ * the polynomial in order to compute its roots.
+ * This assumes that the complex moduli of the roots are all distinct: for instance, there
+ * should be no multiple roots or conjugate roots.
+ * With 32-bit (float) floating-point types this problem shows up frequently.
+ * However, for 64-bit (double) floating-point types and small polynomial degrees (<20),
+ * correct accuracy is almost always reached even in these cases.
+ * A usage sketch follows the class definition below.
+ */
+template< typename _Scalar, int _Deg >
+class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
+{
+ public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg)
+
+ typedef PolynomialSolverBase<_Scalar,_Deg> PS_Base;
+ EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base )
+
+ typedef Matrix<Scalar,_Deg,_Deg> CompanionMatrixType;
+ typedef EigenSolver<CompanionMatrixType> EigenSolverType;
+
+ public:
+ /** Computes the complex roots of a new polynomial. */
+ template< typename OtherPolynomial >
+ void compute( const OtherPolynomial& poly )
+ {
+ eigen_assert( Scalar(0) != poly[poly.size()-1] );
+ internal::companion<Scalar,_Deg> companion( poly );
+ companion.balance();
+ m_eigenSolver.compute( companion.denseMatrix() );
+ m_roots = m_eigenSolver.eigenvalues();
+ }
+
+ public:
+ template< typename OtherPolynomial >
+ inline PolynomialSolver( const OtherPolynomial& poly ){
+ compute( poly ); }
+
+ inline PolynomialSolver(){}
+
+ protected:
+ using PS_Base::m_roots;
+ EigenSolverType m_eigenSolver;
+};
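+
+// A minimal usage sketch (a hedged example, not part of this file). The
+// coefficients are ordered by increasing degree, so the vector below encodes
+// 2 - 3x + x^2 = (x - 1)(x - 2):
+//
+//   Vector3d coeffs(2., -3., 1.);
+//   PolynomialSolver<double, 2> psolve(coeffs);
+//   std::cout << psolve.roots().transpose() << std::endl;  // complex roots
+//
+//   std::vector<double> realRoots;
+//   psolve.realRoots(realRoots);  // keeps only the numerically real roots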
+
+
+template< typename _Scalar >
+class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1>
+{
+ public:
+ typedef PolynomialSolverBase<_Scalar,1> PS_Base;
+ EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base )
+
+ public:
+ /** Computes the complex roots of a new polynomial. */
+ template< typename OtherPolynomial >
+ void compute( const OtherPolynomial& poly )
+ {
+ eigen_assert( Scalar(0) != poly[poly.size()-1] );
+ m_roots[0] = -poly[0]/poly[poly.size()-1];
+ }
+
+ protected:
+ using PS_Base::m_roots;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_POLYNOMIAL_SOLVER_H
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
new file mode 100644
index 0000000..2bb8bc8
--- /dev/null
+++ b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
@@ -0,0 +1,143 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Manuel Yguel <manuel.yguel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_POLYNOMIAL_UTILS_H
+#define EIGEN_POLYNOMIAL_UTILS_H
+
+namespace Eigen {
+
+/** \ingroup Polynomials_Module
+ * \returns the evaluation of the polynomial at x using Horner's algorithm.
+ *
+ * \param[in] poly : the vector of coefficients of the polynomial ordered
+ * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial
+ * e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$.
+ * \param[in] x : the value to evaluate the polynomial at.
+ *
+ * <i><b>Note for stability:</b></i>
+ * <dd> \f$ |x| \le 1 \f$ </dd>
+ */
+template <typename Polynomials, typename T>
+inline
+T poly_eval_horner( const Polynomials& poly, const T& x )
+{
+ T val=poly[poly.size()-1];
+ for(DenseIndex i=poly.size()-2; i>=0; --i ){
+ val = val*x + poly[i]; }
+ return val;
+}
+
+/** \ingroup Polynomials_Module
+ * \returns the evaluation of the polynomial at x using a stabilized Horner algorithm.
+ *
+ * \param[in] poly : the vector of coefficients of the polynomial ordered
+ * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial
+ * e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$.
+ * \param[in] x : the value to evaluate the polynomial at.
+ */
+template <typename Polynomials, typename T>
+inline
+T poly_eval( const Polynomials& poly, const T& x )
+{
+ typedef typename NumTraits<T>::Real Real;
+
+ if( numext::abs2( x ) <= Real(1) ){
+ return poly_eval_horner( poly, x ); }
+ else
+ {
+ T val=poly[0];
+ T inv_x = T(1)/x;
+ for( DenseIndex i=1; i<poly.size(); ++i ){
+ val = val*inv_x + poly[i]; }
+
+ return std::pow(x,(T)(poly.size()-1)) * val;
+ }
+}
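+
+// For instance, with poly = [1, 0, 3], i.e. \f$ 1 + 3x^2 \f$ (a hedged,
+// self-contained example):
+//
+//   Vector3d poly(1., 0., 3.);
+//   double y = poly_eval(poly, 2.);  // 1 + 3*2^2 = 13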
+
+/** \ingroup Polynomials_Module
+ * \returns a maximum bound for the absolute value of any root of the polynomial.
+ *
+ * \param[in] poly : the vector of coefficients of the polynomial ordered
+ * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial
+ * e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$.
+ *
+ * <i><b>Precondition:</b></i>
+ * <dd> the leading coefficient of the input polynomial poly must be non-zero </dd>
+ */
+template <typename Polynomial>
+inline
+typename NumTraits<typename Polynomial::Scalar>::Real cauchy_max_bound( const Polynomial& poly )
+{
+ using std::abs;
+ typedef typename Polynomial::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real Real;
+
+ eigen_assert( Scalar(0) != poly[poly.size()-1] );
+ const Scalar inv_leading_coeff = Scalar(1)/poly[poly.size()-1];
+ Real cb(0);
+
+ for( DenseIndex i=0; i<poly.size()-1; ++i ){
+ cb += abs(poly[i]*inv_leading_coeff); }
+ return cb + Real(1);
+}
+
+/** \ingroup Polynomials_Module
+ * \returns a minimum bound for the absolute value of any non-zero root of the polynomial.
+ * \param[in] poly : the vector of coefficients of the polynomial ordered
+ * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial
+ * e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$.
+ */
+template <typename Polynomial>
+inline
+typename NumTraits<typename Polynomial::Scalar>::Real cauchy_min_bound( const Polynomial& poly )
+{
+ using std::abs;
+ typedef typename Polynomial::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real Real;
+
+ DenseIndex i=0;
+ while( i<poly.size()-1 && Scalar(0) == poly[i] ){ ++i; }
+ if( poly.size()-1 == i ){
+ return Real(1); }
+
+ const Scalar inv_min_coeff = Scalar(1)/poly[i];
+ Real cb(1);
+ for( DenseIndex j=i+1; j<poly.size(); ++j ){
+ cb += abs(poly[j]*inv_min_coeff); }
+ return Real(1)/cb;
+}
+
+/** \ingroup Polynomials_Module
+ * Given the roots of a polynomial, computes the coefficients in the
+ * monomial basis of the monic polynomial with the same roots and minimal degree.
+ * If RootVector is a vector of complex numbers, Polynomial should also be a vector
+ * of complex numbers.
+ * \param[in] rv : a vector containing the roots of a polynomial.
+ * \param[out] poly : the vector of coefficients of the polynomial ordered
+ * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial
+ * e.g. \f$ 3 + x^2 \f$ is stored as a vector \f$ [ 3, 0, 1 ] \f$.
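+ *
+ * A minimal usage sketch (illustrative roots):
+ * \code
+ * // roots {1, 2} yield the monic polynomial (x-1)(x-2) = 2 - 3x + x^2
+ * Eigen::VectorXd roots(2); roots << 1.0, 2.0;
+ * Eigen::VectorXd coeffs;
+ * Eigen::roots_to_monicPolynomial(roots, coeffs); // coeffs == [2, -3, 1]
+ * \endcode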
+ */
+template <typename RootVector, typename Polynomial>
+void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly )
+{
+
+ typedef typename Polynomial::Scalar Scalar;
+
+ poly.setZero( rv.size()+1 );
+ poly[0] = -rv[0]; poly[1] = Scalar(1);
+ for( DenseIndex i=1; i< rv.size(); ++i )
+ {
+ for( DenseIndex j=i+1; j>0; --j ){ poly[j] = poly[j-1] - rv[i]*poly[j]; }
+ poly[0] = -rv[i]*poly[0];
+ }
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_POLYNOMIAL_UTILS_H
diff --git a/unsupported/Eigen/src/SVD/BDCSVD.h b/unsupported/Eigen/src/SVD/BDCSVD.h
new file mode 100644
index 0000000..11d4882
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/BDCSVD.h
@@ -0,0 +1,748 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This implementation follows the research report "A Divide-And-Conquer
+// Algorithm for the Bidiagonal SVD" by Ming Gu and Stanley C. Eisenstat.
+// The variable names in the code correspond to the names used in their
+// report.
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BDCSVD_H
+#define EIGEN_BDCSVD_H
+
+// Scale of the threshold used by the deflation tests; 1e-16 is of the
+// order of the machine epsilon for double.
+#define EPSILON 1e-16
+
+// Problem size below which divide() falls back to a JacobiSVD.
+#define ALGOSWAP 32
+
+namespace Eigen {
+/** \ingroup SVD_Module
+ *
+ *
+ * \class BDCSVD
+ *
+ * \brief Bidiagonal Divide and Conquer SVD
+ *
+ * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+ *
+ * The interface is intended to closely mirror that of JacobiSVD.
+ * This class should be used to speed up the computation of the SVD of large matrices.
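+ *
+ * A minimal usage sketch (matrix type, sizes and options are illustrative):
+ * \code
+ * Eigen::MatrixXf A = Eigen::MatrixXf::Random(1000, 500);
+ * Eigen::BDCSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
+ * Eigen::VectorXf b = Eigen::VectorXf::Random(1000);
+ * Eigen::VectorXf x = svd.solve(b); // least-squares solution of A x = b
+ * \endcode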
+ */
+template<typename _MatrixType>
+class BDCSVD : public SVDBase<_MatrixType>
+{
+ typedef SVDBase<_MatrixType> Base;
+
+public:
+ using Base::rows;
+ using Base::cols;
+
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+ MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+ MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+ MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+ MatrixVType;
+ typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColType;
+ typedef Matrix<Scalar, Dynamic, Dynamic> MatrixX;
+ typedef Matrix<RealScalar, Dynamic, Dynamic> MatrixXr;
+ typedef Matrix<RealScalar, Dynamic, 1> VectorType;
+
+ /** \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via BDCSVD::compute(const MatrixType&).
+ */
+ BDCSVD()
+ : SVDBase<_MatrixType>::SVDBase(),
+ algoswap(ALGOSWAP)
+ {}
+
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem size.
+ * \sa BDCSVD()
+ */
+ BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase(),
+ algoswap(ALGOSWAP)
+ {
+ allocate(rows, cols, computationOptions);
+ }
+
+ /** \brief Constructor performing the decomposition of given matrix.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing one to specify whether full or thin U or V unitaries should be computed.
+ * By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase(),
+ algoswap(ALGOSWAP)
+ {
+ compute(matrix, computationOptions);
+ }
+
+ ~BDCSVD()
+ {
+ }
+ /** \brief Method performing the decomposition of given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing one to specify whether full or thin U or V unitaries should be computed.
+ * By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+ /** \brief Method performing the decomposition of given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix)
+ {
+ return compute(matrix, this->m_computationOptions);
+ }
+
+ void setSwitchSize(int s)
+ {
+ eigen_assert(s>3 && "BDCSVD: the size of the algorithm switch has to be greater than 3");
+ algoswap = s;
+ }
+
+
+ /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+ *
+ * \param b the right-hand side of the equation to solve.
+ *
+ * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
+ *
+ * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
+ * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<BDCSVD, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(this->m_isInitialized && "BDCSVD is not initialized.");
+ eigen_assert(SVDBase<_MatrixType>::computeU() && SVDBase<_MatrixType>::computeV() &&
+ "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
+ return internal::solve_retval<BDCSVD, Rhs>(*this, b.derived());
+ }
+
+
+ const MatrixUType& matrixU() const
+ {
+ eigen_assert(this->m_isInitialized && "SVD is not initialized.");
+ if (isTranspose){
+ eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?");
+ return this->m_matrixV;
+ }
+ else
+ {
+ eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
+ return this->m_matrixU;
+ }
+
+ }
+
+
+ const MatrixVType& matrixV() const
+ {
+ eigen_assert(this->m_isInitialized && "SVD is not initialized.");
+ if (isTranspose){
+ eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?");
+ return this->m_matrixU;
+ }
+ else
+ {
+ eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
+ return this->m_matrixV;
+ }
+ }
+
+private:
+ void allocate(Index rows, Index cols, unsigned int computationOptions);
+ void divide (Index firstCol, Index lastCol, Index firstRowW,
+ Index firstColW, Index shift);
+ void deflation43(Index firstCol, Index shift, Index i, Index size);
+ void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
+ void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
+ void copyUV(const MatrixXr& naiveU, const MatrixXr& naiveV, const MatrixX& householderU, const MatrixX& householderV);
+
+protected:
+ MatrixXr m_naiveU, m_naiveV;
+ MatrixXr m_computed;
+ Index nRec;
+ int algoswap;
+ bool isTranspose, compU, compV;
+
+}; //end class BDCSVD
+
+
+// Method to allocate and initialize the matrices and attributes
+template<typename MatrixType>
+void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+ isTranspose = (cols > rows);
+ if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
+ m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize );
+ if (isTranspose){
+ compU = this->computeU();
+ compV = this->computeV();
+ }
+ else
+ {
+ compV = this->computeU();
+ compU = this->computeV();
+ }
+ if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 );
+ else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 );
+
+ if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize);
+
+
+ //should be changed for a cleaner implementation
+ if (isTranspose){
+ bool aux;
+ if (this->computeU()||this->computeV()){
+ aux = this->m_computeFullU;
+ this->m_computeFullU = this->m_computeFullV;
+ this->m_computeFullV = aux;
+ aux = this->m_computeThinU;
+ this->m_computeThinU = this->m_computeThinV;
+ this->m_computeThinV = aux;
+ }
+ }
+}// end allocate
+
+// Specialization of compute() for integer matrices: the decomposition is trivial, everything is set to zero
+template<>
+SVDBase<Matrix<int, Dynamic, Dynamic> >&
+BDCSVD<Matrix<int, Dynamic, Dynamic> >::compute(const MatrixType& matrix, unsigned int computationOptions) {
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+ this->m_nonzeroSingularValues = 0;
+ m_computed = Matrix<int, Dynamic, Dynamic>::Zero(rows(), cols());
+ for (int i=0; i<this->m_diagSize; i++) {
+ this->m_singularValues.coeffRef(i) = 0;
+ }
+ if (this->m_computeFullU) this->m_matrixU = Matrix<int, Dynamic, Dynamic>::Zero(rows(), rows());
+ if (this->m_computeFullV) this->m_matrixV = Matrix<int, Dynamic, Dynamic>::Zero(cols(), cols());
+ this->m_isInitialized = true;
+ return *this;
+}
+
+
+// Method computing the BDCSVD
+template<typename MatrixType>
+SVDBase<MatrixType>&
+BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
+{
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+ using std::abs;
+
+ //**** step 1 Bidiagonalization
+ MatrixType copy;
+ if (isTranspose) copy = matrix.adjoint();
+ else copy = matrix;
+
+ internal::UpperBidiagonalization<MatrixX > bid(copy);
+
+ //**** step 2 Divide
+ // this is ugly and has to be redone (beware of the complex cast)
+ MatrixXr temp;
+ temp = bid.bidiagonal().toDenseMatrix().transpose();
+ m_computed.setZero();
+ for (int i=0; i<this->m_diagSize - 1; i++) {
+ m_computed(i, i) = temp(i, i);
+ m_computed(i + 1, i) = temp(i + 1, i);
+ }
+ m_computed(this->m_diagSize - 1, this->m_diagSize - 1) = temp(this->m_diagSize - 1, this->m_diagSize - 1);
+ divide(0, this->m_diagSize - 1, 0, 0, 0);
+
+ //**** step 3 copy
+ for (int i=0; i<this->m_diagSize; i++) {
+ RealScalar a = abs(m_computed.coeff(i, i));
+ this->m_singularValues.coeffRef(i) = a;
+ if (a == 0){
+ this->m_nonzeroSingularValues = i;
+ break;
+ }
+ else if (i == this->m_diagSize - 1)
+ {
+ this->m_nonzeroSingularValues = i + 1;
+ break;
+ }
+ }
+ copyUV(m_naiveV, m_naiveU, bid.householderU(), bid.householderV());
+ this->m_isInitialized = true;
+ return *this;
+}// end compute
+
+
+template<typename MatrixType>
+void BDCSVD<MatrixType>::copyUV(const MatrixXr& naiveU, const MatrixXr& naiveV, const MatrixX& householderU, const MatrixX& householderV){
+ if (this->computeU()){
+ MatrixX temp = MatrixX::Zero(naiveU.rows(), naiveU.cols());
+ temp.real() = naiveU;
+ if (this->m_computeThinU){
+ this->m_matrixU = MatrixX::Identity(householderU.cols(), this->m_nonzeroSingularValues );
+ this->m_matrixU.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues) =
+ temp.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues);
+ this->m_matrixU = householderU * this->m_matrixU ;
+ }
+ else
+ {
+ this->m_matrixU = MatrixX::Identity(householderU.cols(), householderU.cols());
+ this->m_matrixU.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
+ this->m_matrixU = householderU * this->m_matrixU ;
+ }
+ }
+ if (this->computeV()){
+ MatrixX temp = MatrixX::Zero(naiveV.rows(), naiveV.cols());
+ temp.real() = naiveV;
+ if (this->m_computeThinV){
+ this->m_matrixV = MatrixX::Identity(householderV.cols(),this->m_nonzeroSingularValues );
+ this->m_matrixV.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues) =
+ temp.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues);
+ this->m_matrixV = householderV * this->m_matrixV ;
+ }
+ else
+ {
+ this->m_matrixV = MatrixX::Identity(householderV.cols(), householderV.cols());
+ this->m_matrixV.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
+ this->m_matrixV = householderV * this->m_matrixV;
+ }
+ }
+}
+
+// The divide algorithm works "in place": we always operate on subsets of the same matrix. The divide method takes as arguments the
+// position of the submatrix we are currently working on.
+
+//@param firstCol : The Index of the first column of the submatrix of m_computed and of m_naiveU;
+//@param lastCol : The Index of the last column of the submatrix of m_computed and of m_naiveU;
+// lastCol + 1 - firstCol is the size of the submatrix.
+//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)
+//@param firstColW : Same as firstRowW but for columns.
+//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. This is because we want the last column of the U submatrix
+// to become the first column (*coeff) and all the other columns to shift to the right. More details can be found in the reference paper.
+template<typename MatrixType>
+void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW,
+ Index firstColW, Index shift)
+{
+ // requires nbRows = nbCols + 1;
+ using std::pow;
+ using std::sqrt;
+ using std::abs;
+ const Index n = lastCol - firstCol + 1;
+ const Index k = n/2;
+ RealScalar alphaK;
+ RealScalar betaK;
+ RealScalar r0;
+ RealScalar lambda, phi, c0, s0;
+ MatrixXr l, f;
+ // For small matrices we fall back to JacobiSVD, which is more efficient
+ // in that case.
+ if (n < algoswap){
+ JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n),
+ ComputeFullU | (ComputeFullV * compV)) ;
+ if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU();
+ else
+ {
+ m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0);
+ m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n);
+ }
+ if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV();
+ m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
+ for (int i=0; i<n; i++)
+ {
+ m_computed(firstCol + shift + i, firstCol + shift +i) = b.singularValues().coeffRef(i);
+ }
+ return;
+ }
+ // We use the divide and conquer algorithm
+ alphaK = m_computed(firstCol + k, firstCol + k);
+ betaK = m_computed(firstCol + k + 1, firstCol + k);
+ // The divides must be done in this order to obtain correct results: divide() changes the data inside the submatrices,
+ // and the divide of the right submatrix reads one column of the left submatrix. That is why we need to treat the
+ // right submatrix before the left one.
+ divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
+ divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
+ if (compU)
+ {
+ lambda = m_naiveU(firstCol + k, firstCol + k);
+ phi = m_naiveU(firstCol + k + 1, lastCol + 1);
+ }
+ else
+ {
+ lambda = m_naiveU(1, firstCol + k);
+ phi = m_naiveU(0, lastCol + 1);
+ }
+ r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda))
+ + abs(betaK * phi) * abs(betaK * phi));
+ if (compU)
+ {
+ l = m_naiveU.row(firstCol + k).segment(firstCol, k);
+ f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
+ }
+ else
+ {
+ l = m_naiveU.row(1).segment(firstCol, k);
+ f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
+ }
+ if (compV) m_naiveV(firstRowW+k, firstColW) = 1;
+ if (r0 == 0)
+ {
+ c0 = 1;
+ s0 = 0;
+ }
+ else
+ {
+ c0 = alphaK * lambda / r0;
+ s0 = betaK * phi / r0;
+ }
+ if (compU)
+ {
+ MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
+ // we shift Q1 to the right
+ for (Index i = firstCol + k - 1; i >= firstCol; i--)
+ {
+ m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1);
+ }
+ // we place q1 in the leftmost column, scaled by c0
+ m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0);
+ // last column = q1 * - s0
+ m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0));
+ // first column = q2 * s0
+ m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) <<
+ m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0;
+ // q2 *= c0
+ m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
+ }
+ else
+ {
+ RealScalar q1 = (m_naiveU(0, firstCol + k));
+ // we shift Q1 to the right
+ for (Index i = firstCol + k - 1; i >= firstCol; i--)
+ {
+ m_naiveU(0, i + 1) = m_naiveU(0, i);
+ }
+ // we place q1 in the leftmost column, scaled by c0
+ m_naiveU(0, firstCol) = (q1 * c0);
+ // last column = q1 * - s0
+ m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
+ // first column = q2 * s0
+ m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0;
+ // q2 *= c0
+ m_naiveU(1, lastCol + 1) *= c0;
+ m_naiveU.row(1).segment(firstCol + 1, k).setZero();
+ m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
+ }
+ m_computed(firstCol + shift, firstCol + shift) = r0;
+ m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real();
+ m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real();
+
+
+ // The line below performs the deflation of the matrix for the third part of the algorithm.
+ // The third part of the algorithm is a fast SVD on the matrix m_computed, which works thanks to the deflation.
+
+ deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
+
+ // Third part of the algorithm: since the real third part is not implemented yet, we use a JacobiSVD instead
+ JacobiSVD<MatrixXr> res= JacobiSVD<MatrixXr>(m_computed.block(firstCol + shift, firstCol +shift, n + 1, n),
+ ComputeFullU | (ComputeFullV * compV)) ;
+ if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= res.matrixU();
+ else m_naiveU.block(0, firstCol, 2, n + 1) *= res.matrixU();
+
+ if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= res.matrixV();
+ m_computed.block(firstCol + shift, firstCol + shift, n, n) << MatrixXr::Zero(n, n);
+ for (int i=0; i<n; i++)
+ m_computed(firstCol + shift + i, firstCol + shift +i) = res.singularValues().coeffRef(i);
+ // end of the third part
+
+
+}// end divide
+
+
+// pages 12-13 of the report
+// i >= 1, d_i almost zero and z_i nonzero.
+// We use a rotation, applied on the left of M, to zero out z_i
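+// A sketch of the applied Givens rotation (with d_0 = m_computed(firstCol + shift, firstCol + shift)
+// and r = sqrt(d_0^2 + z_i^2)):
+//   [  c  s ] [ d_0 ]   [ r ]
+//   [ -s  c ] [ z_i ] = [ 0 ],   where c = d_0 / r and s = z_i / r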
+template <typename MatrixType>
+void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size){
+ using std::abs;
+ using std::sqrt;
+ using std::pow;
+ RealScalar c = m_computed(firstCol + shift, firstCol + shift);
+ RealScalar s = m_computed(i, firstCol + shift);
+ RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
+ if (r == 0){
+ m_computed(i, i)=0;
+ return;
+ }
+ c/=r;
+ s/=r;
+ m_computed(firstCol + shift, firstCol + shift) = r;
+ m_computed(i, firstCol + shift) = 0;
+ m_computed(i, i) = 0;
+ if (compU){
+ m_naiveU.col(firstCol).segment(firstCol,size) =
+ c * m_naiveU.col(firstCol).segment(firstCol, size) -
+ s * m_naiveU.col(i).segment(firstCol, size) ;
+
+ m_naiveU.col(i).segment(firstCol, size) =
+ (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) +
+ (s/c) * m_naiveU.col(firstCol).segment(firstCol,size);
+ }
+}// end deflation 43
+
+
+// page 13
+// i,j >= 1, i != j and |d_i - d_j| < epsilon * norm2(M)
+// We apply two rotations so that z_j becomes 0
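+// As in deflation43, the rotation coefficients are normalized by r = sqrt(c^2 + s^2),
+// so that the rotated pair of entries becomes (r, 0), zeroing z_j.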
+template <typename MatrixType>
+void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){
+ using std::abs;
+ using std::sqrt;
+ using std::conj;
+ using std::pow;
+ RealScalar c = m_computed(firstColm, firstColm + j - 1);
+ RealScalar s = m_computed(firstColm, firstColm + i - 1);
+ RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
+ if (r==0){
+ m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
+ return;
+ }
+ c/=r;
+ s/=r;
+ m_computed(firstColm + i, firstColm) = r;
+ m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
+ m_computed(firstColm + j, firstColm) = 0;
+ if (compU){
+ m_naiveU.col(firstColu + i).segment(firstColu, size) =
+ c * m_naiveU.col(firstColu + i).segment(firstColu, size) -
+ s * m_naiveU.col(firstColu + j).segment(firstColu, size) ;
+
+ m_naiveU.col(firstColu + j).segment(firstColu, size) =
+ (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) +
+ (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size);
+ }
+ if (compV){
+ m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) =
+ c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) +
+ s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ;
+
+ m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) =
+ (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) -
+ (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1);
+ }
+}// end deflation 44
+
+
+
+template <typename MatrixType>
+void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){
+ //condition 4.1
+ RealScalar EPS = EPSILON * (std::max<RealScalar>(m_computed(firstCol + shift + 1, firstCol + shift + 1), m_computed(firstCol + k, firstCol + k)));
+ const Index length = lastCol + 1 - firstCol;
+ if (m_computed(firstCol + shift, firstCol + shift) < EPS){
+ m_computed(firstCol + shift, firstCol + shift) = EPS;
+ }
+ //condition 4.2
+ for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){
+ if (std::abs(m_computed(i, firstCol + shift)) < EPS){
+ m_computed(i, firstCol + shift) = 0;
+ }
+ }
+
+ //condition 4.3
+ for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){
+ if (m_computed(i, i) < EPS){
+ deflation43(firstCol, shift, i, length);
+ }
+ }
+
+ //condition 4.4
+
+ Index i=firstCol + shift + 1, j=firstCol + shift + k + 1;
+ // we store the final position of each entry
+ Index *permutation = new Index[length];
+
+ for (Index p =1; p < length; p++) {
+ if (i> firstCol + shift + k){
+ permutation[p] = j;
+ j++;
+ } else if (j> lastCol + shift)
+ {
+ permutation[p] = i;
+ i++;
+ }
+ else
+ {
+ if (m_computed(i, i) < m_computed(j, j)){
+ permutation[p] = j;
+ j++;
+ }
+ else
+ {
+ permutation[p] = i;
+ i++;
+ }
+ }
+ }
+ // we apply the permutation
+ RealScalar aux;
+ // we store the current index of each column
+ // and the column corresponding to each index
+ Index *realInd = new Index[length];
+ Index *realCol = new Index[length];
+ for (int pos = 0; pos< length; pos++){
+ realCol[pos] = pos + firstCol + shift;
+ realInd[pos] = pos;
+ }
+ const Index Zero = firstCol + shift;
+ VectorType temp;
+ for (int i = 1; i < length - 1; i++){
+ const Index I = i + Zero;
+ const Index realI = realInd[i];
+ const Index j = permutation[length - i] - Zero;
+ const Index J = realCol[j];
+
+ //diag displace
+ aux = m_computed(I, I);
+ m_computed(I, I) = m_computed(J, J);
+ m_computed(J, J) = aux;
+
+ //firstrow displace
+ aux = m_computed(I, Zero);
+ m_computed(I, Zero) = m_computed(J, Zero);
+ m_computed(J, Zero) = aux;
+
+ // change columns
+ if (compU) {
+ temp = m_naiveU.col(I - shift).segment(firstCol, length + 1);
+ m_naiveU.col(I - shift).segment(firstCol, length + 1) <<
+ m_naiveU.col(J - shift).segment(firstCol, length + 1);
+ m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp;
+ }
+ else
+ {
+ temp = m_naiveU.col(I - shift).segment(0, 2);
+ m_naiveU.col(I - shift).segment(0, 2) <<
+ m_naiveU.col(J - shift).segment(0, 2);
+ m_naiveU.col(J - shift).segment(0, 2) << temp;
+ }
+ if (compV) {
+ const Index CWI = I + firstColW - Zero;
+ const Index CWJ = J + firstColW - Zero;
+ temp = m_naiveV.col(CWI).segment(firstRowW, length);
+ m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length);
+ m_naiveV.col(CWJ).segment(firstRowW, length) << temp;
+ }
+
+ //update real pos
+ realCol[realI] = J;
+ realCol[j] = I;
+ realInd[J - Zero] = realI;
+ realInd[I - Zero] = j;
+ }
+ for (Index i = firstCol + shift + 1; i<lastCol + shift;i++){
+ if ((m_computed(i + 1, i + 1) - m_computed(i, i)) < EPS){
+ deflation44(firstCol ,
+ firstCol + shift,
+ firstRowW,
+ firstColW,
+ i - Zero,
+ i + 1 - Zero,
+ length);
+ }
+ }
+ delete [] permutation;
+ delete [] realInd;
+ delete [] realCol;
+
+}//end deflation
+
+
+namespace internal{
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<BDCSVD<_MatrixType>, Rhs>
+ : solve_retval_base<BDCSVD<_MatrixType>, Rhs>
+{
+ typedef BDCSVD<_MatrixType> BDCSVDType;
+ EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ eigen_assert(rhs().rows() == dec().rows());
+ // A = U S V^*
+ // So A^{-1} = V S^{-1} U^*
+ Index diagSize = (std::min)(dec().rows(), dec().cols());
+ typename BDCSVDType::SingularValuesType invertedSingVals(diagSize);
+ Index nonzeroSingVals = dec().nonzeroSingularValues();
+ invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
+ invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
+
+ dst = dec().matrixV().leftCols(diagSize)
+ * invertedSingVals.asDiagonal()
+ * dec().matrixU().leftCols(diagSize).adjoint()
+ * rhs();
+ return;
+ }
+};
+
+} //end namespace internal
+
+ /** \svd_module
+ *
 * \return the singular value decomposition of \c *this computed by the
 * bidiagonal divide-and-conquer (BDC) algorithm
+ *
+ * \sa class BDCSVD
+ */
+/*
+template<typename Derived>
+BDCSVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
+{
+ return BDCSVD<PlainObject>(*this, computationOptions);
+}
+*/
+
+} // end namespace Eigen
+
+#endif // EIGEN_BDCSVD_H
diff --git a/unsupported/Eigen/src/SVD/CMakeLists.txt b/unsupported/Eigen/src/SVD/CMakeLists.txt
new file mode 100644
index 0000000..b40baf0
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SVD_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_SVD_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/SVD COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/SVD/JacobiSVD.h b/unsupported/Eigen/src/SVD/JacobiSVD.h
new file mode 100644
index 0000000..02fac40
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/JacobiSVD.h
@@ -0,0 +1,782 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_JACOBISVD_H
+#define EIGEN_JACOBISVD_H
+
+namespace Eigen {
+
+namespace internal {
+// forward declaration (needed by ICC)
+// the empty body is required by MSVC
+template<typename MatrixType, int QRPreconditioner,
+ bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
+struct svd_precondition_2x2_block_to_be_real {};
+
+/*** QR preconditioners (R-SVD)
+ ***
+ *** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
+ *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
+ *** JacobiSVD which by itself is only able to work on square matrices.
+ ***/
+
+enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+struct qr_preconditioner_should_do_anything
+{
+ enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
+ b = MatrixType::RowsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime != Dynamic &&
+ MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
+ ret = !( (QRPreconditioner == NoQRPreconditioner) ||
+ (Case == PreconditionIfMoreColsThanRows && bool(a)) ||
+ (Case == PreconditionIfMoreRowsThanCols && bool(b)) )
+ };
+};
+
+template<typename MatrixType, int QRPreconditioner, int Case,
+ bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
+> struct qr_preconditioner_impl {};
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
+ bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
+ {
+ return false;
+ }
+};
+
+/*** preconditioner using FullPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
+ };
+ typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
+
+ void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
+ if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+ return true;
+ }
+ return false;
+ }
+private:
+ typedef FullPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ WorkspaceType m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ m_adjoint.resize(svd.cols(), svd.rows());
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
+ if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+ return true;
+ }
+ else return false;
+ }
+private:
+ typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using ColPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+
+ void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+ else if(svd.m_computeThinU)
+ {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+ }
+ if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+ return true;
+ }
+ return false;
+ }
+
+private:
+ typedef ColPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+ m_adjoint.resize(svd.cols(), svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+ else if(svd.m_computeThinV)
+ {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+ }
+ if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+ return true;
+ }
+ else return false;
+ }
+
+private:
+ typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using HouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+
+ void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+ else if(svd.m_computeThinU)
+ {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+ }
+ if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
+ return true;
+ }
+ return false;
+ }
+private:
+ typedef HouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+ m_adjoint.resize(svd.cols(), svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+ else if(svd.m_computeThinV)
+ {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+ }
+ if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
+ return true;
+ }
+ else return false;
+ }
+
+private:
+ typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** 2x2 SVD implementation
+ ***
+ *** JacobiSVD consists in performing a series of 2x2 SVD subproblems
+ ***/
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
+{
+ typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+ typedef typename SVD::Index Index;
+ static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
+};
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
+{
+ typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename SVD::Index Index;
+ static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
+ {
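+    // Multiplying a row or column of the work matrix by the unit-modulus scalar
+    // z = |a|/a makes the coefficient a real and non-negative without changing
+    // the singular values; the corresponding column of U or V is scaled by
+    // conj(z) (resp. z) so that the factorization is preserved.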
+ using std::sqrt;
+ Scalar z;
+ JacobiRotation<Scalar> rot;
+ RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
+ if(n==0)
+ {
+ z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+ work_matrix.row(p) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ else
+ {
+ rot.c() = conj(work_matrix.coeff(p,p)) / n;
+ rot.s() = work_matrix.coeff(q,p) / n;
+ work_matrix.applyOnTheLeft(p,q,rot);
+ if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
+ if(work_matrix.coeff(p,q) != Scalar(0))
+ {
+ Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+ work_matrix.col(q) *= z;
+ if(svd.computeV()) svd.m_matrixV.col(q) *= z;
+ }
+ if(work_matrix.coeff(q,q) != Scalar(0))
+ {
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ }
+ }
+};
+
+template<typename MatrixType, typename RealScalar, typename Index>
+void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
+ JacobiRotation<RealScalar> *j_left,
+ JacobiRotation<RealScalar> *j_right)
+{
+ using std::sqrt;
+ Matrix<RealScalar,2,2> m;
+ m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
+ numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
+ JacobiRotation<RealScalar> rot1;
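+  // rot1 is chosen so that applying it on the left cancels the antisymmetric
+  // part of m (proportional to m(1,0) - m(0,1)), making the 2x2 block symmetric;
+  // makeJacobi below then diagonalizes that symmetric block.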
+ RealScalar t = m.coeff(0,0) + m.coeff(1,1);
+ RealScalar d = m.coeff(1,0) - m.coeff(0,1);
+ if(t == RealScalar(0))
+ {
+ rot1.c() = RealScalar(0);
+ rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
+ }
+ else
+ {
+ RealScalar u = d / t;
+ rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
+ rot1.s() = rot1.c() * u;
+ }
+ m.applyOnTheLeft(0,1,rot1);
+ j_right->makeJacobi(m,0,1);
+ *j_left = rot1 * j_right->transpose();
+}
+
+} // end namespace internal
+
+/** \ingroup SVD_Module
+ *
+ *
+ * \class JacobiSVD
+ *
+ * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
+ *
+ * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+ * \param QRPreconditioner this optional parameter allows one to specify the type of QR decomposition that will be used internally
+ * for the R-SVD step for non-square matrices. See discussion of possible values below.
+ *
+ * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
+ * \f[ A = U S V^* \f]
+ * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
+ * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
+ * and right \em singular \em vectors of \a A respectively.
+ *
+ * Singular values are always sorted in decreasing order.
+ *
+ * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
+ *
+ * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
+ * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
+ * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
+ * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
+ *
+ * Here's an example demonstrating basic usage:
+ * \include JacobiSVD_basic.cpp
+ * Output: \verbinclude JacobiSVD_basic.out
+ *
+ * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
+ * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
+ * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
+ * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
+ *
+ * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+ * terminate in finite (and reasonable) time.
+ *
+ * The possible values for QRPreconditioner are:
+ * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
+ * \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
+ * Contrary to other QRs, it doesn't allow computing thin unitaries.
+ * \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
+ * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
+ * is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
+ * process is more reliable than the optimized bidiagonal SVD iterations.
+ * \li NoQRPreconditioner allows one to not use any QR preconditioner at all. This is useful if you know that you will only be computing
+ * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option results in
+ * faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD checks
+ * whether QR preconditioning is needed before applying it anyway.
+ *
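+ * A minimal sketch of selecting a non-default preconditioner (matrix and sizes are illustrative):
+ * \code
+ * Eigen::MatrixXf A = Eigen::MatrixXf::Random(5, 3);
+ * Eigen::JacobiSVD<Eigen::MatrixXf, Eigen::FullPivHouseholderQRPreconditioner>
+ *     svd(A, Eigen::ComputeFullU | Eigen::ComputeFullV); // thin unitaries are not available here
+ * \endcode
+ *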
+ * \sa MatrixBase::jacobiSvd()
+ */
+template<typename _MatrixType, int QRPreconditioner>
+class JacobiSVD : public SVDBase<_MatrixType>
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+ MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+ MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+ MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+ MatrixVType;
+ typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColType;
+ typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
+ MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
+ WorkMatrixType;
+
+ /** \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via JacobiSVD::compute(const MatrixType&).
+ */
+ JacobiSVD()
+ : SVDBase<_MatrixType>::SVDBase()
+ {}
+
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem size.
+ * \sa JacobiSVD()
+ */
+ JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase()
+ {
+ allocate(rows, cols, computationOptions);
+ }
+
+ /** \brief Constructor performing the decomposition of given matrix.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing one to specify whether full or thin U or V unitaries should be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase()
+ {
+ compute(matrix, computationOptions);
+ }
+
+ /** \brief Method performing the decomposition of given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing one to specify whether full or thin U or V unitaries should be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+ /** \brief Method performing the decomposition of given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix)
+ {
+ return compute(matrix, this->m_computationOptions);
+ }
+
+ /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+ *
+ * \param b the right-hand-side of the equation to solve.
+ *
+ * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
+ *
+ * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
+ * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<JacobiSVD, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(this->m_isInitialized && "JacobiSVD is not initialized.");
+ eigen_assert(SVDBase<MatrixType>::computeU() && SVDBase<MatrixType>::computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
+ return internal::solve_retval<JacobiSVD, Rhs>(*this, b.derived());
+ }
+
+
+
+ private:
+ void allocate(Index rows, Index cols, unsigned int computationOptions);
+
+ protected:
+ WorkMatrixType m_workMatrix;
+
+ template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
+ friend struct internal::svd_precondition_2x2_block_to_be_real;
+ template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
+ friend struct internal::qr_preconditioner_impl;
+
+ internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
+ internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
+};
+
+template<typename MatrixType, int QRPreconditioner>
+void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+ if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
+
+ if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
+ {
+ eigen_assert(!(this->m_computeThinU || this->m_computeThinV) &&
+ "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
+ "Use the ColPivHouseholderQR preconditioner instead.");
+ }
+
+ m_workMatrix.resize(this->m_diagSize, this->m_diagSize);
+
+ if(this->m_cols>this->m_rows) m_qr_precond_morecols.allocate(*this);
+ if(this->m_rows>this->m_cols) m_qr_precond_morerows.allocate(*this);
+}
+
+template<typename MatrixType, int QRPreconditioner>
+SVDBase<MatrixType>&
+JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
+{
+ using std::abs;
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+
+ // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
+ // only worsening the precision of U and V as we accumulate more rotations
+ const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
+
+ // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
+ const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
+
+ /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
+
+ if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix))
+ {
+ m_workMatrix = matrix.block(0,0,this->m_diagSize,this->m_diagSize);
+ if(this->m_computeFullU) this->m_matrixU.setIdentity(this->m_rows,this->m_rows);
+ if(this->m_computeThinU) this->m_matrixU.setIdentity(this->m_rows,this->m_diagSize);
+ if(this->m_computeFullV) this->m_matrixV.setIdentity(this->m_cols,this->m_cols);
+ if(this->m_computeThinV) this->m_matrixV.setIdentity(this->m_cols, this->m_diagSize);
+ }
+
+ /*** step 2. The main Jacobi SVD iteration. ***/
+
+ bool finished = false;
+ while(!finished)
+ {
+ finished = true;
+
+ // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
+
+ for(Index p = 1; p < this->m_diagSize; ++p)
+ {
+ for(Index q = 0; q < p; ++q)
+ {
+ // if this 2x2 sub-matrix is not diagonal already...
+ // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
+ // keep us iterating forever. Similarly, small denormal numbers are considered zero.
+ using std::max;
+ RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
+ abs(m_workMatrix.coeff(q,q))));
+ if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
+ {
+ finished = false;
+
+ // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
+ internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
+ JacobiRotation<RealScalar> j_left, j_right;
+ internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
+
+ // accumulate resulting Jacobi rotations
+ m_workMatrix.applyOnTheLeft(p,q,j_left);
+ if(SVDBase<MatrixType>::computeU()) this->m_matrixU.applyOnTheRight(p,q,j_left.transpose());
+
+ m_workMatrix.applyOnTheRight(p,q,j_right);
+ if(SVDBase<MatrixType>::computeV()) this->m_matrixV.applyOnTheRight(p,q,j_right);
+ }
+ }
+ }
+ }
+
+ /*** step 3. The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/
+
+ for(Index i = 0; i < this->m_diagSize; ++i)
+ {
+ RealScalar a = abs(m_workMatrix.coeff(i,i));
+ this->m_singularValues.coeffRef(i) = a;
+ if(SVDBase<MatrixType>::computeU() && (a!=RealScalar(0))) this->m_matrixU.col(i) *= this->m_workMatrix.coeff(i,i)/a;
+ }
+
+ /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
+
+ this->m_nonzeroSingularValues = this->m_diagSize;
+ for(Index i = 0; i < this->m_diagSize; i++)
+ {
+ Index pos;
+ RealScalar maxRemainingSingularValue = this->m_singularValues.tail(this->m_diagSize-i).maxCoeff(&pos);
+ if(maxRemainingSingularValue == RealScalar(0))
+ {
+ this->m_nonzeroSingularValues = i;
+ break;
+ }
+ if(pos)
+ {
+ pos += i;
+ std::swap(this->m_singularValues.coeffRef(i), this->m_singularValues.coeffRef(pos));
+ if(SVDBase<MatrixType>::computeU()) this->m_matrixU.col(pos).swap(this->m_matrixU.col(i));
+ if(SVDBase<MatrixType>::computeV()) this->m_matrixV.col(pos).swap(this->m_matrixV.col(i));
+ }
+ }
+
+ this->m_isInitialized = true;
+ return *this;
+}
+
+namespace internal {
+template<typename _MatrixType, int QRPreconditioner, typename Rhs>
+struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+ : solve_retval_base<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+{
+ typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType;
+ EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ eigen_assert(rhs().rows() == dec().rows());
+
+ // A = U S V^*, so A^{-1} = V S^{-1} U^*.
+ // Singular values that are exactly zero are not inverted but left at zero (see below),
+ // which yields the least-squares / minimum-norm (pseudo-inverse) solution.
+
+ Index diagSize = (std::min)(dec().rows(), dec().cols());
+ typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);
+
+ Index nonzeroSingVals = dec().nonzeroSingularValues();
+ invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
+ invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
+
+ dst = dec().matrixV().leftCols(diagSize)
+ * invertedSingVals.asDiagonal()
+ * dec().matrixU().leftCols(diagSize).adjoint()
+ * rhs();
+ }
+};
+} // end namespace internal
+
+/** \svd_module
+ *
+ * \return the singular value decomposition of \c *this computed by two-sided
+ * Jacobi transformations.
+ *
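+ * A minimal usage sketch (MatrixXf and the ComputeThin* flags are standard Eigen names):
+ * \code
+ * MatrixXf m = MatrixXf::Random(4,3);
+ * JacobiSVD<MatrixXf> svd = m.jacobiSvd(ComputeThinU | ComputeThinV);
+ * std::cout << svd.singularValues() << std::endl; // sorted in decreasing order
+ * \endcode
+ *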
+ * \sa class JacobiSVD
+ */
+template<typename Derived>
+JacobiSVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
+{
+ return JacobiSVD<PlainObject>(*this, computationOptions);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBISVD_H
diff --git a/unsupported/Eigen/src/SVD/SVDBase.h b/unsupported/Eigen/src/SVD/SVDBase.h
new file mode 100644
index 0000000..fd8af3b
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/SVDBase.h
@@ -0,0 +1,236 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SVD_H
+#define EIGEN_SVD_H
+
+namespace Eigen {
+/** \ingroup SVD_Module
+ *
+ *
+ * \class SVDBase
+ *
+ * \brief Base class of the SVD algorithm classes
+ *
+ * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+ *
+ * The SVD decomposition of any n-by-p matrix \a A is a factorization
+ * \f[ A = U S V^* \f]
+ * where \a U is an n-by-n unitary, \a V is a p-by-p unitary, and \a S is an n-by-p real positive matrix which is zero outside of its main diagonal;
+ * the diagonal entries of \a S are known as the \em singular \em values of \a A, and the columns of \a U and \a V are known as the left
+ * and right \em singular \em vectors of \a A respectively.
+ *
+ * Singular values are always sorted in decreasing order.
+ *
+ *
+ * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In the case of a rectangular n-by-p matrix, letting \a m be the
+ * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
+ * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then an n-by-m matrix,
+ * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
+ *
+ * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+ * terminate in finite (and reasonable) time.
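+ *
+ * A minimal usage sketch, assuming the JacobiSVD subclass defined in this module:
+ * \code
+ * MatrixXf m = MatrixXf::Random(3,2);
+ * JacobiSVD<MatrixXf> svd(m, ComputeThinU | ComputeThinV);
+ * std::cout << "singular values: " << svd.singularValues().transpose() << std::endl;
+ * std::cout << "left singular vectors:\n" << svd.matrixU() << std::endl;
+ * \endcode
+ *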
+ * \sa MatrixBase::jacobiSvd()
+ */
+template<typename _MatrixType>
+class SVDBase
+{
+
+public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+ MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+ MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+ MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+ MatrixVType;
+ typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColType;
+ typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
+ MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
+ WorkMatrixType;
+
+
+
+
+ /** \brief Method performing the decomposition of the given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter specifying whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
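+ *
+ * A usage sketch on an existing decomposition object (JacobiSVD is one concrete subclass,
+ * and m is assumed to be a MatrixXf):
+ * \code
+ * JacobiSVD<MatrixXf> svd; // assuming the subclass provides a default constructor
+ * svd.compute(m, ComputeFullU | ComputeThinV);
+ * \endcode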
+ */
+ SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+ /** \brief Method performing the decomposition of the given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ //virtual SVDBase& compute(const MatrixType& matrix) = 0;
+ SVDBase& compute(const MatrixType& matrix);
+
+ /** \returns the \a U matrix.
+ *
+ * For the SVDBase decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+ * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU.
+ *
+ * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
+ *
+ * This method asserts that you asked for \a U to be computed.
+ */
+ const MatrixUType& matrixU() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
+ return m_matrixU;
+ }
+
+ /** \returns the \a V matrix.
+ *
+ * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+ * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for #ComputeThinV.
+ *
+ * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
+ *
+ * This method asserts that you asked for \a V to be computed.
+ */
+ const MatrixVType& matrixV() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
+ return m_matrixV;
+ }
+
+ /** \returns the vector of singular values.
+ *
+ * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
+ * returned vector has size \a m. Singular values are always sorted in decreasing order.
+ */
+ const SingularValuesType& singularValues() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ return m_singularValues;
+ }
+
+
+
+ /** \returns the number of singular values that are not exactly 0 */
+ Index nonzeroSingularValues() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ return m_nonzeroSingularValues;
+ }
+
+
+ /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
+ inline bool computeU() const { return m_computeFullU || m_computeThinU; }
+ /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
+ inline bool computeV() const { return m_computeFullV || m_computeThinV; }
+
+
+ inline Index rows() const { return m_rows; }
+ inline Index cols() const { return m_cols; }
+
+
+protected:
+ // returns true if the workspace was already allocated for these dimensions and options
+ bool allocate(Index rows, Index cols, unsigned int computationOptions);
+
+ MatrixUType m_matrixU;
+ MatrixVType m_matrixV;
+ SingularValuesType m_singularValues;
+ bool m_isInitialized, m_isAllocated;
+ bool m_computeFullU, m_computeThinU;
+ bool m_computeFullV, m_computeThinV;
+ unsigned int m_computationOptions;
+ Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
+
+
+ /** \brief Default Constructor.
+ *
+ * Default constructor of SVDBase
+ */
+ SVDBase()
+ : m_isInitialized(false),
+ m_isAllocated(false),
+ m_computationOptions(0),
+ m_rows(-1), m_cols(-1)
+ {}
+
+
+};
+
+
+template<typename MatrixType>
+bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+ eigen_assert(rows >= 0 && cols >= 0);
+
+ if (m_isAllocated &&
+ rows == m_rows &&
+ cols == m_cols &&
+ computationOptions == m_computationOptions)
+ {
+ return true;
+ }
+
+ m_rows = rows;
+ m_cols = cols;
+ m_isInitialized = false;
+ m_isAllocated = true;
+ m_computationOptions = computationOptions;
+ m_computeFullU = (computationOptions & ComputeFullU) != 0;
+ m_computeThinU = (computationOptions & ComputeThinU) != 0;
+ m_computeFullV = (computationOptions & ComputeFullV) != 0;
+ m_computeThinV = (computationOptions & ComputeThinV) != 0;
+ eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
+ eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
+ eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
+ "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns.");
+
+ m_diagSize = (std::min)(m_rows, m_cols);
+ m_singularValues.resize(m_diagSize);
+ if(RowsAtCompileTime==Dynamic)
+ m_matrixU.resize(m_rows, m_computeFullU ? m_rows
+ : m_computeThinU ? m_diagSize
+ : 0);
+ if(ColsAtCompileTime==Dynamic)
+ m_matrixV.resize(m_cols, m_computeFullV ? m_cols
+ : m_computeThinV ? m_diagSize
+ : 0);
+
+ return false;
+}
+
+}// end namespace
+
+#endif // EIGEN_SVD_H
diff --git a/unsupported/Eigen/src/SVD/TODOBdcsvd.txt b/unsupported/Eigen/src/SVD/TODOBdcsvd.txt
new file mode 100644
index 0000000..0bc9a46
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/TODOBdcsvd.txt
@@ -0,0 +1,29 @@
+TO DO LIST
+
+
+
+(optional optimizations) - do all the allocations in the allocate part
+ - support statically-sized matrices
+ - emit an error at compile time when using integer matrices (int, long, std::complex<int>, ...)
+
+to finish the algorithm :
+ - implement the last part of the algorithm as described in the reference paper.
+ You may find more information on that part in the paper listed in doneInBDCSVD.txt
+
+ - replace the call to JacobiSVD at the end of the divide algorithm, just after the call to
+ deflation.
+
+(suggested step by step resolution)
+ 0) comment out the call to JacobiSVD in the last part of the divide method and everything after it
+ until the end of the method. What is commented out can serve as a guideline for steps 3), 4) and 6)
+ 1) solve the secular equation (characteristic equation) for the values that are not null (zi!=0 and di!=0), after the deflation,
+ which should be uncommented in the divide method
+ 2) remember the singular values that are already computed (zi=0)
+ 3) place the singular values found into m_computed at the right positions (together with the ones found in step 2))
+ in decreasing order
+ 4) set the first column to zero (except the first element) in m_computed
+ 5) compute all the singular vectors when CompV is set to true, and only the left vectors when
+ CompV is set to false
+ 6) multiply naiveU and naiveV on the right by the matrices found; only naiveU when CompV is set to
+ false. Beware: if CompU is false, naiveU has only 2 rows
+ 7) delete everything commented out in step 0)
diff --git a/unsupported/Eigen/src/SVD/doneInBDCSVD.txt b/unsupported/Eigen/src/SVD/doneInBDCSVD.txt
new file mode 100644
index 0000000..8563dda
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/doneInBDCSVD.txt
@@ -0,0 +1,21 @@
+This unsupported package implements a divide and conquer algorithm to compute the SVD.
+
+The implementation follows as closely as possible the following reference paper :
+http://www.cs.yale.edu/publications/techreports/tr933.pdf
+
+The code documentation uses the same variable names as the reference paper. The code, deflation included, is
+working, but there are a few things that could be optimised, as explained in TODOBdcsvd.txt.
+
+In the code, comments mark the line where the third step of the algorithm would go, so one can simply add a call
+to a function implementing the last part of the algorithm without requiring any knowledge of the parts we implemented.
+
+In TODOBdcsvd.txt we explain the main difficulty of the last part and suggest a reference paper to help solve it.
+
+The implementation has trouble with fixed-size matrices.
+
+The current implementation returns zero matrices when asked to compute the SVD of an integer matrix.
+
+
+Paper for the third part:
+http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
+
diff --git a/unsupported/Eigen/src/Skyline/CMakeLists.txt b/unsupported/Eigen/src/Skyline/CMakeLists.txt
new file mode 100644
index 0000000..3bf1b0d
--- /dev/null
+++ b/unsupported/Eigen/src/Skyline/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Skyline_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Skyline_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/Skyline COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h b/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
new file mode 100644
index 0000000..a1f54ed
--- /dev/null
+++ b/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h
@@ -0,0 +1,352 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Guillaume Saupin <guillaume.saupin@cea.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SKYLINEINPLACELU_H
+#define EIGEN_SKYLINEINPLACELU_H
+
+namespace Eigen {
+
+/** \ingroup Skyline_Module
+ *
+ * \class SkylineInplaceLU
+ *
+ * \brief Inplace LU decomposition of a skyline matrix and associated features
+ *
+ * \param MatrixType the type of the matrix of which we are computing the LU factorization
+ *
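+ * A minimal sketch, assuming a square SkylineMatrix<double> A and VectorXd vectors b and x:
+ * \code
+ * SkylineInplaceLU<SkylineMatrix<double> > lu(A); // factorizes A in place
+ * if (lu.succeeded())
+ * lu.solve(b, &x); // solves A x = b
+ * \endcode
+ *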
+ */
+template<typename MatrixType>
+class SkylineInplaceLU {
+protected:
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+
+public:
+
+ /** Creates an LU object and computes the factorization of \a matrix using
+ * flags \a flags. */
+ SkylineInplaceLU(MatrixType& matrix, int flags = 0)
+ : /*m_matrix(matrix.rows(), matrix.cols()),*/ m_flags(flags), m_status(0), m_lu(matrix) {
+ m_precision = RealScalar(0.1) * Eigen::dummy_precision<RealScalar > ();
+ m_lu.IsRowMajor ? computeRowMajor() : compute();
+ }
+
+ /** Sets the relative threshold value used to prune zero coefficients during the decomposition.
+ *
+ * Setting a value greater than zero speeds up computation, and yields an incomplete
+ * factorization with fewer nonzero coefficients. Such approximate factors are especially
+ * useful for initializing an iterative solver.
+ *
+ * Note that the exact meaning of this parameter might depend on the actual
+ * backend. Moreover, not all backends support this feature.
+ *
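+ * For instance (a sketch, assuming an existing SkylineInplaceLU object lu; the
+ * factorization has to be recomputed for the new precision to take effect):
+ * \code
+ * lu.setPrecision(1e-4); // relative pruning threshold (hypothetical value)
+ * lu.compute(); // recompute an incomplete factorization
+ * \endcode
+ *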
+ * \sa precision() */
+ void setPrecision(RealScalar v) {
+ m_precision = v;
+ }
+
+ /** \returns the current precision.
+ *
+ * \sa setPrecision() */
+ RealScalar precision() const {
+ return m_precision;
+ }
+
+ /** Sets the flags. Possible values are:
+ * - CompleteFactorization
+ * - IncompleteFactorization
+ * - MemoryEfficient
+ * - one of the ordering methods
+ * - etc...
+ *
+ * \sa flags() */
+ void setFlags(int f) {
+ m_flags = f;
+ }
+
+ /** \returns the current flags */
+ int flags() const {
+ return m_flags;
+ }
+
+ void setOrderingMethod(int m) {
+ m_flags = m;
+ }
+
+ int orderingMethod() const {
+ return m_flags;
+ }
+
+ /** Computes/re-computes the LU factorization */
+ void compute();
+ void computeRowMajor();
+
+ /** \returns the lower triangular matrix L */
+ //inline const MatrixType& matrixL() const { return m_matrixL; }
+
+ /** \returns the upper triangular matrix U */
+ //inline const MatrixType& matrixU() const { return m_matrixU; }
+
+ template<typename BDerived, typename XDerived>
+ bool solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x,
+ const int transposed = 0) const;
+
+ /** \returns true if the factorization succeeded */
+ inline bool succeeded(void) const {
+ return m_succeeded;
+ }
+
+protected:
+ RealScalar m_precision;
+ int m_flags;
+ mutable int m_status;
+ bool m_succeeded;
+ MatrixType& m_lu;
+};
+
+/** Computes / recomputes the in place LU decomposition of the SkylineInplaceLU.
+ * using the default algorithm.
+ */
+template<typename MatrixType>
+//template<typename _Scalar>
+void SkylineInplaceLU<MatrixType>::compute() {
+ const size_t rows = m_lu.rows();
+ const size_t cols = m_lu.cols();
+
+ eigen_assert(rows == cols && "We do not (yet) support rectangular LU.");
+ eigen_assert(!m_lu.IsRowMajor && "LU decomposition does not work with rowMajor storage; use computeRowMajor() instead");
+
+ for (Index row = 0; row < rows; row++) {
+ const Scalar pivot = m_lu.coeffDiag(row); // Scalar, not double, so that non-double scalar types work
+
+ //Lower matrix Columns update
+ const Index& col = row;
+ for (typename MatrixType::InnerLowerIterator lIt(m_lu, col); lIt; ++lIt) {
+ lIt.valueRef() /= pivot;
+ }
+
+ //Upper matrix update -> contiguous memory access
+ typename MatrixType::InnerLowerIterator lIt(m_lu, col);
+ for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) {
+ typename MatrixType::InnerUpperIterator uItPivot(m_lu, row);
+ typename MatrixType::InnerUpperIterator uIt(m_lu, rrow);
+ const Scalar coef = lIt.value();
+
+ uItPivot += (rrow - row - 1);
+
+ //update upper part -> contiguous memory access
+ for (++uItPivot; uIt && uItPivot;) {
+ uIt.valueRef() -= uItPivot.value() * coef;
+
+ ++uIt;
+ ++uItPivot;
+ }
+ ++lIt;
+ }
+
+ //Upper matrix update -> non contiguous memory access
+ typename MatrixType::InnerLowerIterator lIt3(m_lu, col);
+ for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) {
+ typename MatrixType::InnerUpperIterator uItPivot(m_lu, row);
+ const Scalar coef = lIt3.value();
+
+ //update lower part -> non contiguous memory access
+ for (Index i = 0; i < rrow - row - 1; i++) {
+ m_lu.coeffRefLower(rrow, row + i + 1) -= uItPivot.value() * coef;
+ ++uItPivot;
+ }
+ ++lIt3;
+ }
+ //update diag -> contiguous
+ typename MatrixType::InnerLowerIterator lIt2(m_lu, col);
+ for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) {
+
+ typename MatrixType::InnerUpperIterator uItPivot(m_lu, row);
+ typename MatrixType::InnerUpperIterator uIt(m_lu, rrow);
+ const Scalar coef = lIt2.value();
+
+ uItPivot += (rrow - row - 1);
+ m_lu.coeffRefDiag(rrow) -= uItPivot.value() * coef;
+ ++lIt2;
+ }
+ }
+}
+
+template<typename MatrixType>
+void SkylineInplaceLU<MatrixType>::computeRowMajor() {
+ const size_t rows = m_lu.rows();
+ const size_t cols = m_lu.cols();
+
+ eigen_assert(rows == cols && "We do not (yet) support rectangular LU.");
+ eigen_assert(m_lu.IsRowMajor && "You're trying to apply a rowMajor decomposition to a ColMajor matrix!");
+
+ for (Index row = 0; row < rows; row++) {
+ typename MatrixType::InnerLowerIterator llIt(m_lu, row);
+
+
+ for (Index col = llIt.col(); col < row; col++) {
+ if (m_lu.coeffExistLower(row, col)) {
+ const Scalar diag = m_lu.coeffDiag(col);
+
+ typename MatrixType::InnerLowerIterator lIt(m_lu, row);
+ typename MatrixType::InnerUpperIterator uIt(m_lu, col);
+
+
+ const Index offset = lIt.col() - uIt.row();
+
+
+ Index stop = offset > 0 ? col - lIt.col() : col - uIt.row();
+
+ //#define VECTORIZE
+#ifdef VECTORIZE
+ Map<VectorXd > rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop);
+ Map<VectorXd > colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop);
+
+
+ Scalar newCoeff = m_lu.coeffLower(row, col) - rowVal.dot(colVal);
+#else
+ if (offset > 0) //Skip zero value of lIt
+ uIt += offset;
+ else //Skip zero values of uIt
+ lIt += -offset;
+ Scalar newCoeff = m_lu.coeffLower(row, col);
+
+ for (Index k = 0; k < stop; ++k) {
+ const Scalar tmp = newCoeff;
+ newCoeff = tmp - lIt.value() * uIt.value();
+ ++lIt;
+ ++uIt;
+ }
+#endif
+
+ m_lu.coeffRefLower(row, col) = newCoeff / diag;
+ }
+ }
+
+ //Upper matrix update
+ const Index col = row;
+ typename MatrixType::InnerUpperIterator uuIt(m_lu, col);
+ for (Index rrow = uuIt.row(); rrow < col; rrow++) {
+
+ typename MatrixType::InnerLowerIterator lIt(m_lu, rrow);
+ typename MatrixType::InnerUpperIterator uIt(m_lu, col);
+ const Index offset = lIt.col() - uIt.row();
+
+ Index stop = offset > 0 ? rrow - lIt.col() : rrow - uIt.row();
+
+#ifdef VECTORIZE
+ Map<VectorXd > rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop);
+ Map<VectorXd > colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop);
+
+ Scalar newCoeff = m_lu.coeffUpper(rrow, col) - rowVal.dot(colVal);
+#else
+ if (offset > 0) //Skip zero value of lIt
+ uIt += offset;
+ else //Skip zero values of uIt
+ lIt += -offset;
+ Scalar newCoeff = m_lu.coeffUpper(rrow, col);
+ for (Index k = 0; k < stop; ++k) {
+ const Scalar tmp = newCoeff;
+ newCoeff = tmp - lIt.value() * uIt.value();
+
+ ++lIt;
+ ++uIt;
+ }
+#endif
+ m_lu.coeffRefUpper(rrow, col) = newCoeff;
+ }
+
+
+ //Diag matrix update
+ typename MatrixType::InnerLowerIterator lIt(m_lu, row);
+ typename MatrixType::InnerUpperIterator uIt(m_lu, row);
+
+ const Index offset = lIt.col() - uIt.row();
+
+
+ Index stop = offset > 0 ? lIt.size() : uIt.size();
+#ifdef VECTORIZE
+ Map<VectorXd > rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop);
+ Map<VectorXd > colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop);
+ Scalar newCoeff = m_lu.coeffDiag(row) - rowVal.dot(colVal);
+#else
+ if (offset > 0) //Skip zero value of lIt
+ uIt += offset;
+ else //Skip zero values of uIt
+ lIt += -offset;
+ Scalar newCoeff = m_lu.coeffDiag(row);
+ for (Index k = 0; k < stop; ++k) {
+ const Scalar tmp = newCoeff;
+ newCoeff = tmp - lIt.value() * uIt.value();
+ ++lIt;
+ ++uIt;
+ }
+#endif
+ m_lu.coeffRefDiag(row) = newCoeff;
+ }
+}
+
+/** Computes *x = U^-1 L^-1 b
+ *
+ * If \a transposed is set to SvTranspose or SvAdjoint, the solution
+ * of the transposed/adjoint system is computed instead.
+ *
+ * Not all backends implement the solution of the transposed or
+ * adjoint system.
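+ *
+ * A sketch, assuming a factorized decomposition lu and VectorXd vectors b and x:
+ * \code
+ * VectorXd x(b.size());
+ * bool ok = lu.solve(b, &x); // on return, x holds the solution
+ * \endcode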
+ */
+template<typename MatrixType>
+template<typename BDerived, typename XDerived>
+bool SkylineInplaceLU<MatrixType>::solve(const MatrixBase<BDerived> &b, MatrixBase<XDerived>* x, const int transposed) const {
+ const size_t rows = m_lu.rows();
+ const size_t cols = m_lu.cols();
+
+
+ for (Index row = 0; row < rows; row++) {
+ x->coeffRef(row) = b.coeff(row);
+ Scalar newVal = x->coeff(row);
+ typename MatrixType::InnerLowerIterator lIt(m_lu, row);
+
+ Index col = lIt.col();
+ while (lIt.col() < row) {
+
+ newVal -= x->coeff(col++) * lIt.value();
+ ++lIt;
+ }
+
+ x->coeffRef(row) = newVal;
+ }
+
+
+ for (Index col = rows - 1; col > 0; col--) {
+ x->coeffRef(col) = x->coeff(col) / m_lu.coeffDiag(col);
+
+ const Scalar x_col = x->coeff(col);
+
+ typename MatrixType::InnerUpperIterator uIt(m_lu, col);
+ uIt += uIt.size()-1;
+
+
+ while (uIt) {
+ x->coeffRef(uIt.row()) -= x_col * uIt.value();
+ //TODO : introduce --operator
+ uIt += -1;
+ }
+
+
+ }
+ x->coeffRef(0) = x->coeff(0) / m_lu.coeffDiag(0);
+
+ return true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SKYLINELU_H
diff --git a/unsupported/Eigen/src/Skyline/SkylineMatrix.h b/unsupported/Eigen/src/Skyline/SkylineMatrix.h
new file mode 100644
index 0000000..a2a8933
--- /dev/null
+++ b/unsupported/Eigen/src/Skyline/SkylineMatrix.h
@@ -0,0 +1,862 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Guillaume Saupin <guillaume.saupin@cea.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SKYLINEMATRIX_H
+#define EIGEN_SKYLINEMATRIX_H
+
+#include "SkylineStorage.h"
+#include "SkylineMatrixBase.h"
+
+namespace Eigen {
+
+/** \ingroup Skyline_Module
+ *
+ * \class SkylineMatrix
+ *
+ * \brief The main skyline matrix class
+ *
+ * This class implements a skyline matrix, a rather uncommon storage scheme in which,
+ * for each row (or column), only the band between the first nonzero coefficient and the
+ * diagonal is stored.
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ * \param _Options Union of bit flags controlling the storage scheme. Currently the only possibility
+ * is RowMajor. The default is 0 which means column-major.
+ *
+ *
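+ * A declaration sketch:
+ * \code
+ * SkylineMatrix<double> m(10,10); // column-major storage (default)
+ * SkylineMatrix<double,RowMajor> r(10,10); // row-major storage
+ * \endcode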
+ */
+namespace internal {
+template<typename _Scalar, int _Options>
+struct traits<SkylineMatrix<_Scalar, _Options> > {
+ typedef _Scalar Scalar;
+ typedef Sparse StorageKind;
+
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = SkylineBit | _Options,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ };
+};
+}
+
+template<typename _Scalar, int _Options>
+class SkylineMatrix
+: public SkylineMatrixBase<SkylineMatrix<_Scalar, _Options> > {
+public:
+ EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(SkylineMatrix)
+ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(SkylineMatrix, +=)
+ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(SkylineMatrix, -=)
+
+ using Base::IsRowMajor;
+
+protected:
+
+ typedef SkylineMatrix<Scalar, (Flags&~RowMajorBit) | (IsRowMajor ? RowMajorBit : 0) > TransposedSkylineMatrix;
+
+ Index m_outerSize;
+ Index m_innerSize;
+
+public:
+ Index* m_colStartIndex;
+ Index* m_rowStartIndex;
+ SkylineStorage<Scalar> m_data;
+
+public:
+
+ inline Index rows() const {
+ return IsRowMajor ? m_outerSize : m_innerSize;
+ }
+
+ inline Index cols() const {
+ return IsRowMajor ? m_innerSize : m_outerSize;
+ }
+
+ inline Index innerSize() const {
+ return m_innerSize;
+ }
+
+ inline Index outerSize() const {
+ return m_outerSize;
+ }
+
+ inline Index upperNonZeros() const {
+ return m_data.upperSize();
+ }
+
+ inline Index lowerNonZeros() const {
+ return m_data.lowerSize();
+ }
+
+ inline Index upperNonZeros(Index j) const {
+ return m_colStartIndex[j + 1] - m_colStartIndex[j];
+ }
+
+ inline Index lowerNonZeros(Index j) const {
+ return m_rowStartIndex[j + 1] - m_rowStartIndex[j];
+ }
+
+ inline const Scalar* _diagPtr() const {
+ return &m_data.diag(0);
+ }
+
+ inline Scalar* _diagPtr() {
+ return &m_data.diag(0);
+ }
+
+ inline const Scalar* _upperPtr() const {
+ return &m_data.upper(0);
+ }
+
+ inline Scalar* _upperPtr() {
+ return &m_data.upper(0);
+ }
+
+ inline const Scalar* _lowerPtr() const {
+ return &m_data.lower(0);
+ }
+
+ inline Scalar* _lowerPtr() {
+ return &m_data.lower(0);
+ }
+
+ inline const Index* _upperProfilePtr() const {
+ return &m_data.upperProfile(0);
+ }
+
+ inline Index* _upperProfilePtr() {
+ return &m_data.upperProfile(0);
+ }
+
+ inline const Index* _lowerProfilePtr() const {
+ return &m_data.lowerProfile(0);
+ }
+
+ inline Index* _lowerProfilePtr() {
+ return &m_data.lowerProfile(0);
+ }
+
+ inline Scalar coeff(Index row, Index col) const {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(outer < outerSize());
+ eigen_assert(inner < innerSize());
+
+ if (outer == inner)
+ return this->m_data.diag(outer);
+
+ if (IsRowMajor) {
+ if (inner > outer) //upper matrix
+ {
+ const Index minOuterIndex = inner - m_data.upperProfile(inner);
+ if (outer >= minOuterIndex)
+ return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
+ else
+ return Scalar(0);
+ }
+ if (inner < outer) //lower matrix
+ {
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
+ if (inner >= minInnerIndex)
+ return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
+ else
+ return Scalar(0);
+ }
+ return m_data.upper(m_colStartIndex[inner] + outer - inner);
+ } else {
+ if (outer > inner) //upper matrix
+ {
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
+ if (outer <= maxOuterIndex)
+ return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
+ else
+ return Scalar(0);
+ }
+ if (outer < inner) //lower matrix
+ {
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
+
+ if (inner <= maxInnerIndex)
+ return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
+ else
+ return Scalar(0);
+ }
+ }
+ }
+
+ inline Scalar& coeffRef(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(outer < outerSize());
+ eigen_assert(inner < innerSize());
+
+ if (outer == inner)
+ return this->m_data.diag(outer);
+
+ if (IsRowMajor) {
+ if (col > row) //upper matrix
+ {
+ const Index minOuterIndex = inner - m_data.upperProfile(inner);
+ eigen_assert(outer >= minOuterIndex && "you are trying to access a coefficient that does not exist in the storage");
+ return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
+ }
+ if (col < row) //lower matrix
+ {
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
+ eigen_assert(inner >= minInnerIndex && "you are trying to access a coefficient that does not exist in the storage");
+ return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
+ }
+ } else {
+ if (outer > inner) //upper matrix
+ {
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
+ eigen_assert(outer <= maxOuterIndex && "you are trying to access a coefficient that does not exist in the storage");
+ return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
+ }
+ if (outer < inner) //lower matrix
+ {
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
+ eigen_assert(inner <= maxInnerIndex && "you are trying to access a coefficient that does not exist in the storage");
+ return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
+ }
+ }
+ }
+
+ inline Scalar coeffDiag(Index idx) const {
+ eigen_assert(idx < outerSize());
+ eigen_assert(idx < innerSize());
+ return this->m_data.diag(idx);
+ }
+
+ inline Scalar coeffLower(Index row, Index col) const {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(outer < outerSize());
+ eigen_assert(inner < innerSize());
+ eigen_assert(inner != outer);
+
+ if (IsRowMajor) {
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
+ if (inner >= minInnerIndex)
+ return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
+ else
+ return Scalar(0);
+
+ } else {
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
+ if (inner <= maxInnerIndex)
+ return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
+ else
+ return Scalar(0);
+ }
+ }
+
+ inline Scalar coeffUpper(Index row, Index col) const {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(outer < outerSize());
+ eigen_assert(inner < innerSize());
+ eigen_assert(inner != outer);
+
+ if (IsRowMajor) {
+ const Index minOuterIndex = inner - m_data.upperProfile(inner);
+ if (outer >= minOuterIndex)
+ return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
+ else
+ return Scalar(0);
+ } else {
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
+ if (outer <= maxOuterIndex)
+ return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
+ else
+ return Scalar(0);
+ }
+ }
+
+ inline Scalar& coeffRefDiag(Index idx) {
+ eigen_assert(idx < outerSize());
+ eigen_assert(idx < innerSize());
+ return this->m_data.diag(idx);
+ }
+
+ inline Scalar& coeffRefLower(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(outer < outerSize());
+ eigen_assert(inner < innerSize());
+ eigen_assert(inner != outer);
+
+ if (IsRowMajor) {
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
+ eigen_assert(inner >= minInnerIndex && "you are trying to access a coefficient that does not exist in the storage");
+ return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
+ } else {
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
+ eigen_assert(inner <= maxInnerIndex && "you are trying to access a coefficient that does not exist in the storage");
+ return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
+ }
+ }
+
+ inline bool coeffExistLower(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(outer < outerSize());
+ eigen_assert(inner < innerSize());
+ eigen_assert(inner != outer);
+
+ if (IsRowMajor) {
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
+ return inner >= minInnerIndex;
+ } else {
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
+ return inner <= maxInnerIndex;
+ }
+ }
+
+ inline Scalar& coeffRefUpper(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(outer < outerSize());
+ eigen_assert(inner < innerSize());
+ eigen_assert(inner != outer);
+
+ if (IsRowMajor) {
+ const Index minOuterIndex = inner - m_data.upperProfile(inner);
+ eigen_assert(outer >= minOuterIndex && "you are trying to access a coefficient that does not exist in the storage");
+ return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
+ } else {
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
+ eigen_assert(outer <= maxOuterIndex && "you are trying to access a coefficient that does not exist in the storage");
+ return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
+ }
+ }
+
+ inline bool coeffExistUpper(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(outer < outerSize());
+ eigen_assert(inner < innerSize());
+ eigen_assert(inner != outer);
+
+ if (IsRowMajor) {
+ const Index minOuterIndex = inner - m_data.upperProfile(inner);
+ return outer >= minOuterIndex;
+ } else {
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
+ return outer <= maxOuterIndex;
+ }
+ }
+
+
+protected:
+
+public:
+ class InnerUpperIterator;
+ class InnerLowerIterator;
+
+ class OuterUpperIterator;
+ class OuterLowerIterator;
+
+ /** Removes all non zeros */
+ inline void setZero() {
+ m_data.clear();
+ memset(m_colStartIndex, 0, (m_outerSize + 1) * sizeof (Index));
+ memset(m_rowStartIndex, 0, (m_outerSize + 1) * sizeof (Index));
+ }
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const {
+ return m_data.diagSize() + m_data.upperSize() + m_data.lowerSize();
+ }
+
+ /** Preallocates \a reserveSize non zeros */
+ inline void reserve(Index reserveSize, Index reserveUpperSize, Index reserveLowerSize) {
+ m_data.reserve(reserveSize, reserveUpperSize, reserveLowerSize);
+ }
+
+ /** \returns a reference to a new nonzero coefficient with coordinates \a row x \a col.
+ *
+ * \warning This function can be extremely slow if the non zero coefficients
+ * are not inserted in a coherent order.
+ *
+ * After an insertion session, you should call the finalize() function.
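+ *
+ * A sketch of a coherent insertion session (a hypothetical 4x4 example):
+ * \code
+ * SkylineMatrix<double> m(4,4);
+ * m.insert(1,0) = 2.0; // lower part, adjacent to the diagonal
+ * m.insert(0,1) = 3.0; // upper part
+ * m.finalize(); // must be called once the insertion session is over
+ * \endcode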
+ */
+ EIGEN_DONT_INLINE Scalar & insert(Index row, Index col) {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(outer < outerSize());
+ eigen_assert(inner < innerSize());
+
+ if (outer == inner)
+ return m_data.diag(col);
+
+ if (IsRowMajor) {
+ if (outer < inner) //upper matrix
+ {
+ Index minOuterIndex = 0;
+ minOuterIndex = inner - m_data.upperProfile(inner);
+
+ if (outer < minOuterIndex) //The value does not yet exist
+ {
+ const Index previousProfile = m_data.upperProfile(inner);
+
+ m_data.upperProfile(inner) = inner - outer;
+
+
+ const Index bandIncrement = m_data.upperProfile(inner) - previousProfile;
+ //shift data stored after this new one
+ const Index stop = m_colStartIndex[cols()];
+ const Index start = m_colStartIndex[inner];
+
+
+ for (Index innerIdx = stop; innerIdx >= start; innerIdx--) {
+ m_data.upper(innerIdx + bandIncrement) = m_data.upper(innerIdx);
+ }
+
+ for (Index innerIdx = cols(); innerIdx > inner; innerIdx--) {
+ m_colStartIndex[innerIdx] += bandIncrement;
+ }
+
+ //zeros new data
+ memset(this->_upperPtr() + start, 0, (bandIncrement - 1) * sizeof (Scalar));
+
+ return m_data.upper(m_colStartIndex[inner]);
+ } else {
+ return m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
+ }
+ }
+
+ if (outer > inner) //lower matrix
+ {
+ const Index minInnerIndex = outer - m_data.lowerProfile(outer);
+ if (inner < minInnerIndex) //The value does not yet exist
+ {
+ const Index previousProfile = m_data.lowerProfile(outer);
+ m_data.lowerProfile(outer) = outer - inner;
+
+ const Index bandIncrement = m_data.lowerProfile(outer) - previousProfile;
+ //shift data stored after this new one
+ const Index stop = m_rowStartIndex[rows()];
+ const Index start = m_rowStartIndex[outer];
+
+
+ for (Index innerIdx = stop; innerIdx >= start; innerIdx--) {
+ m_data.lower(innerIdx + bandIncrement) = m_data.lower(innerIdx);
+ }
+
+ for (Index innerIdx = rows(); innerIdx > outer; innerIdx--) {
+ m_rowStartIndex[innerIdx] += bandIncrement;
+ }
+
+ //zeros new data
+ memset(this->_lowerPtr() + start, 0, (bandIncrement - 1) * sizeof (Scalar));
+ return m_data.lower(m_rowStartIndex[outer]);
+ } else {
+ return m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
+ }
+ }
+ } else {
+ if (outer > inner) //upper matrix
+ {
+ const Index maxOuterIndex = inner + m_data.upperProfile(inner);
+ if (outer > maxOuterIndex) //The value does not yet exist
+ {
+ const Index previousProfile = m_data.upperProfile(inner);
+ m_data.upperProfile(inner) = outer - inner;
+
+ const Index bandIncrement = m_data.upperProfile(inner) - previousProfile;
+ //shift data stored after this new one
+ const Index stop = m_rowStartIndex[rows()];
+ const Index start = m_rowStartIndex[inner + 1];
+
+ for (Index innerIdx = stop; innerIdx >= start; innerIdx--) {
+ m_data.upper(innerIdx + bandIncrement) = m_data.upper(innerIdx);
+ }
+
+ for (Index innerIdx = inner + 1; innerIdx < outerSize() + 1; innerIdx++) {
+ m_rowStartIndex[innerIdx] += bandIncrement;
+ }
+ memset(this->_upperPtr() + m_rowStartIndex[inner] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar));
+ return m_data.upper(m_rowStartIndex[inner] + m_data.upperProfile(inner));
+ } else {
+ return m_data.upper(m_rowStartIndex[inner] + (outer - inner));
+ }
+ }
+
+ if (outer < inner) //lower matrix
+ {
+ const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
+ if (inner > maxInnerIndex) //The value does not yet exist
+ {
+ const Index previousProfile = m_data.lowerProfile(outer);
+ m_data.lowerProfile(outer) = inner - outer;
+
+ const Index bandIncrement = m_data.lowerProfile(outer) - previousProfile;
+ //shift data stored after this new one
+ const Index stop = m_colStartIndex[cols()];
+ const Index start = m_colStartIndex[outer + 1];
+
+ for (Index innerIdx = stop; innerIdx >= start; innerIdx--) {
+ m_data.lower(innerIdx + bandIncrement) = m_data.lower(innerIdx);
+ }
+
+ for (Index innerIdx = outer + 1; innerIdx < outerSize() + 1; innerIdx++) {
+ m_colStartIndex[innerIdx] += bandIncrement;
+ }
+ memset(this->_lowerPtr() + m_colStartIndex[outer] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar));
+ return m_data.lower(m_colStartIndex[outer] + m_data.lowerProfile(outer));
+ } else {
+ return m_data.lower(m_colStartIndex[outer] + (inner - outer));
+ }
+ }
+ }
+ }
+
+ /** Must be called after inserting a set of non zero entries.
+ */
+ inline void finalize() {
+ if (IsRowMajor) {
+ if (rows() > cols())
+ m_data.resize(cols(), cols(), rows(), m_colStartIndex[cols()] + 1, m_rowStartIndex[rows()] + 1);
+ else
+ m_data.resize(rows(), cols(), rows(), m_colStartIndex[cols()] + 1, m_rowStartIndex[rows()] + 1);
+
+ // eigen_assert(rows() == cols() && "memory reorganisation only works with square matrices");
+ //
+ // Scalar* newArray = new Scalar[m_colStartIndex[cols()] + 1 + m_rowStartIndex[rows()] + 1];
+ // Index dataIdx = 0;
+ // for (Index row = 0; row < rows(); row++) {
+ //
+ // const Index nbLowerElts = m_rowStartIndex[row + 1] - m_rowStartIndex[row];
+ // // std::cout << "nbLowerElts" << nbLowerElts << std::endl;
+ // memcpy(newArray + dataIdx, m_data.m_lower + m_rowStartIndex[row], nbLowerElts * sizeof (Scalar));
+ // m_rowStartIndex[row] = dataIdx;
+ // dataIdx += nbLowerElts;
+ //
+ // const Index nbUpperElts = m_colStartIndex[row + 1] - m_colStartIndex[row];
+ // memcpy(newArray + dataIdx, m_data.m_upper + m_colStartIndex[row], nbUpperElts * sizeof (Scalar));
+ // m_colStartIndex[row] = dataIdx;
+ // dataIdx += nbUpperElts;
+ //
+ //
+ // }
+ // //todo : don't access m_data profile directly : add an accessor from SkylineMatrix
+ // m_rowStartIndex[rows()] = m_rowStartIndex[rows()-1] + m_data.lowerProfile(rows()-1);
+ // m_colStartIndex[cols()] = m_colStartIndex[cols()-1] + m_data.upperProfile(cols()-1);
+ //
+ // delete[] m_data.m_lower;
+ // delete[] m_data.m_upper;
+ //
+ // m_data.m_lower = newArray;
+ // m_data.m_upper = newArray;
+ } else {
+ if (rows() > cols())
+ m_data.resize(cols(), rows(), cols(), m_rowStartIndex[cols()] + 1, m_colStartIndex[cols()] + 1);
+ else
+ m_data.resize(rows(), rows(), cols(), m_rowStartIndex[rows()] + 1, m_colStartIndex[rows()] + 1);
+ }
+ }
+
+ inline void squeeze() {
+ finalize();
+ m_data.squeeze();
+ }
+
+ void prune(Scalar reference, RealScalar epsilon = dummy_precision<RealScalar > ()) {
+ //TODO
+ }
+
+ /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero
+ * \sa resizeNonZeros(Index), reserve(), setZero()
+ */
+ void resize(size_t rows, size_t cols) {
+ const Index diagSize = rows > cols ? cols : rows;
+ m_innerSize = IsRowMajor ? cols : rows;
+
+ eigen_assert(rows == cols && "Skyline matrix must be square matrix");
+
+ if (diagSize % 2) { // diagSize is odd
+ const Index k = (diagSize - 1) / 2;
+
+ m_data.resize(diagSize, IsRowMajor ? cols : rows, IsRowMajor ? rows : cols,
+ 2 * k * k + k + 1,
+ 2 * k * k + k + 1);
+
+ } else // diagSize is even
+ {
+ const Index k = diagSize / 2;
+ m_data.resize(diagSize, IsRowMajor ? cols : rows, IsRowMajor ? rows : cols,
+ 2 * k * k - k + 1,
+ 2 * k * k - k + 1);
+ }
+
+ if (m_colStartIndex && m_rowStartIndex) {
+ delete[] m_colStartIndex;
+ delete[] m_rowStartIndex;
+ }
+ m_colStartIndex = new Index [cols + 1];
+ m_rowStartIndex = new Index [rows + 1];
+ m_outerSize = diagSize;
+
+ m_data.reset();
+ m_data.clear();
+
+ m_outerSize = diagSize;
+ memset(m_colStartIndex, 0, (cols + 1) * sizeof (Index));
+ memset(m_rowStartIndex, 0, (rows + 1) * sizeof (Index));
+ }
+
+ void resizeNonZeros(Index size) {
+ m_data.resize(size);
+ }
+
+ inline SkylineMatrix()
+ : m_outerSize(-1), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) {
+ resize(0, 0);
+ }
+
+ inline SkylineMatrix(size_t rows, size_t cols)
+ : m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) {
+ resize(rows, cols);
+ }
+
+ template<typename OtherDerived>
+ inline SkylineMatrix(const SkylineMatrixBase<OtherDerived>& other)
+ : m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) {
+ *this = other.derived();
+ }
+
+ inline SkylineMatrix(const SkylineMatrix & other)
+ : Base(), m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) {
+ *this = other.derived();
+ }
+
+ inline void swap(SkylineMatrix & other) {
+ //EIGEN_DBG_SKYLINE(std::cout << "SkylineMatrix:: swap\n");
+ std::swap(m_colStartIndex, other.m_colStartIndex);
+ std::swap(m_rowStartIndex, other.m_rowStartIndex);
+ std::swap(m_innerSize, other.m_innerSize);
+ std::swap(m_outerSize, other.m_outerSize);
+ m_data.swap(other.m_data);
+ }
+
+ inline SkylineMatrix & operator=(const SkylineMatrix & other) {
+ std::cout << "SkylineMatrix& operator=(const SkylineMatrix& other)\n";
+ if (other.isRValue()) {
+ swap(other.const_cast_derived());
+ } else {
+ resize(other.rows(), other.cols());
+ memcpy(m_colStartIndex, other.m_colStartIndex, (m_outerSize + 1) * sizeof (Index));
+ memcpy(m_rowStartIndex, other.m_rowStartIndex, (m_outerSize + 1) * sizeof (Index));
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ inline SkylineMatrix & operator=(const SkylineMatrixBase<OtherDerived>& other) {
+ const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ if (needToTranspose) {
+ // TODO: assignment from an expression with the opposite storage order is not implemented yet
+ eigen_assert(false && "SkylineMatrix: assignment with storage-order transposition is not implemented yet");
+ return *this;
+ } else {
+ // there is no special optimization
+ return SkylineMatrixBase<SkylineMatrix>::operator=(other.derived());
+ }
+ }
+
+ friend std::ostream & operator <<(std::ostream & s, const SkylineMatrix & m) {
+
+ EIGEN_DBG_SKYLINE(
+ std::cout << "upper elements : " << std::endl;
+ for (Index i = 0; i < m.m_data.upperSize(); i++)
+ std::cout << m.m_data.upper(i) << "\t";
+ std::cout << std::endl;
+ std::cout << "upper profile : " << std::endl;
+ for (Index i = 0; i < m.m_data.upperProfileSize(); i++)
+ std::cout << m.m_data.upperProfile(i) << "\t";
+ std::cout << std::endl;
+ std::cout << "lower startIdx : " << std::endl;
+ for (Index i = 0; i < m.m_data.upperProfileSize(); i++)
+ std::cout << (IsRowMajor ? m.m_colStartIndex[i] : m.m_rowStartIndex[i]) << "\t";
+ std::cout << std::endl;
+
+
+ std::cout << "lower elements : " << std::endl;
+ for (Index i = 0; i < m.m_data.lowerSize(); i++)
+ std::cout << m.m_data.lower(i) << "\t";
+ std::cout << std::endl;
+ std::cout << "lower profile : " << std::endl;
+ for (Index i = 0; i < m.m_data.lowerProfileSize(); i++)
+ std::cout << m.m_data.lowerProfile(i) << "\t";
+ std::cout << std::endl;
+ std::cout << "lower startIdx : " << std::endl;
+ for (Index i = 0; i < m.m_data.lowerProfileSize(); i++)
+ std::cout << (IsRowMajor ? m.m_rowStartIndex[i] : m.m_colStartIndex[i]) << "\t";
+ std::cout << std::endl;
+ );
+ for (Index rowIdx = 0; rowIdx < m.rows(); rowIdx++) {
+ for (Index colIdx = 0; colIdx < m.cols(); colIdx++) {
+ s << m.coeff(rowIdx, colIdx) << "\t";
+ }
+ s << std::endl;
+ }
+ return s;
+ }
+
+ /** Destructor */
+ inline ~SkylineMatrix() {
+ delete[] m_colStartIndex;
+ delete[] m_rowStartIndex;
+ }
+
+ /** Overloaded for performance */
+ Scalar sum() const;
+};
+
+template<typename Scalar, int _Options>
+class SkylineMatrix<Scalar, _Options>::InnerUpperIterator {
+public:
+
+ InnerUpperIterator(const SkylineMatrix& mat, Index outer)
+ : m_matrix(mat), m_outer(outer),
+ m_id(_Options == RowMajor ? mat.m_colStartIndex[outer] : mat.m_rowStartIndex[outer] + 1),
+ m_start(m_id),
+ m_end(_Options == RowMajor ? mat.m_colStartIndex[outer + 1] : mat.m_rowStartIndex[outer + 1] + 1) {
+ }
+
+ inline InnerUpperIterator & operator++() {
+ m_id++;
+ return *this;
+ }
+
+ inline InnerUpperIterator & operator+=(Index shift) {
+ m_id += shift;
+ return *this;
+ }
+
+ inline Scalar value() const {
+ return m_matrix.m_data.upper(m_id);
+ }
+
+ inline Scalar* valuePtr() {
+ return const_cast<Scalar*> (&(m_matrix.m_data.upper(m_id)));
+ }
+
+ inline Scalar& valueRef() {
+ return const_cast<Scalar&> (m_matrix.m_data.upper(m_id));
+ }
+
+ inline Index index() const {
+ return IsRowMajor ? m_outer - m_matrix.m_data.upperProfile(m_outer) + (m_id - m_start) :
+ m_outer + (m_id - m_start) + 1;
+ }
+
+ inline Index row() const {
+ return IsRowMajor ? index() : m_outer;
+ }
+
+ inline Index col() const {
+ return IsRowMajor ? m_outer : index();
+ }
+
+ inline size_t size() const {
+ return m_matrix.m_data.upperProfile(m_outer);
+ }
+
+ inline operator bool() const {
+ return (m_id < m_end) && (m_id >= m_start);
+ }
+
+protected:
+ const SkylineMatrix& m_matrix;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
+};
+
+template<typename Scalar, int _Options>
+class SkylineMatrix<Scalar, _Options>::InnerLowerIterator {
+public:
+
+ InnerLowerIterator(const SkylineMatrix& mat, Index outer)
+ : m_matrix(mat),
+ m_outer(outer),
+ m_id(_Options == RowMajor ? mat.m_rowStartIndex[outer] : mat.m_colStartIndex[outer] + 1),
+ m_start(m_id),
+ m_end(_Options == RowMajor ? mat.m_rowStartIndex[outer + 1] : mat.m_colStartIndex[outer + 1] + 1) {
+ }
+
+ inline InnerLowerIterator & operator++() {
+ m_id++;
+ return *this;
+ }
+
+ inline InnerLowerIterator & operator+=(Index shift) {
+ m_id += shift;
+ return *this;
+ }
+
+ inline Scalar value() const {
+ return m_matrix.m_data.lower(m_id);
+ }
+
+ inline Scalar* valuePtr() {
+ return const_cast<Scalar*> (&(m_matrix.m_data.lower(m_id)));
+ }
+
+ inline Scalar& valueRef() {
+ return const_cast<Scalar&> (m_matrix.m_data.lower(m_id));
+ }
+
+ inline Index index() const {
+ return IsRowMajor ? m_outer - m_matrix.m_data.lowerProfile(m_outer) + (m_id - m_start) :
+ m_outer + (m_id - m_start) + 1;
+ }
+
+ inline Index row() const {
+ return IsRowMajor ? m_outer : index();
+ }
+
+ inline Index col() const {
+ return IsRowMajor ? index() : m_outer;
+ }
+
+ inline size_t size() const {
+ return m_matrix.m_data.lowerProfile(m_outer);
+ }
+
+ inline operator bool() const {
+ return (m_id < m_end) && (m_id >= m_start);
+ }
+
+protected:
+ const SkylineMatrix& m_matrix;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SkylineMatrix_H
diff --git a/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h b/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
new file mode 100644
index 0000000..b3a2372
--- /dev/null
+++ b/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h
@@ -0,0 +1,212 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Guillaume Saupin <guillaume.saupin@cea.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SKYLINEMATRIXBASE_H
+#define EIGEN_SKYLINEMATRIXBASE_H
+
+#include "SkylineUtil.h"
+
+namespace Eigen {
+
+/** \ingroup Skyline_Module
+ *
+ * \class SkylineMatrixBase
+ *
+ * \brief Base class of all skyline matrices and skyline expressions
+ *
+ * \param Derived
+ *
+ */
+template<typename Derived> class SkylineMatrixBase : public EigenBase<Derived> {
+public:
+
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::index<StorageKind>::type Index;
+
+ enum {
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+ /**< The number of rows at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
+
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+ /**< The number of columns at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
+
+
+ SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
+ internal::traits<Derived>::ColsAtCompileTime>::ret),
+ /**< This is equal to the number of coefficients, i.e. the number of
+ * rows times the number of columns, or to \a Dynamic if this is not
+ * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
+
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+
+ MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
+ MaxColsAtCompileTime>::ret),
+
+ IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
+ /**< This is set to true if either the number of rows or the number of
+ * columns is known at compile-time to be equal to 1. Indeed, in that case,
+ * we are dealing with a column-vector (if there is only one column) or with
+ * a row-vector (if there is only one row). */
+
+ Flags = internal::traits<Derived>::Flags,
+ /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
+ * constructed from this one. See the \ref flags "list of flags".
+ */
+
+ CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+ /**< This is a rough measure of how expensive it is to read one coefficient from
+ * this expression.
+ */
+
+ IsRowMajor = Flags & RowMajorBit ? 1 : 0
+ };
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is the "real scalar" type; if the \a Scalar type is already real numbers
+ * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
+ * \a Scalar is \a std::complex<T> then RealScalar is \a T.
+ *
+ * \sa class NumTraits
+ */
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** type of the equivalent square matrix */
+ typedef Matrix<Scalar, EIGEN_SIZE_MAX(RowsAtCompileTime, ColsAtCompileTime),
+ EIGEN_SIZE_MAX(RowsAtCompileTime, ColsAtCompileTime) > SquareMatrixType;
+
+ inline const Derived& derived() const {
+ return *static_cast<const Derived*> (this);
+ }
+
+ inline Derived& derived() {
+ return *static_cast<Derived*> (this);
+ }
+
+ inline Derived& const_cast_derived() const {
+ return *static_cast<Derived*> (const_cast<SkylineMatrixBase*> (this));
+ }
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+ /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
+ inline Index rows() const {
+ return derived().rows();
+ }
+
+ /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
+ inline Index cols() const {
+ return derived().cols();
+ }
+
+ /** \returns the number of coefficients, which is \a rows()*cols().
+ * \sa rows(), cols(), SizeAtCompileTime. */
+ inline Index size() const {
+ return rows() * cols();
+ }
+
+ /** \returns the number of nonzero coefficients which is in practice the number
+ * of stored coefficients. */
+ inline Index nonZeros() const {
+ return derived().nonZeros();
+ }
+
+ /** \returns the size of the storage major dimension,
+ * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
+ Index outerSize() const {
+ return (int(Flags) & RowMajorBit) ? this->rows() : this->cols();
+ }
+
+ /** \returns the size of the inner dimension according to the storage order,
+ * i.e., the number of rows for a column-major matrix, and the number of columns otherwise */
+ Index innerSize() const {
+ return (int(Flags) & RowMajorBit) ? this->cols() : this->rows();
+ }
+
+ bool isRValue() const {
+ return m_isRValue;
+ }
+
+ Derived& markAsRValue() {
+ m_isRValue = true;
+ return derived();
+ }
+
+ SkylineMatrixBase() : m_isRValue(false) {
+ /* TODO check flags */
+ }
+
+ inline Derived & operator=(const Derived& other) {
+ this->operator=<Derived>(other);
+ return derived();
+ }
+
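+ /** \internal Generic assignment: resizes \c *this to the dimensions of \a other
+ * and copies every nonzero coefficient one by one through insert(). */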
+ template<typename OtherDerived>
+ inline void assignGeneric(const OtherDerived& other) {
+ derived().resize(other.rows(), other.cols());
+ for (Index row = 0; row < rows(); row++)
+ for (Index col = 0; col < cols(); col++) {
+ if (other.coeff(row, col) != Scalar(0))
+ derived().insert(row, col) = other.coeff(row, col);
+ }
+ derived().finalize();
+ }
+
+ template<typename OtherDerived>
+ inline Derived & operator=(const SkylineMatrixBase<OtherDerived>& other) {
+ //TODO: implement (e.g. via assignGeneric(other.derived()))
+ return derived();
+ }
+
+ template<typename Lhs, typename Rhs>
+ inline Derived & operator=(const SkylineProduct<Lhs, Rhs, SkylineTimeSkylineProduct>& product);
+
+ friend std::ostream & operator <<(std::ostream & s, const SkylineMatrixBase& m) {
+ s << m.derived();
+ return s;
+ }
+
+ template<typename OtherDerived>
+ const typename SkylineProductReturnType<Derived, OtherDerived>::Type
+ operator*(const MatrixBase<OtherDerived> &other) const;
+
+ /** \internal use operator= */
+ template<typename DenseDerived>
+ void evalTo(MatrixBase<DenseDerived>& dst) const {
+ dst.setZero();
+ for (Index i = 0; i < rows(); i++)
+ for (Index j = 0; j < cols(); j++)
+ dst(i, j) = derived().coeff(i, j);
+ }
+
+ Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime> toDense() const {
+ return derived();
+ }
+
+ /** \returns the matrix or vector obtained by evaluating this expression.
+ *
+ * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
+ * a const reference, in order to avoid a useless copy.
+ */
+ EIGEN_STRONG_INLINE const typename internal::eval<Derived, IsSkyline>::type eval() const {
+ return typename internal::eval<Derived, IsSkyline>::type(derived());
+ }
+
+protected:
+ bool m_isRValue;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SKYLINEMATRIXBASE_H
diff --git a/unsupported/Eigen/src/Skyline/SkylineProduct.h b/unsupported/Eigen/src/Skyline/SkylineProduct.h
new file mode 100644
index 0000000..1ddf455
--- /dev/null
+++ b/unsupported/Eigen/src/Skyline/SkylineProduct.h
@@ -0,0 +1,295 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Guillaume Saupin <guillaume.saupin@cea.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SKYLINEPRODUCT_H
+#define EIGEN_SKYLINEPRODUCT_H
+
+namespace Eigen {
+
+template<typename Lhs, typename Rhs, int ProductMode>
+struct SkylineProductReturnType {
+ typedef const typename internal::nested<Lhs, Rhs::RowsAtCompileTime>::type LhsNested;
+ typedef const typename internal::nested<Rhs, Lhs::RowsAtCompileTime>::type RhsNested;
+
+ typedef SkylineProduct<LhsNested, RhsNested, ProductMode> Type;
+};
+
+template<typename LhsNested, typename RhsNested, int ProductMode>
+struct internal::traits<SkylineProduct<LhsNested, RhsNested, ProductMode> > {
+ // clean the nested types:
+ typedef typename internal::remove_all<LhsNested>::type _LhsNested;
+ typedef typename internal::remove_all<RhsNested>::type _RhsNested;
+ typedef typename _LhsNested::Scalar Scalar;
+
+ enum {
+ LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+ RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+ LhsFlags = _LhsNested::Flags,
+ RhsFlags = _RhsNested::Flags,
+
+ RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
+ ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
+ InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+
+ MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
+
+ EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
+ ResultIsSkyline = ProductMode == SkylineTimeSkylineProduct,
+
+ RemovedBits = ~((EvalToRowMajor ? 0 : RowMajorBit) | (ResultIsSkyline ? 0 : SkylineBit)),
+
+ Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
+ | EvalBeforeAssigningBit
+ | EvalBeforeNestingBit,
+
+ CoeffReadCost = Dynamic
+ };
+
+ typedef typename internal::conditional<ResultIsSkyline,
+ SkylineMatrixBase<SkylineProduct<LhsNested, RhsNested, ProductMode> >,
+ MatrixBase<SkylineProduct<LhsNested, RhsNested, ProductMode> > >::type Base;
+};
+
+namespace internal {
+template<typename LhsNested, typename RhsNested, int ProductMode>
+class SkylineProduct : no_assignment_operator,
+public traits<SkylineProduct<LhsNested, RhsNested, ProductMode> >::Base {
+public:
+
+ EIGEN_GENERIC_PUBLIC_INTERFACE(SkylineProduct)
+
+private:
+
+ typedef typename traits<SkylineProduct>::_LhsNested _LhsNested;
+ typedef typename traits<SkylineProduct>::_RhsNested _RhsNested;
+
+public:
+
+ template<typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE SkylineProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs) {
+ eigen_assert(lhs.cols() == rhs.rows());
+
+ enum {
+ ProductIsValid = _LhsNested::ColsAtCompileTime == Dynamic
+ || _RhsNested::RowsAtCompileTime == Dynamic
+ || int(_LhsNested::ColsAtCompileTime) == int(_RhsNested::RowsAtCompileTime),
+ AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested, _RhsNested)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwise()*v2
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const {
+ return m_lhs.rows();
+ }
+
+ EIGEN_STRONG_INLINE Index cols() const {
+ return m_rhs.cols();
+ }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const {
+ return m_lhs;
+ }
+
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const {
+ return m_rhs;
+ }
+
+protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+// dense = skyline * dense
+// Note that here we force no inlining and separate the setZero() because GCC messes up otherwise
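+// Both kernels below work in three passes over the skyline storage:
+// 1. the diagonal entries initialize dst(i,j) = diag(i) * rhs(i,j);
+// 2. in the row-major kernel the lower triangular profile is gathered row by row,
+// while the upper profile is scattered column by column;
+// 3. the column-major kernel swaps these roles (upper gathered, lower scattered).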
+
+template<typename Lhs, typename Rhs, typename Dest>
+EIGEN_DONT_INLINE void skyline_row_major_time_dense_product(const Lhs& lhs, const Rhs& rhs, Dest& dst) {
+ typedef typename remove_all<Lhs>::type _Lhs;
+ typedef typename remove_all<Rhs>::type _Rhs;
+ typedef typename traits<Lhs>::Scalar Scalar;
+
+ enum {
+ LhsIsRowMajor = (_Lhs::Flags & RowMajorBit) == RowMajorBit,
+ LhsIsSelfAdjoint = (_Lhs::Flags & SelfAdjointBit) == SelfAdjointBit,
+ ProcessFirstHalf = LhsIsSelfAdjoint
+ && (((_Lhs::Flags & (UpperTriangularBit | LowerTriangularBit)) == 0)
+ || ((_Lhs::Flags & UpperTriangularBit) && !LhsIsRowMajor)
+ || ((_Lhs::Flags & LowerTriangularBit) && LhsIsRowMajor)),
+ ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf)
+ };
+
+ // Use the matrix diagonal part (possible improvement: use an inner iterator on the dense matrix).
+ for (Index col = 0; col < rhs.cols(); col++) {
+ for (Index row = 0; row < lhs.rows(); row++) {
+ dst(row, col) = lhs.coeffDiag(row) * rhs(row, col);
+ }
+ }
+ //Use matrix lower triangular part
+ for (Index row = 0; row < lhs.rows(); row++) {
+ typename _Lhs::InnerLowerIterator lIt(lhs, row);
+ const Index stop = lIt.col() + lIt.size();
+ for (Index col = 0; col < rhs.cols(); col++) {
+
+ Index k = lIt.col();
+ Scalar tmp = 0;
+ while (k < stop) {
+ tmp +=
+ lIt.value() *
+ rhs(k++, col);
+ ++lIt;
+ }
+ dst(row, col) += tmp;
+ lIt += -lIt.size();
+ }
+
+ }
+
+ //Use matrix upper triangular part
+ for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) {
+ typename _Lhs::InnerUpperIterator uIt(lhs, lhscol);
+ const Index stop = uIt.size() + uIt.row();
+ for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) {
+
+
+ const Scalar rhsCoeff = rhs.coeff(lhscol, rhscol);
+ Index k = uIt.row();
+ while (k < stop) {
+ dst(k++, rhscol) +=
+ uIt.value() *
+ rhsCoeff;
+ ++uIt;
+ }
+ uIt += -uIt.size();
+ }
+ }
+
+}
+
+template<typename Lhs, typename Rhs, typename Dest>
+EIGEN_DONT_INLINE void skyline_col_major_time_dense_product(const Lhs& lhs, const Rhs& rhs, Dest& dst) {
+ typedef typename remove_all<Lhs>::type _Lhs;
+ typedef typename remove_all<Rhs>::type _Rhs;
+ typedef typename traits<Lhs>::Scalar Scalar;
+
+ enum {
+ LhsIsRowMajor = (_Lhs::Flags & RowMajorBit) == RowMajorBit,
+ LhsIsSelfAdjoint = (_Lhs::Flags & SelfAdjointBit) == SelfAdjointBit,
+ ProcessFirstHalf = LhsIsSelfAdjoint
+ && (((_Lhs::Flags & (UpperTriangularBit | LowerTriangularBit)) == 0)
+ || ((_Lhs::Flags & UpperTriangularBit) && !LhsIsRowMajor)
+ || ((_Lhs::Flags & LowerTriangularBit) && LhsIsRowMajor)),
+ ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf)
+ };
+
+ // Use the matrix diagonal part (possible improvement: use an inner iterator on the dense matrix).
+ for (Index col = 0; col < rhs.cols(); col++) {
+ for (Index row = 0; row < lhs.rows(); row++) {
+ dst(row, col) = lhs.coeffDiag(row) * rhs(row, col);
+ }
+ }
+
+ //Use matrix upper triangular part
+ for (Index row = 0; row < lhs.rows(); row++) {
+ typename _Lhs::InnerUpperIterator uIt(lhs, row);
+ const Index stop = uIt.col() + uIt.size();
+ for (Index col = 0; col < rhs.cols(); col++) {
+
+ Index k = uIt.col();
+ Scalar tmp = 0;
+ while (k < stop) {
+ tmp +=
+ uIt.value() *
+ rhs(k++, col);
+ ++uIt;
+ }
+
+
+ dst(row, col) += tmp;
+ uIt += -uIt.size();
+ }
+ }
+
+ //Use matrix lower triangular part
+ for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) {
+ typename _Lhs::InnerLowerIterator lIt(lhs, lhscol);
+ const Index stop = lIt.size() + lIt.row();
+ for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) {
+
+ const Scalar rhsCoeff = rhs.coeff(lhscol, rhscol);
+ Index k = lIt.row();
+ while (k < stop) {
+ dst(k++, rhscol) +=
+ lIt.value() *
+ rhsCoeff;
+ ++lIt;
+ }
+ lIt += -lIt.size();
+ }
+ }
+
+}
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit>
+ struct skyline_product_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct skyline_product_selector<Lhs, Rhs, ResultType, RowMajor> {
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType & res) {
+ skyline_row_major_time_dense_product<Lhs, Rhs, ResultType > (lhs, rhs, res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct skyline_product_selector<Lhs, Rhs, ResultType, ColMajor> {
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType & res) {
+ skyline_col_major_time_dense_product<Lhs, Rhs, ResultType > (lhs, rhs, res);
+ }
+};
+
+} // end namespace internal
+
+// template<typename Derived>
+// template<typename Lhs, typename Rhs >
+// Derived & MatrixBase<Derived>::lazyAssign(const SkylineProduct<Lhs, Rhs, SkylineTimeDenseProduct>& product) {
+// typedef typename internal::remove_all<Lhs>::type _Lhs;
+// internal::skyline_product_selector<typename internal::remove_all<Lhs>::type,
+// typename internal::remove_all<Rhs>::type,
+// Derived>::run(product.lhs(), product.rhs(), derived());
+//
+// return derived();
+// }
+
+// skyline * dense
+
+template<typename Derived>
+template<typename OtherDerived >
+EIGEN_STRONG_INLINE const typename SkylineProductReturnType<Derived, OtherDerived>::Type
+SkylineMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const {
+
+ return typename SkylineProductReturnType<Derived, OtherDerived>::Type(derived(), other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SKYLINEPRODUCT_H
diff --git a/unsupported/Eigen/src/Skyline/SkylineStorage.h b/unsupported/Eigen/src/Skyline/SkylineStorage.h
new file mode 100644
index 0000000..378a8de
--- /dev/null
+++ b/unsupported/Eigen/src/Skyline/SkylineStorage.h
@@ -0,0 +1,259 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Guillaume Saupin <guillaume.saupin@cea.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SKYLINE_STORAGE_H
+#define EIGEN_SKYLINE_STORAGE_H
+
+namespace Eigen {
+
+/** Stores a skyline set of values in three structures:
+ * - the diagonal elements
+ * - the upper elements
+ * - the lower elements
+ *
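+ * For instance, for a small tridiagonal matrix the coefficients are split as follows
+ * (an illustrative sketch of the layout, not actual code):
+ * \code
+ * | d0 u0 0  |      m_diag  = { d0, d1, d2 }
+ * | l0 d1 u1 |      m_upper = { u0, u1 }
+ * | 0  l1 d2 |      m_lower = { l0, l1 }
+ * \endcode
+ *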
+ */
+template<typename Scalar>
+class SkylineStorage {
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef SparseIndex Index;
+public:
+
+ SkylineStorage()
+ : m_diag(0),
+ m_lower(0),
+ m_upper(0),
+ m_lowerProfile(0),
+ m_upperProfile(0),
+ m_diagSize(0),
+ m_upperSize(0),
+ m_lowerSize(0),
+ m_upperProfileSize(0),
+ m_lowerProfileSize(0),
+ m_allocatedSize(0) {
+ }
+
+ SkylineStorage(const SkylineStorage& other)
+ : m_diag(0),
+ m_lower(0),
+ m_upper(0),
+ m_lowerProfile(0),
+ m_upperProfile(0),
+ m_diagSize(0),
+ m_upperSize(0),
+ m_lowerSize(0),
+ m_upperProfileSize(0),
+ m_lowerProfileSize(0),
+ m_allocatedSize(0) {
+ *this = other;
+ }
+
+ SkylineStorage & operator=(const SkylineStorage& other) {
+ resize(other.diagSize(), other.m_upperProfileSize, other.m_lowerProfileSize, other.upperSize(), other.lowerSize());
+ memcpy(m_diag, other.m_diag, m_diagSize * sizeof (Scalar));
+ memcpy(m_upper, other.m_upper, other.upperSize() * sizeof (Scalar));
+ memcpy(m_lower, other.m_lower, other.lowerSize() * sizeof (Scalar));
+ memcpy(m_upperProfile, other.m_upperProfile, m_upperProfileSize * sizeof (Index));
+ memcpy(m_lowerProfile, other.m_lowerProfile, m_lowerProfileSize * sizeof (Index));
+ return *this;
+ }
+
+ void swap(SkylineStorage& other) {
+ std::swap(m_diag, other.m_diag);
+ std::swap(m_upper, other.m_upper);
+ std::swap(m_lower, other.m_lower);
+ std::swap(m_upperProfile, other.m_upperProfile);
+ std::swap(m_lowerProfile, other.m_lowerProfile);
+ std::swap(m_diagSize, other.m_diagSize);
+ std::swap(m_upperSize, other.m_upperSize);
+ std::swap(m_lowerSize, other.m_lowerSize);
+ std::swap(m_upperProfileSize, other.m_upperProfileSize);
+ std::swap(m_lowerProfileSize, other.m_lowerProfileSize);
+ std::swap(m_allocatedSize, other.m_allocatedSize);
+ }
+
+ ~SkylineStorage() {
+ delete[] m_diag;
+ delete[] m_upper;
+ if (m_upper != m_lower)
+ delete[] m_lower;
+ delete[] m_upperProfile;
+ delete[] m_lowerProfile;
+ }
+
+ void reserve(Index size, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize) {
+ Index newAllocatedSize = size + upperSize + lowerSize;
+ if (newAllocatedSize > m_allocatedSize)
+ reallocate(size, upperProfileSize, lowerProfileSize, upperSize, lowerSize);
+ }
+
+ void squeeze() {
+ if (m_allocatedSize > m_diagSize + m_upperSize + m_lowerSize)
+ reallocate(m_diagSize, m_upperProfileSize, m_lowerProfileSize, m_upperSize, m_lowerSize);
+ }
+
+ void resize(Index diagSize, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize, float reserveSizeFactor = 0) {
+ if (m_allocatedSize < diagSize + upperSize + lowerSize)
+ reallocate(diagSize, upperProfileSize, lowerProfileSize, upperSize + Index(reserveSizeFactor * upperSize), lowerSize + Index(reserveSizeFactor * lowerSize));
+ m_diagSize = diagSize;
+ m_upperSize = upperSize;
+ m_lowerSize = lowerSize;
+ m_upperProfileSize = upperProfileSize;
+ m_lowerProfileSize = lowerProfileSize;
+ }
+
+ inline Index diagSize() const {
+ return m_diagSize;
+ }
+
+ inline Index upperSize() const {
+ return m_upperSize;
+ }
+
+ inline Index lowerSize() const {
+ return m_lowerSize;
+ }
+
+ inline Index upperProfileSize() const {
+ return m_upperProfileSize;
+ }
+
+ inline Index lowerProfileSize() const {
+ return m_lowerProfileSize;
+ }
+
+ inline Index allocatedSize() const {
+ return m_allocatedSize;
+ }
+
+ inline void clear() {
+ m_diagSize = 0;
+ }
+
+ inline Scalar& diag(Index i) {
+ return m_diag[i];
+ }
+
+ inline const Scalar& diag(Index i) const {
+ return m_diag[i];
+ }
+
+ inline Scalar& upper(Index i) {
+ return m_upper[i];
+ }
+
+ inline const Scalar& upper(Index i) const {
+ return m_upper[i];
+ }
+
+ inline Scalar& lower(Index i) {
+ return m_lower[i];
+ }
+
+ inline const Scalar& lower(Index i) const {
+ return m_lower[i];
+ }
+
+ inline Index& upperProfile(Index i) {
+ return m_upperProfile[i];
+ }
+
+ inline const Index& upperProfile(Index i) const {
+ return m_upperProfile[i];
+ }
+
+ inline Index& lowerProfile(Index i) {
+ return m_lowerProfile[i];
+ }
+
+ inline const Index& lowerProfile(Index i) const {
+ return m_lowerProfile[i];
+ }
+
+ static SkylineStorage Map(Index* upperProfile, Index* lowerProfile, Scalar* diag, Scalar* upper, Scalar* lower, Index size, Index upperSize, Index lowerSize) {
+ SkylineStorage res;
+ res.m_upperProfile = upperProfile;
+ res.m_lowerProfile = lowerProfile;
+ res.m_diag = diag;
+ res.m_upper = upper;
+ res.m_lower = lower;
+ res.m_allocatedSize = res.m_diagSize = size;
+ res.m_upperSize = upperSize;
+ res.m_lowerSize = lowerSize;
+ return res;
+ }
+
+ inline void reset() {
+ memset(m_diag, 0, m_diagSize * sizeof (Scalar));
+ memset(m_upper, 0, m_upperSize * sizeof (Scalar));
+ memset(m_lower, 0, m_lowerSize * sizeof (Scalar));
+ memset(m_upperProfile, 0, m_diagSize * sizeof (Index));
+ memset(m_lowerProfile, 0, m_diagSize * sizeof (Index));
+ }
+
+ void prune(Scalar reference, RealScalar epsilon = dummy_precision<RealScalar>()) {
+ //TODO
+ }
+
+protected:
+
+ inline void reallocate(Index diagSize, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize) {
+
+ Scalar* diag = new Scalar[diagSize];
+ Scalar* upper = new Scalar[upperSize];
+ Scalar* lower = new Scalar[lowerSize];
+ Index* upperProfile = new Index[upperProfileSize];
+ Index* lowerProfile = new Index[lowerProfileSize];
+
+ Index copyDiagSize = (std::min)(diagSize, m_diagSize);
+ Index copyUpperSize = (std::min)(upperSize, m_upperSize);
+ Index copyLowerSize = (std::min)(lowerSize, m_lowerSize);
+ Index copyUpperProfileSize = (std::min)(upperProfileSize, m_upperProfileSize);
+ Index copyLowerProfileSize = (std::min)(lowerProfileSize, m_lowerProfileSize);
+
+ // copy
+ memcpy(diag, m_diag, copyDiagSize * sizeof (Scalar));
+ memcpy(upper, m_upper, copyUpperSize * sizeof (Scalar));
+ memcpy(lower, m_lower, copyLowerSize * sizeof (Scalar));
+ memcpy(upperProfile, m_upperProfile, copyUpperProfileSize * sizeof (Index));
+ memcpy(lowerProfile, m_lowerProfile, copyLowerProfileSize * sizeof (Index));
+
+
+
+ // delete old stuff
+ delete[] m_diag;
+ delete[] m_upper;
+ delete[] m_lower;
+ delete[] m_upperProfile;
+ delete[] m_lowerProfile;
+ m_diag = diag;
+ m_upper = upper;
+ m_lower = lower;
+ m_upperProfile = upperProfile;
+ m_lowerProfile = lowerProfile;
+ m_allocatedSize = diagSize + upperSize + lowerSize;
+ m_upperSize = upperSize;
+ m_lowerSize = lowerSize;
+ }
+
+public:
+ Scalar* m_diag;
+ Scalar* m_upper;
+ Scalar* m_lower;
+ Index* m_upperProfile;
+ Index* m_lowerProfile;
+ Index m_diagSize;
+ Index m_upperSize;
+ Index m_lowerSize;
+ Index m_upperProfileSize;
+ Index m_lowerProfileSize;
+ Index m_allocatedSize;
+
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SKYLINE_STORAGE_H
diff --git a/unsupported/Eigen/src/Skyline/SkylineUtil.h b/unsupported/Eigen/src/Skyline/SkylineUtil.h
new file mode 100644
index 0000000..75eb612
--- /dev/null
+++ b/unsupported/Eigen/src/Skyline/SkylineUtil.h
@@ -0,0 +1,89 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Guillaume Saupin <guillaume.saupin@cea.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SKYLINEUTIL_H
+#define EIGEN_SKYLINEUTIL_H
+
+namespace Eigen {
+
+#ifdef NDEBUG
+#define EIGEN_DBG_SKYLINE(X)
+#else
+#define EIGEN_DBG_SKYLINE(X) X
+#endif
+
+const unsigned int SkylineBit = 0x1200;
+template<typename Lhs, typename Rhs, int ProductMode> class SkylineProduct;
+enum AdditionalProductEvaluationMode {SkylineTimeDenseProduct, SkylineTimeSkylineProduct, DenseTimeSkylineProduct};
+enum {IsSkyline = SkylineBit};
+
+
+#define EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SkylineMatrixBase<OtherDerived>& other) \
+{ \
+ return Base::operator Op(other.derived()); \
+} \
+EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
+{ \
+ return Base::operator Op(other); \
+}
+
+#define EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename Other> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
+{ \
+ return Base::operator Op(scalar); \
+}
+
+#define EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
+ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
+ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
+ EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
+ EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
+ EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
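+
+// For instance, EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATORS(MyMatrix) forwards
+// operator=, operator+= and operator-= for skyline expressions to the base class,
+// together with the scalar operators *= and /=.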
+
+#define _EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \
+ typedef BaseClass Base; \
+ typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; \
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+ typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::internal::index<StorageKind>::type Index; \
+ enum { Flags = Eigen::internal::traits<Derived>::Flags, };
+
+#define EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived) \
+ _EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SkylineMatrixBase<Derived>)
+
+template<typename Derived> class SkylineMatrixBase;
+template<typename _Scalar, int _Flags = 0> class SkylineMatrix;
+template<typename _Scalar, int _Flags = 0> class DynamicSkylineMatrix;
+template<typename _Scalar, int _Flags = 0> class SkylineVector;
+template<typename _Scalar, int _Flags = 0> class MappedSkylineMatrix;
+
+namespace internal {
+
+template<typename Lhs, typename Rhs> struct skyline_product_mode;
+template<typename Lhs, typename Rhs, int ProductMode = skyline_product_mode<Lhs,Rhs>::value> struct SkylineProductReturnType;
+
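+// Evaluating a skyline expression yields a plain SkylineMatrix with the same
+// scalar type and flags.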
+template<typename T> class eval<T,IsSkyline>
+{
+ typedef typename traits<T>::Scalar _Scalar;
+ enum {
+ _Flags = traits<T>::Flags
+ };
+
+ public:
+ typedef SkylineMatrix<_Scalar, _Flags> type;
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SKYLINEUTIL_H
diff --git a/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h
new file mode 100644
index 0000000..e9ec746
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h
@@ -0,0 +1,122 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H
+#define EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H
+
+namespace Eigen {
+
+#if 0
+
+// NOTE Have to be reimplemented as a specialization of BlockImpl< DynamicSparseMatrix<_Scalar, _Options, _Index>, ... >
+// See SparseBlock.h for an example
+
+
+/***************************************************************************
+* specialisation for DynamicSparseMatrix
+***************************************************************************/
+
+template<typename _Scalar, int _Options, typename _Index, int Size>
+class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options, _Index>, Size>
+ : public SparseMatrixBase<SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options, _Index>, Size> >
+{
+ typedef DynamicSparseMatrix<_Scalar, _Options, _Index> MatrixType;
+ public:
+
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ eigen_assert(Size!=Dynamic);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+ template<typename OtherDerived>
+ inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit))
+ {
+ // need to transpose => perform a block evaluation followed by a big swap
+ DynamicSparseMatrix<Scalar,IsRowMajor?RowMajorBit:0> aux(other);
+ *this = aux.markAsRValue();
+ }
+ else
+ {
+ // evaluate/copy vector per vector
+ for (Index j=0; j<m_outerSize.value(); ++j)
+ {
+ SparseVector<Scalar,IsRowMajor ? RowMajorBit : 0> aux(other.innerVector(j));
+ m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data());
+ }
+ }
+ return *this;
+ }
+
+ inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+ {
+ return operator=<SparseInnerVectorSet>(other);
+ }
+
+ Index nonZeros() const
+ {
+ Index count = 0;
+ for (Index j=0; j<m_outerSize.value(); ++j)
+ count += m_matrix._data()[m_outerStart+j].size();
+ return count;
+ }
+
+ const Scalar& lastCoeff() const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+ eigen_assert(m_matrix.data()[m_outerStart].size()>0);
+ return m_matrix.data()[m_outerStart].value(m_matrix.data()[m_outerStart].size()-1);
+ }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
+
+};
+
+#endif
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H
diff --git a/unsupported/Eigen/src/SparseExtra/CMakeLists.txt b/unsupported/Eigen/src/SparseExtra/CMakeLists.txt
new file mode 100644
index 0000000..7ea32ca
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SparseExtra_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_SparseExtra_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/SparseExtra COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
new file mode 100644
index 0000000..dec16df
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h
@@ -0,0 +1,357 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H
+#define EIGEN_DYNAMIC_SPARSEMATRIX_H
+
+namespace Eigen {
+
+/** \deprecated use a SparseMatrix in an uncompressed mode
+ *
+ * \class DynamicSparseMatrix
+ *
+ * \brief A sparse matrix class designed for matrix assembly purpose
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows
+ * random read/write accesses in O(log(rho*outer_size)) where \c rho is the probability that a coefficient is
+ * nonzero and outer_size is the number of columns if the matrix is column-major and the number of rows
+ * otherwise.
+ *
+ * Internally, the data are stored as a std::vector of compressed vectors. The performance of random writes
+ * might decrease as the number of nonzeros per inner vector increases. In practice, we observed very good
+ * performance up to about 100 nonzeros/vector, and it remains relatively good up to 500 nonzeros/vector.
+ *
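+ * A minimal assembly sketch (sizes and values are illustrative):
+ * \code
+ * Eigen::DynamicSparseMatrix<double> A(1000,1000);
+ * A.coeffRef(3,7) += 2.5;            // random write: inserts the coefficient if needed
+ * double v = A.coeff(3,7);           // random read: binary search in the inner vector
+ * Eigen::SparseMatrix<double> B(A);  // compress into a plain SparseMatrix once assembly is done
+ * \endcode
+ *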
+ * \see SparseMatrix
+ */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<DynamicSparseMatrix<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = _Options | NestByRefBit | LvalueBit,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = OuterRandomAccessPattern
+ };
+};
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+ class DynamicSparseMatrix
+ : public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix)
+ // FIXME: why are these operators already available?
+ // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=)
+ // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=)
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+ using Base::IsRowMajor;
+ using Base::operator=;
+ enum {
+ Options = _Options
+ };
+
+ protected:
+
+ typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ Index m_innerSize;
+ std::vector<internal::CompressedStorage<Scalar,Index> > m_data;
+
+ public:
+
+ inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return static_cast<Index>(m_data.size()); }
+ inline Index innerNonZeros(Index j) const { return m_data[j].size(); }
+
+ std::vector<internal::CompressedStorage<Scalar,Index> >& _data() { return m_data; }
+ const std::vector<internal::CompressedStorage<Scalar,Index> >& _data() const { return m_data; }
+
+ /** \returns the coefficient value at given position \a row, \a col
+ * This operation involves an O(log(rho*outer_size)) binary search.
+ */
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return m_data[outer].at(inner);
+ }
+
+ /** \returns a reference to the coefficient value at given position \a row, \a col
+ * This operation involves an O(log(rho*outer_size)) binary search. If the coefficient does not
+ * exist yet, then a sorted insertion into a sequential buffer is performed.
+ */
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return m_data[outer].atWithInsertion(inner);
+ }
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ void setZero()
+ {
+ for (Index j=0; j<outerSize(); ++j)
+ m_data[j].clear();
+ }
+
+ /** \returns the number of non zero coefficients */
+ Index nonZeros() const
+ {
+ Index res = 0;
+ for (Index j=0; j<outerSize(); ++j)
+ res += static_cast<Index>(m_data[j].size());
+ return res;
+ }
+
+
+
+ void reserve(Index reserveSize = 1000)
+ {
+ if (outerSize()>0)
+ {
+ Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4));
+ for (Index j=0; j<outerSize(); ++j)
+ {
+ m_data[j].reserve(reserveSizePerVector);
+ }
+ }
+ }
+
+ /** Does nothing: provided for compatibility with SparseMatrix */
+ inline void startVec(Index /*outer*/) {}
+
+ /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+ * - the nonzero does not already exist
+ * - the new coefficient is the last one of the given inner vector.
+ *
+ * \sa insert, insertBackByOuterInner */
+ inline Scalar& insertBack(Index row, Index col)
+ {
+ return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+ }
+
+ /** \sa insertBack */
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(outer<Index(m_data.size()) && inner<m_innerSize && "out of range");
+ eigen_assert(((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner))
+ && "wrong sorted insertion");
+ m_data[outer].append(0, inner);
+ return m_data[outer].value(m_data[outer].size()-1);
+ }
+
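+ /** Inserts a new nonzero at (\a row, \a col) and returns a reference to its
+ * (zero-initialized) value. This is a linear-time sorted insertion starting from
+ * the end of the inner vector: all entries with a larger inner index are shifted
+ * one position to the right. */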
+ inline Scalar& insert(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index startId = 0;
+ Index id = static_cast<Index>(m_data[outer].size()) - 1;
+ m_data[outer].resize(id+2,1);
+
+ while ( (id >= startId) && (m_data[outer].index(id) > inner) )
+ {
+ m_data[outer].index(id+1) = m_data[outer].index(id);
+ m_data[outer].value(id+1) = m_data[outer].value(id);
+ --id;
+ }
+ m_data[outer].index(id+1) = inner;
+ m_data[outer].value(id+1) = 0;
+ return m_data[outer].value(id+1);
+ }
+
+ /** Does nothing: provided for compatibility with SparseMatrix */
+ inline void finalize() {}
+
+ /** Suppresses all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ for (Index j=0; j<outerSize(); ++j)
+ m_data[j].prune(reference,epsilon);
+ }
+
+ /** Resize the matrix without preserving the data (the matrix is set to zero)
+ */
+ void resize(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ setZero();
+ if (Index(m_data.size()) != outerSize)
+ {
+ m_data.resize(outerSize);
+ }
+ }
+
+ void resizeAndKeepData(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ const Index innerSize = IsRowMajor ? cols : rows;
+ if (m_innerSize>innerSize)
+ {
+ // remove all coefficients with innerCoord>=innerSize
+ // TODO
+ //std::cerr << "not implemented yet\n";
+ exit(2);
+ }
+ if (m_data.size() != outerSize)
+ {
+ m_data.resize(outerSize);
+ }
+ }
+
+ /** The class DynamicSparseMatrix is deprecated */
+ EIGEN_DEPRECATED inline DynamicSparseMatrix()
+ : m_innerSize(0), m_data(0)
+ {
+ eigen_assert(innerSize()==0 && outerSize()==0);
+ }
+
+ /** The class DynamicSparseMatrix is deprecated */
+ EIGEN_DEPRECATED inline DynamicSparseMatrix(Index rows, Index cols)
+ : m_innerSize(0)
+ {
+ resize(rows, cols);
+ }
+
+ /** The class DynamicSparseMatrix is deprecated */
+ template<typename OtherDerived>
+ EIGEN_DEPRECATED explicit inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ : m_innerSize(0)
+ {
+ Base::operator=(other.derived());
+ }
+
+ inline DynamicSparseMatrix(const DynamicSparseMatrix& other)
+ : Base(), m_innerSize(0)
+ {
+ *this = other.derived();
+ }
+
+ inline void swap(DynamicSparseMatrix& other)
+ {
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+ std::swap(m_innerSize, other.m_innerSize);
+ //std::swap(m_outerSize, other.m_outerSize);
+ m_data.swap(other.m_data);
+ }
+
+ inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.rows(), other.cols());
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ /** Destructor */
+ inline ~DynamicSparseMatrix() {}
+
+ public:
+
+ /** \deprecated
+ * Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
+ EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
+ {
+ setZero();
+ reserve(reserveSize);
+ }
+
+ /** \deprecated use insert()
+ * inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that:
+ * 1 - the coefficient does not exist yet
+ * 2 - this is the coefficient with the greatest inner coordinate for the given outer coordinate.
+ * In other words, assuming \c *this is column-major, there must not exist any nonzero coefficient of coordinates
+ * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
+ *
+ * \see fillrand(), coeffRef()
+ */
+ EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return insertBack(outer,inner);
+ }
+
+ /** \deprecated use insert()
+ * Like fill() but with random inner coordinates.
+ * Compared to the generic coeffRef(), the unique limitation is that we assume
+ * the coefficient does not exist yet.
+ */
+ EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
+ {
+ return insert(row,col);
+ }
+
+ /** \deprecated use finalize()
+ * Does nothing. Provided for compatibility with SparseMatrix. */
+ EIGEN_DEPRECATED void endFill() {}
+
+# ifdef EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
+# include EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
+# endif
+ };
+
+template<typename Scalar, int _Options, typename _Index>
+class DynamicSparseMatrix<Scalar,_Options,_Index>::InnerIterator : public SparseVector<Scalar,_Options,_Index>::InnerIterator
+{
+ typedef typename SparseVector<Scalar,_Options,_Index>::InnerIterator Base;
+ public:
+ InnerIterator(const DynamicSparseMatrix& mat, Index outer)
+ : Base(mat.m_data[outer]), m_outer(outer)
+ {}
+
+ inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
+ inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
+
+ protected:
+ const Index m_outer;
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class DynamicSparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator : public SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
+{
+ typedef typename SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator Base;
+ public:
+ ReverseInnerIterator(const DynamicSparseMatrix& mat, Index outer)
+ : Base(mat.m_data[outer]), m_outer(outer)
+ {}
+
+ inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
+ inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
+
+ protected:
+ const Index m_outer;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H
diff --git a/unsupported/Eigen/src/SparseExtra/MarketIO.h b/unsupported/Eigen/src/SparseExtra/MarketIO.h
new file mode 100644
index 0000000..7aafce9
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/MarketIO.h
@@ -0,0 +1,273 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_MARKET_IO_H
+#define EIGEN_SPARSE_MARKET_IO_H
+
+#include <iostream>
+
+namespace Eigen {
+
+namespace internal
+{
+ template <typename Scalar>
+ inline bool GetMarketLine (std::stringstream& line, int& M, int& N, int& i, int& j, Scalar& value)
+ {
+ line >> i >> j >> value;
+ i--;
+ j--;
+ if(i>=0 && j>=0 && i<M && j<N)
+ {
+ return true;
+ }
+ else
+ return false;
+ }
+ template <typename Scalar>
+ inline bool GetMarketLine (std::stringstream& line, int& M, int& N, int& i, int& j, std::complex<Scalar>& value)
+ {
+ Scalar valR, valI;
+ line >> i >> j >> valR >> valI;
+ i--;
+ j--;
+ if(i>=0 && j>=0 && i<M && j<N)
+ {
+ value = std::complex<Scalar>(valR, valI);
+ return true;
+ }
+ else
+ return false;
+ }
+
+ template <typename RealScalar>
+ inline void GetVectorElt (const std::string& line, RealScalar& val)
+ {
+ std::istringstream newline(line);
+ newline >> val;
+ }
+
+ template <typename RealScalar>
+ inline void GetVectorElt (const std::string& line, std::complex<RealScalar>& val)
+ {
+ RealScalar valR, valI;
+ std::istringstream newline(line);
+ newline >> valR >> valI;
+ val = std::complex<RealScalar>(valR, valI);
+ }
+
+ template<typename Scalar>
+ inline void putMarketHeader(std::string& header,int sym)
+ {
+ header= "%%MatrixMarket matrix coordinate ";
+ if(internal::is_same<Scalar, std::complex<float> >::value || internal::is_same<Scalar, std::complex<double> >::value)
+ {
+ header += " complex";
+ if(sym == Symmetric) header += " symmetric";
+ else if (sym == SelfAdjoint) header += " Hermitian";
+ else header += " general";
+ }
+ else
+ {
+ header += " real";
+ if(sym == Symmetric) header += " symmetric";
+ else header += " general";
+ }
+ }
+
+ template<typename Scalar>
+ inline void PutMatrixElt(Scalar value, int row, int col, std::ofstream& out)
+ {
+ out << row << " "<< col << " " << value << "\n";
+ }
+ template<typename Scalar>
+ inline void PutMatrixElt(std::complex<Scalar> value, int row, int col, std::ofstream& out)
+ {
+ out << row << " " << col << " " << value.real() << " " << value.imag() << "\n";
+ }
+
+
+ template<typename Scalar>
+ inline void putVectorElt(Scalar value, std::ofstream& out)
+ {
+ out << value << "\n";
+ }
+ template<typename Scalar>
+ inline void putVectorElt(std::complex<Scalar> value, std::ofstream& out)
+ {
+ out << value.real() << " " << value.imag() << "\n";
+ }
+
+} // end namespace internal
+
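+/** Parses the Matrix Market banner (the first line of \a filename) and reports
+ * the symmetry of the stored object in \a sym (\c Symmetric, \c SelfAdjoint, or 0),
+ * whether its scalars are complex in \a iscomplex, and whether it stores a dense
+ * array rather than a coordinate matrix in \a isvector.
+ * Returns false if the file cannot be opened. */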
+inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscomplex, bool& isvector)
+{
+ sym = 0;
+ isvector = false;
+ std::ifstream in(filename.c_str(),std::ios::in);
+ if(!in)
+ return false;
+
+ std::string line;
+ // The matrix header is always the first line in the file
+ std::getline(in, line); eigen_assert(in.good());
+
+ std::stringstream fmtline(line);
+ std::string substr[5];
+ fmtline>> substr[0] >> substr[1] >> substr[2] >> substr[3] >> substr[4];
+ if(substr[2].compare("array") == 0) isvector = true;
+ if(substr[3].compare("complex") == 0) iscomplex = true;
+ if(substr[4].compare("symmetric") == 0) sym = Symmetric;
+ else if (substr[4].compare("Hermitian") == 0) sym = SelfAdjoint;
+
+ return true;
+}
+
+template<typename SparseMatrixType>
+bool loadMarket(SparseMatrixType& mat, const std::string& filename)
+{
+ typedef typename SparseMatrixType::Scalar Scalar;
+ std::ifstream input(filename.c_str(),std::ios::in);
+ if(!input)
+ return false;
+
+ const int maxBuffersize = 2048;
+ char buffer[maxBuffersize];
+
+ bool readsizes = false;
+
+ typedef Triplet<Scalar,int> T;
+ std::vector<T> elements;
+
+ int M(-1), N(-1), NNZ(-1);
+ int count = 0;
+ while(input.getline(buffer, maxBuffersize))
+ {
+ // skip comments
+ //NOTE An appropriate test should be done on the header to get the symmetry
+ if(buffer[0]=='%')
+ continue;
+
+ std::stringstream line(buffer);
+
+ if(!readsizes)
+ {
+ line >> M >> N >> NNZ;
+ if(M > 0 && N > 0 && NNZ > 0)
+ {
+ readsizes = true;
+ std::cout << "sizes: " << M << "," << N << "," << NNZ << "\n";
+ mat.resize(M,N);
+ mat.reserve(NNZ);
+ }
+ }
+ else
+ {
+ int i(-1), j(-1);
+ Scalar value;
+ if( internal::GetMarketLine(line, M, N, i, j, value) )
+ {
+ ++ count;
+ elements.push_back(T(i,j,value));
+ }
+ else
+ std::cerr << "Invalid read: " << i << "," << j << "\n";
+ }
+ }
+ mat.setFromTriplets(elements.begin(), elements.end());
+ if(count!=NNZ)
+ std::cerr << count << "!=" << NNZ << "\n";
+
+ input.close();
+ return true;
+}
+
+template<typename VectorType>
+bool loadMarketVector(VectorType& vec, const std::string& filename)
+{
+ typedef typename VectorType::Scalar Scalar;
+ std::ifstream in(filename.c_str(), std::ios::in);
+ if(!in)
+ return false;
+
+ std::string line;
+ int n(0), col(0);
+ do
+ { // Skip comments
+ std::getline(in, line); eigen_assert(in.good());
+ } while (line[0] == '%');
+ std::istringstream newline(line);
+ newline >> n >> col;
+ eigen_assert(n>0 && col>0);
+ vec.resize(n);
+ int i = 0;
+ Scalar value;
+ while ( std::getline(in, line) && (i < n) ){
+ internal::GetVectorElt(line, value);
+ vec(i++) = value;
+ }
+ in.close();
+ if (i!=n){
+ std::cerr<< "Unable to read all elements from file " << filename << "\n";
+ return false;
+ }
+ return true;
+}
+
+template<typename SparseMatrixType>
+bool saveMarket(const SparseMatrixType& mat, const std::string& filename, int sym = 0)
+{
+ typedef typename SparseMatrixType::Scalar Scalar;
+ std::ofstream out(filename.c_str(),std::ios::out);
+ if(!out)
+ return false;
+
+ out.flags(std::ios_base::scientific);
+ out.precision(64);
+ std::string header;
+ internal::putMarketHeader<Scalar>(header, sym);
+ out << header << std::endl;
+ out << mat.rows() << " " << mat.cols() << " " << mat.nonZeros() << "\n";
+ int count = 0;
+ for(int j=0; j<mat.outerSize(); ++j)
+ for(typename SparseMatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ ++ count;
+ internal::PutMatrixElt(it.value(), it.row()+1, it.col()+1, out);
+ // out << it.row()+1 << " " << it.col()+1 << " " << it.value() << "\n";
+ }
+ out.close();
+ return true;
+}
+
+template<typename VectorType>
+bool saveMarketVector (const VectorType& vec, const std::string& filename)
+{
+ typedef typename VectorType::Scalar Scalar;
+ std::ofstream out(filename.c_str(),std::ios::out);
+ if(!out)
+ return false;
+
+ out.flags(std::ios_base::scientific);
+ out.precision(64);
+ if(internal::is_same<Scalar, std::complex<float> >::value || internal::is_same<Scalar, std::complex<double> >::value)
+ out << "%%MatrixMarket matrix array complex general\n";
+ else
+ out << "%%MatrixMarket matrix array real general\n";
+ out << vec.size() << " "<< 1 << "\n";
+ for (int i=0; i < vec.size(); i++){
+ internal::putVectorElt(vec(i), out);
+ }
+ out.close();
+ return true;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_MARKET_IO_H
diff --git a/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h b/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
new file mode 100644
index 0000000..bf13cf2
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
@@ -0,0 +1,232 @@
+
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Desire NUENTSA WAKAM <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BROWSE_MATRICES_H
+#define EIGEN_BROWSE_MATRICES_H
+
+namespace Eigen {
+
+enum {
+ SPD = 0x100,
+ NonSymmetric = 0x0
+};
+
+/**
+ * @brief Iterator to browse matrices from a specified folder
+ *
+ * This is used to load all the matrices from a folder.
+ * The matrices should be in Matrix Market format.
+ * It is assumed that the matrices are named matname.mtx,
+ * or matname_SPD.mtx if the matrix is symmetric positive definite (or Hermitian).
+ * The right hand side vectors are loaded as well, if they exist;
+ * they should be named matname_b.mtx.
+ * Note that the right hand side for an SPD matrix is named matname_SPD_b.mtx.
+ *
+ * Sometimes a reference solution is available; in this case it should be named matname_x.mtx.
+ *
+ * Sample code (a minimal usage sketch; the folder path below is a placeholder):
+ * \code
+ *
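+ * MatrixMarketIterator<double> it("/path/to/matrices");
+ * for (; it; ++it)
+ * {
+ *   std::cout << it.matname() << "\n";
+ *   Eigen::SparseMatrix<double,Eigen::ColMajor> A = it.matrix();
+ *   Eigen::VectorXd b = it.rhs();
+ * }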
+ * \endcode
+ *
+ * \tparam Scalar The scalar type
+ */
+template <typename Scalar>
+class MatrixMarketIterator
+{
+ public:
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+ typedef SparseMatrix<Scalar,ColMajor> MatrixType;
+
+ public:
+ MatrixMarketIterator(const std::string folder):m_sym(0),m_isvalid(false),m_matIsLoaded(false),m_hasRhs(false),m_hasrefX(false),m_folder(folder)
+ {
+ m_folder_id = opendir(folder.c_str());
+ if (!m_folder_id){
+ m_isvalid = false;
+ std::cerr << "The provided Matrix folder could not be opened \n\n";
+ abort();
+ }
+ Getnextvalidmatrix();
+ }
+
+ ~MatrixMarketIterator()
+ {
+ if (m_folder_id) closedir(m_folder_id);
+ }
+
+ inline MatrixMarketIterator& operator++()
+ {
+ m_matIsLoaded = false;
+ m_hasrefX = false;
+ m_hasRhs = false;
+ Getnextvalidmatrix();
+ return *this;
+ }
+ inline operator bool() const { return m_isvalid;}
+
+ /** Return the sparse matrix corresponding to the current file */
+ inline MatrixType& matrix()
+ {
+ // Read the matrix
+ if (m_matIsLoaded) return m_mat;
+
+ std::string matrix_file = m_folder + "/" + m_matname + ".mtx";
+ if ( !loadMarket(m_mat, matrix_file))
+ {
+ m_matIsLoaded = false;
+ return m_mat;
+ }
+ m_matIsLoaded = true;
+
+ if (m_sym != NonSymmetric)
+ { // The file stores only one triangular part: rebuild the full selfadjoint matrix from its lower part.
+ MatrixType B;
+ B = m_mat;
+ m_mat = B.template selfadjointView<Lower>();
+ }
+ return m_mat;
+ }
+
+ /** Return the right hand side corresponding to the current matrix.
+ * If the rhs file is not provided, a random rhs is generated
+ */
+ inline VectorType& rhs()
+ {
+ // Get the right hand side
+ if (m_hasRhs) return m_rhs;
+
+ std::string rhs_file;
+ rhs_file = m_folder + "/" + m_matname + "_b.mtx"; // The pattern is matname_b.mtx
+ m_hasRhs = Fileexists(rhs_file);
+ if (m_hasRhs)
+ {
+ m_rhs.resize(m_mat.cols());
+ m_hasRhs = loadMarketVector(m_rhs, rhs_file);
+ }
+ if (!m_hasRhs)
+ {
+ // Generate a random right hand side
+ if (!m_matIsLoaded) this->matrix();
+ m_refX.resize(m_mat.cols());
+ m_refX.setRandom();
+ m_rhs = m_mat * m_refX;
+ m_hasrefX = true;
+ m_hasRhs = true;
+ }
+ return m_rhs;
+ }
+
+ /** Return a reference solution
+ * If it is not provided and if the right hand side is not available,
+ * then refX is randomly generated such that A*refX = b,
+ * where A and b are the matrix and the rhs.
+ * Note that when an rhs file is provided, refX is only available if a matname_x.mtx file exists.
+ */
+ inline VectorType& refX()
+ {
+ // Check if a reference solution is provided
+ if (m_hasrefX) return m_refX;
+
+ std::string lhs_file;
+ lhs_file = m_folder + "/" + m_matname + "_x.mtx";
+ m_hasrefX = Fileexists(lhs_file);
+ if (m_hasrefX)
+ {
+ m_refX.resize(m_mat.cols());
+ m_hasrefX = loadMarketVector(m_refX, lhs_file);
+ }
+ return m_refX;
+ }
+
+ inline std::string& matname() { return m_matname; }
+
+ inline int sym() { return m_sym; }
+
+ inline bool hasRhs() {return m_hasRhs; }
+ inline bool hasrefX() {return m_hasrefX; }
+
+ protected:
+
+ inline bool Fileexists(std::string file)
+ {
+ std::ifstream file_id(file.c_str());
+ if (!file_id.good() )
+ {
+ return false;
+ }
+ else
+ {
+ file_id.close();
+ return true;
+ }
+ }
+
+ void Getnextvalidmatrix( )
+ {
+ m_isvalid = false;
+ // Here, we return with the next valid matrix in the folder
+ while ( (m_curs_id = readdir(m_folder_id)) != NULL) {
+ m_isvalid = false;
+ std::string curfile;
+ curfile = m_folder + "/" + m_curs_id->d_name;
+ // Discard if it is a folder
+ if (m_curs_id->d_type == DT_DIR) continue; //FIXME This may not be available on non BSD systems
+// struct stat st_buf;
+// stat (curfile.c_str(), &st_buf);
+// if (S_ISDIR(st_buf.st_mode)) continue;
+
+ // Determine from the header if it is a matrix or a right hand side
+ bool isvector,iscomplex=false;
+ if(!getMarketHeader(curfile,m_sym,iscomplex,isvector)) continue;
+ if(isvector) continue;
+ if (!iscomplex)
+ {
+ if(internal::is_same<Scalar, std::complex<float> >::value || internal::is_same<Scalar, std::complex<double> >::value)
+ continue;
+ }
+ if (iscomplex)
+ {
+ if(internal::is_same<Scalar, float>::value || internal::is_same<Scalar, double>::value)
+ continue;
+ }
+
+
+ // Get the matrix name
+ std::string filename = m_curs_id->d_name;
+ m_matname = filename.substr(0, filename.length()-4);
+
+ // Find if the matrix is SPD
+ size_t found = m_matname.find("SPD");
+ if( (found!=std::string::npos) && (m_sym != NonSymmetric) )
+ m_sym = SPD;
+
+ m_isvalid = true;
+ break;
+ }
+ }
+ int m_sym; // Symmetry of the matrix
+ MatrixType m_mat; // Current matrix
+ VectorType m_rhs; // Current vector
+ VectorType m_refX; // The reference solution, if it exists
+ std::string m_matname; // Matrix Name
+ bool m_isvalid;
+ bool m_matIsLoaded; // Determine if the matrix has already been loaded from the file
+ bool m_hasRhs; // The right hand side exists
+ bool m_hasrefX; // A reference solution is provided
+ std::string m_folder;
+ DIR * m_folder_id;
+ struct dirent *m_curs_id;
+
+};
+
+} // end namespace Eigen
+
+#endif
diff --git a/unsupported/Eigen/src/SparseExtra/RandomSetter.h b/unsupported/Eigen/src/SparseExtra/RandomSetter.h
new file mode 100644
index 0000000..dee1708
--- /dev/null
+++ b/unsupported/Eigen/src/SparseExtra/RandomSetter.h
@@ -0,0 +1,327 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_RANDOMSETTER_H
+#define EIGEN_RANDOMSETTER_H
+
+namespace Eigen {
+
+/** Represents a std::map
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct StdMapTraits
+{
+ typedef int KeyType;
+ typedef std::map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 1
+ };
+
+ static void setInvalidKey(Type&, const KeyType&) {}
+};
+
+#ifdef EIGEN_UNORDERED_MAP_SUPPORT
+/** Represents a std::unordered_map
+ *
+ * To use it you need to both define EIGEN_UNORDERED_MAP_SUPPORT and include the unordered_map header file
+ * yourself, making sure that unordered_map is defined in the std namespace.
+ *
+ * For instance, with current version of gcc you can either enable C++0x standard (-std=c++0x) or do:
+ * \code
+ * #include <tr1/unordered_map>
+ * #define EIGEN_UNORDERED_MAP_SUPPORT
+ * namespace std {
+ * using std::tr1::unordered_map;
+ * }
+ * \endcode
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct StdUnorderedMapTraits
+{
+ typedef int KeyType;
+ typedef std::unordered_map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 0
+ };
+
+ static void setInvalidKey(Type&, const KeyType&) {}
+};
+#endif // EIGEN_UNORDERED_MAP_SUPPORT
+
+#ifdef _DENSE_HASH_MAP_H_
+/** Represents a google::dense_hash_map
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct GoogleDenseHashMapTraits
+{
+ typedef int KeyType;
+ typedef google::dense_hash_map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 0
+ };
+
+ static void setInvalidKey(Type& map, const KeyType& k)
+ { map.set_empty_key(k); }
+};
+#endif
+
+#ifdef _SPARSE_HASH_MAP_H_
+/** Represents a google::sparse_hash_map
+ *
+ * \see RandomSetter
+ */
+template<typename Scalar> struct GoogleSparseHashMapTraits
+{
+ typedef int KeyType;
+ typedef google::sparse_hash_map<KeyType,Scalar> Type;
+ enum {
+ IsSorted = 0
+ };
+
+ static void setInvalidKey(Type&, const KeyType&) {}
+};
+#endif
+
+/** \class RandomSetter
+ *
+ * \brief The RandomSetter is a wrapper object that allows setting/updating a sparse matrix with random access
+ *
+ * \param SparseMatrixType the type of the sparse matrix we are updating
+ * \param MapTraits a traits class representing the map implementation used for the temporary sparse storage.
+ * Its default value depends on the system.
+ * \param OuterPacketBits defines the number of rows (or columns) managed by a single map object
+ * as a power of two exponent.
+ *
+ * This class temporarily represents a sparse matrix object using a generic map implementation allowing for
+ * efficient random access. The conversion from the compressed representation to a hash_map object is performed
+ * in the RandomSetter constructor, while the sparse matrix is updated back at destruction time. This strategy
+ * suggests the use of nested blocks, as in this example:
+ *
+ * \code
+ * SparseMatrix<double> m(rows,cols);
+ * {
+ * RandomSetter<SparseMatrix<double> > w(m);
+ * // don't use m but w instead with read/write random access to the coefficients:
+ * for(;;)
+ * w(rand(),rand()) = rand();
+ * }
+ * // when w is deleted, the data are copied back to m
+ * // and m is ready to use.
+ * \endcode
+ *
+ * Since hash_map objects are not fully sorted, representing a full matrix as a single hash_map would
+ * involve a big and costly sort to update the compressed matrix back. To overcome this issue, a RandomSetter
+ * uses multiple hash_maps, each representing 2^OuterPacketBits columns or rows according to the storage order.
+ * To reach optimal performance, OuterPacketBits should be adjusted according to the average number of nonzeros
+ * per row/column.
+ *
+ * The possible values for the template parameter MapTraits are:
+ * - \b StdMapTraits: corresponds to std::map. (does not perform very well)
+ * - \b GnuHashMapTraits: corresponds to __gnu_cxx::hash_map (available only with GCC)
+ * - \b GoogleDenseHashMapTraits: corresponds to google::dense_hash_map (best efficiency, reasonable memory consumption)
+ * - \b GoogleSparseHashMapTraits: corresponds to google::sparse_hash_map (best memory consumption, relatively good performance)
+ *
+ * The default map implementation depends on availability, and the preferred order is:
+ * GoogleDenseHashMapTraits, GnuHashMapTraits, and finally StdMapTraits.
+ *
+ * For performance and memory consumption reasons it is highly recommended to use one of
+ * Google's hash_map implementations. To enable the support for them, you have two options:
+ * - \#include <google/dense_hash_map> yourself \b before the Eigen/Sparse header
+ * - define EIGEN_GOOGLEHASH_SUPPORT
+ * In the latter case the inclusion of <google/dense_hash_map> is made for you.
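+ *
+ * For instance, the second option amounts to (a minimal sketch):
+ * \code
+ * #define EIGEN_GOOGLEHASH_SUPPORT
+ * #include <Eigen/Sparse> // <google/dense_hash_map> is then included for you
+ * \endcode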
+ *
+ * \see http://code.google.com/p/google-sparsehash/
+ */
+template<typename SparseMatrixType,
+ template <typename T> class MapTraits =
+#if defined _DENSE_HASH_MAP_H_
+ GoogleDenseHashMapTraits
+#elif defined _HASH_MAP
+ GnuHashMapTraits
+#else
+ StdMapTraits
+#endif
+ ,int OuterPacketBits = 6>
+class RandomSetter
+{
+ typedef typename SparseMatrixType::Scalar Scalar;
+ typedef typename SparseMatrixType::Index Index;
+
+ struct ScalarWrapper
+ {
+ ScalarWrapper() : value(0) {}
+ Scalar value;
+ };
+ typedef typename MapTraits<ScalarWrapper>::KeyType KeyType;
+ typedef typename MapTraits<ScalarWrapper>::Type HashMapType;
+ static const int OuterPacketMask = (1 << OuterPacketBits) - 1;
+ enum {
+ SwapStorage = 1 - MapTraits<ScalarWrapper>::IsSorted,
+ TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0,
+ SetterRowMajor = SwapStorage ? 1-TargetRowMajor : TargetRowMajor
+ };
+
+ public:
+
+ /** Constructs a random setter object from the sparse matrix \a target
+ *
+   * Note that the initial values of \a target are imported. If you want to re-set
+ * a sparse matrix from scratch, then you must set it to zero first using the
+ * setZero() function.
+ */
+ inline RandomSetter(SparseMatrixType& target)
+ : mp_target(&target)
+ {
+ const Index outerSize = SwapStorage ? target.innerSize() : target.outerSize();
+ const Index innerSize = SwapStorage ? target.outerSize() : target.innerSize();
+ m_outerPackets = outerSize >> OuterPacketBits;
+ if (outerSize&OuterPacketMask)
+ m_outerPackets += 1;
+ m_hashmaps = new HashMapType[m_outerPackets];
+ // compute number of bits needed to store inner indices
+ Index aux = innerSize - 1;
+ m_keyBitsOffset = 0;
+ while (aux)
+ {
+ ++m_keyBitsOffset;
+ aux = aux >> 1;
+ }
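+      // e.g. (illustrative): innerSize==1000 yields m_keyBitsOffset==10, so with
+      // OuterPacketBits==6 the invalid key below is 1<<16, strictly greater than
+      // any valid encoded (outerMinor,inner) key.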
+ KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset));
+ for (Index k=0; k<m_outerPackets; ++k)
+ MapTraits<ScalarWrapper>::setInvalidKey(m_hashmaps[k],ik);
+
+ // insert current coeffs
+ for (Index j=0; j<mp_target->outerSize(); ++j)
+ for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it)
+ (*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value();
+ }
+
+ /** Destructor updating back the sparse matrix target */
+ ~RandomSetter()
+ {
+ KeyType keyBitsMask = (1<<m_keyBitsOffset)-1;
+ if (!SwapStorage) // also means the map is sorted
+ {
+ mp_target->setZero();
+ mp_target->makeCompressed();
+ mp_target->reserve(nonZeros());
+ Index prevOuter = -1;
+ for (Index k=0; k<m_outerPackets; ++k)
+ {
+ const Index outerOffset = (1<<OuterPacketBits) * k;
+ typename HashMapType::iterator end = m_hashmaps[k].end();
+ for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+ {
+ const Index outer = (it->first >> m_keyBitsOffset) + outerOffset;
+ const Index inner = it->first & keyBitsMask;
+ if (prevOuter!=outer)
+ {
+ for (Index j=prevOuter+1;j<=outer;++j)
+ mp_target->startVec(j);
+ prevOuter = outer;
+ }
+ mp_target->insertBackByOuterInner(outer, inner) = it->second.value;
+ }
+ }
+ mp_target->finalize();
+ }
+ else
+ {
+ VectorXi positions(mp_target->outerSize());
+ positions.setZero();
+ // pass 1
+ for (Index k=0; k<m_outerPackets; ++k)
+ {
+ typename HashMapType::iterator end = m_hashmaps[k].end();
+ for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+ {
+ const Index outer = it->first & keyBitsMask;
+ ++positions[outer];
+ }
+ }
+ // prefix sum
+ Index count = 0;
+ for (Index j=0; j<mp_target->outerSize(); ++j)
+ {
+ Index tmp = positions[j];
+ mp_target->outerIndexPtr()[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
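+      // e.g. (illustrative): per-outer counts [2,0,3] become start offsets
+      // [0,2,2] and count==5.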
+ mp_target->makeCompressed();
+ mp_target->outerIndexPtr()[mp_target->outerSize()] = count;
+ mp_target->resizeNonZeros(count);
+ // pass 2
+ for (Index k=0; k<m_outerPackets; ++k)
+ {
+ const Index outerOffset = (1<<OuterPacketBits) * k;
+ typename HashMapType::iterator end = m_hashmaps[k].end();
+ for (typename HashMapType::iterator it = m_hashmaps[k].begin(); it!=end; ++it)
+ {
+ const Index inner = (it->first >> m_keyBitsOffset) + outerOffset;
+ const Index outer = it->first & keyBitsMask;
+ // sorted insertion
+ // Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients,
+          // moreover those 2^OuterPacketBits coeffs are likely to be sparse, and so only a
+ // small fraction of them have to be sorted, whence the following simple procedure:
+ Index posStart = mp_target->outerIndexPtr()[outer];
+ Index i = (positions[outer]++) - 1;
+ while ( (i >= posStart) && (mp_target->innerIndexPtr()[i] > inner) )
+ {
+ mp_target->valuePtr()[i+1] = mp_target->valuePtr()[i];
+ mp_target->innerIndexPtr()[i+1] = mp_target->innerIndexPtr()[i];
+ --i;
+ }
+ mp_target->innerIndexPtr()[i+1] = inner;
+ mp_target->valuePtr()[i+1] = it->second.value;
+ }
+ }
+ }
+ delete[] m_hashmaps;
+ }
+
+ /** \returns a reference to the coefficient at given coordinates \a row, \a col */
+ Scalar& operator() (Index row, Index col)
+ {
+ const Index outer = SetterRowMajor ? row : col;
+ const Index inner = SetterRowMajor ? col : row;
+ const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map
+ const Index outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet
+ const KeyType key = (KeyType(outerMinor)<<m_keyBitsOffset) | inner;
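+      // e.g. (illustrative): with OuterPacketBits==6 and m_keyBitsOffset==10, a
+      // column-major setter accessing (row=70,col=3) uses outer=3, inner=70, hence
+      // outerMajor=0, outerMinor=3 and key=(3<<10)|70.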
+ return m_hashmaps[outerMajor][key].value;
+ }
+
+    /** \returns the number of non-zero coefficients
+     *
+     * \note Depending on the underlying map/hash_map implementation,
+     * this function might be quite expensive.
+ */
+ Index nonZeros() const
+ {
+ Index nz = 0;
+ for (Index k=0; k<m_outerPackets; ++k)
+ nz += static_cast<Index>(m_hashmaps[k].size());
+ return nz;
+ }
+
+
+ protected:
+
+ HashMapType* m_hashmaps;
+ SparseMatrixType* mp_target;
+ Index m_outerPackets;
+ unsigned char m_keyBitsOffset;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_RANDOMSETTER_H
diff --git a/unsupported/Eigen/src/Splines/CMakeLists.txt b/unsupported/Eigen/src/Splines/CMakeLists.txt
new file mode 100644
index 0000000..55c6271
--- /dev/null
+++ b/unsupported/Eigen/src/Splines/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_Splines_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_Splines_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/Splines COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/Splines/Spline.h b/unsupported/Eigen/src/Splines/Spline.h
new file mode 100644
index 0000000..771f104
--- /dev/null
+++ b/unsupported/Eigen/src/Splines/Spline.h
@@ -0,0 +1,474 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010-2011 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPLINE_H
+#define EIGEN_SPLINE_H
+
+#include "SplineFwd.h"
+
+namespace Eigen
+{
+ /**
+ * \ingroup Splines_Module
+ * \class Spline
+ * \brief A class representing multi-dimensional spline curves.
+ *
+ * The class represents B-splines with non-uniform knot vectors. Each control
+ * point of the B-spline is associated with a basis function
+ * \f{align*}
+ * C(u) & = \sum_{i=0}^{n}N_{i,p}(u)P_i
+ * \f}
+ *
+ * \tparam _Scalar The underlying data type (typically float or double)
+ * \tparam _Dim The curve dimension (e.g. 2 or 3)
+   * \tparam _Degree By default set to Dynamic; it can be set to the actual desired
+   *                 degree for optimization purposes (this results in stack allocation
+   *                 of several temporary variables).
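+   *
+   * A minimal usage sketch (the knot and control point values below are made
+   * up for illustration):
+   * \code
+   * Spline2d::KnotVectorType knots(8);
+   * knots << 0,0,0,0,1,1,1,1;                 // clamped cubic knot vector
+   * Spline2d::ControlPointVectorType ctrls(2,4);
+   * ctrls << 0,1,2,3,
+   *          0,1,1,0;
+   * Spline2d spline(knots, ctrls);            // degree == 8-4-1 == 3
+   * Spline2d::PointType pt = spline(0.5);     // curve point at u == 0.5
+   * \endcode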
+ **/
+ template <typename _Scalar, int _Dim, int _Degree>
+ class Spline
+ {
+ public:
+ typedef _Scalar Scalar; /*!< The spline curve's scalar type. */
+ enum { Dimension = _Dim /*!< The spline curve's dimension. */ };
+ enum { Degree = _Degree /*!< The spline curve's degree. */ };
+
+ /** \brief The point type the spline is representing. */
+ typedef typename SplineTraits<Spline>::PointType PointType;
+
+ /** \brief The data type used to store knot vectors. */
+ typedef typename SplineTraits<Spline>::KnotVectorType KnotVectorType;
+
+ /** \brief The data type used to store non-zero basis functions. */
+ typedef typename SplineTraits<Spline>::BasisVectorType BasisVectorType;
+
+ /** \brief The data type representing the spline's control points. */
+ typedef typename SplineTraits<Spline>::ControlPointVectorType ControlPointVectorType;
+
+ /**
+ * \brief Creates a (constant) zero spline.
+ * For Splines with dynamic degree, the resulting degree will be 0.
+ **/
+ Spline()
+ : m_knots(1, (Degree==Dynamic ? 2 : 2*Degree+2))
+    , m_ctrls(ControlPointVectorType::Zero(Dimension,(Degree==Dynamic ? 1 : Degree+1)))
+ {
+ // in theory this code can go to the initializer list but it will get pretty
+ // much unreadable ...
+ enum { MinDegree = (Degree==Dynamic ? 0 : Degree) };
+ m_knots.template segment<MinDegree+1>(0) = Array<Scalar,1,MinDegree+1>::Zero();
+ m_knots.template segment<MinDegree+1>(MinDegree+1) = Array<Scalar,1,MinDegree+1>::Ones();
+ }
+
+ /**
+ * \brief Creates a spline from a knot vector and control points.
+ * \param knots The spline's knot vector.
+ * \param ctrls The spline's control point vector.
+ **/
+ template <typename OtherVectorType, typename OtherArrayType>
+ Spline(const OtherVectorType& knots, const OtherArrayType& ctrls) : m_knots(knots), m_ctrls(ctrls) {}
+
+ /**
+ * \brief Copy constructor for splines.
+ * \param spline The input spline.
+ **/
+ template <int OtherDegree>
+ Spline(const Spline<Scalar, Dimension, OtherDegree>& spline) :
+ m_knots(spline.knots()), m_ctrls(spline.ctrls()) {}
+
+ /**
+ * \brief Returns the knots of the underlying spline.
+ **/
+ const KnotVectorType& knots() const { return m_knots; }
+
+ /**
+     * \brief Returns the control points of the underlying spline.
+ **/
+ const ControlPointVectorType& ctrls() const { return m_ctrls; }
+
+ /**
+ * \brief Returns the spline value at a given site \f$u\f$.
+ *
+ * The function returns
+ * \f{align*}
+     * C(u) & = \sum_{i=0}^{n}N_{i,p}(u)P_i
+ * \f}
+ *
+ * \param u Parameter \f$u \in [0;1]\f$ at which the spline is evaluated.
+ * \return The spline value at the given location \f$u\f$.
+ **/
+ PointType operator()(Scalar u) const;
+
+ /**
+     * \brief Evaluation of spline derivatives up to a given order.
+ *
+ * The function returns
+ * \f{align*}
+     * \frac{d^i}{du^i}C(u) & = \sum_{j=0}^{n} \frac{d^i}{du^i} N_{j,p}(u)P_j
+ * \f}
+ * for i ranging between 0 and order.
+ *
+ * \param u Parameter \f$u \in [0;1]\f$ at which the spline derivative is evaluated.
+ * \param order The order up to which the derivatives are computed.
+ **/
+ typename SplineTraits<Spline>::DerivativeType
+ derivatives(Scalar u, DenseIndex order) const;
+
+ /**
+ * \copydoc Spline::derivatives
+     * Using the template version of this function is more efficient since
+ * temporary objects are allocated on the stack whenever this is possible.
+ **/
+ template <int DerivativeOrder>
+ typename SplineTraits<Spline,DerivativeOrder>::DerivativeType
+ derivatives(Scalar u, DenseIndex order = DerivativeOrder) const;
+
+ /**
+ * \brief Computes the non-zero basis functions at the given site.
+ *
+ * Splines have local support and a point from their image is defined
+ * by exactly \f$p+1\f$ control points \f$P_i\f$ where \f$p\f$ is the
+ * spline degree.
+ *
+ * This function computes the \f$p+1\f$ non-zero basis function values
+ * for a given parameter value \f$u\f$. It returns
+ * \f{align*}{
+     *   N_{i-p,p}(u), \hdots, N_{i,p}(u)
+ * \f}
+ *
+ * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis functions
+ * are computed.
+ **/
+ typename SplineTraits<Spline>::BasisVectorType
+ basisFunctions(Scalar u) const;
+
+ /**
+     * \brief Computes the non-zero spline basis function derivatives up to the given order.
+ *
+ * The function computes
+ * \f{align*}{
+     *   \frac{d^k}{du^k} N_{i-p,p}(u), \hdots, \frac{d^k}{du^k} N_{i,p}(u)
+     * \f}
+     * with k ranging from 0 up to the specified order.
+ *
+ * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis function
+ * derivatives are computed.
+     * \param order The order up to which the basis function derivatives are computed.
+ **/
+ typename SplineTraits<Spline>::BasisDerivativeType
+ basisFunctionDerivatives(Scalar u, DenseIndex order) const;
+
+ /**
+ * \copydoc Spline::basisFunctionDerivatives
+     * Using the template version of this function is more efficient since
+ * temporary objects are allocated on the stack whenever this is possible.
+ **/
+ template <int DerivativeOrder>
+ typename SplineTraits<Spline,DerivativeOrder>::BasisDerivativeType
+ basisFunctionDerivatives(Scalar u, DenseIndex order = DerivativeOrder) const;
+
+ /**
+ * \brief Returns the spline degree.
+ **/
+ DenseIndex degree() const;
+
+ /**
+     * \brief Returns the span within the knot vector in which u falls.
+ * \param u The site for which the span is determined.
+ **/
+ DenseIndex span(Scalar u) const;
+
+ /**
+     * \brief Computes the span within the provided knot vector in which u falls.
+ **/
+ static DenseIndex Span(typename SplineTraits<Spline>::Scalar u, DenseIndex degree, const typename SplineTraits<Spline>::KnotVectorType& knots);
+
+ /**
+ * \brief Returns the spline's non-zero basis functions.
+ *
+ * The function computes and returns
+ * \f{align*}{
+     *   N_{i-p,p}(u), \hdots, N_{i,p}(u)
+ * \f}
+ *
+ * \param u The site at which the basis functions are computed.
+ * \param degree The degree of the underlying spline.
+ * \param knots The underlying spline's knot vector.
+ **/
+ static BasisVectorType BasisFunctions(Scalar u, DenseIndex degree, const KnotVectorType& knots);
+
+
+ private:
+ KnotVectorType m_knots; /*!< Knot vector. */
+ ControlPointVectorType m_ctrls; /*!< Control points. */
+ };
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ DenseIndex Spline<_Scalar, _Dim, _Degree>::Span(
+ typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::Scalar u,
+ DenseIndex degree,
+ const typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::KnotVectorType& knots)
+ {
+ // Piegl & Tiller, "The NURBS Book", A2.1 (p. 68)
+ if (u <= knots(0)) return degree;
+ const Scalar* pos = std::upper_bound(knots.data()+degree-1, knots.data()+knots.size()-degree-1, u);
+ return static_cast<DenseIndex>( std::distance(knots.data(), pos) - 1 );
+ }
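+
+  // e.g. (illustrative): for the clamped cubic knot vector [0,0,0,0,1,1,1,1],
+  // Span(0.5, 3, knots) returns 3, since u=0.5 lies in the knot span [u_3,u_4) = [0,1).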
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType
+ Spline<_Scalar, _Dim, _Degree>::BasisFunctions(
+ typename Spline<_Scalar, _Dim, _Degree>::Scalar u,
+ DenseIndex degree,
+ const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots)
+ {
+ typedef typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType BasisVectorType;
+
+ const DenseIndex p = degree;
+ const DenseIndex i = Spline::Span(u, degree, knots);
+
+ const KnotVectorType& U = knots;
+
+ BasisVectorType left(p+1); left(0) = Scalar(0);
+ BasisVectorType right(p+1); right(0) = Scalar(0);
+
+ VectorBlock<BasisVectorType,Degree>(left,1,p) = u - VectorBlock<const KnotVectorType,Degree>(U,i+1-p,p).reverse();
+ VectorBlock<BasisVectorType,Degree>(right,1,p) = VectorBlock<const KnotVectorType,Degree>(U,i+1,p) - u;
+
+ BasisVectorType N(1,p+1);
+ N(0) = Scalar(1);
+ for (DenseIndex j=1; j<=p; ++j)
+ {
+ Scalar saved = Scalar(0);
+ for (DenseIndex r=0; r<j; r++)
+ {
+ const Scalar tmp = N(r)/(right(r+1)+left(j-r));
+ N[r] = saved + right(r+1)*tmp;
+ saved = left(j-r)*tmp;
+ }
+ N(j) = saved;
+ }
+ return N;
+ }
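+
+  // e.g. (illustrative): for degree 1 and knots [0,0,1,1], BasisFunctions(0.25, 1, knots)
+  // returns the two non-zero linear basis values (0.75, 0.25).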
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ DenseIndex Spline<_Scalar, _Dim, _Degree>::degree() const
+ {
+ if (_Degree == Dynamic)
+ return m_knots.size() - m_ctrls.cols() - 1;
+ else
+ return _Degree;
+ }
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ DenseIndex Spline<_Scalar, _Dim, _Degree>::span(Scalar u) const
+ {
+ return Spline::Span(u, degree(), knots());
+ }
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ typename Spline<_Scalar, _Dim, _Degree>::PointType Spline<_Scalar, _Dim, _Degree>::operator()(Scalar u) const
+ {
+ enum { Order = SplineTraits<Spline>::OrderAtCompileTime };
+
+ const DenseIndex span = this->span(u);
+ const DenseIndex p = degree();
+ const BasisVectorType basis_funcs = basisFunctions(u);
+
+ const Replicate<BasisVectorType,Dimension,1> ctrl_weights(basis_funcs);
+ const Block<const ControlPointVectorType,Dimension,Order> ctrl_pts(ctrls(),0,span-p,Dimension,p+1);
+ return (ctrl_weights * ctrl_pts).rowwise().sum();
+ }
+
+ /* --------------------------------------------------------------------------------------------- */
+
+ template <typename SplineType, typename DerivativeType>
+ void derivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& der)
+ {
+ enum { Dimension = SplineTraits<SplineType>::Dimension };
+ enum { Order = SplineTraits<SplineType>::OrderAtCompileTime };
+ enum { DerivativeOrder = DerivativeType::ColsAtCompileTime };
+
+ typedef typename SplineTraits<SplineType>::ControlPointVectorType ControlPointVectorType;
+ typedef typename SplineTraits<SplineType,DerivativeOrder>::BasisDerivativeType BasisDerivativeType;
+ typedef typename BasisDerivativeType::ConstRowXpr BasisDerivativeRowXpr;
+
+ const DenseIndex p = spline.degree();
+ const DenseIndex span = spline.span(u);
+
+ const DenseIndex n = (std::min)(p, order);
+
+ der.resize(Dimension,n+1);
+
+ // Retrieve the basis function derivatives up to the desired order...
+ const BasisDerivativeType basis_func_ders = spline.template basisFunctionDerivatives<DerivativeOrder>(u, n+1);
+
+ // ... and perform the linear combinations of the control points.
+ for (DenseIndex der_order=0; der_order<n+1; ++der_order)
+ {
+ const Replicate<BasisDerivativeRowXpr,Dimension,1> ctrl_weights( basis_func_ders.row(der_order) );
+ const Block<const ControlPointVectorType,Dimension,Order> ctrl_pts(spline.ctrls(),0,span-p,Dimension,p+1);
+ der.col(der_order) = (ctrl_weights * ctrl_pts).rowwise().sum();
+ }
+ }
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::DerivativeType
+ Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const
+ {
+ typename SplineTraits< Spline >::DerivativeType res;
+ derivativesImpl(*this, u, order, res);
+ return res;
+ }
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ template <int DerivativeOrder>
+ typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::DerivativeType
+ Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const
+ {
+ typename SplineTraits< Spline, DerivativeOrder >::DerivativeType res;
+ derivativesImpl(*this, u, order, res);
+ return res;
+ }
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisVectorType
+ Spline<_Scalar, _Dim, _Degree>::basisFunctions(Scalar u) const
+ {
+ return Spline::BasisFunctions(u, degree(), knots());
+ }
+
+ /* --------------------------------------------------------------------------------------------- */
+
+ template <typename SplineType, typename DerivativeType>
+ void basisFunctionDerivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& N_)
+ {
+ enum { Order = SplineTraits<SplineType>::OrderAtCompileTime };
+
+ typedef typename SplineTraits<SplineType>::Scalar Scalar;
+ typedef typename SplineTraits<SplineType>::BasisVectorType BasisVectorType;
+ typedef typename SplineTraits<SplineType>::KnotVectorType KnotVectorType;
+
+ const KnotVectorType& U = spline.knots();
+
+ const DenseIndex p = spline.degree();
+ const DenseIndex span = spline.span(u);
+
+ const DenseIndex n = (std::min)(p, order);
+
+ N_.resize(n+1, p+1);
+
+ BasisVectorType left = BasisVectorType::Zero(p+1);
+ BasisVectorType right = BasisVectorType::Zero(p+1);
+
+ Matrix<Scalar,Order,Order> ndu(p+1,p+1);
+
+ double saved, temp;
+
+ ndu(0,0) = 1.0;
+
+ DenseIndex j;
+ for (j=1; j<=p; ++j)
+ {
+ left[j] = u-U[span+1-j];
+ right[j] = U[span+j]-u;
+ saved = 0.0;
+
+ for (DenseIndex r=0; r<j; ++r)
+ {
+ /* Lower triangle */
+ ndu(j,r) = right[r+1]+left[j-r];
+ temp = ndu(r,j-1)/ndu(j,r);
+ /* Upper triangle */
+ ndu(r,j) = static_cast<Scalar>(saved+right[r+1] * temp);
+ saved = left[j-r] * temp;
+ }
+
+ ndu(j,j) = static_cast<Scalar>(saved);
+ }
+
+ for (j = p; j>=0; --j)
+ N_(0,j) = ndu(j,p);
+
+ // Compute the derivatives
+ DerivativeType a(n+1,p+1);
+ DenseIndex r=0;
+ for (; r<=p; ++r)
+ {
+ DenseIndex s1,s2;
+ s1 = 0; s2 = 1; // alternate rows in array a
+ a(0,0) = 1.0;
+
+ // Compute the k-th derivative
+ for (DenseIndex k=1; k<=static_cast<DenseIndex>(n); ++k)
+ {
+ double d = 0.0;
+ DenseIndex rk,pk,j1,j2;
+ rk = r-k; pk = p-k;
+
+ if (r>=k)
+ {
+ a(s2,0) = a(s1,0)/ndu(pk+1,rk);
+ d = a(s2,0)*ndu(rk,pk);
+ }
+
+ if (rk>=-1) j1 = 1;
+ else j1 = -rk;
+
+ if (r-1 <= pk) j2 = k-1;
+ else j2 = p-r;
+
+ for (j=j1; j<=j2; ++j)
+ {
+ a(s2,j) = (a(s1,j)-a(s1,j-1))/ndu(pk+1,rk+j);
+ d += a(s2,j)*ndu(rk+j,pk);
+ }
+
+ if (r<=pk)
+ {
+ a(s2,k) = -a(s1,k-1)/ndu(pk+1,r);
+ d += a(s2,k)*ndu(r,pk);
+ }
+
+ N_(k,r) = static_cast<Scalar>(d);
+ j = s1; s1 = s2; s2 = j; // Switch rows
+ }
+ }
+
+ /* Multiply through by the correct factors */
+ /* (Eq. [2.9]) */
+ r = p;
+ for (DenseIndex k=1; k<=static_cast<DenseIndex>(n); ++k)
+ {
+ for (DenseIndex j=p; j>=0; --j) N_(k,j) *= r;
+ r *= p-k;
+ }
+ }
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType
+ Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
+ {
+ typename SplineTraits< Spline >::BasisDerivativeType der;
+ basisFunctionDerivativesImpl(*this, u, order, der);
+ return der;
+ }
+
+ template <typename _Scalar, int _Dim, int _Degree>
+ template <int DerivativeOrder>
+ typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType
+ Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const
+ {
+ typename SplineTraits< Spline, DerivativeOrder >::BasisDerivativeType der;
+ basisFunctionDerivativesImpl(*this, u, order, der);
+ return der;
+ }
+}
+
+#endif // EIGEN_SPLINE_H
diff --git a/unsupported/Eigen/src/Splines/SplineFitting.h b/unsupported/Eigen/src/Splines/SplineFitting.h
new file mode 100644
index 0000000..0265d53
--- /dev/null
+++ b/unsupported/Eigen/src/Splines/SplineFitting.h
@@ -0,0 +1,156 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010-2011 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPLINE_FITTING_H
+#define EIGEN_SPLINE_FITTING_H
+
+#include <numeric>
+
+#include "SplineFwd.h"
+
+#include <Eigen/QR>
+
+namespace Eigen
+{
+ /**
+ * \brief Computes knot averages.
+ * \ingroup Splines_Module
+ *
+ * The knots are computed as
+ * \f{align*}
+ * u_0 & = \hdots = u_p = 0 \\
+ * u_{m-p} & = \hdots = u_{m} = 1 \\
+ * u_{j+p} & = \frac{1}{p}\sum_{i=j}^{j+p-1}\bar{u}_i \quad\quad j=1,\hdots,n-p
+ * \f}
+   * where \f$p\f$ is the degree and \f$m+1\f$ the number of knots
+ * of the desired interpolating spline.
+ *
+   * \param[in] parameters The input parameters (during interpolation, one for each data point).
+ * \param[in] degree The spline degree which is used during the interpolation.
+ * \param[out] knots The output knot vector.
+ *
+ * \sa Les Piegl and Wayne Tiller, The NURBS book (2nd ed.), 1997, 9.2.1 Global Curve Interpolation to Point Data
+ **/
+ template <typename KnotVectorType>
+ void KnotAveraging(const KnotVectorType& parameters, DenseIndex degree, KnotVectorType& knots)
+ {
+ knots.resize(parameters.size()+degree+1);
+
+ for (DenseIndex j=1; j<parameters.size()-degree; ++j)
+ knots(j+degree) = parameters.segment(j,degree).mean();
+
+ knots.segment(0,degree+1) = KnotVectorType::Zero(degree+1);
+ knots.segment(knots.size()-degree-1,degree+1) = KnotVectorType::Ones(degree+1);
+ }
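+
+  // e.g. (illustrative): for parameters [0, 0.25, 0.5, 0.75, 1] and degree 3, the
+  // resulting knot vector has 5+3+1 == 9 entries: [0 0 0 0 0.5 1 1 1 1], the single
+  // interior knot being (0.25+0.5+0.75)/3 == 0.5.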
+
+ /**
+ * \brief Computes chord length parameters which are required for spline interpolation.
+ * \ingroup Splines_Module
+ *
+ * \param[in] pts The data points to which a spline should be fit.
+   * \param[out] chord_lengths The resulting chord length vector.
+ *
+ * \sa Les Piegl and Wayne Tiller, The NURBS book (2nd ed.), 1997, 9.2.1 Global Curve Interpolation to Point Data
+ **/
+ template <typename PointArrayType, typename KnotVectorType>
+ void ChordLengths(const PointArrayType& pts, KnotVectorType& chord_lengths)
+ {
+ typedef typename KnotVectorType::Scalar Scalar;
+
+ const DenseIndex n = pts.cols();
+
+ // 1. compute the column-wise norms
+ chord_lengths.resize(pts.cols());
+ chord_lengths[0] = 0;
+ chord_lengths.rightCols(n-1) = (pts.array().leftCols(n-1) - pts.array().rightCols(n-1)).matrix().colwise().norm();
+
+ // 2. compute the partial sums
+ std::partial_sum(chord_lengths.data(), chord_lengths.data()+n, chord_lengths.data());
+
+ // 3. normalize the data
+ chord_lengths /= chord_lengths(n-1);
+ chord_lengths(n-1) = Scalar(1);
+ }
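+
+  // e.g. (illustrative): for the 2D points (0,0), (3,0), (3,4) the chord lengths are
+  // 3 and 4, so the normalized parameters are [0, 3/7, 1].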
+
+ /**
+ * \brief Spline fitting methods.
+ * \ingroup Splines_Module
+ **/
+ template <typename SplineType>
+ struct SplineFitting
+ {
+ typedef typename SplineType::KnotVectorType KnotVectorType;
+
+ /**
+ * \brief Fits an interpolating Spline to the given data points.
+ *
+ * \param pts The points for which an interpolating spline will be computed.
+ * \param degree The degree of the interpolating spline.
+ *
+ * \returns A spline interpolating the initially provided points.
+ **/
+ template <typename PointArrayType>
+ static SplineType Interpolate(const PointArrayType& pts, DenseIndex degree);
+
+ /**
+ * \brief Fits an interpolating Spline to the given data points.
+ *
+ * \param pts The points for which an interpolating spline will be computed.
+ * \param degree The degree of the interpolating spline.
+ * \param knot_parameters The knot parameters for the interpolation.
+ *
+ * \returns A spline interpolating the initially provided points.
+ **/
+ template <typename PointArrayType>
+ static SplineType Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters);
+ };
+
+ template <typename SplineType>
+ template <typename PointArrayType>
+ SplineType SplineFitting<SplineType>::Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters)
+ {
+ typedef typename SplineType::KnotVectorType::Scalar Scalar;
+ typedef typename SplineType::ControlPointVectorType ControlPointVectorType;
+
+ typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+
+ KnotVectorType knots;
+ KnotAveraging(knot_parameters, degree, knots);
+
+ DenseIndex n = pts.cols();
+ MatrixType A = MatrixType::Zero(n,n);
+ for (DenseIndex i=1; i<n-1; ++i)
+ {
+ const DenseIndex span = SplineType::Span(knot_parameters[i], degree, knots);
+
+ // The segment call should somehow be told the spline order at compile time.
+ A.row(i).segment(span-degree, degree+1) = SplineType::BasisFunctions(knot_parameters[i], degree, knots);
+ }
+ A(0,0) = 1.0;
+ A(n-1,n-1) = 1.0;
+
+ HouseholderQR<MatrixType> qr(A);
+
+    // A temporary is created here to work around an Eigen issue.
+ ControlPointVectorType ctrls = qr.solve(MatrixType(pts.transpose())).transpose();
+
+ return SplineType(knots, ctrls);
+ }
+
+ template <typename SplineType>
+ template <typename PointArrayType>
+ SplineType SplineFitting<SplineType>::Interpolate(const PointArrayType& pts, DenseIndex degree)
+ {
+ KnotVectorType chord_lengths; // knot parameters
+ ChordLengths(pts, chord_lengths);
+ return Interpolate(pts, degree, chord_lengths);
+ }
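+
+  // Usage sketch (illustrative; the sample points are made up):
+  //   typedef Spline<double,2> Spline2d;
+  //   Array<double,2,Dynamic> pts(2,5);
+  //   pts << 0, 1, 2, 3, 4,
+  //          0, 1, 0,-1, 0;
+  //   Spline2d spline = SplineFitting<Spline2d>::Interpolate(pts, 3);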
+}
+
+#endif // EIGEN_SPLINE_FITTING_H
diff --git a/unsupported/Eigen/src/Splines/SplineFwd.h b/unsupported/Eigen/src/Splines/SplineFwd.h
new file mode 100644
index 0000000..49db8d3
--- /dev/null
+++ b/unsupported/Eigen/src/Splines/SplineFwd.h
@@ -0,0 +1,86 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010-2011 Hauke Heibel <hauke.heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPLINES_FWD_H
+#define EIGEN_SPLINES_FWD_H
+
+#include <Eigen/Core>
+
+namespace Eigen
+{
+ template <typename Scalar, int Dim, int Degree = Dynamic> class Spline;
+
+ template < typename SplineType, int DerivativeOrder = Dynamic > struct SplineTraits {};
+
+ /**
+ * \ingroup Splines_Module
+ * \brief Compile-time attributes of the Spline class for Dynamic degree.
+ **/
+ template <typename _Scalar, int _Dim, int _Degree>
+ struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, Dynamic >
+ {
+ typedef _Scalar Scalar; /*!< The spline curve's scalar type. */
+ enum { Dimension = _Dim /*!< The spline curve's dimension. */ };
+ enum { Degree = _Degree /*!< The spline curve's degree. */ };
+
+ enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ };
+ enum { NumOfDerivativesAtCompileTime = OrderAtCompileTime /*!< The number of derivatives defined for the current spline. */ };
+
+ /** \brief The data type used to store non-zero basis functions. */
+ typedef Array<Scalar,1,OrderAtCompileTime> BasisVectorType;
+
+ /** \brief The data type used to store the values of the basis function derivatives. */
+ typedef Array<Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType;
+
+ /** \brief The data type used to store the spline's derivative values. */
+ typedef Array<Scalar,Dimension,Dynamic,ColMajor,Dimension,NumOfDerivativesAtCompileTime> DerivativeType;
+
+ /** \brief The point type the spline is representing. */
+ typedef Array<Scalar,Dimension,1> PointType;
+
+ /** \brief The data type used to store knot vectors. */
+ typedef Array<Scalar,1,Dynamic> KnotVectorType;
+
+ /** \brief The data type representing the spline's control points. */
+ typedef Array<Scalar,Dimension,Dynamic> ControlPointVectorType;
+ };
+
+ /**
+ * \ingroup Splines_Module
+ * \brief Compile-time attributes of the Spline class for fixed degree.
+ *
+ * The traits class inherits all attributes from the SplineTraits of Dynamic degree.
+ **/
+ template < typename _Scalar, int _Dim, int _Degree, int _DerivativeOrder >
+ struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, _DerivativeOrder > : public SplineTraits< Spline<_Scalar, _Dim, _Degree> >
+ {
+ enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ };
+ enum { NumOfDerivativesAtCompileTime = _DerivativeOrder==Dynamic ? Dynamic : _DerivativeOrder+1 /*!< The number of derivatives defined for the current spline. */ };
+
+ /** \brief The data type used to store the values of the basis function derivatives. */
+ typedef Array<_Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType;
+
+ /** \brief The data type used to store the spline's derivative values. */
+ typedef Array<_Scalar,_Dim,Dynamic,ColMajor,_Dim,NumOfDerivativesAtCompileTime> DerivativeType;
+ };
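+
+  // e.g. (illustrative): SplineTraits< Spline<double,2,3> >::OrderAtCompileTime is 4
+  // and its BasisVectorType is an Array<double,1,4>.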
+
+ /** \brief 2D float B-spline with dynamic degree. */
+ typedef Spline<float,2> Spline2f;
+
+ /** \brief 3D float B-spline with dynamic degree. */
+ typedef Spline<float,3> Spline3f;
+
+ /** \brief 2D double B-spline with dynamic degree. */
+ typedef Spline<double,2> Spline2d;
+
+ /** \brief 3D double B-spline with dynamic degree. */
+ typedef Spline<double,3> Spline3d;
+}
+
+#endif // EIGEN_SPLINES_FWD_H
diff --git a/unsupported/README.txt b/unsupported/README.txt
new file mode 100644
index 0000000..83479ff
--- /dev/null
+++ b/unsupported/README.txt
@@ -0,0 +1,50 @@
+This directory contains contributions from various users.
+They are provided "as is", without any support. Nevertheless,
+most of them are candidates for inclusion in Eigen in the future.
+
+In order to use an unsupported module you have to either:
+
+ - add the path_to_eigen/unsupported directory to your include path and do:
+ #include <Eigen/ModuleHeader>
+
+ - or directly do:
+ #include <unsupported/Eigen/ModuleHeader>
+
+
+If you are interested in contributing to one of them, or have other stuff
+you would like to share, feel free to contact us:
+http://eigen.tuxfamily.org/index.php?title=Main_Page#Mailing_list
+
+All kinds of contributions are much appreciated, even very preliminary ones.
+However, it:
+ - must rely on Eigen,
+ - must be highly related to math,
+ - should have some general purpose in the sense that it could
+   potentially become an official Eigen module (or be merged into another one).
+
+If in doubt, feel free to contact us. For instance, if your addon is too specific
+but shows an interesting way of using Eigen, then it could make a nice demo.
+
+
+This directory is organized as follows:
+
+unsupported/Eigen/ModuleHeader1
+unsupported/Eigen/ModuleHeader2
+unsupported/Eigen/...
+unsupported/Eigen/src/Module1/SourceFile1.h
+unsupported/Eigen/src/Module1/SourceFile2.h
+unsupported/Eigen/src/Module1/...
+unsupported/Eigen/src/Module2/SourceFile1.h
+unsupported/Eigen/src/Module2/SourceFile2.h
+unsupported/Eigen/src/Module2/...
+unsupported/Eigen/src/...
+unsupported/doc/snippets/*.cpp <- code snippets for the doc
+unsupported/doc/examples/*.cpp <- examples for the doc
+unsupported/doc/TutorialModule1.dox
+unsupported/doc/TutorialModule2.dox
+unsupported/doc/...
+unsupported/test/*.cpp <- unit test files
+
+The documentation is generated at the same time as the main Eigen documentation.
+The .html files are generated in: build_dir/doc/html/unsupported/
+
diff --git a/unsupported/bench/bench_svd.cpp b/unsupported/bench/bench_svd.cpp
new file mode 100644
index 0000000..01d8231
--- /dev/null
+++ b/unsupported/bench/bench_svd.cpp
@@ -0,0 +1,123 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/
+
+// Bench to compare the efficiency of SVD algorithms
+
+#include <iostream>
+#include <bench/BenchTimer.h>
+#include <unsupported/Eigen/SVD>
+
+
+using namespace Eigen;
+using namespace std;
+
+// number of computations of each algorithm before printing the elapsed time
+#ifndef REPEAT
+#define REPEAT 10
+#endif
+
+// number of tests of the same type
+#ifndef NUMBER_SAMPLE
+#define NUMBER_SAMPLE 2
+#endif
+
+template<typename MatrixType>
+void bench_svd(const MatrixType& a = MatrixType())
+{
+ MatrixType m = MatrixType::Random(a.rows(), a.cols());
+ BenchTimer timerJacobi;
+ BenchTimer timerBDC;
+ timerJacobi.reset();
+ timerBDC.reset();
+
+ cout << " Only compute Singular Values" <<endl;
+ for (int k=1; k<=NUMBER_SAMPLE; ++k)
+ {
+ timerBDC.start();
+ for (int i=0; i<REPEAT; ++i)
+ {
+ BDCSVD<MatrixType> bdc_matrix(m);
+ }
+ timerBDC.stop();
+
+ timerJacobi.start();
+ for (int i=0; i<REPEAT; ++i)
+ {
+ JacobiSVD<MatrixType> jacobi_matrix(m);
+ }
+ timerJacobi.stop();
+
+
+ cout << "Sample " << k << " : " << REPEAT << " computations : Jacobi : " << fixed << timerJacobi.value() << "s ";
+ cout << " || " << " BDC : " << timerBDC.value() << "s " <<endl <<endl;
+
+ if (timerBDC.value() >= timerJacobi.value())
+ cout << "KO : BDC is " << timerJacobi.value() / timerBDC.value() << " times faster than Jacobi" <<endl;
+ else
+ cout << "OK : BDC is " << timerJacobi.value() / timerBDC.value() << " times faster than Jacobi" <<endl;
+
+ }
+ cout << " =================" <<endl;
+ std::cout<< std::endl;
+ timerJacobi.reset();
+ timerBDC.reset();
+ cout << " Computes rotaion matrix" <<endl;
+ for (int k=1; k<=NUMBER_SAMPLE; ++k)
+ {
+ timerBDC.start();
+ for (int i=0; i<REPEAT; ++i)
+ {
+ BDCSVD<MatrixType> bdc_matrix(m, ComputeFullU|ComputeFullV);
+ }
+ timerBDC.stop();
+
+ timerJacobi.start();
+ for (int i=0; i<REPEAT; ++i)
+ {
+ JacobiSVD<MatrixType> jacobi_matrix(m, ComputeFullU|ComputeFullV);
+ }
+ timerJacobi.stop();
+
+
+ cout << "Sample " << k << " : " << REPEAT << " computations : Jacobi : " << fixed << timerJacobi.value() << "s ";
+ cout << " || " << " BDC : " << timerBDC.value() << "s " <<endl <<endl;
+
+ if (timerBDC.value() >= timerJacobi.value())
+ cout << "KO : BDC is " << timerJacobi.value() / timerBDC.value() << " times faster than Jacobi" <<endl;
+ else
+ cout << "OK : BDC is " << timerJacobi.value() / timerBDC.value() << " times faster than Jacobi" <<endl;
+
+ }
+ std::cout<< std::endl;
+}
+
+
+
+int main(int argc, char* argv[])
+{
+ std::cout<< std::endl;
+
+ std::cout<<"On a (Dynamic, Dynamic) (6, 6) Matrix" <<std::endl;
+ bench_svd<Matrix<double,Dynamic,Dynamic> >(Matrix<double,Dynamic,Dynamic>(6, 6));
+
+ std::cout<<"On a (Dynamic, Dynamic) (32, 32) Matrix" <<std::endl;
+ bench_svd<Matrix<double,Dynamic,Dynamic> >(Matrix<double,Dynamic,Dynamic>(32, 32));
+
+ //std::cout<<"On a (Dynamic, Dynamic) (128, 128) Matrix" <<std::endl;
+ //bench_svd<Matrix<double,Dynamic,Dynamic> >(Matrix<double,Dynamic,Dynamic>(128, 128));
+
+ std::cout<<"On a (Dynamic, Dynamic) (160, 160) Matrix" <<std::endl;
+ bench_svd<Matrix<double,Dynamic,Dynamic> >(Matrix<double,Dynamic,Dynamic>(160, 160));
+
+ std::cout<< "--------------------------------------------------------------------"<< std::endl;
+
+}
diff --git a/unsupported/doc/CMakeLists.txt b/unsupported/doc/CMakeLists.txt
new file mode 100644
index 0000000..9e9ab98
--- /dev/null
+++ b/unsupported/doc/CMakeLists.txt
@@ -0,0 +1,4 @@
+set_directory_properties(PROPERTIES EXCLUDE_FROM_ALL TRUE)
+
+add_subdirectory(examples)
+add_subdirectory(snippets)
diff --git a/unsupported/doc/Overview.dox b/unsupported/doc/Overview.dox
new file mode 100644
index 0000000..d048377
--- /dev/null
+++ b/unsupported/doc/Overview.dox
@@ -0,0 +1,25 @@
+namespace Eigen {
+
+/** \mainpage Eigen's unsupported modules
+
+This is the API documentation for Eigen's unsupported modules.
+
+These modules are contributions from various users. They are provided "as is", without any support.
+
+Click on the \e Modules tab at the top of this page to get a list of all unsupported modules.
+
+Don't miss the <a href="..//index.html">official Eigen documentation</a>.
+
+*/
+
+/*
+
+\defgroup Unsupported_modules Unsupported modules
+
+The unsupported modules are contributions from various users. They are
+provided "as is", without any support. Nevertheless, some of them are
+subject to be included in Eigen in the future.
+
+*/
+
+}
diff --git a/unsupported/doc/eigendoxy_layout.xml.in b/unsupported/doc/eigendoxy_layout.xml.in
new file mode 100644
index 0000000..c93621e
--- /dev/null
+++ b/unsupported/doc/eigendoxy_layout.xml.in
@@ -0,0 +1,177 @@
+<?xml version="1.0"?>
+<doxygenlayout version="1.0">
+ <!-- Navigation index tabs for HTML output -->
+ <navindex>
+ <tab type="user" url="index.html" title="Overview" />
+ <tab type="modules" visible="yes" title="Unsupported Modules" intro=""/>
+<!-- <tab type="mainpage" visible="yes" title=""/> -->
+ <tab type="classlist" visible="yes" title="" intro=""/>
+<!-- <tab type="classmembers" visible="yes" title="" intro=""/> -->
+ </navindex>
+
+ <!-- Layout definition for a class page -->
+ <class>
+ <briefdescription visible="no"/>
+ <includes visible="$SHOW_INCLUDE_FILES"/>
+ <detaileddescription title=""/>
+ <inheritancegraph visible="$CLASS_GRAPH"/>
+ <collaborationgraph visible="$COLLABORATION_GRAPH"/>
+ <allmemberslink visible="yes"/>
+ <memberdecl>
+ <nestedclasses visible="yes" title=""/>
+ <publictypes title=""/>
+ <publicslots title=""/>
+ <signals title=""/>
+ <publicmethods title=""/>
+ <publicstaticmethods title=""/>
+ <publicattributes title=""/>
+ <publicstaticattributes title=""/>
+ <protectedtypes title=""/>
+ <protectedslots title=""/>
+ <protectedmethods title=""/>
+ <protectedstaticmethods title=""/>
+ <protectedattributes title=""/>
+ <protectedstaticattributes title=""/>
+ <packagetypes title=""/>
+ <packagemethods title=""/>
+ <packagestaticmethods title=""/>
+ <packageattributes title=""/>
+ <packagestaticattributes title=""/>
+ <properties title=""/>
+ <events title=""/>
+ <privatetypes title=""/>
+ <privateslots title=""/>
+ <privatemethods title=""/>
+ <privatestaticmethods title=""/>
+ <privateattributes title=""/>
+ <privatestaticattributes title=""/>
+ <friends title=""/>
+ <related title="" subtitle=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+
+ <memberdef>
+ <inlineclasses title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <constructors title=""/>
+ <functions title=""/>
+ <related title=""/>
+ <variables title=""/>
+ <properties title=""/>
+ <events title=""/>
+ </memberdef>
+ <usedfiles visible="$SHOW_USED_FILES"/>
+ <authorsection visible="yes"/>
+ </class>
+
+ <!-- Layout definition for a namespace page -->
+ <namespace>
+ <briefdescription visible="yes"/>
+ <memberdecl>
+ <nestednamespaces visible="yes" title=""/>
+ <classes visible="yes" title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+ <detaileddescription title=""/>
+ <memberdef>
+ <inlineclasses title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ </memberdef>
+ <authorsection visible="yes"/>
+ </namespace>
+
+ <!-- Layout definition for a file page -->
+ <file>
+ <briefdescription visible="yes"/>
+ <includes visible="$SHOW_INCLUDE_FILES"/>
+ <includegraph visible="$INCLUDE_GRAPH"/>
+ <includedbygraph visible="$INCLUDED_BY_GRAPH"/>
+ <sourcelink visible="yes"/>
+ <memberdecl>
+ <classes visible="yes" title=""/>
+ <namespaces visible="yes" title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+ <detaileddescription title=""/>
+ <memberdef>
+ <inlineclasses title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ </memberdef>
+ <authorsection/>
+ </file>
+
+ <!-- Layout definition for a group page -->
+ <group>
+ <briefdescription visible="no"/>
+ <detaileddescription title=""/>
+ <groupgraph visible="$GROUP_GRAPHS"/>
+ <memberdecl>
+ <nestedgroups visible="yes" title=""/>
+ <dirs visible="yes" title=""/>
+ <files visible="yes" title=""/>
+ <namespaces visible="yes" title=""/>
+ <classes visible="yes" title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <enumvalues title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <signals title=""/>
+ <publicslots title=""/>
+ <protectedslots title=""/>
+ <privateslots title=""/>
+ <events title=""/>
+ <properties title=""/>
+ <friends title=""/>
+ <membergroups visible="yes"/>
+ </memberdecl>
+
+ <memberdef>
+ <pagedocs/>
+ <inlineclasses title=""/>
+ <defines title=""/>
+ <typedefs title=""/>
+ <enums title=""/>
+ <enumvalues title=""/>
+ <functions title=""/>
+ <variables title=""/>
+ <signals title=""/>
+ <publicslots title=""/>
+ <protectedslots title=""/>
+ <privateslots title=""/>
+ <events title=""/>
+ <properties title=""/>
+ <friends title=""/>
+ </memberdef>
+ <authorsection visible="yes"/>
+ </group>
+
+ <!-- Layout definition for a directory page -->
+ <directory>
+ <briefdescription visible="yes"/>
+ <directorygraph visible="yes"/>
+ <memberdecl>
+ <dirs visible="yes"/>
+ <files visible="yes"/>
+ </memberdecl>
+ <detaileddescription title=""/>
+ </directory>
+</doxygenlayout>
diff --git a/unsupported/doc/examples/BVH_Example.cpp b/unsupported/doc/examples/BVH_Example.cpp
new file mode 100644
index 0000000..6b6fac0
--- /dev/null
+++ b/unsupported/doc/examples/BVH_Example.cpp
@@ -0,0 +1,52 @@
+#include <Eigen/StdVector>
+#include <unsupported/Eigen/BVH>
+#include <iostream>
+
+using namespace Eigen;
+typedef AlignedBox<double, 2> Box2d;
+
+namespace Eigen {
+ namespace internal {
+ Box2d bounding_box(const Vector2d &v) { return Box2d(v, v); } //compute the bounding box of a single point
+ }
+}
+
+struct PointPointMinimizer //how to compute squared distances between points and rectangles
+{
+ PointPointMinimizer() : calls(0) {}
+ typedef double Scalar;
+
+ double minimumOnVolumeVolume(const Box2d &r1, const Box2d &r2) { ++calls; return r1.squaredExteriorDistance(r2); }
+ double minimumOnVolumeObject(const Box2d &r, const Vector2d &v) { ++calls; return r.squaredExteriorDistance(v); }
+ double minimumOnObjectVolume(const Vector2d &v, const Box2d &r) { ++calls; return r.squaredExteriorDistance(v); }
+ double minimumOnObjectObject(const Vector2d &v1, const Vector2d &v2) { ++calls; return (v1 - v2).squaredNorm(); }
+
+ int calls;
+};
+
+int main()
+{
+ typedef std::vector<Vector2d, aligned_allocator<Vector2d> > StdVectorOfVector2d;
+ StdVectorOfVector2d redPoints, bluePoints;
+ for(int i = 0; i < 100; ++i) { //initialize random set of red points and blue points
+ redPoints.push_back(Vector2d::Random());
+ bluePoints.push_back(Vector2d::Random());
+ }
+
+ PointPointMinimizer minimizer;
+ double minDistSq = std::numeric_limits<double>::max();
+
+ //brute force to find closest red-blue pair
+ for(int i = 0; i < (int)redPoints.size(); ++i)
+ for(int j = 0; j < (int)bluePoints.size(); ++j)
+ minDistSq = std::min(minDistSq, minimizer.minimumOnObjectObject(redPoints[i], bluePoints[j]));
+ std::cout << "Brute force distance = " << sqrt(minDistSq) << ", calls = " << minimizer.calls << std::endl;
+
+ //using BVH to find closest red-blue pair
+ minimizer.calls = 0;
+ KdBVH<double, 2, Vector2d> redTree(redPoints.begin(), redPoints.end()), blueTree(bluePoints.begin(), bluePoints.end()); //construct the trees
+ minDistSq = BVMinimize(redTree, blueTree, minimizer); //actual BVH minimization call
+ std::cout << "BVH distance = " << sqrt(minDistSq) << ", calls = " << minimizer.calls << std::endl;
+
+ return 0;
+}
diff --git a/unsupported/doc/examples/CMakeLists.txt b/unsupported/doc/examples/CMakeLists.txt
new file mode 100644
index 0000000..c47646d
--- /dev/null
+++ b/unsupported/doc/examples/CMakeLists.txt
@@ -0,0 +1,20 @@
+FILE(GLOB examples_SRCS "*.cpp")
+
+ADD_CUSTOM_TARGET(unsupported_examples)
+
+INCLUDE_DIRECTORIES(../../../unsupported ../../../unsupported/test)
+
+FOREACH(example_src ${examples_SRCS})
+ GET_FILENAME_COMPONENT(example ${example_src} NAME_WE)
+ ADD_EXECUTABLE(example_${example} ${example_src})
+ if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
+ target_link_libraries(example_${example} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
+ endif()
+ ADD_CUSTOM_COMMAND(
+ TARGET example_${example}
+ POST_BUILD
+ COMMAND example_${example}
+ ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out
+ )
+ ADD_DEPENDENCIES(unsupported_examples example_${example})
+ENDFOREACH(example_src)
diff --git a/unsupported/doc/examples/FFT.cpp b/unsupported/doc/examples/FFT.cpp
new file mode 100644
index 0000000..fcbf812
--- /dev/null
+++ b/unsupported/doc/examples/FFT.cpp
@@ -0,0 +1,118 @@
+// To use the simple FFT implementation
+// g++ -o demofft -I.. -Wall -O3 FFT.cpp
+
+// To use the FFTW implementation
+// g++ -o demofft -I.. -DUSE_FFTW -Wall -O3 FFT.cpp -lfftw3 -lfftw3f -lfftw3l
+
+#ifdef USE_FFTW
+#include <fftw3.h>
+#endif
+
+#include <vector>
+#include <complex>
+#include <algorithm>
+#include <iterator>
+#include <iostream>
+#include <Eigen/Core>
+#include <unsupported/Eigen/FFT>
+
+using namespace std;
+using namespace Eigen;
+
+template <typename T>
+T mag2(T a)
+{
+ return a*a;
+}
+template <typename T>
+T mag2(std::complex<T> a)
+{
+ return norm(a);
+}
+
+template <typename T>
+T mag2(const std::vector<T> & vec)
+{
+ T out=0;
+ for (size_t k=0;k<vec.size();++k)
+ out += mag2(vec[k]);
+ return out;
+}
+
+template <typename T>
+T mag2(const std::vector<std::complex<T> > & vec)
+{
+ T out=0;
+ for (size_t k=0;k<vec.size();++k)
+ out += mag2(vec[k]);
+ return out;
+}
+
+template <typename T>
+vector<T> operator-(const vector<T> & a,const vector<T> & b )
+{
+ vector<T> c(a);
+ for (size_t k=0;k<b.size();++k)
+ c[k] -= b[k];
+ return c;
+}
+
+template <typename T>
+void RandomFill(std::vector<T> & vec)
+{
+ for (size_t k=0;k<vec.size();++k)
+ vec[k] = T( rand() )/T(RAND_MAX) - .5;
+}
+
+template <typename T>
+void RandomFill(std::vector<std::complex<T> > & vec)
+{
+ for (size_t k=0;k<vec.size();++k)
+ vec[k] = std::complex<T> ( T( rand() )/T(RAND_MAX) - .5, T( rand() )/T(RAND_MAX) - .5);
+}
+
+template <typename T_time,typename T_freq>
+void fwd_inv(size_t nfft)
+{
+ typedef typename NumTraits<T_freq>::Real Scalar;
+ vector<T_time> timebuf(nfft);
+ RandomFill(timebuf);
+
+ vector<T_freq> freqbuf;
+ static FFT<Scalar> fft;
+ fft.fwd(freqbuf,timebuf);
+
+ vector<T_time> timebuf2;
+ fft.inv(timebuf2,freqbuf);
+
+ long double rmse = mag2(timebuf - timebuf2) / mag2(timebuf);
+ cout << "roundtrip rmse: " << rmse << endl;
+}
+
+template <typename T_scalar>
+void two_demos(int nfft)
+{
+ cout << " scalar ";
+ fwd_inv<T_scalar,std::complex<T_scalar> >(nfft);
+ cout << " complex ";
+ fwd_inv<std::complex<T_scalar>,std::complex<T_scalar> >(nfft);
+}
+
+void demo_all_types(int nfft)
+{
+ cout << "nfft=" << nfft << endl;
+ cout << " float" << endl;
+ two_demos<float>(nfft);
+ cout << " double" << endl;
+ two_demos<double>(nfft);
+ cout << " long double" << endl;
+ two_demos<long double>(nfft);
+}
+
+int main()
+{
+ demo_all_types( 2*3*4*5*7 );
+ demo_all_types( 2*9*16*25 );
+ demo_all_types( 1024 );
+ return 0;
+}
diff --git a/unsupported/doc/examples/MatrixExponential.cpp b/unsupported/doc/examples/MatrixExponential.cpp
new file mode 100644
index 0000000..ebd3b96
--- /dev/null
+++ b/unsupported/doc/examples/MatrixExponential.cpp
@@ -0,0 +1,16 @@
+#include <unsupported/Eigen/MatrixFunctions>
+#include <iostream>
+
+using namespace Eigen;
+
+int main()
+{
+ const double pi = std::acos(-1.0);
+
+ MatrixXd A(3,3);
+ A << 0, -pi/4, 0,
+ pi/4, 0, 0,
+ 0, 0, 0;
+ std::cout << "The matrix A is:\n" << A << "\n\n";
+ std::cout << "The matrix exponential of A is:\n" << A.exp() << "\n\n";
+}
diff --git a/unsupported/doc/examples/MatrixFunction.cpp b/unsupported/doc/examples/MatrixFunction.cpp
new file mode 100644
index 0000000..a4172e4
--- /dev/null
+++ b/unsupported/doc/examples/MatrixFunction.cpp
@@ -0,0 +1,23 @@
+#include <unsupported/Eigen/MatrixFunctions>
+#include <iostream>
+
+using namespace Eigen;
+
+std::complex<double> expfn(std::complex<double> x, int)
+{
+ return std::exp(x);
+}
+
+int main()
+{
+ const double pi = std::acos(-1.0);
+
+ MatrixXd A(3,3);
+ A << 0, -pi/4, 0,
+ pi/4, 0, 0,
+ 0, 0, 0;
+
+ std::cout << "The matrix A is:\n" << A << "\n\n";
+ std::cout << "The matrix exponential of A is:\n"
+ << A.matrixFunction(expfn) << "\n\n";
+}
diff --git a/unsupported/doc/examples/MatrixLogarithm.cpp b/unsupported/doc/examples/MatrixLogarithm.cpp
new file mode 100644
index 0000000..8c5d970
--- /dev/null
+++ b/unsupported/doc/examples/MatrixLogarithm.cpp
@@ -0,0 +1,15 @@
+#include <unsupported/Eigen/MatrixFunctions>
+#include <iostream>
+
+using namespace Eigen;
+
+int main()
+{
+ using std::sqrt;
+ MatrixXd A(3,3);
+ A << 0.5*sqrt(2), -0.5*sqrt(2), 0,
+ 0.5*sqrt(2), 0.5*sqrt(2), 0,
+ 0, 0, 1;
+ std::cout << "The matrix A is:\n" << A << "\n\n";
+ std::cout << "The matrix logarithm of A is:\n" << A.log() << "\n";
+}
diff --git a/unsupported/doc/examples/MatrixPower.cpp b/unsupported/doc/examples/MatrixPower.cpp
new file mode 100644
index 0000000..2224524
--- /dev/null
+++ b/unsupported/doc/examples/MatrixPower.cpp
@@ -0,0 +1,16 @@
+#include <unsupported/Eigen/MatrixFunctions>
+#include <iostream>
+
+using namespace Eigen;
+
+int main()
+{
+ const double pi = std::acos(-1.0);
+ Matrix3d A;
+ A << cos(1), -sin(1), 0,
+ sin(1), cos(1), 0,
+ 0 , 0 , 1;
+ std::cout << "The matrix A is:\n" << A << "\n\n"
+ "The matrix power A^(pi/4) is:\n" << A.pow(pi/4) << std::endl;
+ return 0;
+}
diff --git a/unsupported/doc/examples/MatrixPower_optimal.cpp b/unsupported/doc/examples/MatrixPower_optimal.cpp
new file mode 100644
index 0000000..86470ba
--- /dev/null
+++ b/unsupported/doc/examples/MatrixPower_optimal.cpp
@@ -0,0 +1,17 @@
+#include <unsupported/Eigen/MatrixFunctions>
+#include <iostream>
+
+using namespace Eigen;
+
+int main()
+{
+ Matrix4cd A = Matrix4cd::Random();
+ MatrixPower<Matrix4cd> Apow(A);
+
+ std::cout << "The matrix A is:\n" << A << "\n\n"
+ "A^3.1 is:\n" << Apow(3.1) << "\n\n"
+ "A^3.3 is:\n" << Apow(3.3) << "\n\n"
+ "A^3.7 is:\n" << Apow(3.7) << "\n\n"
+ "A^3.9 is:\n" << Apow(3.9) << std::endl;
+ return 0;
+}
diff --git a/unsupported/doc/examples/MatrixSine.cpp b/unsupported/doc/examples/MatrixSine.cpp
new file mode 100644
index 0000000..9eea9a0
--- /dev/null
+++ b/unsupported/doc/examples/MatrixSine.cpp
@@ -0,0 +1,20 @@
+#include <unsupported/Eigen/MatrixFunctions>
+#include <iostream>
+
+using namespace Eigen;
+
+int main()
+{
+ MatrixXd A = MatrixXd::Random(3,3);
+ std::cout << "A = \n" << A << "\n\n";
+
+ MatrixXd sinA = A.sin();
+ std::cout << "sin(A) = \n" << sinA << "\n\n";
+
+ MatrixXd cosA = A.cos();
+ std::cout << "cos(A) = \n" << cosA << "\n\n";
+
+ // The matrix functions satisfy sin^2(A) + cos^2(A) = I,
+ // like the scalar functions.
+ std::cout << "sin^2(A) + cos^2(A) = \n" << sinA*sinA + cosA*cosA << "\n\n";
+}
diff --git a/unsupported/doc/examples/MatrixSinh.cpp b/unsupported/doc/examples/MatrixSinh.cpp
new file mode 100644
index 0000000..f771867
--- /dev/null
+++ b/unsupported/doc/examples/MatrixSinh.cpp
@@ -0,0 +1,20 @@
+#include <unsupported/Eigen/MatrixFunctions>
+#include <iostream>
+
+using namespace Eigen;
+
+int main()
+{
+ MatrixXf A = MatrixXf::Random(3,3);
+ std::cout << "A = \n" << A << "\n\n";
+
+ MatrixXf sinhA = A.sinh();
+ std::cout << "sinh(A) = \n" << sinhA << "\n\n";
+
+ MatrixXf coshA = A.cosh();
+ std::cout << "cosh(A) = \n" << coshA << "\n\n";
+
+ // The matrix functions satisfy cosh^2(A) - sinh^2(A) = I,
+ // like the scalar functions.
+ std::cout << "cosh^2(A) - sinh^2(A) = \n" << coshA*coshA - sinhA*sinhA << "\n\n";
+}
diff --git a/unsupported/doc/examples/MatrixSquareRoot.cpp b/unsupported/doc/examples/MatrixSquareRoot.cpp
new file mode 100644
index 0000000..88e7557
--- /dev/null
+++ b/unsupported/doc/examples/MatrixSquareRoot.cpp
@@ -0,0 +1,16 @@
+#include <unsupported/Eigen/MatrixFunctions>
+#include <iostream>
+
+using namespace Eigen;
+
+int main()
+{
+ const double pi = std::acos(-1.0);
+
+ MatrixXd A(2,2);
+ A << cos(pi/3), -sin(pi/3),
+ sin(pi/3), cos(pi/3);
+ std::cout << "The matrix A is:\n" << A << "\n\n";
+ std::cout << "The matrix square root of A is:\n" << A.sqrt() << "\n\n";
+ std::cout << "The square of the last matrix is:\n" << A.sqrt() * A.sqrt() << "\n";
+}
diff --git a/unsupported/doc/examples/PolynomialSolver1.cpp b/unsupported/doc/examples/PolynomialSolver1.cpp
new file mode 100644
index 0000000..cd777a4
--- /dev/null
+++ b/unsupported/doc/examples/PolynomialSolver1.cpp
@@ -0,0 +1,53 @@
+#include <unsupported/Eigen/Polynomials>
+#include <vector>
+#include <iostream>
+
+using namespace Eigen;
+using namespace std;
+
+int main()
+{
+ typedef Matrix<double,5,1> Vector5d;
+
+ Vector5d roots = Vector5d::Random();
+ cout << "Roots: " << roots.transpose() << endl;
+ Eigen::Matrix<double,6,1> polynomial;
+ roots_to_monicPolynomial( roots, polynomial );
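+  // polynomial now holds the six coefficients of the monic degree-5
+  // polynomial with these roots, stored by increasing degree (constant term
+  // first), which is the convention poly_eval expects.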
+
+ PolynomialSolver<double,5> psolve( polynomial );
+ cout << "Complex roots: " << psolve.roots().transpose() << endl;
+
+ std::vector<double> realRoots;
+ psolve.realRoots( realRoots );
+ Map<Vector5d> mapRR( &realRoots[0] );
+ cout << "Real roots: " << mapRR.transpose() << endl;
+
+ cout << endl;
+ cout << "Illustration of the convergence problem with the QR algorithm: " << endl;
+ cout << "---------------------------------------------------------------" << endl;
+ Eigen::Matrix<float,7,1> hardCase_polynomial;
+ hardCase_polynomial <<
+ -0.957, 0.9219, 0.3516, 0.9453, -0.4023, -0.5508, -0.03125;
+ cout << "Hard case polynomial defined by floats: " << hardCase_polynomial.transpose() << endl;
+ PolynomialSolver<float,6> psolvef( hardCase_polynomial );
+ cout << "Complex roots: " << psolvef.roots().transpose() << endl;
+ Eigen::Matrix<float,6,1> evals;
+ for( int i=0; i<6; ++i ){ evals[i] = std::abs( poly_eval( hardCase_polynomial, psolvef.roots()[i] ) ); }
+ cout << "Norms of the evaluations of the polynomial at the roots: " << evals.transpose() << endl << endl;
+
+ cout << "Using double's almost always solves the problem for small degrees: " << endl;
+ cout << "-------------------------------------------------------------------" << endl;
+ PolynomialSolver<double,6> psolve6d( hardCase_polynomial.cast<double>() );
+ cout << "Complex roots: " << psolve6d.roots().transpose() << endl;
+ for( int i=0; i<6; ++i )
+ {
+ std::complex<float> castedRoot( psolve6d.roots()[i].real(), psolve6d.roots()[i].imag() );
+ evals[i] = std::abs( poly_eval( hardCase_polynomial, castedRoot ) );
+ }
+ cout << "Norms of the evaluations of the polynomial at the roots: " << evals.transpose() << endl << endl;
+
+ cout.precision(10);
+ cout << "The last root in float then in double: " << psolvef.roots()[5] << "\t" << psolve6d.roots()[5] << endl;
+ std::complex<float> castedRoot( psolve6d.roots()[5].real(), psolve6d.roots()[5].imag() );
+ cout << "Norm of the difference: " << std::abs( psolvef.roots()[5] - castedRoot ) << endl;
+}
diff --git a/unsupported/doc/examples/PolynomialUtils1.cpp b/unsupported/doc/examples/PolynomialUtils1.cpp
new file mode 100644
index 0000000..dbfe520
--- /dev/null
+++ b/unsupported/doc/examples/PolynomialUtils1.cpp
@@ -0,0 +1,20 @@
+#include <unsupported/Eigen/Polynomials>
+#include <iostream>
+
+using namespace Eigen;
+using namespace std;
+
+int main()
+{
+ Vector4d roots = Vector4d::Random();
+ cout << "Roots: " << roots.transpose() << endl;
+ Eigen::Matrix<double,5,1> polynomial;
+ roots_to_monicPolynomial( roots, polynomial );
+ cout << "Polynomial: ";
+ for( int i=0; i<4; ++i ){ cout << polynomial[i] << ".x^" << i << "+ "; }
+ cout << polynomial[4] << ".x^4" << endl;
+ Vector4d evaluation;
+ for( int i=0; i<4; ++i ){
+ evaluation[i] = poly_eval( polynomial, roots[i] ); }
+ cout << "Evaluation of the polynomial at the roots: " << evaluation.transpose();
+}
diff --git a/unsupported/doc/snippets/CMakeLists.txt b/unsupported/doc/snippets/CMakeLists.txt
new file mode 100644
index 0000000..f0c5cc2
--- /dev/null
+++ b/unsupported/doc/snippets/CMakeLists.txt
@@ -0,0 +1,26 @@
+FILE(GLOB snippets_SRCS "*.cpp")
+
+ADD_CUSTOM_TARGET(unsupported_snippets)
+
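+# For each snippet source found below, configure a compile_<snippet> target
+# from the shared compile_snippet.cpp.in template, run the resulting binary
+# right after it is built, and capture its standard output into <snippet>.out
+# for inclusion in the generated documentation.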
+FOREACH(snippet_src ${snippets_SRCS})
+ GET_FILENAME_COMPONENT(snippet ${snippet_src} NAME_WE)
+ SET(compile_snippet_target compile_${snippet})
+ SET(compile_snippet_src ${compile_snippet_target}.cpp)
+ FILE(READ ${snippet_src} snippet_source_code)
+ CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/doc/snippets/compile_snippet.cpp.in
+ ${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src})
+ ADD_EXECUTABLE(${compile_snippet_target}
+ ${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src})
+ if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
+ target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
+ endif()
+ ADD_CUSTOM_COMMAND(
+ TARGET ${compile_snippet_target}
+ POST_BUILD
+ COMMAND ${compile_snippet_target}
+ ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out
+ )
+ ADD_DEPENDENCIES(unsupported_snippets ${compile_snippet_target})
+ set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src}
+ PROPERTIES OBJECT_DEPENDS ${snippet_src})
+ENDFOREACH(snippet_src)
diff --git a/unsupported/test/BVH.cpp b/unsupported/test/BVH.cpp
new file mode 100644
index 0000000..ff5b329
--- /dev/null
+++ b/unsupported/test/BVH.cpp
@@ -0,0 +1,222 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Ilya Baran <ibaran@mit.edu>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <Eigen/StdVector>
+#include <Eigen/Geometry>
+#include <unsupported/Eigen/BVH>
+
+namespace Eigen {
+
+template<typename Scalar, int Dim> AlignedBox<Scalar, Dim> bounding_box(const Matrix<Scalar, Dim, 1> &v) { return AlignedBox<Scalar, Dim>(v); }
+
+}
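+
+// KdBVH locates a free function bounding_box(Object) by unqualified lookup,
+// so an overload must exist for every object type stored in a tree: the one
+// above handles plain point vectors, and the Ball overload appears below.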
+
+
+template<int Dim>
+struct Ball
+{
+EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(double, Dim)
+
+ typedef Matrix<double, Dim, 1> VectorType;
+
+ Ball() {}
+ Ball(const VectorType &c, double r) : center(c), radius(r) {}
+
+ VectorType center;
+ double radius;
+};
+template<int Dim> AlignedBox<double, Dim> bounding_box(const Ball<Dim> &b)
+{ return AlignedBox<double, Dim>(b.center.array() - b.radius, b.center.array() + b.radius); }
+
+inline double SQR(double x) { return x * x; }
+
+template<int Dim>
+struct BallPointStuff // provides both the intersector and the minimizer interface, for ball-vs-point queries as well as for tree-vs-tree queries
+{
+ typedef double Scalar;
+ typedef Matrix<double, Dim, 1> VectorType;
+ typedef Ball<Dim> BallType;
+ typedef AlignedBox<double, Dim> BoxType;
+
+ BallPointStuff() : calls(0), count(0) {}
+ BallPointStuff(const VectorType &inP) : p(inP), calls(0), count(0) {}
+
+
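+  // BVIntersect semantics: intersectVolume() returns whether the query may
+  // hit something inside the volume (i.e. whether to descend into it), while
+  // intersectObject() returns whether to abort the whole search; returning
+  // false below keeps the traversal going so that every hit gets counted.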
+ bool intersectVolume(const BoxType &r) { ++calls; return r.contains(p); }
+ bool intersectObject(const BallType &b) {
+ ++calls;
+ if((b.center - p).squaredNorm() < SQR(b.radius))
+ ++count;
+ return false; //continue
+ }
+
+ bool intersectVolumeVolume(const BoxType &r1, const BoxType &r2) { ++calls; return !(r1.intersection(r2)).isNull(); }
+ bool intersectVolumeObject(const BoxType &r, const BallType &b) { ++calls; return r.squaredExteriorDistance(b.center) < SQR(b.radius); }
+ bool intersectObjectVolume(const BallType &b, const BoxType &r) { ++calls; return r.squaredExteriorDistance(b.center) < SQR(b.radius); }
+ bool intersectObjectObject(const BallType &b1, const BallType &b2){
+ ++calls;
+ if((b1.center - b2.center).norm() < b1.radius + b2.radius)
+ ++count;
+ return false;
+ }
+ bool intersectVolumeObject(const BoxType &r, const VectorType &v) { ++calls; return r.contains(v); }
+ bool intersectObjectObject(const BallType &b, const VectorType &v){
+ ++calls;
+ if((b.center - v).squaredNorm() < SQR(b.radius))
+ ++count;
+ return false;
+ }
+
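+  // BVMinimize semantics: minimumOnVolume() must return a lower bound on the
+  // objective over everything inside the volume, while minimumOnObject()
+  // returns the exact value for one object; the bound lets BVMinimize prune
+  // whole subtrees.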
+ double minimumOnVolume(const BoxType &r) { ++calls; return r.squaredExteriorDistance(p); }
+ double minimumOnObject(const BallType &b) { ++calls; return (std::max)(0., (b.center - p).squaredNorm() - SQR(b.radius)); }
+ double minimumOnVolumeVolume(const BoxType &r1, const BoxType &r2) { ++calls; return r1.squaredExteriorDistance(r2); }
+ double minimumOnVolumeObject(const BoxType &r, const BallType &b) { ++calls; return SQR((std::max)(0., r.exteriorDistance(b.center) - b.radius)); }
+ double minimumOnObjectVolume(const BallType &b, const BoxType &r) { ++calls; return SQR((std::max)(0., r.exteriorDistance(b.center) - b.radius)); }
+ double minimumOnObjectObject(const BallType &b1, const BallType &b2){ ++calls; return SQR((std::max)(0., (b1.center - b2.center).norm() - b1.radius - b2.radius)); }
+ double minimumOnVolumeObject(const BoxType &r, const VectorType &v) { ++calls; return r.squaredExteriorDistance(v); }
+ double minimumOnObjectObject(const BallType &b, const VectorType &v){ ++calls; return SQR((std::max)(0., (b.center - v).norm() - b.radius)); }
+
+ VectorType p;
+ int calls;
+ int count;
+};
+
+
+template<int Dim>
+struct TreeTest
+{
+ typedef Matrix<double, Dim, 1> VectorType;
+ typedef std::vector<VectorType, aligned_allocator<VectorType> > VectorTypeList;
+ typedef Ball<Dim> BallType;
+ typedef std::vector<BallType, aligned_allocator<BallType> > BallTypeList;
+ typedef AlignedBox<double, Dim> BoxType;
+
+ void testIntersect1()
+ {
+ BallTypeList b;
+ for(int i = 0; i < 500; ++i) {
+ b.push_back(BallType(VectorType::Random(), 0.5 * internal::random(0., 1.)));
+ }
+ KdBVH<double, Dim, BallType> tree(b.begin(), b.end());
+
+ VectorType pt = VectorType::Random();
+ BallPointStuff<Dim> i1(pt), i2(pt);
+
+ for(int i = 0; i < (int)b.size(); ++i)
+ i1.intersectObject(b[i]);
+
+ BVIntersect(tree, i2);
+
+ VERIFY(i1.count == i2.count);
+ }
+
+ void testMinimize1()
+ {
+ BallTypeList b;
+ for(int i = 0; i < 500; ++i) {
+ b.push_back(BallType(VectorType::Random(), 0.01 * internal::random(0., 1.)));
+ }
+ KdBVH<double, Dim, BallType> tree(b.begin(), b.end());
+
+ VectorType pt = VectorType::Random();
+ BallPointStuff<Dim> i1(pt), i2(pt);
+
+ double m1 = (std::numeric_limits<double>::max)(), m2 = m1;
+
+ for(int i = 0; i < (int)b.size(); ++i)
+ m1 = (std::min)(m1, i1.minimumOnObject(b[i]));
+
+ m2 = BVMinimize(tree, i2);
+
+ VERIFY_IS_APPROX(m1, m2);
+ }
+
+ void testIntersect2()
+ {
+ BallTypeList b;
+ VectorTypeList v;
+
+ for(int i = 0; i < 50; ++i) {
+ b.push_back(BallType(VectorType::Random(), 0.5 * internal::random(0., 1.)));
+ for(int j = 0; j < 3; ++j)
+ v.push_back(VectorType::Random());
+ }
+
+ KdBVH<double, Dim, BallType> tree(b.begin(), b.end());
+ KdBVH<double, Dim, VectorType> vTree(v.begin(), v.end());
+
+ BallPointStuff<Dim> i1, i2;
+
+ for(int i = 0; i < (int)b.size(); ++i)
+ for(int j = 0; j < (int)v.size(); ++j)
+ i1.intersectObjectObject(b[i], v[j]);
+
+ BVIntersect(tree, vTree, i2);
+
+ VERIFY(i1.count == i2.count);
+ }
+
+ void testMinimize2()
+ {
+ BallTypeList b;
+ VectorTypeList v;
+
+ for(int i = 0; i < 50; ++i) {
+ b.push_back(BallType(VectorType::Random(), 1e-7 + 1e-6 * internal::random(0., 1.)));
+ for(int j = 0; j < 3; ++j)
+ v.push_back(VectorType::Random());
+ }
+
+ KdBVH<double, Dim, BallType> tree(b.begin(), b.end());
+ KdBVH<double, Dim, VectorType> vTree(v.begin(), v.end());
+
+ BallPointStuff<Dim> i1, i2;
+
+ double m1 = (std::numeric_limits<double>::max)(), m2 = m1;
+
+ for(int i = 0; i < (int)b.size(); ++i)
+ for(int j = 0; j < (int)v.size(); ++j)
+ m1 = (std::min)(m1, i1.minimumOnObjectObject(b[i], v[j]));
+
+ m2 = BVMinimize(tree, vTree, i2);
+
+ VERIFY_IS_APPROX(m1, m2);
+ }
+};
+
+
+void test_BVH()
+{
+ for(int i = 0; i < g_repeat; i++) {
+#ifdef EIGEN_TEST_PART_1
+ TreeTest<2> test2;
+ CALL_SUBTEST(test2.testIntersect1());
+ CALL_SUBTEST(test2.testMinimize1());
+ CALL_SUBTEST(test2.testIntersect2());
+ CALL_SUBTEST(test2.testMinimize2());
+#endif
+
+#ifdef EIGEN_TEST_PART_2
+ TreeTest<3> test3;
+ CALL_SUBTEST(test3.testIntersect1());
+ CALL_SUBTEST(test3.testMinimize1());
+ CALL_SUBTEST(test3.testIntersect2());
+ CALL_SUBTEST(test3.testMinimize2());
+#endif
+
+#ifdef EIGEN_TEST_PART_3
+ TreeTest<4> test4;
+ CALL_SUBTEST(test4.testIntersect1());
+ CALL_SUBTEST(test4.testMinimize1());
+ CALL_SUBTEST(test4.testIntersect2());
+ CALL_SUBTEST(test4.testMinimize2());
+#endif
+ }
+}
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
new file mode 100644
index 0000000..2e4cfdb
--- /dev/null
+++ b/unsupported/test/CMakeLists.txt
@@ -0,0 +1,90 @@
+
+set_property(GLOBAL PROPERTY EIGEN_CURRENT_SUBPROJECT "Unsupported")
+add_custom_target(BuildUnsupported)
+
+include_directories(../../test ../../unsupported ../../Eigen
+ ${CMAKE_CURRENT_BINARY_DIR}/../../test)
+
+find_package(GoogleHash)
+if(GOOGLEHASH_FOUND)
+ add_definitions("-DEIGEN_GOOGLEHASH_SUPPORT")
+ include_directories(${GOOGLEHASH_INCLUDES})
+ ei_add_property(EIGEN_TESTED_BACKENDS "GoogleHash, ")
+else(GOOGLEHASH_FOUND)
+ ei_add_property(EIGEN_MISSING_BACKENDS "GoogleHash, ")
+endif(GOOGLEHASH_FOUND)
+
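+# ei_add_test(<name> [<compile flags> [<link libraries>]]) registers one unit
+# test; tests that need an external backend are only added when that backend
+# is found.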
+find_package(Adolc)
+if(ADOLC_FOUND)
+ include_directories(${ADOLC_INCLUDES})
+ ei_add_property(EIGEN_TESTED_BACKENDS "Adolc, ")
+ ei_add_test(forward_adolc "" ${ADOLC_LIBRARIES})
+else(ADOLC_FOUND)
+ ei_add_property(EIGEN_MISSING_BACKENDS "Adolc, ")
+endif(ADOLC_FOUND)
+
+# this test seems never to have been successful on x87, so it is considered to contain an FP-related bug.
+# see thread: "non-linear optimization test summary"
+ei_add_test(NonLinearOptimization)
+
+ei_add_test(NumericalDiff)
+ei_add_test(autodiff)
+ei_add_test(BVH)
+ei_add_test(matrix_exponential)
+ei_add_test(matrix_function)
+ei_add_test(matrix_power)
+ei_add_test(matrix_square_root)
+ei_add_test(alignedvector3)
+ei_add_test(FFT)
+
+find_package(MPFR 2.3.0)
+find_package(GMP)
+if(MPFR_FOUND)
+ include_directories(${MPFR_INCLUDES} ./mpreal)
+ ei_add_property(EIGEN_TESTED_BACKENDS "MPFR C++, ")
+ set(EIGEN_MPFR_TEST_LIBRARIES ${MPFR_LIBRARIES} ${GMP_LIBRARIES})
+ ei_add_test(mpreal_support "" "${EIGEN_MPFR_TEST_LIBRARIES}" )
+else()
+ ei_add_property(EIGEN_MISSING_BACKENDS "MPFR C++, ")
+endif()
+
+ei_add_test(sparse_extra "" "")
+
+find_package(FFTW)
+if(FFTW_FOUND)
+ ei_add_property(EIGEN_TESTED_BACKENDS "fftw, ")
+ include_directories( ${FFTW_INCLUDES} )
+ if(FFTWL_LIB)
+ ei_add_test(FFTW "-DEIGEN_FFTW_DEFAULT -DEIGEN_HAS_FFTWL" "${FFTW_LIBRARIES}" )
+ else()
+ ei_add_test(FFTW "-DEIGEN_FFTW_DEFAULT" "${FFTW_LIBRARIES}" )
+ endif()
+else()
+ ei_add_property(EIGEN_MISSING_BACKENDS "fftw, ")
+endif()
+
+option(EIGEN_TEST_NO_OPENGL "Disable OpenGL support in unit tests" OFF)
+if(NOT EIGEN_TEST_NO_OPENGL)
+ find_package(OpenGL)
+ find_package(GLUT)
+ find_package(GLEW)
+ if(OPENGL_FOUND AND GLUT_FOUND AND GLEW_FOUND)
+ include_directories(${OPENGL_INCLUDE_DIR} ${GLUT_INCLUDE_DIR} ${GLEW_INCLUDE_DIRS})
+ ei_add_property(EIGEN_TESTED_BACKENDS "OpenGL, ")
+ set(EIGEN_GL_LIB ${GLUT_LIBRARIES} ${GLEW_LIBRARIES} ${OPENGL_LIBRARIES})
+ ei_add_test(openglsupport "" "${EIGEN_GL_LIB}" )
+ else()
+ ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ")
+ endif()
+else()
+ ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ")
+endif()
+
+ei_add_test(polynomialsolver)
+ei_add_test(polynomialutils)
+ei_add_test(kronecker_product)
+ei_add_test(splines)
+ei_add_test(gmres)
+ei_add_test(minres)
+ei_add_test(levenberg_marquardt)
+ei_add_test(bdcsvd)
diff --git a/unsupported/test/FFT.cpp b/unsupported/test/FFT.cpp
new file mode 100644
index 0000000..45c87f5
--- /dev/null
+++ b/unsupported/test/FFT.cpp
@@ -0,0 +1,2 @@
+#define test_FFTW test_FFT
+#include "FFTW.cpp"
diff --git a/unsupported/test/FFTW.cpp b/unsupported/test/FFTW.cpp
new file mode 100644
index 0000000..d3718e2
--- /dev/null
+++ b/unsupported/test/FFTW.cpp
@@ -0,0 +1,262 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Mark Borgerding mark a borgerding net
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <unsupported/Eigen/FFT>
+
+template <typename T>
+std::complex<T> RandomCpx() { return std::complex<T>( (T)(rand()/(T)RAND_MAX - .5), (T)(rand()/(T)RAND_MAX - .5) ); }
+
+using namespace std;
+using namespace Eigen;
+
+
+template < typename T>
+complex<long double> promote(complex<T> x) { return complex<long double>(x.real(),x.imag()); }
+
+complex<long double> promote(float x) { return complex<long double>( x); }
+complex<long double> promote(double x) { return complex<long double>( x); }
+complex<long double> promote(long double x) { return complex<long double>( x); }
+
+
+ template <typename VT1,typename VT2>
+ long double fft_rmse( const VT1 & fftbuf,const VT2 & timebuf)
+ {
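+ // Reference check: compute the DFT of timebuf directly (O(n^2)) in long
+ // double precision and return the relative root-mean-square error of fftbuf
+ // against it.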
+ long double totalpower=0;
+ long double difpower=0;
+ long double pi = acos((long double)-1 );
+ for (size_t k0=0;k0<(size_t)fftbuf.size();++k0) {
+ complex<long double> acc = 0;
+ long double phinc = -2.*k0* pi / timebuf.size();
+ for (size_t k1=0;k1<(size_t)timebuf.size();++k1) {
+ acc += promote( timebuf[k1] ) * exp( complex<long double>(0,k1*phinc) );
+ }
+ totalpower += numext::abs2(acc);
+ complex<long double> x = promote(fftbuf[k0]);
+ complex<long double> dif = acc - x;
+ difpower += numext::abs2(dif);
+ //cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(numext::abs2(dif)) << endl;
+ }
+ cerr << "rmse:" << sqrt(difpower/totalpower) << endl;
+ return sqrt(difpower/totalpower);
+ }
+
+ template <typename VT1,typename VT2>
+ long double dif_rmse( const VT1 buf1,const VT2 buf2)
+ {
+ long double totalpower=0;
+ long double difpower=0;
+ size_t n = (min)( buf1.size(),buf2.size() );
+ for (size_t k=0;k<n;++k) {
+ totalpower += (numext::abs2( buf1[k] ) + numext::abs2(buf2[k]) )/2.;
+ difpower += numext::abs2(buf1[k] - buf2[k]);
+ }
+ return sqrt(difpower/totalpower);
+ }
+
+enum { StdVectorContainer, EigenVectorContainer };
+
+template<int Container, typename Scalar> struct VectorType;
+
+template<typename Scalar> struct VectorType<StdVectorContainer,Scalar>
+{
+ typedef vector<Scalar> type;
+};
+
+template<typename Scalar> struct VectorType<EigenVectorContainer,Scalar>
+{
+ typedef Matrix<Scalar,Dynamic,1> type;
+};
+
+template <int Container, typename T>
+void test_scalar_generic(int nfft)
+{
+ typedef typename FFT<T>::Complex Complex;
+ typedef typename FFT<T>::Scalar Scalar;
+ typedef typename VectorType<Container,Scalar>::type ScalarVector;
+ typedef typename VectorType<Container,Complex>::type ComplexVector;
+
+ FFT<T> fft;
+ ScalarVector tbuf(nfft);
+ ComplexVector freqBuf;
+ for (int k=0;k<nfft;++k)
+ tbuf[k]= (T)( rand()/(double)RAND_MAX - .5);
+
+  // with the HalfSpectrum flag set, the forward transform must return the
+  // half spectrum of size (nfft>>1)+1, not the full nfft-point spectrum
+ fft.SetFlag(fft.HalfSpectrum );
+ fft.fwd( freqBuf,tbuf);
+ VERIFY((size_t)freqBuf.size() == (size_t)( (nfft>>1)+1) );
+ VERIFY( fft_rmse(freqBuf,tbuf) < test_precision<T>() );// gross check
+
+ fft.ClearFlag(fft.HalfSpectrum );
+ fft.fwd( freqBuf,tbuf);
+ VERIFY( (size_t)freqBuf.size() == (size_t)nfft);
+ VERIFY( fft_rmse(freqBuf,tbuf) < test_precision<T>() );// gross check
+
+ if (nfft&1)
+    return; // the inverse of an odd-sized real FFT currently comes back with the wrong size, so skip it
+
+ ScalarVector tbuf2;
+ fft.inv( tbuf2 , freqBuf);
+ VERIFY( dif_rmse(tbuf,tbuf2) < test_precision<T>() );// gross check
+
+
+ // verify that the Unscaled flag takes effect
+ ScalarVector tbuf3;
+ fft.SetFlag(fft.Unscaled);
+
+ fft.inv( tbuf3 , freqBuf);
+
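+  // With the Unscaled flag set, inv() omits the 1/nfft normalization, so we
+  // reapply it by hand before comparing against the original input.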
+ for (int k=0;k<nfft;++k)
+ tbuf3[k] *= T(1./nfft);
+
+
+ //for (size_t i=0;i<(size_t) tbuf.size();++i)
+ // cout << "freqBuf=" << freqBuf[i] << " in2=" << tbuf3[i] << " - in=" << tbuf[i] << " => " << (tbuf3[i] - tbuf[i] ) << endl;
+
+ VERIFY( dif_rmse(tbuf,tbuf3) < test_precision<T>() );// gross check
+
+ // verify that ClearFlag works
+ fft.ClearFlag(fft.Unscaled);
+ fft.inv( tbuf2 , freqBuf);
+ VERIFY( dif_rmse(tbuf,tbuf2) < test_precision<T>() );// gross check
+}
+
+template <typename T>
+void test_scalar(int nfft)
+{
+ test_scalar_generic<StdVectorContainer,T>(nfft);
+ //test_scalar_generic<EigenVectorContainer,T>(nfft);
+}
+
+
+template <int Container, typename T>
+void test_complex_generic(int nfft)
+{
+ typedef typename FFT<T>::Complex Complex;
+ typedef typename VectorType<Container,Complex>::type ComplexVector;
+
+ FFT<T> fft;
+
+ ComplexVector inbuf(nfft);
+ ComplexVector outbuf;
+ ComplexVector buf3;
+ for (int k=0;k<nfft;++k)
+ inbuf[k]= Complex( (T)(rand()/(double)RAND_MAX - .5), (T)(rand()/(double)RAND_MAX - .5) );
+ fft.fwd( outbuf , inbuf);
+
+ VERIFY( fft_rmse(outbuf,inbuf) < test_precision<T>() );// gross check
+ fft.inv( buf3 , outbuf);
+
+ VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
+
+ // verify that the Unscaled flag takes effect
+ ComplexVector buf4;
+ fft.SetFlag(fft.Unscaled);
+ fft.inv( buf4 , outbuf);
+ for (int k=0;k<nfft;++k)
+ buf4[k] *= T(1./nfft);
+ VERIFY( dif_rmse(inbuf,buf4) < test_precision<T>() );// gross check
+
+ // verify that ClearFlag works
+ fft.ClearFlag(fft.Unscaled);
+ fft.inv( buf3 , outbuf);
+ VERIFY( dif_rmse(inbuf,buf3) < test_precision<T>() );// gross check
+}
+
+template <typename T>
+void test_complex(int nfft)
+{
+ test_complex_generic<StdVectorContainer,T>(nfft);
+ test_complex_generic<EigenVectorContainer,T>(nfft);
+}
+/*
+template <typename T,int nrows,int ncols>
+void test_complex2d()
+{
+ typedef typename Eigen::FFT<T>::Complex Complex;
+ FFT<T> fft;
+ Eigen::Matrix<Complex,nrows,ncols> src,src2,dst,dst2;
+
+ src = Eigen::Matrix<Complex,nrows,ncols>::Random();
+ //src = Eigen::Matrix<Complex,nrows,ncols>::Identity();
+
+ for (int k=0;k<ncols;k++) {
+ Eigen::Matrix<Complex,nrows,1> tmpOut;
+ fft.fwd( tmpOut,src.col(k) );
+ dst2.col(k) = tmpOut;
+ }
+
+ for (int k=0;k<nrows;k++) {
+ Eigen::Matrix<Complex,1,ncols> tmpOut;
+ fft.fwd( tmpOut, dst2.row(k) );
+ dst2.row(k) = tmpOut;
+ }
+
+ fft.fwd2(dst.data(),src.data(),ncols,nrows);
+ fft.inv2(src2.data(),dst.data(),ncols,nrows);
+ VERIFY( (src-src2).norm() < test_precision<T>() );
+ VERIFY( (dst-dst2).norm() < test_precision<T>() );
+}
+*/
+
+
+void test_return_by_value(int len)
+{
+ VectorXf in;
+ VectorXf in1;
+ in.setRandom( len );
+ VectorXcf out1,out2;
+ FFT<float> fft;
+
+ fft.SetFlag(fft.HalfSpectrum );
+
+ fft.fwd(out1,in);
+ out2 = fft.fwd(in);
+ VERIFY( (out1-out2).norm() < test_precision<float>() );
+ in1 = fft.inv(out1);
+ VERIFY( (in1-in).norm() < test_precision<float>() );
+}
+
+void test_FFTW()
+{
+ CALL_SUBTEST( test_return_by_value(32) );
+ //CALL_SUBTEST( ( test_complex2d<float,4,8> () ) ); CALL_SUBTEST( ( test_complex2d<double,4,8> () ) );
+ //CALL_SUBTEST( ( test_complex2d<long double,4,8> () ) );
+ CALL_SUBTEST( test_complex<float>(32) ); CALL_SUBTEST( test_complex<double>(32) );
+ CALL_SUBTEST( test_complex<float>(256) ); CALL_SUBTEST( test_complex<double>(256) );
+ CALL_SUBTEST( test_complex<float>(3*8) ); CALL_SUBTEST( test_complex<double>(3*8) );
+ CALL_SUBTEST( test_complex<float>(5*32) ); CALL_SUBTEST( test_complex<double>(5*32) );
+ CALL_SUBTEST( test_complex<float>(2*3*4) ); CALL_SUBTEST( test_complex<double>(2*3*4) );
+ CALL_SUBTEST( test_complex<float>(2*3*4*5) ); CALL_SUBTEST( test_complex<double>(2*3*4*5) );
+ CALL_SUBTEST( test_complex<float>(2*3*4*5*7) ); CALL_SUBTEST( test_complex<double>(2*3*4*5*7) );
+
+ CALL_SUBTEST( test_scalar<float>(32) ); CALL_SUBTEST( test_scalar<double>(32) );
+ CALL_SUBTEST( test_scalar<float>(45) ); CALL_SUBTEST( test_scalar<double>(45) );
+ CALL_SUBTEST( test_scalar<float>(50) ); CALL_SUBTEST( test_scalar<double>(50) );
+ CALL_SUBTEST( test_scalar<float>(256) ); CALL_SUBTEST( test_scalar<double>(256) );
+ CALL_SUBTEST( test_scalar<float>(2*3*4*5*7) ); CALL_SUBTEST( test_scalar<double>(2*3*4*5*7) );
+
+ #ifdef EIGEN_HAS_FFTWL
+ CALL_SUBTEST( test_complex<long double>(32) );
+ CALL_SUBTEST( test_complex<long double>(256) );
+ CALL_SUBTEST( test_complex<long double>(3*8) );
+ CALL_SUBTEST( test_complex<long double>(5*32) );
+ CALL_SUBTEST( test_complex<long double>(2*3*4) );
+ CALL_SUBTEST( test_complex<long double>(2*3*4*5) );
+ CALL_SUBTEST( test_complex<long double>(2*3*4*5*7) );
+
+ CALL_SUBTEST( test_scalar<long double>(32) );
+ CALL_SUBTEST( test_scalar<long double>(45) );
+ CALL_SUBTEST( test_scalar<long double>(50) );
+ CALL_SUBTEST( test_scalar<long double>(256) );
+ CALL_SUBTEST( test_scalar<long double>(2*3*4*5*7) );
+ #endif
+}
diff --git a/unsupported/test/NonLinearOptimization.cpp b/unsupported/test/NonLinearOptimization.cpp
new file mode 100644
index 0000000..d7376b0
--- /dev/null
+++ b/unsupported/test/NonLinearOptimization.cpp
@@ -0,0 +1,1870 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+
+#include <stdio.h>
+
+#include "main.h"
+#include <unsupported/Eigen/NonLinearOptimization>
+
+// This disables some noisy warnings on MSVC.
+// It is intended to apply to this test only.
+#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+
+using std::sqrt;
+
+int fcn_chkder(const VectorXd &x, VectorXd &fvec, MatrixXd &fjac, int iflag)
+{
+ /* subroutine fcn for chkder example. */
+
+ int i;
+ assert(15 == fvec.size());
+ assert(3 == x.size());
+ double tmp1, tmp2, tmp3, tmp4;
+ static const double y[15]={1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1,
+ 3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39};
+
+
+ if (iflag == 0)
+ return 0;
+
+ if (iflag != 2)
+ for (i=0; i<15; i++) {
+ tmp1 = i+1;
+ tmp2 = 16-i-1;
+ tmp3 = tmp1;
+ if (i >= 8) tmp3 = tmp2;
+ fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3));
+ }
+ else {
+ for (i = 0; i < 15; i++) {
+ tmp1 = i+1;
+ tmp2 = 16-i-1;
+
+ /* error introduced into next statement for illustration. */
+      /* corrected statement should read tmp3 = tmp1. */
+
+ tmp3 = tmp2;
+ if (i >= 8) tmp3 = tmp2;
+ tmp4 = (x[1]*tmp2 + x[2]*tmp3); tmp4=tmp4*tmp4;
+ fjac(i,0) = -1.;
+ fjac(i,1) = tmp1*tmp2/tmp4;
+ fjac(i,2) = tmp1*tmp3/tmp4;
+ }
+ }
+ return 0;
+}
+
+
+void testChkder()
+{
+ const int m=15, n=3;
+ VectorXd x(n), fvec(m), xp, fvecp(m), err;
+ MatrixXd fjac(m,n);
+ VectorXi ipvt;
+
+ /* the following values should be suitable for */
+ /* checking the jacobian matrix. */
+ x << 9.2e-1, 1.3e-1, 5.4e-1;
+
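+  // MINPACK chkder protocol: the first call (mode 1) produces a neighboring
+  // point xp; we evaluate the function at x and xp and the (deliberately
+  // wrong, see fcn_chkder) jacobian at x, then the second call (mode 2) fills
+  // err with a per-component plausibility measure: values near 1 mean the
+  // corresponding gradient is probably correct, values near 0 that it is not.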
+ internal::chkder(x, fvec, fjac, xp, fvecp, 1, err);
+ fcn_chkder(x, fvec, fjac, 1);
+ fcn_chkder(x, fvec, fjac, 2);
+ fcn_chkder(xp, fvecp, fjac, 1);
+ internal::chkder(x, fvec, fjac, xp, fvecp, 2, err);
+
+ fvecp -= fvec;
+
+ // check those
+ VectorXd fvec_ref(m), fvecp_ref(m), err_ref(m);
+ fvec_ref <<
+ -1.181606, -1.429655, -1.606344,
+ -1.745269, -1.840654, -1.921586,
+ -1.984141, -2.022537, -2.468977,
+ -2.827562, -3.473582, -4.437612,
+ -6.047662, -9.267761, -18.91806;
+ fvecp_ref <<
+ -7.724666e-09, -3.432406e-09, -2.034843e-10,
+ 2.313685e-09, 4.331078e-09, 5.984096e-09,
+ 7.363281e-09, 8.53147e-09, 1.488591e-08,
+ 2.33585e-08, 3.522012e-08, 5.301255e-08,
+ 8.26666e-08, 1.419747e-07, 3.19899e-07;
+ err_ref <<
+ 0.1141397, 0.09943516, 0.09674474,
+ 0.09980447, 0.1073116, 0.1220445,
+ 0.1526814, 1, 1,
+ 1, 1, 1,
+ 1, 1, 1;
+
+ VERIFY_IS_APPROX(fvec, fvec_ref);
+ VERIFY_IS_APPROX(fvecp, fvecp_ref);
+ VERIFY_IS_APPROX(err, err_ref);
+}
+
+// Generic functor
+template<typename _Scalar, int NX=Dynamic, int NY=Dynamic>
+struct Functor
+{
+ typedef _Scalar Scalar;
+ enum {
+ InputsAtCompileTime = NX,
+ ValuesAtCompileTime = NY
+ };
+ typedef Matrix<Scalar,InputsAtCompileTime,1> InputType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,1> ValueType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,InputsAtCompileTime> JacobianType;
+
+ const int m_inputs, m_values;
+
+ Functor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {}
+ Functor(int inputs, int values) : m_inputs(inputs), m_values(values) {}
+
+ int inputs() const { return m_inputs; }
+ int values() const { return m_values; }
+
+  // subclasses must implement the residual evaluation, and solvers that need
+  // an analytic jacobian also expect a df() method:
+//  int operator() (const InputType& x, ValueType &fvec) const;
+//  int df(const InputType& x, JacobianType &fjac) const;
+};
+
+struct lmder_functor : Functor<double>
+{
+ lmder_functor(void): Functor<double>(3,15) {}
+ int operator()(const VectorXd &x, VectorXd &fvec) const
+ {
+ double tmp1, tmp2, tmp3;
+ static const double y[15] = {1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1,
+ 3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39};
+
+ for (int i = 0; i < values(); i++)
+ {
+ tmp1 = i+1;
+ tmp2 = 16 - i - 1;
+ tmp3 = (i>=8)? tmp2 : tmp1;
+ fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3));
+ }
+ return 0;
+ }
+
+ int df(const VectorXd &x, MatrixXd &fjac) const
+ {
+ double tmp1, tmp2, tmp3, tmp4;
+ for (int i = 0; i < values(); i++)
+ {
+ tmp1 = i+1;
+ tmp2 = 16 - i - 1;
+ tmp3 = (i>=8)? tmp2 : tmp1;
+ tmp4 = (x[1]*tmp2 + x[2]*tmp3); tmp4 = tmp4*tmp4;
+ fjac(i,0) = -1;
+ fjac(i,1) = tmp1*tmp2/tmp4;
+ fjac(i,2) = tmp1*tmp3/tmp4;
+ }
+ return 0;
+ }
+};
+
+void testLmder1()
+{
+ int n=3, info;
+
+ VectorXd x;
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, 1.);
+
+ // do the computation
+ lmder_functor functor;
+ LevenbergMarquardt<lmder_functor> lm(functor);
+ info = lm.lmder1(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 6);
+ VERIFY_IS_EQUAL(lm.njev, 5);
+
+ // check norm
+ VERIFY_IS_APPROX(lm.fvec.blueNorm(), 0.09063596);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref << 0.08241058, 1.133037, 2.343695;
+ VERIFY_IS_APPROX(x, x_ref);
+}
+
+void testLmder()
+{
+ const int m=15, n=3;
+ int info;
+ double fnorm, covfac;
+ VectorXd x;
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, 1.);
+
+ // do the computation
+ lmder_functor functor;
+ LevenbergMarquardt<lmder_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return values
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 6);
+ VERIFY_IS_EQUAL(lm.njev, 5);
+
+ // check norm
+ fnorm = lm.fvec.blueNorm();
+ VERIFY_IS_APPROX(fnorm, 0.09063596);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref << 0.08241058, 1.133037, 2.343695;
+ VERIFY_IS_APPROX(x, x_ref);
+
+ // check covariance
+ covfac = fnorm*fnorm/(m-n);
+  internal::covar(lm.fjac, lm.permutation.indices()); // TODO: move this into LevenbergMarquardt as a member function
+
+ MatrixXd cov_ref(n,n);
+ cov_ref <<
+ 0.0001531202, 0.002869941, -0.002656662,
+ 0.002869941, 0.09480935, -0.09098995,
+ -0.002656662, -0.09098995, 0.08778727;
+
+// std::cout << fjac*covfac << std::endl;
+
+ MatrixXd cov;
+ cov = covfac*lm.fjac.topLeftCorner<n,n>();
+ VERIFY_IS_APPROX( cov, cov_ref);
+  // TODO: why isn't the following allowed?
+ // VERIFY_IS_APPROX( covfac*fjac.topLeftCorner<n,n>() , cov_ref);
+}
+
+struct hybrj_functor : Functor<double>
+{
+ hybrj_functor(void) : Functor<double>(9,9) {}
+
+ int operator()(const VectorXd &x, VectorXd &fvec)
+ {
+ double temp, temp1, temp2;
+ const int n = x.size();
+ assert(fvec.size()==n);
+ for (int k = 0; k < n; k++)
+ {
+ temp = (3. - 2.*x[k])*x[k];
+ temp1 = 0.;
+ if (k) temp1 = x[k-1];
+ temp2 = 0.;
+ if (k != n-1) temp2 = x[k+1];
+ fvec[k] = temp - temp1 - 2.*temp2 + 1.;
+ }
+ return 0;
+ }
+ int df(const VectorXd &x, MatrixXd &fjac)
+ {
+ const int n = x.size();
+ assert(fjac.rows()==n);
+ assert(fjac.cols()==n);
+ for (int k = 0; k < n; k++)
+ {
+ for (int j = 0; j < n; j++)
+ fjac(k,j) = 0.;
+ fjac(k,k) = 3.- 4.*x[k];
+ if (k) fjac(k,k-1) = -1.;
+ if (k != n-1) fjac(k,k+1) = -2.;
+ }
+ return 0;
+ }
+};
+
+
+void testHybrj1()
+{
+ const int n=9;
+ int info;
+ VectorXd x(n);
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, -1.);
+
+ // do the computation
+ hybrj_functor functor;
+ HybridNonLinearSolver<hybrj_functor> solver(functor);
+ info = solver.hybrj1(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(solver.nfev, 11);
+ VERIFY_IS_EQUAL(solver.njev, 1);
+
+ // check norm
+ VERIFY_IS_APPROX(solver.fvec.blueNorm(), 1.192636e-08);
+
+
+// check x
+ VectorXd x_ref(n);
+ x_ref <<
+ -0.5706545, -0.6816283, -0.7017325,
+ -0.7042129, -0.701369, -0.6918656,
+ -0.665792, -0.5960342, -0.4164121;
+ VERIFY_IS_APPROX(x, x_ref);
+}
+
+void testHybrj()
+{
+ const int n=9;
+ int info;
+ VectorXd x(n);
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, -1.);
+
+
+ // do the computation
+ hybrj_functor functor;
+ HybridNonLinearSolver<hybrj_functor> solver(functor);
+ solver.diag.setConstant(n, 1.);
+ solver.useExternalScaling = true;
+ info = solver.solve(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(solver.nfev, 11);
+ VERIFY_IS_EQUAL(solver.njev, 1);
+
+ // check norm
+ VERIFY_IS_APPROX(solver.fvec.blueNorm(), 1.192636e-08);
+
+
+// check x
+ VectorXd x_ref(n);
+ x_ref <<
+ -0.5706545, -0.6816283, -0.7017325,
+ -0.7042129, -0.701369, -0.6918656,
+ -0.665792, -0.5960342, -0.4164121;
+ VERIFY_IS_APPROX(x, x_ref);
+
+}
+
+struct hybrd_functor : Functor<double>
+{
+ hybrd_functor(void) : Functor<double>(9,9) {}
+ int operator()(const VectorXd &x, VectorXd &fvec) const
+ {
+ double temp, temp1, temp2;
+ const int n = x.size();
+
+ assert(fvec.size()==n);
+ for (int k=0; k < n; k++)
+ {
+ temp = (3. - 2.*x[k])*x[k];
+ temp1 = 0.;
+ if (k) temp1 = x[k-1];
+ temp2 = 0.;
+ if (k != n-1) temp2 = x[k+1];
+ fvec[k] = temp - temp1 - 2.*temp2 + 1.;
+ }
+ return 0;
+ }
+};
+
+void testHybrd1()
+{
+ int n=9, info;
+ VectorXd x(n);
+
+ /* the following starting values provide a rough solution. */
+ x.setConstant(n, -1.);
+
+ // do the computation
+ hybrd_functor functor;
+ HybridNonLinearSolver<hybrd_functor> solver(functor);
+ info = solver.hybrd1(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(solver.nfev, 20);
+
+ // check norm
+ VERIFY_IS_APPROX(solver.fvec.blueNorm(), 1.192636e-08);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref << -0.5706545, -0.6816283, -0.7017325, -0.7042129, -0.701369, -0.6918656, -0.665792, -0.5960342, -0.4164121;
+ VERIFY_IS_APPROX(x, x_ref);
+}
+
+void testHybrd()
+{
+ const int n=9;
+ int info;
+ VectorXd x;
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, -1.);
+
+ // do the computation
+ hybrd_functor functor;
+ HybridNonLinearSolver<hybrd_functor> solver(functor);
+ solver.parameters.nb_of_subdiagonals = 1;
+ solver.parameters.nb_of_superdiagonals = 1;
+ solver.diag.setConstant(n, 1.);
+ solver.useExternalScaling = true;
+ info = solver.solveNumericalDiff(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(solver.nfev, 14);
+
+ // check norm
+ VERIFY_IS_APPROX(solver.fvec.blueNorm(), 1.192636e-08);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref <<
+ -0.5706545, -0.6816283, -0.7017325,
+ -0.7042129, -0.701369, -0.6918656,
+ -0.665792, -0.5960342, -0.4164121;
+ VERIFY_IS_APPROX(x, x_ref);
+}
+
+struct lmstr_functor : Functor<double>
+{
+ lmstr_functor(void) : Functor<double>(3,15) {}
+ int operator()(const VectorXd &x, VectorXd &fvec)
+ {
+ /* subroutine fcn for lmstr1 example. */
+ double tmp1, tmp2, tmp3;
+ static const double y[15]={1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1,
+ 3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39};
+
+ assert(15==fvec.size());
+ assert(3==x.size());
+
+ for (int i=0; i<15; i++)
+ {
+ tmp1 = i+1;
+ tmp2 = 16 - i - 1;
+ tmp3 = (i>=8)? tmp2 : tmp1;
+ fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3));
+ }
+ return 0;
+ }
+ int df(const VectorXd &x, VectorXd &jac_row, VectorXd::Index rownb)
+ {
+ assert(x.size()==3);
+ assert(jac_row.size()==x.size());
+ double tmp1, tmp2, tmp3, tmp4;
+
+ int i = rownb-2;
+ tmp1 = i+1;
+ tmp2 = 16 - i - 1;
+ tmp3 = (i>=8)? tmp2 : tmp1;
+ tmp4 = (x[1]*tmp2 + x[2]*tmp3); tmp4 = tmp4*tmp4;
+ jac_row[0] = -1;
+ jac_row[1] = tmp1*tmp2/tmp4;
+ jac_row[2] = tmp1*tmp3/tmp4;
+ return 0;
+ }
+};
+
+void testLmstr1()
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n);
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, 1.);
+
+ // do the computation
+ lmstr_functor functor;
+ LevenbergMarquardt<lmstr_functor> lm(functor);
+ info = lm.lmstr1(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 6);
+ VERIFY_IS_EQUAL(lm.njev, 5);
+
+ // check norm
+ VERIFY_IS_APPROX(lm.fvec.blueNorm(), 0.09063596);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref << 0.08241058, 1.133037, 2.343695 ;
+ VERIFY_IS_APPROX(x, x_ref);
+}
+
+void testLmstr()
+{
+ const int n=3;
+ int info;
+ double fnorm;
+ VectorXd x(n);
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, 1.);
+
+ // do the computation
+ lmstr_functor functor;
+ LevenbergMarquardt<lmstr_functor> lm(functor);
+ info = lm.minimizeOptimumStorage(x);
+
+ // check return values
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 6);
+ VERIFY_IS_EQUAL(lm.njev, 5);
+
+ // check norm
+ fnorm = lm.fvec.blueNorm();
+ VERIFY_IS_APPROX(fnorm, 0.09063596);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref << 0.08241058, 1.133037, 2.343695;
+ VERIFY_IS_APPROX(x, x_ref);
+
+}
+
+struct lmdif_functor : Functor<double>
+{
+ lmdif_functor(void) : Functor<double>(3,15) {}
+ int operator()(const VectorXd &x, VectorXd &fvec) const
+ {
+ int i;
+ double tmp1,tmp2,tmp3;
+ static const double y[15]={1.4e-1,1.8e-1,2.2e-1,2.5e-1,2.9e-1,3.2e-1,3.5e-1,3.9e-1,
+ 3.7e-1,5.8e-1,7.3e-1,9.6e-1,1.34e0,2.1e0,4.39e0};
+
+ assert(x.size()==3);
+ assert(fvec.size()==15);
+ for (i=0; i<15; i++)
+ {
+ tmp1 = i+1;
+ tmp2 = 15 - i;
+ tmp3 = tmp1;
+
+ if (i >= 8) tmp3 = tmp2;
+ fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3));
+ }
+ return 0;
+ }
+};
+
+void testLmdif1()
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n), fvec(15);
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, 1.);
+
+ // do the computation
+ lmdif_functor functor;
+ DenseIndex nfev;
+ info = LevenbergMarquardt<lmdif_functor>::lmdif1(functor, x, &nfev);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(nfev, 26);
+
+ // check norm
+ functor(x, fvec);
+ VERIFY_IS_APPROX(fvec.blueNorm(), 0.09063596);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref << 0.0824106, 1.1330366, 2.3436947;
+ VERIFY_IS_APPROX(x, x_ref);
+
+}
+
+void testLmdif()
+{
+ const int m=15, n=3;
+ int info;
+ double fnorm, covfac;
+ VectorXd x(n);
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, 1.);
+
+ // do the computation
+ lmdif_functor functor;
+ NumericalDiff<lmdif_functor> numDiff(functor);
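+  // NumericalDiff wraps the functor and supplies the df() (jacobian) method
+  // via forward finite differences, so LevenbergMarquardt can run without an
+  // analytic jacobian.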
+ LevenbergMarquardt<NumericalDiff<lmdif_functor> > lm(numDiff);
+ info = lm.minimize(x);
+
+ // check return values
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 26);
+
+ // check norm
+ fnorm = lm.fvec.blueNorm();
+ VERIFY_IS_APPROX(fnorm, 0.09063596);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref << 0.08241058, 1.133037, 2.343695;
+ VERIFY_IS_APPROX(x, x_ref);
+
+ // check covariance
+ covfac = fnorm*fnorm/(m-n);
+  internal::covar(lm.fjac, lm.permutation.indices()); // TODO: move this into LevenbergMarquardt as a member function
+
+ MatrixXd cov_ref(n,n);
+ cov_ref <<
+ 0.0001531202, 0.002869942, -0.002656662,
+ 0.002869942, 0.09480937, -0.09098997,
+ -0.002656662, -0.09098997, 0.08778729;
+
+// std::cout << fjac*covfac << std::endl;
+
+ MatrixXd cov;
+ cov = covfac*lm.fjac.topLeftCorner<n,n>();
+ VERIFY_IS_APPROX( cov, cov_ref);
+  // TODO: why isn't the following allowed?
+ // VERIFY_IS_APPROX( covfac*fjac.topLeftCorner<n,n>() , cov_ref);
+}
+
+struct chwirut2_functor : Functor<double>
+{
+ chwirut2_functor(void) : Functor<double>(3,54) {}
+ static const double m_x[54];
+ static const double m_y[54];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ int i;
+
+ assert(b.size()==3);
+ assert(fvec.size()==54);
+ for(i=0; i<54; i++) {
+ double x = m_x[i];
+ fvec[i] = exp(-b[0]*x)/(b[1]+b[2]*x) - m_y[i];
+ }
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==3);
+ assert(fjac.rows()==54);
+ assert(fjac.cols()==3);
+ for(int i=0; i<54; i++) {
+ double x = m_x[i];
+ double factor = 1./(b[1]+b[2]*x);
+ double e = exp(-b[0]*x);
+ fjac(i,0) = -x*e*factor;
+ fjac(i,1) = -e*factor*factor;
+ fjac(i,2) = -x*e*factor*factor;
+ }
+ return 0;
+ }
+};
+const double chwirut2_functor::m_x[54] = { 0.500E0, 1.000E0, 1.750E0, 3.750E0, 5.750E0, 0.875E0, 2.250E0, 3.250E0, 5.250E0, 0.750E0, 1.750E0, 2.750E0, 4.750E0, 0.625E0, 1.250E0, 2.250E0, 4.250E0, .500E0, 3.000E0, .750E0, 3.000E0, 1.500E0, 6.000E0, 3.000E0, 6.000E0, 1.500E0, 3.000E0, .500E0, 2.000E0, 4.000E0, .750E0, 2.000E0, 5.000E0, .750E0, 2.250E0, 3.750E0, 5.750E0, 3.000E0, .750E0, 2.500E0, 4.000E0, .750E0, 2.500E0, 4.000E0, .750E0, 2.500E0, 4.000E0, .500E0, 6.000E0, 3.000E0, .500E0, 2.750E0, .500E0, 1.750E0};
+const double chwirut2_functor::m_y[54] = { 92.9000E0 ,57.1000E0 ,31.0500E0 ,11.5875E0 ,8.0250E0 ,63.6000E0 ,21.4000E0 ,14.2500E0 ,8.4750E0 ,63.8000E0 ,26.8000E0 ,16.4625E0 ,7.1250E0 ,67.3000E0 ,41.0000E0 ,21.1500E0 ,8.1750E0 ,81.5000E0 ,13.1200E0 ,59.9000E0 ,14.6200E0 ,32.9000E0 ,5.4400E0 ,12.5600E0 ,5.4400E0 ,32.0000E0 ,13.9500E0 ,75.8000E0 ,20.0000E0 ,10.4200E0 ,59.5000E0 ,21.6700E0 ,8.5500E0 ,62.0000E0 ,20.2000E0 ,7.7600E0 ,3.7500E0 ,11.8100E0 ,54.7000E0 ,23.7000E0 ,11.5500E0 ,61.3000E0 ,17.7000E0 ,8.7400E0 ,59.2000E0 ,16.3000E0 ,8.6200E0 ,81.0000E0 ,4.8700E0 ,14.6200E0 ,81.7000E0 ,17.1700E0 ,81.3000E0 ,28.9000E0 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/chwirut2.shtml
+void testNistChwirut2(void)
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 0.1, 0.01, 0.02;
+ // do the computation
+ chwirut2_functor functor;
+ LevenbergMarquardt<chwirut2_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 10);
+ VERIFY_IS_EQUAL(lm.njev, 8);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.1304802941E+02);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.6657666537E-01);
+ VERIFY_IS_APPROX(x[1], 5.1653291286E-03);
+ VERIFY_IS_APPROX(x[2], 1.2150007096E-02);
+
+ /*
+ * Second try
+ */
+ x<< 0.15, 0.008, 0.010;
+ // do the computation
+ lm.resetParameters();
+ lm.parameters.ftol = 1.E6*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E6*NumTraits<double>::epsilon();
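+  // (i.e. roughly 2.2e-10 for double, much tighter than the defaults)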
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 7);
+ VERIFY_IS_EQUAL(lm.njev, 6);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.1304802941E+02);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.6657666537E-01);
+ VERIFY_IS_APPROX(x[1], 5.1653291286E-03);
+ VERIFY_IS_APPROX(x[2], 1.2150007096E-02);
+}
+
+
+struct misra1a_functor : Functor<double>
+{
+ misra1a_functor(void) : Functor<double>(2,14) {}
+ static const double m_x[14];
+ static const double m_y[14];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==2);
+ assert(fvec.size()==14);
+ for(int i=0; i<14; i++) {
+ fvec[i] = b[0]*(1.-exp(-b[1]*m_x[i])) - m_y[i] ;
+ }
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==2);
+ assert(fjac.rows()==14);
+ assert(fjac.cols()==2);
+ for(int i=0; i<14; i++) {
+ fjac(i,0) = (1.-exp(-b[1]*m_x[i]));
+ fjac(i,1) = (b[0]*m_x[i]*exp(-b[1]*m_x[i]));
+ }
+ return 0;
+ }
+};
+const double misra1a_functor::m_x[14] = { 77.6E0, 114.9E0, 141.1E0, 190.8E0, 239.9E0, 289.0E0, 332.8E0, 378.4E0, 434.8E0, 477.3E0, 536.8E0, 593.1E0, 689.1E0, 760.0E0};
+const double misra1a_functor::m_y[14] = { 10.07E0, 14.73E0, 17.94E0, 23.93E0, 29.61E0, 35.18E0, 40.02E0, 44.82E0, 50.76E0, 55.05E0, 61.01E0, 66.40E0, 75.47E0, 81.78E0};
+
+// http://www.itl.nist.gov/div898/strd/nls/data/misra1a.shtml
+void testNistMisra1a(void)
+{
+ const int n=2;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 500., 0.0001;
+ // do the computation
+ misra1a_functor functor;
+ LevenbergMarquardt<misra1a_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 19);
+ VERIFY_IS_EQUAL(lm.njev, 15);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.2455138894E-01);
+ // check x
+ VERIFY_IS_APPROX(x[0], 2.3894212918E+02);
+ VERIFY_IS_APPROX(x[1], 5.5015643181E-04);
+
+ /*
+ * Second try
+ */
+ x<< 250., 0.0005;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 5);
+ VERIFY_IS_EQUAL(lm.njev, 4);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.2455138894E-01);
+ // check x
+ VERIFY_IS_APPROX(x[0], 2.3894212918E+02);
+ VERIFY_IS_APPROX(x[1], 5.5015643181E-04);
+}
+
+struct hahn1_functor : Functor<double>
+{
+ hahn1_functor(void) : Functor<double>(7,236) {}
+ static const double m_x[236];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ static const double m_y[236] = { .591E0 , 1.547E0 , 2.902E0 , 2.894E0 , 4.703E0 , 6.307E0 , 7.03E0 , 7.898E0 , 9.470E0 , 9.484E0 , 10.072E0 , 10.163E0 , 11.615E0 , 12.005E0 , 12.478E0 , 12.982E0 , 12.970E0 , 13.926E0 , 14.452E0 , 14.404E0 , 15.190E0 , 15.550E0 , 15.528E0 , 15.499E0 , 16.131E0 , 16.438E0 , 16.387E0 , 16.549E0 , 16.872E0 , 16.830E0 , 16.926E0 , 16.907E0 , 16.966E0 , 17.060E0 , 17.122E0 , 17.311E0 , 17.355E0 , 17.668E0 , 17.767E0 , 17.803E0 , 17.765E0 , 17.768E0 , 17.736E0 , 17.858E0 , 17.877E0 , 17.912E0 , 18.046E0 , 18.085E0 , 18.291E0 , 18.357E0 , 18.426E0 , 18.584E0 , 18.610E0 , 18.870E0 , 18.795E0 , 19.111E0 , .367E0 , .796E0 , 0.892E0 , 1.903E0 , 2.150E0 , 3.697E0 , 5.870E0 , 6.421E0 , 7.422E0 , 9.944E0 , 11.023E0 , 11.87E0 , 12.786E0 , 14.067E0 , 13.974E0 , 14.462E0 , 14.464E0 , 15.381E0 , 15.483E0 , 15.59E0 , 16.075E0 , 16.347E0 , 16.181E0 , 16.915E0 , 17.003E0 , 16.978E0 , 17.756E0 , 17.808E0 , 17.868E0 , 18.481E0 , 18.486E0 , 19.090E0 , 16.062E0 , 16.337E0 , 16.345E0 ,
+ 16.388E0 , 17.159E0 , 17.116E0 , 17.164E0 , 17.123E0 , 17.979E0 , 17.974E0 , 18.007E0 , 17.993E0 , 18.523E0 , 18.669E0 , 18.617E0 , 19.371E0 , 19.330E0 , 0.080E0 , 0.248E0 , 1.089E0 , 1.418E0 , 2.278E0 , 3.624E0 , 4.574E0 , 5.556E0 , 7.267E0 , 7.695E0 , 9.136E0 , 9.959E0 , 9.957E0 , 11.600E0 , 13.138E0 , 13.564E0 , 13.871E0 , 13.994E0 , 14.947E0 , 15.473E0 , 15.379E0 , 15.455E0 , 15.908E0 , 16.114E0 , 17.071E0 , 17.135E0 , 17.282E0 , 17.368E0 , 17.483E0 , 17.764E0 , 18.185E0 , 18.271E0 , 18.236E0 , 18.237E0 , 18.523E0 , 18.627E0 , 18.665E0 , 19.086E0 , 0.214E0 , 0.943E0 , 1.429E0 , 2.241E0 , 2.951E0 , 3.782E0 , 4.757E0 , 5.602E0 , 7.169E0 , 8.920E0 , 10.055E0 , 12.035E0 , 12.861E0 , 13.436E0 , 14.167E0 , 14.755E0 , 15.168E0 , 15.651E0 , 15.746E0 , 16.216E0 , 16.445E0 , 16.965E0 , 17.121E0 , 17.206E0 , 17.250E0 , 17.339E0 , 17.793E0 , 18.123E0 , 18.49E0 , 18.566E0 , 18.645E0 , 18.706E0 , 18.924E0 , 19.1E0 , 0.375E0 , 0.471E0 , 1.504E0 , 2.204E0 , 2.813E0 , 4.765E0 , 9.835E0 , 10.040E0 , 11.946E0 , 12.596E0 ,
+13.303E0 , 13.922E0 , 14.440E0 , 14.951E0 , 15.627E0 , 15.639E0 , 15.814E0 , 16.315E0 , 16.334E0 , 16.430E0 , 16.423E0 , 17.024E0 , 17.009E0 , 17.165E0 , 17.134E0 , 17.349E0 , 17.576E0 , 17.848E0 , 18.090E0 , 18.276E0 , 18.404E0 , 18.519E0 , 19.133E0 , 19.074E0 , 19.239E0 , 19.280E0 , 19.101E0 , 19.398E0 , 19.252E0 , 19.89E0 , 20.007E0 , 19.929E0 , 19.268E0 , 19.324E0 , 20.049E0 , 20.107E0 , 20.062E0 , 20.065E0 , 19.286E0 , 19.972E0 , 20.088E0 , 20.743E0 , 20.83E0 , 20.935E0 , 21.035E0 , 20.93E0 , 21.074E0 , 21.085E0 , 20.935E0 };
+
+ // int called=0; printf("call hahn1_functor with iflag=%d, called=%d\n", iflag, called); if (iflag==1) called++;
+
+ assert(b.size()==7);
+ assert(fvec.size()==236);
+ for(int i=0; i<236; i++) {
+ double x=m_x[i], xx=x*x, xxx=xx*x;
+ fvec[i] = (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) / (1.+b[4]*x+b[5]*xx+b[6]*xxx) - m_y[i];
+ }
+ return 0;
+ }
+
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==7);
+ assert(fjac.rows()==236);
+ assert(fjac.cols()==7);
+ for(int i=0; i<236; i++) {
+ double x=m_x[i], xx=x*x, xxx=xx*x;
+ double fact = 1./(1.+b[4]*x+b[5]*xx+b[6]*xxx);
+ fjac(i,0) = 1.*fact;
+ fjac(i,1) = x*fact;
+ fjac(i,2) = xx*fact;
+ fjac(i,3) = xxx*fact;
+ fact = - (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) * fact * fact;
+ fjac(i,4) = x*fact;
+ fjac(i,5) = xx*fact;
+ fjac(i,6) = xxx*fact;
+ }
+ return 0;
+ }
+};
+const double hahn1_functor::m_x[236] = { 24.41E0 , 34.82E0 , 44.09E0 , 45.07E0 , 54.98E0 , 65.51E0 , 70.53E0 , 75.70E0 , 89.57E0 , 91.14E0 , 96.40E0 , 97.19E0 , 114.26E0 , 120.25E0 , 127.08E0 , 133.55E0 , 133.61E0 , 158.67E0 , 172.74E0 , 171.31E0 , 202.14E0 , 220.55E0 , 221.05E0 , 221.39E0 , 250.99E0 , 268.99E0 , 271.80E0 , 271.97E0 , 321.31E0 , 321.69E0 , 330.14E0 , 333.03E0 , 333.47E0 , 340.77E0 , 345.65E0 , 373.11E0 , 373.79E0 , 411.82E0 , 419.51E0 , 421.59E0 , 422.02E0 , 422.47E0 , 422.61E0 , 441.75E0 , 447.41E0 , 448.7E0 , 472.89E0 , 476.69E0 , 522.47E0 , 522.62E0 , 524.43E0 , 546.75E0 , 549.53E0 , 575.29E0 , 576.00E0 , 625.55E0 , 20.15E0 , 28.78E0 , 29.57E0 , 37.41E0 , 39.12E0 , 50.24E0 , 61.38E0 , 66.25E0 , 73.42E0 , 95.52E0 , 107.32E0 , 122.04E0 , 134.03E0 , 163.19E0 , 163.48E0 , 175.70E0 , 179.86E0 , 211.27E0 , 217.78E0 , 219.14E0 , 262.52E0 , 268.01E0 , 268.62E0 , 336.25E0 , 337.23E0 , 339.33E0 , 427.38E0 , 428.58E0 , 432.68E0 , 528.99E0 , 531.08E0 , 628.34E0 , 253.24E0 , 273.13E0 , 273.66E0 ,
+282.10E0 , 346.62E0 , 347.19E0 , 348.78E0 , 351.18E0 , 450.10E0 , 450.35E0 , 451.92E0 , 455.56E0 , 552.22E0 , 553.56E0 , 555.74E0 , 652.59E0 , 656.20E0 , 14.13E0 , 20.41E0 , 31.30E0 , 33.84E0 , 39.70E0 , 48.83E0 , 54.50E0 , 60.41E0 , 72.77E0 , 75.25E0 , 86.84E0 , 94.88E0 , 96.40E0 , 117.37E0 , 139.08E0 , 147.73E0 , 158.63E0 , 161.84E0 , 192.11E0 , 206.76E0 , 209.07E0 , 213.32E0 , 226.44E0 , 237.12E0 , 330.90E0 , 358.72E0 , 370.77E0 , 372.72E0 , 396.24E0 , 416.59E0 , 484.02E0 , 495.47E0 , 514.78E0 , 515.65E0 , 519.47E0 , 544.47E0 , 560.11E0 , 620.77E0 , 18.97E0 , 28.93E0 , 33.91E0 , 40.03E0 , 44.66E0 , 49.87E0 , 55.16E0 , 60.90E0 , 72.08E0 , 85.15E0 , 97.06E0 , 119.63E0 , 133.27E0 , 143.84E0 , 161.91E0 , 180.67E0 , 198.44E0 , 226.86E0 , 229.65E0 , 258.27E0 , 273.77E0 , 339.15E0 , 350.13E0 , 362.75E0 , 371.03E0 , 393.32E0 , 448.53E0 , 473.78E0 , 511.12E0 , 524.70E0 , 548.75E0 , 551.64E0 , 574.02E0 , 623.86E0 , 21.46E0 , 24.33E0 , 33.43E0 , 39.22E0 , 44.18E0 , 55.02E0 , 94.33E0 , 96.44E0 , 118.82E0 , 128.48E0 ,
+141.94E0 , 156.92E0 , 171.65E0 , 190.00E0 , 223.26E0 , 223.88E0 , 231.50E0 , 265.05E0 , 269.44E0 , 271.78E0 , 273.46E0 , 334.61E0 , 339.79E0 , 349.52E0 , 358.18E0 , 377.98E0 , 394.77E0 , 429.66E0 , 468.22E0 , 487.27E0 , 519.54E0 , 523.03E0 , 612.99E0 , 638.59E0 , 641.36E0 , 622.05E0 , 631.50E0 , 663.97E0 , 646.9E0 , 748.29E0 , 749.21E0 , 750.14E0 , 647.04E0 , 646.89E0 , 746.9E0 , 748.43E0 , 747.35E0 , 749.27E0 , 647.61E0 , 747.78E0 , 750.51E0 , 851.37E0 , 845.97E0 , 847.54E0 , 849.93E0 , 851.61E0 , 849.75E0 , 850.98E0 , 848.23E0};
+
+// http://www.itl.nist.gov/div898/strd/nls/data/hahn1.shtml
+void testNistHahn1(void)
+{
+ const int n=7;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 10., -1., .05, -.00001, -.05, .001, -.000001;
+ // do the computation
+ hahn1_functor functor;
+ LevenbergMarquardt<hahn1_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 11);
+ VERIFY_IS_EQUAL(lm.njev, 10);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.5324382854E+00);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.0776351733E+00);
+ VERIFY_IS_APPROX(x[1],-1.2269296921E-01);
+ VERIFY_IS_APPROX(x[2], 4.0863750610E-03);
+  VERIFY_IS_APPROX(x[3],-1.426264e-06); // should be: -1.4262662514E-06
+ VERIFY_IS_APPROX(x[4],-5.7609940901E-03);
+ VERIFY_IS_APPROX(x[5], 2.4053735503E-04);
+ VERIFY_IS_APPROX(x[6],-1.2314450199E-07);
+
+ /*
+ * Second try
+ */
+ x<< .1, -.1, .005, -.000001, -.005, .0001, -.0000001;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 11);
+ VERIFY_IS_EQUAL(lm.njev, 10);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.5324382854E+00);
+ // check x
+  VERIFY_IS_APPROX(x[0], 1.077640); // should be: 1.0776351733E+00
+  VERIFY_IS_APPROX(x[1], -0.1226933); // should be: -1.2269296921E-01
+  VERIFY_IS_APPROX(x[2], 0.004086383); // should be: 4.0863750610E-03
+  VERIFY_IS_APPROX(x[3], -1.426277e-06); // should be: -1.4262662514E-06
+  VERIFY_IS_APPROX(x[4],-5.7609940901E-03);
+  VERIFY_IS_APPROX(x[5], 0.00024053772); // should be: 2.4053735503E-04
+  VERIFY_IS_APPROX(x[6], -1.231450e-07); // should be: -1.2314450199E-07
+
+}
+
+struct misra1d_functor : Functor<double>
+{
+ misra1d_functor(void) : Functor<double>(2,14) {}
+ static const double x[14];
+ static const double y[14];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==2);
+ assert(fvec.size()==14);
+ for(int i=0; i<14; i++) {
+ fvec[i] = b[0]*b[1]*x[i]/(1.+b[1]*x[i]) - y[i];
+ }
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==2);
+ assert(fjac.rows()==14);
+ assert(fjac.cols()==2);
+ for(int i=0; i<14; i++) {
+ double den = 1.+b[1]*x[i];
+ fjac(i,0) = b[1]*x[i] / den;
+ fjac(i,1) = b[0]*x[i]*(den-b[1]*x[i])/den/den;
+ }
+ return 0;
+ }
+};
+const double misra1d_functor::x[14] = { 77.6E0, 114.9E0, 141.1E0, 190.8E0, 239.9E0, 289.0E0, 332.8E0, 378.4E0, 434.8E0, 477.3E0, 536.8E0, 593.1E0, 689.1E0, 760.0E0};
+const double misra1d_functor::y[14] = { 10.07E0, 14.73E0, 17.94E0, 23.93E0, 29.61E0, 35.18E0, 40.02E0, 44.82E0, 50.76E0, 55.05E0, 61.01E0, 66.40E0, 75.47E0, 81.78E0};
+
+// http://www.itl.nist.gov/div898/strd/nls/data/misra1d.shtml
+void testNistMisra1d(void)
+{
+ const int n=2;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 500., 0.0001;
+ // do the computation
+ misra1d_functor functor;
+ LevenbergMarquardt<misra1d_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 3);
+ VERIFY_IS_EQUAL(lm.nfev, 9);
+ VERIFY_IS_EQUAL(lm.njev, 7);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6419295283E-02);
+ // check x
+ VERIFY_IS_APPROX(x[0], 4.3736970754E+02);
+ VERIFY_IS_APPROX(x[1], 3.0227324449E-04);
+
+ /*
+ * Second try
+ */
+ x<< 450., 0.0003;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 4);
+ VERIFY_IS_EQUAL(lm.njev, 3);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6419295283E-02);
+ // check x
+ VERIFY_IS_APPROX(x[0], 4.3736970754E+02);
+ VERIFY_IS_APPROX(x[1], 3.0227324449E-04);
+}
+
+
+struct lanczos1_functor : Functor<double>
+{
+ lanczos1_functor(void) : Functor<double>(6,24) {}
+ static const double x[24];
+ static const double y[24];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==6);
+ assert(fvec.size()==24);
+ for(int i=0; i<24; i++)
+ fvec[i] = b[0]*exp(-b[1]*x[i]) + b[2]*exp(-b[3]*x[i]) + b[4]*exp(-b[5]*x[i]) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==6);
+ assert(fjac.rows()==24);
+ assert(fjac.cols()==6);
+ for(int i=0; i<24; i++) {
+ fjac(i,0) = exp(-b[1]*x[i]);
+ fjac(i,1) = -b[0]*x[i]*exp(-b[1]*x[i]);
+ fjac(i,2) = exp(-b[3]*x[i]);
+ fjac(i,3) = -b[2]*x[i]*exp(-b[3]*x[i]);
+ fjac(i,4) = exp(-b[5]*x[i]);
+ fjac(i,5) = -b[4]*x[i]*exp(-b[5]*x[i]);
+ }
+ return 0;
+ }
+};
+const double lanczos1_functor::x[24] = { 0.000000000000E+00, 5.000000000000E-02, 1.000000000000E-01, 1.500000000000E-01, 2.000000000000E-01, 2.500000000000E-01, 3.000000000000E-01, 3.500000000000E-01, 4.000000000000E-01, 4.500000000000E-01, 5.000000000000E-01, 5.500000000000E-01, 6.000000000000E-01, 6.500000000000E-01, 7.000000000000E-01, 7.500000000000E-01, 8.000000000000E-01, 8.500000000000E-01, 9.000000000000E-01, 9.500000000000E-01, 1.000000000000E+00, 1.050000000000E+00, 1.100000000000E+00, 1.150000000000E+00 };
+const double lanczos1_functor::y[24] = { 2.513400000000E+00 ,2.044333373291E+00 ,1.668404436564E+00 ,1.366418021208E+00 ,1.123232487372E+00 ,9.268897180037E-01 ,7.679338563728E-01 ,6.388775523106E-01 ,5.337835317402E-01 ,4.479363617347E-01 ,3.775847884350E-01 ,3.197393199326E-01 ,2.720130773746E-01 ,2.324965529032E-01 ,1.996589546065E-01 ,1.722704126914E-01 ,1.493405660168E-01 ,1.300700206922E-01 ,1.138119324644E-01 ,1.000415587559E-01 ,8.833209084540E-02 ,7.833544019350E-02 ,6.976693743449E-02 ,6.239312536719E-02 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/lanczos1.shtml
+void testNistLanczos1(void)
+{
+ const int n=6;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 1.2, 0.3, 5.6, 5.5, 6.5, 7.6;
+ // do the computation
+ lanczos1_functor functor;
+ LevenbergMarquardt<lanczos1_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 2);
+ VERIFY_IS_EQUAL(lm.nfev, 79);
+ VERIFY_IS_EQUAL(lm.njev, 72);
+ // check norm^2
+  VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.430899764097e-25); // the certified NIST value is 1.4307867721E-25, but the NIST results were computed in 128-bit precision
+ // check x
+ VERIFY_IS_APPROX(x[0], 9.5100000027E-02);
+ VERIFY_IS_APPROX(x[1], 1.0000000001E+00);
+ VERIFY_IS_APPROX(x[2], 8.6070000013E-01);
+ VERIFY_IS_APPROX(x[3], 3.0000000002E+00);
+ VERIFY_IS_APPROX(x[4], 1.5575999998E+00);
+ VERIFY_IS_APPROX(x[5], 5.0000000001E+00);
+
+ /*
+ * Second try
+ */
+ x<< 0.5, 0.7, 3.6, 4.2, 4., 6.3;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 2);
+ VERIFY_IS_EQUAL(lm.nfev, 9);
+ VERIFY_IS_EQUAL(lm.njev, 8);
+ // check norm^2
+  VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.428595533845e-25); // the certified NIST value is 1.4307867721E-25, but the NIST results were computed in 128-bit precision
+ // check x
+ VERIFY_IS_APPROX(x[0], 9.5100000027E-02);
+ VERIFY_IS_APPROX(x[1], 1.0000000001E+00);
+ VERIFY_IS_APPROX(x[2], 8.6070000013E-01);
+ VERIFY_IS_APPROX(x[3], 3.0000000002E+00);
+ VERIFY_IS_APPROX(x[4], 1.5575999998E+00);
+ VERIFY_IS_APPROX(x[5], 5.0000000001E+00);
+
+}
+
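+// Rat42 model: a sigmoidal growth curve, y = b[0] / (1 + exp(b[1] - b[2]*x)).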
+struct rat42_functor : Functor<double>
+{
+ rat42_functor(void) : Functor<double>(3,9) {}
+ static const double x[9];
+ static const double y[9];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==3);
+ assert(fvec.size()==9);
+ for(int i=0; i<9; i++) {
+ fvec[i] = b[0] / (1.+exp(b[1]-b[2]*x[i])) - y[i];
+ }
+ return 0;
+ }
+
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==3);
+ assert(fjac.rows()==9);
+ assert(fjac.cols()==3);
+ for(int i=0; i<9; i++) {
+ double e = exp(b[1]-b[2]*x[i]);
+ fjac(i,0) = 1./(1.+e);
+ fjac(i,1) = -b[0]*e/(1.+e)/(1.+e);
+ fjac(i,2) = +b[0]*e*x[i]/(1.+e)/(1.+e);
+ }
+ return 0;
+ }
+};
+const double rat42_functor::x[9] = { 9.000E0, 14.000E0, 21.000E0, 28.000E0, 42.000E0, 57.000E0, 63.000E0, 70.000E0, 79.000E0 };
+const double rat42_functor::y[9] = { 8.930E0 ,10.800E0 ,18.590E0 ,22.330E0 ,39.350E0 ,56.110E0 ,61.730E0 ,64.620E0 ,67.080E0 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky2.shtml
+void testNistRat42(void)
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 100., 1., 0.1;
+ // do the computation
+ rat42_functor functor;
+ LevenbergMarquardt<rat42_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 10);
+ VERIFY_IS_EQUAL(lm.njev, 8);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.0565229338E+00);
+ // check x
+ VERIFY_IS_APPROX(x[0], 7.2462237576E+01);
+ VERIFY_IS_APPROX(x[1], 2.6180768402E+00);
+ VERIFY_IS_APPROX(x[2], 6.7359200066E-02);
+
+ /*
+ * Second try
+ */
+ x<< 75., 2.5, 0.07;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 6);
+ VERIFY_IS_EQUAL(lm.njev, 5);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.0565229338E+00);
+ // check x
+ VERIFY_IS_APPROX(x[0], 7.2462237576E+01);
+ VERIFY_IS_APPROX(x[1], 2.6180768402E+00);
+ VERIFY_IS_APPROX(x[2], 6.7359200066E-02);
+}
+
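+// MGH10 model: y = b[0] * exp(b[1] / (x + b[2])) (the Meyer problem, a notoriously
+// ill-conditioned exponential fit, hence the large iteration counts checked below).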
+struct MGH10_functor : Functor<double>
+{
+ MGH10_functor(void) : Functor<double>(3,16) {}
+ static const double x[16];
+ static const double y[16];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==3);
+ assert(fvec.size()==16);
+ for(int i=0; i<16; i++)
+ fvec[i] = b[0] * exp(b[1]/(x[i]+b[2])) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==3);
+ assert(fjac.rows()==16);
+ assert(fjac.cols()==3);
+ for(int i=0; i<16; i++) {
+ double factor = 1./(x[i]+b[2]);
+ double e = exp(b[1]*factor);
+ fjac(i,0) = e;
+ fjac(i,1) = b[0]*factor*e;
+ fjac(i,2) = -b[1]*b[0]*factor*factor*e;
+ }
+ return 0;
+ }
+};
+const double MGH10_functor::x[16] = { 5.000000E+01, 5.500000E+01, 6.000000E+01, 6.500000E+01, 7.000000E+01, 7.500000E+01, 8.000000E+01, 8.500000E+01, 9.000000E+01, 9.500000E+01, 1.000000E+02, 1.050000E+02, 1.100000E+02, 1.150000E+02, 1.200000E+02, 1.250000E+02 };
+const double MGH10_functor::y[16] = { 3.478000E+04, 2.861000E+04, 2.365000E+04, 1.963000E+04, 1.637000E+04, 1.372000E+04, 1.154000E+04, 9.744000E+03, 8.261000E+03, 7.030000E+03, 6.005000E+03, 5.147000E+03, 4.427000E+03, 3.820000E+03, 3.307000E+03, 2.872000E+03 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/mgh10.shtml
+void testNistMGH10(void)
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 2., 400000., 25000.;
+ // do the computation
+ MGH10_functor functor;
+ LevenbergMarquardt<MGH10_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 2);
+ VERIFY_IS_EQUAL(lm.nfev, 284 );
+ VERIFY_IS_EQUAL(lm.njev, 249 );
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7945855171E+01);
+ // check x
+ VERIFY_IS_APPROX(x[0], 5.6096364710E-03);
+ VERIFY_IS_APPROX(x[1], 6.1813463463E+03);
+ VERIFY_IS_APPROX(x[2], 3.4522363462E+02);
+
+ /*
+ * Second try
+ */
+ x<< 0.02, 4000., 250.;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 3);
+ VERIFY_IS_EQUAL(lm.nfev, 126);
+ VERIFY_IS_EQUAL(lm.njev, 116);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7945855171E+01);
+ // check x
+ VERIFY_IS_APPROX(x[0], 5.6096364710E-03);
+ VERIFY_IS_APPROX(x[1], 6.1813463463E+03);
+ VERIFY_IS_APPROX(x[2], 3.4522363462E+02);
+}
+
+
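+// BoxBOD model: y = b[0] * (1 - exp(-b[1]*x)).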
+struct BoxBOD_functor : Functor<double>
+{
+ BoxBOD_functor(void) : Functor<double>(2,6) {}
+ static const double x[6];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ static const double y[6] = { 109., 149., 149., 191., 213., 224. };
+ assert(b.size()==2);
+ assert(fvec.size()==6);
+ for(int i=0; i<6; i++)
+ fvec[i] = b[0]*(1.-exp(-b[1]*x[i])) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==2);
+ assert(fjac.rows()==6);
+ assert(fjac.cols()==2);
+ for(int i=0; i<6; i++) {
+ double e = exp(-b[1]*x[i]);
+ fjac(i,0) = 1.-e;
+ fjac(i,1) = b[0]*x[i]*e;
+ }
+ return 0;
+ }
+};
+const double BoxBOD_functor::x[6] = { 1., 2., 3., 5., 7., 10. };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/boxbod.shtml
+void testNistBoxBOD(void)
+{
+ const int n=2;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 1., 1.;
+ // do the computation
+ BoxBOD_functor functor;
+ LevenbergMarquardt<BoxBOD_functor> lm(functor);
+ lm.parameters.ftol = 1.E6*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E6*NumTraits<double>::epsilon();
+ lm.parameters.factor = 10.;
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 31);
+ VERIFY_IS_EQUAL(lm.njev, 25);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.1680088766E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 2.1380940889E+02);
+ VERIFY_IS_APPROX(x[1], 5.4723748542E-01);
+
+ /*
+ * Second try
+ */
+ x<< 100., 0.75;
+ // do the computation
+ lm.resetParameters();
+ lm.parameters.ftol = NumTraits<double>::epsilon();
+ lm.parameters.xtol = NumTraits<double>::epsilon();
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 15 );
+ VERIFY_IS_EQUAL(lm.njev, 14 );
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.1680088766E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 2.1380940889E+02);
+ VERIFY_IS_APPROX(x[1], 5.4723748542E-01);
+}
+
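+// MGH17 model: y = b[0] + b[1]*exp(-b[3]*x) + b[2]*exp(-b[4]*x) (the Osborne 1 problem).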
+struct MGH17_functor : Functor<double>
+{
+ MGH17_functor(void) : Functor<double>(5,33) {}
+ static const double x[33];
+ static const double y[33];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==5);
+ assert(fvec.size()==33);
+ for(int i=0; i<33; i++)
+ fvec[i] = b[0] + b[1]*exp(-b[3]*x[i]) + b[2]*exp(-b[4]*x[i]) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==5);
+ assert(fjac.rows()==33);
+ assert(fjac.cols()==5);
+ for(int i=0; i<33; i++) {
+ fjac(i,0) = 1.;
+ fjac(i,1) = exp(-b[3]*x[i]);
+ fjac(i,2) = exp(-b[4]*x[i]);
+ fjac(i,3) = -x[i]*b[1]*exp(-b[3]*x[i]);
+ fjac(i,4) = -x[i]*b[2]*exp(-b[4]*x[i]);
+ }
+ return 0;
+ }
+};
+const double MGH17_functor::x[33] = { 0.000000E+00, 1.000000E+01, 2.000000E+01, 3.000000E+01, 4.000000E+01, 5.000000E+01, 6.000000E+01, 7.000000E+01, 8.000000E+01, 9.000000E+01, 1.000000E+02, 1.100000E+02, 1.200000E+02, 1.300000E+02, 1.400000E+02, 1.500000E+02, 1.600000E+02, 1.700000E+02, 1.800000E+02, 1.900000E+02, 2.000000E+02, 2.100000E+02, 2.200000E+02, 2.300000E+02, 2.400000E+02, 2.500000E+02, 2.600000E+02, 2.700000E+02, 2.800000E+02, 2.900000E+02, 3.000000E+02, 3.100000E+02, 3.200000E+02 };
+const double MGH17_functor::y[33] = { 8.440000E-01, 9.080000E-01, 9.320000E-01, 9.360000E-01, 9.250000E-01, 9.080000E-01, 8.810000E-01, 8.500000E-01, 8.180000E-01, 7.840000E-01, 7.510000E-01, 7.180000E-01, 6.850000E-01, 6.580000E-01, 6.280000E-01, 6.030000E-01, 5.800000E-01, 5.580000E-01, 5.380000E-01, 5.220000E-01, 5.060000E-01, 4.900000E-01, 4.780000E-01, 4.670000E-01, 4.570000E-01, 4.480000E-01, 4.380000E-01, 4.310000E-01, 4.240000E-01, 4.200000E-01, 4.140000E-01, 4.110000E-01, 4.060000E-01 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/mgh17.shtml
+void testNistMGH17(void)
+{
+ const int n=5;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 50., 150., -100., 1., 2.;
+ // do the computation
+ MGH17_functor functor;
+ LevenbergMarquardt<MGH17_functor> lm(functor);
+ lm.parameters.ftol = NumTraits<double>::epsilon();
+ lm.parameters.xtol = NumTraits<double>::epsilon();
+ lm.parameters.maxfev = 1000;
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 2);
+ VERIFY_IS_EQUAL(lm.nfev, 602 );
+ VERIFY_IS_EQUAL(lm.njev, 545 );
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.4648946975E-05);
+ // check x
+ VERIFY_IS_APPROX(x[0], 3.7541005211E-01);
+ VERIFY_IS_APPROX(x[1], 1.9358469127E+00);
+ VERIFY_IS_APPROX(x[2], -1.4646871366E+00);
+ VERIFY_IS_APPROX(x[3], 1.2867534640E-02);
+ VERIFY_IS_APPROX(x[4], 2.2122699662E-02);
+
+ /*
+ * Second try
+ */
+ x<< 0.5 ,1.5 ,-1 ,0.01 ,0.02;
+ // do the computation
+ lm.resetParameters();
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 18);
+ VERIFY_IS_EQUAL(lm.njev, 15);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.4648946975E-05);
+ // check x
+ VERIFY_IS_APPROX(x[0], 3.7541005211E-01);
+ VERIFY_IS_APPROX(x[1], 1.9358469127E+00);
+ VERIFY_IS_APPROX(x[2], -1.4646871366E+00);
+ VERIFY_IS_APPROX(x[3], 1.2867534640E-02);
+ VERIFY_IS_APPROX(x[4], 2.2122699662E-02);
+}
+
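+// MGH09 model: a rational function, y = b[0]*(x^2 + b[1]*x) / (x^2 + b[2]*x + b[3])
+// (the Kowalik and Osborne problem).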
+struct MGH09_functor : Functor<double>
+{
+ MGH09_functor(void) : Functor<double>(4,11) {}
+ static const double _x[11];
+ static const double y[11];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==4);
+ assert(fvec.size()==11);
+ for(int i=0; i<11; i++) {
+ double x = _x[i], xx=x*x;
+ fvec[i] = b[0]*(xx+x*b[1])/(xx+x*b[2]+b[3]) - y[i];
+ }
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==4);
+ assert(fjac.rows()==11);
+ assert(fjac.cols()==4);
+ for(int i=0; i<11; i++) {
+ double x = _x[i], xx=x*x;
+ double factor = 1./(xx+x*b[2]+b[3]);
+ fjac(i,0) = (xx+x*b[1]) * factor;
+ fjac(i,1) = b[0]*x* factor;
+ fjac(i,2) = - b[0]*(xx+x*b[1]) * x * factor * factor;
+ fjac(i,3) = - b[0]*(xx+x*b[1]) * factor * factor;
+ }
+ return 0;
+ }
+};
+const double MGH09_functor::_x[11] = { 4., 2., 1., 5.E-1 , 2.5E-01, 1.670000E-01, 1.250000E-01, 1.E-01, 8.330000E-02, 7.140000E-02, 6.250000E-02 };
+const double MGH09_functor::y[11] = { 1.957000E-01, 1.947000E-01, 1.735000E-01, 1.600000E-01, 8.440000E-02, 6.270000E-02, 4.560000E-02, 3.420000E-02, 3.230000E-02, 2.350000E-02, 2.460000E-02 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/mgh09.shtml
+void testNistMGH09(void)
+{
+ const int n=4;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 25., 39, 41.5, 39.;
+ // do the computation
+ MGH09_functor functor;
+ LevenbergMarquardt<MGH09_functor> lm(functor);
+ lm.parameters.maxfev = 1000;
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 490 );
+ VERIFY_IS_EQUAL(lm.njev, 376 );
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 3.0750560385E-04);
+ // check x
+ VERIFY_IS_APPROX(x[0], 0.1928077089); // should be 1.9280693458E-01
+ VERIFY_IS_APPROX(x[1], 0.19126423573); // should be 1.9128232873E-01
+ VERIFY_IS_APPROX(x[2], 0.12305309914); // should be 1.2305650693E-01
+ VERIFY_IS_APPROX(x[3], 0.13605395375); // should be 1.3606233068E-01
+
+ /*
+ * Second try
+ */
+ x<< 0.25, 0.39, 0.415, 0.39;
+ // do the computation
+ lm.resetParameters();
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 18);
+ VERIFY_IS_EQUAL(lm.njev, 16);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 3.0750560385E-04);
+ // check x
+ VERIFY_IS_APPROX(x[0], 0.19280781); // should be 1.9280693458E-01
+ VERIFY_IS_APPROX(x[1], 0.19126265); // should be 1.9128232873E-01
+ VERIFY_IS_APPROX(x[2], 0.12305280); // should be 1.2305650693E-01
+ VERIFY_IS_APPROX(x[3], 0.13605322); // should be 1.3606233068E-01
+}
+
+
+
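+// Bennett5 model: y = b[0] * (b[1] + x)^(-1/b[2]).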
+struct Bennett5_functor : Functor<double>
+{
+ Bennett5_functor(void) : Functor<double>(3,154) {}
+ static const double x[154];
+ static const double y[154];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==3);
+ assert(fvec.size()==154);
+ for(int i=0; i<154; i++)
+ fvec[i] = b[0]* pow(b[1]+x[i],-1./b[2]) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==3);
+ assert(fjac.rows()==154);
+ assert(fjac.cols()==3);
+ for(int i=0; i<154; i++) {
+ double e = pow(b[1]+x[i],-1./b[2]);
+ fjac(i,0) = e;
+ fjac(i,1) = - b[0]*e/b[2]/(b[1]+x[i]);
+ fjac(i,2) = b[0]*e*log(b[1]+x[i])/b[2]/b[2];
+ }
+ return 0;
+ }
+};
+const double Bennett5_functor::x[154] = { 7.447168E0, 8.102586E0, 8.452547E0, 8.711278E0, 8.916774E0, 9.087155E0, 9.232590E0, 9.359535E0, 9.472166E0, 9.573384E0, 9.665293E0, 9.749461E0, 9.827092E0, 9.899128E0, 9.966321E0, 10.029280E0, 10.088510E0, 10.144430E0, 10.197380E0, 10.247670E0, 10.295560E0, 10.341250E0, 10.384950E0, 10.426820E0, 10.467000E0, 10.505640E0, 10.542830E0, 10.578690E0, 10.613310E0, 10.646780E0, 10.679150E0, 10.710520E0, 10.740920E0, 10.770440E0, 10.799100E0, 10.826970E0, 10.854080E0, 10.880470E0, 10.906190E0, 10.931260E0, 10.955720E0, 10.979590E0, 11.002910E0, 11.025700E0, 11.047980E0, 11.069770E0, 11.091100E0, 11.111980E0, 11.132440E0, 11.152480E0, 11.172130E0, 11.191410E0, 11.210310E0, 11.228870E0, 11.247090E0, 11.264980E0, 11.282560E0, 11.299840E0, 11.316820E0, 11.333520E0, 11.349940E0, 11.366100E0, 11.382000E0, 11.397660E0, 11.413070E0, 11.428240E0, 11.443200E0, 11.457930E0, 11.472440E0, 11.486750E0, 11.500860E0, 11.514770E0, 11.528490E0, 11.542020E0, 11.555380E0, 11.568550E0,
+11.581560E0, 11.594420E0, 11.607121E0, 11.619640E0, 11.632000E0, 11.644210E0, 11.656280E0, 11.668200E0, 11.679980E0, 11.691620E0, 11.703130E0, 11.714510E0, 11.725760E0, 11.736880E0, 11.747890E0, 11.758780E0, 11.769550E0, 11.780200E0, 11.790730E0, 11.801160E0, 11.811480E0, 11.821700E0, 11.831810E0, 11.841820E0, 11.851730E0, 11.861550E0, 11.871270E0, 11.880890E0, 11.890420E0, 11.899870E0, 11.909220E0, 11.918490E0, 11.927680E0, 11.936780E0, 11.945790E0, 11.954730E0, 11.963590E0, 11.972370E0, 11.981070E0, 11.989700E0, 11.998260E0, 12.006740E0, 12.015150E0, 12.023490E0, 12.031760E0, 12.039970E0, 12.048100E0, 12.056170E0, 12.064180E0, 12.072120E0, 12.080010E0, 12.087820E0, 12.095580E0, 12.103280E0, 12.110920E0, 12.118500E0, 12.126030E0, 12.133500E0, 12.140910E0, 12.148270E0, 12.155570E0, 12.162830E0, 12.170030E0, 12.177170E0, 12.184270E0, 12.191320E0, 12.198320E0, 12.205270E0, 12.212170E0, 12.219030E0, 12.225840E0, 12.232600E0, 12.239320E0, 12.245990E0, 12.252620E0, 12.259200E0, 12.265750E0, 12.272240E0 };
+const double Bennett5_functor::y[154] = { -34.834702E0 ,-34.393200E0 ,-34.152901E0 ,-33.979099E0 ,-33.845901E0 ,-33.732899E0 ,-33.640301E0 ,-33.559200E0 ,-33.486801E0 ,-33.423100E0 ,-33.365101E0 ,-33.313000E0 ,-33.260899E0 ,-33.217400E0 ,-33.176899E0 ,-33.139198E0 ,-33.101601E0 ,-33.066799E0 ,-33.035000E0 ,-33.003101E0 ,-32.971298E0 ,-32.942299E0 ,-32.916302E0 ,-32.890202E0 ,-32.864101E0 ,-32.841000E0 ,-32.817799E0 ,-32.797501E0 ,-32.774300E0 ,-32.757000E0 ,-32.733799E0 ,-32.716400E0 ,-32.699100E0 ,-32.678799E0 ,-32.661400E0 ,-32.644001E0 ,-32.626701E0 ,-32.612202E0 ,-32.597698E0 ,-32.583199E0 ,-32.568699E0 ,-32.554298E0 ,-32.539799E0 ,-32.525299E0 ,-32.510799E0 ,-32.499199E0 ,-32.487598E0 ,-32.473202E0 ,-32.461601E0 ,-32.435501E0 ,-32.435501E0 ,-32.426800E0 ,-32.412300E0 ,-32.400799E0 ,-32.392101E0 ,-32.380501E0 ,-32.366001E0 ,-32.357300E0 ,-32.348598E0 ,-32.339901E0 ,-32.328400E0 ,-32.319698E0 ,-32.311001E0 ,-32.299400E0 ,-32.290699E0 ,-32.282001E0 ,-32.273300E0 ,-32.264599E0 ,-32.256001E0 ,-32.247299E0
+,-32.238602E0 ,-32.229900E0 ,-32.224098E0 ,-32.215401E0 ,-32.203800E0 ,-32.198002E0 ,-32.189400E0 ,-32.183601E0 ,-32.174900E0 ,-32.169102E0 ,-32.163300E0 ,-32.154598E0 ,-32.145901E0 ,-32.140099E0 ,-32.131401E0 ,-32.125599E0 ,-32.119801E0 ,-32.111198E0 ,-32.105400E0 ,-32.096699E0 ,-32.090900E0 ,-32.088001E0 ,-32.079300E0 ,-32.073502E0 ,-32.067699E0 ,-32.061901E0 ,-32.056099E0 ,-32.050301E0 ,-32.044498E0 ,-32.038799E0 ,-32.033001E0 ,-32.027199E0 ,-32.024300E0 ,-32.018501E0 ,-32.012699E0 ,-32.004002E0 ,-32.001099E0 ,-31.995300E0 ,-31.989500E0 ,-31.983700E0 ,-31.977900E0 ,-31.972099E0 ,-31.969299E0 ,-31.963501E0 ,-31.957701E0 ,-31.951900E0 ,-31.946100E0 ,-31.940300E0 ,-31.937401E0 ,-31.931601E0 ,-31.925800E0 ,-31.922899E0 ,-31.917101E0 ,-31.911301E0 ,-31.908400E0 ,-31.902599E0 ,-31.896900E0 ,-31.893999E0 ,-31.888201E0 ,-31.885300E0 ,-31.882401E0 ,-31.876600E0 ,-31.873699E0 ,-31.867901E0 ,-31.862101E0 ,-31.859200E0 ,-31.856300E0 ,-31.850500E0 ,-31.844700E0 ,-31.841801E0 ,-31.838900E0 ,-31.833099E0 ,-31.830200E0 ,
+-31.827299E0 ,-31.821600E0 ,-31.818701E0 ,-31.812901E0 ,-31.809999E0 ,-31.807100E0 ,-31.801300E0 ,-31.798401E0 ,-31.795500E0 ,-31.789700E0 ,-31.786800E0 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/bennett5.shtml
+void testNistBennett5(void)
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< -2000., 50., 0.8;
+ // do the computation
+ Bennett5_functor functor;
+ LevenbergMarquardt<Bennett5_functor> lm(functor);
+ lm.parameters.maxfev = 1000;
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 758);
+ VERIFY_IS_EQUAL(lm.njev, 744);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.2404744073E-04);
+ // check x
+ VERIFY_IS_APPROX(x[0], -2.5235058043E+03);
+ VERIFY_IS_APPROX(x[1], 4.6736564644E+01);
+ VERIFY_IS_APPROX(x[2], 9.3218483193E-01);
+ /*
+ * Second try
+ */
+ x<< -1500., 45., 0.85;
+ // do the computation
+ lm.resetParameters();
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 203);
+ VERIFY_IS_EQUAL(lm.njev, 192);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.2404744073E-04);
+ // check x
+ VERIFY_IS_APPROX(x[0], -2523.3007865); // should be -2.5235058043E+03
+ VERIFY_IS_APPROX(x[1], 46.735705771); // should be 4.6736564644E+01);
+ VERIFY_IS_APPROX(x[2], 0.93219881891); // should be 9.3218483193E-01);
+}
+
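+// Thurber model: a cubic-over-cubic rational function,
+// y = (b[0] + b[1]*x + b[2]*x^2 + b[3]*x^3) / (1 + b[4]*x + b[5]*x^2 + b[6]*x^3).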
+struct thurber_functor : Functor<double>
+{
+ thurber_functor(void) : Functor<double>(7,37) {}
+ static const double _x[37];
+ static const double _y[37];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ // int called=0; printf("call hahn1_functor with iflag=%d, called=%d\n", iflag, called); if (iflag==1) called++;
+ assert(b.size()==7);
+ assert(fvec.size()==37);
+ for(int i=0; i<37; i++) {
+ double x=_x[i], xx=x*x, xxx=xx*x;
+ fvec[i] = (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) / (1.+b[4]*x+b[5]*xx+b[6]*xxx) - _y[i];
+ }
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==7);
+ assert(fjac.rows()==37);
+ assert(fjac.cols()==7);
+ for(int i=0; i<37; i++) {
+ double x=_x[i], xx=x*x, xxx=xx*x;
+ double fact = 1./(1.+b[4]*x+b[5]*xx+b[6]*xxx);
+ fjac(i,0) = 1.*fact;
+ fjac(i,1) = x*fact;
+ fjac(i,2) = xx*fact;
+ fjac(i,3) = xxx*fact;
+ fact = - (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) * fact * fact;
+ fjac(i,4) = x*fact;
+ fjac(i,5) = xx*fact;
+ fjac(i,6) = xxx*fact;
+ }
+ return 0;
+ }
+};
+const double thurber_functor::_x[37] = { -3.067E0, -2.981E0, -2.921E0, -2.912E0, -2.840E0, -2.797E0, -2.702E0, -2.699E0, -2.633E0, -2.481E0, -2.363E0, -2.322E0, -1.501E0, -1.460E0, -1.274E0, -1.212E0, -1.100E0, -1.046E0, -0.915E0, -0.714E0, -0.566E0, -0.545E0, -0.400E0, -0.309E0, -0.109E0, -0.103E0, 0.010E0, 0.119E0, 0.377E0, 0.790E0, 0.963E0, 1.006E0, 1.115E0, 1.572E0, 1.841E0, 2.047E0, 2.200E0 };
+const double thurber_functor::_y[37] = { 80.574E0, 84.248E0, 87.264E0, 87.195E0, 89.076E0, 89.608E0, 89.868E0, 90.101E0, 92.405E0, 95.854E0, 100.696E0, 101.060E0, 401.672E0, 390.724E0, 567.534E0, 635.316E0, 733.054E0, 759.087E0, 894.206E0, 990.785E0, 1090.109E0, 1080.914E0, 1122.643E0, 1178.351E0, 1260.531E0, 1273.514E0, 1288.339E0, 1327.543E0, 1353.863E0, 1414.509E0, 1425.208E0, 1421.384E0, 1442.962E0, 1464.350E0, 1468.705E0, 1447.894E0, 1457.628E0};
+
+// http://www.itl.nist.gov/div898/strd/nls/data/thurber.shtml
+void testNistThurber(void)
+{
+ const int n=7;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 1000 ,1000 ,400 ,40 ,0.7,0.3,0.0 ;
+ // do the computation
+ thurber_functor functor;
+ LevenbergMarquardt<thurber_functor> lm(functor);
+ lm.parameters.ftol = 1.E4*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E4*NumTraits<double>::epsilon();
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 39);
+ VERIFY_IS_EQUAL(lm.njev, 36);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6427082397E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.2881396800E+03);
+ VERIFY_IS_APPROX(x[1], 1.4910792535E+03);
+ VERIFY_IS_APPROX(x[2], 5.8323836877E+02);
+ VERIFY_IS_APPROX(x[3], 7.5416644291E+01);
+ VERIFY_IS_APPROX(x[4], 9.6629502864E-01);
+ VERIFY_IS_APPROX(x[5], 3.9797285797E-01);
+ VERIFY_IS_APPROX(x[6], 4.9727297349E-02);
+
+ /*
+ * Second try
+ */
+ x<< 1300 ,1500 ,500 ,75 ,1 ,0.4 ,0.05 ;
+ // do the computation
+ lm.resetParameters();
+ lm.parameters.ftol = 1.E4*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E4*NumTraits<double>::epsilon();
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 29);
+ VERIFY_IS_EQUAL(lm.njev, 28);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 5.6427082397E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.2881396800E+03);
+ VERIFY_IS_APPROX(x[1], 1.4910792535E+03);
+ VERIFY_IS_APPROX(x[2], 5.8323836877E+02);
+ VERIFY_IS_APPROX(x[3], 7.5416644291E+01);
+ VERIFY_IS_APPROX(x[4], 9.6629502864E-01);
+ VERIFY_IS_APPROX(x[5], 3.9797285797E-01);
+ VERIFY_IS_APPROX(x[6], 4.9727297349E-02);
+}
+
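+// Rat43 model: a generalized logistic curve, y = b[0] / (1 + exp(b[1] - b[2]*x))^(1/b[3]).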
+struct rat43_functor : Functor<double>
+{
+ rat43_functor(void) : Functor<double>(4,15) {}
+ static const double x[15];
+ static const double y[15];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==4);
+ assert(fvec.size()==15);
+ for(int i=0; i<15; i++)
+ fvec[i] = b[0] * pow(1.+exp(b[1]-b[2]*x[i]),-1./b[3]) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==4);
+ assert(fjac.rows()==15);
+ assert(fjac.cols()==4);
+ for(int i=0; i<15; i++) {
+ double e = exp(b[1]-b[2]*x[i]);
+ double power = -1./b[3];
+ fjac(i,0) = pow(1.+e, power);
+ fjac(i,1) = power*b[0]*e*pow(1.+e, power-1.);
+ fjac(i,2) = -power*b[0]*e*x[i]*pow(1.+e, power-1.);
+ fjac(i,3) = b[0]*power*power*log(1.+e)*pow(1.+e, power);
+ }
+ return 0;
+ }
+};
+const double rat43_functor::x[15] = { 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15. };
+const double rat43_functor::y[15] = { 16.08, 33.83, 65.80, 97.20, 191.55, 326.20, 386.87, 520.53, 590.03, 651.92, 724.93, 699.56, 689.96, 637.56, 717.41 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml
+void testNistRat43(void)
+{
+ const int n=4;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 100., 10., 1., 1.;
+ // do the computation
+ rat43_functor functor;
+ LevenbergMarquardt<rat43_functor> lm(functor);
+ lm.parameters.ftol = 1.E6*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E6*NumTraits<double>::epsilon();
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 27);
+ VERIFY_IS_EQUAL(lm.njev, 20);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7864049080E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 6.9964151270E+02);
+ VERIFY_IS_APPROX(x[1], 5.2771253025E+00);
+ VERIFY_IS_APPROX(x[2], 7.5962938329E-01);
+ VERIFY_IS_APPROX(x[3], 1.2792483859E+00);
+
+ /*
+ * Second try
+ */
+ x<< 700., 5., 0.75, 1.3;
+ // do the computation
+ lm.resetParameters();
+ lm.parameters.ftol = 1.E5*NumTraits<double>::epsilon();
+ lm.parameters.xtol = 1.E5*NumTraits<double>::epsilon();
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 9);
+ VERIFY_IS_EQUAL(lm.njev, 8);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 8.7864049080E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 6.9964151270E+02);
+ VERIFY_IS_APPROX(x[1], 5.2771253025E+00);
+ VERIFY_IS_APPROX(x[2], 7.5962938329E-01);
+ VERIFY_IS_APPROX(x[3], 1.2792483859E+00);
+}
+
+
+
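+// Eckerle4 model: a scaled Gaussian, y = (b[0]/b[1]) * exp(-(x - b[2])^2 / (2*b[1]^2)).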
+struct eckerle4_functor : Functor<double>
+{
+ eckerle4_functor(void) : Functor<double>(3,35) {}
+ static const double x[35];
+ static const double y[35];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==3);
+ assert(fvec.size()==35);
+ for(int i=0; i<35; i++)
+ fvec[i] = b[0]/b[1] * exp(-0.5*(x[i]-b[2])*(x[i]-b[2])/(b[1]*b[1])) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==3);
+ assert(fjac.rows()==35);
+ assert(fjac.cols()==3);
+ for(int i=0; i<35; i++) {
+ double b12 = b[1]*b[1];
+ double e = exp(-0.5*(x[i]-b[2])*(x[i]-b[2])/b12);
+ fjac(i,0) = e / b[1];
+ fjac(i,1) = ((x[i]-b[2])*(x[i]-b[2])/b12-1.) * b[0]*e/b12;
+ fjac(i,2) = (x[i]-b[2])*e*b[0]/b[1]/b12;
+ }
+ return 0;
+ }
+};
+const double eckerle4_functor::x[35] = { 400.0, 405.0, 410.0, 415.0, 420.0, 425.0, 430.0, 435.0, 436.5, 438.0, 439.5, 441.0, 442.5, 444.0, 445.5, 447.0, 448.5, 450.0, 451.5, 453.0, 454.5, 456.0, 457.5, 459.0, 460.5, 462.0, 463.5, 465.0, 470.0, 475.0, 480.0, 485.0, 490.0, 495.0, 500.0};
+const double eckerle4_functor::y[35] = { 0.0001575, 0.0001699, 0.0002350, 0.0003102, 0.0004917, 0.0008710, 0.0017418, 0.0046400, 0.0065895, 0.0097302, 0.0149002, 0.0237310, 0.0401683, 0.0712559, 0.1264458, 0.2073413, 0.2902366, 0.3445623, 0.3698049, 0.3668534, 0.3106727, 0.2078154, 0.1164354, 0.0616764, 0.0337200, 0.0194023, 0.0117831, 0.0074357, 0.0022732, 0.0008800, 0.0004579, 0.0002345, 0.0001586, 0.0001143, 0.0000710 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/eckerle4.shtml
+void testNistEckerle4(void)
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 1., 10., 500.;
+ // do the computation
+ eckerle4_functor functor;
+ LevenbergMarquardt<eckerle4_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 18);
+ VERIFY_IS_EQUAL(lm.njev, 15);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.4635887487E-03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.5543827178);
+ VERIFY_IS_APPROX(x[1], 4.0888321754);
+ VERIFY_IS_APPROX(x[2], 4.5154121844E+02);
+
+ /*
+ * Second try
+ */
+ x<< 1.5, 5., 450.;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev, 7);
+ VERIFY_IS_EQUAL(lm.njev, 6);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec.squaredNorm(), 1.4635887487E-03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.5543827178);
+ VERIFY_IS_APPROX(x[1], 4.0888321754);
+ VERIFY_IS_APPROX(x[2], 4.5154121844E+02);
+}
+
+void test_NonLinearOptimization()
+{
+ // Tests using the examples provided by (c)minpack
+ CALL_SUBTEST/*_1*/(testChkder());
+ CALL_SUBTEST/*_1*/(testLmder1());
+ CALL_SUBTEST/*_1*/(testLmder());
+ CALL_SUBTEST/*_2*/(testHybrj1());
+ CALL_SUBTEST/*_2*/(testHybrj());
+ CALL_SUBTEST/*_2*/(testHybrd1());
+ CALL_SUBTEST/*_2*/(testHybrd());
+ CALL_SUBTEST/*_3*/(testLmstr1());
+ CALL_SUBTEST/*_3*/(testLmstr());
+ CALL_SUBTEST/*_3*/(testLmdif1());
+ CALL_SUBTEST/*_3*/(testLmdif());
+
+ // NIST tests, level of difficulty = "Lower"
+ CALL_SUBTEST/*_4*/(testNistMisra1a());
+ CALL_SUBTEST/*_4*/(testNistChwirut2());
+
+ // NIST tests, level of difficulty = "Average"
+ CALL_SUBTEST/*_5*/(testNistHahn1());
+ CALL_SUBTEST/*_6*/(testNistMisra1d());
+// CALL_SUBTEST/*_7*/(testNistMGH17());
+// CALL_SUBTEST/*_8*/(testNistLanczos1());
+
+// // NIST tests, level of difficulty = "Higher"
+ CALL_SUBTEST/*_9*/(testNistRat42());
+// CALL_SUBTEST/*_10*/(testNistMGH10());
+ CALL_SUBTEST/*_11*/(testNistBoxBOD());
+// CALL_SUBTEST/*_12*/(testNistMGH09());
+ CALL_SUBTEST/*_13*/(testNistBennett5());
+ CALL_SUBTEST/*_14*/(testNistThurber());
+ CALL_SUBTEST/*_15*/(testNistRat43());
+ CALL_SUBTEST/*_16*/(testNistEckerle4());
+}
+
+/*
+ * Can be useful for debugging...
+ printf("info, nfev : %d, %d\n", info, lm.nfev);
+ printf("info, nfev, njev : %d, %d, %d\n", info, solver.nfev, solver.njev);
+ printf("info, nfev : %d, %d\n", info, solver.nfev);
+ printf("x[0] : %.32g\n", x[0]);
+ printf("x[1] : %.32g\n", x[1]);
+ printf("x[2] : %.32g\n", x[2]);
+ printf("x[3] : %.32g\n", x[3]);
+ printf("fvec.blueNorm() : %.32g\n", solver.fvec.blueNorm());
+ printf("fvec.blueNorm() : %.32g\n", lm.fvec.blueNorm());
+
+ printf("info, nfev, njev : %d, %d, %d\n", info, lm.nfev, lm.njev);
+ printf("fvec.squaredNorm() : %.13g\n", lm.fvec.squaredNorm());
+ std::cout << x << std::endl;
+ std::cout.precision(9);
+ std::cout << x[0] << std::endl;
+ std::cout << x[1] << std::endl;
+ std::cout << x[2] << std::endl;
+ std::cout << x[3] << std::endl;
+*/
+
diff --git a/unsupported/test/NumericalDiff.cpp b/unsupported/test/NumericalDiff.cpp
new file mode 100644
index 0000000..27d8880
--- /dev/null
+++ b/unsupported/test/NumericalDiff.cpp
@@ -0,0 +1,114 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+
+#include <stdio.h>
+
+#include "main.h"
+#include <unsupported/Eigen/NumericalDiff>
+
+// Generic functor
+template<typename _Scalar, int NX=Dynamic, int NY=Dynamic>
+struct Functor
+{
+ typedef _Scalar Scalar;
+ enum {
+ InputsAtCompileTime = NX,
+ ValuesAtCompileTime = NY
+ };
+ typedef Matrix<Scalar,InputsAtCompileTime,1> InputType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,1> ValueType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,InputsAtCompileTime> JacobianType;
+
+ int m_inputs, m_values;
+
+ Functor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {}
+ Functor(int inputs, int values) : m_inputs(inputs), m_values(values) {}
+
+ int inputs() const { return m_inputs; }
+ int values() const { return m_values; }
+
+};
+
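+// Residuals of the Bard least-squares test problem:
+// fvec[i] = y[i] - (x[0] + t/(x[1]*u + x[2]*v)) with t = i+1, u = 15-i, v = min(t,u);
+// actual_df() provides the analytic Jacobian that NumericalDiff is checked against.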
+struct my_functor : Functor<double>
+{
+ my_functor(void): Functor<double>(3,15) {}
+ int operator()(const VectorXd &x, VectorXd &fvec) const
+ {
+ double tmp1, tmp2, tmp3;
+ double y[15] = {1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1,
+ 3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39};
+
+ for (int i = 0; i < values(); i++)
+ {
+ tmp1 = i+1;
+ tmp2 = 16 - i - 1;
+ tmp3 = (i>=8)? tmp2 : tmp1;
+ fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3));
+ }
+ return 0;
+ }
+
+ int actual_df(const VectorXd &x, MatrixXd &fjac) const
+ {
+ double tmp1, tmp2, tmp3, tmp4;
+ for (int i = 0; i < values(); i++)
+ {
+ tmp1 = i+1;
+ tmp2 = 16 - i - 1;
+ tmp3 = (i>=8)? tmp2 : tmp1;
+ tmp4 = (x[1]*tmp2 + x[2]*tmp3); tmp4 = tmp4*tmp4;
+ fjac(i,0) = -1;
+ fjac(i,1) = tmp1*tmp2/tmp4;
+ fjac(i,2) = tmp1*tmp3/tmp4;
+ }
+ return 0;
+ }
+};
+
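+// Check that the forward-difference Jacobian computed by NumericalDiff matches
+// the hand-coded analytic Jacobian.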
+void test_forward()
+{
+ VectorXd x(3);
+ MatrixXd jac(15,3);
+ MatrixXd actual_jac(15,3);
+ my_functor functor;
+
+ x << 0.082, 1.13, 2.35;
+
+ // analytic reference Jacobian
+ functor.actual_df(x, actual_jac);
+// std::cout << actual_jac << std::endl << std::endl;
+
+ // using NumericalDiff
+ NumericalDiff<my_functor> numDiff(functor);
+ numDiff.df(x, jac);
+// std::cout << jac << std::endl;
+
+ VERIFY_IS_APPROX(jac, actual_jac);
+}
+
+void test_central()
+{
+ VectorXd x(3);
+ MatrixXd jac(15,3);
+ MatrixXd actual_jac(15,3);
+ my_functor functor;
+
+ x << 0.082, 1.13, 2.35;
+
+ // analytic reference Jacobian
+ functor.actual_df(x, actual_jac);
+
+ // using NumericalDiff
+ NumericalDiff<my_functor,Central> numDiff(functor);
+ numDiff.df(x, jac);
+
+ VERIFY_IS_APPROX(jac, actual_jac);
+}
+
+void test_NumericalDiff()
+{
+ CALL_SUBTEST(test_forward());
+ CALL_SUBTEST(test_central());
+}
diff --git a/unsupported/test/alignedvector3.cpp b/unsupported/test/alignedvector3.cpp
new file mode 100644
index 0000000..fc2bc21
--- /dev/null
+++ b/unsupported/test/alignedvector3.cpp
@@ -0,0 +1,59 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <unsupported/Eigen/AlignedVector3>
+
+template<typename Scalar>
+void alignedvector3()
+{
+ Scalar s1 = internal::random<Scalar>();
+ Scalar s2 = internal::random<Scalar>();
+ typedef Matrix<Scalar,3,1> RefType;
+ typedef Matrix<Scalar,3,3> Mat33;
+ typedef AlignedVector3<Scalar> FastType;
+ RefType r1(RefType::Random()), r2(RefType::Random()), r3(RefType::Random()),
+ r4(RefType::Random()), r5(RefType::Random()), r6(RefType::Random());
+ FastType f1(r1), f2(r2), f3(r3), f4(r4), f5(r5), f6(r6);
+ Mat33 m1(Mat33::Random());
+
+ VERIFY_IS_APPROX(f1,r1);
+ VERIFY_IS_APPROX(f4,r4);
+
+ VERIFY_IS_APPROX(f4+f1,r4+r1);
+ VERIFY_IS_APPROX(f4-f1,r4-r1);
+ VERIFY_IS_APPROX(f4+f1-f2,r4+r1-r2);
+ VERIFY_IS_APPROX(f4+=f3,r4+=r3);
+ VERIFY_IS_APPROX(f4-=f5,r4-=r5);
+ VERIFY_IS_APPROX(f4-=f5+f1,r4-=r5+r1);
+ VERIFY_IS_APPROX(f5+f1-s1*f2,r5+r1-s1*r2);
+ VERIFY_IS_APPROX(f5+f1/s2-s1*f2,r5+r1/s2-s1*r2);
+
+ VERIFY_IS_APPROX(m1*f4,m1*r4);
+ VERIFY_IS_APPROX(f4.transpose()*m1,r4.transpose()*m1);
+
+ VERIFY_IS_APPROX(f2.dot(f3),r2.dot(r3));
+ VERIFY_IS_APPROX(f2.cross(f3),r2.cross(r3));
+ VERIFY_IS_APPROX(f2.norm(),r2.norm());
+
+ VERIFY_IS_APPROX(f2.normalized(),r2.normalized());
+
+ VERIFY_IS_APPROX((f2+f1).normalized(),(r2+r1).normalized());
+
+ f2.normalize();
+ r2.normalize();
+ VERIFY_IS_APPROX(f2,r2);
+}
+
+void test_alignedvector3()
+{
+ for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST( alignedvector3<float>() );
+ }
+}
diff --git a/unsupported/test/autodiff.cpp b/unsupported/test/autodiff.cpp
new file mode 100644
index 0000000..087e7c5
--- /dev/null
+++ b/unsupported/test/autodiff.cpp
@@ -0,0 +1,173 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <unsupported/Eigen/AutoDiff>
+
+template<typename Scalar>
+EIGEN_DONT_INLINE Scalar foo(const Scalar& x, const Scalar& y)
+{
+ using namespace std;
+// return x+std::sin(y);
+ EIGEN_ASM_COMMENT("mybegin");
+ return static_cast<Scalar>(x*2 - pow(x,2) + 2*sqrt(y*y) - 4 * sin(x) + 2 * cos(y) - exp(-0.5*x*x));
+ //return x+2*y*x;//x*2 -std::pow(x,2);//(2*y/x);// - y*2;
+ EIGEN_ASM_COMMENT("myend");
+}
+
+template<typename Vector>
+EIGEN_DONT_INLINE typename Vector::Scalar foo(const Vector& p)
+{
+ typedef typename Vector::Scalar Scalar;
+ return (p-Vector(Scalar(-1),Scalar(1.))).norm() + (p.array() * p.array()).sum() + p.dot(p);
+}
+
+template<typename _Scalar, int NX=Dynamic, int NY=Dynamic>
+struct TestFunc1
+{
+ typedef _Scalar Scalar;
+ enum {
+ InputsAtCompileTime = NX,
+ ValuesAtCompileTime = NY
+ };
+ typedef Matrix<Scalar,InputsAtCompileTime,1> InputType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,1> ValueType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,InputsAtCompileTime> JacobianType;
+
+ int m_inputs, m_values;
+
+ TestFunc1() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {}
+ TestFunc1(int inputs, int values) : m_inputs(inputs), m_values(values) {}
+
+ int inputs() const { return m_inputs; }
+ int values() const { return m_values; }
+
+ template<typename T>
+ void operator() (const Matrix<T,InputsAtCompileTime,1>& x, Matrix<T,ValuesAtCompileTime,1>* _v) const
+ {
+ Matrix<T,ValuesAtCompileTime,1>& v = *_v;
+
+ v[0] = 2 * x[0] * x[0] + x[0] * x[1];
+ v[1] = 3 * x[1] * x[0] + 0.5 * x[1] * x[1];
+ if(inputs()>2)
+ {
+ v[0] += 0.5 * x[2];
+ v[1] += x[2];
+ }
+ if(values()>2)
+ {
+ v[2] = 3 * x[1] * x[0] * x[0];
+ }
+ if (inputs()>2 && values()>2)
+ v[2] *= x[2];
+ }
+
+ void operator() (const InputType& x, ValueType* v, JacobianType* _j) const
+ {
+ (*this)(x, v);
+
+ if(_j)
+ {
+ JacobianType& j = *_j;
+
+ j(0,0) = 4 * x[0] + x[1];
+ j(1,0) = 3 * x[1];
+
+ j(0,1) = x[0];
+ j(1,1) = 3 * x[0] + 2 * 0.5 * x[1];
+
+ if (inputs()>2)
+ {
+ j(0,2) = 0.5;
+ j(1,2) = 1;
+ }
+ if(values()>2)
+ {
+ j(2,0) = 3 * x[1] * 2 * x[0];
+ j(2,1) = 3 * x[0] * x[0];
+ }
+ if (inputs()>2 && values()>2)
+ {
+ j(2,0) *= x[2];
+ j(2,1) *= x[2];
+
+ j(2,2) = 3 * x[1] * x[0] * x[0];
+ }
+ }
+ }
+};
+
+template<typename Func> void forward_jacobian(const Func& f)
+{
+ typename Func::InputType x = Func::InputType::Random(f.inputs());
+ typename Func::ValueType y(f.values()), yref(f.values());
+ typename Func::JacobianType j(f.values(),f.inputs()), jref(f.values(),f.inputs());
+
+ jref.setZero();
+ yref.setZero();
+ f(x,&yref,&jref);
+// std::cerr << y.transpose() << "\n\n";;
+// std::cerr << j << "\n\n";;
+
+ j.setZero();
+ y.setZero();
+ AutoDiffJacobian<Func> autoj(f);
+ autoj(x, &y, &j);
+// std::cerr << y.transpose() << "\n\n";;
+// std::cerr << j << "\n\n";;
+
+ VERIFY_IS_APPROX(y, yref);
+ VERIFY_IS_APPROX(j, jref);
+}
+
+
+// TODO also check actual derivatives!
+void test_autodiff_scalar()
+{
+ Vector2f p = Vector2f::Random();
+ typedef AutoDiffScalar<Vector2f> AD;
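+ // Seed each AutoDiffScalar with a unit derivative vector so that the result's
+ // derivatives() hold the gradient of foo with respect to (x, y).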
+ AD ax(p.x(),Vector2f::UnitX());
+ AD ay(p.y(),Vector2f::UnitY());
+ AD res = foo<AD>(ax,ay);
+ VERIFY_IS_APPROX(res.value(), foo(p.x(),p.y()));
+}
+
+// TODO also check actual derivatives!
+void test_autodiff_vector()
+{
+ Vector2f p = Vector2f::Random();
+ typedef AutoDiffScalar<Vector2f> AD;
+ typedef Matrix<AD,2,1> VectorAD;
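+ // Cast to a vector of AutoDiffScalar and seed the derivative directions by hand.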
+ VectorAD ap = p.cast<AD>();
+ ap.x().derivatives() = Vector2f::UnitX();
+ ap.y().derivatives() = Vector2f::UnitY();
+
+ AD res = foo<VectorAD>(ap);
+ VERIFY_IS_APPROX(res.value(), foo(p));
+}
+
+void test_autodiff_jacobian()
+{
+ CALL_SUBTEST(( forward_jacobian(TestFunc1<double,2,2>()) ));
+ CALL_SUBTEST(( forward_jacobian(TestFunc1<double,2,3>()) ));
+ CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,2>()) ));
+ CALL_SUBTEST(( forward_jacobian(TestFunc1<double,3,3>()) ));
+ CALL_SUBTEST(( forward_jacobian(TestFunc1<double>(3,3)) ));
+}
+
+void test_autodiff()
+{
+ for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST_1( test_autodiff_scalar() );
+ CALL_SUBTEST_2( test_autodiff_vector() );
+ CALL_SUBTEST_3( test_autodiff_jacobian() );
+ }
+}
+
diff --git a/unsupported/test/bdcsvd.cpp b/unsupported/test/bdcsvd.cpp
new file mode 100644
index 0000000..115a649
--- /dev/null
+++ b/unsupported/test/bdcsvd.cpp
@@ -0,0 +1,213 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/
+
+#include "svd_common.h"
+#include <iostream>
+#include <Eigen/LU>
+
+// check if "svd" is the good image of "m"
+template<typename MatrixType>
+void bdcsvd_check_full(const MatrixType& m, const BDCSVD<MatrixType>& svd)
+{
+ svd_check_full< MatrixType, BDCSVD< MatrixType > >(m, svd);
+}
+
+// Compare to a reference value
+template<typename MatrixType>
+void bdcsvd_compare_to_full(const MatrixType& m,
+ unsigned int computationOptions,
+ const BDCSVD<MatrixType>& referenceSvd)
+{
+ svd_compare_to_full< MatrixType, BDCSVD< MatrixType > >(m, computationOptions, referenceSvd);
+} // end bdcsvd_compare_to_full
+
+
+template<typename MatrixType>
+void bdcsvd_solve(const MatrixType& m, unsigned int computationOptions)
+{
+ svd_solve< MatrixType, BDCSVD< MatrixType > >(m, computationOptions);
+} // end template bdcsvd_solve
+
+
+// test the computation options
+template<typename MatrixType>
+void bdcsvd_test_all_computation_options(const MatrixType& m)
+{
+ BDCSVD<MatrixType> fullSvd(m, ComputeFullU|ComputeFullV);
+ svd_test_computation_options_1< MatrixType, BDCSVD< MatrixType > >(m, fullSvd);
+ svd_test_computation_options_2< MatrixType, BDCSVD< MatrixType > >(m, fullSvd);
+} // end bdcsvd_test_all_computation_options
+
+
+// Run the test with all the computation options
+template<typename MatrixType>
+void bdcsvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
+{
+ MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a;
+ bdcsvd_test_all_computation_options<MatrixType>(m);
+} // end template bdcsvd
+
+
+// verify assert
+template<typename MatrixType>
+void bdcsvd_verify_assert(const MatrixType& m)
+{
+ svd_verify_assert< MatrixType, BDCSVD< MatrixType > >(m);
+}// end template bdcsvd_verify_assert
+
+
+// test weird values
+template<typename MatrixType>
+void bdcsvd_inf_nan()
+{
+ svd_inf_nan< MatrixType, BDCSVD< MatrixType > >();
+}// end template bdcsvd_inf_nan
+
+
+
+void bdcsvd_preallocate()
+{
+ svd_preallocate< BDCSVD< MatrixXf > >();
+} // end bdcsvd_preallocate
+
+
+// compare the singular values computed by the Jacobi and BDC algorithms
+template<typename MatrixType>
+void compare_bdc_jacobi(const MatrixType& a = MatrixType(), unsigned int computationOptions = 0)
+{
+ std::cout << "debut compare" << std::endl;
+ MatrixType m = MatrixType::Random(a.rows(), a.cols());
+ BDCSVD<MatrixType> bdc_svd(m);
+ JacobiSVD<MatrixType> jacobi_svd(m);
+ VERIFY_IS_APPROX(bdc_svd.singularValues(), jacobi_svd.singularValues());
+ if(computationOptions & ComputeFullU)
+ VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU());
+ if(computationOptions & ComputeThinU)
+ VERIFY_IS_APPROX(bdc_svd.matrixU(), jacobi_svd.matrixU());
+ if(computationOptions & ComputeFullV)
+ VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV());
+ if(computationOptions & ComputeThinV)
+ VERIFY_IS_APPROX(bdc_svd.matrixV(), jacobi_svd.matrixV());
+ std::cout << "fin compare" << std::endl;
+} // end template compare_bdc_jacobi
+
+
+// call the tests
+void test_bdcsvd()
+{
+ // test a dynamic-size (42, 42) matrix of float
+ CALL_SUBTEST_11(( bdcsvd_verify_assert<Matrix<float,Dynamic,Dynamic> >
+ (Matrix<float,Dynamic,Dynamic>(42,42)) ));
+ CALL_SUBTEST_11(( compare_bdc_jacobi<Matrix<float,Dynamic,Dynamic> >
+ (Matrix<float,Dynamic,Dynamic>(42,42), 0) ));
+ CALL_SUBTEST_11(( bdcsvd<Matrix<float,Dynamic,Dynamic> >
+ (Matrix<float,Dynamic,Dynamic>(42,42)) ));
+
+ // test a dynamic-size (50, 50) matrix of double
+ CALL_SUBTEST_13(( bdcsvd_verify_assert<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(50,50)) ));
+ CALL_SUBTEST_13(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(50,50), 0) ));
+ CALL_SUBTEST_13(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(50, 50)) ));
+
+ // test a dynamic-size (22, 22) matrix of complex double
+ CALL_SUBTEST_14(( bdcsvd_verify_assert<Matrix<std::complex<double>,Dynamic,Dynamic> >
+ (Matrix<std::complex<double>,Dynamic,Dynamic>(22,22)) ));
+ CALL_SUBTEST_14(( compare_bdc_jacobi<Matrix<std::complex<double>,Dynamic,Dynamic> >
+ (Matrix<std::complex<double>, Dynamic, Dynamic> (22,22), 0) ));
+ CALL_SUBTEST_14(( bdcsvd<Matrix<std::complex<double>,Dynamic,Dynamic> >
+ (Matrix<std::complex<double>,Dynamic,Dynamic>(22, 22)) ));
+
+ // test a dynamic-size (10, 10) matrix of int (disabled)
+ //CALL_SUBTEST_15(( bdcsvd_verify_assert<Matrix<int,Dynamic,Dynamic> >
+ // (Matrix<int,Dynamic,Dynamic>(10,10)) ));
+ //CALL_SUBTEST_15(( compare_bdc_jacobi<Matrix<int,Dynamic,Dynamic> >
+ // (Matrix<int,Dynamic,Dynamic>(10,10), 0) ));
+ //CALL_SUBTEST_15(( bdcsvd<Matrix<int,Dynamic,Dynamic> >
+ // (Matrix<int,Dynamic,Dynamic>(10, 10)) ));
+
+
+ // test a dynamic-size (8, 6) matrix of double
+
+ CALL_SUBTEST_16(( bdcsvd_verify_assert<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(8,6)) ));
+ CALL_SUBTEST_16(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(8, 6), 0) ));
+ CALL_SUBTEST_16(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(8, 6)) ));
+
+
+
+ // test a dynamic-size (36, 12) matrix of float
+ CALL_SUBTEST_17(( compare_bdc_jacobi<Matrix<float,Dynamic,Dynamic> >
+ (Matrix<float,Dynamic,Dynamic>(36, 12), 0) ));
+ CALL_SUBTEST_17(( bdcsvd<Matrix<float,Dynamic,Dynamic> >
+ (Matrix<float,Dynamic,Dynamic>(36, 12)) ));
+
+ // test a dynamic-size (5, 8) matrix of double
+ CALL_SUBTEST_18(( compare_bdc_jacobi<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(5, 8), 0) ));
+ CALL_SUBTEST_18(( bdcsvd<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(5, 8)) ));
+
+
+ // non-regression tests
+ CALL_SUBTEST_3(( bdcsvd_verify_assert(Matrix3f()) ));
+ CALL_SUBTEST_4(( bdcsvd_verify_assert(Matrix4d()) ));
+ CALL_SUBTEST_7(( bdcsvd_verify_assert(MatrixXf(10,12)) ));
+ CALL_SUBTEST_8(( bdcsvd_verify_assert(MatrixXcd(7,5)) ));
+
+ // subtests 1 and 2 on specific matrices
+ for(int i = 0; i < g_repeat; i++) {
+ Matrix2cd m;
+ m << 0, 1,
+ 0, 1;
+ CALL_SUBTEST_1(( bdcsvd(m, false) ));
+ m << 1, 0,
+ 1, 0;
+ CALL_SUBTEST_1(( bdcsvd(m, false) ));
+
+ Matrix2d n;
+ n << 0, 0,
+ 0, 0;
+ CALL_SUBTEST_2(( bdcsvd(n, false) ));
+ n << 0, 0,
+ 0, 1;
+ CALL_SUBTEST_2(( bdcsvd(n, false) ));
+
+ // Static-size matrices don't work with BDCSVD yet
+ // bdc algo on a random 3x3 float matrix
+ // CALL_SUBTEST_3(( bdcsvd<Matrix3f>() ));
+ // bdc algo on a random 4x4 double matrix
+ // CALL_SUBTEST_4(( bdcsvd<Matrix4d>() ));
+ // bdc algo on a random 3x5 float matrix
+ // CALL_SUBTEST_5(( bdcsvd<Matrix<float,3,5> >() ));
+
+ int r = internal::random<int>(1, 30),
+ c = internal::random<int>(1, 30);
+ CALL_SUBTEST_7(( bdcsvd<MatrixXf>(MatrixXf(r,c)) ));
+ CALL_SUBTEST_8(( bdcsvd<MatrixXcd>(MatrixXcd(r,c)) ));
+ (void) r;
+ (void) c;
+
+ // Test on inf/nan matrix
+ CALL_SUBTEST_7( bdcsvd_inf_nan<MatrixXf>() );
+ }
+
+ CALL_SUBTEST_7(( bdcsvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
+ CALL_SUBTEST_8(( bdcsvd<MatrixXcd>(MatrixXcd(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) ));
+
+ // Test problem size constructors
+ CALL_SUBTEST_7( BDCSVD<MatrixXf>(10,10) );
+
+} // end test_bdcsvd
diff --git a/unsupported/test/dgmres.cpp b/unsupported/test/dgmres.cpp
new file mode 100644
index 0000000..2b11807
--- /dev/null
+++ b/unsupported/test/dgmres.cpp
@@ -0,0 +1,31 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2012 desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "../../test/sparse_solver.h"
+#include <Eigen/src/IterativeSolvers/DGMRES.h>
+
+template<typename T> void test_dgmres_T()
+{
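+ // Exercise the solver on random square sparse systems with a diagonal and an
+ // incomplete-LU preconditioner; the identity and SSOR runs are kept disabled below.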
+ DGMRES<SparseMatrix<T>, DiagonalPreconditioner<T> > dgmres_colmajor_diag;
+ DGMRES<SparseMatrix<T>, IdentityPreconditioner > dgmres_colmajor_I;
+ DGMRES<SparseMatrix<T>, IncompleteLUT<T> > dgmres_colmajor_ilut;
+ //GMRES<SparseMatrix<T>, SSORPreconditioner<T> > dgmres_colmajor_ssor;
+
+ CALL_SUBTEST( check_sparse_square_solving(dgmres_colmajor_diag) );
+// CALL_SUBTEST( check_sparse_square_solving(dgmres_colmajor_I) );
+ CALL_SUBTEST( check_sparse_square_solving(dgmres_colmajor_ilut) );
+ //CALL_SUBTEST( check_sparse_square_solving(dgmres_colmajor_ssor) );
+}
+
+void test_dgmres()
+{
+ CALL_SUBTEST_1(test_dgmres_T<double>());
+ CALL_SUBTEST_2(test_dgmres_T<std::complex<double> >());
+}
diff --git a/unsupported/test/forward_adolc.cpp b/unsupported/test/forward_adolc.cpp
new file mode 100644
index 0000000..d4baafe
--- /dev/null
+++ b/unsupported/test/forward_adolc.cpp
@@ -0,0 +1,143 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <Eigen/Dense>
+
+#define NUMBER_DIRECTIONS 16
+#include <unsupported/Eigen/AdolcForward>
+
+int adtl::ADOLC_numDir;
+
+template<typename Vector>
+EIGEN_DONT_INLINE typename Vector::Scalar foo(const Vector& p)
+{
+ typedef typename Vector::Scalar Scalar;
+ return (p-Vector(Scalar(-1),Scalar(1.))).norm() + (p.array().sqrt().abs() * p.array().sin()).sum() + p.dot(p);
+}
+
+template<typename _Scalar, int NX=Dynamic, int NY=Dynamic>
+struct TestFunc1
+{
+ typedef _Scalar Scalar;
+ enum {
+ InputsAtCompileTime = NX,
+ ValuesAtCompileTime = NY
+ };
+ typedef Matrix<Scalar,InputsAtCompileTime,1> InputType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,1> ValueType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,InputsAtCompileTime> JacobianType;
+
+ int m_inputs, m_values;
+
+ TestFunc1() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {}
+ TestFunc1(int inputs, int values) : m_inputs(inputs), m_values(values) {}
+
+ int inputs() const { return m_inputs; }
+ int values() const { return m_values; }
+
+ template<typename T>
+ void operator() (const Matrix<T,InputsAtCompileTime,1>& x, Matrix<T,ValuesAtCompileTime,1>* _v) const
+ {
+ Matrix<T,ValuesAtCompileTime,1>& v = *_v;
+
+ v[0] = 2 * x[0] * x[0] + x[0] * x[1];
+ v[1] = 3 * x[1] * x[0] + 0.5 * x[1] * x[1];
+ if(inputs()>2)
+ {
+ v[0] += 0.5 * x[2];
+ v[1] += x[2];
+ }
+ if(values()>2)
+ {
+ v[2] = 3 * x[1] * x[0] * x[0];
+ }
+ if (inputs()>2 && values()>2)
+ v[2] *= x[2];
+ }
+
+ void operator() (const InputType& x, ValueType* v, JacobianType* _j) const
+ {
+ (*this)(x, v);
+
+ if(_j)
+ {
+ JacobianType& j = *_j;
+
+ j(0,0) = 4 * x[0] + x[1];
+ j(1,0) = 3 * x[1];
+
+ j(0,1) = x[0];
+ j(1,1) = 3 * x[0] + 2 * 0.5 * x[1];
+
+ if (inputs()>2)
+ {
+ j(0,2) = 0.5;
+ j(1,2) = 1;
+ }
+ if(values()>2)
+ {
+ j(2,0) = 3 * x[1] * 2 * x[0];
+ j(2,1) = 3 * x[0] * x[0];
+ }
+ if (inputs()>2 && values()>2)
+ {
+ j(2,0) *= x[2];
+ j(2,1) *= x[2];
+
+ j(2,2) = 3 * x[1] * x[0] * x[0];
+ }
+ }
+ }
+};
+
+template<typename Func> void adolc_forward_jacobian(const Func& f)
+{
+ typename Func::InputType x = Func::InputType::Random(f.inputs());
+ typename Func::ValueType y(f.values()), yref(f.values());
+ typename Func::JacobianType j(f.values(),f.inputs()), jref(f.values(),f.inputs());
+
+ jref.setZero();
+ yref.setZero();
+ f(x,&yref,&jref);
+// std::cerr << y.transpose() << "\n\n";;
+// std::cerr << j << "\n\n";;
+
+ j.setZero();
+ y.setZero();
+ AdolcForwardJacobian<Func> autoj(f);
+ autoj(x, &y, &j);
+// std::cerr << y.transpose() << "\n\n";;
+// std::cerr << j << "\n\n";;
+
+ VERIFY_IS_APPROX(y, yref);
+ VERIFY_IS_APPROX(j, jref);
+}
+
+void test_forward_adolc()
+{
+ adtl::ADOLC_numDir = NUMBER_DIRECTIONS;
+
+ for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1<double,2,2>()) ));
+ CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1<double,2,3>()) ));
+ CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1<double,3,2>()) ));
+ CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1<double,3,3>()) ));
+ CALL_SUBTEST(( adolc_forward_jacobian(TestFunc1<double>(3,3)) ));
+ }
+
+ {
+ // simple instantiation tests
+ Matrix<adtl::adouble,2,1> x;
+ foo(x);
+ Matrix<adtl::adouble,Dynamic,Dynamic> A(4,4);
+ A.selfadjointView<Lower>().eigenvalues();
+ }
+}
diff --git a/unsupported/test/gmres.cpp b/unsupported/test/gmres.cpp
new file mode 100644
index 0000000..f296911
--- /dev/null
+++ b/unsupported/test/gmres.cpp
@@ -0,0 +1,31 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2012 Kolja Brix <brix@igpm.rwth-aaachen.de>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "../../test/sparse_solver.h"
+#include <Eigen/IterativeSolvers>
+
+template<typename T> void test_gmres_T()
+{
+ GMRES<SparseMatrix<T>, DiagonalPreconditioner<T> > gmres_colmajor_diag;
+ GMRES<SparseMatrix<T>, IdentityPreconditioner > gmres_colmajor_I;
+ GMRES<SparseMatrix<T>, IncompleteLUT<T> > gmres_colmajor_ilut;
+ //GMRES<SparseMatrix<T>, SSORPreconditioner<T> > gmres_colmajor_ssor;
+
+ CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_diag) );
+// CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_I) );
+ CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_ilut) );
+ //CALL_SUBTEST( check_sparse_square_solving(gmres_colmajor_ssor) );
+}
+
+void test_gmres()
+{
+ CALL_SUBTEST_1(test_gmres_T<double>());
+ CALL_SUBTEST_2(test_gmres_T<std::complex<double> >());
+}
diff --git a/unsupported/test/jacobisvd.cpp b/unsupported/test/jacobisvd.cpp
new file mode 100644
index 0000000..b4e884e
--- /dev/null
+++ b/unsupported/test/jacobisvd.cpp
@@ -0,0 +1,198 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "svd_common.h"
+
+template<typename MatrixType, int QRPreconditioner>
+void jacobisvd_check_full(const MatrixType& m, const JacobiSVD<MatrixType, QRPreconditioner>& svd)
+{
+ svd_check_full<MatrixType, JacobiSVD<MatrixType, QRPreconditioner > >(m, svd);
+}
+
+template<typename MatrixType, int QRPreconditioner>
+void jacobisvd_compare_to_full(const MatrixType& m,
+ unsigned int computationOptions,
+ const JacobiSVD<MatrixType, QRPreconditioner>& referenceSvd)
+{
+ svd_compare_to_full<MatrixType, JacobiSVD<MatrixType, QRPreconditioner> >(m, computationOptions, referenceSvd);
+}
+
+
+template<typename MatrixType, int QRPreconditioner>
+void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions)
+{
+ svd_solve< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, computationOptions);
+}
+
+
+
+template<typename MatrixType, int QRPreconditioner>
+void jacobisvd_test_all_computation_options(const MatrixType& m)
+{
+
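+ // NoQRPreconditioner only handles square matrices, hence the early return below.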
+ if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols())
+ return;
+
+ JacobiSVD< MatrixType, QRPreconditioner > fullSvd(m, ComputeFullU|ComputeFullV);
+ svd_test_computation_options_1< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, fullSvd);
+
+ if(QRPreconditioner == FullPivHouseholderQRPreconditioner)
+ return;
+ svd_test_computation_options_2< MatrixType, JacobiSVD< MatrixType, QRPreconditioner > >(m, fullSvd);
+
+}
+
+template<typename MatrixType>
+void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true)
+{
+ MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a;
+
+ jacobisvd_test_all_computation_options<MatrixType, FullPivHouseholderQRPreconditioner>(m);
+ jacobisvd_test_all_computation_options<MatrixType, ColPivHouseholderQRPreconditioner>(m);
+ jacobisvd_test_all_computation_options<MatrixType, HouseholderQRPreconditioner>(m);
+ jacobisvd_test_all_computation_options<MatrixType, NoQRPreconditioner>(m);
+}
+
+
+template<typename MatrixType>
+void jacobisvd_verify_assert(const MatrixType& m)
+{
+
+ svd_verify_assert<MatrixType, JacobiSVD< MatrixType > >(m);
+
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ MatrixType a = MatrixType::Zero(rows, cols);
+
+ if (ColsAtCompileTime == Dynamic)
+ {
+ JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner> svd_fullqr;
+ VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeFullU|ComputeThinV))
+ VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeThinV))
+ VERIFY_RAISES_ASSERT(svd_fullqr.compute(a, ComputeThinU|ComputeFullV))
+ }
+}
+
+template<typename MatrixType>
+void jacobisvd_method()
+{
+ enum { Size = MatrixType::RowsAtCompileTime };
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef Matrix<RealScalar, Size, 1> RealVecType;
+ MatrixType m = MatrixType::Identity();
+ VERIFY_IS_APPROX(m.jacobiSvd().singularValues(), RealVecType::Ones());
+ VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixU());
+ VERIFY_RAISES_ASSERT(m.jacobiSvd().matrixV());
+ VERIFY_IS_APPROX(m.jacobiSvd(ComputeFullU|ComputeFullV).solve(m), m);
+}
+
+
+
+template<typename MatrixType>
+void jacobisvd_inf_nan()
+{
+ svd_inf_nan<MatrixType, JacobiSVD< MatrixType > >();
+}
+
+
+// Regression test for bug 286: JacobiSVD loops indefinitely with some
+// matrices containing denormal numbers.
+void jacobisvd_bug286()
+{
+#if defined __INTEL_COMPILER
+// shut up warning #239: floating point underflow
+#pragma warning push
+#pragma warning disable 239
+#endif
+ Matrix2d M;
+ M << -7.90884e-313, -4.94e-324,
+ 0, 5.60844e-313;
+#if defined __INTEL_COMPILER
+#pragma warning pop
+#endif
+ JacobiSVD<Matrix2d> svd;
+ svd.compute(M); // just check we don't loop indefinitely
+}
+
+
+void jacobisvd_preallocate()
+{
+ svd_preallocate< JacobiSVD <MatrixXf> >();
+}
+
+void test_jacobisvd()
+{
+ CALL_SUBTEST_11(( jacobisvd<Matrix<double,Dynamic,Dynamic> >
+ (Matrix<double,Dynamic,Dynamic>(16, 6)) ));
+
+ CALL_SUBTEST_3(( jacobisvd_verify_assert(Matrix3f()) ));
+ CALL_SUBTEST_4(( jacobisvd_verify_assert(Matrix4d()) ));
+ CALL_SUBTEST_7(( jacobisvd_verify_assert(MatrixXf(10,12)) ));
+ CALL_SUBTEST_8(( jacobisvd_verify_assert(MatrixXcd(7,5)) ));
+
+ for(int i = 0; i < g_repeat; i++) {
+ Matrix2cd m;
+ m << 0, 1,
+ 0, 1;
+ CALL_SUBTEST_1(( jacobisvd(m, false) ));
+ m << 1, 0,
+ 1, 0;
+ CALL_SUBTEST_1(( jacobisvd(m, false) ));
+
+ Matrix2d n;
+ n << 0, 0,
+ 0, 0;
+ CALL_SUBTEST_2(( jacobisvd(n, false) ));
+ n << 0, 0,
+ 0, 1;
+ CALL_SUBTEST_2(( jacobisvd(n, false) ));
+
+ CALL_SUBTEST_3(( jacobisvd<Matrix3f>() ));
+ CALL_SUBTEST_4(( jacobisvd<Matrix4d>() ));
+ CALL_SUBTEST_5(( jacobisvd<Matrix<float,3,5> >() ));
+ CALL_SUBTEST_6(( jacobisvd<Matrix<double,Dynamic,2> >(Matrix<double,Dynamic,2>(10,2)) ));
+
+ int r = internal::random<int>(1, 30),
+ c = internal::random<int>(1, 30);
+ CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(r,c)) ));
+ CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(r,c)) ));
+ (void) r;
+ (void) c;
+
+ // Test on inf/nan matrix
+ CALL_SUBTEST_7( jacobisvd_inf_nan<MatrixXf>() );
+ }
+
+ CALL_SUBTEST_7(( jacobisvd<MatrixXf>(MatrixXf(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) ));
+ CALL_SUBTEST_8(( jacobisvd<MatrixXcd>(MatrixXcd(internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3), internal::random<int>(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/3))) ));
+
+
+ // test matrixbase method
+ CALL_SUBTEST_1(( jacobisvd_method<Matrix2cd>() ));
+ CALL_SUBTEST_3(( jacobisvd_method<Matrix3f>() ));
+
+
+ // Test problem size constructors
+ CALL_SUBTEST_7( JacobiSVD<MatrixXf>(10,10) );
+
+ // Check that preallocation avoids subsequent mallocs
+ CALL_SUBTEST_9( jacobisvd_preallocate() );
+
+ // Regression check for bug 286
+ CALL_SUBTEST_2( jacobisvd_bug286() );
+}
diff --git a/unsupported/test/kronecker_product.cpp b/unsupported/test/kronecker_product.cpp
new file mode 100644
index 0000000..8ddc6ec
--- /dev/null
+++ b/unsupported/test/kronecker_product.cpp
@@ -0,0 +1,181 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Kolja Brix <brix@igpm.rwth-aachen.de>
+// Copyright (C) 2011 Andreas Platen <andiplaten@gmx.de>
+// Copyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#include "sparse.h"
+#include <Eigen/SparseExtra>
+#include <Eigen/KroneckerProduct>
+
+
+template<typename MatrixType>
+void check_dimension(const MatrixType& ab, const int rows, const int cols)
+{
+ VERIFY_IS_EQUAL(ab.rows(), rows);
+ VERIFY_IS_EQUAL(ab.cols(), cols);
+}
+
+
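+// Check a 6x6 Kronecker product (of 2x3 and 3x2 factors) against precomputed reference coefficients.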
+template<typename MatrixType>
+void check_kronecker_product(const MatrixType& ab)
+{
+ VERIFY_IS_EQUAL(ab.rows(), 6);
+ VERIFY_IS_EQUAL(ab.cols(), 6);
+ VERIFY_IS_EQUAL(ab.nonZeros(), 36);
+ VERIFY_IS_APPROX(ab.coeff(0,0), -0.4017367630386106);
+ VERIFY_IS_APPROX(ab.coeff(0,1), 0.1056863433932735);
+ VERIFY_IS_APPROX(ab.coeff(0,2), -0.7255206194554212);
+ VERIFY_IS_APPROX(ab.coeff(0,3), 0.1908653336744706);
+ VERIFY_IS_APPROX(ab.coeff(0,4), 0.350864567234111);
+ VERIFY_IS_APPROX(ab.coeff(0,5), -0.0923032108308013);
+ VERIFY_IS_APPROX(ab.coeff(1,0), 0.415417514804677);
+ VERIFY_IS_APPROX(ab.coeff(1,1), -0.2369227701722048);
+ VERIFY_IS_APPROX(ab.coeff(1,2), 0.7502275131458511);
+ VERIFY_IS_APPROX(ab.coeff(1,3), -0.4278731019742696);
+ VERIFY_IS_APPROX(ab.coeff(1,4), -0.3628129162264507);
+ VERIFY_IS_APPROX(ab.coeff(1,5), 0.2069210808481275);
+ VERIFY_IS_APPROX(ab.coeff(2,0), 0.05465890160863986);
+ VERIFY_IS_APPROX(ab.coeff(2,1), -0.2634092511419858);
+ VERIFY_IS_APPROX(ab.coeff(2,2), 0.09871180285793758);
+ VERIFY_IS_APPROX(ab.coeff(2,3), -0.4757066334017702);
+ VERIFY_IS_APPROX(ab.coeff(2,4), -0.04773740823058334);
+ VERIFY_IS_APPROX(ab.coeff(2,5), 0.2300535609645254);
+ VERIFY_IS_APPROX(ab.coeff(3,0), -0.8172945853260133);
+ VERIFY_IS_APPROX(ab.coeff(3,1), 0.2150086428359221);
+ VERIFY_IS_APPROX(ab.coeff(3,2), 0.5825113847292743);
+ VERIFY_IS_APPROX(ab.coeff(3,3), -0.1532433770097174);
+ VERIFY_IS_APPROX(ab.coeff(3,4), -0.329383387282399);
+ VERIFY_IS_APPROX(ab.coeff(3,5), 0.08665207912033064);
+ VERIFY_IS_APPROX(ab.coeff(4,0), 0.8451267514863225);
+ VERIFY_IS_APPROX(ab.coeff(4,1), -0.481996458918977);
+ VERIFY_IS_APPROX(ab.coeff(4,2), -0.6023482390791535);
+ VERIFY_IS_APPROX(ab.coeff(4,3), 0.3435339347164565);
+ VERIFY_IS_APPROX(ab.coeff(4,4), 0.3406002157428891);
+ VERIFY_IS_APPROX(ab.coeff(4,5), -0.1942526344200915);
+ VERIFY_IS_APPROX(ab.coeff(5,0), 0.1111982482925399);
+ VERIFY_IS_APPROX(ab.coeff(5,1), -0.5358806424754169);
+ VERIFY_IS_APPROX(ab.coeff(5,2), -0.07925446559335647);
+ VERIFY_IS_APPROX(ab.coeff(5,3), 0.3819388757769038);
+ VERIFY_IS_APPROX(ab.coeff(5,4), 0.04481475387219876);
+ VERIFY_IS_APPROX(ab.coeff(5,5), -0.2159688616158057);
+}
+
+
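+// Check the sparse Kronecker product of a 4x5 matrix with 3 nonzeros and a 3x2 matrix
+// with 2 nonzeros: the result is 12x10 with 3*2 nonzeros.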
+template<typename MatrixType>
+void check_sparse_kronecker_product(const MatrixType& ab)
+{
+ VERIFY_IS_EQUAL(ab.rows(), 12);
+ VERIFY_IS_EQUAL(ab.cols(), 10);
+ VERIFY_IS_EQUAL(ab.nonZeros(), 3*2);
+ VERIFY_IS_APPROX(ab.coeff(3,0), -0.04);
+ VERIFY_IS_APPROX(ab.coeff(5,1), 0.05);
+ VERIFY_IS_APPROX(ab.coeff(0,6), -0.08);
+ VERIFY_IS_APPROX(ab.coeff(2,7), 0.10);
+ VERIFY_IS_APPROX(ab.coeff(6,8), 0.12);
+ VERIFY_IS_APPROX(ab.coeff(8,9), -0.15);
+}
+
+
+void test_kronecker_product()
+{
+ // DM = dense matrix; SM = sparse matrix
+
+ Matrix<double, 2, 3> DM_a;
+ SparseMatrix<double> SM_a(2,3);
+ SM_a.insert(0,0) = DM_a.coeffRef(0,0) = -0.4461540300782201;
+ SM_a.insert(0,1) = DM_a.coeffRef(0,1) = -0.8057364375283049;
+ SM_a.insert(0,2) = DM_a.coeffRef(0,2) = 0.3896572459516341;
+ SM_a.insert(1,0) = DM_a.coeffRef(1,0) = -0.9076572187376921;
+ SM_a.insert(1,1) = DM_a.coeffRef(1,1) = 0.6469156566545853;
+ SM_a.insert(1,2) = DM_a.coeffRef(1,2) = -0.3658010398782789;
+
+ MatrixXd DM_b(3,2);
+ SparseMatrix<double> SM_b(3,2);
+ SM_b.insert(0,0) = DM_b.coeffRef(0,0) = 0.9004440976767099;
+ SM_b.insert(0,1) = DM_b.coeffRef(0,1) = -0.2368830858139832;
+ SM_b.insert(1,0) = DM_b.coeffRef(1,0) = -0.9311078389941825;
+ SM_b.insert(1,1) = DM_b.coeffRef(1,1) = 0.5310335762980047;
+ SM_b.insert(2,0) = DM_b.coeffRef(2,0) = -0.1225112806872035;
+ SM_b.insert(2,1) = DM_b.coeffRef(2,1) = 0.5903998022741264;
+
+ SparseMatrix<double,RowMajor> SM_row_a(SM_a), SM_row_b(SM_b);
+
+ // test kroneckerProduct(DM_block,DM,DM_fixedSize)
+ Matrix<double, 6, 6> DM_fix_ab = kroneckerProduct(DM_a.topLeftCorner<2,3>(),DM_b);
+
+ CALL_SUBTEST(check_kronecker_product(DM_fix_ab));
+
+ for(int i=0;i<DM_fix_ab.rows();++i)
+ for(int j=0;j<DM_fix_ab.cols();++j)
+ VERIFY_IS_APPROX(kroneckerProduct(DM_a,DM_b).coeff(i,j), DM_fix_ab(i,j));
+
+ // test kroneckerProduct(DM,DM,DM_block)
+ MatrixXd DM_block_ab(10,15);
+ DM_block_ab.block<6,6>(2,5) = kroneckerProduct(DM_a,DM_b);
+ CALL_SUBTEST(check_kronecker_product(DM_block_ab.block<6,6>(2,5)));
+
+ // test kroneckerProduct(DM,DM,DM)
+ MatrixXd DM_ab = kroneckerProduct(DM_a,DM_b);
+ CALL_SUBTEST(check_kronecker_product(DM_ab));
+
+ // test kroneckerProduct(SM,DM,SM)
+ SparseMatrix<double> SM_ab = kroneckerProduct(SM_a,DM_b);
+ CALL_SUBTEST(check_kronecker_product(SM_ab));
+ SparseMatrix<double,RowMajor> SM_ab2 = kroneckerProduct(SM_a,DM_b);
+ CALL_SUBTEST(check_kronecker_product(SM_ab2));
+
+ // test kroneckerProduct(DM,SM,SM)
+ SM_ab.setZero();
+ SM_ab.insert(0,0)=37.0;
+ SM_ab = kroneckerProduct(DM_a,SM_b);
+ CALL_SUBTEST(check_kronecker_product(SM_ab));
+ SM_ab2.setZero();
+ SM_ab2.insert(0,0)=37.0;
+ SM_ab2 = kroneckerProduct(DM_a,SM_b);
+ CALL_SUBTEST(check_kronecker_product(SM_ab2));
+
+ // test kroneckerProduct(SM,SM,SM)
+ SM_ab.resize(2,33);
+ SM_ab.insert(0,0)=37.0;
+ SM_ab = kroneckerProduct(SM_a,SM_b);
+ CALL_SUBTEST(check_kronecker_product(SM_ab));
+ SM_ab2.resize(5,11);
+ SM_ab2.insert(0,0)=37.0;
+ SM_ab2 = kroneckerProduct(SM_a,SM_b);
+ CALL_SUBTEST(check_kronecker_product(SM_ab2));
+
+ // test kroneckerProduct(SM,SM,SM) with sparse pattern
+ SM_a.resize(4,5);
+ SM_b.resize(3,2);
+ SM_a.resizeNonZeros(0);
+ SM_b.resizeNonZeros(0);
+ SM_a.insert(1,0) = -0.1;
+ SM_a.insert(0,3) = -0.2;
+ SM_a.insert(2,4) = 0.3;
+ SM_a.finalize();
+
+ SM_b.insert(0,0) = 0.4;
+ SM_b.insert(2,1) = -0.5;
+ SM_b.finalize();
+ SM_ab.resize(1,1);
+ SM_ab.insert(0,0)=37.0;
+ SM_ab = kroneckerProduct(SM_a,SM_b);
+ CALL_SUBTEST(check_sparse_kronecker_product(SM_ab));
+
+ // test dimension of result of kroneckerProduct(DM,DM,DM)
+ MatrixXd DM_a2(2,1);
+ MatrixXd DM_b2(5,4);
+ MatrixXd DM_ab2 = kroneckerProduct(DM_a2,DM_b2);
+ CALL_SUBTEST(check_dimension(DM_ab2,2*5,1*4));
+ DM_a2.resize(10,9);
+ DM_b2.resize(4,8);
+ DM_ab2 = kroneckerProduct(DM_a2,DM_b2);
+ CALL_SUBTEST(check_dimension(DM_ab2,10*4,9*8));
+}
diff --git a/unsupported/test/levenberg_marquardt.cpp b/unsupported/test/levenberg_marquardt.cpp
new file mode 100644
index 0000000..0446472
--- /dev/null
+++ b/unsupported/test/levenberg_marquardt.cpp
@@ -0,0 +1,1448 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+// Copyright (C) 2012 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#include <stdio.h>
+
+#include "main.h"
+#include <unsupported/Eigen/LevenbergMarquardt>
+
+// This disables some useless warnings on MSVC.
+// It is intended for this test only.
+#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+
+using std::sqrt;
+
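+// MINPACK's lmder test problem: fit the model y ~ x0 + t1/(x1*t2 + x2*t3) to
+// 15 data points with 3 parameters, using an analytic Jacobian.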
+struct lmder_functor : DenseFunctor<double>
+{
+ lmder_functor(void): DenseFunctor<double>(3,15) {}
+ int operator()(const VectorXd &x, VectorXd &fvec) const
+ {
+ double tmp1, tmp2, tmp3;
+ static const double y[15] = {1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1,
+ 3.9e-1, 3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34, 2.1, 4.39};
+
+ for (int i = 0; i < values(); i++)
+ {
+ tmp1 = i+1;
+ tmp2 = 16 - i - 1;
+ tmp3 = (i>=8)? tmp2 : tmp1;
+ fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3));
+ }
+ return 0;
+ }
+
+ int df(const VectorXd &x, MatrixXd &fjac) const
+ {
+ double tmp1, tmp2, tmp3, tmp4;
+ for (int i = 0; i < values(); i++)
+ {
+ tmp1 = i+1;
+ tmp2 = 16 - i - 1;
+ tmp3 = (i>=8)? tmp2 : tmp1;
+ tmp4 = (x[1]*tmp2 + x[2]*tmp3); tmp4 = tmp4*tmp4;
+ fjac(i,0) = -1;
+ fjac(i,1) = tmp1*tmp2/tmp4;
+ fjac(i,2) = tmp1*tmp3/tmp4;
+ }
+ return 0;
+ }
+};
+
+void testLmder1()
+{
+ int n=3, info;
+
+ VectorXd x;
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, 1.);
+
+ // do the computation
+ lmder_functor functor;
+ LevenbergMarquardt<lmder_functor> lm(functor);
+ info = lm.lmder1(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 6);
+ VERIFY_IS_EQUAL(lm.njev(), 5);
+
+ // check norm
+ VERIFY_IS_APPROX(lm.fvec().blueNorm(), 0.09063596);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref << 0.08241058, 1.133037, 2.343695;
+ VERIFY_IS_APPROX(x, x_ref);
+}
+
+void testLmder()
+{
+ const int m=15, n=3;
+ int info;
+ double fnorm, covfac;
+ VectorXd x;
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, 1.);
+
+ // do the computation
+ lmder_functor functor;
+ LevenbergMarquardt<lmder_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return values
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 6);
+ VERIFY_IS_EQUAL(lm.njev(), 5);
+
+ // check norm
+ fnorm = lm.fvec().blueNorm();
+ VERIFY_IS_APPROX(fnorm, 0.09063596);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref << 0.08241058, 1.133037, 2.343695;
+ VERIFY_IS_APPROX(x, x_ref);
+
+ // check covariance
+ covfac = fnorm*fnorm/(m-n);
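+ // As in MINPACK's covar: rewrite lm.matrixR() in place into the unscaled covariance
+ // (J^T J)^{-1}; it is scaled by covfac below.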
+ internal::covar(lm.matrixR(), lm.permutation().indices()); // TODO: move this into a member function of lm
+
+ MatrixXd cov_ref(n,n);
+ cov_ref <<
+ 0.0001531202, 0.002869941, -0.002656662,
+ 0.002869941, 0.09480935, -0.09098995,
+ -0.002656662, -0.09098995, 0.08778727;
+
+// std::cout << fjac*covfac << std::endl;
+
+ MatrixXd cov;
+ cov = covfac*lm.matrixR().topLeftCorner<n,n>();
+ VERIFY_IS_APPROX( cov, cov_ref);
+ // TODO: why isn't this allowed?
+ // VERIFY_IS_APPROX( covfac*fjac.topLeftCorner<n,n>() , cov_ref);
+}
+
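+// Same model as lmder_functor but without an analytic Jacobian; used through
+// NumericalDiff (and the static lmdif1) to test the derivative-free path.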
+struct lmdif_functor : DenseFunctor<double>
+{
+ lmdif_functor(void) : DenseFunctor<double>(3,15) {}
+ int operator()(const VectorXd &x, VectorXd &fvec) const
+ {
+ int i;
+ double tmp1,tmp2,tmp3;
+ static const double y[15]={1.4e-1,1.8e-1,2.2e-1,2.5e-1,2.9e-1,3.2e-1,3.5e-1,3.9e-1,
+ 3.7e-1,5.8e-1,7.3e-1,9.6e-1,1.34e0,2.1e0,4.39e0};
+
+ assert(x.size()==3);
+ assert(fvec.size()==15);
+ for (i=0; i<15; i++)
+ {
+ tmp1 = i+1;
+ tmp2 = 15 - i;
+ tmp3 = tmp1;
+
+ if (i >= 8) tmp3 = tmp2;
+ fvec[i] = y[i] - (x[0] + tmp1/(x[1]*tmp2 + x[2]*tmp3));
+ }
+ return 0;
+ }
+};
+
+void testLmdif1()
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n), fvec(15);
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, 1.);
+
+ // do the computation
+ lmdif_functor functor;
+ DenseIndex nfev;
+ info = LevenbergMarquardt<lmdif_functor>::lmdif1(functor, x, &nfev);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+// VERIFY_IS_EQUAL(nfev, 26);
+
+ // check norm
+ functor(x, fvec);
+ VERIFY_IS_APPROX(fvec.blueNorm(), 0.09063596);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref << 0.0824106, 1.1330366, 2.3436947;
+ VERIFY_IS_APPROX(x, x_ref);
+
+}
+
+void testLmdif()
+{
+ const int m=15, n=3;
+ int info;
+ double fnorm, covfac;
+ VectorXd x(n);
+
+ /* the following starting values provide a rough fit. */
+ x.setConstant(n, 1.);
+
+ // do the computation
+ lmdif_functor functor;
+ NumericalDiff<lmdif_functor> numDiff(functor);
+ LevenbergMarquardt<NumericalDiff<lmdif_functor> > lm(numDiff);
+ info = lm.minimize(x);
+
+ // check return values
+ VERIFY_IS_EQUAL(info, 1);
+// VERIFY_IS_EQUAL(lm.nfev(), 26);
+
+ // check norm
+ fnorm = lm.fvec().blueNorm();
+ VERIFY_IS_APPROX(fnorm, 0.09063596);
+
+ // check x
+ VectorXd x_ref(n);
+ x_ref << 0.08241058, 1.133037, 2.343695;
+ VERIFY_IS_APPROX(x, x_ref);
+
+ // check covariance
+ covfac = fnorm*fnorm/(m-n);
+ internal::covar(lm.matrixR(), lm.permutation().indices()); // TODO: move this into a member function of lm
+
+ MatrixXd cov_ref(n,n);
+ cov_ref <<
+ 0.0001531202, 0.002869942, -0.002656662,
+ 0.002869942, 0.09480937, -0.09098997,
+ -0.002656662, -0.09098997, 0.08778729;
+
+// std::cout << fjac*covfac << std::endl;
+
+ MatrixXd cov;
+ cov = covfac*lm.matrixR().topLeftCorner<n,n>();
+ VERIFY_IS_APPROX( cov, cov_ref);
+ // TODO: why isn't this allowed?
+ // VERIFY_IS_APPROX( covfac*fjac.topLeftCorner<n,n>() , cov_ref);
+}
+
+struct chwirut2_functor : DenseFunctor<double>
+{
+ chwirut2_functor(void) : DenseFunctor<double>(3,54) {}
+ static const double m_x[54];
+ static const double m_y[54];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ int i;
+
+ assert(b.size()==3);
+ assert(fvec.size()==54);
+ for(i=0; i<54; i++) {
+ double x = m_x[i];
+ fvec[i] = exp(-b[0]*x)/(b[1]+b[2]*x) - m_y[i];
+ }
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==3);
+ assert(fjac.rows()==54);
+ assert(fjac.cols()==3);
+ for(int i=0; i<54; i++) {
+ double x = m_x[i];
+ double factor = 1./(b[1]+b[2]*x);
+ double e = exp(-b[0]*x);
+ fjac(i,0) = -x*e*factor;
+ fjac(i,1) = -e*factor*factor;
+ fjac(i,2) = -x*e*factor*factor;
+ }
+ return 0;
+ }
+};
+const double chwirut2_functor::m_x[54] = { 0.500E0, 1.000E0, 1.750E0, 3.750E0, 5.750E0, 0.875E0, 2.250E0, 3.250E0, 5.250E0, 0.750E0, 1.750E0, 2.750E0, 4.750E0, 0.625E0, 1.250E0, 2.250E0, 4.250E0, .500E0, 3.000E0, .750E0, 3.000E0, 1.500E0, 6.000E0, 3.000E0, 6.000E0, 1.500E0, 3.000E0, .500E0, 2.000E0, 4.000E0, .750E0, 2.000E0, 5.000E0, .750E0, 2.250E0, 3.750E0, 5.750E0, 3.000E0, .750E0, 2.500E0, 4.000E0, .750E0, 2.500E0, 4.000E0, .750E0, 2.500E0, 4.000E0, .500E0, 6.000E0, 3.000E0, .500E0, 2.750E0, .500E0, 1.750E0};
+const double chwirut2_functor::m_y[54] = { 92.9000E0 ,57.1000E0 ,31.0500E0 ,11.5875E0 ,8.0250E0 ,63.6000E0 ,21.4000E0 ,14.2500E0 ,8.4750E0 ,63.8000E0 ,26.8000E0 ,16.4625E0 ,7.1250E0 ,67.3000E0 ,41.0000E0 ,21.1500E0 ,8.1750E0 ,81.5000E0 ,13.1200E0 ,59.9000E0 ,14.6200E0 ,32.9000E0 ,5.4400E0 ,12.5600E0 ,5.4400E0 ,32.0000E0 ,13.9500E0 ,75.8000E0 ,20.0000E0 ,10.4200E0 ,59.5000E0 ,21.6700E0 ,8.5500E0 ,62.0000E0 ,20.2000E0 ,7.7600E0 ,3.7500E0 ,11.8100E0 ,54.7000E0 ,23.7000E0 ,11.5500E0 ,61.3000E0 ,17.7000E0 ,8.7400E0 ,59.2000E0 ,16.3000E0 ,8.6200E0 ,81.0000E0 ,4.8700E0 ,14.6200E0 ,81.7000E0 ,17.1700E0 ,81.3000E0 ,28.9000E0 };
+
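+// The NIST StRD nonlinear least-squares problems below are each run from the two
+// certified starting points, checking the certified residual sum of squares and
+// parameter values.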
+// http://www.itl.nist.gov/div898/strd/nls/data/chwirut2.shtml
+void testNistChwirut2(void)
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 0.1, 0.01, 0.02;
+ // do the computation
+ chwirut2_functor functor;
+ LevenbergMarquardt<chwirut2_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+// VERIFY_IS_EQUAL(lm.nfev(), 10);
+ VERIFY_IS_EQUAL(lm.njev(), 8);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.1304802941E+02);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.6657666537E-01);
+ VERIFY_IS_APPROX(x[1], 5.1653291286E-03);
+ VERIFY_IS_APPROX(x[2], 1.2150007096E-02);
+
+ /*
+ * Second try
+ */
+ x<< 0.15, 0.008, 0.010;
+ // do the computation
+ lm.resetParameters();
+ lm.setFtol(1.E6*NumTraits<double>::epsilon());
+ lm.setXtol(1.E6*NumTraits<double>::epsilon());
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+// VERIFY_IS_EQUAL(lm.nfev(), 7);
+ VERIFY_IS_EQUAL(lm.njev(), 6);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.1304802941E+02);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.6657666537E-01);
+ VERIFY_IS_APPROX(x[1], 5.1653291286E-03);
+ VERIFY_IS_APPROX(x[2], 1.2150007096E-02);
+}
+
+
+struct misra1a_functor : DenseFunctor<double>
+{
+ misra1a_functor(void) : DenseFunctor<double>(2,14) {}
+ static const double m_x[14];
+ static const double m_y[14];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==2);
+ assert(fvec.size()==14);
+ for(int i=0; i<14; i++) {
+ fvec[i] = b[0]*(1.-exp(-b[1]*m_x[i])) - m_y[i] ;
+ }
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==2);
+ assert(fjac.rows()==14);
+ assert(fjac.cols()==2);
+ for(int i=0; i<14; i++) {
+ fjac(i,0) = (1.-exp(-b[1]*m_x[i]));
+ fjac(i,1) = (b[0]*m_x[i]*exp(-b[1]*m_x[i]));
+ }
+ return 0;
+ }
+};
+const double misra1a_functor::m_x[14] = { 77.6E0, 114.9E0, 141.1E0, 190.8E0, 239.9E0, 289.0E0, 332.8E0, 378.4E0, 434.8E0, 477.3E0, 536.8E0, 593.1E0, 689.1E0, 760.0E0};
+const double misra1a_functor::m_y[14] = { 10.07E0, 14.73E0, 17.94E0, 23.93E0, 29.61E0, 35.18E0, 40.02E0, 44.82E0, 50.76E0, 55.05E0, 61.01E0, 66.40E0, 75.47E0, 81.78E0};
+
+// http://www.itl.nist.gov/div898/strd/nls/data/misra1a.shtml
+void testNistMisra1a(void)
+{
+ const int n=2;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 500., 0.0001;
+ // do the computation
+ misra1a_functor functor;
+ LevenbergMarquardt<misra1a_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 19);
+ VERIFY_IS_EQUAL(lm.njev(), 15);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.2455138894E-01);
+ // check x
+ VERIFY_IS_APPROX(x[0], 2.3894212918E+02);
+ VERIFY_IS_APPROX(x[1], 5.5015643181E-04);
+
+ /*
+ * Second try
+ */
+ x<< 250., 0.0005;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 5);
+ VERIFY_IS_EQUAL(lm.njev(), 4);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.2455138894E-01);
+ // check x
+ VERIFY_IS_APPROX(x[0], 2.3894212918E+02);
+ VERIFY_IS_APPROX(x[1], 5.5015643181E-04);
+}
+
+struct hahn1_functor : DenseFunctor<double>
+{
+ hahn1_functor(void) : DenseFunctor<double>(7,236) {}
+ static const double m_x[236];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ static const double m_y[236] = { .591E0 , 1.547E0 , 2.902E0 , 2.894E0 , 4.703E0 , 6.307E0 , 7.03E0 , 7.898E0 , 9.470E0 , 9.484E0 , 10.072E0 , 10.163E0 , 11.615E0 , 12.005E0 , 12.478E0 , 12.982E0 , 12.970E0 , 13.926E0 , 14.452E0 , 14.404E0 , 15.190E0 , 15.550E0 , 15.528E0 , 15.499E0 , 16.131E0 , 16.438E0 , 16.387E0 , 16.549E0 , 16.872E0 , 16.830E0 , 16.926E0 , 16.907E0 , 16.966E0 , 17.060E0 , 17.122E0 , 17.311E0 , 17.355E0 , 17.668E0 , 17.767E0 , 17.803E0 , 17.765E0 , 17.768E0 , 17.736E0 , 17.858E0 , 17.877E0 , 17.912E0 , 18.046E0 , 18.085E0 , 18.291E0 , 18.357E0 , 18.426E0 , 18.584E0 , 18.610E0 , 18.870E0 , 18.795E0 , 19.111E0 , .367E0 , .796E0 , 0.892E0 , 1.903E0 , 2.150E0 , 3.697E0 , 5.870E0 , 6.421E0 , 7.422E0 , 9.944E0 , 11.023E0 , 11.87E0 , 12.786E0 , 14.067E0 , 13.974E0 , 14.462E0 , 14.464E0 , 15.381E0 , 15.483E0 , 15.59E0 , 16.075E0 , 16.347E0 , 16.181E0 , 16.915E0 , 17.003E0 , 16.978E0 , 17.756E0 , 17.808E0 , 17.868E0 , 18.481E0 , 18.486E0 , 19.090E0 , 16.062E0 , 16.337E0 , 16.345E0 ,
+ 16.388E0 , 17.159E0 , 17.116E0 , 17.164E0 , 17.123E0 , 17.979E0 , 17.974E0 , 18.007E0 , 17.993E0 , 18.523E0 , 18.669E0 , 18.617E0 , 19.371E0 , 19.330E0 , 0.080E0 , 0.248E0 , 1.089E0 , 1.418E0 , 2.278E0 , 3.624E0 , 4.574E0 , 5.556E0 , 7.267E0 , 7.695E0 , 9.136E0 , 9.959E0 , 9.957E0 , 11.600E0 , 13.138E0 , 13.564E0 , 13.871E0 , 13.994E0 , 14.947E0 , 15.473E0 , 15.379E0 , 15.455E0 , 15.908E0 , 16.114E0 , 17.071E0 , 17.135E0 , 17.282E0 , 17.368E0 , 17.483E0 , 17.764E0 , 18.185E0 , 18.271E0 , 18.236E0 , 18.237E0 , 18.523E0 , 18.627E0 , 18.665E0 , 19.086E0 , 0.214E0 , 0.943E0 , 1.429E0 , 2.241E0 , 2.951E0 , 3.782E0 , 4.757E0 , 5.602E0 , 7.169E0 , 8.920E0 , 10.055E0 , 12.035E0 , 12.861E0 , 13.436E0 , 14.167E0 , 14.755E0 , 15.168E0 , 15.651E0 , 15.746E0 , 16.216E0 , 16.445E0 , 16.965E0 , 17.121E0 , 17.206E0 , 17.250E0 , 17.339E0 , 17.793E0 , 18.123E0 , 18.49E0 , 18.566E0 , 18.645E0 , 18.706E0 , 18.924E0 , 19.1E0 , 0.375E0 , 0.471E0 , 1.504E0 , 2.204E0 , 2.813E0 , 4.765E0 , 9.835E0 , 10.040E0 , 11.946E0 ,
+12.596E0 ,
+13.303E0 , 13.922E0 , 14.440E0 , 14.951E0 , 15.627E0 , 15.639E0 , 15.814E0 , 16.315E0 , 16.334E0 , 16.430E0 , 16.423E0 , 17.024E0 , 17.009E0 , 17.165E0 , 17.134E0 , 17.349E0 , 17.576E0 , 17.848E0 , 18.090E0 , 18.276E0 , 18.404E0 , 18.519E0 , 19.133E0 , 19.074E0 , 19.239E0 , 19.280E0 , 19.101E0 , 19.398E0 , 19.252E0 , 19.89E0 , 20.007E0 , 19.929E0 , 19.268E0 , 19.324E0 , 20.049E0 , 20.107E0 , 20.062E0 , 20.065E0 , 19.286E0 , 19.972E0 , 20.088E0 , 20.743E0 , 20.83E0 , 20.935E0 , 21.035E0 , 20.93E0 , 21.074E0 , 21.085E0 , 20.935E0 };
+
+ // int called=0; printf("call hahn1_functor with iflag=%d, called=%d\n", iflag, called); if (iflag==1) called++;
+
+ assert(b.size()==7);
+ assert(fvec.size()==236);
+ for(int i=0; i<236; i++) {
+ double x=m_x[i], xx=x*x, xxx=xx*x;
+ fvec[i] = (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) / (1.+b[4]*x+b[5]*xx+b[6]*xxx) - m_y[i];
+ }
+ return 0;
+ }
+
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==7);
+ assert(fjac.rows()==236);
+ assert(fjac.cols()==7);
+ for(int i=0; i<236; i++) {
+ double x=m_x[i], xx=x*x, xxx=xx*x;
+ double fact = 1./(1.+b[4]*x+b[5]*xx+b[6]*xxx);
+ fjac(i,0) = 1.*fact;
+ fjac(i,1) = x*fact;
+ fjac(i,2) = xx*fact;
+ fjac(i,3) = xxx*fact;
+ fact = - (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) * fact * fact;
+ fjac(i,4) = x*fact;
+ fjac(i,5) = xx*fact;
+ fjac(i,6) = xxx*fact;
+ }
+ return 0;
+ }
+};
+const double hahn1_functor::m_x[236] = { 24.41E0 , 34.82E0 , 44.09E0 , 45.07E0 , 54.98E0 , 65.51E0 , 70.53E0 , 75.70E0 , 89.57E0 , 91.14E0 , 96.40E0 , 97.19E0 , 114.26E0 , 120.25E0 , 127.08E0 , 133.55E0 , 133.61E0 , 158.67E0 , 172.74E0 , 171.31E0 , 202.14E0 , 220.55E0 , 221.05E0 , 221.39E0 , 250.99E0 , 268.99E0 , 271.80E0 , 271.97E0 , 321.31E0 , 321.69E0 , 330.14E0 , 333.03E0 , 333.47E0 , 340.77E0 , 345.65E0 , 373.11E0 , 373.79E0 , 411.82E0 , 419.51E0 , 421.59E0 , 422.02E0 , 422.47E0 , 422.61E0 , 441.75E0 , 447.41E0 , 448.7E0 , 472.89E0 , 476.69E0 , 522.47E0 , 522.62E0 , 524.43E0 , 546.75E0 , 549.53E0 , 575.29E0 , 576.00E0 , 625.55E0 , 20.15E0 , 28.78E0 , 29.57E0 , 37.41E0 , 39.12E0 , 50.24E0 , 61.38E0 , 66.25E0 , 73.42E0 , 95.52E0 , 107.32E0 , 122.04E0 , 134.03E0 , 163.19E0 , 163.48E0 , 175.70E0 , 179.86E0 , 211.27E0 , 217.78E0 , 219.14E0 , 262.52E0 , 268.01E0 , 268.62E0 , 336.25E0 , 337.23E0 , 339.33E0 , 427.38E0 , 428.58E0 , 432.68E0 , 528.99E0 , 531.08E0 , 628.34E0 , 253.24E0 , 273.13E0 , 273.66E0 ,
+282.10E0 , 346.62E0 , 347.19E0 , 348.78E0 , 351.18E0 , 450.10E0 , 450.35E0 , 451.92E0 , 455.56E0 , 552.22E0 , 553.56E0 , 555.74E0 , 652.59E0 , 656.20E0 , 14.13E0 , 20.41E0 , 31.30E0 , 33.84E0 , 39.70E0 , 48.83E0 , 54.50E0 , 60.41E0 , 72.77E0 , 75.25E0 , 86.84E0 , 94.88E0 , 96.40E0 , 117.37E0 , 139.08E0 , 147.73E0 , 158.63E0 , 161.84E0 , 192.11E0 , 206.76E0 , 209.07E0 , 213.32E0 , 226.44E0 , 237.12E0 , 330.90E0 , 358.72E0 , 370.77E0 , 372.72E0 , 396.24E0 , 416.59E0 , 484.02E0 , 495.47E0 , 514.78E0 , 515.65E0 , 519.47E0 , 544.47E0 , 560.11E0 , 620.77E0 , 18.97E0 , 28.93E0 , 33.91E0 , 40.03E0 , 44.66E0 , 49.87E0 , 55.16E0 , 60.90E0 , 72.08E0 , 85.15E0 , 97.06E0 , 119.63E0 , 133.27E0 , 143.84E0 , 161.91E0 , 180.67E0 , 198.44E0 , 226.86E0 , 229.65E0 , 258.27E0 , 273.77E0 , 339.15E0 , 350.13E0 , 362.75E0 , 371.03E0 , 393.32E0 , 448.53E0 , 473.78E0 , 511.12E0 , 524.70E0 , 548.75E0 , 551.64E0 , 574.02E0 , 623.86E0 , 21.46E0 , 24.33E0 , 33.43E0 , 39.22E0 , 44.18E0 , 55.02E0 , 94.33E0 , 96.44E0 , 118.82E0 , 128.48E0 ,
+141.94E0 , 156.92E0 , 171.65E0 , 190.00E0 , 223.26E0 , 223.88E0 , 231.50E0 , 265.05E0 , 269.44E0 , 271.78E0 , 273.46E0 , 334.61E0 , 339.79E0 , 349.52E0 , 358.18E0 , 377.98E0 , 394.77E0 , 429.66E0 , 468.22E0 , 487.27E0 , 519.54E0 , 523.03E0 , 612.99E0 , 638.59E0 , 641.36E0 , 622.05E0 , 631.50E0 , 663.97E0 , 646.9E0 , 748.29E0 , 749.21E0 , 750.14E0 , 647.04E0 , 646.89E0 , 746.9E0 , 748.43E0 , 747.35E0 , 749.27E0 , 647.61E0 , 747.78E0 , 750.51E0 , 851.37E0 , 845.97E0 , 847.54E0 , 849.93E0 , 851.61E0 , 849.75E0 , 850.98E0 , 848.23E0};
+
+// http://www.itl.nist.gov/div898/strd/nls/data/hahn1.shtml
+void testNistHahn1(void)
+{
+ const int n=7;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 10., -1., .05, -.00001, -.05, .001, -.000001;
+ // do the computation
+ hahn1_functor functor;
+ LevenbergMarquardt<hahn1_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 11);
+ VERIFY_IS_EQUAL(lm.njev(), 10);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.5324382854E+00);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.0776351733E+00);
+ VERIFY_IS_APPROX(x[1],-1.2269296921E-01);
+ VERIFY_IS_APPROX(x[2], 4.0863750610E-03);
+ VERIFY_IS_APPROX(x[3],-1.426264e-06); // should be: -1.4262662514E-06
+ VERIFY_IS_APPROX(x[4],-5.7609940901E-03);
+ VERIFY_IS_APPROX(x[5], 2.4053735503E-04);
+ VERIFY_IS_APPROX(x[6],-1.2314450199E-07);
+
+ /*
+ * Second try
+ */
+ x<< .1, -.1, .005, -.000001, -.005, .0001, -.0000001;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+// VERIFY_IS_EQUAL(lm.nfev(), 11);
+ VERIFY_IS_EQUAL(lm.njev(), 10);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.5324382854E+00);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.077640); // should be: 1.0776351733E+00
+ VERIFY_IS_APPROX(x[1], -0.1226933); // should be: -1.2269296921E-01
+ VERIFY_IS_APPROX(x[2], 0.004086383); // should be: 4.0863750610E-03
+ VERIFY_IS_APPROX(x[3], -1.426277e-06); // should be: -1.4262662514E-06
+ VERIFY_IS_APPROX(x[4],-5.7609940901E-03);
+ VERIFY_IS_APPROX(x[5], 0.00024053772); // should be: 2.4053735503E-04
+ VERIFY_IS_APPROX(x[6], -1.231450e-07); // should be: -1.2314450199E-07
+
+}
+
+struct misra1d_functor : DenseFunctor<double>
+{
+ misra1d_functor(void) : DenseFunctor<double>(2,14) {}
+ static const double x[14];
+ static const double y[14];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==2);
+ assert(fvec.size()==14);
+ for(int i=0; i<14; i++) {
+ fvec[i] = b[0]*b[1]*x[i]/(1.+b[1]*x[i]) - y[i];
+ }
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==2);
+ assert(fjac.rows()==14);
+ assert(fjac.cols()==2);
+ for(int i=0; i<14; i++) {
+ double den = 1.+b[1]*x[i];
+ fjac(i,0) = b[1]*x[i] / den;
+ fjac(i,1) = b[0]*x[i]*(den-b[1]*x[i])/den/den;
+ }
+ return 0;
+ }
+};
+const double misra1d_functor::x[14] = { 77.6E0, 114.9E0, 141.1E0, 190.8E0, 239.9E0, 289.0E0, 332.8E0, 378.4E0, 434.8E0, 477.3E0, 536.8E0, 593.1E0, 689.1E0, 760.0E0};
+const double misra1d_functor::y[14] = { 10.07E0, 14.73E0, 17.94E0, 23.93E0, 29.61E0, 35.18E0, 40.02E0, 44.82E0, 50.76E0, 55.05E0, 61.01E0, 66.40E0, 75.47E0, 81.78E0};
+
+// http://www.itl.nist.gov/div898/strd/nls/data/misra1d.shtml
+void testNistMisra1d(void)
+{
+ const int n=2;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 500., 0.0001;
+ // do the computation
+ misra1d_functor functor;
+ LevenbergMarquardt<misra1d_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 9);
+ VERIFY_IS_EQUAL(lm.njev(), 7);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.6419295283E-02);
+ // check x
+ VERIFY_IS_APPROX(x[0], 4.3736970754E+02);
+ VERIFY_IS_APPROX(x[1], 3.0227324449E-04);
+
+ /*
+ * Second try
+ */
+ x<< 450., 0.0003;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 4);
+ VERIFY_IS_EQUAL(lm.njev(), 3);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.6419295283E-02);
+ // check x
+ VERIFY_IS_APPROX(x[0], 4.3736970754E+02);
+ VERIFY_IS_APPROX(x[1], 3.0227324449E-04);
+}
+
+
+struct lanczos1_functor : DenseFunctor<double>
+{
+ lanczos1_functor(void) : DenseFunctor<double>(6,24) {}
+ static const double x[24];
+ static const double y[24];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==6);
+ assert(fvec.size()==24);
+ for(int i=0; i<24; i++)
+ fvec[i] = b[0]*exp(-b[1]*x[i]) + b[2]*exp(-b[3]*x[i]) + b[4]*exp(-b[5]*x[i]) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==6);
+ assert(fjac.rows()==24);
+ assert(fjac.cols()==6);
+ for(int i=0; i<24; i++) {
+ fjac(i,0) = exp(-b[1]*x[i]);
+ fjac(i,1) = -b[0]*x[i]*exp(-b[1]*x[i]);
+ fjac(i,2) = exp(-b[3]*x[i]);
+ fjac(i,3) = -b[2]*x[i]*exp(-b[3]*x[i]);
+ fjac(i,4) = exp(-b[5]*x[i]);
+ fjac(i,5) = -b[4]*x[i]*exp(-b[5]*x[i]);
+ }
+ return 0;
+ }
+};
+const double lanczos1_functor::x[24] = { 0.000000000000E+00, 5.000000000000E-02, 1.000000000000E-01, 1.500000000000E-01, 2.000000000000E-01, 2.500000000000E-01, 3.000000000000E-01, 3.500000000000E-01, 4.000000000000E-01, 4.500000000000E-01, 5.000000000000E-01, 5.500000000000E-01, 6.000000000000E-01, 6.500000000000E-01, 7.000000000000E-01, 7.500000000000E-01, 8.000000000000E-01, 8.500000000000E-01, 9.000000000000E-01, 9.500000000000E-01, 1.000000000000E+00, 1.050000000000E+00, 1.100000000000E+00, 1.150000000000E+00 };
+const double lanczos1_functor::y[24] = { 2.513400000000E+00 ,2.044333373291E+00 ,1.668404436564E+00 ,1.366418021208E+00 ,1.123232487372E+00 ,9.268897180037E-01 ,7.679338563728E-01 ,6.388775523106E-01 ,5.337835317402E-01 ,4.479363617347E-01 ,3.775847884350E-01 ,3.197393199326E-01 ,2.720130773746E-01 ,2.324965529032E-01 ,1.996589546065E-01 ,1.722704126914E-01 ,1.493405660168E-01 ,1.300700206922E-01 ,1.138119324644E-01 ,1.000415587559E-01 ,8.833209084540E-02 ,7.833544019350E-02 ,6.976693743449E-02 ,6.239312536719E-02 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/lanczos1.shtml
+void testNistLanczos1(void)
+{
+ const int n=6;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 1.2, 0.3, 5.6, 5.5, 6.5, 7.6;
+ // do the computation
+ lanczos1_functor functor;
+ LevenbergMarquardt<lanczos1_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 2);
+ VERIFY_IS_EQUAL(lm.nfev(), 79);
+ VERIFY_IS_EQUAL(lm.njev(), 72);
+ // check norm^2
+// VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.430899764097e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ // check x
+ VERIFY_IS_APPROX(x[0], 9.5100000027E-02);
+ VERIFY_IS_APPROX(x[1], 1.0000000001E+00);
+ VERIFY_IS_APPROX(x[2], 8.6070000013E-01);
+ VERIFY_IS_APPROX(x[3], 3.0000000002E+00);
+ VERIFY_IS_APPROX(x[4], 1.5575999998E+00);
+ VERIFY_IS_APPROX(x[5], 5.0000000001E+00);
+
+ /*
+ * Second try
+ */
+ x<< 0.5, 0.7, 3.6, 4.2, 4., 6.3;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 2);
+ VERIFY_IS_EQUAL(lm.nfev(), 9);
+ VERIFY_IS_EQUAL(lm.njev(), 8);
+ // check norm^2
+// VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.428595533845e-25); // should be 1.4307867721E-25, but nist results are on 128-bit floats
+ // check x
+ VERIFY_IS_APPROX(x[0], 9.5100000027E-02);
+ VERIFY_IS_APPROX(x[1], 1.0000000001E+00);
+ VERIFY_IS_APPROX(x[2], 8.6070000013E-01);
+ VERIFY_IS_APPROX(x[3], 3.0000000002E+00);
+ VERIFY_IS_APPROX(x[4], 1.5575999998E+00);
+ VERIFY_IS_APPROX(x[5], 5.0000000001E+00);
+
+}
+
+struct rat42_functor : DenseFunctor<double>
+{
+ rat42_functor(void) : DenseFunctor<double>(3,9) {}
+ static const double x[9];
+ static const double y[9];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==3);
+ assert(fvec.size()==9);
+ for(int i=0; i<9; i++) {
+ fvec[i] = b[0] / (1.+exp(b[1]-b[2]*x[i])) - y[i];
+ }
+ return 0;
+ }
+
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==3);
+ assert(fjac.rows()==9);
+ assert(fjac.cols()==3);
+ for(int i=0; i<9; i++) {
+ double e = exp(b[1]-b[2]*x[i]);
+ fjac(i,0) = 1./(1.+e);
+ fjac(i,1) = -b[0]*e/(1.+e)/(1.+e);
+ fjac(i,2) = +b[0]*e*x[i]/(1.+e)/(1.+e);
+ }
+ return 0;
+ }
+};
+const double rat42_functor::x[9] = { 9.000E0, 14.000E0, 21.000E0, 28.000E0, 42.000E0, 57.000E0, 63.000E0, 70.000E0, 79.000E0 };
+const double rat42_functor::y[9] = { 8.930E0 ,10.800E0 ,18.590E0 ,22.330E0 ,39.350E0 ,56.110E0 ,61.730E0 ,64.620E0 ,67.080E0 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky2.shtml
+void testNistRat42(void)
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 100., 1., 0.1;
+ // do the computation
+ rat42_functor functor;
+ LevenbergMarquardt<rat42_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 10);
+ VERIFY_IS_EQUAL(lm.njev(), 8);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.0565229338E+00);
+ // check x
+ VERIFY_IS_APPROX(x[0], 7.2462237576E+01);
+ VERIFY_IS_APPROX(x[1], 2.6180768402E+00);
+ VERIFY_IS_APPROX(x[2], 6.7359200066E-02);
+
+ /*
+ * Second try
+ */
+ x<< 75., 2.5, 0.07;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 6);
+ VERIFY_IS_EQUAL(lm.njev(), 5);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.0565229338E+00);
+ // check x
+ VERIFY_IS_APPROX(x[0], 7.2462237576E+01);
+ VERIFY_IS_APPROX(x[1], 2.6180768402E+00);
+ VERIFY_IS_APPROX(x[2], 6.7359200066E-02);
+}
+
+struct MGH10_functor : DenseFunctor<double>
+{
+ MGH10_functor(void) : DenseFunctor<double>(3,16) {}
+ static const double x[16];
+ static const double y[16];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==3);
+ assert(fvec.size()==16);
+ for(int i=0; i<16; i++)
+ fvec[i] = b[0] * exp(b[1]/(x[i]+b[2])) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==3);
+ assert(fjac.rows()==16);
+ assert(fjac.cols()==3);
+ for(int i=0; i<16; i++) {
+ double factor = 1./(x[i]+b[2]);
+ double e = exp(b[1]*factor);
+ fjac(i,0) = e;
+ fjac(i,1) = b[0]*factor*e;
+ fjac(i,2) = -b[1]*b[0]*factor*factor*e;
+ }
+ return 0;
+ }
+};
+const double MGH10_functor::x[16] = { 5.000000E+01, 5.500000E+01, 6.000000E+01, 6.500000E+01, 7.000000E+01, 7.500000E+01, 8.000000E+01, 8.500000E+01, 9.000000E+01, 9.500000E+01, 1.000000E+02, 1.050000E+02, 1.100000E+02, 1.150000E+02, 1.200000E+02, 1.250000E+02 };
+const double MGH10_functor::y[16] = { 3.478000E+04, 2.861000E+04, 2.365000E+04, 1.963000E+04, 1.637000E+04, 1.372000E+04, 1.154000E+04, 9.744000E+03, 8.261000E+03, 7.030000E+03, 6.005000E+03, 5.147000E+03, 4.427000E+03, 3.820000E+03, 3.307000E+03, 2.872000E+03 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/mgh10.shtml
+void testNistMGH10(void)
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 2., 400000., 25000.;
+ // do the computation
+ MGH10_functor functor;
+ LevenbergMarquardt<MGH10_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 284 );
+ VERIFY_IS_EQUAL(lm.njev(), 249 );
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7945855171E+01);
+ // check x
+ VERIFY_IS_APPROX(x[0], 5.6096364710E-03);
+ VERIFY_IS_APPROX(x[1], 6.1813463463E+03);
+ VERIFY_IS_APPROX(x[2], 3.4522363462E+02);
+
+ /*
+ * Second try
+ */
+ x<< 0.02, 4000., 250.;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 126);
+ VERIFY_IS_EQUAL(lm.njev(), 116);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7945855171E+01);
+ // check x
+ VERIFY_IS_APPROX(x[0], 5.6096364710E-03);
+ VERIFY_IS_APPROX(x[1], 6.1813463463E+03);
+ VERIFY_IS_APPROX(x[2], 3.4522363462E+02);
+}
+
+
+struct BoxBOD_functor : DenseFunctor<double>
+{
+ BoxBOD_functor(void) : DenseFunctor<double>(2,6) {}
+ static const double x[6];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ static const double y[6] = { 109., 149., 149., 191., 213., 224. };
+ assert(b.size()==2);
+ assert(fvec.size()==6);
+ for(int i=0; i<6; i++)
+ fvec[i] = b[0]*(1.-exp(-b[1]*x[i])) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==2);
+ assert(fjac.rows()==6);
+ assert(fjac.cols()==2);
+ for(int i=0; i<6; i++) {
+ double e = exp(-b[1]*x[i]);
+ fjac(i,0) = 1.-e;
+ fjac(i,1) = b[0]*x[i]*e;
+ }
+ return 0;
+ }
+};
+const double BoxBOD_functor::x[6] = { 1., 2., 3., 5., 7., 10. };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/boxbod.shtml
+void testNistBoxBOD(void)
+{
+ const int n=2;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 1., 1.;
+ // do the computation
+ BoxBOD_functor functor;
+ LevenbergMarquardt<BoxBOD_functor> lm(functor);
+ lm.setFtol(1.E6*NumTraits<double>::epsilon());
+ lm.setXtol(1.E6*NumTraits<double>::epsilon());
+ lm.setFactor(10);
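+ // "factor" bounds the length of the initial trust-region step (MINPACK semantics); the default is 100.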
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 31);
+ VERIFY_IS_EQUAL(lm.njev(), 25);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.1680088766E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 2.1380940889E+02);
+ VERIFY_IS_APPROX(x[1], 5.4723748542E-01);
+
+ /*
+ * Second try
+ */
+ x<< 100., 0.75;
+ // do the computation
+ lm.resetParameters();
+ lm.setFtol(NumTraits<double>::epsilon());
+ lm.setXtol( NumTraits<double>::epsilon());
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 15 );
+ VERIFY_IS_EQUAL(lm.njev(), 14 );
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.1680088766E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 2.1380940889E+02);
+ VERIFY_IS_APPROX(x[1], 5.4723748542E-01);
+}
+
+struct MGH17_functor : DenseFunctor<double>
+{
+ MGH17_functor(void) : DenseFunctor<double>(5,33) {}
+ static const double x[33];
+ static const double y[33];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==5);
+ assert(fvec.size()==33);
+ for(int i=0; i<33; i++)
+ fvec[i] = b[0] + b[1]*exp(-b[3]*x[i]) + b[2]*exp(-b[4]*x[i]) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==5);
+ assert(fjac.rows()==33);
+ assert(fjac.cols()==5);
+ for(int i=0; i<33; i++) {
+ fjac(i,0) = 1.;
+ fjac(i,1) = exp(-b[3]*x[i]);
+ fjac(i,2) = exp(-b[4]*x[i]);
+ fjac(i,3) = -x[i]*b[1]*exp(-b[3]*x[i]);
+ fjac(i,4) = -x[i]*b[2]*exp(-b[4]*x[i]);
+ }
+ return 0;
+ }
+};
+const double MGH17_functor::x[33] = { 0.000000E+00, 1.000000E+01, 2.000000E+01, 3.000000E+01, 4.000000E+01, 5.000000E+01, 6.000000E+01, 7.000000E+01, 8.000000E+01, 9.000000E+01, 1.000000E+02, 1.100000E+02, 1.200000E+02, 1.300000E+02, 1.400000E+02, 1.500000E+02, 1.600000E+02, 1.700000E+02, 1.800000E+02, 1.900000E+02, 2.000000E+02, 2.100000E+02, 2.200000E+02, 2.300000E+02, 2.400000E+02, 2.500000E+02, 2.600000E+02, 2.700000E+02, 2.800000E+02, 2.900000E+02, 3.000000E+02, 3.100000E+02, 3.200000E+02 };
+const double MGH17_functor::y[33] = { 8.440000E-01, 9.080000E-01, 9.320000E-01, 9.360000E-01, 9.250000E-01, 9.080000E-01, 8.810000E-01, 8.500000E-01, 8.180000E-01, 7.840000E-01, 7.510000E-01, 7.180000E-01, 6.850000E-01, 6.580000E-01, 6.280000E-01, 6.030000E-01, 5.800000E-01, 5.580000E-01, 5.380000E-01, 5.220000E-01, 5.060000E-01, 4.900000E-01, 4.780000E-01, 4.670000E-01, 4.570000E-01, 4.480000E-01, 4.380000E-01, 4.310000E-01, 4.240000E-01, 4.200000E-01, 4.140000E-01, 4.110000E-01, 4.060000E-01 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/mgh17.shtml
+void testNistMGH17(void)
+{
+ const int n=5;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 50., 150., -100., 1., 2.;
+ // do the computation
+ MGH17_functor functor;
+ LevenbergMarquardt<MGH17_functor> lm(functor);
+ lm.setFtol(NumTraits<double>::epsilon());
+ lm.setXtol(NumTraits<double>::epsilon());
+ lm.setMaxfev(1000);
+ info = lm.minimize(x);
+
+ // check return value
+// VERIFY_IS_EQUAL(info, 2); //FIXME Use (lm.info() == Success)
+// VERIFY_IS_EQUAL(lm.nfev(), 602 );
+ VERIFY_IS_EQUAL(lm.njev(), 545 );
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.4648946975E-05);
+ // check x
+ VERIFY_IS_APPROX(x[0], 3.7541005211E-01);
+ VERIFY_IS_APPROX(x[1], 1.9358469127E+00);
+ VERIFY_IS_APPROX(x[2], -1.4646871366E+00);
+ VERIFY_IS_APPROX(x[3], 1.2867534640E-02);
+ VERIFY_IS_APPROX(x[4], 2.2122699662E-02);
+
+ /*
+ * Second try
+ */
+ x<< 0.5 ,1.5 ,-1 ,0.01 ,0.02;
+ // do the computation
+ lm.resetParameters();
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 18);
+ VERIFY_IS_EQUAL(lm.njev(), 15);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.4648946975E-05);
+ // check x
+ VERIFY_IS_APPROX(x[0], 3.7541005211E-01);
+ VERIFY_IS_APPROX(x[1], 1.9358469127E+00);
+ VERIFY_IS_APPROX(x[2], -1.4646871366E+00);
+ VERIFY_IS_APPROX(x[3], 1.2867534640E-02);
+ VERIFY_IS_APPROX(x[4], 2.2122699662E-02);
+}
+
+struct MGH09_functor : DenseFunctor<double>
+{
+ MGH09_functor(void) : DenseFunctor<double>(4,11) {}
+ static const double _x[11];
+ static const double y[11];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==4);
+ assert(fvec.size()==11);
+ for(int i=0; i<11; i++) {
+ double x = _x[i], xx=x*x;
+ fvec[i] = b[0]*(xx+x*b[1])/(xx+x*b[2]+b[3]) - y[i];
+ }
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==4);
+ assert(fjac.rows()==11);
+ assert(fjac.cols()==4);
+ for(int i=0; i<11; i++) {
+ double x = _x[i], xx=x*x;
+ double factor = 1./(xx+x*b[2]+b[3]);
+ fjac(i,0) = (xx+x*b[1]) * factor;
+ fjac(i,1) = b[0]*x* factor;
+ fjac(i,2) = - b[0]*(xx+x*b[1]) * x * factor * factor;
+ fjac(i,3) = - b[0]*(xx+x*b[1]) * factor * factor;
+ }
+ return 0;
+ }
+};
+const double MGH09_functor::_x[11] = { 4., 2., 1., 5.E-1 , 2.5E-01, 1.670000E-01, 1.250000E-01, 1.E-01, 8.330000E-02, 7.140000E-02, 6.250000E-02 };
+const double MGH09_functor::y[11] = { 1.957000E-01, 1.947000E-01, 1.735000E-01, 1.600000E-01, 8.440000E-02, 6.270000E-02, 4.560000E-02, 3.420000E-02, 3.230000E-02, 2.350000E-02, 2.460000E-02 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/mgh09.shtml
+void testNistMGH09(void)
+{
+ const int n=4;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 25., 39, 41.5, 39.;
+ // do the computation
+ MGH09_functor functor;
+ LevenbergMarquardt<MGH09_functor> lm(functor);
+ lm.setMaxfev(1000);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 490 );
+ VERIFY_IS_EQUAL(lm.njev(), 376 );
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 3.0750560385E-04);
+ // check x
+ VERIFY_IS_APPROX(x[0], 0.1928077089); // should be 1.9280693458E-01
+ VERIFY_IS_APPROX(x[1], 0.19126423573); // should be 1.9128232873E-01
+ VERIFY_IS_APPROX(x[2], 0.12305309914); // should be 1.2305650693E-01
+ VERIFY_IS_APPROX(x[3], 0.13605395375); // should be 1.3606233068E-01
+
+ /*
+ * Second try
+ */
+ x<< 0.25, 0.39, 0.415, 0.39;
+ // do the computation
+ lm.resetParameters();
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 18);
+ VERIFY_IS_EQUAL(lm.njev(), 16);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 3.0750560385E-04);
+ // check x
+ VERIFY_IS_APPROX(x[0], 0.19280781); // should be 1.9280693458E-01
+ VERIFY_IS_APPROX(x[1], 0.19126265); // should be 1.9128232873E-01
+ VERIFY_IS_APPROX(x[2], 0.12305280); // should be 1.2305650693E-01
+ VERIFY_IS_APPROX(x[3], 0.13605322); // should be 1.3606233068E-01
+}
+
+
+
+struct Bennett5_functor : DenseFunctor<double>
+{
+ Bennett5_functor(void) : DenseFunctor<double>(3,154) {}
+ static const double x[154];
+ static const double y[154];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==3);
+ assert(fvec.size()==154);
+ for(int i=0; i<154; i++)
+ fvec[i] = b[0]* pow(b[1]+x[i],-1./b[2]) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==3);
+ assert(fjac.rows()==154);
+ assert(fjac.cols()==3);
+ for(int i=0; i<154; i++) {
+ double e = pow(b[1]+x[i],-1./b[2]);
+ fjac(i,0) = e;
+ fjac(i,1) = - b[0]*e/b[2]/(b[1]+x[i]);
+ fjac(i,2) = b[0]*e*log(b[1]+x[i])/b[2]/b[2];
+ }
+ return 0;
+ }
+};
+const double Bennett5_functor::x[154] = { 7.447168E0, 8.102586E0, 8.452547E0, 8.711278E0, 8.916774E0, 9.087155E0, 9.232590E0, 9.359535E0, 9.472166E0, 9.573384E0, 9.665293E0, 9.749461E0, 9.827092E0, 9.899128E0, 9.966321E0, 10.029280E0, 10.088510E0, 10.144430E0, 10.197380E0, 10.247670E0, 10.295560E0, 10.341250E0, 10.384950E0, 10.426820E0, 10.467000E0, 10.505640E0, 10.542830E0, 10.578690E0, 10.613310E0, 10.646780E0, 10.679150E0, 10.710520E0, 10.740920E0, 10.770440E0, 10.799100E0, 10.826970E0, 10.854080E0, 10.880470E0, 10.906190E0, 10.931260E0, 10.955720E0, 10.979590E0, 11.002910E0, 11.025700E0, 11.047980E0, 11.069770E0, 11.091100E0, 11.111980E0, 11.132440E0, 11.152480E0, 11.172130E0, 11.191410E0, 11.210310E0, 11.228870E0, 11.247090E0, 11.264980E0, 11.282560E0, 11.299840E0, 11.316820E0, 11.333520E0, 11.349940E0, 11.366100E0, 11.382000E0, 11.397660E0, 11.413070E0, 11.428240E0, 11.443200E0, 11.457930E0, 11.472440E0, 11.486750E0, 11.500860E0, 11.514770E0, 11.528490E0, 11.542020E0, 11.555380E0, 11.568550E0,
+11.581560E0, 11.594420E0, 11.607121E0, 11.619640E0, 11.632000E0, 11.644210E0, 11.656280E0, 11.668200E0, 11.679980E0, 11.691620E0, 11.703130E0, 11.714510E0, 11.725760E0, 11.736880E0, 11.747890E0, 11.758780E0, 11.769550E0, 11.780200E0, 11.790730E0, 11.801160E0, 11.811480E0, 11.821700E0, 11.831810E0, 11.841820E0, 11.851730E0, 11.861550E0, 11.871270E0, 11.880890E0, 11.890420E0, 11.899870E0, 11.909220E0, 11.918490E0, 11.927680E0, 11.936780E0, 11.945790E0, 11.954730E0, 11.963590E0, 11.972370E0, 11.981070E0, 11.989700E0, 11.998260E0, 12.006740E0, 12.015150E0, 12.023490E0, 12.031760E0, 12.039970E0, 12.048100E0, 12.056170E0, 12.064180E0, 12.072120E0, 12.080010E0, 12.087820E0, 12.095580E0, 12.103280E0, 12.110920E0, 12.118500E0, 12.126030E0, 12.133500E0, 12.140910E0, 12.148270E0, 12.155570E0, 12.162830E0, 12.170030E0, 12.177170E0, 12.184270E0, 12.191320E0, 12.198320E0, 12.205270E0, 12.212170E0, 12.219030E0, 12.225840E0, 12.232600E0, 12.239320E0, 12.245990E0, 12.252620E0, 12.259200E0, 12.265750E0, 12.272240E0 };
+const double Bennett5_functor::y[154] = { -34.834702E0 ,-34.393200E0 ,-34.152901E0 ,-33.979099E0 ,-33.845901E0 ,-33.732899E0 ,-33.640301E0 ,-33.559200E0 ,-33.486801E0 ,-33.423100E0 ,-33.365101E0 ,-33.313000E0 ,-33.260899E0 ,-33.217400E0 ,-33.176899E0 ,-33.139198E0 ,-33.101601E0 ,-33.066799E0 ,-33.035000E0 ,-33.003101E0 ,-32.971298E0 ,-32.942299E0 ,-32.916302E0 ,-32.890202E0 ,-32.864101E0 ,-32.841000E0 ,-32.817799E0 ,-32.797501E0 ,-32.774300E0 ,-32.757000E0 ,-32.733799E0 ,-32.716400E0 ,-32.699100E0 ,-32.678799E0 ,-32.661400E0 ,-32.644001E0 ,-32.626701E0 ,-32.612202E0 ,-32.597698E0 ,-32.583199E0 ,-32.568699E0 ,-32.554298E0 ,-32.539799E0 ,-32.525299E0 ,-32.510799E0 ,-32.499199E0 ,-32.487598E0 ,-32.473202E0 ,-32.461601E0 ,-32.435501E0 ,-32.435501E0 ,-32.426800E0 ,-32.412300E0 ,-32.400799E0 ,-32.392101E0 ,-32.380501E0 ,-32.366001E0 ,-32.357300E0 ,-32.348598E0 ,-32.339901E0 ,-32.328400E0 ,-32.319698E0 ,-32.311001E0 ,-32.299400E0 ,-32.290699E0 ,-32.282001E0 ,-32.273300E0 ,-32.264599E0 ,-32.256001E0 ,-32.247299E0
+,-32.238602E0 ,-32.229900E0 ,-32.224098E0 ,-32.215401E0 ,-32.203800E0 ,-32.198002E0 ,-32.189400E0 ,-32.183601E0 ,-32.174900E0 ,-32.169102E0 ,-32.163300E0 ,-32.154598E0 ,-32.145901E0 ,-32.140099E0 ,-32.131401E0 ,-32.125599E0 ,-32.119801E0 ,-32.111198E0 ,-32.105400E0 ,-32.096699E0 ,-32.090900E0 ,-32.088001E0 ,-32.079300E0 ,-32.073502E0 ,-32.067699E0 ,-32.061901E0 ,-32.056099E0 ,-32.050301E0 ,-32.044498E0 ,-32.038799E0 ,-32.033001E0 ,-32.027199E0 ,-32.024300E0 ,-32.018501E0 ,-32.012699E0 ,-32.004002E0 ,-32.001099E0 ,-31.995300E0 ,-31.989500E0 ,-31.983700E0 ,-31.977900E0 ,-31.972099E0 ,-31.969299E0 ,-31.963501E0 ,-31.957701E0 ,-31.951900E0 ,-31.946100E0 ,-31.940300E0 ,-31.937401E0 ,-31.931601E0 ,-31.925800E0 ,-31.922899E0 ,-31.917101E0 ,-31.911301E0 ,-31.908400E0 ,-31.902599E0 ,-31.896900E0 ,-31.893999E0 ,-31.888201E0 ,-31.885300E0 ,-31.882401E0 ,-31.876600E0 ,-31.873699E0 ,-31.867901E0 ,-31.862101E0 ,-31.859200E0 ,-31.856300E0 ,-31.850500E0 ,-31.844700E0 ,-31.841801E0 ,-31.838900E0 ,-31.833099E0 ,-31.830200E0 ,
+-31.827299E0 ,-31.821600E0 ,-31.818701E0 ,-31.812901E0 ,-31.809999E0 ,-31.807100E0 ,-31.801300E0 ,-31.798401E0 ,-31.795500E0 ,-31.789700E0 ,-31.786800E0 };
+
+// http://www.itl.nist.gov/div898/strd/nls/data/bennett5.shtml
+void testNistBennett5(void)
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< -2000., 50., 0.8;
+ // do the computation
+ Bennett5_functor functor;
+ LevenbergMarquardt<Bennett5_functor> lm(functor);
+ lm.setMaxfev(1000);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 758);
+ VERIFY_IS_EQUAL(lm.njev(), 744);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.2404744073E-04);
+ // check x
+ VERIFY_IS_APPROX(x[0], -2.5235058043E+03);
+ VERIFY_IS_APPROX(x[1], 4.6736564644E+01);
+ VERIFY_IS_APPROX(x[2], 9.3218483193E-01);
+ /*
+ * Second try
+ */
+ x<< -1500., 45., 0.85;
+ // do the computation
+ lm.resetParameters();
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 203);
+ VERIFY_IS_EQUAL(lm.njev(), 192);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.2404744073E-04);
+ // check x
+ VERIFY_IS_APPROX(x[0], -2523.3007865); // should be -2.5235058043E+03
+ VERIFY_IS_APPROX(x[1], 46.735705771); // should be 4.6736564644E+01
+ VERIFY_IS_APPROX(x[2], 0.93219881891); // should be 9.3218483193E-01
+}
+
+struct thurber_functor : DenseFunctor<double>
+{
+ thurber_functor(void) : DenseFunctor<double>(7,37) {}
+ static const double _x[37];
+ static const double _y[37];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ // int called=0; printf("call hahn1_functor with iflag=%d, called=%d\n", iflag, called); if (iflag==1) called++;
+ assert(b.size()==7);
+ assert(fvec.size()==37);
+ for(int i=0; i<37; i++) {
+ double x=_x[i], xx=x*x, xxx=xx*x;
+ fvec[i] = (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) / (1.+b[4]*x+b[5]*xx+b[6]*xxx) - _y[i];
+ }
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==7);
+ assert(fjac.rows()==37);
+ assert(fjac.cols()==7);
+ for(int i=0; i<37; i++) {
+ double x=_x[i], xx=x*x, xxx=xx*x;
+ double fact = 1./(1.+b[4]*x+b[5]*xx+b[6]*xxx);
+ fjac(i,0) = 1.*fact;
+ fjac(i,1) = x*fact;
+ fjac(i,2) = xx*fact;
+ fjac(i,3) = xxx*fact;
+ fact = - (b[0]+b[1]*x+b[2]*xx+b[3]*xxx) * fact * fact;
+ fjac(i,4) = x*fact;
+ fjac(i,5) = xx*fact;
+ fjac(i,6) = xxx*fact;
+ }
+ return 0;
+ }
+};
+const double thurber_functor::_x[37] = { -3.067E0, -2.981E0, -2.921E0, -2.912E0, -2.840E0, -2.797E0, -2.702E0, -2.699E0, -2.633E0, -2.481E0, -2.363E0, -2.322E0, -1.501E0, -1.460E0, -1.274E0, -1.212E0, -1.100E0, -1.046E0, -0.915E0, -0.714E0, -0.566E0, -0.545E0, -0.400E0, -0.309E0, -0.109E0, -0.103E0, 0.010E0, 0.119E0, 0.377E0, 0.790E0, 0.963E0, 1.006E0, 1.115E0, 1.572E0, 1.841E0, 2.047E0, 2.200E0 };
+const double thurber_functor::_y[37] = { 80.574E0, 84.248E0, 87.264E0, 87.195E0, 89.076E0, 89.608E0, 89.868E0, 90.101E0, 92.405E0, 95.854E0, 100.696E0, 101.060E0, 401.672E0, 390.724E0, 567.534E0, 635.316E0, 733.054E0, 759.087E0, 894.206E0, 990.785E0, 1090.109E0, 1080.914E0, 1122.643E0, 1178.351E0, 1260.531E0, 1273.514E0, 1288.339E0, 1327.543E0, 1353.863E0, 1414.509E0, 1425.208E0, 1421.384E0, 1442.962E0, 1464.350E0, 1468.705E0, 1447.894E0, 1457.628E0};
+
+// http://www.itl.nist.gov/div898/strd/nls/data/thurber.shtml
+void testNistThurber(void)
+{
+ const int n=7;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 1000 ,1000 ,400 ,40 ,0.7,0.3,0.0 ;
+ // do the computation
+ thurber_functor functor;
+ LevenbergMarquardt<thurber_functor> lm(functor);
+ lm.setFtol(1.E4*NumTraits<double>::epsilon());
+ lm.setXtol(1.E4*NumTraits<double>::epsilon());
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 39);
+ VERIFY_IS_EQUAL(lm.njev(), 36);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.6427082397E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.2881396800E+03);
+ VERIFY_IS_APPROX(x[1], 1.4910792535E+03);
+ VERIFY_IS_APPROX(x[2], 5.8323836877E+02);
+ VERIFY_IS_APPROX(x[3], 7.5416644291E+01);
+ VERIFY_IS_APPROX(x[4], 9.6629502864E-01);
+ VERIFY_IS_APPROX(x[5], 3.9797285797E-01);
+ VERIFY_IS_APPROX(x[6], 4.9727297349E-02);
+
+ /*
+ * Second try
+ */
+ x<< 1300 ,1500 ,500 ,75 ,1 ,0.4 ,0.05 ;
+ // do the computation
+ lm.resetParameters();
+ lm.setFtol(1.E4*NumTraits<double>::epsilon());
+ lm.setXtol(1.E4*NumTraits<double>::epsilon());
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 29);
+ VERIFY_IS_EQUAL(lm.njev(), 28);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 5.6427082397E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.2881396800E+03);
+ VERIFY_IS_APPROX(x[1], 1.4910792535E+03);
+ VERIFY_IS_APPROX(x[2], 5.8323836877E+02);
+ VERIFY_IS_APPROX(x[3], 7.5416644291E+01);
+ VERIFY_IS_APPROX(x[4], 9.6629502864E-01);
+ VERIFY_IS_APPROX(x[5], 3.9797285797E-01);
+ VERIFY_IS_APPROX(x[6], 4.9727297349E-02);
+}
+
+struct rat43_functor : DenseFunctor<double>
+{
+ rat43_functor(void) : DenseFunctor<double>(4,15) {}
+ static const double x[15];
+ static const double y[15];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==4);
+ assert(fvec.size()==15);
+ for(int i=0; i<15; i++)
+ fvec[i] = b[0] * pow(1.+exp(b[1]-b[2]*x[i]),-1./b[3]) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==4);
+ assert(fjac.rows()==15);
+ assert(fjac.cols()==4);
+ for(int i=0; i<15; i++) {
+ double e = exp(b[1]-b[2]*x[i]);
+ double power = -1./b[3];
+ fjac(i,0) = pow(1.+e, power);
+ fjac(i,1) = power*b[0]*e*pow(1.+e, power-1.);
+ fjac(i,2) = -power*b[0]*e*x[i]*pow(1.+e, power-1.);
+ fjac(i,3) = b[0]*power*power*log(1.+e)*pow(1.+e, power);
+ }
+ return 0;
+ }
+};
+const double rat43_functor::x[15] = { 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15. };
+const double rat43_functor::y[15] = { 16.08, 33.83, 65.80, 97.20, 191.55, 326.20, 386.87, 520.53, 590.03, 651.92, 724.93, 699.56, 689.96, 637.56, 717.41 };
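+
+// rat43 is the NIST "Ratkowsky3" sigmoidal growth model
+//   f(x;b) = b0 / (1 + exp(b1 - b2*x))^(1/b3);
+// df() differentiates this expression directly, reusing e = exp(b1 - b2*x)
+// and power = -1/b3 across the four columns.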
+
+// http://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml
+void testNistRat43(void)
+{
+ const int n=4;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 100., 10., 1., 1.;
+ // do the computation
+ rat43_functor functor;
+ LevenbergMarquardt<rat43_functor> lm(functor);
+ lm.setFtol(1.E6*NumTraits<double>::epsilon());
+ lm.setXtol(1.E6*NumTraits<double>::epsilon());
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 27);
+ VERIFY_IS_EQUAL(lm.njev(), 20);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7864049080E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 6.9964151270E+02);
+ VERIFY_IS_APPROX(x[1], 5.2771253025E+00);
+ VERIFY_IS_APPROX(x[2], 7.5962938329E-01);
+ VERIFY_IS_APPROX(x[3], 1.2792483859E+00);
+
+ /*
+ * Second try
+ */
+ x<< 700., 5., 0.75, 1.3;
+ // do the computation
+ lm.resetParameters();
+ lm.setFtol(1.E5*NumTraits<double>::epsilon());
+ lm.setXtol(1.E5*NumTraits<double>::epsilon());
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 9);
+ VERIFY_IS_EQUAL(lm.njev(), 8);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 8.7864049080E+03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 6.9964151270E+02);
+ VERIFY_IS_APPROX(x[1], 5.2771253025E+00);
+ VERIFY_IS_APPROX(x[2], 7.5962938329E-01);
+ VERIFY_IS_APPROX(x[3], 1.2792483859E+00);
+}
+
+
+
+struct eckerle4_functor : DenseFunctor<double>
+{
+ eckerle4_functor(void) : DenseFunctor<double>(3,35) {}
+ static const double x[35];
+ static const double y[35];
+ int operator()(const VectorXd &b, VectorXd &fvec)
+ {
+ assert(b.size()==3);
+ assert(fvec.size()==35);
+ for(int i=0; i<35; i++)
+ fvec[i] = b[0]/b[1] * exp(-0.5*(x[i]-b[2])*(x[i]-b[2])/(b[1]*b[1])) - y[i];
+ return 0;
+ }
+ int df(const VectorXd &b, MatrixXd &fjac)
+ {
+ assert(b.size()==3);
+ assert(fjac.rows()==35);
+ assert(fjac.cols()==3);
+ for(int i=0; i<35; i++) {
+ double b12 = b[1]*b[1];
+ double e = exp(-0.5*(x[i]-b[2])*(x[i]-b[2])/b12);
+ fjac(i,0) = e / b[1];
+ fjac(i,1) = ((x[i]-b[2])*(x[i]-b[2])/b12-1.) * b[0]*e/b12;
+ fjac(i,2) = (x[i]-b[2])*e*b[0]/b[1]/b12;
+ }
+ return 0;
+ }
+};
+const double eckerle4_functor::x[35] = { 400.0, 405.0, 410.0, 415.0, 420.0, 425.0, 430.0, 435.0, 436.5, 438.0, 439.5, 441.0, 442.5, 444.0, 445.5, 447.0, 448.5, 450.0, 451.5, 453.0, 454.5, 456.0, 457.5, 459.0, 460.5, 462.0, 463.5, 465.0, 470.0, 475.0, 480.0, 485.0, 490.0, 495.0, 500.0};
+const double eckerle4_functor::y[35] = { 0.0001575, 0.0001699, 0.0002350, 0.0003102, 0.0004917, 0.0008710, 0.0017418, 0.0046400, 0.0065895, 0.0097302, 0.0149002, 0.0237310, 0.0401683, 0.0712559, 0.1264458, 0.2073413, 0.2902366, 0.3445623, 0.3698049, 0.3668534, 0.3106727, 0.2078154, 0.1164354, 0.0616764, 0.0337200, 0.0194023, 0.0117831, 0.0074357, 0.0022732, 0.0008800, 0.0004579, 0.0002345, 0.0001586, 0.0001143, 0.0000710 };
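+
+// eckerle4 is the NIST "Eckerle4" Gaussian peak model
+//   f(x;b) = (b0/b1) * exp(-(x - b2)^2 / (2*b1^2)).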
+
+// http://www.itl.nist.gov/div898/strd/nls/data/eckerle4.shtml
+void testNistEckerle4(void)
+{
+ const int n=3;
+ int info;
+
+ VectorXd x(n);
+
+ /*
+ * First try
+ */
+ x<< 1., 10., 500.;
+ // do the computation
+ eckerle4_functor functor;
+ LevenbergMarquardt<eckerle4_functor> lm(functor);
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 18);
+ VERIFY_IS_EQUAL(lm.njev(), 15);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.4635887487E-03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.5543827178);
+ VERIFY_IS_APPROX(x[1], 4.0888321754);
+ VERIFY_IS_APPROX(x[2], 4.5154121844E+02);
+
+ /*
+ * Second try
+ */
+ x<< 1.5, 5., 450.;
+ // do the computation
+ info = lm.minimize(x);
+
+ // check return value
+ VERIFY_IS_EQUAL(info, 1);
+ VERIFY_IS_EQUAL(lm.nfev(), 7);
+ VERIFY_IS_EQUAL(lm.njev(), 6);
+ // check norm^2
+ VERIFY_IS_APPROX(lm.fvec().squaredNorm(), 1.4635887487E-03);
+ // check x
+ VERIFY_IS_APPROX(x[0], 1.5543827178);
+ VERIFY_IS_APPROX(x[1], 4.0888321754);
+ VERIFY_IS_APPROX(x[2], 4.5154121844E+02);
+}
+
+void test_levenberg_marquardt()
+{
+ // Tests using the examples provided by (c)minpack
+ CALL_SUBTEST(testLmder1());
+ CALL_SUBTEST(testLmder());
+ CALL_SUBTEST(testLmdif1());
+// CALL_SUBTEST(testLmstr1());
+// CALL_SUBTEST(testLmstr());
+ CALL_SUBTEST(testLmdif());
+
+ // NIST tests, level of difficulty = "Lower"
+ CALL_SUBTEST(testNistMisra1a());
+ CALL_SUBTEST(testNistChwirut2());
+
+ // NIST tests, level of difficulty = "Average"
+ CALL_SUBTEST(testNistHahn1());
+ CALL_SUBTEST(testNistMisra1d());
+ CALL_SUBTEST(testNistMGH17());
+ CALL_SUBTEST(testNistLanczos1());
+
+// // NIST tests, level of difficulty = "Higher"
+ CALL_SUBTEST(testNistRat42());
+ CALL_SUBTEST(testNistMGH10());
+ CALL_SUBTEST(testNistBoxBOD());
+// CALL_SUBTEST(testNistMGH09());
+ CALL_SUBTEST(testNistBennett5());
+ CALL_SUBTEST(testNistThurber());
+ CALL_SUBTEST(testNistRat43());
+ CALL_SUBTEST(testNistEckerle4());
+}
diff --git a/unsupported/test/matrix_exponential.cpp b/unsupported/test/matrix_exponential.cpp
new file mode 100644
index 0000000..50dec08
--- /dev/null
+++ b/unsupported/test/matrix_exponential.cpp
@@ -0,0 +1,141 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "matrix_functions.h"
+
+double binom(int n, int k)
+{
+ double res = 1;
+ for (int i=0; i<k; i++)
+ res = res * (n-k+i+1) / (i+1);
+ return res;
+}
+
+template <typename T>
+T expfn(T x, int)
+{
+ return std::exp(x);
+}
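+
+// matrixFunction() expects a "stem function" f(x, n) whose second argument
+// is the derivative order (used on Schur blocks with clustered eigenvalues);
+// since every derivative of exp is exp itself, expfn can ignore it.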
+
+template <typename T>
+void test2dRotation(double tol)
+{
+ Matrix<T,2,2> A, B, C;
+ T angle;
+
+ A << 0, 1, -1, 0;
+ for (int i=0; i<=20; i++)
+ {
+ angle = static_cast<T>(pow(10, i / 5. - 2));
+ B << std::cos(angle), std::sin(angle), -std::sin(angle), std::cos(angle);
+
+ C = (angle*A).matrixFunction(expfn);
+ std::cout << "test2dRotation: i = " << i << " error funm = " << relerr(C, B);
+ VERIFY(C.isApprox(B, static_cast<T>(tol)));
+
+ C = (angle*A).exp();
+ std::cout << " error expm = " << relerr(C, B) << "\n";
+ VERIFY(C.isApprox(B, static_cast<T>(tol)));
+ }
+}
+
+template <typename T>
+void test2dHyperbolicRotation(double tol)
+{
+ Matrix<std::complex<T>,2,2> A, B, C;
+ std::complex<T> imagUnit(0,1);
+ T angle, ch, sh;
+
+ for (int i=0; i<=20; i++)
+ {
+ angle = static_cast<T>((i-10) / 2.0);
+ ch = std::cosh(angle);
+ sh = std::sinh(angle);
+ A << 0, angle*imagUnit, -angle*imagUnit, 0;
+ B << ch, sh*imagUnit, -sh*imagUnit, ch;
+
+ C = A.matrixFunction(expfn);
+ std::cout << "test2dHyperbolicRotation: i = " << i << " error funm = " << relerr(C, B);
+ VERIFY(C.isApprox(B, static_cast<T>(tol)));
+
+ C = A.exp();
+ std::cout << " error expm = " << relerr(C, B) << "\n";
+ VERIFY(C.isApprox(B, static_cast<T>(tol)));
+ }
+}
+
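+// testPascal: exp() of the "creation" matrix A (A(i+1,i) = i+1, zeros
+// elsewhere) is the lower-triangular Pascal matrix B(i,j) = binom(i,j).
+// A is nilpotent, so the exponential series terminates and the identity
+// is exact in exact arithmetic.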
+template <typename T>
+void testPascal(double tol)
+{
+ for (int size=1; size<20; size++)
+ {
+ Matrix<T,Dynamic,Dynamic> A(size,size), B(size,size), C(size,size);
+ A.setZero();
+ for (int i=0; i<size-1; i++)
+ A(i+1,i) = static_cast<T>(i+1);
+ B.setZero();
+ for (int i=0; i<size; i++)
+ for (int j=0; j<=i; j++)
+ B(i,j) = static_cast<T>(binom(i,j));
+
+ C = A.matrixFunction(expfn);
+ std::cout << "testPascal: size = " << size << " error funm = " << relerr(C, B);
+ VERIFY(C.isApprox(B, static_cast<T>(tol)));
+
+ C = A.exp();
+ std::cout << " error expm = " << relerr(C, B) << "\n";
+ VERIFY(C.isApprox(B, static_cast<T>(tol)));
+ }
+}
+
+template<typename MatrixType>
+void randomTest(const MatrixType& m, double tol)
+{
+  /* this test covers the following files:
+     MatrixExponential.h
+  */
+ typename MatrixType::Index rows = m.rows();
+ typename MatrixType::Index cols = m.cols();
+ MatrixType m1(rows, cols), m2(rows, cols), identity = MatrixType::Identity(rows, cols);
+
+ typedef typename NumTraits<typename internal::traits<MatrixType>::Scalar>::Real RealScalar;
+
+ for(int i = 0; i < g_repeat; i++) {
+ m1 = MatrixType::Random(rows, cols);
+
+ m2 = m1.matrixFunction(expfn) * (-m1).matrixFunction(expfn);
+ std::cout << "randomTest: error funm = " << relerr(identity, m2);
+ VERIFY(identity.isApprox(m2, static_cast<RealScalar>(tol)));
+
+ m2 = m1.exp() * (-m1).exp();
+ std::cout << " error expm = " << relerr(identity, m2) << "\n";
+ VERIFY(identity.isApprox(m2, static_cast<RealScalar>(tol)));
+ }
+}
+
+void test_matrix_exponential()
+{
+ CALL_SUBTEST_2(test2dRotation<double>(1e-13));
+ CALL_SUBTEST_1(test2dRotation<float>(2e-5)); // was 1e-5, relaxed for clang 2.8 / linux / x86-64
+ CALL_SUBTEST_8(test2dRotation<long double>(1e-13));
+ CALL_SUBTEST_2(test2dHyperbolicRotation<double>(1e-14));
+ CALL_SUBTEST_1(test2dHyperbolicRotation<float>(1e-5));
+ CALL_SUBTEST_8(test2dHyperbolicRotation<long double>(1e-14));
+ CALL_SUBTEST_6(testPascal<float>(1e-6));
+ CALL_SUBTEST_5(testPascal<double>(1e-15));
+ CALL_SUBTEST_2(randomTest(Matrix2d(), 1e-13));
+ CALL_SUBTEST_7(randomTest(Matrix<double,3,3,RowMajor>(), 1e-13));
+ CALL_SUBTEST_3(randomTest(Matrix4cd(), 1e-13));
+ CALL_SUBTEST_4(randomTest(MatrixXd(8,8), 1e-13));
+ CALL_SUBTEST_1(randomTest(Matrix2f(), 1e-4));
+ CALL_SUBTEST_5(randomTest(Matrix3cf(), 1e-4));
+ CALL_SUBTEST_1(randomTest(Matrix4f(), 1e-4));
+ CALL_SUBTEST_6(randomTest(MatrixXf(8,8), 1e-4));
+ CALL_SUBTEST_9(randomTest(Matrix<long double,Dynamic,Dynamic>(7,7), 1e-13));
+}
diff --git a/unsupported/test/matrix_function.cpp b/unsupported/test/matrix_function.cpp
new file mode 100644
index 0000000..3c76cfb
--- /dev/null
+++ b/unsupported/test/matrix_function.cpp
@@ -0,0 +1,193 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <unsupported/Eigen/MatrixFunctions>
+
+// Variant of VERIFY_IS_APPROX which uses absolute error instead of
+// relative error.
+#define VERIFY_IS_APPROX_ABS(a, b) VERIFY(test_isApprox_abs(a, b))
+
+template<typename Type1, typename Type2>
+inline bool test_isApprox_abs(const Type1& a, const Type2& b)
+{
+ return ((a-b).array().abs() < test_precision<typename Type1::RealScalar>()).all();
+}
+
+
+// Returns a matrix with eigenvalues clustered around 0, 1 and 2.
+template<typename MatrixType>
+MatrixType randomMatrixWithRealEivals(const typename MatrixType::Index size)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ MatrixType diag = MatrixType::Zero(size, size);
+ for (Index i = 0; i < size; ++i) {
+ diag(i, i) = Scalar(RealScalar(internal::random<int>(0,2)))
+ + internal::random<Scalar>() * Scalar(RealScalar(0.01));
+ }
+ MatrixType A = MatrixType::Random(size, size);
+ HouseholderQR<MatrixType> QRofA(A);
+ return QRofA.householderQ().inverse() * diag * QRofA.householderQ();
+}
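+// (householderQ() of a random matrix is unitary, so Q.inverse() * diag * Q
+// is a similarity transform: the result has exactly the perturbed
+// eigenvalues placed on the diagonal above, but looks like a dense random
+// matrix.)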
+
+template <typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
+struct randomMatrixWithImagEivals
+{
+ // Returns a matrix with eigenvalues clustered around 0 and +/- i.
+ static MatrixType run(const typename MatrixType::Index size);
+};
+
+// Partial specialization for real matrices
+template<typename MatrixType>
+struct randomMatrixWithImagEivals<MatrixType, 0>
+{
+ static MatrixType run(const typename MatrixType::Index size)
+ {
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ MatrixType diag = MatrixType::Zero(size, size);
+ Index i = 0;
+ while (i < size) {
+ Index randomInt = internal::random<Index>(-1, 1);
+ if (randomInt == 0 || i == size-1) {
+ diag(i, i) = internal::random<Scalar>() * Scalar(0.01);
+ ++i;
+ } else {
+ Scalar alpha = Scalar(randomInt) + internal::random<Scalar>() * Scalar(0.01);
+ diag(i, i+1) = alpha;
+ diag(i+1, i) = -alpha;
+ i += 2;
+ }
+ }
+ MatrixType A = MatrixType::Random(size, size);
+ HouseholderQR<MatrixType> QRofA(A);
+ return QRofA.householderQ().inverse() * diag * QRofA.householderQ();
+ }
+};
+
+// Partial specialization for complex matrices
+template<typename MatrixType>
+struct randomMatrixWithImagEivals<MatrixType, 1>
+{
+ static MatrixType run(const typename MatrixType::Index size)
+ {
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ const Scalar imagUnit(0, 1);
+ MatrixType diag = MatrixType::Zero(size, size);
+ for (Index i = 0; i < size; ++i) {
+ diag(i, i) = Scalar(RealScalar(internal::random<Index>(-1, 1))) * imagUnit
+ + internal::random<Scalar>() * Scalar(RealScalar(0.01));
+ }
+ MatrixType A = MatrixType::Random(size, size);
+ HouseholderQR<MatrixType> QRofA(A);
+ return QRofA.householderQ().inverse() * diag * QRofA.householderQ();
+ }
+};
+
+
+template<typename MatrixType>
+void testMatrixExponential(const MatrixType& A)
+{
+ typedef typename internal::traits<MatrixType>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef std::complex<RealScalar> ComplexScalar;
+
+ VERIFY_IS_APPROX(A.exp(), A.matrixFunction(StdStemFunctions<ComplexScalar>::exp));
+}
+
+template<typename MatrixType>
+void testMatrixLogarithm(const MatrixType& A)
+{
+ typedef typename internal::traits<MatrixType>::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ MatrixType scaledA;
+ RealScalar maxImagPartOfSpectrum = A.eigenvalues().imag().cwiseAbs().maxCoeff();
+ if (maxImagPartOfSpectrum >= 0.9 * M_PI)
+ scaledA = A * 0.9 * M_PI / maxImagPartOfSpectrum;
+ else
+ scaledA = A;
+
+  // The identity X.exp().log() == X only holds if |Im(lambda)| < pi for all eigenvalues of X.
+ MatrixType expA = scaledA.exp();
+ MatrixType logExpA = expA.log();
+ VERIFY_IS_APPROX(logExpA, scaledA);
+}
+
+template<typename MatrixType>
+void testHyperbolicFunctions(const MatrixType& A)
+{
+ // Need to use absolute error because of possible cancellation when
+ // adding/subtracting expA and expmA.
+ VERIFY_IS_APPROX_ABS(A.sinh(), (A.exp() - (-A).exp()) / 2);
+ VERIFY_IS_APPROX_ABS(A.cosh(), (A.exp() + (-A).exp()) / 2);
+}
+
+template<typename MatrixType>
+void testGonioFunctions(const MatrixType& A)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+ typedef std::complex<RealScalar> ComplexScalar;
+ typedef Matrix<ComplexScalar, MatrixType::RowsAtCompileTime,
+ MatrixType::ColsAtCompileTime, MatrixType::Options> ComplexMatrix;
+
+ ComplexScalar imagUnit(0,1);
+ ComplexScalar two(2,0);
+
+ ComplexMatrix Ac = A.template cast<ComplexScalar>();
+
+ ComplexMatrix exp_iA = (imagUnit * Ac).exp();
+ ComplexMatrix exp_miA = (-imagUnit * Ac).exp();
+
+ ComplexMatrix sinAc = A.sin().template cast<ComplexScalar>();
+ VERIFY_IS_APPROX_ABS(sinAc, (exp_iA - exp_miA) / (two*imagUnit));
+
+ ComplexMatrix cosAc = A.cos().template cast<ComplexScalar>();
+ VERIFY_IS_APPROX_ABS(cosAc, (exp_iA + exp_miA) / 2);
+}
+
+template<typename MatrixType>
+void testMatrix(const MatrixType& A)
+{
+ testMatrixExponential(A);
+ testMatrixLogarithm(A);
+ testHyperbolicFunctions(A);
+ testGonioFunctions(A);
+}
+
+template<typename MatrixType>
+void testMatrixType(const MatrixType& m)
+{
+  // Matrices with clustered eigenvalues lead to different code paths
+ // in MatrixFunction.h and are thus useful for testing.
+ typedef typename MatrixType::Index Index;
+
+ const Index size = m.rows();
+ for (int i = 0; i < g_repeat; i++) {
+ testMatrix(MatrixType::Random(size, size).eval());
+ testMatrix(randomMatrixWithRealEivals<MatrixType>(size));
+ testMatrix(randomMatrixWithImagEivals<MatrixType>::run(size));
+ }
+}
+
+void test_matrix_function()
+{
+ CALL_SUBTEST_1(testMatrixType(Matrix<float,1,1>()));
+ CALL_SUBTEST_2(testMatrixType(Matrix3cf()));
+ CALL_SUBTEST_3(testMatrixType(MatrixXf(8,8)));
+ CALL_SUBTEST_4(testMatrixType(Matrix2d()));
+ CALL_SUBTEST_5(testMatrixType(Matrix<double,5,5,RowMajor>()));
+ CALL_SUBTEST_6(testMatrixType(Matrix4cd()));
+ CALL_SUBTEST_7(testMatrixType(MatrixXd(13,13)));
+}
diff --git a/unsupported/test/matrix_functions.h b/unsupported/test/matrix_functions.h
new file mode 100644
index 0000000..5817cae
--- /dev/null
+++ b/unsupported/test/matrix_functions.h
@@ -0,0 +1,47 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <unsupported/Eigen/MatrixFunctions>
+
+template <typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
+struct generateTestMatrix;
+
+// for real matrices, make sure none of the eigenvalues are negative
+template <typename MatrixType>
+struct generateTestMatrix<MatrixType,0>
+{
+ static void run(MatrixType& result, typename MatrixType::Index size)
+ {
+ MatrixType mat = MatrixType::Random(size, size);
+ EigenSolver<MatrixType> es(mat);
+ typename EigenSolver<MatrixType>::EigenvalueType eivals = es.eigenvalues();
+ for (typename MatrixType::Index i = 0; i < size; ++i) {
+ if (eivals(i).imag() == 0 && eivals(i).real() < 0)
+ eivals(i) = -eivals(i);
+ }
+ result = (es.eigenvectors() * eivals.asDiagonal() * es.eigenvectors().inverse()).real();
+ }
+};
+
+// for complex matrices, any matrix is fine
+template <typename MatrixType>
+struct generateTestMatrix<MatrixType,1>
+{
+ static void run(MatrixType& result, typename MatrixType::Index size)
+ {
+ result = MatrixType::Random(size, size);
+ }
+};
+
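+// Frobenius-norm relative error, normalized by the smaller of the two
+// operands' norms so that the measure is symmetric in A and B.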
+template <typename Derived, typename OtherDerived>
+double relerr(const MatrixBase<Derived>& A, const MatrixBase<OtherDerived>& B)
+{
+ return std::sqrt((A - B).cwiseAbs2().sum() / (std::min)(A.cwiseAbs2().sum(), B.cwiseAbs2().sum()));
+}
diff --git a/unsupported/test/matrix_power.cpp b/unsupported/test/matrix_power.cpp
new file mode 100644
index 0000000..b9d513b
--- /dev/null
+++ b/unsupported/test/matrix_power.cpp
@@ -0,0 +1,133 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "matrix_functions.h"
+
+template <typename MatrixType, int IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
+struct generateTriangularMatrix;
+
+// for real matrices, make sure none of the eigenvalues are negative
+template <typename MatrixType>
+struct generateTriangularMatrix<MatrixType,0>
+{
+ static void run(MatrixType& result, typename MatrixType::Index size)
+ {
+ result.resize(size, size);
+ result.template triangularView<Upper>() = MatrixType::Random(size, size);
+ for (typename MatrixType::Index i = 0; i < size; ++i)
+ result.coeffRef(i,i) = std::abs(result.coeff(i,i));
+ }
+};
+
+// for complex matrices, any matrix is fine
+template <typename MatrixType>
+struct generateTriangularMatrix<MatrixType,1>
+{
+ static void run(MatrixType& result, typename MatrixType::Index size)
+ {
+ result.resize(size, size);
+ result.template triangularView<Upper>() = MatrixType::Random(size, size);
+ }
+};
+
+template<typename T>
+void test2dRotation(double tol)
+{
+ Matrix<T,2,2> A, B, C;
+ T angle, c, s;
+
+ A << 0, 1, -1, 0;
+ MatrixPower<Matrix<T,2,2> > Apow(A);
+
+ for (int i=0; i<=20; ++i) {
+ angle = pow(10, (i-10) / 5.);
+ c = std::cos(angle);
+ s = std::sin(angle);
+ B << c, s, -s, c;
+
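+    // In this sign convention A equals the rotation with angle pi/2, so
+    // A^(2*angle/pi) is the rotation with the given angle;
+    // std::ldexp(angle,1) is just 2*angle.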
+ C = Apow(std::ldexp(angle,1) / M_PI);
+ std::cout << "test2dRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n';
+ VERIFY(C.isApprox(B, static_cast<T>(tol)));
+ }
+}
+
+template<typename T>
+void test2dHyperbolicRotation(double tol)
+{
+ Matrix<std::complex<T>,2,2> A, B, C;
+ T angle, ch = std::cosh((T)1);
+ std::complex<T> ish(0, std::sinh((T)1));
+
+ A << ch, ish, -ish, ch;
+ MatrixPower<Matrix<std::complex<T>,2,2> > Apow(A);
+
+ for (int i=0; i<=20; ++i) {
+ angle = std::ldexp(static_cast<T>(i-10), -1);
+ ch = std::cosh(angle);
+ ish = std::complex<T>(0, std::sinh(angle));
+ B << ch, ish, -ish, ch;
+
+ C = Apow(angle);
+ std::cout << "test2dHyperbolicRotation: i = " << i << " error powerm = " << relerr(C,B) << '\n';
+ VERIFY(C.isApprox(B, static_cast<T>(tol)));
+ }
+}
+
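+// Check the power laws  A^(x+y) == A^x * A^y,  (A^x)^y == A^(x*y)  and
+// (c*A)^y == c^y * A^y  (for real c > 0) on randomly generated matrices.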
+template<typename MatrixType>
+void testExponentLaws(const MatrixType& m, double tol)
+{
+ typedef typename MatrixType::RealScalar RealScalar;
+ MatrixType m1, m2, m3, m4, m5;
+ RealScalar x, y;
+
+ for (int i=0; i < g_repeat; ++i) {
+ generateTestMatrix<MatrixType>::run(m1, m.rows());
+ MatrixPower<MatrixType> mpow(m1);
+
+ x = internal::random<RealScalar>();
+ y = internal::random<RealScalar>();
+ m2 = mpow(x);
+ m3 = mpow(y);
+
+ m4 = mpow(x+y);
+ m5.noalias() = m2 * m3;
+ VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
+
+ m4 = mpow(x*y);
+ m5 = m2.pow(y);
+ VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
+
+ m4 = (std::abs(x) * m1).pow(y);
+ m5 = std::pow(std::abs(x), y) * m3;
+ VERIFY(m4.isApprox(m5, static_cast<RealScalar>(tol)));
+ }
+}
+
+typedef Matrix<double,3,3,RowMajor> Matrix3dRowMajor;
+typedef Matrix<long double,Dynamic,Dynamic> MatrixXe;
+
+void test_matrix_power()
+{
+ CALL_SUBTEST_2(test2dRotation<double>(1e-13));
+ CALL_SUBTEST_1(test2dRotation<float>(2e-5)); // was 1e-5, relaxed for clang 2.8 / linux / x86-64
+ CALL_SUBTEST_9(test2dRotation<long double>(1e-13));
+ CALL_SUBTEST_2(test2dHyperbolicRotation<double>(1e-14));
+ CALL_SUBTEST_1(test2dHyperbolicRotation<float>(1e-5));
+ CALL_SUBTEST_9(test2dHyperbolicRotation<long double>(1e-14));
+
+ CALL_SUBTEST_2(testExponentLaws(Matrix2d(), 1e-13));
+ CALL_SUBTEST_7(testExponentLaws(Matrix3dRowMajor(), 1e-13));
+ CALL_SUBTEST_3(testExponentLaws(Matrix4cd(), 1e-13));
+ CALL_SUBTEST_4(testExponentLaws(MatrixXd(8,8), 2e-12));
+ CALL_SUBTEST_1(testExponentLaws(Matrix2f(), 1e-4));
+ CALL_SUBTEST_5(testExponentLaws(Matrix3cf(), 1e-4));
+ CALL_SUBTEST_8(testExponentLaws(Matrix4f(), 1e-4));
+ CALL_SUBTEST_6(testExponentLaws(MatrixXf(2,2), 1e-3)); // see bug 614
+ CALL_SUBTEST_9(testExponentLaws(MatrixXe(7,7), 1e-13));
+}
diff --git a/unsupported/test/matrix_square_root.cpp b/unsupported/test/matrix_square_root.cpp
new file mode 100644
index 0000000..ea541e1
--- /dev/null
+++ b/unsupported/test/matrix_square_root.cpp
@@ -0,0 +1,31 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "matrix_functions.h"
+
+template<typename MatrixType>
+void testMatrixSqrt(const MatrixType& m)
+{
+ MatrixType A;
+ generateTestMatrix<MatrixType>::run(A, m.rows());
+ MatrixType sqrtA = A.sqrt();
+ VERIFY_IS_APPROX(sqrtA * sqrtA, A);
+}
+
+void test_matrix_square_root()
+{
+ for (int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST_1(testMatrixSqrt(Matrix3cf()));
+ CALL_SUBTEST_2(testMatrixSqrt(MatrixXcd(12,12)));
+ CALL_SUBTEST_3(testMatrixSqrt(Matrix4f()));
+ CALL_SUBTEST_4(testMatrixSqrt(Matrix<double,Dynamic,Dynamic,RowMajor>(9, 9)));
+ CALL_SUBTEST_5(testMatrixSqrt(Matrix<float,1,1>()));
+ CALL_SUBTEST_5(testMatrixSqrt(Matrix<std::complex<float>,1,1>()));
+ }
+}
diff --git a/unsupported/test/minres.cpp b/unsupported/test/minres.cpp
new file mode 100644
index 0000000..509ebe0
--- /dev/null
+++ b/unsupported/test/minres.cpp
@@ -0,0 +1,49 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <g.gael@free.fr>
+// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#include <cmath>
+
+#include "../../test/sparse_solver.h"
+#include <Eigen/IterativeSolvers>
+
+template<typename T> void test_minres_T()
+{
+ MINRES<SparseMatrix<T>, Lower|Upper, DiagonalPreconditioner<T> > minres_colmajor_diag;
+ MINRES<SparseMatrix<T>, Lower, IdentityPreconditioner > minres_colmajor_lower_I;
+ MINRES<SparseMatrix<T>, Upper, IdentityPreconditioner > minres_colmajor_upper_I;
+// MINRES<SparseMatrix<T>, Lower, IncompleteLUT<T> > minres_colmajor_ilut;
+ //minres<SparseMatrix<T>, SSORPreconditioner<T> > minres_colmajor_ssor;
+
+
+// CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_diag) );
+ // CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ilut) );
+ //CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ssor) );
+
+ // Diagonal preconditioner
+ MINRES<SparseMatrix<T>, Lower, DiagonalPreconditioner<T> > minres_colmajor_lower_diag;
+ MINRES<SparseMatrix<T>, Upper, DiagonalPreconditioner<T> > minres_colmajor_upper_diag;
+ MINRES<SparseMatrix<T>, Upper|Lower, DiagonalPreconditioner<T> > minres_colmajor_uplo_diag;
+
+ // call tests for SPD matrix
+ CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_I) );
+ CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_upper_I) );
+
+ CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_diag) );
+ CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_upper_diag) );
+// CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_uplo_diag) );
+
+    // TODO: symmetric semi-definite matrix
+    // TODO: symmetric indefinite matrix
+}
+
+void test_minres()
+{
+ CALL_SUBTEST_1(test_minres_T<double>());
+// CALL_SUBTEST_2(test_minres_T<std::complex<double> >());
+}
diff --git a/unsupported/test/mpreal/mpreal.h b/unsupported/test/mpreal/mpreal.h
new file mode 100644
index 0000000..7d6f4e7
--- /dev/null
+++ b/unsupported/test/mpreal/mpreal.h
@@ -0,0 +1,3074 @@
+/*
+ MPFR C++: Multi-precision floating point number class for C++.
+ Based on MPFR library: http://mpfr.org
+
+ Project homepage: http://www.holoborodko.com/pavel/mpfr
+ Contact e-mail: pavel@holoborodko.com
+
+ Copyright (c) 2008-2014 Pavel Holoborodko
+
+ Contributors:
+ Dmitriy Gubanov, Konstantin Holoborodko, Brian Gladman,
+ Helmut Jarausch, Fokko Beekhof, Ulrich Mutze, Heinz van Saanen,
+ Pere Constans, Peter van Hoof, Gael Guennebaud, Tsai Chia Cheng,
+ Alexei Zubanov, Jauhien Piatlicki, Victor Berger, John Westwood,
+ Petr Aleksandrov, Orion Poplawski, Charles Karney.
+
+ Licensing:
+ (A) MPFR C++ is under GNU General Public License ("GPL").
+
+ (B) Non-free licenses may also be purchased from the author, for users who
+ do not want their programs protected by the GPL.
+
+ The non-free licenses are for users that wish to use MPFR C++ in
+ their products but are unwilling to release their software
+ under the GPL (which would require them to release source code
+ and allow free redistribution).
+
+ Such users can purchase an unlimited-use license from the author.
+ Contact us for more details.
+
+ GNU General Public License ("GPL") copyright permissions statement:
+ **************************************************************************
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __MPREAL_H__
+#define __MPREAL_H__
+
+#include <string>
+#include <iostream>
+#include <sstream>
+#include <stdexcept>
+#include <cfloat>
+#include <cmath>
+#include <cstring>
+#include <limits>
+
+// Options
+// FIXME HAVE_INT64_SUPPORT leads to clashes with long int and int64_t on some systems.
+//#define MPREAL_HAVE_INT64_SUPPORT // Enable int64_t support if possible. Available only for MSVC 2010 & GCC.
+#define MPREAL_HAVE_MSVC_DEBUGVIEW // Enable Debugger Visualizer for "Debug" builds in MSVC.
+#define MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS // Enable extended std::numeric_limits<mpfr::mpreal> specialization.
+ // Meaning that "digits", "round_style" and similar members are defined as functions, not constants.
+ // See std::numeric_limits<mpfr::mpreal> at the end of the file for more information.
+
+// Library version
+#define MPREAL_VERSION_MAJOR 3
+#define MPREAL_VERSION_MINOR 5
+#define MPREAL_VERSION_PATCHLEVEL 9
+#define MPREAL_VERSION_STRING "3.5.9"
+
+// Detect compiler using signatures from http://predef.sourceforge.net/
+#if defined(__GNUC__) && defined(__INTEL_COMPILER)
+ #define IsInf(x) isinf(x) // Intel ICC compiler on Linux
+
+#elif defined(_MSC_VER) // Microsoft Visual C++
+ #define IsInf(x) (!_finite(x))
+
+#else
+ #define IsInf(x) std::isinf(x) // GNU C/C++ (and/or other compilers), just hope for C99 conformance
+#endif
+
+// A Clang feature extension to determine compiler features.
+#ifndef __has_feature
+ #define __has_feature(x) 0
+#endif
+
+// Detect support for r-value references (move semantic). Borrowed from Eigen.
+#if (__has_feature(cxx_rvalue_references) || \
+ defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \
+ (defined(_MSC_VER) && _MSC_VER >= 1600))
+
+ #define MPREAL_HAVE_MOVE_SUPPORT
+
+    // Use fields of the mpfr_t structure to check whether it was initialized,
+    // and to mark it as uninitialized (dummy initialization).
+ #define mpfr_is_initialized(x) (0 != (x)->_mpfr_d)
+ #define mpfr_set_uninitialized(x) ((x)->_mpfr_d = 0 )
+#endif
+
+// Detect support for explicit converters.
+#if (__has_feature(cxx_explicit_conversions) || \
+ defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \
+ (defined(_MSC_VER) && _MSC_VER >= 1800))
+
+ #define MPREAL_HAVE_EXPLICIT_CONVERTERS
+#endif
+
+// Detect available 64-bit capabilities
+#if defined(MPREAL_HAVE_INT64_SUPPORT)
+
+ #define MPFR_USE_INTMAX_T // Should be defined before mpfr.h
+
+ #if defined(_MSC_VER) // MSVC + Windows
+ #if (_MSC_VER >= 1600)
+            #include <stdint.h>                 // <stdint.h> is available only since msvc2010!
+
+ #else // MPFR relies on intmax_t which is available only in msvc2010
+ #undef MPREAL_HAVE_INT64_SUPPORT // Besides, MPFR & MPIR have to be compiled with msvc2010
+ #undef MPFR_USE_INTMAX_T // Since we cannot detect this, disable x64 by default
+ // Someone should change this manually if needed.
+ #endif
+
+ #elif defined (__GNUC__) && defined(__linux__)
+ #if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) || defined (__PPC64__)
+      #undef MPREAL_HAVE_INT64_SUPPORT    // No x64-specific workarounds are needed here:
+      #undef MPFR_USE_INTMAX_T            // on these targets GCC's "long int" is already a 64-bit integer, so nothing is left to do
+ #else
+ #include <stdint.h> // use int64_t, uint64_t otherwise
+ #endif
+
+ #else
+ #include <stdint.h> // rely on int64_t, uint64_t in all other cases, Mac OSX, etc.
+ #endif
+
+#endif
+
+#if defined(MPREAL_HAVE_MSVC_DEBUGVIEW) && defined(_MSC_VER) && defined(_DEBUG)
+ #define MPREAL_MSVC_DEBUGVIEW_CODE DebugView = toString();
+ #define MPREAL_MSVC_DEBUGVIEW_DATA std::string DebugView;
+#else
+ #define MPREAL_MSVC_DEBUGVIEW_CODE
+ #define MPREAL_MSVC_DEBUGVIEW_DATA
+#endif
+
+#include <mpfr.h>
+
+#if (MPFR_VERSION < MPFR_VERSION_NUM(3,0,0))
+ #include <cstdlib> // Needed for random()
+#endif
+
+// Less important options
+#define MPREAL_DOUBLE_BITS_OVERFLOW -1 // Triggers overflow exception during conversion to double if mpreal
+ // cannot fit in MPREAL_DOUBLE_BITS_OVERFLOW bits
+ // = -1 disables overflow checks (default)
+#if defined(__GNUC__)
+ #define MPREAL_PERMISSIVE_EXPR __extension__
+#else
+ #define MPREAL_PERMISSIVE_EXPR
+#endif
+
+namespace mpfr {
+
+class mpreal {
+private:
+ mpfr_t mp;
+
+public:
+
+ // Get default rounding mode & precision
+ inline static mp_rnd_t get_default_rnd() { return (mp_rnd_t)(mpfr_get_default_rounding_mode()); }
+ inline static mp_prec_t get_default_prec() { return mpfr_get_default_prec(); }
+
+ // Constructors && type conversions
+ mpreal();
+ mpreal(const mpreal& u);
+ mpreal(const mpf_t u);
+ mpreal(const mpz_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const mpq_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const double u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const long double u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const unsigned long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const unsigned int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+
+ // Construct mpreal from mpfr_t structure.
+    // shared = true avoids a deep copy, so that the mpreal and 'u' share the same data & pointers.
+ mpreal(const mpfr_t u, bool shared = false);
+
+#if defined (MPREAL_HAVE_INT64_SUPPORT)
+ mpreal(const uint64_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const int64_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
+#endif
+
+ mpreal(const char* s, mp_prec_t prec = mpreal::get_default_prec(), int base = 10, mp_rnd_t mode = mpreal::get_default_rnd());
+ mpreal(const std::string& s, mp_prec_t prec = mpreal::get_default_prec(), int base = 10, mp_rnd_t mode = mpreal::get_default_rnd());
+
+ ~mpreal();
+
+#ifdef MPREAL_HAVE_MOVE_SUPPORT
+ mpreal& operator=(mpreal&& v);
+ mpreal(mpreal&& u);
+#endif
+
+ // Operations
+ // =
+ // +, -, *, /, ++, --, <<, >>
+ // *=, +=, -=, /=,
+ // <, >, ==, <=, >=
+
+ // =
+ mpreal& operator=(const mpreal& v);
+ mpreal& operator=(const mpf_t v);
+ mpreal& operator=(const mpz_t v);
+ mpreal& operator=(const mpq_t v);
+ mpreal& operator=(const long double v);
+ mpreal& operator=(const double v);
+ mpreal& operator=(const unsigned long int v);
+ mpreal& operator=(const unsigned int v);
+ mpreal& operator=(const long int v);
+ mpreal& operator=(const int v);
+ mpreal& operator=(const char* s);
+ mpreal& operator=(const std::string& s);
+
+ // +
+ mpreal& operator+=(const mpreal& v);
+ mpreal& operator+=(const mpf_t v);
+ mpreal& operator+=(const mpz_t v);
+ mpreal& operator+=(const mpq_t v);
+ mpreal& operator+=(const long double u);
+ mpreal& operator+=(const double u);
+ mpreal& operator+=(const unsigned long int u);
+ mpreal& operator+=(const unsigned int u);
+ mpreal& operator+=(const long int u);
+ mpreal& operator+=(const int u);
+
+#if defined (MPREAL_HAVE_INT64_SUPPORT)
+ mpreal& operator+=(const int64_t u);
+ mpreal& operator+=(const uint64_t u);
+ mpreal& operator-=(const int64_t u);
+ mpreal& operator-=(const uint64_t u);
+ mpreal& operator*=(const int64_t u);
+ mpreal& operator*=(const uint64_t u);
+ mpreal& operator/=(const int64_t u);
+ mpreal& operator/=(const uint64_t u);
+#endif
+
+ const mpreal operator+() const;
+ mpreal& operator++ ();
+ const mpreal operator++ (int);
+
+ // -
+ mpreal& operator-=(const mpreal& v);
+ mpreal& operator-=(const mpz_t v);
+ mpreal& operator-=(const mpq_t v);
+ mpreal& operator-=(const long double u);
+ mpreal& operator-=(const double u);
+ mpreal& operator-=(const unsigned long int u);
+ mpreal& operator-=(const unsigned int u);
+ mpreal& operator-=(const long int u);
+ mpreal& operator-=(const int u);
+ const mpreal operator-() const;
+ friend const mpreal operator-(const unsigned long int b, const mpreal& a);
+ friend const mpreal operator-(const unsigned int b, const mpreal& a);
+ friend const mpreal operator-(const long int b, const mpreal& a);
+ friend const mpreal operator-(const int b, const mpreal& a);
+ friend const mpreal operator-(const double b, const mpreal& a);
+ mpreal& operator-- ();
+ const mpreal operator-- (int);
+
+ // *
+ mpreal& operator*=(const mpreal& v);
+ mpreal& operator*=(const mpz_t v);
+ mpreal& operator*=(const mpq_t v);
+ mpreal& operator*=(const long double v);
+ mpreal& operator*=(const double v);
+ mpreal& operator*=(const unsigned long int v);
+ mpreal& operator*=(const unsigned int v);
+ mpreal& operator*=(const long int v);
+ mpreal& operator*=(const int v);
+
+ // /
+ mpreal& operator/=(const mpreal& v);
+ mpreal& operator/=(const mpz_t v);
+ mpreal& operator/=(const mpq_t v);
+ mpreal& operator/=(const long double v);
+ mpreal& operator/=(const double v);
+ mpreal& operator/=(const unsigned long int v);
+ mpreal& operator/=(const unsigned int v);
+ mpreal& operator/=(const long int v);
+ mpreal& operator/=(const int v);
+ friend const mpreal operator/(const unsigned long int b, const mpreal& a);
+ friend const mpreal operator/(const unsigned int b, const mpreal& a);
+ friend const mpreal operator/(const long int b, const mpreal& a);
+ friend const mpreal operator/(const int b, const mpreal& a);
+ friend const mpreal operator/(const double b, const mpreal& a);
+
+ //<<= Fast Multiplication by 2^u
+ mpreal& operator<<=(const unsigned long int u);
+ mpreal& operator<<=(const unsigned int u);
+ mpreal& operator<<=(const long int u);
+ mpreal& operator<<=(const int u);
+
+ //>>= Fast Division by 2^u
+ mpreal& operator>>=(const unsigned long int u);
+ mpreal& operator>>=(const unsigned int u);
+ mpreal& operator>>=(const long int u);
+ mpreal& operator>>=(const int u);
+
+ // Boolean Operators
+ friend bool operator > (const mpreal& a, const mpreal& b);
+ friend bool operator >= (const mpreal& a, const mpreal& b);
+ friend bool operator < (const mpreal& a, const mpreal& b);
+ friend bool operator <= (const mpreal& a, const mpreal& b);
+ friend bool operator == (const mpreal& a, const mpreal& b);
+ friend bool operator != (const mpreal& a, const mpreal& b);
+
+ // Optimized specializations for boolean operators
+ friend bool operator == (const mpreal& a, const unsigned long int b);
+ friend bool operator == (const mpreal& a, const unsigned int b);
+ friend bool operator == (const mpreal& a, const long int b);
+ friend bool operator == (const mpreal& a, const int b);
+ friend bool operator == (const mpreal& a, const long double b);
+ friend bool operator == (const mpreal& a, const double b);
+
+ // Type Conversion operators
+ bool toBool (mp_rnd_t mode = GMP_RNDZ) const;
+ long toLong (mp_rnd_t mode = GMP_RNDZ) const;
+ unsigned long toULong (mp_rnd_t mode = GMP_RNDZ) const;
+ float toFloat (mp_rnd_t mode = GMP_RNDN) const;
+ double toDouble (mp_rnd_t mode = GMP_RNDN) const;
+ long double toLDouble (mp_rnd_t mode = GMP_RNDN) const;
+
+#if defined (MPREAL_HAVE_EXPLICIT_CONVERTERS)
+ explicit operator bool () const { return toBool(); }
+ explicit operator int () const { return toLong(); }
+ explicit operator long () const { return toLong(); }
+ explicit operator long long () const { return toLong(); }
+ explicit operator unsigned () const { return toULong(); }
+ explicit operator unsigned long () const { return toULong(); }
+ explicit operator unsigned long long () const { return toULong(); }
+ explicit operator float () const { return toFloat(); }
+ explicit operator double () const { return toDouble(); }
+ explicit operator long double () const { return toLDouble(); }
+#endif
+
+#if defined (MPREAL_HAVE_INT64_SUPPORT)
+ int64_t toInt64 (mp_rnd_t mode = GMP_RNDZ) const;
+ uint64_t toUInt64 (mp_rnd_t mode = GMP_RNDZ) const;
+
+ #if defined (MPREAL_HAVE_EXPLICIT_CONVERTERS)
+ explicit operator int64_t () const { return toInt64(); }
+ explicit operator uint64_t () const { return toUInt64(); }
+ #endif
+#endif
+
+ // Get raw pointers so that mpreal can be directly used in raw mpfr_* functions
+ ::mpfr_ptr mpfr_ptr();
+ ::mpfr_srcptr mpfr_ptr() const;
+ ::mpfr_srcptr mpfr_srcptr() const;
+
+ // Convert mpreal to string with n significant digits in base b
+ // n = -1 -> convert with the maximum available digits
+ std::string toString(int n = -1, int b = 10, mp_rnd_t mode = mpreal::get_default_rnd()) const;
+
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
+ std::string toString(const std::string& format) const;
+#endif
+
+ std::ostream& output(std::ostream& os) const;
+
+ // Math Functions
+ friend const mpreal sqr (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal sqrt(const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal sqrt(const unsigned long int v, mp_rnd_t rnd_mode);
+ friend const mpreal cbrt(const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal root(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode);
+ friend const mpreal pow (const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode);
+ friend const mpreal pow (const mpreal& a, const mpz_t b, mp_rnd_t rnd_mode);
+ friend const mpreal pow (const mpreal& a, const unsigned long int b, mp_rnd_t rnd_mode);
+ friend const mpreal pow (const mpreal& a, const long int b, mp_rnd_t rnd_mode);
+ friend const mpreal pow (const unsigned long int a, const mpreal& b, mp_rnd_t rnd_mode);
+ friend const mpreal pow (const unsigned long int a, const unsigned long int b, mp_rnd_t rnd_mode);
+ friend const mpreal fabs(const mpreal& v, mp_rnd_t rnd_mode);
+
+ friend const mpreal abs(const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal dim(const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode);
+ friend inline const mpreal mul_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode);
+ friend inline const mpreal mul_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode);
+ friend inline const mpreal div_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode);
+ friend inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode);
+ friend int cmpabs(const mpreal& a,const mpreal& b);
+
+ friend const mpreal log (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal log2 (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal log10(const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal exp (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal exp2 (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal exp10(const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal log1p(const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal expm1(const mpreal& v, mp_rnd_t rnd_mode);
+
+ friend const mpreal cos(const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal sin(const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal tan(const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal sec(const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal csc(const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal cot(const mpreal& v, mp_rnd_t rnd_mode);
+ friend int sin_cos(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode);
+
+ friend const mpreal acos (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal asin (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal atan (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal atan2 (const mpreal& y, const mpreal& x, mp_rnd_t rnd_mode);
+ friend const mpreal acot (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal asec (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal acsc (const mpreal& v, mp_rnd_t rnd_mode);
+
+ friend const mpreal cosh (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal sinh (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal tanh (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal sech (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal csch (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal coth (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal acosh (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal asinh (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal atanh (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal acoth (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal asech (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal acsch (const mpreal& v, mp_rnd_t rnd_mode);
+
+ friend const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
+
+ friend const mpreal fac_ui (unsigned long int v, mp_prec_t prec, mp_rnd_t rnd_mode);
+ friend const mpreal eint (const mpreal& v, mp_rnd_t rnd_mode);
+
+ friend const mpreal gamma (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal lngamma (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal lgamma (const mpreal& v, int *signp, mp_rnd_t rnd_mode);
+ friend const mpreal zeta (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal erf (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal erfc (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal besselj0 (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal besselj1 (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal besseljn (long n, const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal bessely0 (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal bessely1 (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal besselyn (long n, const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode);
+ friend const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode);
+ friend const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode);
+ friend const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode);
+ friend int sgn(const mpreal& v); // returns -1 or +1
+
+// MPFR 2.4.0 Specifics
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
+ friend int sinh_cosh (mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal li2 (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal fmod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
+ friend const mpreal rec_sqrt (const mpreal& v, mp_rnd_t rnd_mode);
+
+ // MATLAB's semantic equivalents
+ friend const mpreal rem (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); // Remainder after division
+ friend const mpreal mod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); // Modulus after division
+#endif
+
+// MPFR 3.0.0 Specifics
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
+ friend const mpreal digamma (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal ai (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal urandom (gmp_randstate_t& state, mp_rnd_t rnd_mode); // use gmp_randinit_default() to init state, gmp_randclear() to clear
+ friend const mpreal grandom (gmp_randstate_t& state, mp_rnd_t rnd_mode); // use gmp_randinit_default() to init state, gmp_randclear() to clear
+ friend const mpreal grandom (unsigned int seed);
+#endif
+
+ // Uniformly distributed random number generation in [0,1] using
+ // Mersenne-Twister algorithm by default.
+    // Use the parameter to set the seed, e.g.: random((unsigned)time(NULL))
+ // Check urandom() for more precise control.
+ friend const mpreal random(unsigned int seed);
+
+ // Exponent and mantissa manipulation
+ friend const mpreal frexp(const mpreal& v, mp_exp_t* exp);
+ friend const mpreal ldexp(const mpreal& v, mp_exp_t exp);
+
+ // Splits mpreal value into fractional and integer parts.
+ // Returns fractional part and stores integer part in n.
+ friend const mpreal modf(const mpreal& v, mpreal& n);
+
+ // Constants
+    // don't forget to call mpfr_free_cache() in every thread where these const_* functions are used
+ friend const mpreal const_log2 (mp_prec_t prec, mp_rnd_t rnd_mode);
+ friend const mpreal const_pi (mp_prec_t prec, mp_rnd_t rnd_mode);
+ friend const mpreal const_euler (mp_prec_t prec, mp_rnd_t rnd_mode);
+ friend const mpreal const_catalan (mp_prec_t prec, mp_rnd_t rnd_mode);
+
+    // returns +inf if sign >= 0, otherwise -inf
+ friend const mpreal const_infinity(int sign, mp_prec_t prec);
+
+ // Output/ Input
+ friend std::ostream& operator<<(std::ostream& os, const mpreal& v);
+ friend std::istream& operator>>(std::istream& is, mpreal& v);
+
+ // Integer Related Functions
+ friend const mpreal rint (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal ceil (const mpreal& v);
+ friend const mpreal floor(const mpreal& v);
+ friend const mpreal round(const mpreal& v);
+ friend const mpreal trunc(const mpreal& v);
+ friend const mpreal rint_ceil (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal rint_floor (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal rint_round (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal rint_trunc (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal frac (const mpreal& v, mp_rnd_t rnd_mode);
+ friend const mpreal remainder ( const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
+ friend const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
+
+ // Miscellaneous Functions
+ friend const mpreal nexttoward (const mpreal& x, const mpreal& y);
+ friend const mpreal nextabove (const mpreal& x);
+ friend const mpreal nextbelow (const mpreal& x);
+
+ // use gmp_randinit_default() to init state, gmp_randclear() to clear
+ friend const mpreal urandomb (gmp_randstate_t& state);
+
+// MPFR < 2.4.2 Specifics
+#if (MPFR_VERSION <= MPFR_VERSION_NUM(2,4,2))
+ friend const mpreal random2 (mp_size_t size, mp_exp_t exp);
+#endif
+
+ // Instance Checkers
+ friend bool isnan (const mpreal& v);
+ friend bool isinf (const mpreal& v);
+ friend bool isfinite (const mpreal& v);
+
+ friend bool isnum (const mpreal& v);
+ friend bool iszero (const mpreal& v);
+ friend bool isint (const mpreal& v);
+
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
+ friend bool isregular(const mpreal& v);
+#endif
+
+ // Set/Get instance properties
+ inline mp_prec_t get_prec() const;
+ inline void set_prec(mp_prec_t prec, mp_rnd_t rnd_mode = get_default_rnd()); // Change precision with rounding mode
+
+ // Aliases for get_prec(), set_prec() - needed for compatibility with std::complex<mpreal> interface
+ inline mpreal& setPrecision(int Precision, mp_rnd_t RoundingMode = get_default_rnd());
+ inline int getPrecision() const;
+
+ // Set mpreal to +/- inf, NaN, +/-0
+ mpreal& setInf (int Sign = +1);
+ mpreal& setNan ();
+ mpreal& setZero (int Sign = +1);
+ mpreal& setSign (int Sign, mp_rnd_t RoundingMode = get_default_rnd());
+
+ //Exponent
+ mp_exp_t get_exp();
+ int set_exp(mp_exp_t e);
+ int check_range (int t, mp_rnd_t rnd_mode = get_default_rnd());
+ int subnormalize (int t,mp_rnd_t rnd_mode = get_default_rnd());
+
+ // Inexact conversion from float
+ inline bool fits_in_bits(double x, int n);
+
+ // Set/Get global properties
+ static void set_default_prec(mp_prec_t prec);
+ static void set_default_rnd(mp_rnd_t rnd_mode);
+
+ static mp_exp_t get_emin (void);
+ static mp_exp_t get_emax (void);
+ static mp_exp_t get_emin_min (void);
+ static mp_exp_t get_emin_max (void);
+ static mp_exp_t get_emax_min (void);
+ static mp_exp_t get_emax_max (void);
+ static int set_emin (mp_exp_t exp);
+ static int set_emax (mp_exp_t exp);
+
+ // Efficient swapping of two mpreal values - needed for std algorithms
+ friend void swap(mpreal& x, mpreal& y);
+
+ friend const mpreal fmax(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
+ friend const mpreal fmin(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
+
+private:
+ // Human friendly Debug Preview in Visual Studio.
+ // Put one of these lines:
+ //
+ // mpfr::mpreal=<DebugView> ; Show value only
+ // mpfr::mpreal=<DebugView>, <mp[0]._mpfr_prec,u>bits ; Show value & precision
+ //
+ // at the beginning of
+ // [Visual Studio Installation Folder]\Common7\Packages\Debugger\autoexp.dat
+ MPREAL_MSVC_DEBUGVIEW_DATA
+
+ // "Smart" resources deallocation. Checks if instance initialized before deletion.
+ void clear(::mpfr_ptr);
+};
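+
+// A minimal usage sketch (illustrative only, kept as a comment):
+//
+//     mpfr::mpreal::set_default_prec(256);        // 256-bit mantissa
+//     mpfr::mpreal x = 2;
+//     mpfr::mpreal r = sqrt(x, mpfr::mpreal::get_default_rnd());
+//     std::cout << r.toString(50) << std::endl;   // 50 significant digits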
+
+//////////////////////////////////////////////////////////////////////////
+// Exceptions
+class conversion_overflow : public std::exception {
+public:
+ std::string why() { return "inexact conversion from floating point"; }
+};
+
+//////////////////////////////////////////////////////////////////////////
+// Constructors & converters
+// Default constructor: creates mp number and initializes it to 0.
+inline mpreal::mpreal()
+{
+ mpfr_init2 (mpfr_ptr(), mpreal::get_default_prec());
+ mpfr_set_ui(mpfr_ptr(), 0, mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const mpreal& u)
+{
+ mpfr_init2(mpfr_ptr(),mpfr_get_prec(u.mpfr_srcptr()));
+ mpfr_set (mpfr_ptr(),u.mpfr_srcptr(),mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+#ifdef MPREAL_HAVE_MOVE_SUPPORT
+inline mpreal::mpreal(mpreal&& other)
+{
+    mpfr_set_uninitialized(mpfr_ptr());     // make sure "other" holds no pointer to actual data after the swap below
+ mpfr_swap(mpfr_ptr(), other.mpfr_ptr());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal& mpreal::operator=(mpreal&& other)
+{
+ mpfr_swap(mpfr_ptr(), other.mpfr_ptr());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+#endif
+
+inline mpreal::mpreal(const mpfr_t u, bool shared)
+{
+ if(shared)
+ {
+ std::memcpy(mpfr_ptr(), u, sizeof(mpfr_t));
+ }
+ else
+ {
+ mpfr_init2(mpfr_ptr(), mpfr_get_prec(u));
+ mpfr_set (mpfr_ptr(), u, mpreal::get_default_rnd());
+ }
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const mpf_t u)
+{
+ mpfr_init2(mpfr_ptr(),(mp_prec_t) mpf_get_prec(u)); // (gmp: mp_bitcnt_t) unsigned long -> long (mpfr: mp_prec_t)
+ mpfr_set_f(mpfr_ptr(),u,mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const mpz_t u, mp_prec_t prec, mp_rnd_t mode)
+{
+ mpfr_init2(mpfr_ptr(), prec);
+ mpfr_set_z(mpfr_ptr(), u, mode);
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const mpq_t u, mp_prec_t prec, mp_rnd_t mode)
+{
+ mpfr_init2(mpfr_ptr(), prec);
+ mpfr_set_q(mpfr_ptr(), u, mode);
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const double u, mp_prec_t prec, mp_rnd_t mode)
+{
+ mpfr_init2(mpfr_ptr(), prec);
+
+#if (MPREAL_DOUBLE_BITS_OVERFLOW > -1)
+ if(fits_in_bits(u, MPREAL_DOUBLE_BITS_OVERFLOW))
+ {
+ mpfr_set_d(mpfr_ptr(), u, mode);
+ }else
+ throw conversion_overflow();
+#else
+ mpfr_set_d(mpfr_ptr(), u, mode);
+#endif
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const long double u, mp_prec_t prec, mp_rnd_t mode)
+{
+ mpfr_init2 (mpfr_ptr(), prec);
+ mpfr_set_ld(mpfr_ptr(), u, mode);
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const unsigned long int u, mp_prec_t prec, mp_rnd_t mode)
+{
+ mpfr_init2 (mpfr_ptr(), prec);
+ mpfr_set_ui(mpfr_ptr(), u, mode);
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const unsigned int u, mp_prec_t prec, mp_rnd_t mode)
+{
+ mpfr_init2 (mpfr_ptr(), prec);
+ mpfr_set_ui(mpfr_ptr(), u, mode);
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const long int u, mp_prec_t prec, mp_rnd_t mode)
+{
+ mpfr_init2 (mpfr_ptr(), prec);
+ mpfr_set_si(mpfr_ptr(), u, mode);
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const int u, mp_prec_t prec, mp_rnd_t mode)
+{
+ mpfr_init2 (mpfr_ptr(), prec);
+ mpfr_set_si(mpfr_ptr(), u, mode);
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+#if defined (MPREAL_HAVE_INT64_SUPPORT)
+inline mpreal::mpreal(const uint64_t u, mp_prec_t prec, mp_rnd_t mode)
+{
+ mpfr_init2 (mpfr_ptr(), prec);
+ mpfr_set_uj(mpfr_ptr(), u, mode);
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const int64_t u, mp_prec_t prec, mp_rnd_t mode)
+{
+ mpfr_init2 (mpfr_ptr(), prec);
+ mpfr_set_sj(mpfr_ptr(), u, mode);
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+#endif
+
+inline mpreal::mpreal(const char* s, mp_prec_t prec, int base, mp_rnd_t mode)
+{
+ mpfr_init2 (mpfr_ptr(), prec);
+ mpfr_set_str(mpfr_ptr(), s, base, mode);
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mpreal::mpreal(const std::string& s, mp_prec_t prec, int base, mp_rnd_t mode)
+{
+ mpfr_init2 (mpfr_ptr(), prec);
+ mpfr_set_str(mpfr_ptr(), s.c_str(), base, mode);
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline void mpreal::clear(::mpfr_ptr x)
+{
+#ifdef MPREAL_HAVE_MOVE_SUPPORT
+ if(mpfr_is_initialized(x))
+#endif
+ mpfr_clear(x);
+}
+
+inline mpreal::~mpreal()
+{
+ clear(mpfr_ptr());
+}
+
+// internal namespace needed for template magic
+namespace internal{
+
+ // Use SFINAE to restrict instantiation of the arithmetic operators to numeric types only.
+ // This is needed for smooth integration with expression-template libraries such as Eigen.
+ // TODO: Do the same for the boolean operators.
+ template <typename ArgumentType> struct result_type {};
+
+ template <> struct result_type<mpreal> {typedef mpreal type;};
+ template <> struct result_type<mpz_t> {typedef mpreal type;};
+ template <> struct result_type<mpq_t> {typedef mpreal type;};
+ template <> struct result_type<long double> {typedef mpreal type;};
+ template <> struct result_type<double> {typedef mpreal type;};
+ template <> struct result_type<unsigned long int> {typedef mpreal type;};
+ template <> struct result_type<unsigned int> {typedef mpreal type;};
+ template <> struct result_type<long int> {typedef mpreal type;};
+ template <> struct result_type<int> {typedef mpreal type;};
+
+#if defined (MPREAL_HAVE_INT64_SUPPORT)
+ template <> struct result_type<int64_t > {typedef mpreal type;};
+ template <> struct result_type<uint64_t > {typedef mpreal type;};
+#endif
+}
+
+// + Addition
+template <typename Rhs>
+inline const typename internal::result_type<Rhs>::type
+ operator+(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) += rhs; }
+
+template <typename Lhs>
+inline const typename internal::result_type<Lhs>::type
+ operator+(const Lhs& lhs, const mpreal& rhs){ return mpreal(rhs) += lhs; }
+
+// - Subtraction
+template <typename Rhs>
+inline const typename internal::result_type<Rhs>::type
+ operator-(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) -= rhs; }
+
+template <typename Lhs>
+inline const typename internal::result_type<Lhs>::type
+ operator-(const Lhs& lhs, const mpreal& rhs){ return mpreal(lhs) -= rhs; }
+
+// * Multiplication
+template <typename Rhs>
+inline const typename internal::result_type<Rhs>::type
+ operator*(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) *= rhs; }
+
+template <typename Lhs>
+inline const typename internal::result_type<Lhs>::type
+ operator*(const Lhs& lhs, const mpreal& rhs){ return mpreal(rhs) *= lhs; }
+
+// / Division
+template <typename Rhs>
+inline const typename internal::result_type<Rhs>::type
+ operator/(const mpreal& lhs, const Rhs& rhs){ return mpreal(lhs) /= rhs; }
+
+template <typename Lhs>
+inline const typename internal::result_type<Lhs>::type
+ operator/(const Lhs& lhs, const mpreal& rhs){ return mpreal(lhs) /= rhs; }
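+
+// Editor's sketch (not part of the original source): the result_type
+// specializations above make the mixed-type operators visible only for the
+// listed numeric types, which keeps them from hijacking overload resolution
+// in expression-template libraries such as Eigen. For example:
+//
+//     mpreal a = 2;
+//     mpreal b = a + 3.5;   // OK: internal::result_type<double> is specialized
+//     mpreal c = 7 / a;     // OK: internal::result_type<int> is specialized
+//     // a + "text";        // error: no result_type<const char*> specialization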
+
+//////////////////////////////////////////////////////////////////////////
+// sqrt
+const mpreal sqrt(const unsigned int v, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal sqrt(const long int v, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal sqrt(const int v, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal sqrt(const long double v, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal sqrt(const double v, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+
+// abs
+inline const mpreal abs(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd());
+
+//////////////////////////////////////////////////////////////////////////
+// pow
+const mpreal pow(const mpreal& a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const mpreal& a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const mpreal& a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const mpreal& a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+
+const mpreal pow(const unsigned int a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long int a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const int a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long double a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const double a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+
+const mpreal pow(const unsigned long int a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const unsigned long int a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const unsigned long int a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const unsigned long int a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const unsigned long int a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+
+const mpreal pow(const unsigned int a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const unsigned int a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const unsigned int a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const unsigned int a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const unsigned int a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const unsigned int a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+
+const mpreal pow(const long int a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long int a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long int a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long int a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long int a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long int a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+
+const mpreal pow(const int a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const int a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const int a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const int a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const int a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const int a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+
+const mpreal pow(const long double a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long double a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long double a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long double a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const long double a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+
+const mpreal pow(const double a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const double a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const double a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const double a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+const mpreal pow(const double a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+
+inline const mpreal mul_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+inline const mpreal mul_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+inline const mpreal div_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
+
+//////////////////////////////////////////////////////////////////////////
+// Estimate machine epsilon for the given precision
+// Returns smallest eps such that 1.0 + eps != 1.0
+inline mpreal machine_epsilon(mp_prec_t prec = mpreal::get_default_prec());
+
+// Returns smallest eps such that x + eps != x (relative machine epsilon)
+inline mpreal machine_epsilon(const mpreal& x);
+
+// Gives max & min values for the required precision,
+// minval is 'safe' meaning 1 / minval does not overflow
+// maxval is 'safe' meaning 1 / maxval does not underflow
+inline mpreal minval(mp_prec_t prec = mpreal::get_default_prec());
+inline mpreal maxval(mp_prec_t prec = mpreal::get_default_prec());
+
+// 'Dirty' equality check 1: |a-b| < min{|a|,|b|} * eps
+inline bool isEqualFuzzy(const mpreal& a, const mpreal& b, const mpreal& eps);
+
+// 'Dirty' equality check 2: |a-b| < min{|a|,|b|} * eps( min{|a|,|b|} )
+inline bool isEqualFuzzy(const mpreal& a, const mpreal& b);
+
+// 'Bitwise' equality check
+// maxUlps - a and b may differ by at most maxUlps units in the last place (ULPs).
+inline bool isEqualUlps(const mpreal& a, const mpreal& b, int maxUlps);
+
+//////////////////////////////////////////////////////////////////////////
+// Convert precision in 'bits' to decimal digits and vice versa.
+// bits = ceil(digits*log[2](10))
+// digits = floor(bits*log[10](2))
+
+inline mp_prec_t digits2bits(int d);
+inline int bits2digits(mp_prec_t b);
+
+//////////////////////////////////////////////////////////////////////////
+// min, max
+const mpreal (max)(const mpreal& x, const mpreal& y);
+const mpreal (min)(const mpreal& x, const mpreal& y);
+
+//////////////////////////////////////////////////////////////////////////
+// Implementation
+//////////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////////
+// Operators - Assignment
+inline mpreal& mpreal::operator=(const mpreal& v)
+{
+ if (this != &v)
+ {
+ mp_prec_t tp = mpfr_get_prec( mpfr_srcptr());
+ mp_prec_t vp = mpfr_get_prec(v.mpfr_srcptr());
+
+ if(tp != vp){
+ clear(mpfr_ptr());
+ mpfr_init2(mpfr_ptr(), vp);
+ }
+
+ mpfr_set(mpfr_ptr(), v.mpfr_srcptr(), mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ }
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const mpf_t v)
+{
+ mpfr_set_f(mpfr_ptr(), v, mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const mpz_t v)
+{
+ mpfr_set_z(mpfr_ptr(), v, mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const mpq_t v)
+{
+ mpfr_set_q(mpfr_ptr(), v, mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const long double v)
+{
+ mpfr_set_ld(mpfr_ptr(), v, mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const double v)
+{
+#if (MPREAL_DOUBLE_BITS_OVERFLOW > -1)
+ if(fits_in_bits(v, MPREAL_DOUBLE_BITS_OVERFLOW))
+ {
+ mpfr_set_d(mpfr_ptr(),v,mpreal::get_default_rnd());
+ }
+ else throw conversion_overflow();
+#else
+ mpfr_set_d(mpfr_ptr(),v,mpreal::get_default_rnd());
+#endif
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const unsigned long int v)
+{
+ mpfr_set_ui(mpfr_ptr(), v, mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const unsigned int v)
+{
+ mpfr_set_ui(mpfr_ptr(), v, mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const long int v)
+{
+ mpfr_set_si(mpfr_ptr(), v, mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const int v)
+{
+ mpfr_set_si(mpfr_ptr(), v, mpreal::get_default_rnd());
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const char* s)
+{
+ // Use the other converters for finer control over base, precision, and rounding:
+ //
+ // mpreal(const char* s, mp_prec_t prec, int base, mp_rnd_t mode)
+ // mpreal(const std::string& s,mp_prec_t prec, int base, mp_rnd_t mode)
+ //
+ // Here we assume base = 10 and use the precision of the target variable.
+
+ mpfr_t t;
+
+ mpfr_init2(t, mpfr_get_prec(mpfr_srcptr()));
+
+ if(0 == mpfr_set_str(t, s, 10, mpreal::get_default_rnd()))
+ {
+ mpfr_set(mpfr_ptr(), t, mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ }
+
+ clear(t);
+ return *this;
+}
+
+inline mpreal& mpreal::operator=(const std::string& s)
+{
+ // Use the other converters for finer control over base, precision, and rounding:
+ //
+ // mpreal(const char* s, mp_prec_t prec, int base, mp_rnd_t mode)
+ // mpreal(const std::string& s,mp_prec_t prec, int base, mp_rnd_t mode)
+ //
+ // Here we assume base = 10 and use the precision of the target variable.
+
+ mpfr_t t;
+
+ mpfr_init2(t, mpfr_get_prec(mpfr_srcptr()));
+
+ if(0 == mpfr_set_str(t, s.c_str(), 10, mpreal::get_default_rnd()))
+ {
+ mpfr_set(mpfr_ptr(), t, mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ }
+
+ clear(t);
+ return *this;
+}
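+
+// Usage sketch (editor's illustration): string assignment parses in base 10
+// at the precision the target variable already carries.
+//
+//     mpreal x(0, 256);                 // 256-bit target
+//     x = "3.14159265358979323846";     // parsed at 256 bits, default rounding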
+
+
+//////////////////////////////////////////////////////////////////////////
+// + Addition
+inline mpreal& mpreal::operator+=(const mpreal& v)
+{
+ mpfr_add(mpfr_ptr(), mpfr_srcptr(), v.mpfr_srcptr(), mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator+=(const mpf_t u)
+{
+ *this += mpreal(u);
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator+=(const mpz_t u)
+{
+ mpfr_add_z(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator+=(const mpq_t u)
+{
+ mpfr_add_q(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator+= (const long double u)
+{
+ *this += mpreal(u);
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator+= (const double u)
+{
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
+ mpfr_add_d(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
+#else
+ *this += mpreal(u);
+#endif
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator+=(const unsigned long int u)
+{
+ mpfr_add_ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator+=(const unsigned int u)
+{
+ mpfr_add_ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator+=(const long int u)
+{
+ mpfr_add_si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator+=(const int u)
+{
+ mpfr_add_si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+#if defined (MPREAL_HAVE_INT64_SUPPORT)
+inline mpreal& mpreal::operator+=(const int64_t u){ *this += mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator+=(const uint64_t u){ *this += mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator-=(const int64_t u){ *this -= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator-=(const uint64_t u){ *this -= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator*=(const int64_t u){ *this *= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator*=(const uint64_t u){ *this *= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator/=(const int64_t u){ *this /= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+inline mpreal& mpreal::operator/=(const uint64_t u){ *this /= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
+#endif
+
+inline const mpreal mpreal::operator+()const { return mpreal(*this); }
+
+inline const mpreal operator+(const mpreal& a, const mpreal& b)
+{
+ mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr())));
+ mpfr_add(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd());
+ return c;
+}
+
+inline mpreal& mpreal::operator++()
+{
+ return *this += 1;
+}
+
+inline const mpreal mpreal::operator++ (int)
+{
+ mpreal x(*this);
+ *this += 1;
+ return x;
+}
+
+inline mpreal& mpreal::operator--()
+{
+ return *this -= 1;
+}
+
+inline const mpreal mpreal::operator-- (int)
+{
+ mpreal x(*this);
+ *this -= 1;
+ return x;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// - Subtraction
+inline mpreal& mpreal::operator-=(const mpreal& v)
+{
+ mpfr_sub(mpfr_ptr(),mpfr_srcptr(),v.mpfr_srcptr(),mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator-=(const mpz_t v)
+{
+ mpfr_sub_z(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator-=(const mpq_t v)
+{
+ mpfr_sub_q(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator-=(const long double v)
+{
+ *this -= mpreal(v);
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator-=(const double v)
+{
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
+ mpfr_sub_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+#else
+ *this -= mpreal(v);
+#endif
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator-=(const unsigned long int v)
+{
+ mpfr_sub_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator-=(const unsigned int v)
+{
+ mpfr_sub_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator-=(const long int v)
+{
+ mpfr_sub_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator-=(const int v)
+{
+ mpfr_sub_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline const mpreal mpreal::operator-()const
+{
+ mpreal u(*this);
+ mpfr_neg(u.mpfr_ptr(),u.mpfr_srcptr(),mpreal::get_default_rnd());
+ return u;
+}
+
+inline const mpreal operator-(const mpreal& a, const mpreal& b)
+{
+ mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr())));
+ mpfr_sub(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd());
+ return c;
+}
+
+inline const mpreal operator-(const double b, const mpreal& a)
+{
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
+ mpreal x(0, mpfr_get_prec(a.mpfr_ptr()));
+ mpfr_d_sub(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
+ return x;
+#else
+ mpreal x(b, mpfr_get_prec(a.mpfr_ptr()));
+ x -= a;
+ return x;
+#endif
+}
+
+inline const mpreal operator-(const unsigned long int b, const mpreal& a)
+{
+ mpreal x(0, mpfr_get_prec(a.mpfr_ptr()));
+ mpfr_ui_sub(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
+ return x;
+}
+
+inline const mpreal operator-(const unsigned int b, const mpreal& a)
+{
+ mpreal x(0, mpfr_get_prec(a.mpfr_ptr()));
+ mpfr_ui_sub(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
+ return x;
+}
+
+inline const mpreal operator-(const long int b, const mpreal& a)
+{
+ mpreal x(0, mpfr_get_prec(a.mpfr_ptr()));
+ mpfr_si_sub(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
+ return x;
+}
+
+inline const mpreal operator-(const int b, const mpreal& a)
+{
+ mpreal x(0, mpfr_get_prec(a.mpfr_ptr()));
+ mpfr_si_sub(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
+ return x;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// * Multiplication
+inline mpreal& mpreal::operator*= (const mpreal& v)
+{
+ mpfr_mul(mpfr_ptr(),mpfr_srcptr(),v.mpfr_srcptr(),mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator*=(const mpz_t v)
+{
+ mpfr_mul_z(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator*=(const mpq_t v)
+{
+ mpfr_mul_q(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator*=(const long double v)
+{
+ *this *= mpreal(v);
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator*=(const double v)
+{
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
+ mpfr_mul_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+#else
+ *this *= mpreal(v);
+#endif
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator*=(const unsigned long int v)
+{
+ mpfr_mul_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator*=(const unsigned int v)
+{
+ mpfr_mul_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator*=(const long int v)
+{
+ mpfr_mul_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator*=(const int v)
+{
+ mpfr_mul_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline const mpreal operator*(const mpreal& a, const mpreal& b)
+{
+ mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr())));
+ mpfr_mul(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd());
+ return c;
+}
+
+//////////////////////////////////////////////////////////////////////////
+// / Division
+inline mpreal& mpreal::operator/=(const mpreal& v)
+{
+ mpfr_div(mpfr_ptr(),mpfr_srcptr(),v.mpfr_srcptr(),mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator/=(const mpz_t v)
+{
+ mpfr_div_z(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator/=(const mpq_t v)
+{
+ mpfr_div_q(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator/=(const long double v)
+{
+ *this /= mpreal(v);
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator/=(const double v)
+{
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
+ mpfr_div_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+#else
+ *this /= mpreal(v);
+#endif
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator/=(const unsigned long int v)
+{
+ mpfr_div_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator/=(const unsigned int v)
+{
+ mpfr_div_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator/=(const long int v)
+{
+ mpfr_div_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator/=(const int v)
+{
+ mpfr_div_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline const mpreal operator/(const mpreal& a, const mpreal& b)
+{
+ mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_srcptr()), mpfr_get_prec(b.mpfr_srcptr())));
+ mpfr_div(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd());
+ return c;
+}
+
+inline const mpreal operator/(const unsigned long int b, const mpreal& a)
+{
+ mpreal x(0, mpfr_get_prec(a.mpfr_srcptr()));
+ mpfr_ui_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
+ return x;
+}
+
+inline const mpreal operator/(const unsigned int b, const mpreal& a)
+{
+ mpreal x(0, mpfr_get_prec(a.mpfr_srcptr()));
+ mpfr_ui_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
+ return x;
+}
+
+inline const mpreal operator/(const long int b, const mpreal& a)
+{
+ mpreal x(0, mpfr_get_prec(a.mpfr_srcptr()));
+ mpfr_si_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
+ return x;
+}
+
+inline const mpreal operator/(const int b, const mpreal& a)
+{
+ mpreal x(0, mpfr_get_prec(a.mpfr_srcptr()));
+ mpfr_si_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
+ return x;
+}
+
+inline const mpreal operator/(const double b, const mpreal& a)
+{
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
+ mpreal x(0, mpfr_get_prec(a.mpfr_srcptr()));
+ mpfr_d_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
+ return x;
+#else
+ mpreal x(0, mpfr_get_prec(a.mpfr_ptr()));
+ x /= a;
+ return x;
+#endif
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Shifts operators - Multiplication/Division by power of 2
+inline mpreal& mpreal::operator<<=(const unsigned long int u)
+{
+ mpfr_mul_2ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator<<=(const unsigned int u)
+{
+ mpfr_mul_2ui(mpfr_ptr(),mpfr_srcptr(),static_cast<unsigned long int>(u),mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator<<=(const long int u)
+{
+ mpfr_mul_2si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator<<=(const int u)
+{
+ mpfr_mul_2si(mpfr_ptr(),mpfr_srcptr(),static_cast<long int>(u),mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator>>=(const unsigned long int u)
+{
+ mpfr_div_2ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator>>=(const unsigned int u)
+{
+ mpfr_div_2ui(mpfr_ptr(),mpfr_srcptr(),static_cast<unsigned long int>(u),mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator>>=(const long int u)
+{
+ mpfr_div_2si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::operator>>=(const int u)
+{
+ mpfr_div_2si(mpfr_ptr(),mpfr_srcptr(),static_cast<long int>(u),mpreal::get_default_rnd());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline const mpreal operator<<(const mpreal& v, const unsigned long int k)
+{
+ return mul_2ui(v,k);
+}
+
+inline const mpreal operator<<(const mpreal& v, const unsigned int k)
+{
+ return mul_2ui(v,static_cast<unsigned long int>(k));
+}
+
+inline const mpreal operator<<(const mpreal& v, const long int k)
+{
+ return mul_2si(v,k);
+}
+
+inline const mpreal operator<<(const mpreal& v, const int k)
+{
+ return mul_2si(v,static_cast<long int>(k));
+}
+
+inline const mpreal operator>>(const mpreal& v, const unsigned long int k)
+{
+ return div_2ui(v,k);
+}
+
+inline const mpreal operator>>(const mpreal& v, const long int k)
+{
+ return div_2si(v,k);
+}
+
+inline const mpreal operator>>(const mpreal& v, const unsigned int k)
+{
+ return div_2ui(v,static_cast<unsigned long int>(k));
+}
+
+inline const mpreal operator>>(const mpreal& v, const int k)
+{
+ return div_2si(v,static_cast<long int>(k));
+}
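+
+// Editor's note with a small example: the shift operators scale by exact
+// powers of two, so no precision is lost while the exponent stays in range.
+//
+//     mpreal v = 3;
+//     mpreal a = v << 2;   // 3 * 2^2 == 12
+//     mpreal b = v >> 1;   // 3 / 2^1 == 1.5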
+
+// mul_2ui
+inline const mpreal mul_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode)
+{
+ mpreal x(v);
+ mpfr_mul_2ui(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode);
+ return x;
+}
+
+// mul_2si
+inline const mpreal mul_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode)
+{
+ mpreal x(v);
+ mpfr_mul_2si(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode);
+ return x;
+}
+
+inline const mpreal div_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode)
+{
+ mpreal x(v);
+ mpfr_div_2ui(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode);
+ return x;
+}
+
+inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode)
+{
+ mpreal x(v);
+ mpfr_div_2si(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode);
+ return x;
+}
+
+//////////////////////////////////////////////////////////////////////////
+//Boolean operators
+inline bool operator > (const mpreal& a, const mpreal& b){ return (mpfr_greater_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
+inline bool operator >= (const mpreal& a, const mpreal& b){ return (mpfr_greaterequal_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
+inline bool operator < (const mpreal& a, const mpreal& b){ return (mpfr_less_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
+inline bool operator <= (const mpreal& a, const mpreal& b){ return (mpfr_lessequal_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
+inline bool operator == (const mpreal& a, const mpreal& b){ return (mpfr_equal_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
+inline bool operator != (const mpreal& a, const mpreal& b){ return (mpfr_lessgreater_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); }
+
+inline bool operator == (const mpreal& a, const unsigned long int b ){ return (mpfr_cmp_ui(a.mpfr_srcptr(),b) == 0 ); }
+inline bool operator == (const mpreal& a, const unsigned int b ){ return (mpfr_cmp_ui(a.mpfr_srcptr(),b) == 0 ); }
+inline bool operator == (const mpreal& a, const long int b ){ return (mpfr_cmp_si(a.mpfr_srcptr(),b) == 0 ); }
+inline bool operator == (const mpreal& a, const int b ){ return (mpfr_cmp_si(a.mpfr_srcptr(),b) == 0 ); }
+inline bool operator == (const mpreal& a, const long double b ){ return (mpfr_cmp_ld(a.mpfr_srcptr(),b) == 0 ); }
+inline bool operator == (const mpreal& a, const double b ){ return (mpfr_cmp_d (a.mpfr_srcptr(),b) == 0 ); }
+
+
+inline bool isnan (const mpreal& op){ return (mpfr_nan_p (op.mpfr_srcptr()) != 0 ); }
+inline bool isinf (const mpreal& op){ return (mpfr_inf_p (op.mpfr_srcptr()) != 0 ); }
+inline bool isfinite (const mpreal& op){ return (mpfr_number_p (op.mpfr_srcptr()) != 0 ); }
+inline bool iszero (const mpreal& op){ return (mpfr_zero_p (op.mpfr_srcptr()) != 0 ); }
+inline bool isint (const mpreal& op){ return (mpfr_integer_p(op.mpfr_srcptr()) != 0 ); }
+
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
+inline bool isregular(const mpreal& op){ return (mpfr_regular_p(op.mpfr_srcptr()) != 0 );}
+#endif
+
+//////////////////////////////////////////////////////////////////////////
+// Type Converters
+inline bool mpreal::toBool (mp_rnd_t /*mode*/) const { return mpfr_zero_p (mpfr_srcptr()) == 0; }
+inline long mpreal::toLong (mp_rnd_t mode) const { return mpfr_get_si (mpfr_srcptr(), mode); }
+inline unsigned long mpreal::toULong (mp_rnd_t mode) const { return mpfr_get_ui (mpfr_srcptr(), mode); }
+inline float mpreal::toFloat (mp_rnd_t mode) const { return mpfr_get_flt(mpfr_srcptr(), mode); }
+inline double mpreal::toDouble (mp_rnd_t mode) const { return mpfr_get_d (mpfr_srcptr(), mode); }
+inline long double mpreal::toLDouble(mp_rnd_t mode) const { return mpfr_get_ld (mpfr_srcptr(), mode); }
+
+#if defined (MPREAL_HAVE_INT64_SUPPORT)
+inline int64_t mpreal::toInt64 (mp_rnd_t mode) const{ return mpfr_get_sj(mpfr_srcptr(), mode); }
+inline uint64_t mpreal::toUInt64(mp_rnd_t mode) const{ return mpfr_get_uj(mpfr_srcptr(), mode); }
+#endif
+
+inline ::mpfr_ptr mpreal::mpfr_ptr() { return mp; }
+inline ::mpfr_srcptr mpreal::mpfr_ptr() const { return mp; }
+inline ::mpfr_srcptr mpreal::mpfr_srcptr() const { return mp; }
+
+template <class T>
+inline std::string toString(T t, std::ios_base & (*f)(std::ios_base&))
+{
+ std::ostringstream oss;
+ oss << f << t;
+ return oss.str();
+}
+
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
+
+inline std::string mpreal::toString(const std::string& format) const
+{
+ char *s = NULL;
+ std::string out;
+
+ if( !format.empty() )
+ {
+ if(!(mpfr_asprintf(&s, format.c_str(), mpfr_srcptr()) < 0))
+ {
+ out = std::string(s);
+
+ mpfr_free_str(s);
+ }
+ }
+
+ return out;
+}
+
+#endif
+
+inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const
+{
+ // TODO: Add extended format specification (f, e, rounding mode) as is done in the output operator.
+ (void)b;
+ (void)mode;
+
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
+
+ std::ostringstream format;
+
+ int digits = (n >= 0) ? n : bits2digits(mpfr_get_prec(mpfr_srcptr()));
+
+ format << "%." << digits << "RNg";
+
+ return toString(format.str());
+
+#else
+
+ char *s, *ns = NULL;
+ size_t slen, nslen;
+ mp_exp_t exp;
+ std::string out;
+
+ if(mpfr_inf_p(mp))
+ {
+ if(mpfr_sgn(mp)>0) return "+Inf";
+ else return "-Inf";
+ }
+
+ if(mpfr_zero_p(mp)) return "0";
+ if(mpfr_nan_p(mp)) return "NaN";
+
+ s = mpfr_get_str(NULL, &exp, b, 0, mp, mode);
+ ns = mpfr_get_str(NULL, &exp, b, (std::max)(0,n), mp, mode);
+
+ if(s!=NULL && ns!=NULL)
+ {
+ slen = strlen(s);
+ nslen = strlen(ns);
+ if(nslen<=slen)
+ {
+ mpfr_free_str(s);
+ s = ns;
+ slen = nslen;
+ }
+ else {
+ mpfr_free_str(ns);
+ }
+
+ // Produce human-friendly formatting when possible
+ if (exp>0 && static_cast<size_t>(exp)<slen)
+ {
+ if(s[0]=='-')
+ {
+ // Remove zeros starting from right end
+ char* ptr = s+slen-1;
+ while (*ptr=='0' && ptr>s+exp) ptr--;
+
+ if(ptr==s+exp) out = std::string(s,exp+1);
+ else out = std::string(s,exp+1)+'.'+std::string(s+exp+1,ptr-(s+exp+1)+1);
+
+ //out = string(s,exp+1)+'.'+string(s+exp+1);
+ }
+ else
+ {
+ // Remove zeros starting from right end
+ char* ptr = s+slen-1;
+ while (*ptr=='0' && ptr>s+exp-1) ptr--;
+
+ if(ptr==s+exp-1) out = std::string(s,exp);
+ else out = std::string(s,exp)+'.'+std::string(s+exp,ptr-(s+exp)+1);
+
+ //out = string(s,exp)+'.'+string(s+exp);
+ }
+
+ }else{ // i.e. exp<=0 || exp>=slen
+ if(s[0]=='-')
+ {
+ // Remove zeros starting from right end
+ char* ptr = s+slen-1;
+ while (*ptr=='0' && ptr>s+1) ptr--;
+
+ if(ptr==s+1) out = std::string(s,2);
+ else out = std::string(s,2)+'.'+std::string(s+2,ptr-(s+2)+1);
+
+ //out = string(s,2)+'.'+string(s+2);
+ }
+ else
+ {
+ // Remove zeros starting from right end
+ char* ptr = s+slen-1;
+ while (*ptr=='0' && ptr>s) ptr--;
+
+ if(ptr==s) out = std::string(s,1);
+ else out = std::string(s,1)+'.'+std::string(s+1,ptr-(s+1)+1);
+
+ //out = string(s,1)+'.'+string(s+1);
+ }
+
+ // Make final string
+ if(--exp)
+ {
+ if(exp>0) out += "e+"+mpfr::toString<mp_exp_t>(exp,std::dec);
+ else out += "e"+mpfr::toString<mp_exp_t>(exp,std::dec);
+ }
+ }
+
+ mpfr_free_str(s);
+ return out;
+ }else{
+ return "conversion error!";
+ }
+#endif
+}
+
+
+//////////////////////////////////////////////////////////////////////////
+// I/O
+inline std::ostream& mpreal::output(std::ostream& os) const
+{
+ std::ostringstream format;
+ const std::ios::fmtflags flags = os.flags();
+
+ format << ((flags & std::ios::showpos) ? "%+" : "%");
+ if (os.precision() >= 0)
+ format << '.' << os.precision() << "R*"
+ << ((flags & std::ios::floatfield) == std::ios::fixed ? 'f' :
+ (flags & std::ios::floatfield) == std::ios::scientific ? 'e' :
+ 'g');
+ else
+ format << "R*e";
+
+ char *s = NULL;
+ if(!(mpfr_asprintf(&s, format.str().c_str(),
+ mpfr::mpreal::get_default_rnd(),
+ mpfr_srcptr())
+ < 0))
+ {
+ os << std::string(s);
+ mpfr_free_str(s);
+ }
+ return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const mpreal& v)
+{
+ return v.output(os);
+}
+
+inline std::istream& operator>>(std::istream &is, mpreal& v)
+{
+ // TODO: use std::hexfloat and other stream flags to set up the base
+ std::string tmp;
+ is >> tmp;
+ mpfr_set_str(v.mpfr_ptr(), tmp.c_str(), 10, mpreal::get_default_rnd());
+ return is;
+}
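+
+// I/O usage sketch (editor's illustration): the inserter honours the stream's
+// precision and float-field flags through the mpfr_asprintf format built above.
+//
+//     mpreal pi = const_pi(digits2bits(30));
+//     std::cout << std::setprecision(30) << pi << '\n';   // requires <iomanip>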
+
+//////////////////////////////////////////////////////////////////////////
+// Bits - decimal digits relation
+// bits = ceil(digits*log[2](10))
+// digits = floor(bits*log[10](2))
+
+inline mp_prec_t digits2bits(int d)
+{
+ const double LOG2_10 = 3.3219280948873624;
+
+ return mp_prec_t(std::ceil( d * LOG2_10 ));
+}
+
+inline int bits2digits(mp_prec_t b)
+{
+ const double LOG10_2 = 0.30102999566398119;
+
+ return int(std::floor( b * LOG10_2 ));
+}
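+
+// Worked example (editor's illustration) of the conversion above:
+//   digits2bits(50)  == ceil (50  * 3.3219...) == 167 bits
+//   bits2digits(167) == floor(167 * 0.3010...) == 50 digits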
+
+//////////////////////////////////////////////////////////////////////////
+// Set/Get number properties
+inline int sgn(const mpreal& op)
+{
+ // mpfr_signbit() may return any non-zero value when the sign bit is set, so test against zero
+ int r = mpfr_signbit(op.mpfr_srcptr());
+ return (r != 0 ? -1 : 1);
+}
+
+inline mpreal& mpreal::setSign(int sign, mp_rnd_t RoundingMode)
+{
+ mpfr_setsign(mpfr_ptr(), mpfr_srcptr(), (sign < 0 ? 1 : 0), RoundingMode);
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline int mpreal::getPrecision() const
+{
+ return int(mpfr_get_prec(mpfr_srcptr()));
+}
+
+inline mpreal& mpreal::setPrecision(int Precision, mp_rnd_t RoundingMode)
+{
+ mpfr_prec_round(mpfr_ptr(), Precision, RoundingMode);
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::setInf(int sign)
+{
+ mpfr_set_inf(mpfr_ptr(), sign);
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::setNan()
+{
+ mpfr_set_nan(mpfr_ptr());
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mpreal& mpreal::setZero(int sign)
+{
+
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
+ mpfr_set_zero(mpfr_ptr(), sign);
+#else
+ mpfr_set_si(mpfr_ptr(), 0, (mpfr_get_default_rounding_mode)());
+ setSign(sign);
+#endif
+
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return *this;
+}
+
+inline mp_prec_t mpreal::get_prec() const
+{
+ return mpfr_get_prec(mpfr_srcptr());
+}
+
+inline void mpreal::set_prec(mp_prec_t prec, mp_rnd_t rnd_mode)
+{
+ mpfr_prec_round(mpfr_ptr(),prec,rnd_mode);
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+}
+
+inline mp_exp_t mpreal::get_exp ()
+{
+ return mpfr_get_exp(mpfr_srcptr());
+}
+
+inline int mpreal::set_exp (mp_exp_t e)
+{
+ int x = mpfr_set_exp(mpfr_ptr(), e);
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return x;
+}
+
+inline const mpreal frexp(const mpreal& v, mp_exp_t* exp)
+{
+ mpreal x(v);
+ *exp = x.get_exp();
+ x.set_exp(0);
+ return x;
+}
+
+inline const mpreal ldexp(const mpreal& v, mp_exp_t exp)
+{
+ mpreal x(v);
+
+ // rounding is not important since we are just adjusting the exponent
+ mpfr_mul_2si(x.mpfr_ptr(), x.mpfr_srcptr(), exp, mpreal::get_default_rnd());
+ return x;
+}
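+
+// Editor's sketch: frexp()/ldexp() round-trip a value through its exponent,
+// where x stands for any previously initialized mpreal.
+//
+//     mp_exp_t e;
+//     mpreal m = frexp(x, &e);   // x == m * 2^e, m has exponent 0
+//     mpreal y = ldexp(m, e);    // y == x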
+
+inline mpreal machine_epsilon(mp_prec_t prec)
+{
+ /* the smallest eps such that 1 + eps != 1 */
+ return machine_epsilon(mpreal(1, prec));
+}
+
+inline mpreal machine_epsilon(const mpreal& x)
+{
+ /* the smallest eps such that x + eps != x */
+ if( x < 0)
+ {
+ return nextabove(-x) + x;
+ }else{
+ return nextabove( x) - x;
+ }
+}
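+
+// Editor's sketch: at 53 bits of precision (the IEEE double format) this
+// reproduces the familiar epsilon of 2^-52.
+//
+//     mpreal eps = machine_epsilon(mpreal(1, 53));   // 2^-52 ~= 2.22e-16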
+
+// minval is 'safe' meaning 1 / minval does not overflow
+inline mpreal minval(mp_prec_t prec)
+{
+ /* min = 1/2 * 2^emin = 2^(emin - 1) */
+ return mpreal(1, prec) << (mpreal::get_emin() - 1);
+}
+
+// maxval is 'safe' meaning 1 / maxval does not underflow
+inline mpreal maxval(mp_prec_t prec)
+{
+ /* max = (1 - eps) * 2^emax, eps is machine epsilon */
+ return (mpreal(1, prec) - machine_epsilon(prec)) << mpreal::get_emax();
+}
+
+inline bool isEqualUlps(const mpreal& a, const mpreal& b, int maxUlps)
+{
+ return abs(a - b) <= machine_epsilon((max)(abs(a), abs(b))) * maxUlps;
+}
+
+inline bool isEqualFuzzy(const mpreal& a, const mpreal& b, const mpreal& eps)
+{
+ return abs(a - b) <= eps;
+}
+
+inline bool isEqualFuzzy(const mpreal& a, const mpreal& b)
+{
+ return isEqualFuzzy(a, b, machine_epsilon((max)(1, (min)(abs(a), abs(b)))));
+}
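+
+// Usage sketch (editor's illustration): fuzzy comparison absorbs the rounding
+// noise of the operands' own precision.
+//
+//     mpreal a = mpreal(1) / 3;
+//     bool ok = isEqualFuzzy(a * 3, mpreal(1));   // typically true: error < eps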
+
+inline const mpreal modf(const mpreal& v, mpreal& n)
+{
+ mpreal f(v);
+
+ // rounding is not important since f is an exact copy of v (same precision)
+ mpfr_frac (f.mpfr_ptr(),f.mpfr_srcptr(),mpreal::get_default_rnd());
+ mpfr_trunc(n.mpfr_ptr(),v.mpfr_srcptr());
+ return f;
+}
+
+inline int mpreal::check_range (int t, mp_rnd_t rnd_mode)
+{
+ return mpfr_check_range(mpfr_ptr(),t,rnd_mode);
+}
+
+inline int mpreal::subnormalize (int t,mp_rnd_t rnd_mode)
+{
+ int r = mpfr_subnormalize(mpfr_ptr(),t,rnd_mode);
+ MPREAL_MSVC_DEBUGVIEW_CODE;
+ return r;
+}
+
+inline mp_exp_t mpreal::get_emin (void)
+{
+ return mpfr_get_emin();
+}
+
+inline int mpreal::set_emin (mp_exp_t exp)
+{
+ return mpfr_set_emin(exp);
+}
+
+inline mp_exp_t mpreal::get_emax (void)
+{
+ return mpfr_get_emax();
+}
+
+inline int mpreal::set_emax (mp_exp_t exp)
+{
+ return mpfr_set_emax(exp);
+}
+
+inline mp_exp_t mpreal::get_emin_min (void)
+{
+ return mpfr_get_emin_min();
+}
+
+inline mp_exp_t mpreal::get_emin_max (void)
+{
+ return mpfr_get_emin_max();
+}
+
+inline mp_exp_t mpreal::get_emax_min (void)
+{
+ return mpfr_get_emax_min();
+}
+
+inline mp_exp_t mpreal::get_emax_max (void)
+{
+ return mpfr_get_emax_max();
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Mathematical Functions
+//////////////////////////////////////////////////////////////////////////
+#define MPREAL_UNARY_MATH_FUNCTION_BODY(f) \
+ mpreal y(0, mpfr_get_prec(x.mpfr_srcptr())); \
+ mpfr_##f(y.mpfr_ptr(), x.mpfr_srcptr(), r); \
+ return y;
+
+inline const mpreal sqr (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
+{ MPREAL_UNARY_MATH_FUNCTION_BODY(sqr ); }
+
+inline const mpreal sqrt (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
+{ MPREAL_UNARY_MATH_FUNCTION_BODY(sqrt); }
+
+inline const mpreal sqrt(const unsigned long int x, mp_rnd_t r)
+{
+ mpreal y;
+ mpfr_sqrt_ui(y.mpfr_ptr(), x, r);
+ return y;
+}
+
+inline const mpreal sqrt(const unsigned int v, mp_rnd_t rnd_mode)
+{
+ return sqrt(static_cast<unsigned long int>(v),rnd_mode);
+}
+
+inline const mpreal sqrt(const long int v, mp_rnd_t rnd_mode)
+{
+ if (v>=0) return sqrt(static_cast<unsigned long int>(v),rnd_mode);
+ else return mpreal().setNan(); // NaN
+}
+
+inline const mpreal sqrt(const int v, mp_rnd_t rnd_mode)
+{
+ if (v>=0) return sqrt(static_cast<unsigned long int>(v),rnd_mode);
+ else return mpreal().setNan(); // NaN
+}
+
+inline const mpreal root(const mpreal& x, unsigned long int k, mp_rnd_t r = mpreal::get_default_rnd())
+{
+ mpreal y(0, mpfr_get_prec(x.mpfr_srcptr()));
+ mpfr_root(y.mpfr_ptr(), x.mpfr_srcptr(), k, r);
+ return y;
+}
+
+inline const mpreal dim(const mpreal& a, const mpreal& b, mp_rnd_t r = mpreal::get_default_rnd())
+{
+ mpreal y(0, mpfr_get_prec(a.mpfr_srcptr()));
+ mpfr_dim(y.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), r);
+ return y;
+}
+
+inline int cmpabs(const mpreal& a,const mpreal& b)
+{
+ return mpfr_cmpabs(a.mpfr_ptr(), b.mpfr_srcptr());
+}
+
+inline int sin_cos(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ return mpfr_sin_cos(s.mpfr_ptr(), c.mpfr_ptr(), v.mpfr_srcptr(), rnd_mode);
+}
+
+inline const mpreal sqrt (const long double v, mp_rnd_t rnd_mode) { return sqrt(mpreal(v),rnd_mode); }
+inline const mpreal sqrt (const double v, mp_rnd_t rnd_mode) { return sqrt(mpreal(v),rnd_mode); }
+
+inline const mpreal cbrt (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cbrt ); }
+inline const mpreal fabs (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(abs ); }
+inline const mpreal abs (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(abs ); }
+inline const mpreal log (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log ); }
+inline const mpreal log2 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log2 ); }
+inline const mpreal log10 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log10); }
+inline const mpreal exp (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp ); }
+inline const mpreal exp2 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp2 ); }
+inline const mpreal exp10 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp10); }
+inline const mpreal cos (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cos ); }
+inline const mpreal sin (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sin ); }
+inline const mpreal tan (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(tan ); }
+inline const mpreal sec (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sec ); }
+inline const mpreal csc (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(csc ); }
+inline const mpreal cot (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cot ); }
+inline const mpreal acos (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(acos ); }
+inline const mpreal asin (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(asin ); }
+inline const mpreal atan (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(atan ); }
+
+inline const mpreal acot (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return atan (1/v, r); }
+inline const mpreal asec (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return acos (1/v, r); }
+inline const mpreal acsc (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return asin (1/v, r); }
+inline const mpreal acoth (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return atanh(1/v, r); }
+inline const mpreal asech (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return acosh(1/v, r); }
+inline const mpreal acsch (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return asinh(1/v, r); }
+
+inline const mpreal cosh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cosh ); }
+inline const mpreal sinh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sinh ); }
+inline const mpreal tanh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(tanh ); }
+inline const mpreal sech (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sech ); }
+inline const mpreal csch (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(csch ); }
+inline const mpreal coth (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(coth ); }
+inline const mpreal acosh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(acosh); }
+inline const mpreal asinh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(asinh); }
+inline const mpreal atanh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(atanh); }
+
+inline const mpreal log1p (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log1p ); }
+inline const mpreal expm1 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(expm1 ); }
+inline const mpreal eint (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(eint ); }
+inline const mpreal gamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(gamma ); }
+inline const mpreal lngamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(lngamma); }
+inline const mpreal zeta (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(zeta ); }
+inline const mpreal erf (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(erf ); }
+inline const mpreal erfc (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(erfc ); }
+inline const mpreal besselj0(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(j0 ); }
+inline const mpreal besselj1(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(j1 ); }
+inline const mpreal bessely0(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(y0 ); }
+inline const mpreal bessely1(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(y1 ); }
+
+inline const mpreal atan2 (const mpreal& y, const mpreal& x, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision()));
+ mpfr_atan2(a.mpfr_ptr(), y.mpfr_srcptr(), x.mpfr_srcptr(), rnd_mode);
+ return a;
+}
+
+inline const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision()));
+ mpfr_hypot(a.mpfr_ptr(), x.mpfr_srcptr(), y.mpfr_srcptr(), rnd_mode);
+ return a;
+}
+
+inline const mpreal remainder (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision()));
+ mpfr_remainder(a.mpfr_ptr(), x.mpfr_srcptr(), y.mpfr_srcptr(), rnd_mode);
+ return a;
+}
+
+inline const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision()));
+ mpfr_remquo(a.mpfr_ptr(),q, x.mpfr_srcptr(), y.mpfr_srcptr(), rnd_mode);
+ return a;
+}
+
+inline const mpreal fac_ui (unsigned long int v, mp_prec_t prec = mpreal::get_default_prec(),
+ mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x(0, prec);
+ mpfr_fac_ui(x.mpfr_ptr(),v,rnd_mode);
+ return x;
+}
+
+
+inline const mpreal lgamma (const mpreal& v, int *signp = 0, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x(v);
+ int tsignp;
+
+ if(signp) mpfr_lgamma(x.mpfr_ptr(), signp,v.mpfr_srcptr(),rnd_mode);
+ else mpfr_lgamma(x.mpfr_ptr(),&tsignp,v.mpfr_srcptr(),rnd_mode);
+
+ return x;
+}
+
+
+inline const mpreal besseljn (long n, const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
+{
+ mpreal y(0, x.getPrecision());
+ mpfr_jn(y.mpfr_ptr(), n, x.mpfr_srcptr(), r);
+ return y;
+}
+
+inline const mpreal besselyn (long n, const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
+{
+ mpreal y(0, x.getPrecision());
+ mpfr_yn(y.mpfr_ptr(), n, x.mpfr_srcptr(), r);
+ return y;
+}
+
+inline const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal a;
+ mp_prec_t p1, p2, p3;
+
+ p1 = v1.get_prec();
+ p2 = v2.get_prec();
+ p3 = v3.get_prec();
+
+ a.set_prec(p3>p2?(p3>p1?p3:p1):(p2>p1?p2:p1));
+
+ mpfr_fma(a.mp,v1.mp,v2.mp,v3.mp,rnd_mode);
+ return a;
+}
+
+inline const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal a;
+ mp_prec_t p1, p2, p3;
+
+ p1 = v1.get_prec();
+ p2 = v2.get_prec();
+ p3 = v3.get_prec();
+
+ a.set_prec(p3>p2?(p3>p1?p3:p1):(p2>p1?p2:p1));
+
+ mpfr_fms(a.mp,v1.mp,v2.mp,v3.mp,rnd_mode);
+ return a;
+}
+
+inline const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal a;
+ mp_prec_t p1, p2;
+
+ p1 = v1.get_prec();
+ p2 = v2.get_prec();
+
+ a.set_prec(p1>p2?p1:p2);
+
+ mpfr_agm(a.mp, v1.mp, v2.mp, rnd_mode);
+
+ return a;
+}
+
+inline const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x;
+ mpfr_ptr* t;
+ unsigned long int i;
+
+ t = new mpfr_ptr[n];
+ for (i=0;i<n;i++) t[i] = (mpfr_ptr)tab[i].mp;
+ mpfr_sum(x.mp,t,n,rnd_mode);
+ delete[] t;
+ return x;
+}
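+
+// Usage sketch (editor's illustration): mpfr_sum() returns the correctly
+// rounded sum of the whole array, avoiding intermediate rounding.
+//
+//     mpreal tab[3] = { 1, 2, 3 };
+//     mpreal s = sum(tab, 3);   // == 6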
+
+//////////////////////////////////////////////////////////////////////////
+// MPFR 2.4.0 Specifics
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
+
+inline int sinh_cosh(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ return mpfr_sinh_cosh(s.mp,c.mp,v.mp,rnd_mode);
+}
+
+inline const mpreal li2 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
+{
+ MPREAL_UNARY_MATH_FUNCTION_BODY(li2);
+}
+
+inline const mpreal rem (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ /* R = rem(X,Y) if Y != 0, returns X - n * Y where n = trunc(X/Y). */
+ return fmod(x, y, rnd_mode);
+}
+
+inline const mpreal mod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ (void)rnd_mode;
+
+ /*
+
+ m = mod(x,y) if y != 0, returns x - n*y where n = floor(x/y)
+
+ The following are true by convention:
+ - mod(x,0) is x
+ - mod(x,x) is 0
+ - mod(x,y) for x != y and y != 0 has the same sign as y.
+
+ */
+
+ if(iszero(y)) return x;
+ if(x == y) return 0;
+
+ mpreal m = x - floor(x / y) * y;
+
+ m.setSign(sgn(y)); // make sure result has the same sign as Y
+
+ return m;
+}
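+
+// Worked example (editor's illustration) of the sign convention above
+// (values shown are approximate, since 5.3 is not exactly representable):
+//
+//     mod( 5.3, 2) == 1.3    // floor( 5.3/2) ==  2:  5.3 - 2*2 == 1.3
+//     mod(-5.3, 2) == 0.7    // floor(-5.3/2) == -3: -5.3 + 3*2 == 0.7 (sign of y)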
+
+inline const mpreal fmod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal a;
+ mp_prec_t yp, xp;
+
+ yp = y.get_prec();
+ xp = x.get_prec();
+
+ a.set_prec(yp>xp?yp:xp);
+
+ mpfr_fmod(a.mp, x.mp, y.mp, rnd_mode);
+
+ return a;
+}
+
+inline const mpreal rec_sqrt(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x(v);
+ mpfr_rec_sqrt(x.mp,v.mp,rnd_mode);
+ return x;
+}
+#endif // MPFR 2.4.0 Specifics
+
+//////////////////////////////////////////////////////////////////////////
+// MPFR 3.0.0 Specifics
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
+inline const mpreal digamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(digamma); }
+inline const mpreal ai (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(ai); }
+#endif // MPFR 3.0.0 Specifics
+
+//////////////////////////////////////////////////////////////////////////
+// Constants
+inline const mpreal const_log2 (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
+{
+ mpreal x(0, p);
+ mpfr_const_log2(x.mpfr_ptr(), r);
+ return x;
+}
+
+inline const mpreal const_pi (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
+{
+ mpreal x(0, p);
+ mpfr_const_pi(x.mpfr_ptr(), r);
+ return x;
+}
+
+inline const mpreal const_euler (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
+{
+ mpreal x(0, p);
+ mpfr_const_euler(x.mpfr_ptr(), r);
+ return x;
+}
+
+inline const mpreal const_catalan (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
+{
+ mpreal x(0, p);
+ mpfr_const_catalan(x.mpfr_ptr(), r);
+ return x;
+}
+
+inline const mpreal const_infinity (int sign = 1, mp_prec_t p = mpreal::get_default_prec())
+{
+ mpreal x(0, p);
+ mpfr_set_inf(x.mpfr_ptr(), sign);
+ return x;
+}
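+
+// Usage sketch (editor's illustration): constants are computed on demand at
+// the requested precision.
+//
+//     mpreal pi = const_pi(digits2bits(100));   // ~100 decimal digits of pi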
+
+//////////////////////////////////////////////////////////////////////////
+// Integer Related Functions
+inline const mpreal ceil(const mpreal& v)
+{
+ mpreal x(v);
+ mpfr_ceil(x.mp,v.mp);
+ return x;
+}
+
+inline const mpreal floor(const mpreal& v)
+{
+ mpreal x(v);
+ mpfr_floor(x.mp,v.mp);
+ return x;
+}
+
+inline const mpreal round(const mpreal& v)
+{
+ mpreal x(v);
+ mpfr_round(x.mp,v.mp);
+ return x;
+}
+
+inline const mpreal trunc(const mpreal& v)
+{
+ mpreal x(v);
+ mpfr_trunc(x.mp,v.mp);
+ return x;
+}
+
+inline const mpreal rint (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint ); }
+inline const mpreal rint_ceil (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_ceil ); }
+inline const mpreal rint_floor (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_floor); }
+inline const mpreal rint_round (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_round); }
+inline const mpreal rint_trunc (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_trunc); }
+inline const mpreal frac (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(frac ); }
+
+//////////////////////////////////////////////////////////////////////////
+// Miscellaneous Functions
+inline void swap (mpreal& a, mpreal& b) { mpfr_swap(a.mp,b.mp); }
+inline const mpreal (max)(const mpreal& x, const mpreal& y){ return (x>y?x:y); }
+inline const mpreal (min)(const mpreal& x, const mpreal& y){ return (x<y?x:y); }
+
+inline const mpreal fmax(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal a;
+ mpfr_max(a.mp,x.mp,y.mp,rnd_mode);
+ return a;
+}
+
+inline const mpreal fmin(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal a;
+ mpfr_min(a.mp,x.mp,y.mp,rnd_mode);
+ return a;
+}
+
+inline const mpreal nexttoward (const mpreal& x, const mpreal& y)
+{
+ mpreal a(x);
+ mpfr_nexttoward(a.mp,y.mp);
+ return a;
+}
+
+inline const mpreal nextabove (const mpreal& x)
+{
+ mpreal a(x);
+ mpfr_nextabove(a.mp);
+ return a;
+}
+
+inline const mpreal nextbelow (const mpreal& x)
+{
+ mpreal a(x);
+ mpfr_nextbelow(a.mp);
+ return a;
+}
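+
+// Note: nextabove/nextbelow step one ulp toward +inf/-inf at the operand's own
+// precision, while nexttoward(x, y) steps x one ulp in the direction of y,
+// mirroring the underlying mpfr_next* primitives.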
+
+inline const mpreal urandomb (gmp_randstate_t& state)
+{
+ mpreal x;
+ mpfr_urandomb(x.mp,state);
+ return x;
+}
+
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,1,0))
+// use gmp_randinit_default() to init state, gmp_randclear() to clear
+inline const mpreal urandom (gmp_randstate_t& state, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x;
+ mpfr_urandom(x.mp,state,rnd_mode);
+ return x;
+}
+
+inline const mpreal grandom (gmp_randstate_t& state, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x;
+ mpfr_grandom(x.mp, NULL, state, rnd_mode);
+ return x;
+}
+
+#endif
+
+#if (MPFR_VERSION <= MPFR_VERSION_NUM(2,4,2))
+inline const mpreal random2 (mp_size_t size, mp_exp_t exp)
+{
+ mpreal x;
+ mpfr_random2(x.mp,size,exp);
+ return x;
+}
+#endif
+
+// Uniformly distributed random number generation
+//   a = random(seed);  <- initialization and first random number (seed != 0)
+//   a = random();      <- subsequent random numbers
+inline const mpreal random(unsigned int seed = 0)
+{
+
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
+ static gmp_randstate_t state;
+ static bool isFirstTime = true;
+
+ if(isFirstTime)
+ {
+ gmp_randinit_default(state);
+ gmp_randseed_ui(state,0);
+ isFirstTime = false;
+ }
+
+ if(seed != 0) gmp_randseed_ui(state,seed);
+
+ return mpfr::urandom(state);
+#else
+ if(seed != 0) std::srand(seed);
+ return mpfr::mpreal(std::rand()/(double)RAND_MAX);
+#endif
+
+}
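+
+// Usage sketch matching the contract documented above (illustrative):
+//
+//   mpreal a = mpfr::random(42); // seeds the shared state and draws a sample
+//   mpreal b = mpfr::random();   // subsequent calls reuse the seeded state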
+
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
+inline const mpreal grandom(unsigned int seed = 0)
+{
+ static gmp_randstate_t state;
+ static bool isFirstTime = true;
+
+ if(isFirstTime)
+ {
+ gmp_randinit_default(state);
+ gmp_randseed_ui(state,0);
+ isFirstTime = false;
+ }
+
+ if(seed != 0) gmp_randseed_ui(state,seed);
+
+ return mpfr::grandom(state);
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////
+// Set/Get global properties
+inline void mpreal::set_default_prec(mp_prec_t prec)
+{
+ mpfr_set_default_prec(prec);
+}
+
+inline void mpreal::set_default_rnd(mp_rnd_t rnd_mode)
+{
+ mpfr_set_default_rounding_mode(rnd_mode);
+}
+
+inline bool mpreal::fits_in_bits(double x, int n)
+{
+ int i;
+ double t;
+ return IsInf(x) || (std::modf ( std::ldexp ( std::frexp ( x, &i ), n ), &t ) == 0.0);
+}
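+
+// Rationale for fits_in_bits: frexp() normalizes x to a mantissa in [0.5, 1),
+// ldexp() shifts that mantissa left by n bits, and a zero fractional part from
+// modf() then means x is exactly representable with n bits of mantissa.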
+
+inline const mpreal pow(const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x(a);
+ mpfr_pow(x.mp,x.mp,b.mp,rnd_mode);
+ return x;
+}
+
+inline const mpreal pow(const mpreal& a, const mpz_t b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x(a);
+ mpfr_pow_z(x.mp,x.mp,b,rnd_mode);
+ return x;
+}
+
+inline const mpreal pow(const mpreal& a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x(a);
+ mpfr_pow_ui(x.mp,x.mp,b,rnd_mode);
+ return x;
+}
+
+inline const mpreal pow(const mpreal& a, const unsigned int b, mp_rnd_t rnd_mode)
+{
+ return pow(a,static_cast<unsigned long int>(b),rnd_mode);
+}
+
+inline const mpreal pow(const mpreal& a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x(a);
+ mpfr_pow_si(x.mp,x.mp,b,rnd_mode);
+ return x;
+}
+
+inline const mpreal pow(const mpreal& a, const int b, mp_rnd_t rnd_mode)
+{
+ return pow(a,static_cast<long int>(b),rnd_mode);
+}
+
+inline const mpreal pow(const mpreal& a, const long double b, mp_rnd_t rnd_mode)
+{
+ return pow(a,mpreal(b),rnd_mode);
+}
+
+inline const mpreal pow(const mpreal& a, const double b, mp_rnd_t rnd_mode)
+{
+ return pow(a,mpreal(b),rnd_mode);
+}
+
+inline const mpreal pow(const unsigned long int a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+ mpreal x(a);
+ mpfr_ui_pow(x.mp,a,b.mp,rnd_mode);
+ return x;
+}
+
+inline const mpreal pow(const unsigned int a, const mpreal& b, mp_rnd_t rnd_mode)
+{
+ return pow(static_cast<unsigned long int>(a),b,rnd_mode);
+}
+
+inline const mpreal pow(const long int a, const mpreal& b, mp_rnd_t rnd_mode)
+{
+ if (a>=0) return pow(static_cast<unsigned long int>(a),b,rnd_mode);
+ else return pow(mpreal(a),b,rnd_mode);
+}
+
+inline const mpreal pow(const int a, const mpreal& b, mp_rnd_t rnd_mode)
+{
+ if (a>=0) return pow(static_cast<unsigned long int>(a),b,rnd_mode);
+ else return pow(mpreal(a),b,rnd_mode);
+}
+
+inline const mpreal pow(const long double a, const mpreal& b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),b,rnd_mode);
+}
+
+inline const mpreal pow(const double a, const mpreal& b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),b,rnd_mode);
+}
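+
+// The pow overloads below route mixed-type calls to the cheapest MPFR
+// primitive: non-negative machine-integer operands use mpfr_ui_pow_ui /
+// mpfr_ui_pow, and the remaining cases fall back to mpfr_pow_si or mpfr_pow.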
+
+// pow unsigned long int
+inline const mpreal pow(const unsigned long int a, const unsigned long int b, mp_rnd_t rnd_mode)
+{
+ mpreal x(a);
+ mpfr_ui_pow_ui(x.mp,a,b,rnd_mode);
+ return x;
+}
+
+inline const mpreal pow(const unsigned long int a, const unsigned int b, mp_rnd_t rnd_mode)
+{
+ return pow(a,static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+}
+
+inline const mpreal pow(const unsigned long int a, const long int b, mp_rnd_t rnd_mode)
+{
+ if(b>0) return pow(a,static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+ else return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
+}
+
+inline const mpreal pow(const unsigned long int a, const int b, mp_rnd_t rnd_mode)
+{
+ if(b>0) return pow(a,static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+ else return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
+}
+
+inline const mpreal pow(const unsigned long int a, const long double b, mp_rnd_t rnd_mode)
+{
+ return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
+}
+
+inline const mpreal pow(const unsigned long int a, const double b, mp_rnd_t rnd_mode)
+{
+ return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
+}
+
+// pow unsigned int
+inline const mpreal pow(const unsigned int a, const unsigned long int b, mp_rnd_t rnd_mode)
+{
+ return pow(static_cast<unsigned long int>(a),b,rnd_mode); //mpfr_ui_pow_ui
+}
+
+inline const mpreal pow(const unsigned int a, const unsigned int b, mp_rnd_t rnd_mode)
+{
+ return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+}
+
+inline const mpreal pow(const unsigned int a, const long int b, mp_rnd_t rnd_mode)
+{
+ if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+ else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+}
+
+inline const mpreal pow(const unsigned int a, const int b, mp_rnd_t rnd_mode)
+{
+ if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+ else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+}
+
+inline const mpreal pow(const unsigned int a, const long double b, mp_rnd_t rnd_mode)
+{
+ return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+}
+
+inline const mpreal pow(const unsigned int a, const double b, mp_rnd_t rnd_mode)
+{
+ return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+}
+
+// pow long int
+inline const mpreal pow(const long int a, const unsigned long int b, mp_rnd_t rnd_mode)
+{
+ if (a>0) return pow(static_cast<unsigned long int>(a),b,rnd_mode); //mpfr_ui_pow_ui
+ else return pow(mpreal(a),b,rnd_mode); //mpfr_pow_ui
+}
+
+inline const mpreal pow(const long int a, const unsigned int b, mp_rnd_t rnd_mode)
+{
+ if (a>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+ else return pow(mpreal(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_pow_ui
+}
+
+inline const mpreal pow(const long int a, const long int b, mp_rnd_t rnd_mode)
+{
+ if (a>0)
+ {
+ if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+ else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+ }else{
+ return pow(mpreal(a),b,rnd_mode); // mpfr_pow_si
+ }
+}
+
+inline const mpreal pow(const long int a, const int b, mp_rnd_t rnd_mode)
+{
+ if (a>0)
+ {
+ if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+ else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+ }else{
+ return pow(mpreal(a),static_cast<long int>(b),rnd_mode); // mpfr_pow_si
+ }
+}
+
+inline const mpreal pow(const long int a, const long double b, mp_rnd_t rnd_mode)
+{
+ if (a>=0) return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+ else return pow(mpreal(a),mpreal(b),rnd_mode); //mpfr_pow
+}
+
+inline const mpreal pow(const long int a, const double b, mp_rnd_t rnd_mode)
+{
+ if (a>=0) return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+ else return pow(mpreal(a),mpreal(b),rnd_mode); //mpfr_pow
+}
+
+// pow int
+inline const mpreal pow(const int a, const unsigned long int b, mp_rnd_t rnd_mode)
+{
+ if (a>0) return pow(static_cast<unsigned long int>(a),b,rnd_mode); //mpfr_ui_pow_ui
+ else return pow(mpreal(a),b,rnd_mode); //mpfr_pow_ui
+}
+
+inline const mpreal pow(const int a, const unsigned int b, mp_rnd_t rnd_mode)
+{
+ if (a>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+ else return pow(mpreal(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_pow_ui
+}
+
+inline const mpreal pow(const int a, const long int b, mp_rnd_t rnd_mode)
+{
+ if (a>0)
+ {
+ if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+ else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+ }else{
+ return pow(mpreal(a),b,rnd_mode); // mpfr_pow_si
+ }
+}
+
+inline const mpreal pow(const int a, const int b, mp_rnd_t rnd_mode)
+{
+ if (a>0)
+ {
+ if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
+ else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+ }else{
+ return pow(mpreal(a),static_cast<long int>(b),rnd_mode); // mpfr_pow_si
+ }
+}
+
+inline const mpreal pow(const int a, const long double b, mp_rnd_t rnd_mode)
+{
+ if (a>=0) return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+ else return pow(mpreal(a),mpreal(b),rnd_mode); //mpfr_pow
+}
+
+inline const mpreal pow(const int a, const double b, mp_rnd_t rnd_mode)
+{
+ if (a>=0) return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
+ else return pow(mpreal(a),mpreal(b),rnd_mode); //mpfr_pow
+}
+
+// pow long double
+inline const mpreal pow(const long double a, const long double b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),mpreal(b),rnd_mode);
+}
+
+inline const mpreal pow(const long double a, const unsigned long int b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),b,rnd_mode); //mpfr_pow_ui
+}
+
+inline const mpreal pow(const long double a, const unsigned int b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_pow_ui
+}
+
+inline const mpreal pow(const long double a, const long int b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),b,rnd_mode); // mpfr_pow_si
+}
+
+inline const mpreal pow(const long double a, const int b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),static_cast<long int>(b),rnd_mode); // mpfr_pow_si
+}
+
+inline const mpreal pow(const double a, const double b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),mpreal(b),rnd_mode);
+}
+
+inline const mpreal pow(const double a, const unsigned long int b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),b,rnd_mode); // mpfr_pow_ui
+}
+
+inline const mpreal pow(const double a, const unsigned int b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),static_cast<unsigned long int>(b),rnd_mode); // mpfr_pow_ui
+}
+
+inline const mpreal pow(const double a, const long int b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),b,rnd_mode); // mpfr_pow_si
+}
+
+inline const mpreal pow(const double a, const int b, mp_rnd_t rnd_mode)
+{
+ return pow(mpreal(a),static_cast<long int>(b),rnd_mode); // mpfr_pow_si
+}
+} // End of mpfr namespace
+
+// Explicit specialization of std::swap for mpreal numbers
+// Thus standard algorithms will use the efficient version of swap (found via Koenig lookup)
+// Non-throwing swap C++ idiom: http://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Non-throwing_swap
+namespace std
+{
+ // we are allowed to extend namespace std with specializations only
+ template <>
+ inline void swap(mpfr::mpreal& x, mpfr::mpreal& y)
+ {
+ return mpfr::swap(x, y);
+ }
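+
+    // Usage sketch: thanks to the specialization above, generic code swapping
+    // two mpreal values exchanges the underlying mpfr_t handles in O(1)
+    // instead of copying digits (illustrative only):
+    //
+    //   mpfr::mpreal u(1), v(2);
+    //   std::swap(u, v); // dispatches to mpfr_swap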
+
+ template<>
+ class numeric_limits<mpfr::mpreal>
+ {
+ public:
+ static const bool is_specialized = true;
+ static const bool is_signed = true;
+ static const bool is_integer = false;
+ static const bool is_exact = false;
+ static const int radix = 2;
+
+ static const bool has_infinity = true;
+ static const bool has_quiet_NaN = true;
+ static const bool has_signaling_NaN = true;
+
+ static const bool is_iec559 = true; // = IEEE 754
+ static const bool is_bounded = true;
+ static const bool is_modulo = false;
+ static const bool traps = true;
+ static const bool tinyness_before = true;
+
+ static const float_denorm_style has_denorm = denorm_absent;
+
+ inline static mpfr::mpreal (min) (mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return mpfr::minval(precision); }
+ inline static mpfr::mpreal (max) (mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return mpfr::maxval(precision); }
+ inline static mpfr::mpreal lowest (mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return -mpfr::maxval(precision); }
+
+ // Returns smallest eps such that 1 + eps != 1 (classic machine epsilon)
+ inline static mpfr::mpreal epsilon(mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(precision); }
+
+ // Returns smallest eps such that x + eps != x (relative machine epsilon)
+ inline static mpfr::mpreal epsilon(const mpfr::mpreal& x) { return mpfr::machine_epsilon(x); }
+
+ inline static mpfr::mpreal round_error(mp_prec_t precision = mpfr::mpreal::get_default_prec())
+ {
+ mp_rnd_t r = mpfr::mpreal::get_default_rnd();
+
+ if(r == GMP_RNDN) return mpfr::mpreal(0.5, precision);
+ else return mpfr::mpreal(1.0, precision);
+ }
+
+ inline static const mpfr::mpreal infinity() { return mpfr::const_infinity(); }
+ inline static const mpfr::mpreal quiet_NaN() { return mpfr::mpreal().setNan(); }
+ inline static const mpfr::mpreal signaling_NaN() { return mpfr::mpreal().setNan(); }
+ inline static const mpfr::mpreal denorm_min() { return (min)(); }
+
+        // Please note that the exponent range is not fixed in MPFR
+ static const int min_exponent = MPFR_EMIN_DEFAULT;
+ static const int max_exponent = MPFR_EMAX_DEFAULT;
+ MPREAL_PERMISSIVE_EXPR static const int min_exponent10 = (int) (MPFR_EMIN_DEFAULT * 0.3010299956639811);
+ MPREAL_PERMISSIVE_EXPR static const int max_exponent10 = (int) (MPFR_EMAX_DEFAULT * 0.3010299956639811);
+
+#ifdef MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS
+
+        // The following members should be constants according to the standard,
+        // but they can vary in MPFR, so we define them as functions here.
+        //
+        // This is the preferable way to specialize std::numeric_limits<mpfr::mpreal>,
+        // but it is incompatible with the standard std::numeric_limits interface and
+        // might not work with other libraries, e.g. boost. See below for a compatible implementation.
+ inline static float_round_style round_style()
+ {
+ mp_rnd_t r = mpfr::mpreal::get_default_rnd();
+
+ switch (r)
+ {
+ case GMP_RNDN: return round_to_nearest;
+ case GMP_RNDZ: return round_toward_zero;
+ case GMP_RNDU: return round_toward_infinity;
+ case GMP_RNDD: return round_toward_neg_infinity;
+ default: return round_indeterminate;
+ }
+ }
+
+ inline static int digits() { return int(mpfr::mpreal::get_default_prec()); }
+ inline static int digits(const mpfr::mpreal& x) { return x.getPrecision(); }
+
+ inline static int digits10(mp_prec_t precision = mpfr::mpreal::get_default_prec())
+ {
+ return mpfr::bits2digits(precision);
+ }
+
+ inline static int digits10(const mpfr::mpreal& x)
+ {
+ return mpfr::bits2digits(x.getPrecision());
+ }
+
+ inline static int max_digits10(mp_prec_t precision = mpfr::mpreal::get_default_prec())
+ {
+ return digits10(precision);
+ }
+#else
+        // digits and round_style are NOT constants when it comes to mpreal:
+        // if possible, please use the functions digits() and round_style() defined above.
+        //
+        // These (default) values are preserved for compatibility with existing libraries, e.g. boost.
+        // Change them according to your application.
+ //
+ // For example, if you use 256 bits of precision uniformly in your program, then:
+ // digits = 256
+ // digits10 = 77
+ // max_digits10 = 78
+ //
+ // Approximate formula for decimal digits is: digits10 = floor(log10(2) * digits). See bits2digits() for more details.
+
+ static const std::float_round_style round_style = round_to_nearest;
+ static const int digits = 53;
+ static const int digits10 = 15;
+ static const int max_digits10 = 16;
+#endif
+ };
+
+}
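+
+// A short sketch of querying the specialization above; digits()/digits10() as
+// functions assume MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS (illustrative only):
+//
+//   mpfr::mpreal::set_default_prec(256);
+//   std::numeric_limits<mpfr::mpreal>::digits();   // 256 bits
+//   std::numeric_limits<mpfr::mpreal>::digits10(); // 77 decimal digits
+//   std::numeric_limits<mpfr::mpreal>::epsilon();  // smallest eps with 1 + eps != 1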
+
+#endif /* __MPREAL_H__ */
diff --git a/unsupported/test/mpreal_support.cpp b/unsupported/test/mpreal_support.cpp
new file mode 100644
index 0000000..bc00382
--- /dev/null
+++ b/unsupported/test/mpreal_support.cpp
@@ -0,0 +1,57 @@
+#include "main.h"
+#include <Eigen/MPRealSupport>
+#include <Eigen/LU>
+#include <Eigen/Eigenvalues>
+#include <sstream>
+
+using namespace mpfr;
+using namespace Eigen;
+
+void test_mpreal_support()
+{
+ // set precision to 256 bits (double has only 53 bits)
+ mpreal::set_default_prec(256);
+ typedef Matrix<mpreal,Eigen::Dynamic,Eigen::Dynamic> MatrixXmp;
+
+ std::cerr << "epsilon = " << NumTraits<mpreal>::epsilon() << "\n";
+ std::cerr << "dummy_precision = " << NumTraits<mpreal>::dummy_precision() << "\n";
+ std::cerr << "highest = " << NumTraits<mpreal>::highest() << "\n";
+ std::cerr << "lowest = " << NumTraits<mpreal>::lowest() << "\n";
+
+ for(int i = 0; i < g_repeat; i++) {
+ int s = Eigen::internal::random<int>(1,100);
+ MatrixXmp A = MatrixXmp::Random(s,s);
+ MatrixXmp B = MatrixXmp::Random(s,s);
+ MatrixXmp S = A.adjoint() * A;
+ MatrixXmp X;
+
+    // Basic stuff
+ VERIFY_IS_APPROX(A.real(), A);
+ VERIFY(Eigen::internal::isApprox(A.array().abs2().sum(), A.squaredNorm()));
+ VERIFY_IS_APPROX(A.array().exp(), exp(A.array()));
+ VERIFY_IS_APPROX(A.array().abs2().sqrt(), A.array().abs());
+ VERIFY_IS_APPROX(A.array().sin(), sin(A.array()));
+ VERIFY_IS_APPROX(A.array().cos(), cos(A.array()));
+
+
+ // Cholesky
+ X = S.selfadjointView<Lower>().llt().solve(B);
+ VERIFY_IS_APPROX((S.selfadjointView<Lower>()*X).eval(),B);
+
+ // partial LU
+ X = A.lu().solve(B);
+ VERIFY_IS_APPROX((A*X).eval(),B);
+
+ // symmetric eigenvalues
+ SelfAdjointEigenSolver<MatrixXmp> eig(S);
+ VERIFY_IS_EQUAL(eig.info(), Success);
+ VERIFY( (S.selfadjointView<Lower>() * eig.eigenvectors()).isApprox(eig.eigenvectors() * eig.eigenvalues().asDiagonal(), NumTraits<mpreal>::dummy_precision()*1e3) );
+ }
+
+ {
+ MatrixXmp A(8,3); A.setRandom();
+ // test output (interesting things happen in this code)
+ std::stringstream stream;
+ stream << A;
+ }
+}
diff --git a/unsupported/test/openglsupport.cpp b/unsupported/test/openglsupport.cpp
new file mode 100644
index 0000000..706a816
--- /dev/null
+++ b/unsupported/test/openglsupport.cpp
@@ -0,0 +1,337 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include <main.h>
+#include <iostream>
+#include <GL/glew.h>
+#include <Eigen/OpenGLSupport>
+#include <GL/glut.h>
+using namespace Eigen;
+
+
+
+
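+// VERIFY_MATRIX resets the fixed-function modelview stack, executes CODE,
+// reads GL_MODELVIEW_MATRIX back into an Eigen matrix, and compares it
+// against REF (cast to float), printing both operands on mismatch.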
+#define VERIFY_MATRIX(CODE,REF) { \
+ glLoadIdentity(); \
+ CODE; \
+ Matrix<float,4,4,ColMajor> m; m.setZero(); \
+ glGet(GL_MODELVIEW_MATRIX, m); \
+ if(!(REF).cast<float>().isApprox(m)) { \
+ std::cerr << "Expected:\n" << ((REF).cast<float>()) << "\n" << "got\n" << m << "\n\n"; \
+ } \
+ VERIFY_IS_APPROX((REF).cast<float>(), m); \
+ }
+
+#define VERIFY_UNIFORM(SUFFIX,NAME,TYPE) { \
+ TYPE value; value.setRandom(); \
+ TYPE data; \
+ int loc = glGetUniformLocation(prg_id, #NAME); \
+ VERIFY((loc!=-1) && "uniform not found"); \
+ glUniform(loc,value); \
+ EIGEN_CAT(glGetUniform,SUFFIX)(prg_id,loc,data.data()); \
+ if(!value.isApprox(data)) { \
+ std::cerr << "Expected:\n" << value << "\n" << "got\n" << data << "\n\n"; \
+ } \
+ VERIFY_IS_APPROX(value, data); \
+ }
+
+#define VERIFY_UNIFORMi(NAME,TYPE) { \
+ TYPE value = TYPE::Random().eval().cast<float>().cast<TYPE::Scalar>(); \
+ TYPE data; \
+ int loc = glGetUniformLocation(prg_id, #NAME); \
+ VERIFY((loc!=-1) && "uniform not found"); \
+ glUniform(loc,value); \
+ glGetUniformiv(prg_id,loc,(GLint*)data.data()); \
+ if(!value.isApprox(data)) { \
+ std::cerr << "Expected:\n" << value << "\n" << "got\n" << data << "\n\n"; \
+ } \
+ VERIFY_IS_APPROX(value, data); \
+ }
+
+void printInfoLog(GLuint objectID)
+{
+ int infologLength, charsWritten;
+ GLchar *infoLog;
+ glGetProgramiv(objectID,GL_INFO_LOG_LENGTH, &infologLength);
+ if(infologLength > 0)
+ {
+ infoLog = new GLchar[infologLength];
+ glGetProgramInfoLog(objectID, infologLength, &charsWritten, infoLog);
+ if (charsWritten>0)
+ std::cerr << "Shader info : \n" << infoLog << std::endl;
+ delete[] infoLog;
+ }
+}
+
+GLint createShader(const char* vtx, const char* frg)
+{
+ GLint prg_id = glCreateProgram();
+ GLint vtx_id = glCreateShader(GL_VERTEX_SHADER);
+ GLint frg_id = glCreateShader(GL_FRAGMENT_SHADER);
+ GLint ok;
+
+ glShaderSource(vtx_id, 1, &vtx, 0);
+ glCompileShader(vtx_id);
+ glGetShaderiv(vtx_id,GL_COMPILE_STATUS,&ok);
+ if(!ok)
+ {
+ std::cerr << "vtx compilation failed\n";
+ }
+
+ glShaderSource(frg_id, 1, &frg, 0);
+ glCompileShader(frg_id);
+ glGetShaderiv(frg_id,GL_COMPILE_STATUS,&ok);
+ if(!ok)
+ {
+ std::cerr << "frg compilation failed\n";
+ }
+
+ glAttachShader(prg_id, vtx_id);
+ glAttachShader(prg_id, frg_id);
+ glLinkProgram(prg_id);
+ glGetProgramiv(prg_id,GL_LINK_STATUS,&ok);
+ if(!ok)
+ {
+ std::cerr << "linking failed\n";
+ }
+ printInfoLog(prg_id);
+
+ glUseProgram(prg_id);
+ return prg_id;
+}
+
+void test_openglsupport()
+{
+ int argc = 0;
+ glutInit(&argc, 0);
+ glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
+ glutInitWindowPosition (0,0);
+ glutInitWindowSize(10, 10);
+
+ if(glutCreateWindow("Eigen") <= 0)
+ {
+ std::cerr << "Error: Unable to create GLUT Window.\n";
+ exit(1);
+ }
+
+ glewExperimental = GL_TRUE;
+ if(glewInit() != GLEW_OK)
+ {
+ std::cerr << "Warning: Failed to initialize GLEW\n";
+ }
+
+ Vector3f v3f;
+ Matrix3f rot;
+ glBegin(GL_POINTS);
+
+ glVertex(v3f);
+ glVertex(2*v3f+v3f);
+ glVertex(rot*v3f);
+
+ glEnd();
+
+ // 4x4 matrices
+ Matrix4f mf44; mf44.setRandom();
+ VERIFY_MATRIX(glLoadMatrix(mf44), mf44);
+ VERIFY_MATRIX(glMultMatrix(mf44), mf44);
+ Matrix4d md44; md44.setRandom();
+ VERIFY_MATRIX(glLoadMatrix(md44), md44);
+ VERIFY_MATRIX(glMultMatrix(md44), md44);
+
+ // Quaternion
+ Quaterniond qd(AngleAxisd(internal::random<double>(), Vector3d::Random()));
+ VERIFY_MATRIX(glRotate(qd), Projective3d(qd).matrix());
+
+ Quaternionf qf(AngleAxisf(internal::random<double>(), Vector3f::Random()));
+ VERIFY_MATRIX(glRotate(qf), Projective3f(qf).matrix());
+
+ // 3D Transform
+ Transform<float,3,AffineCompact> acf3; acf3.matrix().setRandom();
+ VERIFY_MATRIX(glLoadMatrix(acf3), Projective3f(acf3).matrix());
+ VERIFY_MATRIX(glMultMatrix(acf3), Projective3f(acf3).matrix());
+
+ Transform<float,3,Affine> af3(acf3);
+ VERIFY_MATRIX(glLoadMatrix(af3), Projective3f(af3).matrix());
+ VERIFY_MATRIX(glMultMatrix(af3), Projective3f(af3).matrix());
+
+ Transform<float,3,Projective> pf3; pf3.matrix().setRandom();
+ VERIFY_MATRIX(glLoadMatrix(pf3), Projective3f(pf3).matrix());
+ VERIFY_MATRIX(glMultMatrix(pf3), Projective3f(pf3).matrix());
+
+ Transform<double,3,AffineCompact> acd3; acd3.matrix().setRandom();
+ VERIFY_MATRIX(glLoadMatrix(acd3), Projective3d(acd3).matrix());
+ VERIFY_MATRIX(glMultMatrix(acd3), Projective3d(acd3).matrix());
+
+ Transform<double,3,Affine> ad3(acd3);
+ VERIFY_MATRIX(glLoadMatrix(ad3), Projective3d(ad3).matrix());
+ VERIFY_MATRIX(glMultMatrix(ad3), Projective3d(ad3).matrix());
+
+ Transform<double,3,Projective> pd3; pd3.matrix().setRandom();
+ VERIFY_MATRIX(glLoadMatrix(pd3), Projective3d(pd3).matrix());
+ VERIFY_MATRIX(glMultMatrix(pd3), Projective3d(pd3).matrix());
+
+ // translations (2D and 3D)
+ {
+ Vector2f vf2; vf2.setRandom(); Vector3f vf23; vf23 << vf2, 0;
+ VERIFY_MATRIX(glTranslate(vf2), Projective3f(Translation3f(vf23)).matrix());
+ Vector2d vd2; vd2.setRandom(); Vector3d vd23; vd23 << vd2, 0;
+ VERIFY_MATRIX(glTranslate(vd2), Projective3d(Translation3d(vd23)).matrix());
+
+ Vector3f vf3; vf3.setRandom();
+ VERIFY_MATRIX(glTranslate(vf3), Projective3f(Translation3f(vf3)).matrix());
+ Vector3d vd3; vd3.setRandom();
+ VERIFY_MATRIX(glTranslate(vd3), Projective3d(Translation3d(vd3)).matrix());
+
+ Translation<float,3> tf3; tf3.vector().setRandom();
+ VERIFY_MATRIX(glTranslate(tf3), Projective3f(tf3).matrix());
+
+ Translation<double,3> td3; td3.vector().setRandom();
+ VERIFY_MATRIX(glTranslate(td3), Projective3d(td3).matrix());
+ }
+
+ // scaling (2D and 3D)
+ {
+ Vector2f vf2; vf2.setRandom(); Vector3f vf23; vf23 << vf2, 1;
+ VERIFY_MATRIX(glScale(vf2), Projective3f(Scaling(vf23)).matrix());
+ Vector2d vd2; vd2.setRandom(); Vector3d vd23; vd23 << vd2, 1;
+ VERIFY_MATRIX(glScale(vd2), Projective3d(Scaling(vd23)).matrix());
+
+ Vector3f vf3; vf3.setRandom();
+ VERIFY_MATRIX(glScale(vf3), Projective3f(Scaling(vf3)).matrix());
+ Vector3d vd3; vd3.setRandom();
+ VERIFY_MATRIX(glScale(vd3), Projective3d(Scaling(vd3)).matrix());
+
+ UniformScaling<float> usf(internal::random<float>());
+ VERIFY_MATRIX(glScale(usf), Projective3f(usf).matrix());
+
+ UniformScaling<double> usd(internal::random<double>());
+ VERIFY_MATRIX(glScale(usd), Projective3d(usd).matrix());
+ }
+
+ // uniform
+ {
+ const char* vtx = "void main(void) { gl_Position = gl_Vertex; }\n";
+
+ if(GLEW_VERSION_2_0)
+ {
+ #ifdef GL_VERSION_2_0
+ const char* frg = ""
+ "uniform vec2 v2f;\n"
+ "uniform vec3 v3f;\n"
+ "uniform vec4 v4f;\n"
+ "uniform ivec2 v2i;\n"
+ "uniform ivec3 v3i;\n"
+ "uniform ivec4 v4i;\n"
+ "uniform mat2 m2f;\n"
+ "uniform mat3 m3f;\n"
+ "uniform mat4 m4f;\n"
+ "void main(void) { gl_FragColor = vec4(v2f[0]+v3f[0]+v4f[0])+vec4(v2i[0]+v3i[0]+v4i[0])+vec4(m2f[0][0]+m3f[0][0]+m4f[0][0]); }\n";
+
+ GLint prg_id = createShader(vtx,frg);
+
+ VERIFY_UNIFORM(fv,v2f, Vector2f);
+ VERIFY_UNIFORM(fv,v3f, Vector3f);
+ VERIFY_UNIFORM(fv,v4f, Vector4f);
+ VERIFY_UNIFORMi(v2i, Vector2i);
+ VERIFY_UNIFORMi(v3i, Vector3i);
+ VERIFY_UNIFORMi(v4i, Vector4i);
+ VERIFY_UNIFORM(fv,m2f, Matrix2f);
+ VERIFY_UNIFORM(fv,m3f, Matrix3f);
+ VERIFY_UNIFORM(fv,m4f, Matrix4f);
+ #endif
+ }
+ else
+    std::cerr << "Warning: OpenGL 2.0 was not tested\n";
+
+ if(GLEW_VERSION_2_1)
+ {
+ #ifdef GL_VERSION_2_1
+ const char* frg = "#version 120\n"
+ "uniform mat2x3 m23f;\n"
+ "uniform mat3x2 m32f;\n"
+ "uniform mat2x4 m24f;\n"
+ "uniform mat4x2 m42f;\n"
+ "uniform mat3x4 m34f;\n"
+ "uniform mat4x3 m43f;\n"
+ "void main(void) { gl_FragColor = vec4(m23f[0][0]+m32f[0][0]+m24f[0][0]+m42f[0][0]+m34f[0][0]+m43f[0][0]); }\n";
+
+ GLint prg_id = createShader(vtx,frg);
+
+ typedef Matrix<float,2,3> Matrix23f;
+ typedef Matrix<float,3,2> Matrix32f;
+ typedef Matrix<float,2,4> Matrix24f;
+ typedef Matrix<float,4,2> Matrix42f;
+ typedef Matrix<float,3,4> Matrix34f;
+ typedef Matrix<float,4,3> Matrix43f;
+
+ VERIFY_UNIFORM(fv,m23f, Matrix23f);
+ VERIFY_UNIFORM(fv,m32f, Matrix32f);
+ VERIFY_UNIFORM(fv,m24f, Matrix24f);
+ VERIFY_UNIFORM(fv,m42f, Matrix42f);
+ VERIFY_UNIFORM(fv,m34f, Matrix34f);
+ VERIFY_UNIFORM(fv,m43f, Matrix43f);
+ #endif
+ }
+ else
+    std::cerr << "Warning: OpenGL 2.1 was not tested\n";
+
+ if(GLEW_VERSION_3_0)
+ {
+ #ifdef GL_VERSION_3_0
+ const char* frg = "#version 150\n"
+ "uniform uvec2 v2ui;\n"
+ "uniform uvec3 v3ui;\n"
+ "uniform uvec4 v4ui;\n"
+ "out vec4 data;\n"
+ "void main(void) { data = vec4(v2ui[0]+v3ui[0]+v4ui[0]); }\n";
+
+ GLint prg_id = createShader(vtx,frg);
+
+ typedef Matrix<unsigned int,2,1> Vector2ui;
+ typedef Matrix<unsigned int,3,1> Vector3ui;
+ typedef Matrix<unsigned int,4,1> Vector4ui;
+
+ VERIFY_UNIFORMi(v2ui, Vector2ui);
+ VERIFY_UNIFORMi(v3ui, Vector3ui);
+ VERIFY_UNIFORMi(v4ui, Vector4ui);
+ #endif
+ }
+ else
+    std::cerr << "Warning: OpenGL 3.0 was not tested\n";
+
+ #ifdef GLEW_ARB_gpu_shader_fp64
+ if(GLEW_ARB_gpu_shader_fp64)
+ {
+ #ifdef GL_ARB_gpu_shader_fp64
+ const char* frg = "#version 150\n"
+ "uniform dvec2 v2d;\n"
+ "uniform dvec3 v3d;\n"
+ "uniform dvec4 v4d;\n"
+ "out vec4 data;\n"
+ "void main(void) { data = vec4(v2d[0]+v3d[0]+v4d[0]); }\n";
+
+ GLint prg_id = createShader(vtx,frg);
+
+
+ VERIFY_UNIFORM(dv,v2d, Vector2d);
+ VERIFY_UNIFORM(dv,v3d, Vector3d);
+ VERIFY_UNIFORM(dv,v4d, Vector4d);
+ #endif
+ }
+ else
+ std::cerr << "Warning: GLEW_ARB_gpu_shader_fp64 was not tested\n";
+ #else
+ std::cerr << "Warning: GLEW_ARB_gpu_shader_fp64 was not tested\n";
+ #endif
+ }
+
+}
diff --git a/unsupported/test/polynomialsolver.cpp b/unsupported/test/polynomialsolver.cpp
new file mode 100644
index 0000000..de79f15
--- /dev/null
+++ b/unsupported/test/polynomialsolver.cpp
@@ -0,0 +1,213 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Manuel Yguel <manuel.yguel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <unsupported/Eigen/Polynomials>
+#include <iostream>
+#include <algorithm>
+
+using namespace std;
+
+namespace Eigen {
+namespace internal {
+template<int Size>
+struct increment_if_fixed_size
+{
+ enum {
+ ret = (Size == Dynamic) ? Dynamic : Size+1
+ };
+};
+}
+}
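+
+// Sanity sketch of the mapping above (a degree-d polynomial has d+1 coefficients):
+//   increment_if_fixed_size<3>::ret       == 4        // cubic: 4 coefficients
+//   increment_if_fixed_size<Dynamic>::ret == Dynamic  // size known only at run time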
+
+
+template<int Deg, typename POLYNOMIAL, typename SOLVER>
+bool aux_evalSolver( const POLYNOMIAL& pols, SOLVER& psolve )
+{
+ typedef typename POLYNOMIAL::Index Index;
+ typedef typename POLYNOMIAL::Scalar Scalar;
+
+ typedef typename SOLVER::RootsType RootsType;
+ typedef Matrix<Scalar,Deg,1> EvalRootsType;
+
+ const Index deg = pols.size()-1;
+
+ psolve.compute( pols );
+ const RootsType& roots( psolve.roots() );
+ EvalRootsType evr( deg );
+ for( int i=0; i<roots.size(); ++i ){
+ evr[i] = std::abs( poly_eval( pols, roots[i] ) ); }
+
+ bool evalToZero = evr.isZero( test_precision<Scalar>() );
+ if( !evalToZero )
+ {
+ cerr << "WRONG root: " << endl;
+ cerr << "Polynomial: " << pols.transpose() << endl;
+ cerr << "Roots found: " << roots.transpose() << endl;
+ cerr << "Abs value of the polynomial at the roots: " << evr.transpose() << endl;
+ cerr << endl;
+ }
+
+ std::vector<Scalar> rootModuli( roots.size() );
+ Map< EvalRootsType > aux( &rootModuli[0], roots.size() );
+ aux = roots.array().abs();
+ std::sort( rootModuli.begin(), rootModuli.end() );
+ bool distinctModuli=true;
+ for( size_t i=1; i<rootModuli.size() && distinctModuli; ++i )
+ {
+ if( internal::isApprox( rootModuli[i], rootModuli[i-1] ) ){
+ distinctModuli = false; }
+ }
+ VERIFY( evalToZero || !distinctModuli );
+
+ return distinctModuli;
+}
+
+
+
+
+
+
+
+template<int Deg, typename POLYNOMIAL>
+void evalSolver( const POLYNOMIAL& pols )
+{
+ typedef typename POLYNOMIAL::Scalar Scalar;
+
+ typedef PolynomialSolver<Scalar, Deg > PolynomialSolverType;
+
+ PolynomialSolverType psolve;
+ aux_evalSolver<Deg, POLYNOMIAL, PolynomialSolverType>( pols, psolve );
+}
+
+
+
+
+template< int Deg, typename POLYNOMIAL, typename ROOTS, typename REAL_ROOTS >
+void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const REAL_ROOTS& real_roots )
+{
+ using std::sqrt;
+ typedef typename POLYNOMIAL::Scalar Scalar;
+
+ typedef PolynomialSolver<Scalar, Deg > PolynomialSolverType;
+
+ PolynomialSolverType psolve;
+ if( aux_evalSolver<Deg, POLYNOMIAL, PolynomialSolverType>( pols, psolve ) )
+ {
+    // At this point it is assumed that:
+    //  1) the roots found are correct,
+    //  2) the roots have distinct moduli.
+
+ typedef typename REAL_ROOTS::Scalar Real;
+
+ //Test realRoots
+ std::vector< Real > calc_realRoots;
+ psolve.realRoots( calc_realRoots );
+ VERIFY( calc_realRoots.size() == (size_t)real_roots.size() );
+
+ const Scalar psPrec = sqrt( test_precision<Scalar>() );
+
+ for( size_t i=0; i<calc_realRoots.size(); ++i )
+ {
+ bool found = false;
+ for( size_t j=0; j<calc_realRoots.size()&& !found; ++j )
+ {
+        if( internal::isApprox( calc_realRoots[i], real_roots[j], psPrec ) ){
+ found = true; }
+ }
+ VERIFY( found );
+ }
+
+ //Test greatestRoot
+ VERIFY( internal::isApprox( roots.array().abs().maxCoeff(),
+ abs( psolve.greatestRoot() ), psPrec ) );
+
+ //Test smallestRoot
+ VERIFY( internal::isApprox( roots.array().abs().minCoeff(),
+ abs( psolve.smallestRoot() ), psPrec ) );
+
+ bool hasRealRoot;
+ //Test absGreatestRealRoot
+ Real r = psolve.absGreatestRealRoot( hasRealRoot );
+ VERIFY( hasRealRoot == (real_roots.size() > 0 ) );
+ if( hasRealRoot ){
+ VERIFY( internal::isApprox( real_roots.array().abs().maxCoeff(), abs(r), psPrec ) ); }
+
+ //Test absSmallestRealRoot
+ r = psolve.absSmallestRealRoot( hasRealRoot );
+ VERIFY( hasRealRoot == (real_roots.size() > 0 ) );
+ if( hasRealRoot ){
+ VERIFY( internal::isApprox( real_roots.array().abs().minCoeff(), abs( r ), psPrec ) ); }
+
+ //Test greatestRealRoot
+ r = psolve.greatestRealRoot( hasRealRoot );
+ VERIFY( hasRealRoot == (real_roots.size() > 0 ) );
+ if( hasRealRoot ){
+ VERIFY( internal::isApprox( real_roots.array().maxCoeff(), r, psPrec ) ); }
+
+ //Test smallestRealRoot
+ r = psolve.smallestRealRoot( hasRealRoot );
+ VERIFY( hasRealRoot == (real_roots.size() > 0 ) );
+ if( hasRealRoot ){
+ VERIFY( internal::isApprox( real_roots.array().minCoeff(), r, psPrec ) ); }
+ }
+}
+
+
+template<typename _Scalar, int _Deg>
+void polynomialsolver(int deg)
+{
+ typedef internal::increment_if_fixed_size<_Deg> Dim;
+ typedef Matrix<_Scalar,Dim::ret,1> PolynomialType;
+ typedef Matrix<_Scalar,_Deg,1> EvalRootsType;
+
+ cout << "Standard cases" << endl;
+ PolynomialType pols = PolynomialType::Random(deg+1);
+ evalSolver<_Deg,PolynomialType>( pols );
+
+ cout << "Hard cases" << endl;
+ _Scalar multipleRoot = internal::random<_Scalar>();
+ EvalRootsType allRoots = EvalRootsType::Constant(deg,multipleRoot);
+ roots_to_monicPolynomial( allRoots, pols );
+ evalSolver<_Deg,PolynomialType>( pols );
+
+ cout << "Test sugar" << endl;
+ EvalRootsType realRoots = EvalRootsType::Random(deg);
+ roots_to_monicPolynomial( realRoots, pols );
+ evalSolverSugarFunction<_Deg>(
+ pols,
+ realRoots.template cast <
+ std::complex<
+ typename NumTraits<_Scalar>::Real
+ >
+ >(),
+ realRoots );
+}
+
+void test_polynomialsolver()
+{
+ for(int i = 0; i < g_repeat; i++)
+ {
+ CALL_SUBTEST_1( (polynomialsolver<float,1>(1)) );
+ CALL_SUBTEST_2( (polynomialsolver<double,2>(2)) );
+ CALL_SUBTEST_3( (polynomialsolver<double,3>(3)) );
+ CALL_SUBTEST_4( (polynomialsolver<float,4>(4)) );
+ CALL_SUBTEST_5( (polynomialsolver<double,5>(5)) );
+ CALL_SUBTEST_6( (polynomialsolver<float,6>(6)) );
+ CALL_SUBTEST_7( (polynomialsolver<float,7>(7)) );
+ CALL_SUBTEST_8( (polynomialsolver<double,8>(8)) );
+
+ CALL_SUBTEST_9( (polynomialsolver<float,Dynamic>(
+ internal::random<int>(9,13)
+ )) );
+ CALL_SUBTEST_10((polynomialsolver<double,Dynamic>(
+ internal::random<int>(9,13)
+ )) );
+ }
+}
diff --git a/unsupported/test/polynomialutils.cpp b/unsupported/test/polynomialutils.cpp
new file mode 100644
index 0000000..5fc9684
--- /dev/null
+++ b/unsupported/test/polynomialutils.cpp
@@ -0,0 +1,113 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Manuel Yguel <manuel.yguel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+#include <unsupported/Eigen/Polynomials>
+#include <iostream>
+
+using namespace std;
+
+namespace Eigen {
+namespace internal {
+template<int Size>
+struct increment_if_fixed_size
+{
+ enum {
+ ret = (Size == Dynamic) ? Dynamic : Size+1
+ };
+};
+}
+}
+
+template<typename _Scalar, int _Deg>
+void realRoots_to_monicPolynomial_test(int deg)
+{
+ typedef internal::increment_if_fixed_size<_Deg> Dim;
+ typedef Matrix<_Scalar,Dim::ret,1> PolynomialType;
+ typedef Matrix<_Scalar,_Deg,1> EvalRootsType;
+
+ PolynomialType pols(deg+1);
+ EvalRootsType roots = EvalRootsType::Random(deg);
+ roots_to_monicPolynomial( roots, pols );
+
+ EvalRootsType evr( deg );
+ for( int i=0; i<roots.size(); ++i ){
+ evr[i] = std::abs( poly_eval( pols, roots[i] ) ); }
+
+ bool evalToZero = evr.isZero( test_precision<_Scalar>() );
+ if( !evalToZero ){
+ cerr << evr.transpose() << endl; }
+ VERIFY( evalToZero );
+}
+
+template<typename _Scalar> void realRoots_to_monicPolynomial_scalar()
+{
+ CALL_SUBTEST_2( (realRoots_to_monicPolynomial_test<_Scalar,2>(2)) );
+ CALL_SUBTEST_3( (realRoots_to_monicPolynomial_test<_Scalar,3>(3)) );
+ CALL_SUBTEST_4( (realRoots_to_monicPolynomial_test<_Scalar,4>(4)) );
+ CALL_SUBTEST_5( (realRoots_to_monicPolynomial_test<_Scalar,5>(5)) );
+ CALL_SUBTEST_6( (realRoots_to_monicPolynomial_test<_Scalar,6>(6)) );
+ CALL_SUBTEST_7( (realRoots_to_monicPolynomial_test<_Scalar,7>(7)) );
+ CALL_SUBTEST_8( (realRoots_to_monicPolynomial_test<_Scalar,17>(17)) );
+
+ CALL_SUBTEST_9( (realRoots_to_monicPolynomial_test<_Scalar,Dynamic>(
+ internal::random<int>(18,26) )) );
+}
+
+
+
+
+template<typename _Scalar, int _Deg>
+void CauchyBounds(int deg)
+{
+ typedef internal::increment_if_fixed_size<_Deg> Dim;
+ typedef Matrix<_Scalar,Dim::ret,1> PolynomialType;
+ typedef Matrix<_Scalar,_Deg,1> EvalRootsType;
+
+ PolynomialType pols(deg+1);
+ EvalRootsType roots = EvalRootsType::Random(deg);
+ roots_to_monicPolynomial( roots, pols );
+ _Scalar M = cauchy_max_bound( pols );
+ _Scalar m = cauchy_min_bound( pols );
+ _Scalar Max = roots.array().abs().maxCoeff();
+ _Scalar min = roots.array().abs().minCoeff();
+ bool eval = (M >= Max) && (m <= min);
+ if( !eval )
+ {
+ cerr << "Roots: " << roots << endl;
+ cerr << "Bounds: (" << m << ", " << M << ")" << endl;
+ cerr << "Min,Max: (" << min << ", " << Max << ")" << endl;
+ }
+ VERIFY( eval );
+}
+
+template<typename _Scalar> void CauchyBounds_scalar()
+{
+ CALL_SUBTEST_2( (CauchyBounds<_Scalar,2>(2)) );
+ CALL_SUBTEST_3( (CauchyBounds<_Scalar,3>(3)) );
+ CALL_SUBTEST_4( (CauchyBounds<_Scalar,4>(4)) );
+ CALL_SUBTEST_5( (CauchyBounds<_Scalar,5>(5)) );
+ CALL_SUBTEST_6( (CauchyBounds<_Scalar,6>(6)) );
+ CALL_SUBTEST_7( (CauchyBounds<_Scalar,7>(7)) );
+ CALL_SUBTEST_8( (CauchyBounds<_Scalar,17>(17)) );
+
+ CALL_SUBTEST_9( (CauchyBounds<_Scalar,Dynamic>(
+ internal::random<int>(18,26) )) );
+}
+
+void test_polynomialutils()
+{
+ for(int i = 0; i < g_repeat; i++)
+ {
+ realRoots_to_monicPolynomial_scalar<double>();
+ realRoots_to_monicPolynomial_scalar<float>();
+ CauchyBounds_scalar<double>();
+ CauchyBounds_scalar<float>();
+ }
+}
diff --git a/unsupported/test/sparse_extra.cpp b/unsupported/test/sparse_extra.cpp
new file mode 100644
index 0000000..1ee791b
--- /dev/null
+++ b/unsupported/test/sparse_extra.cpp
@@ -0,0 +1,148 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+// import the basic and product tests for the deprecated DynamicSparseMatrix
+#define EIGEN_NO_DEPRECATED_WARNING
+#include "sparse_basic.cpp"
+#include "sparse_product.cpp"
+#include <Eigen/SparseExtra>
+
+template<typename SetterType,typename DenseType, typename Scalar, int Options>
+bool test_random_setter(SparseMatrix<Scalar,Options>& sm, const DenseType& ref, const std::vector<Vector2i>& nonzeroCoords)
+{
+ {
+ sm.setZero();
+ SetterType w(sm);
+ std::vector<Vector2i> remaining = nonzeroCoords;
+ while(!remaining.empty())
+ {
+ int i = internal::random<int>(0,static_cast<int>(remaining.size())-1);
+ w(remaining[i].x(),remaining[i].y()) = ref.coeff(remaining[i].x(),remaining[i].y());
+ remaining[i] = remaining.back();
+ remaining.pop_back();
+ }
+ }
+ return sm.isApprox(ref);
+}
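+
+// Note on the scoping above: a RandomSetter typically buffers writes in an
+// associative container and only flushes them into the sparse matrix when the
+// setter goes out of scope, hence the extra block around 'w'.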
+
+template<typename SetterType,typename DenseType, typename T>
+bool test_random_setter(DynamicSparseMatrix<T>& sm, const DenseType& ref, const std::vector<Vector2i>& nonzeroCoords)
+{
+ sm.setZero();
+ std::vector<Vector2i> remaining = nonzeroCoords;
+ while(!remaining.empty())
+ {
+ int i = internal::random<int>(0,static_cast<int>(remaining.size())-1);
+ sm.coeffRef(remaining[i].x(),remaining[i].y()) = ref.coeff(remaining[i].x(),remaining[i].y());
+ remaining[i] = remaining.back();
+ remaining.pop_back();
+ }
+ return sm.isApprox(ref);
+}
+
+template<typename SparseMatrixType> void sparse_extra(const SparseMatrixType& ref)
+{
+ typedef typename SparseMatrixType::Index Index;
+ const Index rows = ref.rows();
+ const Index cols = ref.cols();
+ typedef typename SparseMatrixType::Scalar Scalar;
+ enum { Flags = SparseMatrixType::Flags };
+
+ double density = (std::max)(8./(rows*cols), 0.01);
+ typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+ typedef Matrix<Scalar,Dynamic,1> DenseVector;
+ Scalar eps = 1e-6;
+
+ SparseMatrixType m(rows, cols);
+ DenseMatrix refMat = DenseMatrix::Zero(rows, cols);
+ DenseVector vec1 = DenseVector::Random(rows);
+
+ std::vector<Vector2i> zeroCoords;
+ std::vector<Vector2i> nonzeroCoords;
+ initSparse<Scalar>(density, refMat, m, 0, &zeroCoords, &nonzeroCoords);
+
+ if (zeroCoords.size()==0 || nonzeroCoords.size()==0)
+ return;
+
+ // test coeff and coeffRef
+ for (int i=0; i<(int)zeroCoords.size(); ++i)
+ {
+ VERIFY_IS_MUCH_SMALLER_THAN( m.coeff(zeroCoords[i].x(),zeroCoords[i].y()), eps );
+ if(internal::is_same<SparseMatrixType,SparseMatrix<Scalar,Flags> >::value)
+ VERIFY_RAISES_ASSERT( m.coeffRef(zeroCoords[0].x(),zeroCoords[0].y()) = 5 );
+ }
+ VERIFY_IS_APPROX(m, refMat);
+
+ m.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5);
+ refMat.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5);
+
+ VERIFY_IS_APPROX(m, refMat);
+
+ // random setter
+// {
+// m.setZero();
+// VERIFY_IS_NOT_APPROX(m, refMat);
+// SparseSetter<SparseMatrixType, RandomAccessPattern> w(m);
+// std::vector<Vector2i> remaining = nonzeroCoords;
+// while(!remaining.empty())
+// {
+// int i = internal::random<int>(0,remaining.size()-1);
+// w->coeffRef(remaining[i].x(),remaining[i].y()) = refMat.coeff(remaining[i].x(),remaining[i].y());
+// remaining[i] = remaining.back();
+// remaining.pop_back();
+// }
+// }
+// VERIFY_IS_APPROX(m, refMat);
+
+ VERIFY(( test_random_setter<RandomSetter<SparseMatrixType, StdMapTraits> >(m,refMat,nonzeroCoords) ));
+ #ifdef EIGEN_UNORDERED_MAP_SUPPORT
+ VERIFY(( test_random_setter<RandomSetter<SparseMatrixType, StdUnorderedMapTraits> >(m,refMat,nonzeroCoords) ));
+ #endif
+ #ifdef _DENSE_HASH_MAP_H_
+ VERIFY(( test_random_setter<RandomSetter<SparseMatrixType, GoogleDenseHashMapTraits> >(m,refMat,nonzeroCoords) ));
+ #endif
+ #ifdef _SPARSE_HASH_MAP_H_
+ VERIFY(( test_random_setter<RandomSetter<SparseMatrixType, GoogleSparseHashMapTraits> >(m,refMat,nonzeroCoords) ));
+ #endif
+
+
+ // test RandomSetter
+ /*{
+ SparseMatrixType m1(rows,cols), m2(rows,cols);
+ DenseMatrix refM1 = DenseMatrix::Zero(rows, rows);
+ initSparse<Scalar>(density, refM1, m1);
+ {
+ Eigen::RandomSetter<SparseMatrixType > setter(m2);
+ for (int j=0; j<m1.outerSize(); ++j)
+ for (typename SparseMatrixType::InnerIterator i(m1,j); i; ++i)
+ setter(i.index(), j) = i.value();
+ }
+ VERIFY_IS_APPROX(m1, m2);
+ }*/
+
+
+}
+
+void test_sparse_extra()
+{
+ for(int i = 0; i < g_repeat; i++) {
+ int s = Eigen::internal::random<int>(1,50);
+ CALL_SUBTEST_1( sparse_extra(SparseMatrix<double>(8, 8)) );
+ CALL_SUBTEST_2( sparse_extra(SparseMatrix<std::complex<double> >(s, s)) );
+ CALL_SUBTEST_1( sparse_extra(SparseMatrix<double>(s, s)) );
+
+ CALL_SUBTEST_3( sparse_extra(DynamicSparseMatrix<double>(s, s)) );
+// CALL_SUBTEST_3(( sparse_basic(DynamicSparseMatrix<double>(s, s)) ));
+// CALL_SUBTEST_3(( sparse_basic(DynamicSparseMatrix<double,ColMajor,long int>(s, s)) ));
+
+ CALL_SUBTEST_3( (sparse_product<DynamicSparseMatrix<float, ColMajor> >()) );
+ CALL_SUBTEST_3( (sparse_product<DynamicSparseMatrix<float, RowMajor> >()) );
+ }
+}
diff --git a/unsupported/test/splines.cpp b/unsupported/test/splines.cpp
new file mode 100644
index 0000000..a7eb3e0
--- /dev/null
+++ b/unsupported/test/splines.cpp
@@ -0,0 +1,244 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010-2011 Hauke Heibel <heibel@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#include "main.h"
+
+#include <unsupported/Eigen/Splines>
+
+namespace Eigen {
+
+// let's do some explicit instantiations and thus
+// force the compilation of all spline functions...
+template class Spline<double, 2, Dynamic>;
+template class Spline<double, 3, Dynamic>;
+
+template class Spline<double, 2, 2>;
+template class Spline<double, 2, 3>;
+template class Spline<double, 2, 4>;
+template class Spline<double, 2, 5>;
+
+template class Spline<float, 2, Dynamic>;
+template class Spline<float, 3, Dynamic>;
+
+template class Spline<float, 3, 2>;
+template class Spline<float, 3, 3>;
+template class Spline<float, 3, 4>;
+template class Spline<float, 3, 5>;
+
+}
+
+Spline<double, 2, Dynamic> closed_spline2d()
+{
+ RowVectorXd knots(12);
+ knots << 0,
+ 0,
+ 0,
+ 0,
+ 0.867193179093898,
+ 1.660330955342408,
+ 2.605084834823134,
+ 3.484154586374428,
+ 4.252699478956276,
+ 4.252699478956276,
+ 4.252699478956276,
+ 4.252699478956276;
+
+ MatrixXd ctrls(8,2);
+ ctrls << -0.370967741935484, 0.236842105263158,
+ -0.231401860693277, 0.442245185027632,
+ 0.344361228532831, 0.773369994120753,
+ 0.828990216203802, 0.106550882647595,
+ 0.407270163678382, -1.043452922172848,
+ -0.488467813584053, -0.390098582530090,
+ -0.494657189446427, 0.054804824897884,
+ -0.370967741935484, 0.236842105263158;
+ ctrls.transposeInPlace();
+
+ return Spline<double, 2, Dynamic>(knots, ctrls);
+}
+
+/* create a reference spline */
+Spline<double, 3, Dynamic> spline3d()
+{
+ RowVectorXd knots(11);
+ knots << 0,
+ 0,
+ 0,
+ 0.118997681558377,
+ 0.162611735194631,
+ 0.498364051982143,
+ 0.655098003973841,
+ 0.679702676853675,
+ 1.000000000000000,
+ 1.000000000000000,
+ 1.000000000000000;
+
+ MatrixXd ctrls(8,3);
+ ctrls << 0.959743958516081, 0.340385726666133, 0.585267750979777,
+ 0.223811939491137, 0.751267059305653, 0.255095115459269,
+ 0.505957051665142, 0.699076722656686, 0.890903252535799,
+ 0.959291425205444, 0.547215529963803, 0.138624442828679,
+ 0.149294005559057, 0.257508254123736, 0.840717255983663,
+ 0.254282178971531, 0.814284826068816, 0.243524968724989,
+ 0.929263623187228, 0.349983765984809, 0.196595250431208,
+ 0.251083857976031, 0.616044676146639, 0.473288848902729;
+ ctrls.transposeInPlace();
+
+ return Spline<double, 3, Dynamic>(knots, ctrls);
+}
+
+/* compares evaluations against known results */
+void eval_spline3d()
+{
+ Spline3d spline = spline3d();
+
+ RowVectorXd u(10);
+ u << 0.351659507062997,
+ 0.830828627896291,
+ 0.585264091152724,
+ 0.549723608291140,
+ 0.917193663829810,
+ 0.285839018820374,
+ 0.757200229110721,
+ 0.753729094278495,
+ 0.380445846975357,
+ 0.567821640725221;
+
+ MatrixXd pts(10,3);
+ pts << 0.707620811535916, 0.510258911240815, 0.417485437023409,
+ 0.603422256426978, 0.529498282727551, 0.270351549348981,
+ 0.228364197569334, 0.423745615677815, 0.637687289287490,
+ 0.275556796335168, 0.350856706427970, 0.684295784598905,
+ 0.514519311047655, 0.525077224890754, 0.351628308305896,
+ 0.724152914315666, 0.574461155457304, 0.469860285484058,
+ 0.529365063753288, 0.613328702656816, 0.237837040141739,
+ 0.522469395136878, 0.619099658652895, 0.237139665242069,
+ 0.677357023849552, 0.480655768435853, 0.422227610314397,
+ 0.247046593173758, 0.380604672404750, 0.670065791405019;
+ pts.transposeInPlace();
+
+ for (int i=0; i<u.size(); ++i)
+ {
+ Vector3d pt = spline(u(i));
+ VERIFY( (pt - pts.col(i)).norm() < 1e-14 );
+ }
+}
+
+/* compares evaluations at the knots (corner cases) against known results */
+void eval_spline3d_onbrks()
+{
+ Spline3d spline = spline3d();
+
+ RowVectorXd u = spline.knots();
+
+ MatrixXd pts(11,3);
+ pts << 0.959743958516081, 0.340385726666133, 0.585267750979777,
+ 0.959743958516081, 0.340385726666133, 0.585267750979777,
+ 0.959743958516081, 0.340385726666133, 0.585267750979777,
+ 0.430282980289940, 0.713074680056118, 0.720373307943349,
+ 0.558074875553060, 0.681617921034459, 0.804417124839942,
+ 0.407076008291750, 0.349707710518163, 0.617275937419545,
+ 0.240037008286602, 0.738739390398014, 0.324554153129411,
+ 0.302434111480572, 0.781162443963899, 0.240177089094644,
+ 0.251083857976031, 0.616044676146639, 0.473288848902729,
+ 0.251083857976031, 0.616044676146639, 0.473288848902729,
+ 0.251083857976031, 0.616044676146639, 0.473288848902729;
+ pts.transposeInPlace();
+
+ for (int i=0; i<u.size(); ++i)
+ {
+ Vector3d pt = spline(u(i));
+ VERIFY( (pt - pts.col(i)).norm() < 1e-14 );
+ }
+}
+
+void eval_closed_spline2d()
+{
+ Spline2d spline = closed_spline2d();
+
+ RowVectorXd u(12);
+ u << 0,
+ 0.332457030395796,
+ 0.356467130532952,
+ 0.453562180176215,
+ 0.648017921874804,
+ 0.973770235555003,
+ 1.882577647219307,
+ 2.289408593930498,
+ 3.511951429883045,
+ 3.884149321369450,
+ 4.236261590369414,
+ 4.252699478956276;
+
+ MatrixXd pts(12,2);
+ pts << -0.370967741935484, 0.236842105263158,
+ -0.152576775123250, 0.448975001279334,
+ -0.133417538277668, 0.461615613865667,
+ -0.053199060826740, 0.507630360006299,
+ 0.114249591147281, 0.570414135097409,
+ 0.377810316891987, 0.560497102875315,
+ 0.665052120135908, -0.157557441109611,
+ 0.516006487053228, -0.559763292174825,
+ -0.379486035348887, -0.331959640488223,
+ -0.462034726249078, -0.039105670080824,
+ -0.378730600917982, 0.225127015099919,
+ -0.370967741935484, 0.236842105263158;
+ pts.transposeInPlace();
+
+ for (int i=0; i<u.size(); ++i)
+ {
+ Vector2d pt = spline(u(i));
+ VERIFY( (pt - pts.col(i)).norm() < 1e-14 );
+ }
+}
+
+void check_global_interpolation2d()
+{
+ typedef Spline2d::PointType PointType;
+ typedef Spline2d::KnotVectorType KnotVectorType;
+ typedef Spline2d::ControlPointVectorType ControlPointVectorType;
+
+ ControlPointVectorType points = ControlPointVectorType::Random(2,100);
+
+ KnotVectorType chord_lengths; // knot parameters
+ Eigen::ChordLengths(points, chord_lengths);
+
+ // interpolation without knot parameters
+ {
+ const Spline2d spline = SplineFitting<Spline2d>::Interpolate(points,3);
+
+ for (Eigen::DenseIndex i=0; i<points.cols(); ++i)
+ {
+ PointType pt = spline( chord_lengths(i) );
+ PointType ref = points.col(i);
+ VERIFY( (pt - ref).matrix().norm() < 1e-14 );
+ }
+ }
+
+ // interpolation with given knot parameters
+ {
+ const Spline2d spline = SplineFitting<Spline2d>::Interpolate(points,3,chord_lengths);
+
+ for (Eigen::DenseIndex i=0; i<points.cols(); ++i)
+ {
+ PointType pt = spline( chord_lengths(i) );
+ PointType ref = points.col(i);
+ VERIFY( (pt - ref).matrix().norm() < 1e-14 );
+ }
+ }
+}
+
+
+void test_splines()
+{
+ CALL_SUBTEST( eval_spline3d() );
+ CALL_SUBTEST( eval_spline3d_onbrks() );
+ CALL_SUBTEST( eval_closed_spline2d() );
+ CALL_SUBTEST( check_global_interpolation2d() );
+}
diff --git a/unsupported/test/svd_common.h b/unsupported/test/svd_common.h
new file mode 100644
index 0000000..b40c23a
--- /dev/null
+++ b/unsupported/test/svd_common.h
@@ -0,0 +1,261 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// discard stack allocation as that too bypasses malloc
+#define EIGEN_STACK_ALLOCATION_LIMIT 0
+#define EIGEN_RUNTIME_NO_MALLOC
+
+#include "main.h"
+#include <unsupported/Eigen/SVD>
+#include <Eigen/LU>
+
+
+// check that "svd" is a valid SVD of "m", i.e. m = U * Sigma * V^* with unitary U and V
+template<typename MatrixType, typename SVD>
+void svd_check_full(const MatrixType& m, const SVD& svd)
+{
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ typedef typename MatrixType::Scalar Scalar;
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime> MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime> MatrixVType;
+
+
+ MatrixType sigma = MatrixType::Zero(rows, cols);
+ sigma.diagonal() = svd.singularValues().template cast<Scalar>();
+ MatrixUType u = svd.matrixU();
+ MatrixVType v = svd.matrixV();
+ VERIFY_IS_APPROX(m, u * sigma * v.adjoint());
+ VERIFY_IS_UNITARY(u);
+ VERIFY_IS_UNITARY(v);
+} // end svd_check_full
+
+
+
+// Compare the results of a partial computation against a reference (full) SVD
+template<typename MatrixType, typename SVD>
+void svd_compare_to_full(const MatrixType& m,
+ unsigned int computationOptions,
+ const SVD& referenceSvd)
+{
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+ Index diagSize = (std::min)(rows, cols);
+
+ SVD svd(m, computationOptions);
+
+ VERIFY_IS_APPROX(svd.singularValues(), referenceSvd.singularValues());
+ if(computationOptions & ComputeFullU)
+ VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU());
+ if(computationOptions & ComputeThinU)
+ VERIFY_IS_APPROX(svd.matrixU(), referenceSvd.matrixU().leftCols(diagSize));
+ if(computationOptions & ComputeFullV)
+ VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV());
+ if(computationOptions & ComputeThinV)
+ VERIFY_IS_APPROX(svd.matrixV(), referenceSvd.matrixV().leftCols(diagSize));
+} // end svd_compare_to_full
+
+
+
+template<typename MatrixType, typename SVD>
+void svd_solve(const MatrixType& m, unsigned int computationOptions)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, Dynamic> RhsType;
+ typedef Matrix<Scalar, ColsAtCompileTime, Dynamic> SolutionType;
+
+ RhsType rhs = RhsType::Random(rows, internal::random<Index>(1, cols));
+ SVD svd(m, computationOptions);
+ SolutionType x = svd.solve(rhs);
+ // check the normal equations, which also hold for least-squares solutions
+ VERIFY_IS_APPROX(m.adjoint()*m*x, m.adjoint()*rhs);
+} // end svd_solve
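+
+// Rationale: the least-squares solution x of m*x = rhs makes the residual
+// rhs - m*x orthogonal to the column space of m, so the normal equations
+// m^H * m * x = m^H * rhs hold even when no exact solution exists.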
+
+
+// test computation options
+// (split into 2 functions because the JacobiSVD tests may return early, before the second batch runs)
+template<typename MatrixType, typename SVD>
+void svd_test_computation_options_1(const MatrixType& m, const SVD& fullSvd)
+{
+ svd_check_full< MatrixType, SVD >(m, fullSvd);
+ svd_solve< MatrixType, SVD >(m, ComputeFullU | ComputeFullV);
+}
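+
+// The first batch above covers the full factorization and full solve; the
+// second batch below exercises every reduced (thin) variant as well.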
+
+
+template<typename MatrixType, typename SVD>
+void svd_test_computation_options_2(const MatrixType& m, const SVD& fullSvd)
+{
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeFullU, fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeFullV, fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, 0, fullSvd);
+
+ if (MatrixType::ColsAtCompileTime == Dynamic) {
+ // thin U/V are only available with a dynamic number of columns
+
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeFullU|ComputeThinV, fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeThinV, fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU|ComputeFullV, fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU, fullSvd);
+ svd_compare_to_full< MatrixType, SVD >(m, ComputeThinU|ComputeThinV, fullSvd);
+ svd_solve<MatrixType, SVD>(m, ComputeFullU | ComputeThinV);
+ svd_solve<MatrixType, SVD>(m, ComputeThinU | ComputeFullV);
+ svd_solve<MatrixType, SVD>(m, ComputeThinU | ComputeThinV);
+
+ typedef typename MatrixType::Index Index;
+ Index diagSize = (std::min)(m.rows(), m.cols());
+ SVD svd(m, ComputeThinU | ComputeThinV);
+ VERIFY_IS_APPROX(m, svd.matrixU().leftCols(diagSize) * svd.singularValues().asDiagonal() * svd.matrixV().leftCols(diagSize).adjoint());
+ }
+}
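+
+// The last check above verifies the thin reconstruction identity
+//   m = U.leftCols(d) * diag(sigma) * V.leftCols(d)^H   with d = min(rows, cols).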
+
+template<typename MatrixType, typename SVD>
+void svd_verify_assert(const MatrixType& m)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ Index rows = m.rows();
+ Index cols = m.cols();
+
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, 1> RhsType;
+ RhsType rhs(rows);
+ SVD svd;
+ VERIFY_RAISES_ASSERT(svd.matrixU())
+ VERIFY_RAISES_ASSERT(svd.singularValues())
+ VERIFY_RAISES_ASSERT(svd.matrixV())
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+ MatrixType a = MatrixType::Zero(rows, cols);
+ svd.compute(a, 0);
+ VERIFY_RAISES_ASSERT(svd.matrixU())
+ VERIFY_RAISES_ASSERT(svd.matrixV())
+ svd.singularValues();
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+
+ if (ColsAtCompileTime == Dynamic)
+ {
+ svd.compute(a, ComputeThinU);
+ svd.matrixU();
+ VERIFY_RAISES_ASSERT(svd.matrixV())
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+ svd.compute(a, ComputeThinV);
+ svd.matrixV();
+ VERIFY_RAISES_ASSERT(svd.matrixU())
+ VERIFY_RAISES_ASSERT(svd.solve(rhs))
+ }
+ else
+ {
+ VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinU))
+ VERIFY_RAISES_ASSERT(svd.compute(a, ComputeThinV))
+ }
+}
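+
+// Note: the else-branch relies on the SVD asserting when a thin U or V is
+// requested for a fixed-size matrix type; thin factors need a run-time
+// column count, which a fixed-size factor type cannot represent.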
+
+// work around an MSVC error when a compile-time expression involving a
+// division by zero is constructed, even for floating-point numeric types
+template<typename Scalar>
+EIGEN_DONT_INLINE Scalar zero() { return Scalar(0); }
+
+// work around aggressive optimization in ICC
+template<typename T> EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; }
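+
+// Together, zero() and sub() prevent the compiler from folding 1/0 and
+// inf - inf at compile time, so svd_inf_nan() below gets genuine run-time
+// inf and nan values.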
+
+
+template<typename MatrixType, typename SVD>
+void svd_inf_nan()
+{
+ // this function only verifies that the SVD does not iterate forever on nan/inf inputs
+
+ SVD svd;
+ typedef typename MatrixType::Scalar Scalar;
+ Scalar some_inf = Scalar(1) / zero<Scalar>();
+ VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf));
+ svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV);
+
+ Scalar some_nan = zero<Scalar> () / zero<Scalar> ();
+ VERIFY(some_nan != some_nan);
+ svd.compute(MatrixType::Constant(10,10,some_nan), ComputeFullU | ComputeFullV);
+
+ MatrixType m = MatrixType::Zero(10,10);
+ m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_inf;
+ svd.compute(m, ComputeFullU | ComputeFullV);
+
+ m = MatrixType::Zero(10,10);
+ m(internal::random<int>(0,9), internal::random<int>(0,9)) = some_nan;
+ svd.compute(m, ComputeFullU | ComputeFullV);
+}
+
+
+template<typename SVD>
+void svd_preallocate()
+{
+ Vector3f v(3.f, 2.f, 1.f);
+ MatrixXf m = v.asDiagonal();
+
+ internal::set_is_malloc_allowed(false);
+ VERIFY_RAISES_ASSERT(VectorXf tmp(10);) // constructing a dynamic-size vector must assert while malloc is forbidden
+ SVD svd;
+ internal::set_is_malloc_allowed(true);
+ svd.compute(m);
+ VERIFY_IS_APPROX(svd.singularValues(), v);
+
+ SVD svd2(3,3);
+ internal::set_is_malloc_allowed(false);
+ svd2.compute(m);
+ internal::set_is_malloc_allowed(true);
+ VERIFY_IS_APPROX(svd2.singularValues(), v);
+ VERIFY_RAISES_ASSERT(svd2.matrixU());
+ VERIFY_RAISES_ASSERT(svd2.matrixV());
+ svd2.compute(m, ComputeFullU | ComputeFullV);
+ VERIFY_IS_APPROX(svd2.matrixU(), Matrix3f::Identity());
+ VERIFY_IS_APPROX(svd2.matrixV(), Matrix3f::Identity());
+ internal::set_is_malloc_allowed(false);
+ svd2.compute(m);
+ internal::set_is_malloc_allowed(true);
+
+ SVD svd3(3,3,ComputeFullU|ComputeFullV);
+ internal::set_is_malloc_allowed(false);
+ svd3.compute(m); // no allocation: svd3 was preallocated with full U/V
+ internal::set_is_malloc_allowed(true);
+ VERIFY_IS_APPROX(svd3.singularValues(), v);
+ VERIFY_IS_APPROX(svd3.matrixU(), Matrix3f::Identity());
+ VERIFY_IS_APPROX(svd3.matrixV(), Matrix3f::Identity());
+ internal::set_is_malloc_allowed(false);
+ svd3.compute(m, ComputeFullU|ComputeFullV);
+ internal::set_is_malloc_allowed(true);
+}
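+
+// A minimal sketch of a driver wiring these helpers together (hypothetical;
+// the actual drivers live in the individual SVD test files):
+//   template<typename MatrixType>
+//   void svd_all(const MatrixType& m)
+//   {
+//     typedef JacobiSVD<MatrixType> SVD;
+//     SVD fullSvd(m, ComputeFullU | ComputeFullV);
+//     svd_test_computation_options_1<MatrixType, SVD>(m, fullSvd);
+//     svd_test_computation_options_2<MatrixType, SVD>(m, fullSvd);
+//     svd_verify_assert<MatrixType, SVD>(m);
+//   }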
+
+
+
+
+