// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_BIDIAGONALIZATION_H
#define EIGEN_BIDIAGONALIZATION_H

namespace Eigen {

namespace internal {
// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, don't want to make it stable API.
// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class.

template<typename _MatrixType> class UpperBidiagonalization
{
  public:

    typedef _MatrixType MatrixType;
    enum {
      RowsAtCompileTime = MatrixType::RowsAtCompileTime,
      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
      ColsAtCompileTimeMinusOne = internal::decrement_size<ColsAtCompileTime>::ret
    };
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::RealScalar RealScalar;
    typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
    typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;
    typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
    typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0, RowMajor> BidiagonalType;
    typedef Matrix<Scalar, ColsAtCompileTime, 1> DiagVectorType;
    typedef Matrix<Scalar, ColsAtCompileTimeMinusOne, 1> SuperDiagVectorType;
    typedef HouseholderSequence<
              const MatrixType,
              const typename internal::remove_all<typename Diagonal<const MatrixType,0>::ConjugateReturnType>::type
            > HouseholderUSequenceType;
    typedef HouseholderSequence<
              const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type,
              Diagonal<const MatrixType,1>,
              OnTheRight
            > HouseholderVSequenceType;

    /**
     * \brief Default Constructor.
     *
     * The default constructor is useful in cases in which the user intends to
     * perform decompositions via UpperBidiagonalization::compute(const MatrixType&).
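     *
     * A minimal sketch of that pattern (internal API; MatrixXd and the matrix A are only for illustration):
     * \code
     * internal::UpperBidiagonalization<MatrixXd> ubd;  // no decomposition performed yet
     * ubd.compute(A);                                  // A.rows() >= A.cols() is required
     * \endcode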
     */
    UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {}

    explicit UpperBidiagonalization(const MatrixType& matrix)
      : m_householder(matrix.rows(), matrix.cols()),
        m_bidiagonal(matrix.cols(), matrix.cols()),
        m_isInitialized(false)
    {
      compute(matrix);
    }

    UpperBidiagonalization& compute(const MatrixType& matrix);
    UpperBidiagonalization& computeUnblocked(const MatrixType& matrix);

    const MatrixType& householder() const { return m_householder; }
    const BidiagonalType& bidiagonal() const { return m_bidiagonal; }

    const HouseholderUSequenceType householderU() const
    {
      eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
      return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate());
    }

    const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy
    {
      eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
      return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())
             .setLength(m_householder.cols()-1)
             .setShift(1);
    }

  protected:
    MatrixType m_householder;
    BidiagonalType m_bidiagonal;
    bool m_isInitialized;
};
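
// A rough usage sketch of the class above (internal API, subject to change; MatrixXd
// and the sizes below are for illustration only):
//
//   MatrixXd A = MatrixXd::Random(8,5);                 // requires rows >= cols
//   internal::UpperBidiagonalization<MatrixXd> ubd(A);
//   MatrixXd B = MatrixXd::Zero(A.rows(), A.cols());
//   B.topLeftCorner(A.cols(), A.cols()) = ubd.bidiagonal();
//   // A is recovered, up to rounding errors, as U * B * V^H:
//   MatrixXd A2 = ubd.householderU() * B * ubd.householderV().adjoint();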

// Standard upper bidiagonalization without fancy optimizations.
// This version should be faster for small matrix sizes.
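// On exit (summarizing the loop below): diagonal[k] and upper_diagonal[k] receive the
// diagonal and superdiagonal entries of the bidiagonal factor, the essential parts of the
// left and right Householder vectors are stored below the diagonal of mat and to the right
// of its superdiagonal respectively, and the corresponding tau coefficients are left in
// mat(k,k) and mat(k,k+1).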
template<typename MatrixType>
void upperbidiagonalization_inplace_unblocked(MatrixType& mat,
                                              typename MatrixType::RealScalar *diagonal,
                                              typename MatrixType::RealScalar *upper_diagonal,
                                              typename MatrixType::Scalar* tempData = 0)
{
  typedef typename MatrixType::Scalar Scalar;

  Index rows = mat.rows();
  Index cols = mat.cols();

  typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixType::MaxRowsAtCompileTime,1> TempType;
  TempType tempVector;
  if(tempData==0)
  {
    tempVector.resize(rows);
    tempData = tempVector.data();
  }

  for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
  {
    Index remainingRows = rows - k;
    Index remainingCols = cols - k - 1;

    // construct left householder transform in-place in mat
    mat.col(k).tail(remainingRows)
       .makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]);
    // apply householder transform to remaining part of mat on the left
    mat.bottomRightCorner(remainingRows, remainingCols)
       .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData);

    if(k == cols-1) break;

    // construct right householder transform in-place in mat
    mat.row(k).tail(remainingCols)
       .makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
    // apply householder transform to remaining part of mat on the right
    mat.bottomRightCorner(remainingRows-1, remainingCols)
       .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).adjoint(), mat.coeff(k,k+1), tempData);
  }
}

/** \internal
 * Helper routine for the block reduction to upper bidiagonal form.
 *
 * Let's partition the matrix A:
 *
 *      | A00 A01 |
 *  A = |         |
 *      | A10 A11 |
 *
 * This function reduces to bidiagonal form the left \c rows x \a blockSize vertical panel [A00/A10]
 * and the \a blockSize x \c cols horizontal panel [A00 A01] of the matrix \a A. The bottom-right block A11
 * is updated using matrix-matrix products:
 *   A11 -= V * Y^T + X * U^T
 * where V and U contain the left and right Householder vectors. V and U are stored in A10 and A01
 * respectively, and the update matrices X and Y are computed during the reduction.
 *
 */
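// Note: X and Y are caller-provided workspaces of sizes A.rows() x bs and A.cols() x bs
// (see the call site in upperbidiagonalization_inplace_blocked() below); their columns
// accumulate the update matrices X and Y used in the trailing update at the end of this routine.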
template<typename MatrixType>
void upperbidiagonalization_blocked_helper(MatrixType& A,
                                           typename MatrixType::RealScalar *diagonal,
                                           typename MatrixType::RealScalar *upper_diagonal,
                                           Index bs,
                                           Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
                                                      traits<MatrixType>::Flags & RowMajorBit> > X,
                                           Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
                                                      traits<MatrixType>::Flags & RowMajorBit> > Y)
{
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef typename NumTraits<RealScalar>::Literal Literal;
  enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
  typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 1 : Dynamic> ColInnerStride;
  typedef InnerStride<int(StorageOrder) == int(ColMajor) ? Dynamic : 1> RowInnerStride;
  typedef Ref<Matrix<Scalar, Dynamic, 1>, 0, ColInnerStride>    SubColumnType;
  typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, RowInnerStride>    SubRowType;
  typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder > > SubMatType;

  Index brows = A.rows();
  Index bcols = A.cols();

  Scalar tau_u, tau_u_prev(0), tau_v;

  for(Index k = 0; k < bs; ++k)
  {
    Index remainingRows = brows - k;
    Index remainingCols = bcols - k - 1;

    SubMatType X_k1( X.block(k,0, remainingRows,k) );
    SubMatType V_k1( A.block(k,0, remainingRows,k) );

    // 1 - update the k-th column of A
    SubColumnType v_k = A.col(k).tail(remainingRows);
                  v_k -= V_k1 * Y.row(k).head(k).adjoint();
    if(k) v_k -= X_k1 * A.col(k).head(k);

    // 2 - construct left Householder transform in-place
    v_k.makeHouseholderInPlace(tau_v, diagonal[k]);

    if(k+1<bcols)
    {
      SubMatType Y_k  ( Y.block(k+1,0, remainingCols, k+1) );
      SubMatType U_k1 ( A.block(0,k+1, k,remainingCols) );

      // this eases the application of Householder transformations
      // A(k,k) will store tau_v later
      A(k,k) = Scalar(1);

      // 3 - Compute y_k^T = tau_v * ( A^T*v_k - Y_k-1*V_k-1^T*v_k - U_k-1*X_k-1^T*v_k )
      {
        SubColumnType y_k( Y.col(k).tail(remainingCols) );

        // let's use the beginning of column k of Y as a temporary vector
        SubColumnType tmp( Y.col(k).head(k) );
        y_k.noalias()  = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
        tmp.noalias()  = V_k1.adjoint()  * v_k;
        y_k.noalias() -= Y_k.leftCols(k) * tmp;
        tmp.noalias()  = X_k1.adjoint()  * v_k;
        y_k.noalias() -= U_k1.adjoint()  * tmp;
        y_k *= numext::conj(tau_v);
      }

      // 4 - update k-th row of A (it will become u_k)
      SubRowType u_k( A.row(k).tail(remainingCols) );
      u_k = u_k.conjugate();
      {
        u_k -= Y_k * A.row(k).head(k+1).adjoint();
        if(k) u_k -= U_k1.adjoint() * X.row(k).head(k).adjoint();
      }

      // 5 - construct right Householder transform in-place
      u_k.makeHouseholderInPlace(tau_u, upper_diagonal[k]);

      // this eases the application of Householder transformations
      // A(k,k+1) will store tau_u later
      A(k,k+1) = Scalar(1);

      // 6 - Compute x_k = tau_u * ( A*u_k - X_k-1*U_k-1^T*u_k - V_k*Y_k^T*u_k )
      {
        SubColumnType x_k ( X.col(k).tail(remainingRows-1) );

        // let's use the beginning of column k of X as temporary vectors
        // note that tmp0 and tmp1 overlap
| 236 | SubColumnType tmp0 ( X.col(k).head(k) ), |
| 237 | tmp1 ( X.col(k).head(k+1) ); |
| 238 | |
| 239 | x_k.noalias() = A.block(k+1,k+1, remainingRows-1,remainingCols) * u_k.transpose(); // bottleneck |
| 240 | tmp0.noalias() = U_k1 * u_k.transpose(); |
| 241 | x_k.noalias() -= X_k1.bottomRows(remainingRows-1) * tmp0; |
| 242 | tmp1.noalias() = Y_k.adjoint() * u_k.transpose(); |
| 243 | x_k.noalias() -= A.block(k+1,0, remainingRows-1,k+1) * tmp1; |
| 244 | x_k *= numext::conj(tau_u); |
| 245 | tau_u = numext::conj(tau_u); |
| 246 | u_k = u_k.conjugate(); |
| 247 | } |
| 248 | |
| 249 | if(k>0) A.coeffRef(k-1,k) = tau_u_prev; |
| 250 | tau_u_prev = tau_u; |
| 251 | } |
| 252 | else |
| 253 | A.coeffRef(k-1,k) = tau_u_prev; |
| 254 | |
| 255 | A.coeffRef(k,k) = tau_v; |
| 256 | } |
| 257 | |
| 258 | if(bs<bcols) |
| 259 | A.coeffRef(bs-1,bs) = tau_u_prev; |
| 260 | |
| 261 | // update A22 |
| 262 | if(bcols>bs && brows>bs) |
| 263 | { |
| 264 | SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) ); |
| 265 | SubMatType A10( A.block(bs,0, brows-bs,bs) ); |
| 266 | SubMatType A01( A.block(0,bs, bs,bcols-bs) ); |
| 267 | Scalar tmp = A01(bs-1,0); |
| 268 | A01(bs-1,0) = Literal(1); |
| 269 | A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint(); |
| 270 | A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01; |
| 271 | A01(bs-1,0) = tmp; |
| 272 | } |
| 273 | } |
| 274 | |
| 275 | /** \internal |
| 276 | * |
| 277 | * Implementation of a block-bidiagonal reduction. |
| 278 | * It is based on the following paper: |
| 279 | * The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form. |
| 280 | * by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. (1995) |
| 281 | * section 3.3 |
| 282 | */ |
| 283 | template<typename MatrixType, typename BidiagType> |
| 284 | void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal, |
| 285 | Index maxBlockSize=32, |
| 286 | typename MatrixType::Scalar* /*tempData*/ = 0) |
| 287 | { |
| 288 | typedef typename MatrixType::Scalar Scalar; |
| 289 | typedef Block<MatrixType,Dynamic,Dynamic> BlockType; |
| 290 | |
| 291 | Index rows = A.rows(); |
| 292 | Index cols = A.cols(); |
| 293 | Index size = (std::min)(rows, cols); |
| 294 | |
| 295 | // X and Y are work space |
| 296 | enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit }; |
| 297 | Matrix<Scalar, |
| 298 | MatrixType::RowsAtCompileTime, |
| 299 | Dynamic, |
| 300 | StorageOrder, |
| 301 | MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize); |
| 302 | Matrix<Scalar, |
| 303 | MatrixType::ColsAtCompileTime, |
| 304 | Dynamic, |
| 305 | StorageOrder, |
| 306 | MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize); |
| 307 | Index blockSize = (std::min)(maxBlockSize,size); |
| 308 | |
| 309 | Index k = 0; |
| 310 | for(k = 0; k < size; k += blockSize) |
| 311 | { |
| 312 | Index bs = (std::min)(size-k,blockSize); // actual size of the block |
| 313 | Index brows = rows - k; // rows of the block |
| 314 | Index bcols = cols - k; // columns of the block |
| 315 | |
| 316 | // partition the matrix A: |
| 317 | // |
| 318 | // | A00 A01 A02 | |
| 319 | // | | |
| 320 | // A = | A10 A11 A12 | |
| 321 | // | | |
| 322 | // | A20 A21 A22 | |
| 323 | // |
| 324 | // where A11 is a bs x bs diagonal block, |
| 325 | // and let: |
| 326 | // | A11 A12 | |
| 327 | // B = | | |
| 328 | // | A21 A22 | |
| 329 | |
| 330 | BlockType B = A.block(k,k,brows,bcols); |
| 331 | |
    // This stage performs the bidiagonalization of A11, A21, A12, and the update of A22.
    // Finally, the algorithm continues on the updated A22.
    //
    // However, if B is too small, or A22 empty, then let's use an unblocked strategy
    if(k+bs==cols || bcols<48) // somewhat arbitrary threshold
    {
      upperbidiagonalization_inplace_unblocked(B,
                                               &(bidiagonal.template diagonal<0>().coeffRef(k)),
                                               &(bidiagonal.template diagonal<1>().coeffRef(k)),
                                               X.data()
                                              );
      break; // We're done
    }
    else
    {
      upperbidiagonalization_blocked_helper<BlockType>( B,
                                                        &(bidiagonal.template diagonal<0>().coeffRef(k)),
                                                        &(bidiagonal.template diagonal<1>().coeffRef(k)),
                                                        bs,
                                                        X.topLeftCorner(brows,bs),
                                                        Y.topLeftCorner(bcols,bs)
                                                      );
    }
  }
}

template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::computeUnblocked(const _MatrixType& matrix)
{
  Index rows = matrix.rows();
  Index cols = matrix.cols();
  EIGEN_ONLY_USED_FOR_DEBUG(cols);

  eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");

  m_householder = matrix;

  ColVectorType temp(rows);

  upperbidiagonalization_inplace_unblocked(m_householder,
                                           &(m_bidiagonal.template diagonal<0>().coeffRef(0)),
                                           &(m_bidiagonal.template diagonal<1>().coeffRef(0)),
                                           temp.data());

  m_isInitialized = true;
  return *this;
}

template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
{
  Index rows = matrix.rows();
  Index cols = matrix.cols();
  EIGEN_ONLY_USED_FOR_DEBUG(rows);
  EIGEN_ONLY_USED_FOR_DEBUG(cols);

  eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols.");

  m_householder = matrix;
  upperbidiagonalization_inplace_blocked(m_householder, m_bidiagonal);

  m_isInitialized = true;
  return *this;
}

#if 0
/** \return the upper bidiagonalization of \c *this.
  *
  * \sa class Bidiagonalization
  */
template<typename Derived>
const UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::bidiagonalization() const
{
  return UpperBidiagonalization<PlainObject>(eval());
}
#endif

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_BIDIAGONALIZATION_H