From 773207293c8ff3629872ff09c4955f339b4fcbe4 Mon Sep 17 00:00:00 2001
From: Alessio Morale
Date: Sat, 26 Mar 2016 19:18:22 +0100
Subject: [PATCH] LP-208 - Upgrade Eigen to get rid of deprecated bind* warning (http://eigen.tuxfamily.org/bz/show_bug.cgi?id=872)

---
 ground/gcs/src/libs/eigen/.hg_archival.txt | 4 +-
 ground/gcs/src/libs/eigen/.hgtags | 8 +
 ground/gcs/src/libs/eigen/CMakeLists.txt | 68 +-
 ground/gcs/src/libs/eigen/CTestConfig.cmake | 8 +-
 .../gcs/src/libs/eigen/Eigen/CholmodSupport | 2 +-
 ground/gcs/src/libs/eigen/Eigen/Core | 6 +-
 ground/gcs/src/libs/eigen/Eigen/Eigen2Support | 15 +-
 ground/gcs/src/libs/eigen/Eigen/SPQRSupport | 2 +-
 ground/gcs/src/libs/eigen/Eigen/SparseCore | 2 +-
 .../gcs/src/libs/eigen/Eigen/UmfPackSupport | 2 +-
 .../src/libs/eigen/Eigen/src/Cholesky/LDLT.h | 109 +-
 .../src/libs/eigen/Eigen/src/Cholesky/LLT.h | 10 +-
 .../libs/eigen/Eigen/src/Cholesky/LLT_MKL.h | 2 +-
 .../Eigen/src/CholmodSupport/CholmodSupport.h | 33 +-
 .../gcs/src/libs/eigen/Eigen/src/Core/Array.h | 17 +-
 .../src/libs/eigen/Eigen/src/Core/ArrayBase.h | 4 +-
 .../libs/eigen/Eigen/src/Core/ArrayWrapper.h | 10 +
 .../src/libs/eigen/Eigen/src/Core/Assign.h | 15 +-
 .../gcs/src/libs/eigen/Eigen/src/Core/Block.h | 7 +-
 .../libs/eigen/Eigen/src/Core/BooleanRedux.h | 8 +-
 .../eigen/Eigen/src/Core/CommaInitializer.h | 11 +
 .../libs/eigen/Eigen/src/Core/CwiseBinaryOp.h | 3 +-
 .../libs/eigen/Eigen/src/Core/CwiseUnaryOp.h | 2 +-
 .../eigen/Eigen/src/Core/CwiseUnaryView.h | 2 +-
 .../src/libs/eigen/Eigen/src/Core/DenseBase.h | 28 +-
 .../libs/eigen/Eigen/src/Core/DenseStorage.h | 271 ++--
 .../src/libs/eigen/Eigen/src/Core/Diagonal.h | 8 +-
 .../eigen/Eigen/src/Core/DiagonalProduct.h | 5 +-
 .../src/libs/eigen/Eigen/src/Core/EigenBase.h | 30 -
 .../src/libs/eigen/Eigen/src/Core/Functors.h | 45 +-
 .../eigen/Eigen/src/Core/GeneralProduct.h | 25 +-
 ground/gcs/src/libs/eigen/Eigen/src/Core/IO.h | 9 +-
 .../src/libs/eigen/Eigen/src/Core/MapBase.h | 17 +-
 .../libs/eigen/Eigen/src/Core/MathFunctions.h | 8 +-
 .../src/libs/eigen/Eigen/src/Core/Matrix.h | 17 +-
 .../libs/eigen/Eigen/src/Core/MatrixBase.h | 72 +-
 .../eigen/Eigen/src/Core/PermutationMatrix.h | 37 +-
 .../eigen/Eigen/src/Core/PlainObjectBase.h | 48 +-
 .../libs/eigen/Eigen/src/Core/ProductBase.h | 14 +-
 .../gcs/src/libs/eigen/Eigen/src/Core/Redux.h | 5 +-
 .../gcs/src/libs/eigen/Eigen/src/Core/Ref.h | 43 +-
 .../src/libs/eigen/Eigen/src/Core/Replicate.h | 4 +-
 .../libs/eigen/Eigen/src/Core/ReturnByValue.h | 11 +
 .../eigen/Eigen/src/Core/SelfCwiseBinaryOp.h | 10 +-
 .../eigen/Eigen/src/Core/SolveTriangular.h | 8 +-
 .../libs/eigen/Eigen/src/Core/StableNorm.h | 27 +-
 .../src/libs/eigen/Eigen/src/Core/Transpose.h | 4 +-
 .../eigen/Eigen/src/Core/TriangularMatrix.h | 29 +-
 .../libs/eigen/Eigen/src/Core/VectorwiseOp.h | 5 +-
 .../src/libs/eigen/Eigen/src/Core/Visitor.h | 7 +-
 .../eigen/Eigen/src/Core/arch/NEON/Complex.h | 2 +-
 .../Eigen/src/Core/arch/NEON/PacketMath.h | 20 +-
 .../Eigen/src/Core/arch/SSE/MathFunctions.h | 25 +-
 .../Eigen/src/Core/arch/SSE/PacketMath.h | 58 +-
 .../src/Core/products/CoeffBasedProduct.h | 79 +-
 .../Core/products/GeneralBlockPanelKernel.h | 6 +
 .../src/Core/products/GeneralMatrixMatrix.h | 8 +-
 .../src/Core/products/GeneralMatrixVector.h | 15 +-
 .../Eigen/src/Core/products/Parallelizer.h | 17 +-
 .../Core/products/SelfadjointMatrixVector.h | 6 +-
 .../products/TriangularMatrixMatrix_MKL.h | 4 +-
 .../Core/products/TriangularSolverMatrix.h | 13 +-
 .../eigen/Eigen/src/Core/util/Constants.h | 13 +
 .../Eigen/src/Core/util/ForwardDeclarations.h | 3 +
.../eigen/Eigen/src/Core/util/MKL_support.h | 51 +- .../libs/eigen/Eigen/src/Core/util/Macros.h | 330 ++++- .../libs/eigen/Eigen/src/Core/util/Memory.h | 42 +- .../eigen/Eigen/src/Core/util/StaticAssert.h | 6 +- .../eigen/Eigen/src/Core/util/XprHelper.h | 10 +- .../Eigen/src/Eigen2Support/LeastSquares.h | 1 - .../libs/eigen/Eigen/src/Eigen2Support/SVD.h | 3 +- .../src/Eigenvalues/ComplexEigenSolver.h | 8 + .../Eigen/src/Eigenvalues/ComplexSchur_MKL.h | 1 - .../eigen/Eigen/src/Eigenvalues/EigenSolver.h | 9 + .../src/Eigenvalues/GeneralizedEigenSolver.h | 9 + .../libs/eigen/Eigen/src/Eigenvalues/RealQZ.h | 14 +- .../eigen/Eigen/src/Eigenvalues/RealSchur.h | 12 +- .../Eigen/src/Eigenvalues/RealSchur_MKL.h | 4 - .../src/Eigenvalues/SelfAdjointEigenSolver.h | 231 ++-- .../eigen/Eigen/src/Geometry/AlignedBox.h | 85 +- .../libs/eigen/Eigen/src/Geometry/AngleAxis.h | 13 +- .../eigen/Eigen/src/Geometry/EulerAngles.h | 2 +- .../eigen/Eigen/src/Geometry/Homogeneous.h | 2 +- .../eigen/Eigen/src/Geometry/Hyperplane.h | 12 +- .../Eigen/src/Geometry/ParametrizedLine.h | 2 +- .../eigen/Eigen/src/Geometry/Quaternion.h | 61 +- .../eigen/Eigen/src/Geometry/Rotation2D.h | 7 +- .../libs/eigen/Eigen/src/Geometry/Transform.h | 86 +- .../eigen/Eigen/src/Geometry/Translation.h | 2 +- .../libs/eigen/Eigen/src/Geometry/Umeyama.h | 10 +- .../Eigen/src/Householder/BlockHouseholder.h | 2 +- .../BasicPreconditioners.h | 4 +- .../src/IterativeLinearSolvers/BiCGSTAB.h | 24 +- .../ConjugateGradient.h | 35 +- .../IterativeLinearSolvers/IncompleteLUT.h | 25 +- .../IterativeSolverBase.h | 48 +- .../src/libs/eigen/Eigen/src/LU/FullPivLU.h | 23 +- .../libs/eigen/Eigen/src/LU/PartialPivLU.h | 8 + .../eigen/Eigen/src/OrderingMethods/Amd.h | 21 +- .../Eigen/src/OrderingMethods/Eigen_Colamd.h | 31 +- .../Eigen/src/OrderingMethods/Ordering.h | 12 +- .../Eigen/src/PardisoSupport/PardisoSupport.h | 2 +- .../eigen/Eigen/src/QR/ColPivHouseholderQR.h | 37 +- .../Eigen/src/QR/ColPivHouseholderQR_MKL.h | 1 - .../eigen/Eigen/src/QR/FullPivHouseholderQR.h | 61 +- .../libs/eigen/Eigen/src/QR/HouseholderQR.h | 102 +- .../eigen/Eigen/src/QR/HouseholderQR_MKL.h | 30 +- .../src/SPQRSupport/SuiteSparseQRSupport.h | 72 +- .../src/libs/eigen/Eigen/src/SVD/JacobiSVD.h | 131 +- .../libs/eigen/Eigen/src/SVD/JacobiSVD_MKL.h | 4 +- .../src/SparseCholesky/SimplicialCholesky.h | 42 +- .../eigen/Eigen/src/SparseCore/AmbiVector.h | 4 +- .../Eigen/src/SparseCore/CompressedStorage.h | 8 +- .../ConservativeSparseSparseProduct.h | 34 +- .../Eigen/src/SparseCore/MappedSparseMatrix.h | 2 + .../eigen/Eigen/src/SparseCore/SparseBlock.h | 144 +- .../Eigen/src/SparseCore/SparseColEtree.h | 6 +- .../src/SparseCore/SparseCwiseBinaryOp.h | 12 +- .../Eigen/src/SparseCore/SparseDenseProduct.h | 48 +- .../eigen/Eigen/src/SparseCore/SparseMatrix.h | 38 +- .../Eigen/src/SparseCore/SparseMatrixBase.h | 48 +- .../Eigen/src/SparseCore/SparsePermutation.h | 6 +- .../Eigen/src/SparseCore/SparseProduct.h | 5 +- .../SparseSparseProductWithPruning.h | 13 +- .../Eigen/src/SparseCore/SparseTranspose.h | 10 +- .../eigen/Eigen/src/SparseCore/SparseUtil.h | 9 +- .../eigen/Eigen/src/SparseCore/SparseVector.h | 1 + .../eigen/Eigen/src/SparseCore/SparseView.h | 6 +- .../Eigen/src/SparseCore/TriangularSolver.h | 2 +- .../libs/eigen/Eigen/src/SparseLU/SparseLU.h | 98 +- .../eigen/Eigen/src/SparseLU/SparseLUImpl.h | 2 + .../Eigen/src/SparseLU/SparseLU_Memory.h | 45 +- .../src/SparseLU/SparseLU_SupernodalMatrix.h | 12 +- .../Eigen/src/SparseLU/SparseLU_column_bmod.h | 4 +- 
.../Eigen/src/SparseLU/SparseLU_kernel_bmod.h | 4 +- .../Eigen/src/SparseLU/SparseLU_panel_bmod.h | 8 +- .../Eigen/src/SparseLU/SparseLU_pivotL.h | 13 +- .../libs/eigen/Eigen/src/SparseQR/SparseQR.h | 202 ++- .../eigen/Eigen/src/StlSupport/StdDeque.h | 20 +- .../libs/eigen/Eigen/src/StlSupport/StdList.h | 20 +- .../eigen/Eigen/src/StlSupport/StdVector.h | 2 +- .../Eigen/src/UmfPackSupport/UmfPackSupport.h | 112 +- .../Eigen/src/plugins/ArrayCwiseBinaryOps.h | 54 +- .../Eigen/src/plugins/ArrayCwiseUnaryOps.h | 16 - .../eigen/Eigen/src/plugins/BlockMethods.h | 240 ++-- .../Eigen/src/plugins/MatrixCwiseBinaryOps.h | 21 +- .../Eigen/src/plugins/MatrixCwiseUnaryOps.h | 15 - ground/gcs/src/libs/eigen/blas/CMakeLists.txt | 3 + ground/gcs/src/libs/eigen/blas/README.txt | 3 - ground/gcs/src/libs/eigen/blas/xerbla.cpp | 2 +- .../eigen/cmake/EigenConfigureTesting.cmake | 35 +- .../src/libs/eigen/cmake/EigenTesting.cmake | 37 +- .../src/libs/eigen/cmake/FindCholmod.cmake | 2 +- .../gcs/src/libs/eigen/cmake/FindFFTW.cmake | 2 +- .../gcs/src/libs/eigen/cmake/FindMetis.cmake | 38 +- .../gcs/src/libs/eigen/cmake/FindSPQR.cmake | 7 +- .../src/libs/eigen/cmake/FindUmfpack.cmake | 21 +- .../libs/eigen/cmake/language_support.cmake | 4 +- .../libs/eigen/doc/A05_PortingFrom2To3.dox | 2 + .../libs/eigen/doc/A10_Eigen2SupportModes.dox | 2 + .../libs/eigen/doc/AsciiQuickReference.txt | 70 +- ground/gcs/src/libs/eigen/doc/CMakeLists.txt | 4 +- ground/gcs/src/libs/eigen/doc/Doxyfile.in | 14 +- .../src/libs/eigen/doc/LinearLeastSquares.dox | 27 - ground/gcs/src/libs/eigen/doc/Manual.dox | 3 + .../eigen/doc/MatrixfreeSolverExample.dox | 21 + ground/gcs/src/libs/eigen/doc/Pitfalls.dox | 38 + .../libs/eigen/doc/PreprocessorDirectives.dox | 14 +- .../gcs/src/libs/eigen/doc/QuickReference.dox | 20 +- .../libs/eigen/doc/SparseLinearSystems.dox | 6 +- .../libs/eigen/doc/SparseQuickReference.dox | 67 +- .../libs/eigen/doc/TopicMultithreading.dox | 2 +- .../src/libs/eigen/doc/TutorialGeometry.dox | 2 +- .../gcs/src/libs/eigen/doc/TutorialSparse.dox | 33 +- .../libs/eigen/doc/UnalignedArrayAssert.dox | 15 +- .../gcs/src/libs/eigen/doc/UsingIntelMKL.dox | 3 +- ground/gcs/src/libs/eigen/doc/eigendoxy.css | 10 + .../libs/eigen/doc/eigendoxy_footer.html.in | 4 +- .../libs/eigen/doc/examples/CMakeLists.txt | 4 +- .../libs/eigen/doc/examples/matrixfree_cg.cpp | 180 +++ .../libs/eigen/doc/snippets/CMakeLists.txt | 6 +- .../snippets/MatrixBase_applyOnTheLeft.cpp | 7 + .../snippets/MatrixBase_applyOnTheRight.cpp | 9 + .../eigen/doc/special_examples/CMakeLists.txt | 12 +- .../Tutorial_sparse_example_details.cpp | 4 +- ground/gcs/src/libs/eigen/eigen3.pc.in | 7 +- .../src/libs/eigen/failtest/CMakeLists.txt | 17 + .../src/libs/eigen/failtest/colpivqr_int.cpp | 14 + .../libs/eigen/failtest/eigensolver_cplx.cpp | 14 + .../libs/eigen/failtest/eigensolver_int.cpp | 14 + .../src/libs/eigen/failtest/fullpivlu_int.cpp | 14 + .../src/libs/eigen/failtest/fullpivqr_int.cpp | 14 + .../src/libs/eigen/failtest/jacobisvd_int.cpp | 14 + .../gcs/src/libs/eigen/failtest/ldlt_int.cpp | 14 + .../gcs/src/libs/eigen/failtest/llt_int.cpp | 14 + .../libs/eigen/failtest/partialpivlu_int.cpp | 14 + ground/gcs/src/libs/eigen/failtest/qr_int.cpp | 14 + ground/gcs/src/libs/eigen/failtest/ref_1.cpp | 18 + ground/gcs/src/libs/eigen/failtest/ref_2.cpp | 15 + ground/gcs/src/libs/eigen/failtest/ref_3.cpp | 15 + ground/gcs/src/libs/eigen/failtest/ref_4.cpp | 15 + ground/gcs/src/libs/eigen/failtest/ref_5.cpp | 16 + .../gcs/src/libs/eigen/lapack/CMakeLists.txt | 3 + 
.../gcs/src/libs/eigen/scripts/eigen_gen_docs | 5 +- ground/gcs/src/libs/eigen/test/CMakeLists.txt | 13 +- ground/gcs/src/libs/eigen/test/array.cpp | 15 +- .../src/libs/eigen/test/array_for_matrix.cpp | 16 +- ground/gcs/src/libs/eigen/test/block.cpp | 25 +- ground/gcs/src/libs/eigen/test/cholesky.cpp | 82 +- .../libs/eigen/test/conjugate_gradient.cpp | 6 +- .../libs/eigen/test/conservative_resize.cpp | 31 +- ground/gcs/src/libs/eigen/test/cwiseop.cpp | 2 + ground/gcs/src/libs/eigen/test/dynalloc.cpp | 41 +- .../src/libs/eigen/test/eigen2/CMakeLists.txt | 1 + .../libs/eigen/test/eigen2/eigen2_adjoint.cpp | 2 - .../eigen/test/eigen2/eigen2_basicstuff.cpp | 3 - .../libs/eigen/test/eigen2/eigen2_cwiseop.cpp | 7 +- .../eigen/test/eigen2/eigen2_geometry.cpp | 3 +- .../eigen2_geometry_with_eigen2_prefix.cpp | 3 +- .../libs/eigen/test/eigen2/eigen2_inverse.cpp | 1 - .../test/eigen2/eigen2_linearstructure.cpp | 3 +- .../eigen/test/eigen2/eigen2_nomalloc.cpp | 12 +- .../eigen/test/eigen2/eigen2_submatrices.cpp | 8 +- .../eigen/test/eigen2/eigen2_triangular.cpp | 12 +- .../gcs/src/libs/eigen/test/eigen2/product.h | 7 +- .../gcs/src/libs/eigen/test/eigen2support.cpp | 1 + .../eigen/test/eigensolver_selfadjoint.cpp | 43 +- .../src/libs/eigen/test/geo_alignedbox.cpp | 2 + .../src/libs/eigen/test/geo_eulerangles.cpp | 67 +- .../src/libs/eigen/test/geo_hyperplane.cpp | 28 + .../libs/eigen/test/geo_transformations.cpp | 57 +- ground/gcs/src/libs/eigen/test/jacobisvd.cpp | 157 ++- ground/gcs/src/libs/eigen/test/lu.cpp | 9 +- ground/gcs/src/libs/eigen/test/main.h | 45 +- .../gcs/src/libs/eigen/test/mapped_matrix.cpp | 24 + ground/gcs/src/libs/eigen/test/mpl2only.cpp | 20 + ground/gcs/src/libs/eigen/test/nomalloc.cpp | 35 +- ground/gcs/src/libs/eigen/test/nullary.cpp | 4 +- ground/gcs/src/libs/eigen/test/packetmath.cpp | 22 +- ground/gcs/src/libs/eigen/test/product.h | 43 +- .../gcs/src/libs/eigen/test/product_extra.cpp | 64 +- .../gcs/src/libs/eigen/test/product_large.cpp | 23 + .../gcs/src/libs/eigen/test/product_mmtr.cpp | 4 +- .../libs/eigen/test/product_notemporary.cpp | 9 + .../gcs/src/libs/eigen/test/product_trmm.cpp | 1 + .../src/libs/eigen/test/qr_fullpivoting.cpp | 4 + ground/gcs/src/libs/eigen/test/real_qz.cpp | 16 + ground/gcs/src/libs/eigen/test/redux.cpp | 8 + ground/gcs/src/libs/eigen/test/ref.cpp | 145 +- .../gcs/src/libs/eigen/test/rvalue_types.cpp | 62 + .../libs/eigen/test/simplicial_cholesky.cpp | 41 +- ground/gcs/src/libs/eigen/test/sparse.h | 10 +- .../gcs/src/libs/eigen/test/sparse_basic.cpp | 81 +- .../src/libs/eigen/test/sparse_product.cpp | 17 +- .../gcs/src/libs/eigen/test/sparse_solver.h | 73 +- .../gcs/src/libs/eigen/test/sparse_vector.cpp | 13 +- ground/gcs/src/libs/eigen/test/sparselu.cpp | 12 +- ground/gcs/src/libs/eigen/test/sparseqr.cpp | 36 +- .../gcs/src/libs/eigen/test/stable_norm.cpp | 19 +- .../src/libs/eigen/test/stddeque_overload.cpp | 158 +++ .../src/libs/eigen/test/stdlist_overload.cpp | 192 +++ ground/gcs/src/libs/eigen/test/umeyama.cpp | 9 +- .../gcs/src/libs/eigen/test/vectorwiseop.cpp | 22 + ground/gcs/src/libs/eigen/test/visitor.cpp | 5 + ground/gcs/src/libs/eigen/test/zerosized.cpp | 31 +- .../eigen/unsupported/Eigen/AlignedVector3 | 2 +- .../eigen/unsupported/Eigen/CMakeLists.txt | 6 +- .../eigen/unsupported/Eigen/OpenGLSupport | 41 +- .../Eigen/src/AutoDiff/AutoDiffScalar.h | 2 +- .../unsupported/Eigen/src/CMakeLists.txt | 4 +- .../Eigen/src/Eigenvalues/CMakeLists.txt | 6 + .../IterativeSolvers/ConstrainedConjGrad.h | 10 +- 
.../Eigen/src/IterativeSolvers/DGMRES.h | 4 +- .../Eigen/src/IterativeSolvers/GMRES.h | 30 +- .../Eigen/src/IterativeSolvers/MINRES.h | 73 +- .../src/LevenbergMarquardt/CMakeLists.txt | 2 +- .../Eigen/src/MatrixFunctions/MatrixPower.h | 1 - .../unsupported/doc/examples/CMakeLists.txt | 4 +- .../unsupported/doc/snippets/CMakeLists.txt | 4 +- .../eigen/unsupported/test/CMakeLists.txt | 7 +- .../src/libs/eigen/unsupported/test/FFTW.cpp | 13 +- .../libs/eigen/unsupported/test/autodiff.cpp | 45 +- .../libs/eigen/unsupported/test/minres.cpp | 25 +- .../eigen/unsupported/test/mpreal/mpreal.h | 1164 ++++++++++------- .../unsupported/test/polynomialsolver.cpp | 2 - 285 files changed, 6145 insertions(+), 2607 deletions(-) delete mode 100644 ground/gcs/src/libs/eigen/doc/LinearLeastSquares.dox create mode 100644 ground/gcs/src/libs/eigen/doc/MatrixfreeSolverExample.dox create mode 100644 ground/gcs/src/libs/eigen/doc/Pitfalls.dox create mode 100644 ground/gcs/src/libs/eigen/doc/examples/matrixfree_cg.cpp create mode 100644 ground/gcs/src/libs/eigen/doc/snippets/MatrixBase_applyOnTheLeft.cpp create mode 100644 ground/gcs/src/libs/eigen/doc/snippets/MatrixBase_applyOnTheRight.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/colpivqr_int.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/eigensolver_cplx.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/eigensolver_int.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/fullpivlu_int.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/fullpivqr_int.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/jacobisvd_int.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/ldlt_int.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/llt_int.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/partialpivlu_int.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/qr_int.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/ref_1.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/ref_2.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/ref_3.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/ref_4.cpp create mode 100644 ground/gcs/src/libs/eigen/failtest/ref_5.cpp create mode 100644 ground/gcs/src/libs/eigen/test/mpl2only.cpp create mode 100644 ground/gcs/src/libs/eigen/test/rvalue_types.cpp create mode 100644 ground/gcs/src/libs/eigen/test/stddeque_overload.cpp create mode 100644 ground/gcs/src/libs/eigen/test/stdlist_overload.cpp create mode 100644 ground/gcs/src/libs/eigen/unsupported/Eigen/src/Eigenvalues/CMakeLists.txt diff --git a/ground/gcs/src/libs/eigen/.hg_archival.txt b/ground/gcs/src/libs/eigen/.hg_archival.txt index 9fceca658..c4115d7f6 100644 --- a/ground/gcs/src/libs/eigen/.hg_archival.txt +++ b/ground/gcs/src/libs/eigen/.hg_archival.txt @@ -1,4 +1,4 @@ repo: 8a21fd850624c931e448cbcfb38168cb2717c790 -node: ffa86ffb557094721ca71dcea6aed2651b9fd610 +node: 07105f7124f9aef00a68c85e0fc606e65d3d6c15 branch: 3.2 -tag: 3.2.0 +tag: 3.2.8 diff --git a/ground/gcs/src/libs/eigen/.hgtags b/ground/gcs/src/libs/eigen/.hgtags index 1b9b1142e..8f0097f20 100644 --- a/ground/gcs/src/libs/eigen/.hgtags +++ b/ground/gcs/src/libs/eigen/.hgtags @@ -23,3 +23,11 @@ bf4cb8c934fa3a79f45f1e629610f0225e93e493 3.1.0-rc2 da195914abcc1d739027cbee7c52077aab30b336 3.2-beta1 4b687cad1d23066f66863f4f87298447298443df 3.2-rc1 1eeda7b1258bcd306018c0738e2b6a8543661141 3.2-rc2 +ffa86ffb557094721ca71dcea6aed2651b9fd610 3.2.0 +6b38706d90a9fe182e66ab88477b3dbde34b9f66 3.2.1 
+1306d75b4a21891e59ff9bd96678882cf831e39f 3.2.2 +36fd1ba04c120cfdd90f3e4cede47f43b21d19ad 3.2.3 +10219c95fe653d4962aa9db4946f6fbea96dd740 3.2.4 +bdd17ee3b1b3a166cd5ec36dcad4fc1f3faf774a 3.2.5 +c58038c56923e0fd86de3ded18e03df442e66dfb 3.2.6 +b30b87236a1b1552af32ac34075ee5696a9b5a33 3.2.7 diff --git a/ground/gcs/src/libs/eigen/CMakeLists.txt b/ground/gcs/src/libs/eigen/CMakeLists.txt index ad0269ea6..77e9f2d35 100644 --- a/ground/gcs/src/libs/eigen/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/CMakeLists.txt @@ -1,6 +1,5 @@ project(Eigen) - -cmake_minimum_required(VERSION 2.8.2) +cmake_minimum_required(VERSION 2.8.5) # guard against in-source builds @@ -55,6 +54,7 @@ endif(EIGEN_HG_CHANGESET) include(CheckCXXCompilerFlag) +include(GNUInstallDirs) set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) @@ -204,7 +204,7 @@ if(NOT MSVC) option(EIGEN_TEST_NEON "Enable/Disable Neon in tests/examples" OFF) if(EIGEN_TEST_NEON) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon -mcpu=cortex-a"8) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon -mcpu=cortex-a8") message(STATUS "Enabling NEON in tests/examples") endif() @@ -288,25 +288,26 @@ option(EIGEN_TEST_C++0x "Enables all C++0x features." OFF) include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) -# the user modifiable install path for header files -set(EIGEN_INCLUDE_INSTALL_DIR ${EIGEN_INCLUDE_INSTALL_DIR} CACHE PATH "The directory where we install the header files (optional)") - -# set the internal install path for header files which depends on wether the user modifiable -# EIGEN_INCLUDE_INSTALL_DIR has been set by the user or not. -if(EIGEN_INCLUDE_INSTALL_DIR) - set(INCLUDE_INSTALL_DIR - ${EIGEN_INCLUDE_INSTALL_DIR} - CACHE INTERNAL - "The directory where we install the header files (internal)" - ) +# Backward compatibility support for EIGEN_INCLUDE_INSTALL_DIR +if(EIGEN_INCLUDE_INSTALL_DIR AND NOT INCLUDE_INSTALL_DIR) + set(INCLUDE_INSTALL_DIR ${EIGEN_INCLUDE_INSTALL_DIR} + CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed") else() set(INCLUDE_INSTALL_DIR - "${CMAKE_INSTALL_PREFIX}/include/eigen3" - CACHE INTERNAL - "The directory where we install the header files (internal)" - ) + "${CMAKE_INSTALL_INCLUDEDIR}/eigen3" + CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed" + ) endif() +set(CMAKEPACKAGE_INSTALL_DIR + "${CMAKE_INSTALL_LIBDIR}/cmake/eigen3" + CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen3Config.cmake is installed" + ) +set(PKGCONFIG_INSTALL_DIR + "${CMAKE_INSTALL_DATADIR}/pkgconfig" + CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where eigen3.pc is installed" + ) + # similar to set_target_properties but append the property instead of overwriting it macro(ei_add_target_property target prop value) @@ -324,21 +325,9 @@ install(FILES ) if(EIGEN_BUILD_PKGCONFIG) - SET(path_separator ":") - STRING(REPLACE ${path_separator} ";" pkg_config_libdir_search "$ENV{PKG_CONFIG_LIBDIR}") - message(STATUS "searching for 'pkgconfig' directory in PKG_CONFIG_LIBDIR ( $ENV{PKG_CONFIG_LIBDIR} ), ${CMAKE_INSTALL_PREFIX}/share, and ${CMAKE_INSTALL_PREFIX}/lib") - FIND_PATH(pkg_config_libdir pkgconfig ${pkg_config_libdir_search} ${CMAKE_INSTALL_PREFIX}/share ${CMAKE_INSTALL_PREFIX}/lib ${pkg_config_libdir_search}) - if(pkg_config_libdir) - SET(pkg_config_install_dir ${pkg_config_libdir}) - message(STATUS "found ${pkg_config_libdir}/pkgconfig" ) - else(pkg_config_libdir) - SET(pkg_config_install_dir 
${CMAKE_INSTALL_PREFIX}/share) - message(STATUS "pkgconfig not found; installing in ${pkg_config_install_dir}" ) - endif(pkg_config_libdir) - - configure_file(eigen3.pc.in eigen3.pc) + configure_file(eigen3.pc.in eigen3.pc @ONLY) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/eigen3.pc - DESTINATION ${pkg_config_install_dir}/pkgconfig + DESTINATION ${PKGCONFIG_INSTALL_DIR} ) endif(EIGEN_BUILD_PKGCONFIG) @@ -401,12 +390,15 @@ if(cmake_generator_tolower MATCHES "makefile") message(STATUS "--------------+--------------------------------------------------------------") message(STATUS "Command | Description") message(STATUS "--------------+--------------------------------------------------------------") - message(STATUS "make install | Install to ${CMAKE_INSTALL_PREFIX}. To change that:") - message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourpath") - message(STATUS " | Eigen headers will then be installed to:") - message(STATUS " | ${INCLUDE_INSTALL_DIR}") - message(STATUS " | To install Eigen headers to a separate location, do:") - message(STATUS " | cmake . -DEIGEN_INCLUDE_INSTALL_DIR=yourpath") + message(STATUS "make install | Install Eigen. Headers will be installed to:") + message(STATUS " | /") + message(STATUS " | Using the following values:") + message(STATUS " | CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}") + message(STATUS " | INCLUDE_INSTALL_DIR: ${INCLUDE_INSTALL_DIR}") + message(STATUS " | Change the install location of Eigen headers using:") + message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourprefix") + message(STATUS " | Or:") + message(STATUS " | cmake . -DINCLUDE_INSTALL_DIR=yourdir") message(STATUS "make doc | Generate the API documentation, requires Doxygen & LaTeX") message(STATUS "make check | Build and run the unit-tests. Read this page:") message(STATUS " | http://eigen.tuxfamily.org/index.php?title=Tests") diff --git a/ground/gcs/src/libs/eigen/CTestConfig.cmake b/ground/gcs/src/libs/eigen/CTestConfig.cmake index 4c0027824..0557c491a 100644 --- a/ground/gcs/src/libs/eigen/CTestConfig.cmake +++ b/ground/gcs/src/libs/eigen/CTestConfig.cmake @@ -4,14 +4,10 @@ ## # The following are required to uses Dart and the Cdash dashboard ## ENABLE_TESTING() ## INCLUDE(CTest) -set(CTEST_PROJECT_NAME "Eigen") +set(CTEST_PROJECT_NAME "Eigen3.2") set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC") set(CTEST_DROP_METHOD "http") set(CTEST_DROP_SITE "manao.inria.fr") -set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen") +set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen3.2") set(CTEST_DROP_SITE_CDASH TRUE) -set(CTEST_PROJECT_SUBPROJECTS -Official -Unsupported -) diff --git a/ground/gcs/src/libs/eigen/Eigen/CholmodSupport b/ground/gcs/src/libs/eigen/Eigen/CholmodSupport index 745b884e7..88c29a646 100644 --- a/ground/gcs/src/libs/eigen/Eigen/CholmodSupport +++ b/ground/gcs/src/libs/eigen/Eigen/CholmodSupport @@ -12,7 +12,7 @@ extern "C" { /** \ingroup Support_modules * \defgroup CholmodSupport_Module CholmodSupport module * - * This module provides an interface to the Cholmod library which is part of the suitesparse package. + * This module provides an interface to the Cholmod library which is part of the suitesparse package. * It provides the two following main factorization classes: * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization. * - class CholmodDecomposiiton: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial). 
diff --git a/ground/gcs/src/libs/eigen/Eigen/Core b/ground/gcs/src/libs/eigen/Eigen/Core index 9131cc3fc..509c529e1 100644 --- a/ground/gcs/src/libs/eigen/Eigen/Core +++ b/ground/gcs/src/libs/eigen/Eigen/Core @@ -95,7 +95,7 @@ extern "C" { // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly. // Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus: - #ifdef __INTEL_COMPILER + #if defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1110 #include #else #include @@ -123,7 +123,7 @@ #undef bool #undef vector #undef pixel - #elif defined __ARM_NEON__ + #elif defined __ARM_NEON #define EIGEN_VECTORIZE #define EIGEN_VECTORIZE_NEON #include @@ -165,7 +165,7 @@ #endif // required for __cpuid, needs to be included after cmath -#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64)) +#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64)) && (!defined(_WIN32_WCE)) #include #endif diff --git a/ground/gcs/src/libs/eigen/Eigen/Eigen2Support b/ground/gcs/src/libs/eigen/Eigen/Eigen2Support index 36156d29a..6aa009d20 100644 --- a/ground/gcs/src/libs/eigen/Eigen/Eigen2Support +++ b/ground/gcs/src/libs/eigen/Eigen/Eigen2Support @@ -14,12 +14,25 @@ #error Eigen2 support must be enabled by defining EIGEN2_SUPPORT before including any Eigen header #endif +#ifndef EIGEN_NO_EIGEN2_DEPRECATED_WARNING + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) +#warning "Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3. (Define EIGEN_NO_EIGEN2_DEPRECATED_WARNING to disable this warning)" +#else +#pragma message ("Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3. (Define EIGEN_NO_EIGEN2_DEPRECATED_WARNING to disable this warning)") +#endif + +#endif // EIGEN_NO_EIGEN2_DEPRECATED_WARNING + #include "src/Core/util/DisableStupidWarnings.h" /** \ingroup Support_modules * \defgroup Eigen2Support_Module Eigen2 support module - * This module provides a couple of deprecated functions improving the compatibility with Eigen2. * + * \warning Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3. + * + * This module provides a couple of deprecated functions improving the compatibility with Eigen2. + * * To use it, define EIGEN2_SUPPORT before including any Eigen header * \code * #define EIGEN2_SUPPORT diff --git a/ground/gcs/src/libs/eigen/Eigen/SPQRSupport b/ground/gcs/src/libs/eigen/Eigen/SPQRSupport index 77016442e..7f1eb4770 100644 --- a/ground/gcs/src/libs/eigen/Eigen/SPQRSupport +++ b/ground/gcs/src/libs/eigen/Eigen/SPQRSupport @@ -10,7 +10,7 @@ /** \ingroup Support_modules * \defgroup SPQRSupport_Module SuiteSparseQR module * - * This module provides an interface to the SPQR library, which is part of the suitesparse package. + * This module provides an interface to the SPQR library, which is part of the suitesparse package. * * \code * #include diff --git a/ground/gcs/src/libs/eigen/Eigen/SparseCore b/ground/gcs/src/libs/eigen/Eigen/SparseCore index 9b5be5e15..24bcf0156 100644 --- a/ground/gcs/src/libs/eigen/Eigen/SparseCore +++ b/ground/gcs/src/libs/eigen/Eigen/SparseCore @@ -14,7 +14,7 @@ /** * \defgroup SparseCore_Module SparseCore module * - * This module provides a sparse matrix representation, and basic associatd matrix manipulations + * This module provides a sparse matrix representation, and basic associated matrix manipulations * and operations. 
* * See the \ref TutorialSparse "Sparse tutorial" diff --git a/ground/gcs/src/libs/eigen/Eigen/UmfPackSupport b/ground/gcs/src/libs/eigen/Eigen/UmfPackSupport index 984f64a84..7b1b66064 100644 --- a/ground/gcs/src/libs/eigen/Eigen/UmfPackSupport +++ b/ground/gcs/src/libs/eigen/Eigen/UmfPackSupport @@ -12,7 +12,7 @@ extern "C" { /** \ingroup Support_modules * \defgroup UmfPackSupport_Module UmfPackSupport module * - * This module provides an interface to the UmfPack library which is part of the suitesparse package. + * This module provides an interface to the UmfPack library which is part of the suitesparse package. * It provides the following factorization class: * - class UmfPackLU: a multifrontal sequential LU factorization. * diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LDLT.h b/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LDLT.h index d19cb3968..abd30bd91 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LDLT.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LDLT.h @@ -16,7 +16,10 @@ namespace Eigen { namespace internal { -template struct LDLT_Traits; + template struct LDLT_Traits; + + // PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef + enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite }; } /** \ingroup Cholesky_Module @@ -69,7 +72,12 @@ template class LDLT * The default constructor is useful in cases in which the user intends to * perform decompositions via LDLT::compute(const MatrixType&). */ - LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {} + LDLT() + : m_matrix(), + m_transpositions(), + m_sign(internal::ZeroSign), + m_isInitialized(false) + {} /** \brief Default Constructor with memory preallocation * @@ -81,6 +89,7 @@ template class LDLT : m_matrix(size, size), m_transpositions(size), m_temporary(size), + m_sign(internal::ZeroSign), m_isInitialized(false) {} @@ -93,6 +102,7 @@ template class LDLT : m_matrix(matrix.rows(), matrix.cols()), m_transpositions(matrix.rows()), m_temporary(matrix.rows()), + m_sign(internal::ZeroSign), m_isInitialized(false) { compute(matrix); @@ -139,7 +149,7 @@ template class LDLT inline bool isPositive() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); - return m_sign == 1; + return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign; } #ifdef EIGEN2_SUPPORT @@ -153,7 +163,7 @@ template class LDLT inline bool isNegative(void) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); - return m_sign == -1; + return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign; } /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A. @@ -225,6 +235,11 @@ template class LDLT } protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } /** \internal * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U. 
@@ -235,7 +250,7 @@ template class LDLT MatrixType m_matrix; TranspositionType m_transpositions; TmpMatrixType m_temporary; - int m_sign; + internal::SignMatrix m_sign; bool m_isInitialized; }; @@ -246,7 +261,7 @@ template struct ldlt_inplace; template<> struct ldlt_inplace { template - static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0) + static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) { using std::abs; typedef typename MatrixType::Scalar Scalar; @@ -258,36 +273,19 @@ template<> struct ldlt_inplace if (size <= 1) { transpositions.setIdentity(); - if(sign) - *sign = numext::real(mat.coeff(0,0))>0 ? 1:-1; + if (numext::real(mat.coeff(0,0)) > 0) sign = PositiveSemiDef; + else if (numext::real(mat.coeff(0,0)) < 0) sign = NegativeSemiDef; + else sign = ZeroSign; return true; } - RealScalar cutoff(0), biggest_in_corner; - for (Index k = 0; k < size; ++k) { // Find largest diagonal element Index index_of_biggest_in_corner; - biggest_in_corner = mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner); + mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner); index_of_biggest_in_corner += k; - if(k == 0) - { - // The biggest overall is the point of reference to which further diagonals - // are compared; if any diagonal is negligible compared - // to the largest overall, the algorithm bails. - cutoff = abs(NumTraits::epsilon() * biggest_in_corner); - } - - // Finish early if the matrix is not full rank. - if(biggest_in_corner < cutoff) - { - for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i; - if(sign) *sign = 0; - break; - } - transpositions.coeffRef(k) = index_of_biggest_in_corner; if(k != index_of_biggest_in_corner) { @@ -318,22 +316,27 @@ template<> struct ldlt_inplace if(k>0) { - temp.head(k) = mat.diagonal().head(k).asDiagonal() * A10.adjoint(); + temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint(); mat.coeffRef(k,k) -= (A10 * temp.head(k)).value(); if(rs>0) A21.noalias() -= A20 * temp.head(k); } - if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff)) - A21 /= mat.coeffRef(k,k); - if(sign) - { - // LDLT is not guaranteed to work for indefinite matrices, but let's try to get the sign right - int newSign = numext::real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0; - if(k == 0) - *sign = newSign; - else if(*sign != newSign) - *sign = 0; + // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot + // was smaller than the cutoff value. However, soince LDLT is not rank-revealing + // we should only make sure we do not introduce INF or NaN values. + // LAPACK also uses 0 as the cutoff value. 
+ RealScalar realAkk = numext::real(mat.coeffRef(k,k)); + if((rs>0) && (abs(realAkk) > RealScalar(0))) + A21 /= realAkk; + + if (sign == PositiveSemiDef) { + if (realAkk < 0) sign = Indefinite; + } else if (sign == NegativeSemiDef) { + if (realAkk > 0) sign = Indefinite; + } else if (sign == ZeroSign) { + if (realAkk > 0) sign = PositiveSemiDef; + else if (realAkk < 0) sign = NegativeSemiDef; } } @@ -399,7 +402,7 @@ template<> struct ldlt_inplace template<> struct ldlt_inplace { template - static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0) + static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) { Transpose matt(mat); return ldlt_inplace::unblocked(matt, transpositions, temp, sign); @@ -436,6 +439,8 @@ template struct LDLT_Traits template LDLT& LDLT::compute(const MatrixType& a) { + check_template_parameters(); + eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); @@ -444,8 +449,9 @@ LDLT& LDLT::compute(const MatrixType& a) m_transpositions.resize(size); m_isInitialized = false; m_temporary.resize(size); + m_sign = internal::ZeroSign; - internal::ldlt_inplace::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign); + internal::ldlt_inplace::unblocked(m_matrix, m_transpositions, m_temporary, m_sign); m_isInitialized = true; return *this; @@ -458,7 +464,7 @@ LDLT& LDLT::compute(const MatrixType& a) */ template template -LDLT& LDLT::rankUpdate(const MatrixBase& w, const typename NumTraits::Real& sigma) +LDLT& LDLT::rankUpdate(const MatrixBase& w, const typename LDLT::RealScalar& sigma) { const Index size = w.rows(); if (m_isInitialized) @@ -473,7 +479,7 @@ LDLT& LDLT::rankUpdate(const MatrixBase=0 ? 1 : -1; + m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef; m_isInitialized = true; } @@ -504,16 +510,21 @@ struct solve_retval, Rhs> using std::abs; using std::max; typedef typename LDLTType::MatrixType MatrixType; - typedef typename LDLTType::Scalar Scalar; typedef typename LDLTType::RealScalar RealScalar; - const Diagonal vectorD = dec().vectorD(); - RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits::epsilon(), - RealScalar(1) / NumTraits::highest()); // motivated by LAPACK's xGELSS + const typename Diagonal::RealReturnType vectorD(dec().vectorD()); + // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon + // as motivated by LAPACK's xGELSS: + // RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() *NumTraits::epsilon(),RealScalar(1) / NumTraits::highest()); + // However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest + // diagonal element is not well justified and to numerical issues in some cases. + // Moreover, Lapack's xSYTRS routines use 0 for the tolerance. 
+ RealScalar tolerance = RealScalar(1) / NumTraits::highest(); + for (Index i = 0; i < vectorD.size(); ++i) { if(abs(vectorD(i)) > tolerance) - dst.row(i) /= vectorD(i); + dst.row(i) /= vectorD(i); else - dst.row(i).setZero(); + dst.row(i).setZero(); } // dst = L^-T (D^-1 L^-1 P b) @@ -566,7 +577,7 @@ MatrixType LDLT::reconstructedMatrix() const // L^* P res = matrixU() * res; // D(L^*P) - res = vectorD().asDiagonal() * res; + res = vectorD().real().asDiagonal() * res; // L(DL^*P) res = matrixL() * res; // P^T (LDL^*P) diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LLT.h b/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LLT.h index 2e6189f7d..7c11a2dc2 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LLT.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LLT.h @@ -174,6 +174,12 @@ template class LLT LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1); protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + /** \internal * Used to compute and store L * The strict upper part is not used and even not initialized. @@ -283,7 +289,7 @@ template struct llt_inplace return k; mat.coeffRef(k,k) = x = sqrt(x); if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint(); - if (rs>0) A21 *= RealScalar(1)/x; + if (rs>0) A21 /= x; } return -1; } @@ -384,6 +390,8 @@ template struct LLT_Traits template LLT& LLT::compute(const MatrixType& a) { + check_template_parameters(); + eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix.resize(size, size); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LLT_MKL.h b/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LLT_MKL.h index 64daa445c..66675d747 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LLT_MKL.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Cholesky/LLT_MKL.h @@ -60,7 +60,7 @@ template<> struct mkl_llt \ lda = m.outerStride(); \ \ info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \ - info = (info==0) ? Success : NumericalIssue; \ + info = (info==0) ? -1 : info>0 ? info-1 : size; \ return info; \ } \ }; \ diff --git a/ground/gcs/src/libs/eigen/Eigen/src/CholmodSupport/CholmodSupport.h b/ground/gcs/src/libs/eigen/Eigen/src/CholmodSupport/CholmodSupport.h index 783324b0b..99dbe171c 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/CholmodSupport/CholmodSupport.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/CholmodSupport/CholmodSupport.h @@ -58,10 +58,12 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat) res.p = mat.outerIndexPtr(); res.i = mat.innerIndexPtr(); res.x = mat.valuePtr(); + res.z = 0; res.sorted = 1; if(mat.isCompressed()) { res.packed = 1; + res.nz = 0; } else { @@ -76,7 +78,7 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat) { res.itype = CHOLMOD_INT; } - else if (internal::is_same<_Index,UF_long>::value) + else if (internal::is_same<_Index,SuiteSparse_long>::value) { res.itype = CHOLMOD_LONG; } @@ -170,6 +172,7 @@ class CholmodBase : internal::noncopyable CholmodBase() : m_cholmodFactor(0), m_info(Success), m_isInitialized(false) { + m_shiftOffset[0] = m_shiftOffset[1] = RealScalar(0.0); cholmod_start(&m_cholmod); } @@ -241,7 +244,7 @@ class CholmodBase : internal::noncopyable return internal::sparse_solve_retval(*this, b.derived()); } - /** Performs a symbolic decomposition on the sparcity of \a matrix. + /** Performs a symbolic decomposition on the sparsity pattern of \a matrix. 
* * This function is particularly useful when solving for several problems having the same structure. * @@ -265,7 +268,7 @@ class CholmodBase : internal::noncopyable /** Performs a numeric decomposition of \a matrix * - * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. + * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed. * * \sa analyzePattern() */ @@ -302,7 +305,7 @@ class CholmodBase : internal::noncopyable { this->m_info = NumericalIssue; } - // TODO optimize this copy by swapping when possible (be carreful with alignment, etc.) + // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) dest = Matrix::Map(reinterpret_cast(x_cd->x),b.rows(),b.cols()); cholmod_free_dense(&x_cd, &m_cholmod); } @@ -323,7 +326,7 @@ class CholmodBase : internal::noncopyable { this->m_info = NumericalIssue; } - // TODO optimize this copy by swapping when possible (be carreful with alignment, etc.) + // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) dest = viewAsEigen(*x_cs); cholmod_free_sparse(&x_cs, &m_cholmod); } @@ -365,8 +368,8 @@ class CholmodBase : internal::noncopyable * * This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization * using the Cholmod library. - * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Thefore, it has little practical interest. - * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest. + * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> @@ -392,7 +395,7 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl CholmodSimplicialLLT(const MatrixType& matrix) : Base() { init(); - compute(matrix); + Base::compute(matrix); } ~CholmodSimplicialLLT() {} @@ -412,8 +415,8 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl * * This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization * using the Cholmod library. - * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Thefore, it has little practical interest. - * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical interest. + * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> @@ -439,7 +442,7 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp CholmodSimplicialLDLT(const MatrixType& matrix) : Base() { init(); - compute(matrix); + Base::compute(matrix); } ~CholmodSimplicialLDLT() {} @@ -458,7 +461,7 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp * This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization * using the Cholmod library. 
* This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM. - * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> @@ -484,7 +487,7 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper CholmodSupernodalLLT(const MatrixType& matrix) : Base() { init(); - compute(matrix); + Base::compute(matrix); } ~CholmodSupernodalLLT() {} @@ -501,7 +504,7 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper * \brief A general Cholesky factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization - * using the Cholmod library. The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * This variant permits to change the underlying Cholesky method at runtime. @@ -531,7 +534,7 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom CholmodDecomposition(const MatrixType& matrix) : Base() { init(); - compute(matrix); + Base::compute(matrix); } ~CholmodDecomposition() {} diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/Array.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/Array.h index 497efff66..0b9c38c82 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/Array.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/Array.h @@ -124,6 +124,21 @@ class Array } #endif +#ifdef EIGEN_HAVE_RVALUE_REFERENCES + Array(Array&& other) + : Base(std::move(other)) + { + Base::_check_template_params(); + if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic) + Base::_set_noalias(other); + } + Array& operator=(Array&& other) + { + other.swap(*this); + return *this; + } +#endif + /** Constructs a vector or row-vector with given dimension. \only_for_vectors * * Note that this is only useful for dynamic-size vectors. 
For fixed-size vectors, @@ -210,7 +225,7 @@ class Array : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) { Base::_check_template_params(); - Base::resize(other.rows(), other.cols()); + Base::_resize_to_match(other); *this = other; } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/ArrayBase.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/ArrayBase.h index 38852600d..33ff55371 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/ArrayBase.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/ArrayBase.h @@ -46,9 +46,6 @@ template class ArrayBase typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl; - using internal::special_scalar_op_base::Scalar, - typename NumTraits::Scalar>::Real>::operator*; - typedef typename internal::traits::StorageKind StorageKind; typedef typename internal::traits::Index Index; typedef typename internal::traits::Scalar Scalar; @@ -56,6 +53,7 @@ template class ArrayBase typedef typename NumTraits::Real RealScalar; typedef DenseBase Base; + using Base::operator*; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/ArrayWrapper.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/ArrayWrapper.h index a791bc358..b4641e2a0 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/ArrayWrapper.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/ArrayWrapper.h @@ -29,6 +29,11 @@ struct traits > : public traits::type > { typedef ArrayXpr XprKind; + // Let's remove NestByRefBit + enum { + Flags0 = traits::type >::Flags, + Flags = Flags0 & ~NestByRefBit + }; }; } @@ -149,6 +154,11 @@ struct traits > : public traits::type > { typedef MatrixXpr XprKind; + // Let's remove NestByRefBit + enum { + Flags0 = traits::type >::Flags, + Flags = Flags0 & ~NestByRefBit + }; }; } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/Assign.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/Assign.h index 1dccc2f42..f48173172 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/Assign.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/Assign.h @@ -439,19 +439,26 @@ struct assign_impl PacketTraits; + typedef typename Derived1::Scalar Scalar; + typedef packet_traits PacketTraits; enum { packetSize = PacketTraits::size, alignable = PacketTraits::AlignedOnScalar, - dstAlignment = alignable ? Aligned : int(assign_traits::DstIsAligned) , + dstIsAligned = assign_traits::DstIsAligned, + dstAlignment = alignable ? Aligned : int(dstIsAligned), srcAlignment = assign_traits::JointAlignment }; + const Scalar *dst_ptr = &dst.coeffRef(0,0); + if((!bool(dstIsAligned)) && (size_t(dst_ptr) % sizeof(Scalar))>0) + { + // the pointer is not aligend-on scalar, so alignment is not possible + return assign_impl::run(dst, src); + } const Index packetAlignedMask = packetSize - 1; const Index innerSize = dst.innerSize(); const Index outerSize = dst.outerSize(); const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0; - Index alignedStart = ((!alignable) || assign_traits::DstIsAligned) ? 0 - : internal::first_aligned(&dst.coeffRef(0,0), innerSize); + Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 
0 : internal::first_aligned(dst_ptr, innerSize); for(Index outer = 0; outer < outerSize; ++outer) { diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/Block.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/Block.h index 358b3188b..827894443 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/Block.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/Block.h @@ -66,8 +66,9 @@ struct traits > : traits::MaxColsAtCompileTime), XprTypeIsRowMajor = (int(traits::Flags)&RowMajorBit) != 0, - IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 - : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 + IsDense = is_same::value, + IsRowMajor = (IsDense&&MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 + : (IsDense&&MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 : XprTypeIsRowMajor, HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), @@ -81,7 +82,7 @@ struct traits > : traits::Flags&LinearAccessBit))) ? LinearAccessBit : 0, FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, Flags0 = traits::Flags & ( (HereditaryBits & ~RowMajorBit) | diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/BooleanRedux.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/BooleanRedux.h index 6e37e031a..be9f48a8c 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/BooleanRedux.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/BooleanRedux.h @@ -29,9 +29,9 @@ struct all_unroller }; template -struct all_unroller +struct all_unroller { - static inline bool run(const Derived &mat) { return mat.coeff(0, 0); } + static inline bool run(const Derived &/*mat*/) { return true; } }; template @@ -55,9 +55,9 @@ struct any_unroller }; template -struct any_unroller +struct any_unroller { - static inline bool run(const Derived &mat) { return mat.coeff(0, 0); } + static inline bool run(const Derived & /*mat*/) { return false; } }; template diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/CommaInitializer.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/CommaInitializer.h index a96867af4..a036d8c3b 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/CommaInitializer.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/CommaInitializer.h @@ -43,6 +43,17 @@ struct CommaInitializer m_xpr.block(0, 0, other.rows(), other.cols()) = other; } + /* Copy/Move constructor which transfers ownership. This is crucial in + * absence of return value optimization to avoid assertions during destruction. */ + // FIXME in C++11 mode this could be replaced by a proper RValue constructor + inline CommaInitializer(const CommaInitializer& o) + : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) { + // Mark original object as finished. 
In absence of R-value references we need to const_cast: + const_cast(o).m_row = m_xpr.rows(); + const_cast(o).m_col = m_xpr.cols(); + const_cast(o).m_currentBlockRows = 0; + } + /* inserts a scalar value in the target matrix */ CommaInitializer& operator,(const Scalar& s) { diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseBinaryOp.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseBinaryOp.h index 586f77aaf..519a866e6 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseBinaryOp.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseBinaryOp.h @@ -81,7 +81,8 @@ struct traits > ) ), Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit), - CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits::Cost + Cost0 = EIGEN_ADD_COST(LhsCoeffReadCost,RhsCoeffReadCost), + CoeffReadCost = EIGEN_ADD_COST(Cost0,functor_traits::Cost) }; }; } // end namespace internal diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseUnaryOp.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseUnaryOp.h index f2de749f9..f7ee60e98 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseUnaryOp.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseUnaryOp.h @@ -47,7 +47,7 @@ struct traits > Flags = _XprTypeNested::Flags & ( HereditaryBits | LinearAccessBit | AlignedBit | (functor_traits::PacketAccess ? PacketAccessBit : 0)), - CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits::Cost + CoeffReadCost = EIGEN_ADD_COST(_XprTypeNested::CoeffReadCost, functor_traits::Cost) }; }; } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseUnaryView.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseUnaryView.h index b2638d326..f3b2ffeb6 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseUnaryView.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/CwiseUnaryView.h @@ -38,7 +38,7 @@ struct traits > typedef typename remove_all::type _MatrixTypeNested; enum { Flags = (traits<_MatrixTypeNested>::Flags & (HereditaryBits | LvalueBit | LinearAccessBit | DirectAccessBit)), - CoeffReadCost = traits<_MatrixTypeNested>::CoeffReadCost + functor_traits::Cost, + CoeffReadCost = EIGEN_ADD_COST(traits<_MatrixTypeNested>::CoeffReadCost, functor_traits::Cost), MatrixTypeInnerStride = inner_stride_at_compile_time::ret, // need to cast the sizeof's from size_t to int explicitly, otherwise: // "error: no integral type can represent all of the enumerator values diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/DenseBase.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/DenseBase.h index c5800f6c8..4b371b075 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/DenseBase.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/DenseBase.h @@ -40,15 +40,14 @@ static inline void check_DenseIndex_is_signed() { */ template class DenseBase #ifndef EIGEN_PARSED_BY_DOXYGEN - : public internal::special_scalar_op_base::Scalar, - typename NumTraits::Scalar>::Real> + : public internal::special_scalar_op_base::Scalar, + typename NumTraits::Scalar>::Real, + DenseCoeffsBase > #else : public DenseCoeffsBase #endif // not EIGEN_PARSED_BY_DOXYGEN { public: - using internal::special_scalar_op_base::Scalar, - typename NumTraits::Scalar>::Real>::operator*; class InnerIterator; @@ -63,8 +62,9 @@ template class DenseBase typedef typename internal::traits::Scalar Scalar; typedef typename internal::packet_traits::type PacketScalar; typedef typename NumTraits::Real RealScalar; + typedef internal::special_scalar_op_base > Base; - typedef DenseCoeffsBase Base; + using Base::operator*; using Base::derived; using 
Base::const_cast_derived; using Base::rows; @@ -183,10 +183,6 @@ template class DenseBase /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ inline Index nonZeros() const { return size(); } - /** \returns true if either the number of rows or the number of columns is equal to 1. - * In other words, this function returns - * \code rows()==1 || cols()==1 \endcode - * \sa rows(), cols(), IsVectorAtCompileTime. */ /** \returns the outer size. * @@ -266,11 +262,13 @@ template class DenseBase template Derived& operator=(const ReturnByValue& func); -#ifndef EIGEN_PARSED_BY_DOXYGEN - /** Copies \a other into *this without evaluating other. \returns a reference to *this. */ + /** \internal Copies \a other into *this without evaluating other. \returns a reference to *this. */ template Derived& lazyAssign(const DenseBase& other); -#endif // not EIGEN_PARSED_BY_DOXYGEN + + /** \internal Evaluates \a other into *this. \returns a reference to *this. */ + template + Derived& lazyAssign(const ReturnByValue& other); CommaInitializer operator<< (const Scalar& s); @@ -462,8 +460,10 @@ template class DenseBase template RealScalar lpNorm() const; template - const Replicate replicate() const; - const Replicate replicate(Index rowFacor,Index colFactor) const; + inline const Replicate replicate() const; + + typedef Replicate ReplicateReturnType; + inline const ReplicateReturnType replicate(Index rowFacor,Index colFactor) const; typedef Reverse ReverseReturnType; typedef const Reverse ConstReverseReturnType; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/DenseStorage.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/DenseStorage.h index 3e7f9c1b7..568493cba 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/DenseStorage.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/DenseStorage.h @@ -24,6 +24,14 @@ namespace internal { struct constructor_without_unaligned_array_assert {}; +template void check_static_allocation_size() +{ + // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit + #if EIGEN_STACK_ALLOCATION_LIMIT + EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); + #endif +} + /** \internal * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned: * to 16 bytes boundary if the total size is a multiple of 16 bytes. 
@@ -38,12 +46,12 @@ struct plain_array plain_array() { - EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); + check_static_allocation_size(); } plain_array(constructor_without_unaligned_array_assert) { - EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); + check_static_allocation_size(); } }; @@ -76,12 +84,12 @@ struct plain_array plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf); - EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); + check_static_allocation_size(); } plain_array(constructor_without_unaligned_array_assert) { - EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG); + check_static_allocation_size(); } }; @@ -114,33 +122,41 @@ template class DenseSt { internal::plain_array m_data; public: - inline DenseStorage() {} - inline DenseStorage(internal::constructor_without_unaligned_array_assert) + DenseStorage() {} + DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()) {} - inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} - inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); } - static inline DenseIndex rows(void) {return _Rows;} - static inline DenseIndex cols(void) {return _Cols;} - inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} - inline void resize(DenseIndex,DenseIndex,DenseIndex) {} - inline const T *data() const { return m_data.array; } - inline T *data() { return m_data.array; } + DenseStorage(const DenseStorage& other) : m_data(other.m_data) {} + DenseStorage& operator=(const DenseStorage& other) + { + if (this != &other) m_data = other.m_data; + return *this; + } + DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} + void swap(DenseStorage& other) { std::swap(m_data,other.m_data); } + static DenseIndex rows(void) {return _Rows;} + static DenseIndex cols(void) {return _Cols;} + void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} + void resize(DenseIndex,DenseIndex,DenseIndex) {} + const T *data() const { return m_data.array; } + T *data() { return m_data.array; } }; // null matrix template class DenseStorage { public: - inline DenseStorage() {} - inline DenseStorage(internal::constructor_without_unaligned_array_assert) {} - inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} - inline void swap(DenseStorage& ) {} - static inline DenseIndex rows(void) {return _Rows;} - static inline DenseIndex cols(void) {return _Cols;} - inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} - inline void resize(DenseIndex,DenseIndex,DenseIndex) {} - inline const T *data() const { return 0; } - inline T *data() { return 0; } + DenseStorage() {} + DenseStorage(internal::constructor_without_unaligned_array_assert) {} + DenseStorage(const DenseStorage&) {} + DenseStorage& operator=(const DenseStorage&) { return *this; } + DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} + void swap(DenseStorage& ) {} + static DenseIndex rows(void) {return _Rows;} + static DenseIndex cols(void) {return _Cols;} + void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} + void resize(DenseIndex,DenseIndex,DenseIndex) {} + const T *data() const { return 0; } + T *data() { return 0; } }; // more specializations for null matrices; these are necessary to resolve ambiguities @@ -160,18 +176,29 @@ template class DenseStorage class DenseStorage m_data; DenseIndex m_rows; public: - inline 
DenseStorage() : m_rows(0) {} - inline DenseStorage(internal::constructor_without_unaligned_array_assert) + DenseStorage() : m_rows(0) {} + DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {} - inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {} - inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } - inline DenseIndex rows(void) const {return m_rows;} - inline DenseIndex cols(void) const {return _Cols;} - inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; } - inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; } - inline const T *data() const { return m_data.array; } - inline T *data() { return m_data.array; } + DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows) {} + DenseStorage& operator=(const DenseStorage& other) + { + if (this != &other) + { + m_data = other.m_data; + m_rows = other.m_rows; + } + return *this; + } + DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {} + void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } + DenseIndex rows(void) const {return m_rows;} + DenseIndex cols(void) const {return _Cols;} + void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; } + void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; } + const T *data() const { return m_data.array; } + T *data() { return m_data.array; } }; // dynamic-size matrix with fixed-size storage and fixed height @@ -199,17 +236,27 @@ template class DenseStorage m_data; DenseIndex m_cols; public: - inline DenseStorage() : m_cols(0) {} - inline DenseStorage(internal::constructor_without_unaligned_array_assert) + DenseStorage() : m_cols(0) {} + DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {} - inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {} - inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } - inline DenseIndex rows(void) const {return _Rows;} - inline DenseIndex cols(void) const {return m_cols;} - inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; } - inline void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; } - inline const T *data() const { return m_data.array; } - inline T *data() { return m_data.array; } + DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_cols(other.m_cols) {} + DenseStorage& operator=(const DenseStorage& other) + { + if (this != &other) + { + m_data = other.m_data; + m_cols = other.m_cols; + } + return *this; + } + DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {} + void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } + DenseIndex rows(void) const {return _Rows;} + DenseIndex cols(void) const {return m_cols;} + void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; } + void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; } + const T *data() const { return m_data.array; } + T *data() { return m_data.array; } }; // purely dynamic matrix. 
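Besides the explicit copy operations added to the fixed-size specializations above, the dynamic-size DenseStorage specializations in the hunks that follow gain move support guarded by EIGEN_HAVE_RVALUE_REFERENCES: the move constructor steals the heap pointer and nulls out the source, and move assignment is implemented as a member-wise swap. Boiled down to a minimal, self-contained sketch (illustrative names, not the real DenseStorage interface):

#include <algorithm>
#include <cstddef>
#include <utility>

// Illustrative stand-in for a dynamic-size storage class; not Eigen's interface.
struct dynamic_storage_demo
{
  double*     m_data;
  std::size_t m_size;

  dynamic_storage_demo() : m_data(0), m_size(0) {}
  explicit dynamic_storage_demo(std::size_t n) : m_data(new double[n]()), m_size(n) {}
  ~dynamic_storage_demo() { delete[] m_data; }

  // Deep copy, with the same self-assignment guard the patch adds to the fixed-size cases.
  dynamic_storage_demo(const dynamic_storage_demo& other)
    : m_data(other.m_size ? new double[other.m_size] : 0), m_size(other.m_size)
  {
    if (m_size) std::copy(other.m_data, other.m_data + m_size, m_data);
  }
  dynamic_storage_demo& operator=(const dynamic_storage_demo& other)
  {
    if (this != &other)
    {
      dynamic_storage_demo tmp(other);
      swap(tmp);
    }
    return *this;
  }

  // Move constructor: take ownership of the buffer and leave the source empty.
  dynamic_storage_demo(dynamic_storage_demo&& other)
    : m_data(other.m_data), m_size(other.m_size)
  {
    other.m_data = 0;
    other.m_size = 0;
  }
  // Move assignment: a plain swap, mirroring the operator=(DenseStorage&&) in the patch.
  dynamic_storage_demo& operator=(dynamic_storage_demo&& other)
  {
    swap(other);
    return *this;
  }

  void swap(dynamic_storage_demo& other)
  {
    std::swap(m_data, other.m_data);
    std::swap(m_size, other.m_size);
  }
};

int main()
{
  dynamic_storage_demo a(4);
  dynamic_storage_demo b(std::move(a));   // a's pointer is now null, so no double delete
  b = dynamic_storage_demo(8);            // move-assign from a temporary via swap
  return 0;
}

The swap-based move assignment keeps the operation no-throw and defers deallocation of the old buffer to the moved-from object's destructor.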
@@ -219,18 +266,35 @@ template class DenseStorage(size)), m_rows(nbRows), m_cols(nbCols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } - inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); } - inline void swap(DenseStorage& other) +#ifdef EIGEN_HAVE_RVALUE_REFERENCES + DenseStorage(DenseStorage&& other) + : m_data(std::move(other.m_data)) + , m_rows(std::move(other.m_rows)) + , m_cols(std::move(other.m_cols)) + { + other.m_data = nullptr; + } + DenseStorage& operator=(DenseStorage&& other) + { + using std::swap; + swap(m_data, other.m_data); + swap(m_rows, other.m_rows); + swap(m_cols, other.m_cols); + return *this; + } +#endif + ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); } + void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } - inline DenseIndex rows(void) const {return m_rows;} - inline DenseIndex cols(void) const {return m_cols;} - inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols) + DenseIndex rows(void) const {return m_rows;} + DenseIndex cols(void) const {return m_cols;} + void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*m_cols); m_rows = nbRows; @@ -250,8 +314,11 @@ template class DenseStorage class DenseStorage(size)), m_cols(nbCols) + DenseStorage() : m_data(0), m_cols(0) {} + DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} + DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto(size)), m_cols(nbCols) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } - inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Rows*m_cols); } - inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } - static inline DenseIndex rows(void) {return _Rows;} - inline DenseIndex cols(void) const {return m_cols;} - inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols) +#ifdef EIGEN_HAVE_RVALUE_REFERENCES + DenseStorage(DenseStorage&& other) + : m_data(std::move(other.m_data)) + , m_cols(std::move(other.m_cols)) + { + other.m_data = nullptr; + } + DenseStorage& operator=(DenseStorage&& other) + { + using std::swap; + swap(m_data, other.m_data); + swap(m_cols, other.m_cols); + return *this; + } +#endif + ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Rows*m_cols); } + void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } + static DenseIndex rows(void) {return _Rows;} + DenseIndex cols(void) const {return m_cols;} + void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, _Rows*m_cols); m_cols = nbCols; @@ -286,8 +368,11 @@ template class DenseStorage class DenseStorage(size)), m_rows(nbRows) + DenseStorage() : m_data(0), m_rows(0) {} + DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {} + DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(nbRows) { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } - inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Cols*m_rows); } - inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); 
std::swap(m_rows,other.m_rows); } - inline DenseIndex rows(void) const {return m_rows;} - static inline DenseIndex cols(void) {return _Cols;} - inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex) +#ifdef EIGEN_HAVE_RVALUE_REFERENCES + DenseStorage(DenseStorage&& other) + : m_data(std::move(other.m_data)) + , m_rows(std::move(other.m_rows)) + { + other.m_data = nullptr; + } + DenseStorage& operator=(DenseStorage&& other) + { + using std::swap; + swap(m_data, other.m_data); + swap(m_rows, other.m_rows); + return *this; + } +#endif + ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Cols*m_rows); } + void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } + DenseIndex rows(void) const {return m_rows;} + static DenseIndex cols(void) {return _Cols;} + void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex) { m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*_Cols); m_rows = nbRows; @@ -322,8 +422,11 @@ template class DenseStorage::diagonal() const * * \sa MatrixBase::diagonal(), class Diagonal */ template -inline typename MatrixBase::template DiagonalIndexReturnType::Type +inline typename MatrixBase::DiagonalDynamicIndexReturnType MatrixBase::diagonal(Index index) { - return typename DiagonalIndexReturnType::Type(derived(), index); + return DiagonalDynamicIndexReturnType(derived(), index); } /** This is the const version of diagonal(Index). */ template -inline typename MatrixBase::template ConstDiagonalIndexReturnType::Type +inline typename MatrixBase::ConstDiagonalDynamicIndexReturnType MatrixBase::diagonal(Index index) const { - return typename ConstDiagonalIndexReturnType::Type(derived(), index); + return ConstDiagonalDynamicIndexReturnType(derived(), index); } /** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/DiagonalProduct.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/DiagonalProduct.h index c03a0c2e1..cc6b536e1 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/DiagonalProduct.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/DiagonalProduct.h @@ -34,8 +34,9 @@ struct traits > _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))), _LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0, - Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0) | AlignedBit,//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit), - CoeffReadCost = NumTraits::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost + Flags = ((HereditaryBits|_LinearAccessMask|AlignedBit) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? 
PacketAccessBit : 0),//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit), + Cost0 = EIGEN_ADD_COST(NumTraits::MulCost, MatrixType::CoeffReadCost), + CoeffReadCost = EIGEN_ADD_COST(Cost0,DiagonalType::DiagonalVectorType::CoeffReadCost) }; }; } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/EigenBase.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/EigenBase.h index 2b8dd1b70..fadb45852 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/EigenBase.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/EigenBase.h @@ -126,36 +126,6 @@ Derived& DenseBase::operator-=(const EigenBase &other) return derived(); } -/** replaces \c *this by \c *this * \a other. - * - * \returns a reference to \c *this - */ -template -template -inline Derived& -MatrixBase::operator*=(const EigenBase &other) -{ - other.derived().applyThisOnTheRight(derived()); - return derived(); -} - -/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=(). - */ -template -template -inline void MatrixBase::applyOnTheRight(const EigenBase &other) -{ - other.derived().applyThisOnTheRight(derived()); -} - -/** replaces \c *this by \c *this * \a other. */ -template -template -inline void MatrixBase::applyOnTheLeft(const EigenBase &other) -{ - other.derived().applyThisOnTheLeft(derived()); -} - } // end namespace Eigen #endif // EIGEN_EIGENBASE_H diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/Functors.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/Functors.h index 04fb21732..5f14c6587 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/Functors.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/Functors.h @@ -259,6 +259,47 @@ template<> struct functor_traits { }; }; +/** \internal + * \brief Template functors for comparison of two scalars + * \todo Implement packet-comparisons + */ +template struct scalar_cmp_op; + +template +struct functor_traits > { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false + }; +}; + +template +struct result_of(Scalar,Scalar)> { + typedef bool type; +}; + + +template struct scalar_cmp_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a==b;} +}; +template struct scalar_cmp_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a struct scalar_cmp_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<=b;} +}; +template struct scalar_cmp_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return !(a<=b || b<=a);} +}; +template struct scalar_cmp_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op) + EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a!=b;} +}; + // unary functors: /** \internal @@ -589,7 +630,7 @@ struct linspaced_op_impl template EIGEN_STRONG_INLINE const Packet packetOp(Index i) const - { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1(i),m_interPacket))); } + { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1(Scalar(i)),m_interPacket))); } const Scalar m_low; const Scalar m_step; @@ -609,7 +650,7 @@ template struct functor_traits< linspaced_o template struct linspaced_op { typedef typename packet_traits::type Packet; - linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? 
Scalar() : (high-low)/(num_steps-1))) {} + linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {} template EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/GeneralProduct.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/GeneralProduct.h index 2a59d9464..29ac522d2 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/GeneralProduct.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/GeneralProduct.h @@ -232,7 +232,7 @@ EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest& // FIXME not very good if rhs is real and lhs complex while alpha is real too const Index cols = dest.cols(); for (Index j=0; j @@ -257,7 +257,7 @@ template class GeneralProduct : public ProductBase, Lhs, Rhs> { - template struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {}; + template struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {}; public: EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) @@ -281,22 +281,22 @@ class GeneralProduct template inline void evalTo(Dest& dest) const { - internal::outer_product_selector_run(*this, dest, set(), IsRowMajor()); + internal::outer_product_selector_run(*this, dest, set(), is_row_major()); } template inline void addTo(Dest& dest) const { - internal::outer_product_selector_run(*this, dest, add(), IsRowMajor()); + internal::outer_product_selector_run(*this, dest, add(), is_row_major()); } template inline void subTo(Dest& dest) const { - internal::outer_product_selector_run(*this, dest, sub(), IsRowMajor()); + internal::outer_product_selector_run(*this, dest, sub(), is_row_major()); } template void scaleAndAddTo(Dest& dest, const Scalar& alpha) const { - internal::outer_product_selector_run(*this, dest, adds(alpha), IsRowMajor()); + internal::outer_product_selector_run(*this, dest, adds(alpha), is_row_major()); } }; @@ -425,15 +425,18 @@ template<> struct gemv_selector ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) * RhsBlasTraits::extractScalarFactor(prod.rhs()); + // make sure Dest is a compile-time vector type (bug 1166) + typedef typename conditional::type ActualDest; + enum { // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 // on, the other hand it is good for the cache to pack the vector anyways... - EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1, + EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime==1), ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), - MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal + MightCannotUseDest = (ActualDest::InnerStrideAtCompileTime!=1) || ComplexByReal }; - gemv_static_vector_if static_dest; + gemv_static_vector_if static_dest; bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0)); bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; @@ -522,7 +525,7 @@ template<> struct gemv_selector actualLhs.rows(), actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhsPtr, 1, - dest.data(), dest.innerStride(), + dest.data(), dest.col(0).innerStride(), //NOTE if dest is not a vector at compile-time, then dest.innerStride() might be wrong. 
(bug 1166) actualAlpha); } }; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/IO.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/IO.h index c8d5f6379..8d4bc59e9 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/IO.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/IO.h @@ -185,21 +185,22 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& explicit_precision = fmt.precision; } + std::streamsize old_precision = 0; + if(explicit_precision) old_precision = s.precision(explicit_precision); + bool align_cols = !(fmt.flags & DontAlignCols); if(align_cols) { // compute the largest width - for(Index j = 1; j < m.cols(); ++j) + for(Index j = 0; j < m.cols(); ++j) for(Index i = 0; i < m.rows(); ++i) { std::stringstream sstr; - if(explicit_precision) sstr.precision(explicit_precision); + sstr.copyfmt(s); sstr << m.coeff(i,j); width = std::max(width, Index(sstr.str().length())); } } - std::streamsize old_precision = 0; - if(explicit_precision) old_precision = s.precision(explicit_precision); s << fmt.matPrefix; for(Index i = 0; i < m.rows(); ++i) { diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/MapBase.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/MapBase.h index 6876de588..81efc4a6d 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/MapBase.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/MapBase.h @@ -123,7 +123,7 @@ template class MapBase return internal::ploadt(m_data + index * innerStride()); } - inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) + explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) checkSanity(); @@ -149,6 +149,10 @@ template class MapBase checkSanity(); } + #ifdef EIGEN_MAPBASE_PLUGIN + #include EIGEN_MAPBASE_PLUGIN + #endif + protected: void checkSanity() const @@ -157,7 +161,7 @@ template class MapBase internal::inner_stride_at_compile_time::ret==1), PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1); eigen_assert(EIGEN_IMPLIES(internal::traits::Flags&AlignedBit, (size_t(m_data) % 16) == 0) - && "data is not aligned"); + && "input pointer is not aligned on a 16 byte boundary"); } PointerType m_data; @@ -168,6 +172,7 @@ template class MapBase template class MapBase : public MapBase { + typedef MapBase ReadOnlyMapBase; public: typedef MapBase Base; @@ -230,13 +235,17 @@ template class MapBase Derived& operator=(const MapBase& other) { - Base::Base::operator=(other); + ReadOnlyMapBase::Base::operator=(other); return derived(); } - using Base::Base::operator=; + // In theory we could simply refer to Base:Base::operator=, but MSVC does not like Base::Base, + // see bugs 821 and 920. 
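The ReadOnlyMapBase typedef introduced just above exists so the writable MapBase specialization can reach its grandparent's operator= without writing Base::Base, which the comment notes MSVC mishandles (Eigen bugs 821 and 920). The workaround generalizes to any deep hierarchy; a minimal illustration with made-up class names:

// Made-up three-level hierarchy standing in for MapBase's read-only and writable layers.
struct grandparent
{
  grandparent& assign(int) { return *this; }
};

struct parent : grandparent
{
  typedef grandparent Base;
};

struct child : parent
{
  typedef parent Base;
  // Give the intermediate base a second, explicit name...
  typedef parent ReadOnlyBase;

  child& assign(int v)
  {
    // ...so the grandparent member is reached as ReadOnlyBase::Base::assign
    // rather than the Base::Base::assign spelling that trips up MSVC.
    ReadOnlyBase::Base::assign(v);
    return *this;
  }
};

int main()
{
  child c;
  c.assign(42);
  return 0;
}

Here ReadOnlyBase plays the role that ReadOnlyMapBase plays in the patch.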
+ using ReadOnlyMapBase::Base::operator=; }; +#undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS + } // end namespace Eigen #endif // EIGEN_MAPBASE_H diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/MathFunctions.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/MathFunctions.h index 2bfc5ebd9..4e17ecd4b 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/MathFunctions.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/MathFunctions.h @@ -294,7 +294,7 @@ struct hypot_impl RealScalar _x = abs(x); RealScalar _y = abs(y); RealScalar p = (max)(_x, _y); - if(p==RealScalar(0)) return 0; + if(p==RealScalar(0)) return RealScalar(0); RealScalar q = (min)(_x, _y); RealScalar qp = q/p; return p * sqrt(RealScalar(1) + qp*qp); @@ -707,21 +707,21 @@ struct scalar_fuzzy_impl : scalar_fuzzy_default_impl:: template inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, - typename NumTraits::Real precision = NumTraits::dummy_precision()) + const typename NumTraits::Real &precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::template isMuchSmallerThan(x, y, precision); } template inline bool isApprox(const Scalar& x, const Scalar& y, - typename NumTraits::Real precision = NumTraits::dummy_precision()) + const typename NumTraits::Real &precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::isApprox(x, y, precision); } template inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, - typename NumTraits::Real precision = NumTraits::dummy_precision()) + const typename NumTraits::Real &precision = NumTraits::dummy_precision()) { return scalar_fuzzy_impl::isApproxOrLessThan(x, y, precision); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/Matrix.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/Matrix.h index 0ba5d90cc..02be142d8 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/Matrix.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/Matrix.h @@ -211,6 +211,21 @@ class Matrix : Base(internal::constructor_without_unaligned_array_assert()) { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } +#ifdef EIGEN_HAVE_RVALUE_REFERENCES + Matrix(Matrix&& other) + : Base(std::move(other)) + { + Base::_check_template_params(); + if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic) + Base::_set_noalias(other); + } + Matrix& operator=(Matrix&& other) + { + other.swap(*this); + return *this; + } +#endif + /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors * * Note that this is only useful for dynamic-size vectors. For fixed-size vectors, @@ -304,7 +319,7 @@ class Matrix : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) { Base::_check_template_params(); - Base::resize(other.rows(), other.cols()); + Base::_resize_to_match(other); // FIXME/CHECK: isn't *this = other.derived() more efficient. it allows to // go for pure _set() implementations, right? 
*this = other; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/MatrixBase.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/MatrixBase.h index 9193b6abb..e83ef4dc0 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/MatrixBase.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/MatrixBase.h @@ -159,13 +159,11 @@ template class MatrixBase template Derived& operator=(const ReturnByValue& other); -#ifndef EIGEN_PARSED_BY_DOXYGEN template Derived& lazyAssign(const ProductBase& other); template Derived& lazyAssign(const MatrixPowerProduct& other); -#endif // not EIGEN_PARSED_BY_DOXYGEN template Derived& operator+=(const MatrixBase& other); @@ -215,7 +213,7 @@ template class MatrixBase typedef Diagonal DiagonalReturnType; DiagonalReturnType diagonal(); - typedef typename internal::add_const >::type ConstDiagonalReturnType; + typedef typename internal::add_const >::type ConstDiagonalReturnType; ConstDiagonalReturnType diagonal() const; template struct DiagonalIndexReturnType { typedef Diagonal Type; }; @@ -223,16 +221,12 @@ template class MatrixBase template typename DiagonalIndexReturnType::Type diagonal(); template typename ConstDiagonalIndexReturnType::Type diagonal() const; + + typedef Diagonal DiagonalDynamicIndexReturnType; + typedef typename internal::add_const >::type ConstDiagonalDynamicIndexReturnType; - // Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations. - // On the other hand they confuse MSVC8... - #if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later - typename MatrixBase::template DiagonalIndexReturnType::Type diagonal(Index index); - typename MatrixBase::template ConstDiagonalIndexReturnType::Type diagonal(Index index) const; - #else - typename DiagonalIndexReturnType::Type diagonal(Index index); - typename ConstDiagonalIndexReturnType::Type diagonal(Index index) const; - #endif + DiagonalDynamicIndexReturnType diagonal(Index index); + ConstDiagonalDynamicIndexReturnType diagonal(Index index) const; #ifdef EIGEN2_SUPPORT template typename internal::eigen2_part_return_type::type part(); @@ -446,6 +440,15 @@ template class MatrixBase template void applyOnTheRight(Index p, Index q, const JacobiRotation& j); +///////// SparseCore module ///////// + + template + EIGEN_STRONG_INLINE const typename SparseMatrixBase::template CwiseProductDenseReturnType::Type + cwiseProduct(const SparseMatrixBase &other) const + { + return other.cwiseProduct(derived()); + } + ///////// MatrixFunctions module ///////// typedef typename internal::stem_function::type StemFunction; @@ -510,6 +513,51 @@ template class MatrixBase {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} }; + +/*************************************************************************** +* Implementation of matrix base methods +***************************************************************************/ + +/** replaces \c *this by \c *this * \a other. + * + * \returns a reference to \c *this + * + * Example: \include MatrixBase_applyOnTheRight.cpp + * Output: \verbinclude MatrixBase_applyOnTheRight.out + */ +template +template +inline Derived& +MatrixBase::operator*=(const EigenBase &other) +{ + other.derived().applyThisOnTheRight(derived()); + return derived(); +} + +/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=(). 
+ * + * Example: \include MatrixBase_applyOnTheRight.cpp + * Output: \verbinclude MatrixBase_applyOnTheRight.out + */ +template +template +inline void MatrixBase::applyOnTheRight(const EigenBase &other) +{ + other.derived().applyThisOnTheRight(derived()); +} + +/** replaces \c *this by \a other * \c *this. + * + * Example: \include MatrixBase_applyOnTheLeft.cpp + * Output: \verbinclude MatrixBase_applyOnTheLeft.out + */ +template +template +inline void MatrixBase::applyOnTheLeft(const EigenBase &other) +{ + other.derived().applyThisOnTheLeft(derived()); +} + } // end namespace Eigen #endif // EIGEN_MATRIXBASE_H diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/PermutationMatrix.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/PermutationMatrix.h index 4fc5dd318..85ffae265 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/PermutationMatrix.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/PermutationMatrix.h @@ -250,6 +250,35 @@ class PermutationBase : public EigenBase template friend inline PlainPermutationType operator*(const Transpose >& other, const PermutationBase& perm) { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); } + + /** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation. + * + * This function is O(\c n) procedure allocating a buffer of \c n booleans. + */ + Index determinant() const + { + Index res = 1; + Index n = size(); + Matrix mask(n); + mask.fill(false); + Index r = 0; + while(r < n) + { + // search for the next seed + while(r=n) + break; + // we got one, let's follow it until we are back to the seed + Index k0 = r++; + mask.coeffRef(k0) = true; + for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k)) + { + mask.coeffRef(k) = true; + res = -res; + } + } + return res; + } protected: @@ -553,8 +582,12 @@ struct permut_matrix_product_retval template inline void evalTo(Dest& dst) const { const Index n = Side==OnTheLeft ? rows() : cols(); - - if(is_same::value && extract_data(dst) == extract_data(m_matrix)) + // FIXME we need an is_same for expression that is not sensitive to constness. For instance + // is_same_xpr, Block >::value should be true. 
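The determinant() method added to PermutationBase above reads the sign off the permutation's cycle structure: it walks each not-yet-visited cycle using a boolean mask and flips the result once per additional element, since a cycle of length L contributes a factor of (-1)^(L-1). The same parity computation as a self-contained sketch over plain STL containers rather than Eigen's index vector:

#include <cstdio>
#include <vector>

// Sign of a permutation given as indices[i] = image of i (standalone sketch, not Eigen's code).
int permutation_sign(const std::vector<int>& indices)
{
  const int n = static_cast<int>(indices.size());
  std::vector<bool> seen(n, false);
  int sign = 1;
  for (int r = 0; r < n; ++r)
  {
    if (seen[r]) continue;                 // r already belongs to a traversed cycle
    seen[r] = true;                        // r seeds a new cycle
    for (int k = indices[r]; k != r; k = indices[k])
    {
      seen[k] = true;
      sign = -sign;                        // each extra element in the cycle flips the sign
    }
  }
  return sign;
}

int main()
{
  std::vector<int> p;
  p.push_back(1); p.push_back(2); p.push_back(0);   // 3-cycle: even
  p.push_back(4); p.push_back(3);                   // 2-cycle: odd
  std::printf("%d\n", permutation_sign(p));         // prints -1
  return 0;
}

Like the Eigen version, this runs in O(n) time with an n-element boolean buffer.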
+ if( is_same::value + && blas_traits::HasUsableDirectAccess + && blas_traits::HasUsableDirectAccess + && extract_data(dst) == extract_data(m_matrix)) { // apply the permutation inplace Matrix mask(m_permutation.size()); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/PlainObjectBase.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/PlainObjectBase.h index af0a479c7..a4e4af4a7 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/PlainObjectBase.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/PlainObjectBase.h @@ -47,7 +47,10 @@ template<> struct check_rows_cols_for_overflow { } }; -template struct conservative_resize_like_impl; +template +struct conservative_resize_like_impl; template struct matrix_swap_impl; @@ -434,6 +437,36 @@ class PlainObjectBase : public internal::dense_xpr_base::type } #endif +#ifdef EIGEN_HAVE_RVALUE_REFERENCES + PlainObjectBase(PlainObjectBase&& other) + : m_storage( std::move(other.m_storage) ) + { + } + + PlainObjectBase& operator=(PlainObjectBase&& other) + { + using std::swap; + swap(m_storage, other.m_storage); + return *this; + } +#endif + + /** Copy constructor */ + EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& other) + : m_storage() + { + _check_template_params(); + lazyAssign(other); + } + + template + EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase &other) + : m_storage() + { + _check_template_params(); + lazyAssign(other); + } + EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols) : m_storage(a_size, nbRows, nbCols) { @@ -570,6 +603,8 @@ class PlainObjectBase : public internal::dense_xpr_base::type : (rows() == other.rows() && cols() == other.cols()))) && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined"); EIGEN_ONLY_USED_FOR_DEBUG(other); + if(this->size()==0) + resizeLike(other); #else resizeLike(other); #endif @@ -668,8 +703,10 @@ private: enum { ThisConstantIsPrivateInPlainObjectBase }; }; +namespace internal { + template -struct internal::conservative_resize_like_impl +struct conservative_resize_like_impl { typedef typename Derived::Index Index; static void run(DenseBase& _this, Index rows, Index cols) @@ -729,11 +766,14 @@ struct internal::conservative_resize_like_impl } }; -namespace internal { - +// Here, the specialization for vectors inherits from the general matrix case +// to allow calling .conservativeResize(rows,cols) on vectors. 
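The comment above explains why the vector specialization of conservative_resize_like_impl now derives from the general matrix case: inheritance plus a using-declaration lets the one-argument run(size) overload coexist with the inherited two-argument run(rows, cols) instead of hiding it, which is what makes conservativeResize(rows, cols) callable on vectors. A stripped-down sketch of that overload-forwarding pattern (names here are illustrative, not Eigen's):

#include <cstdio>

// Illustrative stand-ins for the general and vector resize implementations.
struct resize_general_demo
{
  static void run(int rows, int cols) { std::printf("resize to %d x %d\n", rows, cols); }
};

// Vector case: inherits the two-argument overload and adds a size-only one.
// Without the using-declaration, run(int) would hide the inherited run(int, int).
struct resize_vector_demo : resize_general_demo
{
  using resize_general_demo::run;
  static void run(int size) { std::printf("resize to %d\n", size); }
};

int main()
{
  resize_vector_demo::run(5);       // vector-specific overload
  resize_vector_demo::run(2, 3);    // inherited matrix overload remains visible
  return 0;
}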
template struct conservative_resize_like_impl + : conservative_resize_like_impl { + using conservative_resize_like_impl::run; + typedef typename Derived::Index Index; static void run(DenseBase& _this, Index size) { diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/ProductBase.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/ProductBase.h index a494b5f87..cf74470a9 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/ProductBase.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/ProductBase.h @@ -85,7 +85,14 @@ class ProductBase : public MatrixBase public: +#ifndef EIGEN_NO_MALLOC + typedef typename Base::PlainObject BasePlainObject; + typedef Matrix DynPlainObject; + typedef typename internal::conditional<(BasePlainObject::SizeAtCompileTime==Dynamic) || (BasePlainObject::SizeAtCompileTime*int(sizeof(Scalar)) < int(EIGEN_STACK_ALLOCATION_LIMIT)), + BasePlainObject, DynPlainObject>::type PlainObject; +#else typedef typename Base::PlainObject PlainObject; +#endif ProductBase(const Lhs& a_lhs, const Rhs& a_rhs) : m_lhs(a_lhs), m_rhs(a_rhs) @@ -180,7 +187,12 @@ namespace internal { template struct nested, N, PlainObject> { - typedef PlainObject const& type; + typedef typename GeneralProduct::PlainObject const& type; +}; +template +struct nested, N, PlainObject> +{ + typedef typename GeneralProduct::PlainObject const& type; }; } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/Redux.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/Redux.h index 50548fa9a..9b8662a6f 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/Redux.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/Redux.h @@ -247,8 +247,9 @@ struct redux_impl } }; -template -struct redux_impl +// NOTE: for SliceVectorizedTraversal we simply bypass unrolling +template +struct redux_impl { typedef typename Derived::Scalar Scalar; typedef typename packet_traits::type PacketScalar; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/Ref.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/Ref.h index aba795bdb..7a3becaf8 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/Ref.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/Ref.h @@ -94,24 +94,26 @@ struct traits > typedef _PlainObjectType PlainObjectType; typedef _StrideType StrideType; enum { - Options = _Options + Options = _Options, + Flags = traits >::Flags | NestByRefBit }; template struct match { enum { HasDirectAccess = internal::has_direct_access::ret, - StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)), + StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)), InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic) || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime) || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1), OuterStrideMatch = Derived::IsVectorAtCompileTime || int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime), AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits::Flags&AlignedBit)==AlignedBit), - MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch + ScalarTypeMatch = internal::is_same::value, + MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && 
AlignmentMatch && ScalarTypeMatch }; typedef typename internal::conditional::type type; }; - + }; template @@ -171,8 +173,12 @@ protected: } else ::new (static_cast(this)) Base(expr.data(), expr.rows(), expr.cols()); - ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(), - StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride()); + + if(Expression::IsVectorAtCompileTime && (!PlainObjectType::IsVectorAtCompileTime) && ((Expression::Flags&RowMajorBit)!=(PlainObjectType::Flags&RowMajorBit))) + ::new (&m_stride) StrideBase(expr.innerStride(), StrideType::InnerStrideAtCompileTime==0?0:1); + else + ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(), + StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride()); } StrideBase m_stride; @@ -182,7 +188,11 @@ protected: template class Ref : public RefBase > { + private: typedef internal::traits Traits; + template + inline Ref(const PlainObjectBase& expr, + typename internal::enable_if::MatchAtCompileTime),Derived>::type* = 0); public: typedef RefBase Base; @@ -194,17 +204,20 @@ template class Ref inline Ref(PlainObjectBase& expr, typename internal::enable_if::MatchAtCompileTime),Derived>::type* = 0) { - Base::construct(expr); + EIGEN_STATIC_ASSERT(static_cast(Traits::template match::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); + Base::construct(expr.derived()); } template inline Ref(const DenseBase& expr, - typename internal::enable_if::value&&bool(Traits::template match::MatchAtCompileTime)),Derived>::type* = 0, - int = Derived::ThisConstantIsPrivateInPlainObjectBase) + typename internal::enable_if::MatchAtCompileTime),Derived>::type* = 0) #else template inline Ref(DenseBase& expr) #endif { + EIGEN_STATIC_ASSERT(static_cast(internal::is_lvalue::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY); + EIGEN_STATIC_ASSERT(static_cast(Traits::template match::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH); + enum { THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY = Derived::ThisConstantIsPrivateInPlainObjectBase}; Base::construct(expr.const_cast_derived()); } @@ -223,13 +236,23 @@ template class Ref< EIGEN_DENSE_PUBLIC_INTERFACE(Ref) template - inline Ref(const DenseBase& expr) + inline Ref(const DenseBase& expr, + typename internal::enable_if::ScalarTypeMatch),Derived>::type* = 0) { // std::cout << match_helper::HasDirectAccess << "," << match_helper::OuterStrideMatch << "," << match_helper::InnerStrideMatch << "\n"; // std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n"; // std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n"; construct(expr.derived(), typename Traits::template match::type()); } + + inline Ref(const Ref& other) : Base(other) { + // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy + } + + template + inline Ref(const RefBase& other) { + construct(other.derived(), typename Traits::template match::type()); + } protected: diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/Replicate.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/Replicate.h index dde86a834..ac4537c14 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/Replicate.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/Replicate.h @@ -135,7 +135,7 @@ template class Replicate */ template template -inline const Replicate +const Replicate DenseBase::replicate() const { return Replicate(derived()); @@ -150,7 +150,7 @@ 
DenseBase::replicate() const * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate */ template -inline const Replicate +const typename DenseBase::ReplicateReturnType DenseBase::replicate(Index rowFactor,Index colFactor) const { return Replicate(derived(),rowFactor,colFactor); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/ReturnByValue.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/ReturnByValue.h index d66c24ba0..f635598dc 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/ReturnByValue.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/ReturnByValue.h @@ -72,6 +72,8 @@ template class ReturnByValue const Unusable& coeff(Index,Index) const { return *reinterpret_cast(this); } Unusable& coeffRef(Index) { return *reinterpret_cast(this); } Unusable& coeffRef(Index,Index) { return *reinterpret_cast(this); } + template Unusable& packet(Index) const; + template Unusable& packet(Index, Index) const; #endif }; @@ -83,6 +85,15 @@ Derived& DenseBase::operator=(const ReturnByValue& other) return derived(); } +template +template +Derived& DenseBase::lazyAssign(const ReturnByValue& other) +{ + other.evalTo(derived()); + return derived(); +} + + } // end namespace Eigen #endif // EIGEN_RETURNBYVALUE_H diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h index 22f3047b4..0956475af 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/SelfCwiseBinaryOp.h @@ -180,15 +180,9 @@ inline Derived& DenseBase::operator*=(const Scalar& other) template inline Derived& DenseBase::operator/=(const Scalar& other) { - typedef typename internal::conditional::IsInteger, - internal::scalar_quotient_op, - internal::scalar_product_op >::type BinOp; typedef typename Derived::PlainObject PlainObject; - SelfCwiseBinaryOp tmp(derived()); - Scalar actual_other; - if(NumTraits::IsInteger) actual_other = other; - else actual_other = Scalar(1)/other; - tmp = PlainObject::Constant(rows(),cols(), actual_other); + SelfCwiseBinaryOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived()); + tmp = PlainObject::Constant(rows(),cols(), other); return derived(); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/SolveTriangular.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/SolveTriangular.h index ef17f288e..83565ddd8 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/SolveTriangular.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/SolveTriangular.h @@ -116,17 +116,17 @@ template struct triangular_solver_unroller { enum { IsLower = ((Mode&Lower)==Lower), - I = IsLower ? Index : Size - Index - 1, - S = IsLower ? 0 : I+1 + RowIndex = IsLower ? Index : Size - Index - 1, + S = IsLower ? 
0 : RowIndex+1 }; static void run(const Lhs& lhs, Rhs& rhs) { if (Index>0) - rhs.coeffRef(I) -= lhs.row(I).template segment(S).transpose() + rhs.coeffRef(RowIndex) -= lhs.row(RowIndex).template segment(S).transpose() .cwiseProduct(rhs.template segment(S)).sum(); if(!(Mode & UnitDiag)) - rhs.coeffRef(I) /= lhs.coeff(I,I); + rhs.coeffRef(RowIndex) /= lhs.coeff(RowIndex,RowIndex); triangular_solver_unroller::run(lhs,rhs); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/StableNorm.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/StableNorm.h index c83e955ee..389d94275 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/StableNorm.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/StableNorm.h @@ -17,16 +17,29 @@ namespace internal { template inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale) { - Scalar max = bl.cwiseAbs().maxCoeff(); - if (max>scale) + using std::max; + Scalar maxCoeff = bl.cwiseAbs().maxCoeff(); + + if (maxCoeff>scale) { - ssq = ssq * numext::abs2(scale/max); - scale = max; - invScale = Scalar(1)/scale; + ssq = ssq * numext::abs2(scale/maxCoeff); + Scalar tmp = Scalar(1)/maxCoeff; + if(tmp > NumTraits::highest()) + { + invScale = NumTraits::highest(); + scale = Scalar(1)/invScale; + } + else + { + scale = maxCoeff; + invScale = tmp; + } } - // TODO if the max is much much smaller than the current scale, + + // TODO if the maxCoeff is much much smaller than the current scale, // then we can neglect this sub vector - ssq += (bl*invScale).squaredNorm(); + if(scale>Scalar(0)) // if scale==0, then bl is 0 + ssq += (bl*invScale).squaredNorm(); } template diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/Transpose.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/Transpose.h index f21b3aa65..22096ea2f 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/Transpose.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/Transpose.h @@ -284,7 +284,8 @@ struct inplace_transpose_selector { // non square matrix * Notice however that this method is only useful if you want to replace a matrix by its own transpose. * If you just need the transpose of a matrix, use transpose(). * - * \note if the matrix is not square, then \c *this must be a resizable matrix. + * \note if the matrix is not square, then \c *this must be a resizable matrix. + * This excludes (non-square) fixed-size matrices, block-expressions and maps. * * \sa transpose(), adjoint(), adjointInPlace() */ template @@ -315,6 +316,7 @@ inline void DenseBase::transposeInPlace() * If you just need the adjoint of a matrix, use adjoint(). * * \note if the matrix is not square, then \c *this must be a resizable matrix. + * This excludes (non-square) fixed-size matrices, block-expressions and maps. 
* * \sa transpose(), adjoint(), transposeInPlace() */ template diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/TriangularMatrix.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/TriangularMatrix.h index fba07365f..4d65392c6 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/TriangularMatrix.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/TriangularMatrix.h @@ -278,21 +278,21 @@ template class TriangularView /** Efficient triangular matrix times vector/matrix product */ template - TriangularProduct + TriangularProduct operator*(const MatrixBase& rhs) const { return TriangularProduct - + (m_matrix, rhs.derived()); } /** Efficient vector/matrix times triangular matrix product */ template friend - TriangularProduct + TriangularProduct operator*(const MatrixBase& lhs, const TriangularView& rhs) { return TriangularProduct - + (lhs.derived(),rhs.m_matrix); } @@ -380,19 +380,19 @@ template class TriangularView EIGEN_STRONG_INLINE TriangularView& operator=(const ProductBase& other) { setZero(); - return assignProduct(other,1); + return assignProduct(other.derived(),1); } template EIGEN_STRONG_INLINE TriangularView& operator+=(const ProductBase& other) { - return assignProduct(other,1); + return assignProduct(other.derived(),1); } template EIGEN_STRONG_INLINE TriangularView& operator-=(const ProductBase& other) { - return assignProduct(other,-1); + return assignProduct(other.derived(),-1); } @@ -400,25 +400,34 @@ template class TriangularView EIGEN_STRONG_INLINE TriangularView& operator=(const ScaledProduct& other) { setZero(); - return assignProduct(other,other.alpha()); + return assignProduct(other.derived(),other.alpha()); } template EIGEN_STRONG_INLINE TriangularView& operator+=(const ScaledProduct& other) { - return assignProduct(other,other.alpha()); + return assignProduct(other.derived(),other.alpha()); } template EIGEN_STRONG_INLINE TriangularView& operator-=(const ScaledProduct& other) { - return assignProduct(other,-other.alpha()); + return assignProduct(other.derived(),-other.alpha()); } protected: template EIGEN_STRONG_INLINE TriangularView& assignProduct(const ProductBase& prod, const Scalar& alpha); + + template + EIGEN_STRONG_INLINE TriangularView& assignProduct(const TriangularProduct& prod, const Scalar& alpha) + { + lazyAssign(alpha*prod.eval()); + return *this; + } MatrixTypeNested m_matrix; }; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/VectorwiseOp.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/VectorwiseOp.h index 511564875..d5ab03664 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/VectorwiseOp.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/VectorwiseOp.h @@ -50,7 +50,7 @@ struct traits > MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime, Flags0 = (unsigned int)_MatrixTypeNested::Flags & HereditaryBits, Flags = (Flags0 & ~RowMajorBit) | (RowsAtCompileTime == 1 ? RowMajorBit : 0), - TraversalSize = Direction==Vertical ? RowsAtCompileTime : ColsAtCompileTime + TraversalSize = Direction==Vertical ? MatrixType::RowsAtCompileTime : MatrixType::ColsAtCompileTime }; #if EIGEN_GNUC_AT_LEAST(3,4) typedef typename MemberOp::template Cost CostOpType; @@ -58,7 +58,8 @@ struct traits > typedef typename MemberOp::template Cost CostOpType; #endif enum { - CoeffReadCost = TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value) + CoeffReadCost = TraversalSize==Dynamic ? 
Dynamic + : TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value) }; }; } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/Visitor.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/Visitor.h index 64867b7a2..dd94eb8f0 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/Visitor.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/Visitor.h @@ -76,14 +76,17 @@ template template void DenseBase::visit(Visitor& visitor) const { + typedef typename internal::remove_all::type ThisNested; + typename Derived::Nested thisNested(derived()); + enum { unroll = SizeAtCompileTime != Dynamic && CoeffReadCost != Dynamic && (SizeAtCompileTime == 1 || internal::functor_traits::Cost != Dynamic) && SizeAtCompileTime * CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits::Cost <= EIGEN_UNROLLING_LIMIT }; - return internal::visitor_impl::run(derived(), visitor); + >::run(thisNested, visitor); } namespace internal { diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/NEON/Complex.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/NEON/Complex.h index f183d31de..8d9255eef 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/NEON/Complex.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/NEON/Complex.h @@ -110,7 +110,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup(const std::complex< template<> EIGEN_STRONG_INLINE void pstore >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); } template<> EIGEN_STRONG_INLINE void pstoreu >(std::complex * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); } -template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { __pld((float *)addr); } +template<> EIGEN_STRONG_INLINE void prefetch >(const std::complex * addr) { EIGEN_ARM_PREFETCH((float *)addr); } template<> EIGEN_STRONG_INLINE std::complex pfirst(const Packet2cf& a) { diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/NEON/PacketMath.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/NEON/PacketMath.h index 163bac215..d49670e04 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/NEON/PacketMath.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/NEON/PacketMath.h @@ -48,9 +48,18 @@ typedef uint32x4_t Packet4ui; #define EIGEN_INIT_NEON_PACKET2(X, Y) {X, Y} #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {X, Y, Z, W} #endif - -#ifndef __pld -#define __pld(x) asm volatile ( " pld [%[addr]]\n" :: [addr] "r" (x) : "cc" ); + +// arm64 does have the pld instruction. 
If available, let's trust the __builtin_prefetch built-in function +// which available on LLVM and GCC (at least) +#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || defined(__GNUC__) + #define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR); +#elif defined __pld + #define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR) +#elif !defined(__aarch64__) + #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ( " pld [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" ); +#else + // by default no explicit prefetching + #define EIGEN_ARM_PREFETCH(ADDR) #endif template<> struct packet_traits : default_packet_traits @@ -209,8 +218,8 @@ template<> EIGEN_STRONG_INLINE void pstore(int* to, const Packet4i& f template<> EIGEN_STRONG_INLINE void pstoreu(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); } template<> EIGEN_STRONG_INLINE void pstoreu(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); } -template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { __pld(addr); } -template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { __pld(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const float* addr) { EIGEN_ARM_PREFETCH(addr); } +template<> EIGEN_STRONG_INLINE void prefetch(const int* addr) { EIGEN_ARM_PREFETCH(addr); } // FIXME only store the 2 first elements ? template<> EIGEN_STRONG_INLINE float pfirst(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; } @@ -375,6 +384,7 @@ template<> EIGEN_STRONG_INLINE int predux_max(const Packet4i& a) a_lo = vget_low_s32(a); a_hi = vget_high_s32(a); max = vpmax_s32(a_lo, a_hi); + max = vpmax_s32(max, max); return vget_lane_s32(max, 0); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h index 3376a984e..2b07168ae 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/SSE/MathFunctions.h @@ -52,7 +52,7 @@ Packet4f plog(const Packet4f& _x) Packet4i emm0; - Packet4f invalid_mask = _mm_cmplt_ps(x, _mm_setzero_ps()); + Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // not greater equal is true if x is NaN Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps()); x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */ @@ -126,7 +126,7 @@ Packet4f pexp(const Packet4f& _x) _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f); - Packet4f tmp = _mm_setzero_ps(), fx; + Packet4f tmp, fx; Packet4i emm0; // clamp x @@ -166,7 +166,7 @@ Packet4f pexp(const Packet4f& _x) emm0 = _mm_cvttps_epi32(fx); emm0 = _mm_add_epi32(emm0, p4i_0x7f); emm0 = _mm_slli_epi32(emm0, 23); - return pmul(y, _mm_castsi128_ps(emm0)); + return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x); } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d pexp(const Packet2d& _x) @@ -195,7 +195,7 @@ Packet2d pexp(const Packet2d& _x) _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6); static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0); - Packet2d tmp = _mm_setzero_pd(), fx; + Packet2d tmp, fx; Packet4i emm0; // clamp x @@ -239,7 +239,7 @@ Packet2d pexp(const Packet2d& _x) emm0 = _mm_add_epi32(emm0, p4i_1023_0); emm0 = _mm_slli_epi32(emm0, 20); emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3)); - return pmul(x, _mm_castsi128_pd(emm0)); + return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), 
_x); } /* evaluation of 4 sines at onces, using SSE2 intrinsics. @@ -279,7 +279,7 @@ Packet4f psin(const Packet4f& _x) _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f); _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI - Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y; + Packet4f xmm1, xmm2, xmm3, sign_bit, y; Packet4i emm0, emm2; sign_bit = x; @@ -378,7 +378,7 @@ Packet4f pcos(const Packet4f& _x) _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f); _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI - Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, y; + Packet4f xmm1, xmm2, xmm3, y; Packet4i emm0, emm2; x = pabs(x); @@ -442,8 +442,11 @@ Packet4f pcos(const Packet4f& _x) return _mm_xor_ps(y, sign_bit); } +#if EIGEN_FAST_MATH + // This is based on Quake3's fast inverse square root. // For detail see here: http://www.beyond3d.com/content/articles/8/ +// It lacks 1 (or 2 bits in some rare cases) of precision, and does not handle negative, +inf, or denormalized numbers correctly. template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psqrt(const Packet4f& _x) { @@ -457,6 +460,14 @@ Packet4f psqrt(const Packet4f& _x) return pmul(_x,x); } +#else + +template<> EIGEN_STRONG_INLINE Packet4f psqrt(const Packet4f& x) { return _mm_sqrt_ps(x); } + +#endif + +template<> EIGEN_STRONG_INLINE Packet2d psqrt(const Packet2d& x) { return _mm_sqrt_pd(x); } + } // end namespace internal } // end namespace Eigen diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/SSE/PacketMath.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/SSE/PacketMath.h index 5d7985e75..bef898b1f 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/SSE/PacketMath.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/arch/SSE/PacketMath.h @@ -14,9 +14,6 @@ namespace Eigen { namespace internal { -// Deprecated in C++11. -#define register - #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 #endif @@ -86,7 +83,8 @@ template<> struct packet_traits : default_packet_traits size=2, HasDiv = 1, - HasExp = 1 + HasExp = 1, + HasSqrt = 1 }; }; template<> struct packet_traits : default_packet_traits @@ -237,63 +235,27 @@ template<> EIGEN_STRONG_INLINE Packet4i pload(const int* from) { E return _mm_loadu_ps(from); #endif } - template<> EIGEN_STRONG_INLINE Packet2d ploadu(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); } - template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast(from)); } #else -// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would -// require pointer casting to incompatible pointer types and leads to invalid code -// because of the strict aliasing rule. The "dummy" stuff are required to enforce -// a correct instruction dependency. -// TODO: do the same for MSVC (ICC is compatible) // NOTE: with the code below, MSVC's compiler crashes! 
-#if defined(__GNUC__) && defined(__i386__) - // bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd - #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1 -#elif defined(__clang__) - // bug 201: Segfaults in __mm_loadh_pd with clang 2.8 - #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1 -#else - #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0 -#endif - template<> EIGEN_STRONG_INLINE Packet4f ploadu(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD -#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS return _mm_loadu_ps(from); -#else - __m128d res; - res = _mm_load_sd((const double*)(from)) ; - res = _mm_loadh_pd(res, (const double*)(from+2)) ; - return _mm_castpd_ps(res); -#endif } +#endif + template<> EIGEN_STRONG_INLINE Packet2d ploadu(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD -#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS return _mm_loadu_pd(from); -#else - __m128d res; - res = _mm_load_sd(from) ; - res = _mm_loadh_pd(res,from+1); - return res; -#endif } template<> EIGEN_STRONG_INLINE Packet4i ploadu(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD -#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS - return _mm_loadu_si128(reinterpret_cast(from)); -#else - __m128d res; - res = _mm_load_sd((const double*)(from)) ; - res = _mm_loadh_pd(res, (const double*)(from+2)) ; - return _mm_castpd_si128(res); -#endif + return _mm_loadu_si128(reinterpret_cast(from)); } -#endif + template<> EIGEN_STRONG_INLINE Packet4f ploaddup(const float* from) { @@ -510,8 +472,8 @@ template<> EIGEN_STRONG_INLINE int predux_min(const Packet4i& a) // for GCC (eg., it does not like using std::min after the pstore !!) EIGEN_ALIGN16 int aux[4]; pstore(aux, a); - register int aux0 = aux[0] EIGEN_STRONG_INLINE int predux_max(const Packet4i& a) // for GCC (eg., it does not like using std::min after the pstore !!) EIGEN_ALIGN16 int aux[4]; pstore(aux, a); - register int aux0 = aux[0]>aux[1] ? aux[0] : aux[1]; - register int aux2 = aux[2]>aux[3] ? aux[2] : aux[3]; + int aux0 = aux[0]>aux[1] ? aux[0] : aux[1]; + int aux2 = aux[2]>aux[3] ? aux[2] : aux[3]; return aux0>aux2 ? aux0 : aux2; } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/products/CoeffBasedProduct.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/products/CoeffBasedProduct.h index c06a0df1c..2a9d65b94 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/products/CoeffBasedProduct.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/products/CoeffBasedProduct.h @@ -90,6 +90,7 @@ struct traits > | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0), CoeffReadCost = InnerSize == Dynamic ? Dynamic + : InnerSize == 0 ? 
0 : InnerSize * (NumTraits::MulCost + LhsCoeffReadCost + RhsCoeffReadCost) + (InnerSize - 1) * NumTraits::AddCost, @@ -133,7 +134,7 @@ class CoeffBasedProduct }; typedef internal::product_coeff_impl ScalarCoeffImpl; typedef CoeffBasedProduct LazyCoeffBasedProductType; @@ -184,7 +185,7 @@ class CoeffBasedProduct { PacketScalar res; internal::product_packet_impl ::run(row, col, m_lhs, m_rhs, res); return res; @@ -242,12 +243,12 @@ struct product_coeff_impl static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) { product_coeff_impl::run(row, col, lhs, rhs, res); - res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col); + res += lhs.coeff(row, UnrollingIndex-1) * rhs.coeff(UnrollingIndex-1, col); } }; template -struct product_coeff_impl +struct product_coeff_impl { typedef typename Lhs::Index Index; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) @@ -256,16 +257,23 @@ struct product_coeff_impl } }; +template +struct product_coeff_impl +{ + typedef typename Lhs::Index Index; + static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res) + { + res = RetScalar(0); + } +}; + template struct product_coeff_impl { typedef typename Lhs::Index Index; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res) { - eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); - res = lhs.coeff(row, 0) * rhs.coeff(0, col); - for(Index i = 1; i < lhs.cols(); ++i) - res += lhs.coeff(row, i) * rhs.coeff(i, col); + res = (lhs.row(row).transpose().cwiseProduct( rhs.col(col) )).sum(); } }; @@ -295,6 +303,16 @@ struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet> } }; +template +struct product_coeff_impl +{ + typedef typename Lhs::Index Index; + static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res) + { + res = 0; + } +}; + template struct product_coeff_impl { @@ -304,8 +322,7 @@ struct product_coeff_impl::run(row, col, lhs, rhs, pres); - product_coeff_impl::run(row, col, lhs, rhs, res); + product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); res = predux(pres); } }; @@ -373,7 +390,7 @@ struct product_packet_impl static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) { product_packet_impl::run(row, col, lhs, rhs, res); - res = pmadd(pset1(lhs.coeff(row, UnrollingIndex)), rhs.template packet(UnrollingIndex, col), res); + res = pmadd(pset1(lhs.coeff(row, UnrollingIndex-1)), rhs.template packet(UnrollingIndex-1, col), res); } }; @@ -384,12 +401,12 @@ struct product_packet_impl static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) { product_packet_impl::run(row, col, lhs, rhs, res); - res = pmadd(lhs.template packet(row, UnrollingIndex), pset1(rhs.coeff(UnrollingIndex, col)), res); + res = pmadd(lhs.template packet(row, UnrollingIndex-1), pset1(rhs.coeff(UnrollingIndex-1, col)), res); } }; template -struct product_packet_impl +struct product_packet_impl { typedef typename Lhs::Index Index; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res) @@ -399,7 +416,7 @@ struct product_packet_impl }; template -struct product_packet_impl +struct product_packet_impl { typedef typename Lhs::Index Index; static EIGEN_STRONG_INLINE void run(Index row, Index col, 
const Lhs& lhs, const Rhs& rhs, Packet &res) @@ -408,16 +425,35 @@ struct product_packet_impl } }; +template +struct product_packet_impl +{ + typedef typename Lhs::Index Index; + static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res) + { + res = pset1(0); + } +}; + +template +struct product_packet_impl +{ + typedef typename Lhs::Index Index; + static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res) + { + res = pset1(0); + } +}; + template struct product_packet_impl { typedef typename Lhs::Index Index; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res) { - eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); - res = pmul(pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); - for(Index i = 1; i < lhs.cols(); ++i) - res = pmadd(pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); + res = pset1(0); + for(Index i = 0; i < lhs.cols(); ++i) + res = pmadd(pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); } }; @@ -427,10 +463,9 @@ struct product_packet_impl typedef typename Lhs::Index Index; static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res) { - eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix"); - res = pmul(lhs.template packet(row, 0), pset1(rhs.coeff(0, col))); - for(Index i = 1; i < lhs.cols(); ++i) - res = pmadd(lhs.template packet(row, i), pset1(rhs.coeff(i, col)), res); + res = pset1(0); + for(Index i = 0; i < lhs.cols(); ++i) + res = pmadd(lhs.template packet(row, i), pset1(rhs.coeff(i, col)), res); } }; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h index 780fa74d3..bcdca5b0d 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralBlockPanelKernel.h @@ -1128,6 +1128,8 @@ EIGEN_DONT_INLINE void gemm_pack_lhs::size }; EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS"); + EIGEN_UNUSED_VARIABLE(stride) + EIGEN_UNUSED_VARIABLE(offset) eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) ); conj_if::IsComplex && Conjugate> cj; @@ -1215,6 +1217,8 @@ EIGEN_DONT_INLINE void gemm_pack_rhs=depth && offset<=stride)); conj_if::IsComplex && Conjugate> cj; Index packet_cols = (cols/nr) * nr; @@ -1266,6 +1270,8 @@ EIGEN_DONT_INLINE void gemm_pack_rhs=depth && offset<=stride)); conj_if::IsComplex && Conjugate> cj; Index packet_cols = (cols/nr) * nr; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h index 3f5ffcf51..cfd2f0095 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralMatrixMatrix.h @@ -140,8 +140,10 @@ static void run(Index rows, Index cols, Index depth, // Release all the sub blocks B'_j of B' for the current thread, // i.e., we simply decrement the number of users by 1 for(Index j=0; j GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) { +#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG)) typedef internal::scalar_product_op BinOp; 
EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar); +#endif } template void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); + if(m_lhs.cols()==0 || m_lhs.rows()==0 || m_rhs.cols()==0) + return; typename internal::add_const_on_value_type::type lhs = LhsBlasTraits::extract(m_lhs); typename internal::add_const_on_value_type::type rhs = RhsBlasTraits::extract(m_rhs); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralMatrixVector.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralMatrixVector.h index c1cb78498..09387703e 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralMatrixVector.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/products/GeneralMatrixVector.h @@ -52,11 +52,7 @@ EIGEN_DONT_INLINE static void run( Index rows, Index cols, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsIncr, - ResScalar* res, Index - #ifdef EIGEN_INTERNAL_DEBUGGING - resIncr - #endif - , RhsScalar alpha); + ResScalar* res, Index resIncr, RhsScalar alpha); }; template @@ -64,12 +60,9 @@ EIGEN_DONT_INLINE void general_matrix_vector_product(&lhs0[i]), ptmp0, pload(&res[i]))); + pstore(&res[i], pcj.pmadd(pload(&lhs0[i]), ptmp0, pload(&res[i]))); else for (Index i = alignedStart;i(&lhs0[i]), ptmp0, pload(&res[i]))); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/products/Parallelizer.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/products/Parallelizer.h index 5c3e9b7ac..6937ee332 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/products/Parallelizer.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/products/Parallelizer.h @@ -125,19 +125,22 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos if(transpose) std::swap(rows,cols); - Index blockCols = (cols / threads) & ~Index(0x3); - Index blockRows = (rows / threads) & ~Index(0x7); - GemmParallelInfo* info = new GemmParallelInfo[threads]; - #pragma omp parallel for schedule(static,1) num_threads(threads) - for(Index i=0; i(t0); @@ -147,7 +147,7 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product class Rotation2D; template class AngleAxis; template class Translation; +// Sparse module: +template class SparseMatrixBase; + #ifdef EIGEN2_SUPPORT template class eigen2_RotationBase; template class eigen2_Cross; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/util/MKL_support.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/util/MKL_support.h index 1e6e355d6..1ef3b61db 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/util/MKL_support.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/util/MKL_support.h @@ -54,11 +54,60 @@ #endif #if defined EIGEN_USE_MKL +# include +/*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/ +# ifndef INTEL_MKL_VERSION +# undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */ +# elif INTEL_MKL_VERSION < 100305 /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/ +# undef EIGEN_USE_MKL +# endif +# ifndef EIGEN_USE_MKL + /*If the MKL version is too old, undef everything*/ +# undef EIGEN_USE_MKL_ALL +# undef EIGEN_USE_BLAS +# undef EIGEN_USE_LAPACKE +# undef EIGEN_USE_MKL_VML +# undef EIGEN_USE_LAPACKE_STRICT +# undef EIGEN_USE_LAPACKE +# endif +#endif -#include +#if defined EIGEN_USE_MKL #include #define EIGEN_MKL_VML_THRESHOLD 128 +/* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */ +/* MKL_BLAS, etc are not defined in 11.2 */ +#ifdef MKL_DOMAIN_ALL 
+#define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL +#else +#define EIGEN_MKL_DOMAIN_ALL MKL_ALL +#endif + +#ifdef MKL_DOMAIN_BLAS +#define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS +#else +#define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS +#endif + +#ifdef MKL_DOMAIN_FFT +#define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT +#else +#define EIGEN_MKL_DOMAIN_FFT MKL_FFT +#endif + +#ifdef MKL_DOMAIN_VML +#define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML +#else +#define EIGEN_MKL_DOMAIN_VML MKL_VML +#endif + +#ifdef MKL_DOMAIN_PARDISO +#define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO +#else +#define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO +#endif + namespace Eigen { typedef std::complex dcomplex; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/util/Macros.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/util/Macros.h index a50f1f8c8..e0d90eb82 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/util/Macros.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/util/Macros.h @@ -13,23 +13,292 @@ #define EIGEN_WORLD_VERSION 3 #define EIGEN_MAJOR_VERSION 2 -#define EIGEN_MINOR_VERSION 0 +#define EIGEN_MINOR_VERSION 8 #define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \ (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \ EIGEN_MINOR_VERSION>=z)))) + + +// Compiler identification, EIGEN_COMP_* + +/// \internal EIGEN_COMP_GNUC set to 1 for all compilers compatible with GCC #ifdef __GNUC__ + #define EIGEN_COMP_GNUC 1 +#else + #define EIGEN_COMP_GNUC 0 +#endif + +/// \internal EIGEN_COMP_CLANG set to 1 if the compiler is clang (alias for __clang__) +#if defined(__clang__) + #define EIGEN_COMP_CLANG 1 +#else + #define EIGEN_COMP_CLANG 0 +#endif + + +/// \internal EIGEN_COMP_LLVM set to 1 if the compiler backend is llvm +#if defined(__llvm__) + #define EIGEN_COMP_LLVM 1 +#else + #define EIGEN_COMP_LLVM 0 +#endif + +/// \internal EIGEN_COMP_ICC set to __INTEL_COMPILER if the compiler is Intel compiler, 0 otherwise +#if defined(__INTEL_COMPILER) + #define EIGEN_COMP_ICC __INTEL_COMPILER +#else + #define EIGEN_COMP_ICC 0 +#endif + +/// \internal EIGEN_COMP_MINGW set to 1 if the compiler is mingw +#if defined(__MINGW32__) + #define EIGEN_COMP_MINGW 1 +#else + #define EIGEN_COMP_MINGW 0 +#endif + +/// \internal EIGEN_COMP_SUNCC set to 1 if the compiler is Solaris Studio +#if defined(__SUNPRO_CC) + #define EIGEN_COMP_SUNCC 1 +#else + #define EIGEN_COMP_SUNCC 0 +#endif + +/// \internal EIGEN_COMP_MSVC set to _MSC_VER if the compiler is Microsoft Visual C++, 0 otherwise. +#if defined(_MSC_VER) + #define EIGEN_COMP_MSVC _MSC_VER +#else + #define EIGEN_COMP_MSVC 0 +#endif + +/// \internal EIGEN_COMP_MSVC_STRICT set to 1 if the compiler is really Microsoft Visual C++ and not ,e.g., ICC +#if EIGEN_COMP_MSVC && !(EIGEN_COMP_ICC) + #define EIGEN_COMP_MSVC_STRICT _MSC_VER +#else + #define EIGEN_COMP_MSVC_STRICT 0 +#endif + +/// \internal EIGEN_COMP_IBM set to 1 if the compiler is IBM XL C++ +#if defined(__IBMCPP__) || defined(__xlc__) + #define EIGEN_COMP_IBM 1 +#else + #define EIGEN_COMP_IBM 0 +#endif + +/// \internal EIGEN_COMP_PGI set to 1 if the compiler is Portland Group Compiler +#if defined(__PGI) + #define EIGEN_COMP_PGI 1 +#else + #define EIGEN_COMP_PGI 0 +#endif + +/// \internal EIGEN_COMP_ARM set to 1 if the compiler is ARM Compiler +#if defined(__CC_ARM) || defined(__ARMCC_VERSION) + #define EIGEN_COMP_ARM 1 +#else + #define EIGEN_COMP_ARM 0 +#endif + + +/// \internal EIGEN_GNUC_STRICT set to 1 if the compiler is really GCC and not a compatible compiler (e.g., ICC, clang, mingw, etc.) 
+#if EIGEN_COMP_GNUC && !(EIGEN_COMP_CLANG || EIGEN_COMP_ICC || EIGEN_COMP_MINGW || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM ) + #define EIGEN_COMP_GNUC_STRICT 1 +#else + #define EIGEN_COMP_GNUC_STRICT 0 +#endif + + +#if EIGEN_COMP_GNUC #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x) + #define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__= 201103L) || \ + (defined(_MSC_VER) && _MSC_VER >= 1600)) + #define EIGEN_HAVE_RVALUE_REFERENCES +#endif + + +// Cross compiler wrapper around LLVM's __has_builtin +#ifdef __has_builtin +# define EIGEN_HAS_BUILTIN(x) __has_builtin(x) +#else +# define EIGEN_HAS_BUILTIN(x) 0 +#endif + /** Allows to disable some optimizations which might affect the accuracy of the result. * Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them. * They currently include: @@ -238,11 +522,16 @@ #endif // Suppresses 'unused variable' warnings. -#define EIGEN_UNUSED_VARIABLE(var) (void)var; +namespace Eigen { + namespace internal { + template void ignore_unused_variable(const T&) {} + } +} +#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var); #if !defined(EIGEN_ASM_COMMENT) #if (defined __GNUC__) && ( defined(__i386__) || defined(__x86_64__) ) - #define EIGEN_ASM_COMMENT(X) asm("#" X) + #define EIGEN_ASM_COMMENT(X) __asm__("#" X) #else #define EIGEN_ASM_COMMENT(X) #endif @@ -266,6 +555,7 @@ #error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler #endif +#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8) #define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16) #if EIGEN_ALIGN_STATICALLY @@ -284,7 +574,8 @@ #endif #ifndef EIGEN_STACK_ALLOCATION_LIMIT -#define EIGEN_STACK_ALLOCATION_LIMIT 20000 +// 131072 == 128 KB +#define EIGEN_STACK_ALLOCATION_LIMIT 131072 #endif #ifndef EIGEN_DEFAULT_IO_FORMAT @@ -300,7 +591,7 @@ // just an empty macro ! #define EIGEN_EMPTY -#if defined(_MSC_VER) && (!defined(__INTEL_COMPILER)) +#if defined(_MSC_VER) && (_MSC_VER < 1900) && (!defined(__INTEL_COMPILER)) #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ using Base::operator =; #elif defined(__clang__) // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653) @@ -319,8 +610,11 @@ } #endif -#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ - EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) +/** \internal + * \brief Macro to manually inherit assignment operators. + * This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined. + */ +#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) /** * Just a side note. Commenting within defines works only by documenting @@ -392,6 +686,8 @@ #define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \ : ((int)a >= (int)b) ? (int)a : (int)b) +#define EIGEN_ADD_COST(a,b) int(a)==Dynamic || int(b)==Dynamic ? 
Dynamic : int(a)+int(b) + #define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b))) #define EIGEN_IMPLIES(a,b) (!(a) || (b)) diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/util/Memory.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/util/Memory.h index 451535a0c..bc1ea69ed 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/util/Memory.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/util/Memory.h @@ -63,7 +63,7 @@ // Currently, let's include it only on unix systems: #if defined(__unix__) || defined(__unix) #include - #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0) + #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || (defined __PGI) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0) #define EIGEN_HAS_POSIX_MEMALIGN 1 #endif #endif @@ -272,12 +272,12 @@ inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size) // The defined(_mm_free) is just here to verify that this MSVC version // implements _mm_malloc/_mm_free based on the corresponding _aligned_ // functions. This may not always be the case and we just try to be safe. - #if defined(_MSC_VER) && defined(_mm_free) + #if defined(_MSC_VER) && (!defined(_WIN32_WCE)) && defined(_mm_free) result = _aligned_realloc(ptr,new_size,16); #else result = generic_aligned_realloc(ptr,new_size,old_size); #endif -#elif defined(_MSC_VER) +#elif defined(_MSC_VER) && (!defined(_WIN32_WCE)) result = _aligned_realloc(ptr,new_size,16); #else result = handmade_aligned_realloc(ptr,new_size,old_size); @@ -417,6 +417,8 @@ template inline T* conditional_aligned_realloc_new(T* pt template inline T* conditional_aligned_new_auto(size_t size) { + if(size==0) + return 0; // short-cut. Also fixes Bug 884 check_size_for_overflow(size); T *result = reinterpret_cast(conditional_aligned_malloc(sizeof(T)*size)); if(NumTraits::RequireInitialization) @@ -464,9 +466,8 @@ template inline void conditional_aligned_delete_auto(T * template static inline Index first_aligned(const Scalar* array, Index size) { - enum { PacketSize = packet_traits::size, - PacketAlignedMask = PacketSize-1 - }; + static const Index PacketSize = packet_traits::size; + static const Index PacketAlignedMask = PacketSize-1; if(PacketSize==1) { @@ -522,7 +523,7 @@ template struct smart_copy_helper { // you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA // to the appropriate stack allocation function #ifndef EIGEN_ALLOCA - #if (defined __linux__) + #if (defined __linux__) || (defined __APPLE__) || (defined alloca) #define EIGEN_ALLOCA alloca #elif defined(_MSC_VER) #define EIGEN_ALLOCA _alloca @@ -578,7 +579,7 @@ template class aligned_stack_memory_handler */ #ifdef EIGEN_ALLOCA - #ifdef __arm__ + #if defined(__arm__) || defined(_WIN32) #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast((reinterpret_cast(EIGEN_ALLOCA(SIZE+16)) & ~(size_t(15))) + 16) #else #define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA @@ -612,7 +613,6 @@ template class aligned_stack_memory_handler void* operator new(size_t size, const std::nothrow_t&) throw() { \ try { return Eigen::internal::conditional_aligned_malloc(size); } \ catch (...) 
{ return 0; } \ - return 0; \ } #else #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ @@ -630,11 +630,15 @@ template class aligned_stack_memory_handler } \ void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free(ptr); } \ void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free(ptr); } \ + void operator delete(void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free(ptr); } \ + void operator delete[](void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free(ptr); } \ /* in-place new and delete. since (at least afaik) there is no actual */ \ /* memory allocated we can safely let the default implementation handle */ \ /* this particular case. */ \ static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \ + static void *operator new[](size_t size, void* ptr) { return ::operator new[](size,ptr); } \ void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \ + void operator delete[](void * memory, void *ptr) throw() { return ::operator delete[](memory,ptr); } \ /* nothrow-new (returns zero instead of std::bad_alloc) */ \ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ void operator delete(void *ptr, const std::nothrow_t&) throw() { \ @@ -729,15 +733,6 @@ public: ::new( p ) T( value ); } - // Support for c++11 -#if (__cplusplus >= 201103L) - template - void construct(pointer p, Args&&... args) - { - ::new(p) T(std::forward(args)...); - } -#endif - void destroy( pointer p ) { p->~T(); @@ -784,9 +779,9 @@ namespace internal { #ifdef EIGEN_CPUID -inline bool cpuid_is_vendor(int abcd[4], const char* vendor) +inline bool cpuid_is_vendor(int abcd[4], const int vendor[3]) { - return abcd[1]==(reinterpret_cast(vendor))[0] && abcd[3]==(reinterpret_cast(vendor))[1] && abcd[2]==(reinterpret_cast(vendor))[2]; + return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2]; } inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3) @@ -928,13 +923,16 @@ inline void queryCacheSizes(int& l1, int& l2, int& l3) { #ifdef EIGEN_CPUID int abcd[4]; + const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e}; + const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163}; + const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!" 
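// The three integers in each array above are just the 12-character CPUID
// vendor string packed little-endian into the EBX, EDX and ECX registers
// returned by CPUID leaf 0, which is why cpuid_is_vendor() compares abcd[1],
// abcd[3] and abcd[2] (in that order) against vendor[0], vendor[1] and
// vendor[2]; using integer arrays instead of string literals also avoids the
// char*-to-int* casts of the previous version. A standalone sanity check
// (illustrative only; check_vendor_encoding is a made-up name, and it assumes
// a 32-bit int on a little-endian target such as x86):
#include <cassert>
#include <cstring>

static void check_vendor_encoding()
{
  const char vendor[] = "GenuineIntel";  // 12 characters + terminating NUL
  int ebx = 0, edx = 0, ecx = 0;
  std::memcpy(&ebx, vendor + 0, 4);      // "Genu" -> 0x756e6547
  std::memcpy(&edx, vendor + 4, 4);      // "ineI" -> 0x49656e69
  std::memcpy(&ecx, vendor + 8, 4);      // "ntel" -> 0x6c65746e
  assert(ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e);
}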
// identify the CPU vendor EIGEN_CPUID(abcd,0x0,0); int max_std_funcs = abcd[1]; - if(cpuid_is_vendor(abcd,"GenuineIntel")) + if(cpuid_is_vendor(abcd,GenuineIntel)) queryCacheSizes_intel(l1,l2,l3,max_std_funcs); - else if(cpuid_is_vendor(abcd,"AuthenticAMD") || cpuid_is_vendor(abcd,"AMDisbetter!")) + else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_)) queryCacheSizes_amd(l1,l2,l3); else // by default let's use Intel's API diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/util/StaticAssert.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/util/StaticAssert.h index 8872c5b64..e53d2b878 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/util/StaticAssert.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/util/StaticAssert.h @@ -26,7 +26,7 @@ #ifndef EIGEN_NO_STATIC_ASSERT - #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(_MSC_VER) && (_MSC_VER >= 1600)) + #if __has_feature(cxx_static_assert) || (defined(__cplusplus) && __cplusplus >= 201103L) || (EIGEN_COMP_MSVC >= 1600) // if native static_assert is enabled, let's use it #define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG); @@ -90,7 +90,9 @@ YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED, THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE, THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH, - OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG + OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG, + IMPLICIT_CONVERSION_TO_SCALAR_IS_FOR_INNER_PRODUCT_ONLY, + STORAGE_LAYOUT_DOES_NOT_MATCH }; }; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Core/util/XprHelper.h b/ground/gcs/src/libs/eigen/Eigen/src/Core/util/XprHelper.h index 3c4773054..d05f8e5f6 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Core/util/XprHelper.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Core/util/XprHelper.h @@ -341,7 +341,7 @@ template::type> str }; template -T* const_cast_ptr(const T* ptr) +inline T* const_cast_ptr(const T* ptr) { return const_cast(ptr); } @@ -366,17 +366,17 @@ struct dense_xpr_base /** \internal Helper base class to add a scalar multiple operator * overloads for complex types */ -template::value > -struct special_scalar_op_base : public DenseCoeffsBase +struct special_scalar_op_base : public BaseType { // dummy operator* so that the // "using special_scalar_op_base::operator*" compiles void operator*() const; }; -template -struct special_scalar_op_base : public DenseCoeffsBase +template +struct special_scalar_op_base : public BaseType { const CwiseUnaryOp, Derived> operator*(const OtherScalar& scalar) const diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Eigen2Support/LeastSquares.h b/ground/gcs/src/libs/eigen/Eigen/src/Eigen2Support/LeastSquares.h index 0e6fdb488..7992d4944 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Eigen2Support/LeastSquares.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Eigen2Support/LeastSquares.h @@ -147,7 +147,6 @@ void fitHyperplane(int numPoints, // compute the covariance matrix CovMatrixType covMat = CovMatrixType::Zero(size, size); - VectorType remean = VectorType::Zero(size); for(int i = 0; i < numPoints; ++i) { VectorType diff = (*(points[i]) - mean).conjugate(); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Eigen2Support/SVD.h b/ground/gcs/src/libs/eigen/Eigen/src/Eigen2Support/SVD.h index 077d26d54..3d03d2288 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Eigen2Support/SVD.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Eigen2Support/SVD.h @@ -512,8 +512,7 @@ template template bool SVD::solve(const MatrixBase &b, ResultType* result) const { - const int rows = m_matU.rows(); - ei_assert(b.rows() == rows); + 
ei_assert(b.rows() == m_matU.rows()); Scalar maxVal = m_sigma.cwise().abs().maxCoeff(); for (int j=0; j class ComplexEigenSolver } protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + EigenvectorType m_eivec; EigenvalueType m_eivalues; ComplexSchur m_schur; @@ -251,6 +257,8 @@ template ComplexEigenSolver& ComplexEigenSolver::compute(const MatrixType& matrix, bool computeEigenvectors) { + check_template_parameters(); + // this code is inspired from Jampack eigen_assert(matrix.cols() == matrix.rows()); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h index 91496ae5b..27aed923c 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/ComplexSchur_MKL.h @@ -45,7 +45,6 @@ ComplexSchur >& \ ComplexSchur >::compute(const Matrix& matrix, bool computeU) \ { \ typedef Matrix MatrixType; \ - typedef MatrixType::Scalar Scalar; \ typedef MatrixType::RealScalar RealScalar; \ typedef std::complex ComplexScalar; \ \ diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/EigenSolver.h b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/EigenSolver.h index 6e7150685..20c59a7a2 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/EigenSolver.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/EigenSolver.h @@ -298,6 +298,13 @@ template class EigenSolver void doComputeEigenvectors(); protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL); + } + MatrixType m_eivec; EigenvalueType m_eivalues; bool m_isInitialized; @@ -364,6 +371,8 @@ template EigenSolver& EigenSolver::compute(const MatrixType& matrix, bool computeEigenvectors) { + check_template_parameters(); + using std::sqrt; using std::abs; eigen_assert(matrix.cols() == matrix.rows()); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h index dc240e13e..956e80d9e 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h @@ -263,6 +263,13 @@ template class GeneralizedEigenSolver } protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL); + } + MatrixType m_eivec; ComplexVectorType m_alphas; VectorType m_betas; @@ -290,6 +297,8 @@ template GeneralizedEigenSolver& GeneralizedEigenSolver::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors) { + check_template_parameters(); + using std::sqrt; using std::abs; eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows()); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealQZ.h b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealQZ.h index 5706eeebe..aa3833eba 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealQZ.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealQZ.h @@ -240,10 +240,10 @@ namespace Eigen { m_S.coeffRef(i,j) = Scalar(0.0); m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint()); m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint()); + // update Q + if (m_computeQZ) + m_Q.applyOnTheRight(i-1,i,G); } - // update Q - if 
(m_computeQZ) - m_Q.applyOnTheRight(i-1,i,G); // kill T(i,i-1) if(m_T.coeff(i,i-1)!=Scalar(0)) { @@ -251,10 +251,10 @@ namespace Eigen { m_T.coeffRef(i,i-1) = Scalar(0.0); m_S.applyOnTheRight(i,i-1,G); m_T.topRows(i).applyOnTheRight(i,i-1,G); + // update Z + if (m_computeQZ) + m_Z.applyOnTheLeft(i,i-1,G.adjoint()); } - // update Z - if (m_computeQZ) - m_Z.applyOnTheLeft(i,i-1,G.adjoint()); } } } @@ -313,7 +313,7 @@ namespace Eigen { using std::abs; using std::sqrt; const Index dim=m_S.cols(); - if (abs(m_S.coeff(i+1,i)==Scalar(0))) + if (abs(m_S.coeff(i+1,i))==Scalar(0)) return; Index z = findSmallDiagEntry(i,i+1); if (z==i-1) diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealSchur.h b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealSchur.h index 64d136341..16d387537 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealSchur.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealSchur.h @@ -234,7 +234,7 @@ template class RealSchur typedef Matrix Vector3s; Scalar computeNormOfT(); - Index findSmallSubdiagEntry(Index iu, const Scalar& norm); + Index findSmallSubdiagEntry(Index iu); void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift); void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo); void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector); @@ -286,7 +286,7 @@ RealSchur& RealSchur::computeFromHessenberg(const HessMa { while (iu >= 0) { - Index il = findSmallSubdiagEntry(iu, norm); + Index il = findSmallSubdiagEntry(iu); // Check for convergence if (il == iu) // One root found @@ -343,16 +343,14 @@ inline typename MatrixType::Scalar RealSchur::computeNormOfT() /** \internal Look for single small sub-diagonal element and returns its index */ template -inline typename MatrixType::Index RealSchur::findSmallSubdiagEntry(Index iu, const Scalar& norm) +inline typename MatrixType::Index RealSchur::findSmallSubdiagEntry(Index iu) { using std::abs; Index res = iu; while (res > 0) { Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res)); - if (s == 0.0) - s = norm; - if (abs(m_matT.coeff(res,res-1)) < NumTraits::epsilon() * s) + if (abs(m_matT.coeff(res,res-1)) <= NumTraits::epsilon() * s) break; res--; } @@ -457,9 +455,7 @@ inline void RealSchur::initFrancisQRStep(Index il, Index iu, const V const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2))); const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1))); if (abs(lhs) < NumTraits::epsilon() * rhs) - { break; - } } } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h index ad9736460..c3089b468 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/RealSchur_MKL.h @@ -44,10 +44,6 @@ template<> inline \ RealSchur >& \ RealSchur >::compute(const Matrix& matrix, bool computeU) \ { \ - typedef Matrix MatrixType; \ - typedef MatrixType::Scalar Scalar; \ - typedef MatrixType::RealScalar RealScalar; \ -\ eigen_assert(matrix.cols() == matrix.rows()); \ \ lapack_int n = matrix.cols(), sdim, info; \ diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h index 3993046a8..1131c8af8 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +++ 
b/ground/gcs/src/libs/eigen/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h @@ -80,6 +80,8 @@ template class SelfAdjointEigenSolver /** \brief Scalar type for matrices of type \p _MatrixType. */ typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; + + typedef Matrix EigenvectorsType; /** \brief Real scalar type for \p _MatrixType. * @@ -225,7 +227,7 @@ template class SelfAdjointEigenSolver * * \sa eigenvalues() */ - const MatrixType& eigenvectors() const + const EigenvectorsType& eigenvectors() const { eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); @@ -351,7 +353,12 @@ template class SelfAdjointEigenSolver #endif // EIGEN2_SUPPORT protected: - MatrixType m_eivec; + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + + EigenvectorsType m_eivec; RealVectorType m_eivalues; typename TridiagonalizationType::SubDiagonalType m_subdiag; ComputationInfo m_info; @@ -376,7 +383,7 @@ template class SelfAdjointEigenSolver * "implicit symmetric QR step with Wilkinson shift" */ namespace internal { -template +template static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n); } @@ -384,6 +391,8 @@ template SelfAdjointEigenSolver& SelfAdjointEigenSolver ::compute(const MatrixType& matrix, int options) { + check_template_parameters(); + using std::abs; eigen_assert(matrix.cols() == matrix.rows()); eigen_assert((options&~(EigVecMask|GenEigMask))==0 @@ -406,7 +415,7 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver // declare some aliases RealVectorType& diag = m_eivalues; - MatrixType& mat = m_eivec; + EigenvectorsType& mat = m_eivec; // map the matrix coefficients to [-1:1] to avoid over- and underflow. mat = matrix.template triangularView(); @@ -442,7 +451,7 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver while (start>0 && m_subdiag[start-1]!=0) start--; - internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n); + internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n); } if (iter <= m_maxIterations * n) @@ -490,7 +499,13 @@ template struct direct_selfadjoint_eigenvalues struct direct_selfadjoint_eigenvalues Scalar(0)) + Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3; + if(a_over_3 Scalar(0)) + Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b; + if(q 0, atan2 is in [0, pi] and theta is in [0, pi/3] Scalar cos_theta = cos(theta); Scalar sin_theta = sin(theta); - roots(0) = c2_over_3 + Scalar(2)*rho*cos_theta; - roots(1) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); - roots(2) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); - - // Sort in increasing order. 
- if (roots(0) >= roots(1)) - std::swap(roots(0),roots(1)); - if (roots(1) >= roots(2)) - { - std::swap(roots(1),roots(2)); - if (roots(0) >= roots(1)) - std::swap(roots(0),roots(1)); - } + // roots are already sorted, since cos is monotonically decreasing on [0, pi] + roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3) + roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3) + roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta; } - + + static inline bool extract_kernel(MatrixType& mat, Ref res, Ref representative) + { + using std::abs; + Index i0; + // Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal): + mat.diagonal().cwiseAbs().maxCoeff(&i0); + // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector, + // so let's save it: + representative = mat.col(i0); + Scalar n0, n1; + VectorType c0, c1; + n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm(); + n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm(); + if(n0>n1) res = c0/std::sqrt(n0); + else res = c1/std::sqrt(n1); + + return true; + } + static inline void run(SolverType& solver, const MatrixType& mat, int options) { - using std::sqrt; eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows()); eigen_assert((options&~(EigVecMask|GenEigMask))==0 && (options&EigVecMask)!=EigVecMask && "invalid option parameter"); bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; - MatrixType& eivecs = solver.m_eivec; + EigenvectorsType& eivecs = solver.m_eivec; VectorType& eivals = solver.m_eivalues; - // map the matrix coefficients to [-1:1] to avoid over- and underflow. - Scalar scale = mat.cwiseAbs().maxCoeff(); - MatrixType scaledMat = mat / scale; + // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow. + Scalar shift = mat.trace() / Scalar(3); + // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later + MatrixType scaledMat = mat.template selfadjointView(); + scaledMat.diagonal().array() -= shift; + Scalar scale = scaledMat.cwiseAbs().maxCoeff(); + if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations // compute the eigenvalues computeRoots(scaledMat,eivals); - // compute the eigen vectors + // compute the eigenvectors if(computeEigenvectors) { - Scalar safeNorm2 = Eigen::NumTraits::epsilon(); - safeNorm2 *= safeNorm2; if((eivals(2)-eivals(0))<=Eigen::NumTraits::epsilon()) { + // All three eigenvalues are numerically the same eivecs.setIdentity(); } else { - scaledMat = scaledMat.template selfadjointView(); MatrixType tmp; tmp = scaledMat; + // Compute the eigenvector of the most distinct eigenvalue Scalar d0 = eivals(2) - eivals(1); Scalar d1 = eivals(1) - eivals(0); - int k = d0 > d1 ? 2 : 0; - d0 = d0 > d1 ? 
d1 : d0; - - tmp.diagonal().array () -= eivals(k); - VectorType cross; - Scalar n; - n = (cross = tmp.row(0).cross(tmp.row(1))).squaredNorm(); - - if(n>safeNorm2) - eivecs.col(k) = cross / sqrt(n); - else + Index k(0), l(2); + if(d0 > d1) { - n = (cross = tmp.row(0).cross(tmp.row(2))).squaredNorm(); - - if(n>safeNorm2) - eivecs.col(k) = cross / sqrt(n); - else - { - n = (cross = tmp.row(1).cross(tmp.row(2))).squaredNorm(); - - if(n>safeNorm2) - eivecs.col(k) = cross / sqrt(n); - else - { - // the input matrix and/or the eigenvaues probably contains some inf/NaN, - // => exit - // scale back to the original size. - eivals *= scale; - - solver.m_info = NumericalIssue; - solver.m_isInitialized = true; - solver.m_eigenvectorsOk = computeEigenvectors; - return; - } - } + std::swap(k,l); + d0 = d1; } - tmp = scaledMat; - tmp.diagonal().array() -= eivals(1); - - if(d0<=Eigen::NumTraits::epsilon()) - eivecs.col(1) = eivecs.col(k).unitOrthogonal(); - else + // Compute the eigenvector of index k { - n = (cross = eivecs.col(k).cross(tmp.row(0).normalized())).squaredNorm(); - if(n>safeNorm2) - eivecs.col(1) = cross / sqrt(n); - else - { - n = (cross = eivecs.col(k).cross(tmp.row(1))).squaredNorm(); - if(n>safeNorm2) - eivecs.col(1) = cross / sqrt(n); - else - { - n = (cross = eivecs.col(k).cross(tmp.row(2))).squaredNorm(); - if(n>safeNorm2) - eivecs.col(1) = cross / sqrt(n); - else - { - // we should never reach this point, - // if so the last two eigenvalues are likely to ve very closed to each other - eivecs.col(1) = eivecs.col(k).unitOrthogonal(); - } - } - } - - // make sure that eivecs[1] is orthogonal to eivecs[2] - Scalar d = eivecs.col(1).dot(eivecs.col(k)); - eivecs.col(1) = (eivecs.col(1) - d * eivecs.col(k)).normalized(); + tmp.diagonal().array () -= eivals(k); + // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector. + extract_kernel(tmp, eivecs.col(k), eivecs.col(l)); } - eivecs.col(k==2 ? 0 : 2) = eivecs.col(k).cross(eivecs.col(1)).normalized(); + // Compute eigenvector of index l + if(d0<=2*Eigen::NumTraits::epsilon()*d1) + { + // If d0 is too small, then the two other eigenvalues are numerically the same, + // and thus we only have to ortho-normalize the near orthogonal vector we saved above. + eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l); + eivecs.col(l).normalize(); + } + else + { + tmp = scaledMat; + tmp.diagonal().array () -= eivals(l); + + VectorType dummy; + extract_kernel(tmp, eivecs.col(l), dummy); + } + + // Compute last eigenvector from the other two + eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized(); } } + // Rescale back to the original size. 
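// To see why rescaling only the eigenvalues is enough: the computation above
// ran on B = (A - shift*I) / scale with shift = trace(A)/3, so if B*v = mu*v
// then
//     A*v = (scale*B + shift*I)*v = (scale*mu + shift)*v,
// i.e. each eigenvalue maps back as lambda = scale*mu + shift (the two
// statements below), while A and B share the same eigenvectors, which
// therefore need no fix-up.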
eivals *= scale; + eivals.array() += shift; solver.m_info = Success; solver.m_isInitialized = true; @@ -665,11 +655,12 @@ template struct direct_selfadjoint_eigenvalues struct direct_selfadjoint_eigenvalues struct direct_selfadjoint_eigenvaluesc2) + if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits::epsilon()) { - eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0); - eivecs.col(1) /= sqrt(a2+b2); + eivecs.setIdentity(); } else { - eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0); - eivecs.col(1) /= sqrt(c2+b2); - } + scaledMat.diagonal().array () -= eivals(1); + Scalar a2 = numext::abs2(scaledMat(0,0)); + Scalar c2 = numext::abs2(scaledMat(1,1)); + Scalar b2 = numext::abs2(scaledMat(1,0)); + if(a2>c2) + { + eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0); + eivecs.col(1) /= sqrt(a2+b2); + } + else + { + eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0); + eivecs.col(1) /= sqrt(c2+b2); + } - eivecs.col(0) << eivecs.col(1).unitOrthogonal(); + eivecs.col(0) << eivecs.col(1).unitOrthogonal(); + } } // Rescale back to the original size. @@ -736,7 +736,7 @@ SelfAdjointEigenSolver& SelfAdjointEigenSolver } namespace internal { -template +template static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n) { using std::abs; @@ -788,8 +788,7 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta // apply the givens rotation to the unit matrix Q = Q * G if (matrixQ) { - // FIXME if StorageOrder == RowMajor this operation is not very efficient - Map > q(matrixQ,n,n); + Map > q(matrixQ,n,n); q.applyOnTheRight(k,k+1,rot); } } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/AlignedBox.h b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/AlignedBox.h index 8e186d57a..7e1cd9eb7 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/AlignedBox.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/AlignedBox.h @@ -19,10 +19,12 @@ namespace Eigen { * * \brief An axis aligned box * - * \param _Scalar the type of the scalar coefficients - * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. + * \tparam _Scalar the type of the scalar coefficients + * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. * * This class represents an axis aligned box as a pair of the minimal and maximal corners. + * \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty(). + * \sa alignedboxtypedefs */ template class AlignedBox @@ -40,18 +42,21 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */ enum CornerType { - /** 1D names */ + /** 1D names @{ */ Min=0, Max=1, + /** @} */ - /** Added names for 2D */ + /** Identifier for 2D corner @{ */ BottomLeft=0, BottomRight=1, TopLeft=2, TopRight=3, + /** @} */ - /** Added names for 3D */ + /** Identifier for 3D corner @{ */ BottomLeftFloor=0, BottomRightFloor=1, TopLeftFloor=2, TopRightFloor=3, BottomLeftCeil=4, BottomRightCeil=5, TopLeftCeil=6, TopRightCeil=7 + /** @} */ }; @@ -63,34 +68,33 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim) { setEmpty(); } - /** Constructs a box with extremities \a _min and \a _max. */ + /** Constructs a box with extremities \a _min and \a _max. 
+ * \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */ template inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {} /** Constructs a box containing a single point \a p. */ template - inline explicit AlignedBox(const MatrixBase& a_p) - { - typename internal::nested::type p(a_p.derived()); - m_min = p; - m_max = p; - } + inline explicit AlignedBox(const MatrixBase& p) : m_min(p), m_max(m_min) + { } ~AlignedBox() {} /** \returns the dimension in which the box holds */ inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); } - /** \deprecated use isEmpty */ + /** \deprecated use isEmpty() */ inline bool isNull() const { return isEmpty(); } - /** \deprecated use setEmpty */ + /** \deprecated use setEmpty() */ inline void setNull() { setEmpty(); } - /** \returns true if the box is empty. */ + /** \returns true if the box is empty. + * \sa setEmpty */ inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); } - /** Makes \c *this an empty box. */ + /** Makes \c *this an empty box. + * \sa isEmpty */ inline void setEmpty() { m_min.setConstant( ScalarTraits::highest() ); @@ -159,7 +163,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) * a uniform distribution */ inline VectorType sample() const { - VectorType r; + VectorType r(dim()); for(Index d=0; d - inline bool contains(const MatrixBase& a_p) const + inline bool contains(const MatrixBase& p) const { - typename internal::nested::type p(a_p.derived()); - return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all(); + typename internal::nested::type p_n(p.derived()); + return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all(); } /** \returns true if the box \a b is entirely inside the box \c *this. */ inline bool contains(const AlignedBox& b) const { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); } - /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */ + /** \returns true if the box \a b is intersecting the box \c *this. + * \sa intersection, clamp */ + inline bool intersects(const AlignedBox& b) const + { return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); } + + /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. + * \sa extend(const AlignedBox&) */ template - inline AlignedBox& extend(const MatrixBase& a_p) + inline AlignedBox& extend(const MatrixBase& p) { - typename internal::nested::type p(a_p.derived()); - m_min = m_min.cwiseMin(p); - m_max = m_max.cwiseMax(p); + typename internal::nested::type p_n(p.derived()); + m_min = m_min.cwiseMin(p_n); + m_max = m_max.cwiseMax(p_n); return *this; } - /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */ + /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. + * \sa merged, extend(const MatrixBase&) */ inline AlignedBox& extend(const AlignedBox& b) { m_min = m_min.cwiseMin(b.m_min); @@ -203,7 +214,9 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) return *this; } - /** Clamps \c *this by the box \a b and returns a reference to \c *this. */ + /** Clamps \c *this by the box \a b and returns a reference to \c *this. 
+ * \note If the boxes don't intersect, the resulting box is empty. + * \sa intersection(), intersects() */ inline AlignedBox& clamp(const AlignedBox& b) { m_min = m_min.cwiseMax(b.m_min); @@ -211,11 +224,15 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) return *this; } - /** Returns an AlignedBox that is the intersection of \a b and \c *this */ + /** Returns an AlignedBox that is the intersection of \a b and \c *this + * \note If the boxes don't intersect, the resulting box is empty. + * \sa intersects(), clamp, contains() */ inline AlignedBox intersection(const AlignedBox& b) const {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); } - /** Returns an AlignedBox that is the union of \a b and \c *this */ + /** Returns an AlignedBox that is the union of \a b and \c *this. + * \note Merging with an empty box may result in a box bigger than \c *this. + * \sa extend(const AlignedBox&) */ inline AlignedBox merged(const AlignedBox& b) const { return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); } @@ -231,20 +248,20 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) /** \returns the squared distance between the point \a p and the box \c *this, * and zero if \a p is inside the box. - * \sa exteriorDistance() + * \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&) */ template - inline Scalar squaredExteriorDistance(const MatrixBase& a_p) const; + inline Scalar squaredExteriorDistance(const MatrixBase& p) const; /** \returns the squared distance between the boxes \a b and \c *this, * and zero if the boxes intersect. - * \sa exteriorDistance() + * \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&) */ inline Scalar squaredExteriorDistance(const AlignedBox& b) const; /** \returns the distance between the point \a p and the box \c *this, * and zero if \a p is inside the box. - * \sa squaredExteriorDistance() + * \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&) */ template inline NonInteger exteriorDistance(const MatrixBase& p) const @@ -252,7 +269,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) /** \returns the distance between the boxes \a b and \c *this, * and zero if the boxes intersect. - * \sa squaredExteriorDistance() + * \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&) */ inline NonInteger exteriorDistance(const AlignedBox& b) const { using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/AngleAxis.h b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/AngleAxis.h index 553d38c74..a8d3cdcfa 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/AngleAxis.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/AngleAxis.h @@ -83,10 +83,17 @@ public: template inline explicit AngleAxis(const MatrixBase& m) { *this = m; } + /** \returns the value of the rotation angle in radian */ Scalar angle() const { return m_angle; } + /** \returns a read-write reference to the stored angle in radian */ Scalar& angle() { return m_angle; } + /** \returns the rotation axis */ const Vector3& axis() const { return m_axis; } + /** \returns a read-write reference to the stored rotation axis. + * + * \warning The rotation axis must remain a \b unit vector. 
+ */ Vector3& axis() { return m_axis; } /** Concatenates two rotations */ @@ -131,7 +138,7 @@ public: m_angle = Scalar(other.angle()); } - static inline const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); } + static inline const AngleAxis Identity() { return AngleAxis(Scalar(0), Vector3::UnitX()); } /** \returns \c true if \c *this is approximately equal to \a other, within the precision * determined by \a prec. @@ -165,8 +172,8 @@ AngleAxis& AngleAxis::operator=(const QuaternionBase::dummy_precision()*NumTraits::dummy_precision()) { - m_angle = 0; - m_axis << 1, 0, 0; + m_angle = Scalar(0); + m_axis << Scalar(1), Scalar(0), Scalar(0); } else { diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/EulerAngles.h b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/EulerAngles.h index 97984d590..82802fb43 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/EulerAngles.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/EulerAngles.h @@ -28,7 +28,7 @@ namespace Eigen { * * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode * This corresponds to the right-multiply conventions (with right hand side frames). * - * The returned angles are in the ranges [0:pi]x[0:pi]x[-pi:pi]. + * The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi]. * * \sa class AngleAxis */ diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Homogeneous.h b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Homogeneous.h index 00e71d190..372e422b9 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Homogeneous.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Homogeneous.h @@ -79,7 +79,7 @@ template class Homogeneous { if( (int(Direction)==Vertical && row==m_matrix.rows()) || (int(Direction)==Horizontal && col==m_matrix.cols())) - return 1; + return Scalar(1); return m_matrix.coeff(row, col); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Hyperplane.h b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Hyperplane.h index aeff43fef..00b7c4300 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Hyperplane.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Hyperplane.h @@ -100,7 +100,17 @@ public: { EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3) Hyperplane result(p0.size()); - result.normal() = (p2 - p0).cross(p1 - p0).normalized(); + VectorType v0(p2 - p0), v1(p1 - p0); + result.normal() = v0.cross(v1); + RealScalar norm = result.normal().norm(); + if(norm <= v0.norm() * v1.norm() * NumTraits::epsilon()) + { + Matrix m; m << v0.transpose(), v1.transpose(); + JacobiSVD > svd(m, ComputeFullV); + result.normal() = svd.matrixV().col(2); + } + else + result.normal() /= norm; result.offset() = -p0.dot(result.normal()); return result; } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/ParametrizedLine.h b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/ParametrizedLine.h index 77fa228e6..cf3252df9 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/ParametrizedLine.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/ParametrizedLine.h @@ -129,7 +129,7 @@ public: * determined by \a prec. 
* * \sa MatrixBase::isApprox() */ - bool isApprox(const ParametrizedLine& other, typename NumTraits::Real prec = NumTraits::dummy_precision()) const + bool isApprox(const ParametrizedLine& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); } protected: diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Quaternion.h b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Quaternion.h index e135f2b66..25ed17bb6 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Quaternion.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Quaternion.h @@ -102,11 +102,11 @@ public: /** \returns a quaternion representing an identity rotation * \sa MatrixBase::Identity() */ - static inline Quaternion Identity() { return Quaternion(1, 0, 0, 0); } + static inline Quaternion Identity() { return Quaternion(Scalar(1), Scalar(0), Scalar(0), Scalar(0)); } /** \sa QuaternionBase::Identity(), MatrixBase::setIdentity() */ - inline QuaternionBase& setIdentity() { coeffs() << 0, 0, 0, 1; return *this; } + inline QuaternionBase& setIdentity() { coeffs() << Scalar(0), Scalar(0), Scalar(0), Scalar(1); return *this; } /** \returns the squared norm of the quaternion's coefficients * \sa QuaternionBase::norm(), MatrixBase::squaredNorm() @@ -150,10 +150,6 @@ public: /** \returns the conjugated quaternion */ Quaternion conjugate() const; - /** \returns an interpolation for a constant motion between \a other and \c *this - * \a t in [0;1] - * see http://en.wikipedia.org/wiki/Slerp - */ template Quaternion slerp(const Scalar& t, const QuaternionBase& other) const; /** \returns \c true if \c *this is approximately equal to \a other, within the precision @@ -165,7 +161,7 @@ public: { return coeffs().isApprox(other.coeffs(), prec); } /** return the result vector of \a v through the rotation*/ - EIGEN_STRONG_INLINE Vector3 _transformVector(Vector3 v) const; + EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const; /** \returns \c *this with scalar type casted to \a NewScalarType * @@ -194,11 +190,11 @@ public: * \brief The quaternion class used to represent 3D orientations and rotations * * \tparam _Scalar the scalar type, i.e., the type of the coefficients - * \tparam _Options controls the memory alignement of the coeffecients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign. + * \tparam _Options controls the memory alignment of the coefficients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign. * * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of * orientations and rotations of objects in three dimensions. Compared to other representations - * like Euler angles or 3x3 matrices, quatertions offer the following advantages: + * like Euler angles or 3x3 matrices, quaternions offer the following advantages: * \li \b compact storage (4 scalars) * \li \b efficient to compose (28 flops), * \li \b stable spherical interpolation @@ -207,6 +203,8 @@ public: * \li \c Quaternionf for \c float * \li \c Quaterniond for \c double * + * \warning Operations interpreting the quaternion as rotation have undefined behavior if the quaternion is not normalized. 
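To illustrate the normalization warning added above, a small sketch of typical quaternion usage; the rotation values are arbitrary examples, not part of the upstream change:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  // Quaternions built from AngleAxis are already unit quaternions.
  Quaternionf q1(AngleAxisf(0.3f, Vector3f::UnitX()));
  Quaternionf q2(AngleAxisf(1.2f, Vector3f::UnitY()));
  // A quaternion filled in by hand should be normalized before it is used as a rotation.
  Quaternionf q3(0.9f, 0.1f, 0.2f, 0.3f);  // w, x, y, z
  q3.normalize();
  Vector3f v = q3 * Vector3f::UnitZ();     // rotate a vector
  Quaternionf q = q1.slerp(0.5f, q2);      // spherical interpolation, t in [0,1]
  std::cout << v.transpose() << "\n" << q.coeffs().transpose() << "\n";
  return 0;
}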
+ * * \sa class AngleAxis, class Transform */ @@ -233,7 +231,7 @@ class Quaternion : public QuaternionBase > public: typedef _Scalar Scalar; - EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion) using Base::operator*=; typedef typename internal::traits::Coefficients Coefficients; @@ -343,12 +341,12 @@ class Map, _Options > public: typedef _Scalar Scalar; typedef typename internal::traits::Coefficients Coefficients; - EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) using Base::operator*=; /** Constructs a Mapped Quaternion object from the pointer \a coeffs * - * The pointer \a coeffs must reference the four coeffecients of Quaternion in the following order: + * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order: * \code *coeffs == {x, y, z, w} \endcode * * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */ @@ -380,12 +378,12 @@ class Map, _Options > public: typedef _Scalar Scalar; typedef typename internal::traits::Coefficients Coefficients; - EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) using Base::operator*=; /** Constructs a Mapped Quaternion object from the pointer \a coeffs * - * The pointer \a coeffs must reference the four coeffecients of Quaternion in the following order: + * The pointer \a coeffs must reference the four coefficients of Quaternion in the following order: * \code *coeffs == {x, y, z, w} \endcode * * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */ @@ -399,16 +397,16 @@ class Map, _Options > }; /** \ingroup Geometry_Module - * Map an unaligned array of single precision scalar as a quaternion */ + * Map an unaligned array of single precision scalars as a quaternion */ typedef Map, 0> QuaternionMapf; /** \ingroup Geometry_Module - * Map an unaligned array of double precision scalar as a quaternion */ + * Map an unaligned array of double precision scalars as a quaternion */ typedef Map, 0> QuaternionMapd; /** \ingroup Geometry_Module - * Map a 16-bits aligned array of double precision scalars as a quaternion */ + * Map a 16-byte aligned array of single precision scalars as a quaternion */ typedef Map, Aligned> QuaternionMapAlignedf; /** \ingroup Geometry_Module - * Map a 16-bits aligned array of double precision scalars as a quaternion */ + * Map a 16-byte aligned array of double precision scalars as a quaternion */ typedef Map, Aligned> QuaternionMapAlignedd; /*************************************************************************** @@ -463,12 +461,12 @@ EIGEN_STRONG_INLINE Derived& QuaternionBase::operator*= (const Quaterni */ template EIGEN_STRONG_INLINE typename QuaternionBase::Vector3 -QuaternionBase::_transformVector(Vector3 v) const +QuaternionBase::_transformVector(const Vector3& v) const { // Note that this algorithm comes from the optimization by hand // of the conversion to a Matrix followed by a Matrix/Vector product. // It appears to be much faster than the common algorithm found - // in the litterature (30 versus 39 flops). It also requires two + // in the literature (30 versus 39 flops). It also requires two // Vector3 as temporaries. 
Vector3 uv = this->vec().cross(v); uv += uv; @@ -579,7 +577,7 @@ inline Derived& QuaternionBase::setFromTwoVectors(const MatrixBase accuraletly compute the rotation axis by computing the + // => accurately compute the rotation axis by computing the // intersection of the two planes. This is done by solving: // x^T v0 = 0 // x^T v1 = 0 @@ -588,7 +586,7 @@ inline Derived& QuaternionBase::setFromTwoVectors(const MatrixBase::dummy_precision()) { - c = max(c,-1); + c = (max)(c,Scalar(-1)); Matrix m; m << v0.transpose(), v1.transpose(); JacobiSVD > svd(m, ComputeFullV); Vector3 axis = svd.matrixV().col(2); @@ -639,7 +637,7 @@ inline Quaternion::Scalar> QuaternionBasesquaredNorm(); - if (n2 > 0) + if (n2 > Scalar(0)) return Quaternion(conjugate().coeffs() / n2); else { @@ -669,16 +667,19 @@ template inline typename internal::traits::Scalar QuaternionBase::angularDistance(const QuaternionBase& other) const { - using std::acos; + using std::atan2; using std::abs; - double d = abs(this->dot(other)); - if (d>=1.0) - return Scalar(0); - return static_cast(2 * acos(d)); + Quaternion d = (*this) * other.conjugate(); + return Scalar(2) * atan2( d.vec().norm(), abs(d.w()) ); } + + /** \returns the spherical linear interpolation between the two quaternions - * \c *this and \a other at the parameter \a t + * \c *this and \a other at the parameter \a t in [0;1]. + * + * This represents an interpolation for a constant motion between \c *this and \a other, + * see also http://en.wikipedia.org/wiki/Slerp. */ template template @@ -709,7 +710,7 @@ QuaternionBase::slerp(const Scalar& t, const QuaternionBase(scale0 * coeffs() + scale1 * other.coeffs()); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Rotation2D.h b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Rotation2D.h index 1cac343a5..a2d59fce1 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Rotation2D.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Rotation2D.h @@ -60,6 +60,9 @@ public: /** Construct a 2D counter clock wise rotation from the angle \a a in radian. */ inline Rotation2D(const Scalar& a) : m_angle(a) {} + + /** Default constructor wihtout initialization. The represented rotation is undefined. */ + Rotation2D() {} /** \returns the rotation angle */ inline Scalar angle() const { return m_angle; } @@ -81,10 +84,10 @@ public: /** Applies the rotation to a 2D vector */ Vector2 operator* (const Vector2& vec) const { return toRotationMatrix() * vec; } - + template Rotation2D& fromRotationMatrix(const MatrixBase& m); - Matrix2 toRotationMatrix(void) const; + Matrix2 toRotationMatrix() const; /** \returns the spherical interpolation between \c *this and \a other using * parameter \a t. It is in fact equivalent to a linear interpolation. diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Transform.h b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Transform.h index 887e718d6..0186f3b82 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Transform.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Transform.h @@ -62,6 +62,8 @@ struct transform_construct_from_matrix; template struct transform_take_affine_part; +template struct transform_make_affine; + } // end namespace internal /** \geometry_module \ingroup Geometry_Module @@ -100,15 +102,15 @@ template struct transform_take_affine_part; * * However, unlike a plain matrix, the Transform class provides many features * simplifying both its assembly and usage. 
In particular, it can be composed - * with any other transformations (Transform,Translation,RotationBase,Matrix) + * with any other transformations (Transform,Translation,RotationBase,DiagonalMatrix) * and can be directly used to transform implicit homogeneous vectors. All these * operations are handled via the operator*. For the composition of transformations, * its principle consists to first convert the right/left hand sides of the product * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product. * Of course, internally, operator* tries to perform the minimal number of operations * according to the nature of each terms. Likewise, when applying the transform - * to non homogeneous vectors, the latters are automatically promoted to homogeneous - * one before doing the matrix product. The convertions to homogeneous representations + * to points, the latters are automatically promoted to homogeneous vectors + * before doing the matrix product. The conventions to homogeneous representations * are performed as follow: * * \b Translation t (Dim)x(1): @@ -122,7 +124,7 @@ template struct transform_take_affine_part; * R & 0\\ * 0\,...\,0 & 1 * \end{array} \right) \f$ - * + * + * \b Scaling \b DiagonalMatrix S (Dim)x(Dim): + * \f$ \left( \begin{array}{cc} + * S & 0\\ + * 0\,...\,0 & 1 + * \end{array} \right) \f$ * - * \b Column \b vector v (Dim)x(1): + * \b Column \b point v (Dim)x(1): * \f$ \left( \begin{array}{c} * v\\ * 1 * \end{array} \right) \f$ * - * \b Set \b of \b column \b vectors V1...Vn (Dim)x(n): + * \b Set \b of \b column \b points V1...Vn (Dim)x(n): * \f$ \left( \begin{array}{ccc} * v_1 & ... & v_n\\ * 1 & ... & 1 @@ -194,9 +202,9 @@ public: /** type of the matrix used to represent the linear part of the transformation */ typedef Matrix LinearMatrixType; /** type of read/write reference to the linear part of the transformation */ - typedef Block LinearPart; + typedef Block LinearPart; /** type of read reference to the linear part of the transformation */ - typedef const Block ConstLinearPart; + typedef const Block ConstLinearPart; /** type of read/write reference to the affine part of the transformation */ typedef typename internal::conditional::run(m_matrix); } inline Transform(const Transform& other) @@ -383,26 +390,39 @@ public: /** \returns a writable expression of the translation vector of the transformation */ inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); } - /** \returns an expression of the product between the transform \c *this and a matrix expression \a other + /** \returns an expression of the product between the transform \c *this and a matrix expression \a other. * - * The right hand side \a other might be either: - * \li a vector of size Dim, + * The right-hand-side \a other can be either: * \li an homogeneous vector of size Dim+1, - * \li a set of vectors of size Dim x Dynamic, - * \li a set of homogeneous vectors of size Dim+1 x Dynamic, - * \li a linear transformation matrix of size Dim x Dim, - * \li an affine transformation matrix of size Dim x Dim+1, + * \li a set of homogeneous vectors of size Dim+1 x N, * \li a transformation matrix of size Dim+1 x Dim+1. 
+ * + * Moreover, if \c *this represents an affine transformation (i.e., Mode!=Projective), then \a other can also be: + * \li a point of size Dim (computes: \code this->linear() * other + this->translation()\endcode), + * \li a set of N points as a Dim x N matrix (computes: \code (this->linear() * other).colwise() + this->translation()\endcode), + * + * In all cases, the return type is a matrix or vector of same sizes as the right-hand-side \a other. + * + * If you want to interpret \a other as a linear or affine transformation, then first convert it to a Transform<> type, + * or do your own cooking. + * + * Finally, if you want to apply Affine transformations to vectors, then explicitly apply the linear part only: + * \code + * Affine3f A; + * Vector3f v1, v2; + * v2 = A.linear() * v1; + * \endcode + * */ // note: this function is defined here because some compilers cannot find the respective declaration template - EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl::ResultType + EIGEN_STRONG_INLINE const typename OtherDerived::PlainObject operator * (const EigenBase &other) const { return internal::transform_right_product_impl::run(*this,other.derived()); } /** \returns the product expression of a transformation matrix \a a times a transform \a b * - * The left hand side \a other might be either: + * The left hand side \a other can be either: * \li a linear transformation matrix of size Dim x Dim, * \li an affine transformation matrix of size Dim x Dim+1, * \li a general transformation matrix of size Dim+1 x Dim+1. @@ -530,9 +550,9 @@ public: inline Transform& operator=(const UniformScaling& t); inline Transform& operator*=(const UniformScaling& s) { return scale(s.factor()); } - inline Transform operator*(const UniformScaling& s) const + inline Transform operator*(const UniformScaling& s) const { - Transform res = *this; + Transform res = *this; res.scale(s.factor()); return res; } @@ -591,11 +611,7 @@ public: */ void makeAffine() { - if(int(Mode)!=int(AffineCompact)) - { - matrix().template block<1,Dim>(Dim,0).setZero(); - matrix().coeffRef(Dim,Dim) = Scalar(1); - } + internal::transform_make_affine::run(m_matrix); } /** \internal @@ -1079,6 +1095,24 @@ Transform::fromPositionOrientationScale(const MatrixBas namespace internal { +template +struct transform_make_affine +{ + template + static void run(MatrixType &mat) + { + static const int Dim = MatrixType::ColsAtCompileTime-1; + mat.template block<1,Dim>(Dim,0).setZero(); + mat.coeffRef(Dim,Dim) = typename MatrixType::Scalar(1); + } +}; + +template<> +struct transform_make_affine +{ + template static void run(MatrixType &) { } +}; + // selector needed to avoid taking the inverse of a 3x4 matrix template struct projective_transform_inverse diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Translation.h b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Translation.h index 7fda179cc..2e7798686 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Translation.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Translation.h @@ -162,7 +162,7 @@ public: * determined by \a prec. 
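A short sketch of the point-versus-vector distinction drawn in the rewritten Transform::operator* documentation above; the composed transform and its values are illustrative only:

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  // Compose translation * rotation * uniform scaling into a single affine transform.
  Affine3f T = Translation3f(1.0f, 2.0f, 3.0f)
             * AngleAxisf(0.7853982f, Vector3f::UnitZ())
             * Scaling(2.0f);
  Vector3f p(1.0f, 0.0f, 0.0f);
  Vector3f p_out = T * p;                  // a point: linear part plus translation
  Vector3f v_out = T.linear() * p;         // a direction/vector: linear part only
  std::cout << p_out.transpose() << "\n" << v_out.transpose() << "\n";
  return 0;
}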
* * \sa MatrixBase::isApprox() */ - bool isApprox(const Translation& other, typename NumTraits::Real prec = NumTraits::dummy_precision()) const + bool isApprox(const Translation& other, const typename NumTraits::Real& prec = NumTraits::dummy_precision()) const { return m_coeffs.isApprox(other.m_coeffs, prec); } }; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Umeyama.h b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Umeyama.h index 345b47e0c..5e20662f8 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Umeyama.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Geometry/Umeyama.h @@ -113,7 +113,7 @@ umeyama(const MatrixBase& src, const MatrixBase& dst, boo const Index n = src.cols(); // number of measurements // required for demeaning ... - const RealScalar one_over_n = 1 / static_cast(n); + const RealScalar one_over_n = RealScalar(1) / static_cast(n); // computation of mean const VectorType src_mean = src.rowwise().sum() * one_over_n; @@ -136,16 +136,16 @@ umeyama(const MatrixBase& src, const MatrixBase& dst, boo // Eq. (39) VectorType S = VectorType::Ones(m); - if (sigma.determinant()<0) S(m-1) = -1; + if (sigma.determinant() 0 ) { + if ( svd.matrixU().determinant() * svd.matrixV().determinant() > Scalar(0) ) { Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose(); } else { - const Scalar s = S(m-1); S(m-1) = -1; + const Scalar s = S(m-1); S(m-1) = Scalar(-1); Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose(); S(m-1) = s; } @@ -156,7 +156,7 @@ umeyama(const MatrixBase& src, const MatrixBase& dst, boo if (with_scaling) { // Eq. (42) - const Scalar c = 1/src_var * svd.singularValues().dot(S); + const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S); // Eq. (41) Rt.col(m).head(m) = dst_mean; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/Householder/BlockHouseholder.h b/ground/gcs/src/libs/eigen/Eigen/src/Householder/BlockHouseholder.h index 1991c6527..60dbea5f5 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/Householder/BlockHouseholder.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/Householder/BlockHouseholder.h @@ -48,7 +48,7 @@ void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vec typedef typename MatrixType::Index Index; enum { TFactorSize = MatrixType::ColsAtCompileTime }; Index nbVecs = vectors.cols(); - Matrix T(nbVecs,nbVecs); + Matrix T(nbVecs,nbVecs); make_block_householder_triangular_factor(T, vectors, hCoeffs); const TriangularView& V(vectors); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h b/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h index 73ca9bfde..1f3c060d0 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h @@ -65,10 +65,10 @@ class DiagonalPreconditioner { typename MatType::InnerIterator it(mat,j); while(it && it.index()!=j) ++it; - if(it && it.index()==j) + if(it && it.index()==j && it.value()!=Scalar(0)) m_invdiag(j) = Scalar(1)/it.value(); else - m_invdiag(j) = 0; + m_invdiag(j) = Scalar(1); } m_isInitialized = true; return *this; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h index 6fc6ab852..551221907 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h +++ 
b/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h @@ -39,7 +39,6 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, int maxIters = iters; int n = mat.cols(); - x = precond.solve(x); VectorType r = rhs - mat * x; VectorType r0 = r; @@ -61,6 +60,7 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, VectorType s(n), t(n); RealScalar tol2 = tol*tol; + RealScalar eps2 = NumTraits::epsilon()*NumTraits::epsilon(); int i = 0; int restarts = 0; @@ -69,7 +69,7 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, Scalar rho_old = rho; rho = r0.dot(r); - if (internal::isMuchSmallerThan(rho,r0_sqnorm)) + if (abs(rho) < eps2*r0_sqnorm) { // The new residual vector became too orthogonal to the arbitrarily choosen direction r0 // Let's restart with a new r0: @@ -142,7 +142,7 @@ struct traits > * SparseMatrix A(n,n); * // fill A and b * BiCGSTAB > solver; - * solver(A); + * solver.compute(A); * x = solver.solve(b); * std::cout << "#iterations: " << solver.iterations() << std::endl; * std::cout << "estimated error: " << solver.error() << std::endl; @@ -151,20 +151,7 @@ struct traits > * \endcode * * By default the iterations start with x=0 as an initial guess of the solution. - * One can control the start using the solveWithGuess() method. Here is a step by - * step execution example starting with a random guess and printing the evolution - * of the estimated error: - * * \code - * x = VectorXd::Random(n); - * solver.setMaxIterations(1); - * int i = 0; - * do { - * x = solver.solveWithGuess(b,x); - * std::cout << i << " : " << solver.error() << std::endl; - * ++i; - * } while (solver.info()!=Success && i<100); - * \endcode - * Note that such a step by step excution is slightly slower. + * One can control the start using the solveWithGuess() method. * * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ @@ -199,7 +186,8 @@ public: * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ - BiCGSTAB(const MatrixType& A) : Base(A) {} + template + explicit BiCGSTAB(const EigenBase& A) : Base(A.derived()) {} ~BiCGSTAB() {} diff --git a/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h index a74a8155e..7dd4010c3 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h @@ -112,9 +112,9 @@ struct traits > * This class allows to solve for A.x = b sparse linear problems using a conjugate gradient algorithm. * The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse. * - * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. - * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower - * or Upper. Default is Lower. + * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix. + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower, + * Upper, or Lower|Upper in which the full matrix entries will be considered. Default is Lower. * \tparam _Preconditioner the type of the preconditioner. 
Default is DiagonalPreconditioner * * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() @@ -137,21 +137,10 @@ struct traits > * \endcode * * By default the iterations start with x=0 as an initial guess of the solution. - * One can control the start using the solveWithGuess() method. Here is a step by - * step execution example starting with a random guess and printing the evolution - * of the estimated error: - * * \code - * x = VectorXd::Random(n); - * cg.setMaxIterations(1); - * int i = 0; - * do { - * x = cg.solveWithGuess(b,x); - * std::cout << i << " : " << cg.error() << std::endl; - * ++i; - * } while (cg.info()!=Success && i<100); - * \endcode - * Note that such a step by step excution is slightly slower. + * One can control the start using the solveWithGuess() method. * + * ConjugateGradient can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. + * * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ template< typename _MatrixType, int _UpLo, typename _Preconditioner> @@ -189,7 +178,8 @@ public: * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ - ConjugateGradient(const MatrixType& A) : Base(A) {} + template + explicit ConjugateGradient(const EigenBase& A) : Base(A.derived()) {} ~ConjugateGradient() {} @@ -213,6 +203,10 @@ public: template void _solveWithGuess(const Rhs& b, Dest& x) const { + typedef typename internal::conditional + >::type MatrixWrapperType; m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; @@ -222,8 +216,7 @@ public: m_error = Base::m_tolerance; typename Dest::ColXpr xj(x,j); - internal::conjugate_gradient(mp_matrix->template selfadjointView(), b.col(j), xj, - Base::m_preconditioner, m_iterations, m_error); + internal::conjugate_gradient(MatrixWrapperType(*mp_matrix), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error); } m_isInitialized = true; @@ -234,7 +227,7 @@ public: template void _solve(const Rhs& b, Dest& x) const { - x.setOnes(); + x.setZero(); _solveWithGuess(b,x); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h index b55afc136..d3f37fea2 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h @@ -150,7 +150,6 @@ class IncompleteLUT : internal::noncopyable { analyzePattern(amat); factorize(amat); - m_isInitialized = m_factorizationIsOk; return *this; } @@ -160,7 +159,7 @@ class IncompleteLUT : internal::noncopyable template void _solve(const Rhs& b, Dest& x) const { - x = m_Pinv * b; + x = m_Pinv * b; x = m_lu.template triangularView().solve(x); x = m_lu.template triangularView().solve(x); x = m_P * x; @@ -223,18 +222,29 @@ template void IncompleteLUT::analyzePattern(const _MatrixType& amat) { // Compute the Fill-reducing permutation + // Since ILUT does not perform any numerical pivoting, + // it is highly preferable to keep the diagonal through symmetric permutations. +#ifndef EIGEN_MPL2_ONLY + // To this end, let's symmetrize the pattern and perform AMD on it. SparseMatrix mat1 = amat; SparseMatrix mat2 = amat.transpose(); - // Symmetrize the pattern // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice. 
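The iterative-solver documentation above now points readers to solveWithGuess() instead of the removed step-by-step example; here is a minimal sketch of that workflow, assuming a placeholder symmetric positive definite matrix (4 times the identity):

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

int main()
{
  using namespace Eigen;
  const int n = 100;
  SparseMatrix<double> A(n, n);
  for (int i = 0; i < n; ++i) A.insert(i, i) = 4.0;   // placeholder SPD matrix: 4 * identity
  A.makeCompressed();
  VectorXd b = VectorXd::Ones(n);

  // Lower|Upper makes the solver use the full matrix, per the updated _UpLo documentation.
  ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg;
  cg.setMaxIterations(200);
  cg.compute(A);
  VectorXd x  = cg.solve(b);               // iterations start from x = 0
  VectorXd x2 = cg.solveWithGuess(b, x);   // or restart from a user-supplied guess
  std::cout << "#iterations: " << cg.iterations() << "  error: " << cg.error() << "\n";
  return 0;
}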
// on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered... SparseMatrix AtA = mat2 + mat1; - AtA.prune(keep_diag()); - internal::minimum_degree_ordering(AtA, m_P); // Then compute the AMD ordering... - - m_Pinv = m_P.inverse(); // ... and the inverse permutation + AMDOrdering ordering; + ordering(AtA,m_P); + m_Pinv = m_P.inverse(); // cache the inverse permutation +#else + // If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine. + SparseMatrix mat1 = amat; + COLAMDOrdering ordering; + ordering(mat1,m_Pinv); + m_P = m_Pinv.inverse(); +#endif m_analysisIsOk = true; + m_factorizationIsOk = false; + m_isInitialized = false; } template @@ -442,6 +452,7 @@ void IncompleteLUT::factorize(const _MatrixType& amat) m_lu.makeCompressed(); m_factorizationIsOk = true; + m_isInitialized = m_factorizationIsOk; m_info = Success; } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h b/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h index 2036922d6..501ef2f8d 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h @@ -49,10 +49,11 @@ public: * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ - IterativeSolverBase(const MatrixType& A) + template + IterativeSolverBase(const EigenBase& A) { init(); - compute(A); + compute(A.derived()); } ~IterativeSolverBase() {} @@ -62,9 +63,11 @@ public: * Currently, this function mostly call analyzePattern on the preconditioner. In the future * we might, for instance, implement column reodering for faster matrix vector products. */ - Derived& analyzePattern(const MatrixType& A) + template + Derived& analyzePattern(const EigenBase& A) { - m_preconditioner.analyzePattern(A); + grabInput(A.derived()); + m_preconditioner.analyzePattern(*mp_matrix); m_isInitialized = true; m_analysisIsOk = true; m_info = Success; @@ -80,11 +83,12 @@ public: * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ - Derived& factorize(const MatrixType& A) + template + Derived& factorize(const EigenBase& A) { + grabInput(A.derived()); eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); - mp_matrix = &A; - m_preconditioner.factorize(A); + m_preconditioner.factorize(*mp_matrix); m_factorizationIsOk = true; m_info = Success; return derived(); @@ -100,10 +104,11 @@ public: * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ - Derived& compute(const MatrixType& A) + template + Derived& compute(const EigenBase& A) { - mp_matrix = &A; - m_preconditioner.compute(A); + grabInput(A.derived()); + m_preconditioner.compute(*mp_matrix); m_isInitialized = true; m_analysisIsOk = true; m_factorizationIsOk = true; @@ -212,6 +217,28 @@ public: } protected: + + template + void grabInput(const EigenBase& A) + { + // we const cast to prevent the creation of a MatrixType temporary by the compiler. 
+ grabInput_impl(A.const_cast_derived()); + } + + template + void grabInput_impl(const EigenBase& A) + { + m_copyMatrix = A; + mp_matrix = &m_copyMatrix; + } + + void grabInput_impl(MatrixType& A) + { + if(MatrixType::RowsAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==Dynamic) + m_copyMatrix.resize(0,0); + mp_matrix = &A; + } + void init() { m_isInitialized = false; @@ -220,6 +247,7 @@ protected: m_maxIterations = -1; m_tolerance = NumTraits::epsilon(); } + MatrixType m_copyMatrix; const MatrixType* mp_matrix; Preconditioner m_preconditioner; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/LU/FullPivLU.h b/ground/gcs/src/libs/eigen/Eigen/src/LU/FullPivLU.h index dfe25f424..e38470463 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/LU/FullPivLU.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/LU/FullPivLU.h @@ -20,10 +20,11 @@ namespace Eigen { * * \param MatrixType the type of the matrix of which we are computing the LU decomposition * - * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A - * is decomposed as A = PLUQ where L is unit-lower-triangular, U is upper-triangular, and P and Q - * are permutation matrices. This is a rank-revealing LU decomposition. The eigenvalues (diagonal - * coefficients) of U are sorted in such a way that any zeros are at the end. + * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A is + * decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is + * upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU + * decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any + * zeros are at the end. * * This decomposition provides the generic approach to solving systems of linear equations, computing * the rank, invertibility, inverse, kernel, and determinant. @@ -373,6 +374,12 @@ template class FullPivLU inline Index cols() const { return m_lu.cols(); } protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + MatrixType m_lu; PermutationPType m_p; PermutationQType m_q; @@ -417,6 +424,8 @@ FullPivLU::FullPivLU(const MatrixType& matrix) template FullPivLU& FullPivLU::compute(const MatrixType& matrix) { + check_template_parameters(); + // the permutations are stored as int indices, so just to be sure: eigen_assert(matrix.rows()<=NumTraits::highest() && matrix.cols()<=NumTraits::highest()); @@ -511,8 +520,8 @@ typename internal::traits::Scalar FullPivLU::determinant } /** \returns the matrix represented by the decomposition, - * i.e., it returns the product: P^{-1} L U Q^{-1}. - * This function is provided for debug purpose. */ + * i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$. + * This function is provided for debug purposes. 
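A small sketch of the FullPivLU interface described above, including the reconstructedMatrix() debug helper; the matrix contents are random placeholders:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;
  MatrixXd A = MatrixXd::Random(4, 4);     // a random square matrix (illustrative)
  FullPivLU<MatrixXd> lu(A);
  std::cout << "rank: " << lu.rank() << "\n";
  // reconstructedMatrix() rebuilds P^{-1} L U Q^{-1}, handy to sanity-check the factorization.
  std::cout << "reconstruction error: " << (A - lu.reconstructedMatrix()).norm() << "\n";
  VectorXd b = VectorXd::Random(4);
  VectorXd x = lu.solve(b);                // a solution of A x = b, when one exists
  std::cout << "residual: " << (A * x - b).norm() << "\n";
  return 0;
}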
*/ template MatrixType FullPivLU::reconstructedMatrix() const { @@ -679,7 +688,7 @@ struct solve_retval, Rhs> */ const Index rows = dec().rows(), cols = dec().cols(), - nonzero_pivots = dec().nonzeroPivots(); + nonzero_pivots = dec().rank(); eigen_assert(rhs().rows() == rows); const Index smalldim = (std::min)(rows, cols); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/LU/PartialPivLU.h b/ground/gcs/src/libs/eigen/Eigen/src/LU/PartialPivLU.h index 740ee694c..7d1db948c 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/LU/PartialPivLU.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/LU/PartialPivLU.h @@ -171,6 +171,12 @@ template class PartialPivLU inline Index cols() const { return m_lu.cols(); } protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + MatrixType m_lu; PermutationType m_p; TranspositionType m_rowsTranspositions; @@ -386,6 +392,8 @@ void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, t template PartialPivLU& PartialPivLU::compute(const MatrixType& matrix) { + check_template_parameters(); + // the row permutation is stored as int indices, so just to be sure: eigen_assert(matrix.rows()::highest()); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Amd.h b/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Amd.h index 41b4fd7e3..658b954c7 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Amd.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Amd.h @@ -8,7 +8,7 @@ NOTE: this routine has been adapted from the CSparse library: Copyright (c) 2006, Timothy A. Davis. -http://www.cise.ufl.edu/research/sparse/CSparse +http://www.suitesparse.com CSparse is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public @@ -137,22 +137,27 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation degree[i] = len[i]; // degree of node i } mark = internal::cs_wclear(0, 0, w, n); /* clear w */ - elen[n] = -2; /* n is a dead element */ - Cp[n] = -1; /* n is a root of assembly tree */ - w[n] = 0; /* n is a dead element */ /* --- Initialize degree lists ------------------------------------------ */ for(i = 0; i < n; i++) { + bool has_diag = false; + for(p = Cp[i]; p dense) /* node i is dense */ + else if(d > dense || !has_diag) /* node i is dense or has no structural diagonal element */ { nv[i] = 0; /* absorb i into element n */ elen[i] = -1; /* node i is dead */ @@ -168,6 +173,10 @@ void minimum_degree_ordering(SparseMatrix& C, Permutation } } + elen[n] = -2; /* n is a dead element */ + Cp[n] = -1; /* n is a root of assembly tree */ + w[n] = 0; /* n is a dead element */ + while (nel < n) /* while (selecting pivots) do */ { /* --- Select node of minimum approximate degree -------------------- */ diff --git a/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h b/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h index 44548f660..359fd4417 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Eigen_Colamd.h @@ -41,12 +41,8 @@ // // The colamd/symamd library is available at // -// http://www.cise.ufl.edu/research/sparse/colamd/ +// http://www.suitesparse.com -// This is the http://www.cise.ufl.edu/research/sparse/colamd/colamd.h -// file. 
It is required by the colamd.c, colamdmex.c, and symamdmex.c -// files, and by any C code that calls the routines whose prototypes are -// listed below, or that uses the colamd/symamd definitions listed below. #ifndef EIGEN_COLAMD_H #define EIGEN_COLAMD_H @@ -102,9 +98,6 @@ namespace internal { /* === Definitions ========================================================== */ /* ========================================================================== */ -#define COLAMD_MAX(a,b) (((a) > (b)) ? (a) : (b)) -#define COLAMD_MIN(a,b) (((a) < (b)) ? (a) : (b)) - #define ONES_COMPLEMENT(r) (-(r)-1) /* -------------------------------------------------------------------------- */ @@ -516,7 +509,7 @@ static Index init_rows_cols /* returns true if OK, or false otherwise */ Col [col].start = p [col] ; Col [col].length = p [col+1] - p [col] ; - if (Col [col].length < 0) + if ((Col [col].length) < 0) // extra parentheses to work-around gcc bug 10200 { /* column pointers must be non-decreasing */ stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ; @@ -739,8 +732,8 @@ static void init_scoring /* === Extract knobs ==================================================== */ - dense_row_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_ROW] * n_col, n_col)) ; - dense_col_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_COL] * n_row, n_row)) ; + dense_row_count = std::max(0, (std::min)(Index(knobs [COLAMD_DENSE_ROW] * n_col), n_col)) ; + dense_col_count = std::max(0, (std::min)(Index(knobs [COLAMD_DENSE_COL] * n_row), n_row)) ; COLAMD_DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ; max_deg = 0 ; n_col2 = n_col ; @@ -804,7 +797,7 @@ static void init_scoring else { /* keep track of max degree of remaining rows */ - max_deg = COLAMD_MAX (max_deg, deg) ; + max_deg = (std::max)(max_deg, deg) ; } } COLAMD_DEBUG1 (("colamd: Dense and null rows killed: %d\n", n_row - n_row2)) ; @@ -842,7 +835,7 @@ static void init_scoring /* add row's external degree */ score += Row [row].shared1.degree - 1 ; /* guard against integer overflow */ - score = COLAMD_MIN (score, n_col) ; + score = (std::min)(score, n_col) ; } /* determine pruned column length */ col_length = (Index) (new_cp - &A [Col [c].start]) ; @@ -914,7 +907,7 @@ static void init_scoring head [score] = c ; /* see if this score is less than current min */ - min_score = COLAMD_MIN (min_score, score) ; + min_score = (std::min)(min_score, score) ; } @@ -1040,7 +1033,7 @@ static Index find_ordering /* return the number of garbage collections */ /* === Garbage_collection, if necessary ============================= */ - needed_memory = COLAMD_MIN (pivot_col_score, n_col - k) ; + needed_memory = (std::min)(pivot_col_score, n_col - k) ; if (pfree + needed_memory >= Alen) { pfree = Eigen::internal::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ; @@ -1099,7 +1092,7 @@ static Index find_ordering /* return the number of garbage collections */ /* clear tag on pivot column */ Col [pivot_col].shared1.thickness = pivot_col_thickness ; - max_deg = COLAMD_MAX (max_deg, pivot_row_degree) ; + max_deg = (std::max)(max_deg, pivot_row_degree) ; /* === Kill all rows used to construct pivot row ==================== */ @@ -1273,7 +1266,7 @@ static Index find_ordering /* return the number of garbage collections */ /* add set difference */ cur_score += row_mark - tag_mark ; /* integer overflow... 
*/ - cur_score = COLAMD_MIN (cur_score, n_col) ; + cur_score = (std::min)(cur_score, n_col) ; } /* recompute the column's length */ @@ -1386,7 +1379,7 @@ static Index find_ordering /* return the number of garbage collections */ cur_score -= Col [col].shared1.thickness ; /* make sure score is less or equal than the max score */ - cur_score = COLAMD_MIN (cur_score, max_score) ; + cur_score = (std::min)(cur_score, max_score) ; COLAMD_ASSERT (cur_score >= 0) ; /* store updated score */ @@ -1409,7 +1402,7 @@ static Index find_ordering /* return the number of garbage collections */ head [cur_score] = col ; /* see if this score is less than current min */ - min_score = COLAMD_MIN (min_score, cur_score) ; + min_score = (std::min)(min_score, cur_score) ; } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Ordering.h b/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Ordering.h index b4da6531a..f3c31f9cb 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Ordering.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/OrderingMethods/Ordering.h @@ -109,7 +109,7 @@ class NaturalOrdering * \class COLAMDOrdering * * Functor computing the \em column \em approximate \em minimum \em degree ordering - * The matrix should be in column-major format + * The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()). */ template class COLAMDOrdering @@ -118,10 +118,14 @@ class COLAMDOrdering typedef PermutationMatrix PermutationType; typedef Matrix IndexVector; - /** Compute the permutation vector form a sparse matrix */ + /** Compute the permutation vector \a perm form the sparse matrix \a mat + * \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). + */ template void operator() (const MatrixType& mat, PermutationType& perm) { + eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. 
Call .makeCompressed() before passing it to COLAMDOrdering"); + Index m = mat.rows(); Index n = mat.cols(); Index nnz = mat.nonZeros(); @@ -132,12 +136,12 @@ class COLAMDOrdering Index stats [COLAMD_STATS]; internal::colamd_set_defaults(knobs); - Index info; IndexVector p(n+1), A(Alen); for(Index i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i]; for(Index i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i]; // Call Colamd routine to compute the ordering - info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats); + Index info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats); + EIGEN_UNUSED_VARIABLE(info); eigen_assert( info && "COLAMD failed " ); perm.resize(n); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/PardisoSupport/PardisoSupport.h b/ground/gcs/src/libs/eigen/Eigen/src/PardisoSupport/PardisoSupport.h index 1c48f0df7..18cd7d88a 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/PardisoSupport/PardisoSupport.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/PardisoSupport/PardisoSupport.h @@ -219,7 +219,7 @@ class PardisoImpl void pardisoInit(int type) { m_type = type; - bool symmetric = abs(m_type) < 10; + bool symmetric = std::abs(m_type) < 10; m_iparm[0] = 1; // No solver default m_iparm[1] = 3; // use Metis for the ordering m_iparm[2] = 1; // Numbers of processors, value of OMP_NUM_THREADS diff --git a/ground/gcs/src/libs/eigen/Eigen/src/QR/ColPivHouseholderQR.h b/ground/gcs/src/libs/eigen/Eigen/src/QR/ColPivHouseholderQR.h index 8b01f8179..567eab7cd 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/QR/ColPivHouseholderQR.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/QR/ColPivHouseholderQR.h @@ -76,7 +76,8 @@ template class ColPivHouseholderQR m_colsTranspositions(), m_temp(), m_colSqNorms(), - m_isInitialized(false) {} + m_isInitialized(false), + m_usePrescribedThreshold(false) {} /** \brief Default Constructor with memory preallocation * @@ -349,7 +350,7 @@ template class ColPivHouseholderQR return m_usePrescribedThreshold ? m_prescribedThreshold // this formula comes from experimenting (see "LU precision tuning" thread on the list) // and turns out to be identical to Higham's formula used already in LDLt. - : NumTraits::epsilon() * m_qr.diagonalSize(); + : NumTraits::epsilon() * RealScalar(m_qr.diagonalSize()); } /** \returns the number of nonzero pivots in the QR decomposition. @@ -383,6 +384,12 @@ template class ColPivHouseholderQR } protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + MatrixType m_qr; HCoeffsType m_hCoeffs; PermutationType m_colsPermutation; @@ -421,6 +428,8 @@ typename MatrixType::RealScalar ColPivHouseholderQR::logAbsDetermina template ColPivHouseholderQR& ColPivHouseholderQR::compute(const MatrixType& matrix) { + check_template_parameters(); + using std::abs; Index rows = matrix.rows(); Index cols = matrix.cols(); @@ -462,20 +471,10 @@ ColPivHouseholderQR& ColPivHouseholderQR::compute(const // we store that back into our table: it can't hurt to correct our table. m_colSqNorms.coeffRef(biggest_col_index) = biggest_col_sq_norm; - // if the current biggest column is smaller than epsilon times the initial biggest column, - // terminate to avoid generating nan/inf values. - // Note that here, if we test instead for "biggest == 0", we get a failure every 1000 (or so) - // repetitions of the unit test, with the result of solve() filled with large values of the order - // of 1/(size*epsilon). 
- if(biggest_col_sq_norm < threshold_helper * RealScalar(rows-k)) - { + // Track the number of meaningful pivots but do not stop the decomposition to make + // sure that the initial matrix is properly reproduced. See bug 941. + if(m_nonzero_pivots==size && biggest_col_sq_norm < threshold_helper * RealScalar(rows-k)) m_nonzero_pivots = k; - m_hCoeffs.tail(size-k).setZero(); - m_qr.bottomRightCorner(rows-k,cols-k) - .template triangularView() - .setZero(); - break; - } // apply the transposition to the columns m_colsTranspositions.coeffRef(k) = biggest_col_index; @@ -504,7 +503,7 @@ ColPivHouseholderQR& ColPivHouseholderQR::compute(const } m_colsPermutation.setIdentity(PermIndexType(cols)); - for(PermIndexType k = 0; k < m_nonzero_pivots; ++k) + for(PermIndexType k = 0; k < size/*m_nonzero_pivots*/; ++k) m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k))); m_det_pq = (number_of_transpositions%2) ? -1 : 1; @@ -554,13 +553,15 @@ struct solve_retval, Rhs> } // end namespace internal -/** \returns the matrix Q as a sequence of householder transformations */ +/** \returns the matrix Q as a sequence of householder transformations. + * You can extract the meaningful part only by using: + * \code qr.householderQ().setLength(qr.nonzeroPivots()) \endcode*/ template typename ColPivHouseholderQR::HouseholderSequenceType ColPivHouseholderQR ::householderQ() const { eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized."); - return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()).setLength(m_nonzero_pivots); + return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()); } /** \return the column-pivoting Householder QR decomposition of \c *this. diff --git a/ground/gcs/src/libs/eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h b/ground/gcs/src/libs/eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h index b5b198326..7b6ba0a5e 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/QR/ColPivHouseholderQR_MKL.h @@ -49,7 +49,6 @@ ColPivHouseholderQR MatrixType; \ - typedef MatrixType::Scalar Scalar; \ typedef MatrixType::RealScalar RealScalar; \ Index rows = matrix.rows();\ Index cols = matrix.cols();\ diff --git a/ground/gcs/src/libs/eigen/Eigen/src/QR/FullPivHouseholderQR.h b/ground/gcs/src/libs/eigen/Eigen/src/QR/FullPivHouseholderQR.h index 0dd5ad347..0b39966e1 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/QR/FullPivHouseholderQR.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/QR/FullPivHouseholderQR.h @@ -63,9 +63,10 @@ template class FullPivHouseholderQR typedef typename MatrixType::Index Index; typedef internal::FullPivHouseholderQRMatrixQReturnType MatrixQReturnType; typedef typename internal::plain_diag_type::type HCoeffsType; - typedef Matrix IntRowVectorType; + typedef Matrix IntDiagSizeVectorType; typedef PermutationMatrix PermutationType; - typedef typename internal::plain_col_type::type IntColVectorType; typedef typename internal::plain_row_type::type RowVectorType; typedef typename internal::plain_col_type::type ColVectorType; @@ -93,10 +94,10 @@ template class FullPivHouseholderQR FullPivHouseholderQR(Index rows, Index cols) : m_qr(rows, cols), m_hCoeffs((std::min)(rows,cols)), - m_rows_transpositions(rows), - m_cols_transpositions(cols), + m_rows_transpositions((std::min)(rows,cols)), + m_cols_transpositions((std::min)(rows,cols)), m_cols_permutation(cols), - m_temp((std::min)(rows,cols)), + m_temp(cols), m_isInitialized(false), m_usePrescribedThreshold(false) {} @@ -115,10 
+116,10 @@ template class FullPivHouseholderQR FullPivHouseholderQR(const MatrixType& matrix) : m_qr(matrix.rows(), matrix.cols()), m_hCoeffs((std::min)(matrix.rows(), matrix.cols())), - m_rows_transpositions(matrix.rows()), - m_cols_transpositions(matrix.cols()), + m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())), + m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())), m_cols_permutation(matrix.cols()), - m_temp((std::min)(matrix.rows(), matrix.cols())), + m_temp(matrix.cols()), m_isInitialized(false), m_usePrescribedThreshold(false) { @@ -126,11 +127,12 @@ template class FullPivHouseholderQR } /** This method finds a solution x to the equation Ax=b, where A is the matrix of which - * *this is the QR decomposition, if any exists. + * \c *this is the QR decomposition. * * \param b the right-hand-side of the equation to solve. * - * \returns a solution. + * \returns the exact or least-square solution if the rank is greater or equal to the number of columns of A, + * and an arbitrary solution otherwise. * * \note The case where b is a matrix is not yet implemented. Also, this * code is space inefficient. @@ -172,7 +174,7 @@ template class FullPivHouseholderQR } /** \returns a const reference to the vector of indices representing the rows transpositions */ - const IntColVectorType& rowsTranspositions() const + const IntDiagSizeVectorType& rowsTranspositions() const { eigen_assert(m_isInitialized && "FullPivHouseholderQR is not initialized."); return m_rows_transpositions; @@ -344,7 +346,7 @@ template class FullPivHouseholderQR return m_usePrescribedThreshold ? m_prescribedThreshold // this formula comes from experimenting (see "LU precision tuning" thread on the list) // and turns out to be identical to Higham's formula used already in LDLt. - : NumTraits::epsilon() * m_qr.diagonalSize(); + : NumTraits::epsilon() * RealScalar(m_qr.diagonalSize()); } /** \returns the number of nonzero pivots in the QR decomposition. @@ -366,10 +368,16 @@ template class FullPivHouseholderQR RealScalar maxPivot() const { return m_maxpivot; } protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + MatrixType m_qr; HCoeffsType m_hCoeffs; - IntColVectorType m_rows_transpositions; - IntRowVectorType m_cols_transpositions; + IntDiagSizeVectorType m_rows_transpositions; + IntDiagSizeVectorType m_cols_transpositions; PermutationType m_cols_permutation; RowVectorType m_temp; bool m_isInitialized, m_usePrescribedThreshold; @@ -405,6 +413,8 @@ typename MatrixType::RealScalar FullPivHouseholderQR::logAbsDetermin template FullPivHouseholderQR& FullPivHouseholderQR::compute(const MatrixType& matrix) { + check_template_parameters(); + using std::abs; Index rows = matrix.rows(); Index cols = matrix.cols(); @@ -415,10 +425,10 @@ FullPivHouseholderQR& FullPivHouseholderQR::compute(cons m_temp.resize(cols); - m_precision = NumTraits::epsilon() * size; + m_precision = NumTraits::epsilon() * RealScalar(size); - m_rows_transpositions.resize(matrix.rows()); - m_cols_transpositions.resize(matrix.cols()); + m_rows_transpositions.resize(size); + m_cols_transpositions.resize(size); Index number_of_transpositions = 0; RealScalar biggest(0); @@ -516,17 +526,6 @@ struct solve_retval, Rhs> dec().hCoeffs().coeff(k), &temp.coeffRef(0)); } - if(!dec().isSurjective()) - { - // is c is in the image of R ? 
- RealScalar biggest_in_upper_part_of_c = c.topRows( dec().rank() ).cwiseAbs().maxCoeff(); - RealScalar biggest_in_lower_part_of_c = c.bottomRows(rows-dec().rank()).cwiseAbs().maxCoeff(); - // FIXME brain dead - const RealScalar m_precision = NumTraits::epsilon() * (std::min)(rows,cols); - // this internal:: prefix is needed by at least gcc 3.4 and ICC - if(!internal::isMuchSmallerThan(biggest_in_lower_part_of_c, biggest_in_upper_part_of_c, m_precision)) - return; - } dec().matrixQR() .topLeftCorner(dec().rank(), dec().rank()) .template triangularView() @@ -548,14 +547,14 @@ template struct FullPivHouseholderQRMatrixQReturnType { public: typedef typename MatrixType::Index Index; - typedef typename internal::plain_col_type::type IntColVectorType; + typedef typename FullPivHouseholderQR::IntDiagSizeVectorType IntDiagSizeVectorType; typedef typename internal::plain_diag_type::type HCoeffsType; typedef Matrix WorkVectorType; FullPivHouseholderQRMatrixQReturnType(const MatrixType& qr, const HCoeffsType& hCoeffs, - const IntColVectorType& rowsTranspositions) + const IntDiagSizeVectorType& rowsTranspositions) : m_qr(qr), m_hCoeffs(hCoeffs), m_rowsTranspositions(rowsTranspositions) @@ -595,7 +594,7 @@ public: protected: typename MatrixType::Nested m_qr; typename HCoeffsType::Nested m_hCoeffs; - typename IntColVectorType::Nested m_rowsTranspositions; + typename IntDiagSizeVectorType::Nested m_rowsTranspositions; }; } // end namespace internal diff --git a/ground/gcs/src/libs/eigen/Eigen/src/QR/HouseholderQR.h b/ground/gcs/src/libs/eigen/Eigen/src/QR/HouseholderQR.h index abc61bcbb..343a66499 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/QR/HouseholderQR.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/QR/HouseholderQR.h @@ -189,6 +189,12 @@ template class HouseholderQR const HCoeffsType& hCoeffs() const { return m_hCoeffs; } protected: + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } + MatrixType m_qr; HCoeffsType m_hCoeffs; RowVectorType m_temp; @@ -251,56 +257,62 @@ void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename } /** \internal */ -template -void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs, - typename MatrixQR::Index maxBlockSize=32, - typename MatrixQR::Scalar* tempData = 0) +template +struct householder_qr_inplace_blocked { - typedef typename MatrixQR::Index Index; - typedef typename MatrixQR::Scalar Scalar; - typedef Block BlockType; - - Index rows = mat.rows(); - Index cols = mat.cols(); - Index size = (std::min)(rows, cols); - - typedef Matrix TempType; - TempType tempVector; - if(tempData==0) + // This is specialized for MKL-supported Scalar types in HouseholderQR_MKL.h + static void run(MatrixQR& mat, HCoeffs& hCoeffs, + typename MatrixQR::Index maxBlockSize=32, + typename MatrixQR::Scalar* tempData = 0) { - tempVector.resize(cols); - tempData = tempVector.data(); - } + typedef typename MatrixQR::Index Index; + typedef typename MatrixQR::Scalar Scalar; + typedef Block BlockType; - Index blockSize = (std::min)(maxBlockSize,size); + Index rows = mat.rows(); + Index cols = mat.cols(); + Index size = (std::min)(rows, cols); - Index k = 0; - for (k = 0; k < size; k += blockSize) - { - Index bs = (std::min)(size-k,blockSize); // actual size of the block - Index tcols = cols - k - bs; // trailing columns - Index brows = rows-k; // rows of the block - - // partition the matrix: - // A00 | A01 | A02 - // mat = A10 | A11 | A12 - // A20 | A21 | A22 - // and performs the qr dec of [A11^T A12^T]^T - // and 
update [A21^T A22^T]^T using level 3 operations. - // Finally, the algorithm continue on A22 - - BlockType A11_21 = mat.block(k,k,brows,bs); - Block hCoeffsSegment = hCoeffs.segment(k,bs); - - householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData); - - if(tcols) + typedef Matrix TempType; + TempType tempVector; + if(tempData==0) { - BlockType A21_22 = mat.block(k,k+bs,brows,tcols); - apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment.adjoint()); + tempVector.resize(cols); + tempData = tempVector.data(); + } + + Index blockSize = (std::min)(maxBlockSize,size); + + Index k = 0; + for (k = 0; k < size; k += blockSize) + { + Index bs = (std::min)(size-k,blockSize); // actual size of the block + Index tcols = cols - k - bs; // trailing columns + Index brows = rows-k; // rows of the block + + // partition the matrix: + // A00 | A01 | A02 + // mat = A10 | A11 | A12 + // A20 | A21 | A22 + // and performs the qr dec of [A11^T A12^T]^T + // and update [A21^T A22^T]^T using level 3 operations. + // Finally, the algorithm continue on A22 + + BlockType A11_21 = mat.block(k,k,brows,bs); + Block hCoeffsSegment = hCoeffs.segment(k,bs); + + householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData); + + if(tcols) + { + BlockType A21_22 = mat.block(k,k+bs,brows,tcols); + apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment.adjoint()); + } } } -} +}; template struct solve_retval, Rhs> @@ -343,6 +355,8 @@ struct solve_retval, Rhs> template HouseholderQR& HouseholderQR::compute(const MatrixType& matrix) { + check_template_parameters(); + Index rows = matrix.rows(); Index cols = matrix.cols(); Index size = (std::min)(rows,cols); @@ -352,7 +366,7 @@ HouseholderQR& HouseholderQR::compute(const MatrixType& m_temp.resize(cols); - internal::householder_qr_inplace_blocked(m_qr, m_hCoeffs, 48, m_temp.data()); + internal::householder_qr_inplace_blocked::run(m_qr, m_hCoeffs, 48, m_temp.data()); m_isInitialized = true; return *this; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/QR/HouseholderQR_MKL.h b/ground/gcs/src/libs/eigen/Eigen/src/QR/HouseholderQR_MKL.h index 5313de604..b80f1b48d 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/QR/HouseholderQR_MKL.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/QR/HouseholderQR_MKL.h @@ -34,28 +34,30 @@ #ifndef EIGEN_QR_MKL_H #define EIGEN_QR_MKL_H -#include "Eigen/src/Core/util/MKL_support.h" +#include "../Core/util/MKL_support.h" namespace Eigen { -namespace internal { + namespace internal { -/** \internal Specialization for the data types supported by MKL */ + /** \internal Specialization for the data types supported by MKL */ #define EIGEN_MKL_QR_NOPIV(EIGTYPE, MKLTYPE, MKLPREFIX) \ template \ -void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs, \ - typename MatrixQR::Index maxBlockSize=32, \ - EIGTYPE* tempData = 0) \ +struct householder_qr_inplace_blocked \ { \ - lapack_int m = mat.rows(); \ - lapack_int n = mat.cols(); \ - lapack_int lda = mat.outerStride(); \ - lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ - LAPACKE_##MKLPREFIX##geqrf( matrix_order, m, n, (MKLTYPE*)mat.data(), lda, (MKLTYPE*)hCoeffs.data()); \ - hCoeffs.adjointInPlace(); \ -\ -} + static void run(MatrixQR& mat, HCoeffs& hCoeffs, \ + typename MatrixQR::Index = 32, \ + typename MatrixQR::Scalar* = 0) \ + { \ + lapack_int m = (lapack_int) mat.rows(); \ + lapack_int n = (lapack_int) mat.cols(); \ + lapack_int lda = (lapack_int) mat.outerStride(); \ + lapack_int matrix_order = (MatrixQR::IsRowMajor) ? 
LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ + LAPACKE_##MKLPREFIX##geqrf( matrix_order, m, n, (MKLTYPE*)mat.data(), lda, (MKLTYPE*)hCoeffs.data()); \ + hCoeffs.adjointInPlace(); \ + } \ +}; EIGEN_MKL_QR_NOPIV(double, double, d) EIGEN_MKL_QR_NOPIV(float, float, s) diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/ground/gcs/src/libs/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h index aa41f434c..36138101d 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h @@ -47,7 +47,7 @@ namespace Eigen { * You can then apply it to a vector. * * R is the sparse triangular factor. Use matrixQR() to get it as SparseMatrix. - * NOTE : The Index type of R is always UF_long. You can get it with SPQR::Index + * NOTE : The Index type of R is always SuiteSparse_long. You can get it with SPQR::Index * * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<> * NOTE @@ -59,22 +59,18 @@ class SPQR public: typedef typename _MatrixType::Scalar Scalar; typedef typename _MatrixType::RealScalar RealScalar; - typedef UF_long Index ; + typedef SuiteSparse_long Index ; typedef SparseMatrix MatrixType; typedef PermutationMatrix PermutationType; public: SPQR() - : m_ordering(SPQR_ORDERING_DEFAULT), - m_allow_tol(SPQR_DEFAULT_TOL), - m_tolerance (NumTraits::epsilon()) + : m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits::epsilon()), m_useDefaultThreshold(true) { cholmod_l_start(&m_cc); } - SPQR(const _MatrixType& matrix) - : m_ordering(SPQR_ORDERING_DEFAULT), - m_allow_tol(SPQR_DEFAULT_TOL), - m_tolerance (NumTraits::epsilon()) + SPQR(const _MatrixType& matrix) + : m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits::epsilon()), m_useDefaultThreshold(true) { cholmod_l_start(&m_cc); compute(matrix); @@ -82,21 +78,43 @@ class SPQR ~SPQR() { - // Calls SuiteSparseQR_free() + SPQR_free(); + cholmod_l_finish(&m_cc); + } + void SPQR_free() + { cholmod_l_free_sparse(&m_H, &m_cc); cholmod_l_free_sparse(&m_cR, &m_cc); cholmod_l_free_dense(&m_HTau, &m_cc); std::free(m_E); std::free(m_HPinv); - cholmod_l_finish(&m_cc); } + void compute(const _MatrixType& matrix) { + if(m_isInitialized) SPQR_free(); + MatrixType mat(matrix); + + /* Compute the default threshold as in MatLab, see: + * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing + * Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3 + */ + RealScalar pivotThreshold = m_tolerance; + if(m_useDefaultThreshold) + { + using std::max; + RealScalar max2Norm = 0.0; + for (int j = 0; j < mat.cols(); j++) max2Norm = (max)(max2Norm, mat.col(j).norm()); + if(max2Norm==RealScalar(0)) + max2Norm = RealScalar(1); + pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits::epsilon(); + } + cholmod_sparse A; A = viewAsCholmod(mat); Index col = matrix.cols(); - m_rank = SuiteSparseQR(m_ordering, m_tolerance, col, &A, + m_rank = SuiteSparseQR(m_ordering, pivotThreshold, col, &A, &m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc); if (!m_cR) @@ -112,7 +130,7 @@ class SPQR /** * Get the number of rows of the input matrix and the Q matrix */ - inline Index rows() const {return m_H->nrow; } + inline Index rows() const {return m_cR->nrow; } /** * Get the number of columns of the input matrix. 
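With these changes compute() mirrors SuiteSparseQR's MATLAB-style default: unless the user has prescribed a tolerance, the rank-detection threshold is derived from the largest column 2-norm as 20*(rows+cols)*max2Norm*epsilon, and m_useDefaultThreshold tracks whether that default applies. A minimal usage sketch, assuming SuiteSparse/SPQR is linked; the matrix size and the 1e-10 tolerance are illustrative only, not taken from the patch:

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <Eigen/SPQRSupport>

int main()
{
  Eigen::SparseMatrix<double> A(5, 5);           // illustrative column-major input
  A.setIdentity();
  Eigen::SPQR<Eigen::SparseMatrix<double> > qr;
  // qr.setPivotThreshold(1e-10);                // optional: prescribe a tolerance before compute(),
                                                 // which disables the default threshold above
  qr.compute(A);                                 // otherwise the MATLAB-style default is used
  Eigen::VectorXd b = Eigen::VectorXd::Ones(A.rows());
  Eigen::VectorXd x = qr.solve(b);               // rank detection honours the chosen threshold
  return 0;
}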
@@ -137,16 +155,25 @@ class SPQR { eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()"); eigen_assert(b.cols()==1 && "This method is for vectors only"); - + //Compute Q^T * b - Dest y; + typename Dest::PlainObject y, y2; y = matrixQ().transpose() * b; - // Solves with the triangular matrix R + + // Solves with the triangular matrix R Index rk = this->rank(); - y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView().solve(y.topRows(rk)); - y.bottomRows(cols()-rk).setZero(); + y2 = y; + y.resize((std::max)(cols(),Index(y.rows())),y.cols()); + y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView().solve(y2.topRows(rk)); + // Apply the column permutation - dest.topRows(cols()) = colsPermutation() * y.topRows(cols()); + // colsPermutation() performs a copy of the permutation, + // so let's apply it manually: + for(Index i = 0; i < rk; ++i) dest.row(m_E[i]) = y.row(i); + for(Index i = rk; i < cols(); ++i) dest.row(m_E[i]).setZero(); + +// y.bottomRows(y.rows()-rk).setZero(); +// dest = colsPermutation() * y.topRows(cols()); m_info = Success; } @@ -189,7 +216,11 @@ class SPQR /// Set the fill-reducing ordering method to be used void setSPQROrdering(int ord) { m_ordering = ord;} /// Set the tolerance tol to treat columns with 2-norm < =tol as zero - void setPivotThreshold(const RealScalar& tol) { m_tolerance = tol; } + void setPivotThreshold(const RealScalar& tol) + { + m_useDefaultThreshold = false; + m_tolerance = tol; + } /** \returns a pointer to the SPQR workspace */ cholmod_common *cholmodCommon() const { return &m_cc; } @@ -222,6 +253,7 @@ class SPQR mutable cholmod_dense *m_HTau; // The Householder coefficients mutable Index m_rank; // The rank of the matrix mutable cholmod_common m_cc; // Workspace and parameters + bool m_useDefaultThreshold; // Use default threshold template friend struct SPQR_QProduct; }; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SVD/JacobiSVD.h b/ground/gcs/src/libs/eigen/Eigen/src/SVD/JacobiSVD.h index 4786768ff..89ace381e 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SVD/JacobiSVD.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SVD/JacobiSVD.h @@ -375,14 +375,19 @@ struct svd_precondition_2x2_block_to_be_real Scalar z; JacobiRotation rot; RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p))); + if(n==0) { z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q); work_matrix.row(p) *= z; if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z); - z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); - work_matrix.row(q) *= z; - if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); + if(work_matrix.coeff(q,q)!=Scalar(0)) + { + z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q); + work_matrix.row(q) *= z; + if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z); + } + // otherwise the second row is already zero, so we have nothing to do. 
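      // Why the added guard matters: this branch multiplies a row by z = |a|/a so that the
      // corresponding diagonal entry becomes real; if work_matrix.coeff(q,q) were exactly zero,
      // that division would produce NaN, and no fix-up is needed because the row is already zero.
      // Minimal sketch of the same phase normalization, with an illustrative value:
      //   std::complex<double> a(3.0, 4.0);
      //   std::complex<double> z = std::abs(a) / a;   // z*a == |a| == 5.0, purely real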
} else { @@ -412,6 +417,7 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, JacobiRotation *j_right) { using std::sqrt; + using std::abs; Matrix m; m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)), numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q)); @@ -425,9 +431,11 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q, } else { - RealScalar u = d / t; - rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u)); - rot1.s() = rot1.c() * u; + RealScalar t2d2 = numext::hypot(t,d); + rot1.c() = abs(t)/t2d2; + rot1.s() = d/t2d2; + if(tmakeJacobi(m,0,1); @@ -528,8 +536,9 @@ template class JacobiSVD JacobiSVD() : m_isInitialized(false), m_isAllocated(false), + m_usePrescribedThreshold(false), m_computationOptions(0), - m_rows(-1), m_cols(-1) + m_rows(-1), m_cols(-1), m_diagSize(0) {} @@ -542,6 +551,7 @@ template class JacobiSVD JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0) : m_isInitialized(false), m_isAllocated(false), + m_usePrescribedThreshold(false), m_computationOptions(0), m_rows(-1), m_cols(-1) { @@ -561,6 +571,7 @@ template class JacobiSVD JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0) : m_isInitialized(false), m_isAllocated(false), + m_usePrescribedThreshold(false), m_computationOptions(0), m_rows(-1), m_cols(-1) { @@ -662,23 +673,92 @@ template class JacobiSVD eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); return m_nonzeroSingularValues; } + + /** \returns the rank of the matrix of which \c *this is the SVD. + * + * \note This method has to determine which singular values should be considered nonzero. + * For that, it uses the threshold value that you can control by calling + * setThreshold(const RealScalar&). + */ + inline Index rank() const + { + using std::abs; + eigen_assert(m_isInitialized && "JacobiSVD is not initialized."); + if(m_singularValues.size()==0) return 0; + RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold(); + Index i = m_nonzeroSingularValues-1; + while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i; + return i+1; + } + + /** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(), + * which need to determine when singular values are to be considered nonzero. + * This is not used for the SVD decomposition itself. + * + * When it needs to get the threshold value, Eigen calls threshold(). + * The default is \c NumTraits::epsilon() + * + * \param threshold The new value to use as the threshold. + * + * A singular value will be considered nonzero if its value is strictly greater than + * \f$ \vert singular value \vert \leqslant threshold \times \vert max singular value \vert \f$. + * + * If you want to come back to the default behavior, call setThreshold(Default_t) + */ + JacobiSVD& setThreshold(const RealScalar& threshold) + { + m_usePrescribedThreshold = true; + m_prescribedThreshold = threshold; + return *this; + } + + /** Allows to come back to the default behavior, letting Eigen use its default formula for + * determining the threshold. + * + * You should pass the special object Eigen::Default as parameter here. + * \code svd.setThreshold(Eigen::Default); \endcode + * + * See the documentation of setThreshold(const RealScalar&). + */ + JacobiSVD& setThreshold(Default_t) + { + m_usePrescribedThreshold = false; + return *this; + } + + /** Returns the threshold that will be used by certain methods such as rank(). 
+ * + * See the documentation of setThreshold(const RealScalar&). + */ + RealScalar threshold() const + { + eigen_assert(m_isInitialized || m_usePrescribedThreshold); + return m_usePrescribedThreshold ? m_prescribedThreshold + : (std::max)(1,m_diagSize)*NumTraits::epsilon(); + } inline Index rows() const { return m_rows; } inline Index cols() const { return m_cols; } private: void allocate(Index rows, Index cols, unsigned int computationOptions); + + static void check_template_parameters() + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); + } protected: MatrixUType m_matrixU; MatrixVType m_matrixV; SingularValuesType m_singularValues; WorkMatrixType m_workMatrix; - bool m_isInitialized, m_isAllocated; + bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold; bool m_computeFullU, m_computeThinU; bool m_computeFullV, m_computeThinV; unsigned int m_computationOptions; Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize; + RealScalar m_prescribedThreshold; template friend struct internal::svd_precondition_2x2_block_to_be_real; @@ -687,6 +767,7 @@ template class JacobiSVD internal::qr_preconditioner_impl m_qr_precond_morecols; internal::qr_preconditioner_impl m_qr_precond_morerows; + MatrixType m_scaledMatrix; }; template @@ -733,14 +814,17 @@ void JacobiSVD::allocate(Index rows, Index cols, u : 0); m_workMatrix.resize(m_diagSize, m_diagSize); - if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this); - if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this); + if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this); + if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this); + if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols); } template JacobiSVD& JacobiSVD::compute(const MatrixType& matrix, unsigned int computationOptions) { + check_template_parameters(); + using std::abs; allocate(matrix.rows(), matrix.cols(), computationOptions); @@ -751,11 +835,21 @@ JacobiSVD::compute(const MatrixType& matrix, unsig // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286) const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits::denorm_min(); + // Scaling factor to reduce over/under-flows + RealScalar scale = matrix.cwiseAbs().maxCoeff(); + if(scale==RealScalar(0)) scale = RealScalar(1); + /*** step 1. 
The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */ - if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix)) + if(m_rows!=m_cols) { - m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize); + m_scaledMatrix = matrix / scale; + m_qr_precond_morecols.run(*this, m_scaledMatrix); + m_qr_precond_morerows.run(*this, m_scaledMatrix); + } + else + { + m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale; if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows); if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize); if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols); @@ -781,7 +875,8 @@ JacobiSVD::compute(const MatrixType& matrix, unsig using std::max; RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q)))); - if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold) + // We compare both values to threshold instead of calling max to be robust to NaN (See bug 791) + if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold) { finished = false; @@ -830,6 +925,8 @@ JacobiSVD::compute(const MatrixType& matrix, unsig if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i)); } } + + m_singularValues *= scale; m_isInitialized = true; return *this; @@ -851,11 +948,11 @@ struct solve_retval, Rhs> // So A^{-1} = V S^{-1} U^* Matrix tmp; - Index nonzeroSingVals = dec().nonzeroSingularValues(); + Index rank = dec().rank(); - tmp.noalias() = dec().matrixU().leftCols(nonzeroSingVals).adjoint() * rhs(); - tmp = dec().singularValues().head(nonzeroSingVals).asDiagonal().inverse() * tmp; - dst = dec().matrixV().leftCols(nonzeroSingVals) * tmp; + tmp.noalias() = dec().matrixU().leftCols(rank).adjoint() * rhs(); + tmp = dec().singularValues().head(rank).asDiagonal().inverse() * tmp; + dst = dec().matrixV().leftCols(rank) * tmp; } }; } // end namespace internal diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SVD/JacobiSVD_MKL.h b/ground/gcs/src/libs/eigen/Eigen/src/SVD/JacobiSVD_MKL.h index decda7540..14e461c4e 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SVD/JacobiSVD_MKL.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SVD/JacobiSVD_MKL.h @@ -45,8 +45,8 @@ JacobiSVD, ColPiv JacobiSVD, ColPivHouseholderQRPreconditioner>::compute(const Matrix& matrix, unsigned int computationOptions) \ { \ typedef Matrix MatrixType; \ - typedef MatrixType::Scalar Scalar; \ - typedef MatrixType::RealScalar RealScalar; \ + /*typedef MatrixType::Scalar Scalar;*/ \ + /*typedef MatrixType::RealScalar RealScalar;*/ \ allocate(matrix.rows(), matrix.cols(), computationOptions); \ \ /*const RealScalar precision = RealScalar(2) * NumTraits::epsilon();*/ \ diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h index f41d7e010..e1f96ba5a 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCholesky/SimplicialCholesky.h @@ -37,6 +37,7 @@ class SimplicialCholeskyBase : internal::noncopyable { public: typedef typename internal::traits::MatrixType MatrixType; + typedef typename internal::traits::OrderingType OrderingType; enum { UpLo = internal::traits::UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; @@ -240,15 +241,16 @@ class SimplicialCholeskyBase : internal::noncopyable RealScalar m_shiftScale; }; -template 
class SimplicialLLT; -template class SimplicialLDLT; -template class SimplicialCholesky; +template > class SimplicialLLT; +template > class SimplicialLDLT; +template > class SimplicialCholesky; namespace internal { -template struct traits > +template struct traits > { typedef _MatrixType MatrixType; + typedef _Ordering OrderingType; enum { UpLo = _UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; @@ -259,9 +261,10 @@ template struct traits struct traits > +template struct traits > { typedef _MatrixType MatrixType; + typedef _Ordering OrderingType; enum { UpLo = _UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Index Index; @@ -272,9 +275,10 @@ template struct traits struct traits > +template struct traits > { typedef _MatrixType MatrixType; + typedef _Ordering OrderingType; enum { UpLo = _UpLo }; }; @@ -294,11 +298,12 @@ template struct traits * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. + * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<> * - * \sa class SimplicialLDLT + * \sa class SimplicialLDLT, class AMDOrdering, class NaturalOrdering */ -template - class SimplicialLLT : public SimplicialCholeskyBase > +template + class SimplicialLLT : public SimplicialCholeskyBase > { public: typedef _MatrixType MatrixType; @@ -382,11 +387,12 @@ public: * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. + * \tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. 
Default is AMDOrdering<> * - * \sa class SimplicialLLT + * \sa class SimplicialLLT, class AMDOrdering, class NaturalOrdering */ -template - class SimplicialLDLT : public SimplicialCholeskyBase > +template + class SimplicialLDLT : public SimplicialCholeskyBase > { public: typedef _MatrixType MatrixType; @@ -467,8 +473,8 @@ public: * * \sa class SimplicialLDLT, class SimplicialLLT */ -template - class SimplicialCholesky : public SimplicialCholeskyBase > +template + class SimplicialCholesky : public SimplicialCholeskyBase > { public: typedef _MatrixType MatrixType; @@ -612,15 +618,13 @@ void SimplicialCholeskyBase::ordering(const MatrixType& a, CholMatrixTy { eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); - // TODO allows to configure the permutation // Note that amd compute the inverse permutation { CholMatrixType C; C = a.template selfadjointView(); - // remove diagonal entries: - // seems not to be needed - // C.prune(keep_diag()); - internal::minimum_degree_ordering(C, m_Pinv); + + OrderingType ordering; + ordering(C,m_Pinv); } if(m_Pinv.size()>0) diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/AmbiVector.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/AmbiVector.h index 17fff96a7..220c6451c 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/AmbiVector.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/AmbiVector.h @@ -69,7 +69,7 @@ class AmbiVector delete[] m_buffer; if (size<1000) { - Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar); + Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar); m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl); m_buffer = new Scalar[allocSize]; } @@ -88,7 +88,7 @@ class AmbiVector Index copyElements = m_allocatedElements; m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size); Index allocSize = m_allocatedElements * sizeof(ListEl); - allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0); + allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar); Scalar* newBuffer = new Scalar[allocSize]; memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl)); delete[] m_buffer; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/CompressedStorage.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/CompressedStorage.h index 3321fab4a..a667cb56e 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/CompressedStorage.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/CompressedStorage.h @@ -51,8 +51,8 @@ class CompressedStorage CompressedStorage& operator=(const CompressedStorage& other) { resize(other.size()); - memcpy(m_values, other.m_values, m_size * sizeof(Scalar)); - memcpy(m_indices, other.m_indices, m_size * sizeof(Index)); + internal::smart_copy(other.m_values, other.m_values + m_size, m_values); + internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices); return *this; } @@ -83,10 +83,10 @@ class CompressedStorage reallocate(m_size); } - void resize(size_t size, float reserveSizeFactor = 0) + void resize(size_t size, double reserveSizeFactor = 0) { if (m_allocatedSize1) std::sort(indices.data(),indices.data()+nnz); - for(int k=0; k RowMajorMatrix; - typedef SparseMatrix ColMajorMatrix; + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix resCol(lhs.rows(),rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); // sort the non zeros: @@ -149,7 +149,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef 
SparseMatrix RowMajorMatrix; RowMajorMatrix rhsRow = rhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhsRow, lhs, resRow); @@ -162,7 +162,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef SparseMatrix RowMajorMatrix; RowMajorMatrix lhsRow = lhs; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhsRow, resRow); @@ -175,7 +175,7 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; + typedef SparseMatrix RowMajorMatrix; RowMajorMatrix resRow(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); res = resRow; @@ -190,7 +190,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhs, resCol); res = resCol; @@ -202,7 +202,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix lhsCol = lhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhsCol, rhs, resCol); @@ -215,7 +215,7 @@ struct conservative_sparse_sparse_product_selector ColMajorMatrix; + typedef SparseMatrix ColMajorMatrix; ColMajorMatrix rhsCol = rhs; ColMajorMatrix resCol(lhs.rows(), rhs.cols()); internal::conservative_sparse_sparse_product_impl(lhs, rhsCol, resCol); @@ -228,8 +228,8 @@ struct conservative_sparse_sparse_product_selector RowMajorMatrix; - typedef SparseMatrix ColMajorMatrix; + typedef SparseMatrix RowMajorMatrix; + typedef SparseMatrix ColMajorMatrix; RowMajorMatrix resRow(lhs.rows(),rhs.cols()); internal::conservative_sparse_sparse_product_impl(rhs, lhs, resRow); // sort the non zeros: diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h index 93cd4832d..ab1a266a9 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h @@ -50,6 +50,8 @@ class MappedSparseMatrix inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } inline Index innerSize() const { return m_innerSize; } inline Index outerSize() const { return m_outerSize; } + + bool isCompressed() const { return true; } //---------------------------------------- // direct access interface diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseBlock.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseBlock.h index 0b3e193db..4f4983508 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseBlock.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseBlock.h @@ -57,6 +57,16 @@ public: inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols) : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols) {} + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(row + IsRowMajor ? m_outerStart : 0, col +IsRowMajor ? 0 : m_outerStart); + } + + inline const Scalar coeff(int index) const + { + return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); + } EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); } @@ -66,6 +76,10 @@ public: typename XprType::Nested m_matrix; Index m_outerStart; const internal::variable_if_dynamic m_outerSize; + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) + private: + Index nonZeros() const; }; @@ -80,6 +94,7 @@ class BlockImpl,BlockRows,BlockCols,true typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; typedef typename internal::remove_all::type _MatrixTypeNested; typedef Block BlockType; + typedef Block ConstBlockType; public: enum { IsRowMajor = internal::traits::IsRowMajor }; EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) @@ -221,6 +236,21 @@ public: else return Map >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); } + + inline Scalar& coeffRef(int row, int col) + { + return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); + } + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); + } + + inline const Scalar coeff(int index) const + { + return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); + } const Scalar& lastCoeff() const { @@ -243,6 +273,104 @@ public: }; + +template +class BlockImpl,BlockRows,BlockCols,true,Sparse> + : public SparseMatrixBase,BlockRows,BlockCols,true> > +{ + typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType; + typedef typename internal::remove_all::type _MatrixTypeNested; + typedef Block BlockType; +public: + enum { IsRowMajor = internal::traits::IsRowMajor }; + EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType) +protected: + enum { OuterSize = IsRowMajor ? BlockRows : BlockCols }; +public: + + class InnerIterator: public SparseMatrixType::InnerIterator + { + public: + inline InnerIterator(const BlockType& xpr, Index outer) + : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + {} + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? this->index() : m_outer; } + protected: + Index m_outer; + }; + class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator + { + public: + inline ReverseInnerIterator(const BlockType& xpr, Index outer) + : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + {} + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? this->index() : m_outer; } + protected: + Index m_outer; + }; + + inline BlockImpl(const SparseMatrixType& xpr, int i) + : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize) + {} + + inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols) + : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? 
blockRows : blockCols) + {} + + inline const Scalar* valuePtr() const + { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; } + + inline const Index* innerIndexPtr() const + { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; } + + inline const Index* outerIndexPtr() const + { return m_matrix.outerIndexPtr() + m_outerStart; } + + Index nonZeros() const + { + if(m_matrix.isCompressed()) + return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]) + - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]); + else if(m_outerSize.value()==0) + return 0; + else + return Map >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum(); + } + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); + } + + inline const Scalar coeff(int index) const + { + return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); + } + + const Scalar& lastCoeff() const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl); + eigen_assert(nonZeros()>0); + if(m_matrix.isCompressed()) + return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1]; + else + return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1]; + } + + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + + protected: + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) + + typename SparseMatrixType::Nested m_matrix; + Index m_outerStart; + const internal::variable_if_dynamic m_outerSize; +}; + //---------- /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this @@ -263,7 +391,8 @@ const typename SparseMatrixBase::ConstInnerVectorReturnType SparseMatri * is col-major (resp. row-major). */ template -Block SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) +typename SparseMatrixBase::InnerVectorsReturnType +SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) { return Block(derived(), IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, @@ -275,7 +404,8 @@ Block SparseMatrixBase::innerVectors(Inde * is col-major (resp. row-major). Read-only. */ template -const Block SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) const +const typename SparseMatrixBase::ConstInnerVectorsReturnType +SparseMatrixBase::innerVectors(Index outerStart, Index outerSize) const { return Block(derived(), IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart, @@ -302,8 +432,8 @@ public: : m_matrix(xpr), m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), - m_blockRows(xpr.rows()), - m_blockCols(xpr.cols()) + m_blockRows(BlockRows==1 ? 1 : xpr.rows()), + m_blockCols(BlockCols==1 ? 
1 : xpr.cols()) {} /** Dynamic-size constructor @@ -391,15 +521,19 @@ public: protected: friend class InnerIterator; friend class ReverseInnerIterator; + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) typename XprType::Nested m_matrix; const internal::variable_if_dynamic m_startRow; const internal::variable_if_dynamic m_startCol; const internal::variable_if_dynamic m_blockRows; const internal::variable_if_dynamic m_blockCols; - + private: + Index nonZeros() const; }; } // end namespace Eigen #endif // EIGEN_SPARSE_BLOCK_H + diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseColEtree.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseColEtree.h index f89ca3814..f8745f461 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseColEtree.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseColEtree.h @@ -63,6 +63,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl typedef typename MatrixType::Index Index; Index nc = mat.cols(); // Number of columns Index m = mat.rows(); + Index diagSize = (std::min)(nc,m); IndexVector root(nc); // root of subtree of etree root.setZero(); IndexVector pp(nc); // disjoint sets @@ -72,7 +73,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl Index row,col; firstRowElt.resize(m); firstRowElt.setConstant(nc); - firstRowElt.segment(0, nc).setLinSpaced(nc, 0, nc-1); + firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1); bool found_diag; for (col = 0; col < nc; col++) { @@ -91,7 +92,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl Index rset, cset, rroot; for (col = 0; col < nc; col++) { - found_diag = false; + found_diag = col>=m; pp(col) = col; cset = col; root(cset) = col; @@ -105,6 +106,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl Index i = col; if(it) i = it.index(); if (i == col) found_diag = true; + row = firstRowElt(i); if (row >= col) continue; rset = internal::etree_find(row, pp); // Find the name of the set containing row diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h index ec86ca933..546273759 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h @@ -55,10 +55,9 @@ class CwiseBinaryOpImpl EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) CwiseBinaryOpImpl() { - typedef typename internal::traits::StorageKind LhsStorageKind; - typedef typename internal::traits::StorageKind RhsStorageKind; EIGEN_STATIC_ASSERT(( - (!internal::is_same::value) + (!internal::is_same::StorageKind, + typename internal::traits::StorageKind>::value) || ((Lhs::Flags&RowMajorBit) == (Rhs::Flags&RowMajorBit))), THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH); } @@ -73,7 +72,8 @@ class CwiseBinaryOpImpl::InnerIterator typedef internal::sparse_cwise_binary_op_inner_iterator_selector< BinaryOp,Lhs,Rhs, InnerIterator> Base; - EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer) + // NOTE: we have to prefix Index by "typename Lhs::" to avoid an ICE with VC11 + EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename Lhs::Index outer) : Base(binOp.derived(),outer) {} }; @@ -313,10 +313,10 @@ SparseMatrixBase::operator+=(const SparseMatrixBase& othe template template -EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE +EIGEN_STRONG_INLINE const 
typename SparseMatrixBase::template CwiseProductDenseReturnType::Type SparseMatrixBase::cwiseProduct(const MatrixBase &other) const { - return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived()); + return typename CwiseProductDenseReturnType::Type(derived(), other.derived()); } } // end namespace Eigen diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseDenseProduct.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseDenseProduct.h index 30975c29c..ccb6ae7b7 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseDenseProduct.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseDenseProduct.h @@ -19,7 +19,10 @@ template struct SparseDenseProductRet template struct SparseDenseProductReturnType { - typedef SparseDenseOuterProduct Type; + typedef typename internal::conditional< + Lhs::IsRowMajor, + SparseDenseOuterProduct, + SparseDenseOuterProduct >::type Type; }; template struct DenseSparseProductReturnType @@ -29,7 +32,10 @@ template struct DenseSparseProductRet template struct DenseSparseProductReturnType { - typedef SparseDenseOuterProduct Type; + typedef typename internal::conditional< + Rhs::IsRowMajor, + SparseDenseOuterProduct, + SparseDenseOuterProduct >::type Type; }; namespace internal { @@ -114,18 +120,31 @@ class SparseDenseOuterProduct::InnerIterator : public _LhsNes typedef typename SparseDenseOuterProduct::Index Index; public: EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer) - : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer)) - { - } + : Base(prod.lhs(), 0), m_outer(outer), m_factor(get(prod.rhs(), outer, typename internal::traits::StorageKind() )) + { } inline Index outer() const { return m_outer; } - inline Index row() const { return Transpose ? Base::row() : m_outer; } - inline Index col() const { return Transpose ? m_outer : Base::row(); } + inline Index row() const { return Transpose ? m_outer : Base::index(); } + inline Index col() const { return Transpose ? 
Base::index() : m_outer; } inline Scalar value() const { return Base::value() * m_factor; } protected: - int m_outer; + static Scalar get(const _RhsNested &rhs, Index outer, Dense = Dense()) + { + return rhs.coeff(outer); + } + + static Scalar get(const _RhsNested &rhs, Index outer, Sparse = Sparse()) + { + typename Traits::_RhsNested::InnerIterator it(rhs, outer); + if (it && it.index()==0) + return it.value(); + + return Scalar(0); + } + + Index m_outer; Scalar m_factor; }; @@ -155,13 +174,13 @@ struct sparse_time_dense_product_impl -template -inline const typename SparseDenseProductReturnType::Type -SparseMatrixBase::operator*(const MatrixBase &other) const -{ - return typename SparseDenseProductReturnType::Type(derived(), other.derived()); -} - } // end namespace Eigen #endif // EIGEN_SPARSEDENSEPRODUCT_H diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseMatrix.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseMatrix.h index adceafe18..2ff201551 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseMatrix.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseMatrix.h @@ -223,7 +223,7 @@ class SparseMatrix if(isCompressed()) { - reserve(VectorXi::Constant(outerSize(), 2)); + reserve(Matrix::Constant(outerSize(), 2)); } return insertUncompressed(row,col); } @@ -402,7 +402,7 @@ class SparseMatrix * \sa insertBack, insertBackByOuterInner */ inline void startVec(Index outer) { - eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially"); + eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially"); eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially"); m_outerIndex[outer+1] = m_outerIndex[outer]; } @@ -480,7 +480,7 @@ class SparseMatrix if(m_innerNonZeros != 0) return; m_innerNonZeros = static_cast(std::malloc(m_outerSize * sizeof(Index))); - for (int i = 0; i < m_outerSize; i++) + for (Index i = 0; i < m_outerSize; i++) { m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; } @@ -691,7 +691,8 @@ class SparseMatrix m_data.swap(other.m_data); } - /** Sets *this to the identity matrix */ + /** Sets *this to the identity matrix. + * This function also turns the matrix into compressed mode, and drop any reserved memory. 
*/ inline void setIdentity() { eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES"); @@ -699,6 +700,8 @@ class SparseMatrix Eigen::Map >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); Eigen::Map >(&this->m_data.value(0), rows()).setOnes(); Eigen::Map >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); + std::free(m_innerNonZeros); + m_innerNonZeros = 0; } inline SparseMatrix& operator=(const SparseMatrix& other) { @@ -752,8 +755,8 @@ class SparseMatrix else for (Index i=0; i trMat(mat.rows(),mat.cols()); + typedef typename SparseMatrixType::Index Index; + SparseMatrix trMat(mat.rows(),mat.cols()); - if(begin wi(trMat.outerSize()); wi.setZero(); for(InputIterator it(begin); it!=end; ++it) { @@ -1018,11 +1022,11 @@ void SparseMatrix::sumupDuplicates() { eigen_assert(!isCompressed()); // TODO, in practice we should be able to use m_innerNonZeros for that task - VectorXi wi(innerSize()); + Matrix wi(innerSize()); wi.fill(-1); Index count = 0; // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers - for(int j=0; j& SparseMatrix positions(dest.outerSize()); for (Index j=0; j::Scalar& Sparse size_t p = m_outerIndex[outer+1]; ++m_outerIndex[outer+1]; - float reallocRatio = 1; + double reallocRatio = 1; if (m_data.allocatedSize()<=m_data.size()) { // if there is no preallocated memory, let's reserve a minimum of 32 elements @@ -1189,13 +1193,13 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse { // we need to reallocate the data, to reduce multiple reallocations // we use a smart resize algorithm based on the current filling ratio - // in addition, we use float to avoid integers overflows - float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1); - reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size()); + // in addition, we use double to avoid integers overflows + double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1); + reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size()); // furthermore we bound the realloc ratio to: // 1) reduce multiple minor realloc when the matrix is almost filled // 2) avoid to allocate too much memory when the matrix is almost empty - reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f); + reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.); } } m_data.resize(m_data.size()+1,reallocRatio); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseMatrixBase.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseMatrixBase.h index 706f699b8..9341d9ad2 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseMatrixBase.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseMatrixBase.h @@ -23,7 +23,14 @@ namespace Eigen { * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN. 
*/ -template class SparseMatrixBase : public EigenBase +template class SparseMatrixBase +#ifndef EIGEN_PARSED_BY_DOXYGEN + : public internal::special_scalar_op_base::Scalar, + typename NumTraits::Scalar>::Real, + EigenBase > +#else + : public EigenBase +#endif // not EIGEN_PARSED_BY_DOXYGEN { public: @@ -36,7 +43,6 @@ template class SparseMatrixBase : public EigenBase >::type PacketReturnType; typedef SparseMatrixBase StorageBaseType; - typedef EigenBase Base; template Derived& operator=(const EigenBase &other) @@ -132,6 +138,9 @@ template class SparseMatrixBase : public EigenBase inline Derived& derived() { return *static_cast(this); } inline Derived& const_cast_derived() const { return *static_cast(const_cast(this)); } + + typedef internal::special_scalar_op_base > Base; + using Base::operator*; #endif // not EIGEN_PARSED_BY_DOXYGEN #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase @@ -302,8 +311,8 @@ template class SparseMatrixBase : public EigenBase } else { - SparseMatrix trans = m; - s << static_cast >&>(trans); + SparseMatrix trans = m; + s << static_cast >&>(trans); } } return s; @@ -317,20 +326,18 @@ template class SparseMatrixBase : public EigenBase Derived& operator*=(const Scalar& other); Derived& operator/=(const Scalar& other); - #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \ - CwiseBinaryOp< \ - internal::scalar_product_op< \ - typename internal::scalar_product_traits< \ - typename internal::traits::Scalar, \ - typename internal::traits::Scalar \ - >::ReturnType \ - >, \ - const Derived, \ - const OtherDerived \ - > + template struct CwiseProductDenseReturnType { + typedef CwiseBinaryOp::Scalar, + typename internal::traits::Scalar + >::ReturnType>, + const Derived, + const OtherDerived + > Type; + }; template - EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE + EIGEN_STRONG_INLINE const typename CwiseProductDenseReturnType::Type cwiseProduct(const MatrixBase &other) const; // sparse * sparse @@ -358,7 +365,8 @@ template class SparseMatrixBase : public EigenBase /** sparse * dense (returns a dense object unless it is an outer product) */ template const typename SparseDenseProductReturnType::Type - operator*(const MatrixBase &other) const; + operator*(const MatrixBase &other) const + { return typename SparseDenseProductReturnType::Type(derived(), other.derived()); } /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */ SparseSymmetricPermutationProduct twistedBy(const PermutationMatrix& perm) const @@ -403,8 +411,10 @@ template class SparseMatrixBase : public EigenBase const ConstInnerVectorReturnType innerVector(Index outer) const; // set of inner-vectors - Block innerVectors(Index outerStart, Index outerSize); - const Block innerVectors(Index outerStart, Index outerSize) const; + typedef Block InnerVectorsReturnType; + typedef Block ConstInnerVectorsReturnType; + InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize); + const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const; /** \internal use operator= */ template diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparsePermutation.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparsePermutation.h index b897b7595..75e210009 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparsePermutation.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparsePermutation.h @@ -57,11 +57,11 @@ struct permut_sparsematrix_product_retval if(MoveOuter) { SparseMatrix tmp(m_matrix.rows(), 
m_matrix.cols()); - VectorXi sizes(m_matrix.outerSize()); + Matrix sizes(m_matrix.outerSize()); for(Index j=0; j tmp(m_matrix.rows(), m_matrix.cols()); - VectorXi sizes(tmp.outerSize()); + Matrix sizes(tmp.outerSize()); sizes.setZero(); PermutationMatrix perm; if((Side==OnTheLeft) ^ Transposed) diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseProduct.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseProduct.h index 70b6480ef..cf7663070 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseProduct.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseProduct.h @@ -16,6 +16,7 @@ template struct SparseSparseProductReturnType { typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::Index Index; enum { LhsRowMajor = internal::traits::Flags & RowMajorBit, RhsRowMajor = internal::traits::Flags & RowMajorBit, @@ -24,11 +25,11 @@ struct SparseSparseProductReturnType }; typedef typename internal::conditional, + SparseMatrix, typename internal::nested::type>::type LhsNested; typedef typename internal::conditional, + SparseMatrix, typename internal::nested::type>::type RhsNested; typedef SparseSparseProduct Type; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h index 70857c7b6..fcc18f5c9 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseSparseProductWithPruning.h @@ -27,7 +27,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r // make sure to call innerSize/outerSize since we fake the storage order. Index rows = lhs.innerSize(); Index cols = rhs.outerSize(); - //int size = lhs.outerSize(); + //Index size = lhs.outerSize(); eigen_assert(lhs.outerSize() == rhs.innerSize()); // allocate a temporary buffer @@ -100,7 +100,7 @@ struct sparse_sparse_product_with_pruning_selector SparseTemporaryType; + typedef SparseMatrix SparseTemporaryType; SparseTemporaryType _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl(lhs, rhs, _res, tolerance); res = _res; @@ -126,10 +126,11 @@ struct sparse_sparse_product_with_pruning_selector ColMajorMatrix; - ColMajorMatrix colLhs(lhs); - ColMajorMatrix colRhs(rhs); - internal::sparse_sparse_product_with_pruning_impl(colLhs, colRhs, res, tolerance); + typedef SparseMatrix ColMajorMatrixLhs; + typedef SparseMatrix ColMajorMatrixRhs; + ColMajorMatrixLhs colLhs(lhs); + ColMajorMatrixRhs colRhs(rhs); + internal::sparse_sparse_product_with_pruning_impl(colLhs, colRhs, res, tolerance); // let's transpose the product to get a column x column product // typedef SparseMatrix SparseTemporaryType; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseTranspose.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseTranspose.h index 7c300ee8d..76d031d52 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseTranspose.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseTranspose.h @@ -26,7 +26,7 @@ template class TransposeImpl inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); } }; -// NOTE: VC10 trigger an ICE if don't put typename TransposeImpl:: in front of Index, +// NOTE: VC10 and VC11 trigger an ICE if don't put typename TransposeImpl:: in front of Index, // a typedef typename TransposeImpl::Index Index; // does not fix the issue. 
// An alternative is to define the nested class in the parent class itself. @@ -40,8 +40,8 @@ template class TransposeImpl::InnerItera EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl::Index outer) : Base(trans.derived().nestedExpression(), outer) {} - Index row() const { return Base::col(); } - Index col() const { return Base::row(); } + typename TransposeImpl::Index row() const { return Base::col(); } + typename TransposeImpl::Index col() const { return Base::row(); } }; template class TransposeImpl::ReverseInnerIterator @@ -54,8 +54,8 @@ template class TransposeImpl::ReverseInn EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl::Index outer) : Base(xpr.derived().nestedExpression(), outer) {} - Index row() const { return Base::col(); } - Index col() const { return Base::row(); } + typename TransposeImpl::Index row() const { return Base::col(); } + typename TransposeImpl::Index col() const { return Base::row(); } }; } // end namespace Eigen diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseUtil.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseUtil.h index 064a40707..d627546de 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseUtil.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseUtil.h @@ -67,7 +67,6 @@ const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern; const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern; const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern; -template class SparseMatrixBase; template class SparseMatrix; template class DynamicSparseMatrix; template class SparseVector; @@ -84,8 +83,10 @@ template class DenseTimeSparseProduct; template class SparseDenseOuterProduct; template struct SparseSparseProductReturnType; -template::ColsAtCompileTime> struct DenseSparseProductReturnType; -template::ColsAtCompileTime> struct SparseDenseProductReturnType; +template::ColsAtCompileTime,internal::traits::RowsAtCompileTime)> struct DenseSparseProductReturnType; +template::ColsAtCompileTime,internal::traits::RowsAtCompileTime)> struct SparseDenseProductReturnType; template class SparseSymmetricPermutationProduct; namespace internal { @@ -143,7 +144,7 @@ template struct plain_matrix_type * * \sa SparseMatrix::setFromTriplets() */ -template +template::Index > class Triplet { public: diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseVector.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseVector.h index 7e15c814b..49865d0e7 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseVector.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseVector.h @@ -158,6 +158,7 @@ class SparseVector Index inner = IsColVector ? row : col; Index outer = IsColVector ? 
col : row; + EIGEN_ONLY_USED_FOR_DEBUG(outer); eigen_assert(outer==0); return insert(inner); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseView.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseView.h index fd8450463..2820b39b8 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseView.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/SparseView.h @@ -35,9 +35,9 @@ class SparseView : public SparseMatrixBase > public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView) - SparseView(const MatrixType& mat, const Scalar& m_reference = Scalar(0), - typename NumTraits::Real m_epsilon = NumTraits::dummy_precision()) : - m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {} + explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0), + const RealScalar &epsilon = NumTraits::dummy_precision()) + : m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {} class InnerIterator; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/TriangularSolver.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/TriangularSolver.h index cb8ad82b4..ccc12af79 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/TriangularSolver.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseCore/TriangularSolver.h @@ -69,7 +69,7 @@ struct sparse_solve_triangular_selector for(int i=lhs.rows()-1 ; i>=0 ; --i) { Scalar tmp = other.coeff(i,col); - Scalar l_ii = 0; + Scalar l_ii(0); typename Lhs::InnerIterator it(lhs, i); while(it && it.index() - bool _solve(const MatrixBase &B, MatrixBase &_X) const + bool _solve(const MatrixBase &B, MatrixBase &X_base) const { - Dest& X(_X.derived()); + Dest& X(X_base.derived()); eigen_assert(m_factorizationIsOk && "The matrix should be factorized first"); EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); @@ -229,8 +229,10 @@ class SparseLU : public internal::SparseLUImplmatrixL().solveInPlace(X); @@ -258,16 +260,16 @@ class SparseLU : public internal::SparseLUImplcols(); ++j) { for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it) { - if(it.row() < j) continue; - if(it.row() == j) + if(it.index() == j) { - det *= (std::abs)(it.value()); + using std::abs; + det *= abs(it.value()); break; } } @@ -294,7 +296,8 @@ class SparseLU : public internal::SparseLUImplcols(); ++j) + { + for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it) + { + if(it.index() == j) + { + if(it.value()<0) + det = -det; + else if(it.value()==0) + return 0; + break; + } + } + } + return det * m_detPermR * m_detPermC; + } + + /** \returns The determinant of the matrix. 
+ * + * \sa absDeterminant(), logAbsDeterminant() + */ + Scalar determinant() + { + eigen_assert(m_factorizationIsOk && "The matrix should be factorized first."); + // Initialize with the determinant of the row matrix + Scalar det = Scalar(1.); + // Note that the diagonal blocks of U are stored in supernodes, + // which are available in the L part :) + for (Index j = 0; j < this->cols(); ++j) + { + for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it) + { + if(it.index() == j) + { + det *= it.value(); + break; + } + } + } + return det * Scalar(m_detPermR * m_detPermC); + } protected: // Functions void initperfvalues() { - m_perfv.panel_size = 1; + m_perfv.panel_size = 16; m_perfv.relax = 1; m_perfv.maxsuper = 128; m_perfv.rowblk = 16; @@ -344,8 +390,8 @@ class SparseLU : public internal::SparseLUImpl m_perfv; RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot - Index m_nnzL, m_nnzU; // Nonzeros in L and U factors - Index m_detPermR; // Determinant of the coefficient matrix + Index m_nnzL, m_nnzU; // Nonzeros in L and U factors + Index m_detPermR, m_detPermC; // Determinants of the permutation matrices private: // Disable copy constructor SparseLU (const SparseLU& ); @@ -621,7 +667,8 @@ void SparseLU::factorize(const MatrixType& matrix) } // Update the determinant of the row permutation matrix - if (pivrow != jj) m_detPermR *= -1; + // FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not directly the row pivot. + if (pivrow != jj) m_detPermR = -m_detPermR; // Prune columns (0:jj-1) using column jj Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu); @@ -636,10 +683,13 @@ void SparseLU::factorize(const MatrixType& matrix) jcol += panel_size; // Move to the next panel } // end for -- end elimination + m_detPermR = m_perm_r.determinant(); + m_detPermC = m_perm_c.determinant(); + // Count the number of nonzeros in factors Base::countnz(n, m_nnzL, m_nnzU, m_glu); // Apply permutation to the L subscripts - Base::fixupL(n, m_perm_r.indices(), m_glu); + Base::fixupL(n, m_perm_r.indices(), m_glu); // Create supernode matrix L m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup); @@ -699,8 +749,8 @@ struct SparseLUMatrixUReturnType : internal::no_assignment_operator } else { - Map, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) ); - Map< Matrix, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) ); + Map, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) ); + Map< Matrix, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) ); U = A.template triangularView().solve(U); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLUImpl.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLUImpl.h index 14d70897d..99d651e40 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLUImpl.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLUImpl.h @@ -21,6 +21,8 @@ class SparseLUImpl { public: typedef Matrix ScalarVector; + typedef Matrix ScalarMatrix; + typedef Map > MappedMatrixBlock; typedef Matrix IndexVector; typedef typename ScalarVector::RealScalar RealScalar; typedef Ref > BlockScalarVector; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_Memory.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_Memory.h index a5158025c..45f96d16a 100644 --- 
a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_Memory.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_Memory.h @@ -70,23 +70,30 @@ Index SparseLUImpl::expand(VectorType& vec, Index& length, Index if(num_expansions == 0 || keep_prev) new_len = length ; // First time allocate requested else - new_len = Index(alpha * length); + new_len = (std::max)(length+1,Index(alpha * length)); VectorType old_vec; // Temporary vector to hold the previous values if (nbElts > 0 ) old_vec = vec.segment(0,nbElts); //Allocate or expand the current vector - try +#ifdef EIGEN_EXCEPTIONS + try +#endif { vec.resize(new_len); } +#ifdef EIGEN_EXCEPTIONS catch(std::bad_alloc& ) +#else + if(!vec.size()) +#endif { - if ( !num_expansions ) + if (!num_expansions) { // First time to allocate from LUMemInit() - throw; // Pass the exception to LUMemInit() which has a try... catch block + // Let LUMemInit() deals with it. + return -1; } if (keep_prev) { @@ -100,12 +107,18 @@ Index SparseLUImpl::expand(VectorType& vec, Index& length, Index do { alpha = (alpha + 1)/2; - new_len = Index(alpha * length); + new_len = (std::max)(length+1,Index(alpha * length)); +#ifdef EIGEN_EXCEPTIONS try +#endif { vec.resize(new_len); } +#ifdef EIGEN_EXCEPTIONS catch(std::bad_alloc& ) +#else + if (!vec.size()) +#endif { tries += 1; if ( tries > 10) return new_len; @@ -139,10 +152,9 @@ template Index SparseLUImpl::memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size, GlobalLU_t& glu) { Index& num_expansions = glu.num_expansions; //No memory expansions so far - num_expansions = 0; - glu.nzumax = glu.nzlumax = (std::max)(fillratio * annz, m*n); // estimated number of nonzeros in U - glu.nzlmax = (std::max)(Index(4), fillratio) * annz / 4; // estimated nnz in L factor - + num_expansions = 0; + glu.nzumax = glu.nzlumax = (std::min)(fillratio * (annz+1) / n, m) * n; // estimated number of nonzeros in U + glu.nzlmax = (std::max)(Index(4), fillratio) * (annz+1) / 4; // estimated nnz in L factor // Return the estimated size to the user if necessary Index tempSpace; tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar); @@ -166,14 +178,10 @@ Index SparseLUImpl::memInit(Index m, Index n, Index annz, Index lw // Reserve memory for L/U factors do { - try - { - expand(glu.lusup, glu.nzlumax, 0, 0, num_expansions); - expand(glu.ucol,glu.nzumax, 0, 0, num_expansions); - expand(glu.lsub,glu.nzlmax, 0, 0, num_expansions); - expand(glu.usub,glu.nzumax, 0, 1, num_expansions); - } - catch(std::bad_alloc& ) + if( (expand(glu.lusup, glu.nzlumax, 0, 0, num_expansions)<0) + || (expand(glu.ucol, glu.nzumax, 0, 0, num_expansions)<0) + || (expand (glu.lsub, glu.nzlmax, 0, 0, num_expansions)<0) + || (expand (glu.usub, glu.nzumax, 0, 1, num_expansions)<0) ) { //Reduce the estimated size and retry glu.nzlumax /= 2; @@ -181,10 +189,7 @@ Index SparseLUImpl::memInit(Index m, Index n, Index annz, Index lw glu.nzlmax /= 2; if (glu.nzlumax < annz ) return glu.nzlumax; } - } while (!glu.lusup.size() || !glu.ucol.size() || !glu.lsub.size() || !glu.usub.size()); - - ++num_expansions; return 0; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h index ad6f2183f..54a569408 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h @@ -189,8 +189,8 @@ class 
MappedSuperNodalMatrix::InnerIterator m_idval(mat.colIndexPtr()[outer]), m_startidval(m_idval), m_endidval(mat.colIndexPtr()[outer+1]), - m_idrow(mat.rowIndexPtr()[outer]), - m_endidrow(mat.rowIndexPtr()[outer+1]) + m_idrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]]), + m_endidrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]+1]) {} inline InnerIterator& operator++() { @@ -236,7 +236,7 @@ void MappedSuperNodalMatrix::solveInPlace( MatrixBase&X) con Index n = X.rows(); Index nrhs = X.cols(); const Scalar * Lval = valuePtr(); // Nonzero values - Matrix work(n, nrhs); // working vector + Matrix work(n, nrhs); // working vector work.setZero(); for (Index k = 0; k <= nsuper(); k ++) { @@ -267,12 +267,12 @@ void MappedSuperNodalMatrix::solveInPlace( MatrixBase&X) con Index lda = colIndexPtr()[fsupc+1] - luptr; // Triangular solve - Map, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) ); - Map< Matrix, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) ); + Map, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) ); + Map< Matrix, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) ); U = A.template triangularView().solve(U); // Matrix-vector product - new (&A) Map, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) ); + new (&A) Map, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) ); work.block(0, 0, nrow, nrhs) = A * U; //Begin Scatter diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h index f24bd87d3..cacc7e987 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_column_bmod.h @@ -162,11 +162,11 @@ Index SparseLUImpl::column_bmod(const Index jcol, const Index nseg // points to the beginning of jcol in snode L\U(jsupno) ufirst = glu.xlusup(jcol) + d_fsupc; Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol); - Map, 0, OuterStride<> > A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) ); + MappedMatrixBlock A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) ); VectorBlock u(glu.lusup, ufirst, nsupc); u = A.template triangularView().solve(u); - new (&A) Map, 0, OuterStride<> > ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) ); + new (&A) MappedMatrixBlock ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) ); VectorBlock l(glu.lusup, ufirst+nsupc, nrow); l.noalias() -= A * u; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h index 0d0283b13..6af026754 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_kernel_bmod.h @@ -56,7 +56,7 @@ EIGEN_DONT_INLINE void LU_kernel_bmod::run(const int segsi // Dense triangular solve -- start effective triangle luptr += lda * no_zeros + no_zeros; // Form Eigen matrix and vector - Map, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) ); + Map, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) ); Map > u(tempv.data(), segsize); u = A.template triangularView().solve(u); @@ -65,7 +65,7 @@ EIGEN_DONT_INLINE void LU_kernel_bmod::run(const int segsi luptr += segsize; const Index PacketSize = internal::packet_traits::size; Index 
ldl = internal::first_multiple(nrow, PacketSize); - Map, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) ); + Map, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) ); Index aligned_offset = internal::first_aligned(tempv.data()+segsize, PacketSize); Index aligned_with_B_offset = (PacketSize-internal::first_aligned(B.data(), PacketSize))%PacketSize; Map, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) ); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h index da0e0fc3c..9d2ff2906 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_panel_bmod.h @@ -102,7 +102,7 @@ void SparseLUImpl::panel_bmod(const Index m, const Index w, const if(nsupc >= 2) { Index ldu = internal::first_multiple(u_rows, PacketSize); - Map, Aligned, OuterStride<> > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu)); + Map > U(tempv.data(), u_rows, u_cols, OuterStride<>(ldu)); // gather U Index u_col = 0; @@ -136,17 +136,17 @@ void SparseLUImpl::panel_bmod(const Index m, const Index w, const Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); no_zeros = (krep - u_rows + 1) - fsupc; luptr += lda * no_zeros + no_zeros; - Map, 0, OuterStride<> > A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) ); + MappedMatrixBlock A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) ); U = A.template triangularView().solve(U); // update luptr += u_rows; - Map, 0, OuterStride<> > B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) ); + MappedMatrixBlock B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) ); eigen_assert(tempv.size()>w*ldu + nrow*w + 1); Index ldl = internal::first_multiple(nrow, PacketSize); Index offset = (PacketSize-internal::first_aligned(B.data(), PacketSize)) % PacketSize; - Map, 0, OuterStride<> > L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl)); + MappedMatrixBlock L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl)); L.setZero(); internal::sparselu_gemm(L.rows(), L.cols(), B.cols(), B.data(), B.outerStride(), U.data(), U.outerStride(), L.data(), L.outerStride()); diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h index ddcd4ec98..2e49ef667 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseLU/SparseLU_pivotL.h @@ -71,13 +71,14 @@ Index SparseLUImpl::pivotL(const Index jcol, const RealScalar& dia // Determine the largest abs numerical value for partial pivoting Index diagind = iperm_c(jcol); // diagonal index - RealScalar pivmax = 0.0; + RealScalar pivmax(-1.0); Index pivptr = nsupc; Index diag = emptyIdxLU; RealScalar rtemp; Index isub, icol, itemp, k; for (isub = nsupc; isub < nsupr; ++isub) { - rtemp = std::abs(lu_col_ptr[isub]); + using std::abs; + rtemp = abs(lu_col_ptr[isub]); if (rtemp > pivmax) { pivmax = rtemp; pivptr = isub; @@ -86,8 +87,9 @@ Index SparseLUImpl::pivotL(const Index jcol, const RealScalar& dia } // Test for singularity - if ( pivmax == 0.0 ) { - pivrow = lsub_ptr[pivptr]; + if ( pivmax <= RealScalar(0.0) ) { + // if pivmax == -1, the column is structurally empty, otherwise it is only numerically zero + pivrow = pivmax < RealScalar(0.0) ? 
diagind : lsub_ptr[pivptr]; perm_r(pivrow) = jcol; return (jcol+1); } @@ -101,7 +103,8 @@ Index SparseLUImpl::pivotL(const Index jcol, const RealScalar& dia if (diag >= 0 ) { // Diagonal element exists - rtemp = std::abs(lu_col_ptr[diag]); + using std::abs; + rtemp = abs(lu_col_ptr[diag]); if (rtemp != 0.0 && rtemp >= thresh) pivptr = diag; } pivrow = lsub_ptr[pivptr]; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/SparseQR/SparseQR.h b/ground/gcs/src/libs/eigen/Eigen/src/SparseQR/SparseQR.h index 07c46e4b9..a00bd5db1 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/SparseQR/SparseQR.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/SparseQR/SparseQR.h @@ -2,7 +2,7 @@ // for linear algebra. // // Copyright (C) 2012-2013 Desire Nuentsa -// Copyright (C) 2012-2013 Gael Guennebaud +// Copyright (C) 2012-2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed @@ -58,6 +58,7 @@ namespace internal { * \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module * OrderingMethods \endlink module for the list of built-in and external ordering methods. * + * \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()). * */ template @@ -74,13 +75,26 @@ class SparseQR typedef Matrix ScalarVector; typedef PermutationMatrix PermutationType; public: - SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false) + SparseQR () : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) { } - SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false) + /** Construct a QR factorization of the matrix \a mat. + * + * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). + * + * \sa compute() + */ + SparseQR(const MatrixType& mat) : m_isInitialized(false), m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false) { compute(mat); } + + /** Computes the QR factorization of the sparse matrix \a mat. + * + * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). + * + * \sa analyzePattern(), factorize() + */ void compute(const MatrixType& mat) { analyzePattern(mat); @@ -161,11 +175,12 @@ class SparseQR b = y; // Solve with the triangular matrix R + y.resize((std::max)(cols(),Index(y.rows())),y.cols()); y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView().solve(b.topRows(rank)); - y.bottomRows(y.size()-rank).setZero(); + y.bottomRows(y.rows()-rank).setZero(); // Apply the column permutation - if (m_perm_c.size()) dest.topRows(cols()) = colsPermutation() * y.topRows(cols()); + if (m_perm_c.size()) dest = colsPermutation() * y.topRows(cols()); else dest = y.topRows(cols()); m_info = Success; @@ -205,7 +220,7 @@ class SparseQR /** \brief Reports whether previous computation was successful. 
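As a usage note for the compressed-mode requirement and the solve path documented in the SparseQR hunks above, a minimal least-squares sketch follows; solveSketch, A and b are placeholders not taken from the patch, and A is assumed to have full column rank.

#include <Eigen/SparseQR>
#include <Eigen/OrderingMethods>

// Hypothetical helper, not from the patch.
Eigen::VectorXd solveSketch(Eigen::SparseMatrix<double> A, const Eigen::VectorXd& b)
{
    A.makeCompressed();   // SparseQR now asserts that the input is in compressed mode
    Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr(A);
    if (qr.info() != Eigen::Success)
        return Eigen::VectorXd();   // see info(), documented just below, for the error codes
    return qr.solve(b);             // applies Q', then the triangular solve with R shown above
}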
* - * \returns \c Success if computation was succesful, + * \returns \c Success if computation was successful, * \c NumericalIssue if the QR factorization reports a numerical problem * \c InvalidInput if the input matrix is invalid * @@ -246,7 +261,8 @@ class SparseQR Index m_nonzeropivots; // Number of non zero pivots found IndexVector m_etree; // Column elimination tree IndexVector m_firstRowElt; // First element in each row - bool m_isQSorted; // whether Q is sorted or not + bool m_isQSorted; // whether Q is sorted or not + bool m_isEtreeOk; // whether the elimination tree match the initial input matrix template friend struct SparseQR_QProduct; template friend struct SparseQRMatrixQReturnType; @@ -254,20 +270,26 @@ class SparseQR }; /** \brief Preprocessing step of a QR factorization + * + * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()). * * In this step, the fill-reducing permutation is computed and applied to the columns of A - * and the column elimination tree is computed as well. Only the sparcity pattern of \a mat is exploited. + * and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited. * * \note In this step it is assumed that there is no empty row in the matrix \a mat. */ template void SparseQR::analyzePattern(const MatrixType& mat) { + eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to SparseQR"); + // Copy to a column major matrix if the input is rowmajor + typename internal::conditional::type matCpy(mat); // Compute the column fill reducing ordering OrderingType ord; - ord(mat, m_perm_c); + ord(matCpy, m_perm_c); Index n = mat.cols(); Index m = mat.rows(); + Index diagSize = (std::min)(m,n); if (!m_perm_c.size()) { @@ -277,22 +299,23 @@ void SparseQR::analyzePattern(const MatrixType& mat) // Compute the column elimination tree of the permuted matrix m_outputPerm_c = m_perm_c.inverse(); - internal::coletree(mat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); + internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); + m_isEtreeOk = true; - m_R.resize(n, n); - m_Q.resize(m, n); + m_R.resize(m, n); + m_Q.resize(m, diagSize); // Allocate space for nonzero elements : rough estimation m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree m_Q.reserve(2*mat.nonZeros()); - m_hcoeffs.resize(n); + m_hcoeffs.resize(diagSize); m_analysisIsok = true; } /** \brief Performs the numerical QR factorization of the input matrix * * The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with - * a matrix having the same sparcity pattern than \a mat. + * a matrix having the same sparsity pattern than \a mat. 
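The factorize() documentation above implies a two-step workflow when several matrices share one sparsity pattern; a small sketch of that pattern follows. refactorizeSketch, A1 and A2 are illustrative; both matrices are assumed compressed and structurally identical.

#include <Eigen/SparseQR>

// Hypothetical helper, not from the patch.
void refactorizeSketch(const Eigen::SparseMatrix<double>& A1,
                       const Eigen::SparseMatrix<double>& A2)
{
    Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr;
    qr.analyzePattern(A1);   // fill-reducing ordering + column elimination tree, pattern only
    qr.factorize(A1);        // numerical factorization of the first matrix
    qr.factorize(A2);        // assumed to share A1's pattern; the cached etree (m_isEtreeOk) is reused
}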
* * \param mat The sparse column-major matrix */ @@ -305,23 +328,47 @@ void SparseQR::factorize(const MatrixType& mat) eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step"); Index m = mat.rows(); Index n = mat.cols(); - IndexVector mark(m); mark.setConstant(-1); // Record the visited nodes - IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q - Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q - ScalarVector tval(m); // The dense vector used to compute the current column - bool found_diag; - + Index diagSize = (std::min)(m,n); + IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes + IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q + Index nzcolR, nzcolQ; // Number of nonzero for the current column of R and Q + ScalarVector tval(m); // The dense vector used to compute the current column + RealScalar pivotThreshold = m_threshold; + + m_R.setZero(); + m_Q.setZero(); m_pmat = mat; - m_pmat.uncompress(); // To have the innerNonZeroPtr allocated - // Apply the fill-in reducing permutation lazily: - for (int i = 0; i < n; i++) + if(!m_isEtreeOk) { - Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i; - m_pmat.outerIndexPtr()[p] = mat.outerIndexPtr()[i]; - m_pmat.innerNonZeroPtr()[p] = mat.outerIndexPtr()[i+1] - mat.outerIndexPtr()[i]; + m_outputPerm_c = m_perm_c.inverse(); + internal::coletree(m_pmat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data()); + m_isEtreeOk = true; + } + + m_pmat.uncompress(); // To have the innerNonZeroPtr allocated + + // Apply the fill-in reducing permutation lazily: + { + // If the input is row major, copy the original column indices, + // otherwise directly use the input matrix + // + IndexVector originalOuterIndicesCpy; + const Index *originalOuterIndices = mat.outerIndexPtr(); + if(MatrixType::IsRowMajor) + { + originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1); + originalOuterIndices = originalOuterIndicesCpy.data(); + } + + for (int i = 0; i < n; i++) + { + Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i; + m_pmat.outerIndexPtr()[p] = originalOuterIndices[i]; + m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i]; + } } - /* Compute the default threshold, see : + /* Compute the default threshold as in MatLab, see: * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing * Sparse QR Factorization, ACM Trans. on Math. Soft. 
38(1), 2011, Page 8:3 */ @@ -329,33 +376,35 @@ void SparseQR::factorize(const MatrixType& mat) { RealScalar max2Norm = 0.0; for (int j = 0; j < n; j++) max2Norm = (max)(max2Norm, m_pmat.col(j).norm()); - m_threshold = 20 * (m + n) * max2Norm * NumTraits::epsilon(); + if(max2Norm==RealScalar(0)) + max2Norm = RealScalar(1); + pivotThreshold = 20 * (m + n) * max2Norm * NumTraits::epsilon(); } // Initialize the numerical permutation m_pivotperm.setIdentity(n); Index nonzeroCol = 0; // Record the number of valid pivots - + m_Q.startVec(0); + // Left looking rank-revealing QR factorization: compute a column of R and Q at a time for (Index col = 0; col < n; ++col) { mark.setConstant(-1); m_R.startVec(col); - m_Q.startVec(col); mark(nonzeroCol) = col; Qidx(0) = nonzeroCol; nzcolR = 0; nzcolQ = 1; - found_diag = false; + bool found_diag = nonzeroCol>=m; tval.setZero(); // Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e., // all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k. // Note: if the diagonal entry does not exist, then its contribution must be explicitly added, // thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found. - for (typename MatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) + for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) { - Index curIdx = nonzeroCol ; + Index curIdx = nonzeroCol; if(itp) curIdx = itp.row(); if(curIdx == nonzeroCol) found_diag = true; @@ -397,7 +446,7 @@ void SparseQR::factorize(const MatrixType& mat) // Browse all the indexes of R(:,col) in reverse order for (Index i = nzcolR-1; i >= 0; i--) { - Index curIdx = m_pivotperm.indices()(Ridx(i)); + Index curIdx = Ridx(i); // Apply the curIdx-th householder vector to the current column (temporarily stored into tval) Scalar tdot(0); @@ -426,33 +475,36 @@ void SparseQR::factorize(const MatrixType& mat) } } } // End update current column - - // Compute the Householder reflection that eliminate the current column - // FIXME this step should call the Householder module. - Scalar tau; - RealScalar beta; - Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0); - // First, the squared norm of Q((col+1):m, col) - RealScalar sqrNorm = 0.; - for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq))); + Scalar tau = 0; + RealScalar beta = 0; - if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0)) + if(nonzeroCol < diagSize) { - tau = RealScalar(0); - beta = numext::real(c0); - tval(Qidx(0)) = 1; - } - else - { - beta = std::sqrt(numext::abs2(c0) + sqrNorm); - if(numext::real(c0) >= RealScalar(0)) - beta = -beta; - tval(Qidx(0)) = 1; - for (Index itq = 1; itq < nzcolQ; ++itq) - tval(Qidx(itq)) /= (c0 - beta); - tau = numext::conj((beta-c0) / beta); - + // Compute the Householder reflection that eliminate the current column + // FIXME this step should call the Householder module. + Scalar c0 = nzcolQ ? 
tval(Qidx(0)) : Scalar(0); + + // First, the squared norm of Q((col+1):m, col) + RealScalar sqrNorm = 0.; + for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq))); + if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0)) + { + beta = numext::real(c0); + tval(Qidx(0)) = 1; + } + else + { + using std::sqrt; + beta = sqrt(numext::abs2(c0) + sqrNorm); + if(numext::real(c0) >= RealScalar(0)) + beta = -beta; + tval(Qidx(0)) = 1; + for (Index itq = 1; itq < nzcolQ; ++itq) + tval(Qidx(itq)) /= (c0 - beta); + tau = numext::conj((beta-c0) / beta); + + } } // Insert values in R @@ -466,45 +518,49 @@ void SparseQR::factorize(const MatrixType& mat) } } - if(abs(beta) >= m_threshold) + if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold) { m_R.insertBackByOuterInner(col, nonzeroCol) = beta; - nonzeroCol++; // The householder coefficient - m_hcoeffs(col) = tau; + m_hcoeffs(nonzeroCol) = tau; // Record the householder reflections for (Index itq = 0; itq < nzcolQ; ++itq) { Index iQ = Qidx(itq); - m_Q.insertBackByOuterInnerUnordered(col,iQ) = tval(iQ); + m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ); tval(iQ) = Scalar(0.); - } + } + nonzeroCol++; + if(nonzeroCol void evalTo(DesType& res) const { + Index m = m_qr.rows(); Index n = m_qr.cols(); + Index diagSize = (std::min)(m,n); res = m_other; if (m_transpose) { eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes"); //Compute res = Q' * other column by column for(Index j = 0; j < res.cols(); j++){ - for (Index k = 0; k < n; k++) + for (Index k = 0; k < diagSize; k++) { Scalar tau = Scalar(0); tau = m_qr.m_Q.col(k).dot(res.col(j)); + if(tau==Scalar(0)) continue; tau = tau * m_qr.m_hcoeffs(k); res.col(j) -= tau * m_qr.m_Q.col(k); } @@ -578,14 +637,15 @@ struct SparseQR_QProduct : ReturnByValue=0; k--) + for (Index k = diagSize-1; k >=0; k--) { Scalar tau = Scalar(0); tau = m_qr.m_Q.col(k).dot(res.col(j)); + if(tau==Scalar(0)) continue; tau = tau * m_qr.m_hcoeffs(k); res.col(j) -= tau * m_qr.m_Q.col(k); } @@ -615,7 +675,7 @@ struct SparseQRMatrixQReturnType : public EigenBase(m_qr); } inline Index rows() const { return m_qr.rows(); } - inline Index cols() const { return m_qr.cols(); } + inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); } // To use for operations with the transpose of Q SparseQRMatrixQTransposeReturnType transpose() const { diff --git a/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdDeque.h b/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdDeque.h index 4ee8e5c10..69a46b2b8 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdDeque.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdDeque.h @@ -11,14 +11,7 @@ #ifndef EIGEN_STDDEQUE_H #define EIGEN_STDDEQUE_H -#include "Eigen/src/StlSupport/details.h" - -// Define the explicit instantiation (e.g. necessary for the Intel compiler) -#if defined(__INTEL_COMPILER) || defined(__GNUC__) - #define EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(...) template class std::deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >; -#else - #define EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(...) -#endif +#include "details.h" /** * This section contains a convenience MACRO which allows an easy specialization of @@ -26,19 +19,18 @@ * is used automatically. */ #define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) 
\ -EIGEN_EXPLICIT_STL_DEQUE_INSTANTIATION(__VA_ARGS__) \ namespace std \ { \ - template \ - class deque<__VA_ARGS__, _Ay> \ + template<> \ + class deque<__VA_ARGS__, std::allocator<__VA_ARGS__> > \ : public deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ { \ typedef deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > deque_base; \ public: \ typedef __VA_ARGS__ value_type; \ - typedef typename deque_base::allocator_type allocator_type; \ - typedef typename deque_base::size_type size_type; \ - typedef typename deque_base::iterator iterator; \ + typedef deque_base::allocator_type allocator_type; \ + typedef deque_base::size_type size_type; \ + typedef deque_base::iterator iterator; \ explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {} \ template \ deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \ diff --git a/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdList.h b/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdList.h index 627381ece..050c2373e 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdList.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdList.h @@ -10,14 +10,7 @@ #ifndef EIGEN_STDLIST_H #define EIGEN_STDLIST_H -#include "Eigen/src/StlSupport/details.h" - -// Define the explicit instantiation (e.g. necessary for the Intel compiler) -#if defined(__INTEL_COMPILER) || defined(__GNUC__) - #define EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(...) template class std::list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> >; -#else - #define EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(...) -#endif +#include "details.h" /** * This section contains a convenience MACRO which allows an easy specialization of @@ -25,19 +18,18 @@ * is used automatically. */ #define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) 
\ -EIGEN_EXPLICIT_STL_LIST_INSTANTIATION(__VA_ARGS__) \ namespace std \ { \ - template \ - class list<__VA_ARGS__, _Ay> \ + template<> \ + class list<__VA_ARGS__, std::allocator<__VA_ARGS__> > \ : public list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ { \ typedef list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > list_base; \ public: \ typedef __VA_ARGS__ value_type; \ - typedef typename list_base::allocator_type allocator_type; \ - typedef typename list_base::size_type size_type; \ - typedef typename list_base::iterator iterator; \ + typedef list_base::allocator_type allocator_type; \ + typedef list_base::size_type size_type; \ + typedef list_base::iterator iterator; \ explicit list(const allocator_type& a = allocator_type()) : list_base(a) {} \ template \ list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \ diff --git a/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdVector.h b/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdVector.h index 40a9abefa..611664a2e 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdVector.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/StlSupport/StdVector.h @@ -11,7 +11,7 @@ #ifndef EIGEN_STDVECTOR_H #define EIGEN_STDVECTOR_H -#include "Eigen/src/StlSupport/details.h" +#include "details.h" /** * This section contains a convenience MACRO which allows an easy specialization of diff --git a/ground/gcs/src/libs/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h b/ground/gcs/src/libs/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h index 3a48cecf7..29c60c378 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/UmfPackSupport/UmfPackSupport.h @@ -107,6 +107,16 @@ inline int umfpack_get_determinant(std::complex *Mx, double *Ex, void *N return umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info); } +namespace internal { + template struct umfpack_helper_is_sparse_plain : false_type {}; + template + struct umfpack_helper_is_sparse_plain > + : true_type {}; + template + struct umfpack_helper_is_sparse_plain > + : true_type {}; +} + /** \ingroup UmfPackSupport_Module * \brief A sparse LU factorization and solver based on UmfPack * @@ -192,10 +202,14 @@ class UmfPackLU : internal::noncopyable * Note that the matrix should be column-major, and in compressed format for best performance. * \sa SparseMatrix::makeCompressed(). */ - void compute(const MatrixType& matrix) + template + void compute(const InputMatrixType& matrix) { - analyzePattern(matrix); - factorize(matrix); + if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar()); + if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar()); + grapInput(matrix.derived()); + analyzePattern_impl(); + factorize_impl(); } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. 
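The reworked EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION / EIGEN_DEFINE_STL_LIST_SPECIALIZATION macros above are meant to be invoked once at global namespace scope, so that the default-allocator container transparently picks up the aligned-allocator specialization. A short sketch for the deque case; the element type Eigen::Vector4f and the function name are illustrative.

#include <Eigen/StdDeque>

// One-time declaration, before the container type is used anywhere.
EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Eigen::Vector4f)

// Hypothetical usage, not from the patch.
void fillSketch()
{
    std::deque<Eigen::Vector4f> d;            // safe despite the 16-byte alignment of Vector4f
    d.push_back(Eigen::Vector4f::Zero());
}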
@@ -230,23 +244,15 @@ class UmfPackLU : internal::noncopyable * * \sa factorize(), compute() */ - void analyzePattern(const MatrixType& matrix) + template + void analyzePattern(const InputMatrixType& matrix) { - if(m_symbolic) - umfpack_free_symbolic(&m_symbolic,Scalar()); - if(m_numeric) - umfpack_free_numeric(&m_numeric,Scalar()); + if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar()); + if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar()); - grapInput(matrix); + grapInput(matrix.derived()); - int errorCode = 0; - errorCode = umfpack_symbolic(matrix.rows(), matrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr, - &m_symbolic, 0, 0); - - m_isInitialized = true; - m_info = errorCode ? InvalidInput : Success; - m_analysisIsOk = true; - m_factorizationIsOk = false; + analyzePattern_impl(); } /** Performs a numeric decomposition of \a matrix @@ -255,20 +261,16 @@ class UmfPackLU : internal::noncopyable * * \sa analyzePattern(), compute() */ - void factorize(const MatrixType& matrix) + template + void factorize(const InputMatrixType& matrix) { eigen_assert(m_analysisIsOk && "UmfPackLU: you must first call analyzePattern()"); if(m_numeric) umfpack_free_numeric(&m_numeric,Scalar()); - grapInput(matrix); - - int errorCode; - errorCode = umfpack_numeric(m_outerIndexPtr, m_innerIndexPtr, m_valuePtr, - m_symbolic, &m_numeric, 0, 0); - - m_info = errorCode ? NumericalIssue : Success; - m_factorizationIsOk = true; + grapInput(matrix.derived()); + + factorize_impl(); } #ifndef EIGEN_PARSED_BY_DOXYGEN @@ -283,19 +285,20 @@ class UmfPackLU : internal::noncopyable protected: - void init() { - m_info = InvalidInput; - m_isInitialized = false; - m_numeric = 0; - m_symbolic = 0; - m_outerIndexPtr = 0; - m_innerIndexPtr = 0; - m_valuePtr = 0; + m_info = InvalidInput; + m_isInitialized = false; + m_numeric = 0; + m_symbolic = 0; + m_outerIndexPtr = 0; + m_innerIndexPtr = 0; + m_valuePtr = 0; + m_extractedDataAreDirty = true; } - void grapInput(const MatrixType& mat) + template + void grapInput_impl(const InputMatrixType& mat, internal::true_type) { m_copyMatrix.resize(mat.rows(), mat.cols()); if( ((MatrixType::Flags&RowMajorBit)==RowMajorBit) || sizeof(typename MatrixType::Index)!=sizeof(int) || !mat.isCompressed() ) @@ -313,6 +316,45 @@ class UmfPackLU : internal::noncopyable m_valuePtr = mat.valuePtr(); } } + + template + void grapInput_impl(const InputMatrixType& mat, internal::false_type) + { + m_copyMatrix = mat; + m_outerIndexPtr = m_copyMatrix.outerIndexPtr(); + m_innerIndexPtr = m_copyMatrix.innerIndexPtr(); + m_valuePtr = m_copyMatrix.valuePtr(); + } + + template + void grapInput(const InputMatrixType& mat) + { + grapInput_impl(mat, internal::umfpack_helper_is_sparse_plain()); + } + + void analyzePattern_impl() + { + int errorCode = 0; + errorCode = umfpack_symbolic(m_copyMatrix.rows(), m_copyMatrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr, + &m_symbolic, 0, 0); + + m_isInitialized = true; + m_info = errorCode ? InvalidInput : Success; + m_analysisIsOk = true; + m_factorizationIsOk = false; + m_extractedDataAreDirty = true; + } + + void factorize_impl() + { + int errorCode; + errorCode = umfpack_numeric(m_outerIndexPtr, m_innerIndexPtr, m_valuePtr, + m_symbolic, &m_numeric, 0, 0); + + m_info = errorCode ? NumericalIssue : Success; + m_factorizationIsOk = true; + m_extractedDataAreDirty = true; + } // cached data to reduce reallocation, etc. 
mutable LUMatrixType m_l; diff --git a/ground/gcs/src/libs/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/ground/gcs/src/libs/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h index 5c8c476ee..1951286f3 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/plugins/ArrayCwiseBinaryOps.h @@ -70,6 +70,43 @@ max return (max)(Derived::PlainObject::Constant(rows(), cols(), other)); } + +#define EIGEN_MAKE_CWISE_COMP_OP(OP, COMPARATOR) \ +template \ +EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const OtherDerived> \ +OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const \ +{ \ + return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); \ +}\ +typedef CwiseBinaryOp, const Derived, const CwiseNullaryOp, PlainObject> > Cmp ## COMPARATOR ## ReturnType; \ +typedef CwiseBinaryOp, const CwiseNullaryOp, PlainObject>, const Derived > RCmp ## COMPARATOR ## ReturnType; \ +EIGEN_STRONG_INLINE const Cmp ## COMPARATOR ## ReturnType \ +OP(const Scalar& s) const { \ + return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \ +} \ +friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \ +OP(const Scalar& s, const Derived& d) { \ + return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \ +} + +#define EIGEN_MAKE_CWISE_COMP_R_OP(OP, R_OP, RCOMPARATOR) \ +template \ +EIGEN_STRONG_INLINE const CwiseBinaryOp, const OtherDerived, const Derived> \ +OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const \ +{ \ + return CwiseBinaryOp, const OtherDerived, const Derived>(other.derived(), derived()); \ +} \ +\ +inline const RCmp ## RCOMPARATOR ## ReturnType \ +OP(const Scalar& s) const { \ + return Derived::PlainObject::Constant(rows(), cols(), s).R_OP(*this); \ +} \ +friend inline const Cmp ## RCOMPARATOR ## ReturnType \ +OP(const Scalar& s, const Derived& d) { \ + return d.R_OP(Derived::PlainObject::Constant(d.rows(), d.cols(), s)); \ +} + + /** \returns an expression of the coefficient-wise \< operator of *this and \a other * * Example: \include Cwise_less.cpp @@ -77,7 +114,7 @@ max * * \sa all(), any(), operator>(), operator<=() */ -EIGEN_MAKE_CWISE_BINARY_OP(operator<,std::less) +EIGEN_MAKE_CWISE_COMP_OP(operator<, LT) /** \returns an expression of the coefficient-wise \<= operator of *this and \a other * @@ -86,7 +123,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator<,std::less) * * \sa all(), any(), operator>=(), operator<() */ -EIGEN_MAKE_CWISE_BINARY_OP(operator<=,std::less_equal) +EIGEN_MAKE_CWISE_COMP_OP(operator<=, LE) /** \returns an expression of the coefficient-wise \> operator of *this and \a other * @@ -95,7 +132,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator<=,std::less_equal) * * \sa all(), any(), operator>=(), operator<() */ -EIGEN_MAKE_CWISE_BINARY_OP(operator>,std::greater) +EIGEN_MAKE_CWISE_COMP_R_OP(operator>, operator<, LT) /** \returns an expression of the coefficient-wise \>= operator of *this and \a other * @@ -104,7 +141,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator>,std::greater) * * \sa all(), any(), operator>(), operator<=() */ -EIGEN_MAKE_CWISE_BINARY_OP(operator>=,std::greater_equal) +EIGEN_MAKE_CWISE_COMP_R_OP(operator>=, operator<=, LE) /** \returns an expression of the coefficient-wise == operator of *this and \a other * @@ -118,7 +155,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator>=,std::greater_equal) * * \sa all(), any(), isApprox(), isMuchSmallerThan() */ -EIGEN_MAKE_CWISE_BINARY_OP(operator==,std::equal_to) +EIGEN_MAKE_CWISE_COMP_OP(operator==, EQ) /** \returns an 
expression of the coefficient-wise != operator of *this and \a other * @@ -132,7 +169,10 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator==,std::equal_to) * * \sa all(), any(), isApprox(), isMuchSmallerThan() */ -EIGEN_MAKE_CWISE_BINARY_OP(operator!=,std::not_equal_to) +EIGEN_MAKE_CWISE_COMP_OP(operator!=, NEQ) + +#undef EIGEN_MAKE_CWISE_COMP_OP +#undef EIGEN_MAKE_CWISE_COMP_R_OP // scalar addition @@ -209,3 +249,5 @@ operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL); return CwiseBinaryOp(derived(),other.derived()); } + + diff --git a/ground/gcs/src/libs/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/ground/gcs/src/libs/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h index a59636790..1c3ed3fcd 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/plugins/ArrayCwiseUnaryOps.h @@ -185,19 +185,3 @@ cube() const { return derived(); } - -#define EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(METHOD_NAME,FUNCTOR) \ - inline const CwiseUnaryOp >, const Derived> \ - METHOD_NAME(const Scalar& s) const { \ - return CwiseUnaryOp >, const Derived> \ - (derived(), std::bind2nd(FUNCTOR(), s)); \ - } - -EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator==, std::equal_to) -EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator!=, std::not_equal_to) -EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<, std::less) -EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<=, std::less_equal) -EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>, std::greater) -EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>=, std::greater_equal) - - diff --git a/ground/gcs/src/libs/eigen/Eigen/src/plugins/BlockMethods.h b/ground/gcs/src/libs/eigen/Eigen/src/plugins/BlockMethods.h index 6911bedef..2788251e0 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/plugins/BlockMethods.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/plugins/BlockMethods.h @@ -113,13 +113,13 @@ inline const Block topRightCorner() const /** \returns an expression of a top-right corner of *this. * - * \tparam CRows number of rows in corner as specified at compile time - * \tparam CCols number of columns in corner as specified at compile time - * \param cRows number of rows in corner as specified at run time - * \param cCols number of columns in corner as specified at run time + * \tparam CRows number of rows in corner as specified at compile-time + * \tparam CCols number of columns in corner as specified at compile-time + * \param cRows number of rows in corner as specified at run-time + * \param cCols number of columns in corner as specified at run-time * - * This function is mainly useful for corners where the number of rows is specified at compile time - * and the number of columns is specified at run time, or vice versa. The compile-time and run-time + * This function is mainly useful for corners where the number of rows is specified at compile-time + * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time * information should not contradict. In other words, \a cRows should equal \a CRows unless * \a CRows is \a Dynamic, and the same for the number of columns. * @@ -188,13 +188,13 @@ inline const Block topLeftCorner() const /** \returns an expression of a top-left corner of *this. 
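Since the std::bind1st/std::bind2nd based scalar-comparison helpers are removed above in favour of the functor-based comparison operators defined earlier in the same file, here is a small sketch of the unchanged user-facing behaviour; the function and the array a are illustrative only.

#include <Eigen/Dense>

// Hypothetical helper, not from the patch.
int countAboveHalf()
{
    Eigen::ArrayXd a(4);
    a << 0.1, 0.5, 0.9, 1.5;
    // Comparisons against a scalar now expand to a CwiseBinaryOp with an
    // internal::scalar_cmp_op functor and a constant expression, instead of
    // a CwiseUnaryOp built with std::bind2nd.
    Eigen::Array<bool, Eigen::Dynamic, 1> mask = (a > 0.5);
    return static_cast<int>(mask.count());
}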
* - * \tparam CRows number of rows in corner as specified at compile time - * \tparam CCols number of columns in corner as specified at compile time - * \param cRows number of rows in corner as specified at run time - * \param cCols number of columns in corner as specified at run time + * \tparam CRows number of rows in corner as specified at compile-time + * \tparam CCols number of columns in corner as specified at compile-time + * \param cRows number of rows in corner as specified at run-time + * \param cCols number of columns in corner as specified at run-time * - * This function is mainly useful for corners where the number of rows is specified at compile time - * and the number of columns is specified at run time, or vice versa. The compile-time and run-time + * This function is mainly useful for corners where the number of rows is specified at compile-time + * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time * information should not contradict. In other words, \a cRows should equal \a CRows unless * \a CRows is \a Dynamic, and the same for the number of columns. * @@ -263,13 +263,13 @@ inline const Block bottomRightCorner() const /** \returns an expression of a bottom-right corner of *this. * - * \tparam CRows number of rows in corner as specified at compile time - * \tparam CCols number of columns in corner as specified at compile time - * \param cRows number of rows in corner as specified at run time - * \param cCols number of columns in corner as specified at run time + * \tparam CRows number of rows in corner as specified at compile-time + * \tparam CCols number of columns in corner as specified at compile-time + * \param cRows number of rows in corner as specified at run-time + * \param cCols number of columns in corner as specified at run-time * - * This function is mainly useful for corners where the number of rows is specified at compile time - * and the number of columns is specified at run time, or vice versa. The compile-time and run-time + * This function is mainly useful for corners where the number of rows is specified at compile-time + * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time * information should not contradict. In other words, \a cRows should equal \a CRows unless * \a CRows is \a Dynamic, and the same for the number of columns. * @@ -338,13 +338,13 @@ inline const Block bottomLeftCorner() const /** \returns an expression of a bottom-left corner of *this. * - * \tparam CRows number of rows in corner as specified at compile time - * \tparam CCols number of columns in corner as specified at compile time - * \param cRows number of rows in corner as specified at run time - * \param cCols number of columns in corner as specified at run time + * \tparam CRows number of rows in corner as specified at compile-time + * \tparam CCols number of columns in corner as specified at compile-time + * \param cRows number of rows in corner as specified at run-time + * \param cCols number of columns in corner as specified at run-time * - * This function is mainly useful for corners where the number of rows is specified at compile time - * and the number of columns is specified at run time, or vice versa. The compile-time and run-time + * This function is mainly useful for corners where the number of rows is specified at compile-time + * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time * information should not contradict. 
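The corner accessors whose documentation is reflowed above support mixing a compile-time size in one dimension with a run-time size in the other; for reference, a brief sketch. cornerSketch, M and k are illustrative, and M is assumed large enough for both blocks.

#include <Eigen/Dense>

// Hypothetical helper, not from the patch.
void cornerSketch(Eigen::MatrixXd& M, int k)
{
    M.topRightCorner<2, Eigen::Dynamic>(2, k).setZero();    // rows fixed at compile time, columns at run time
    M.bottomLeftCorner<Eigen::Dynamic, 3>(k, 3).setOnes();  // the opposite combination
}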
In other words, \a cRows should equal \a CRows unless * \a CRows is \a Dynamic, and the same for the number of columns. * @@ -390,7 +390,11 @@ inline ConstRowsBlockXpr topRows(Index n) const /** \returns a block consisting of the top rows of *this. * - * \tparam N the number of rows in the block + * \tparam N the number of rows in the block as specified at compile-time + * \param n the number of rows in the block as specified at run-time + * + * The compile-time and run-time information should not contradict. In other words, + * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_topRows.cpp * Output: \verbinclude MatrixBase_template_int_topRows.out @@ -398,16 +402,16 @@ inline ConstRowsBlockXpr topRows(Index n) const * \sa class Block, block(Index,Index,Index,Index) */ template -inline typename NRowsBlockXpr::Type topRows() +inline typename NRowsBlockXpr::Type topRows(Index n = N) { - return typename NRowsBlockXpr::Type(derived(), 0, 0, N, cols()); + return typename NRowsBlockXpr::Type(derived(), 0, 0, n, cols()); } /** This is the const version of topRows().*/ template -inline typename ConstNRowsBlockXpr::Type topRows() const +inline typename ConstNRowsBlockXpr::Type topRows(Index n = N) const { - return typename ConstNRowsBlockXpr::Type(derived(), 0, 0, N, cols()); + return typename ConstNRowsBlockXpr::Type(derived(), 0, 0, n, cols()); } @@ -434,7 +438,11 @@ inline ConstRowsBlockXpr bottomRows(Index n) const /** \returns a block consisting of the bottom rows of *this. * - * \tparam N the number of rows in the block + * \tparam N the number of rows in the block as specified at compile-time + * \param n the number of rows in the block as specified at run-time + * + * The compile-time and run-time information should not contradict. In other words, + * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_bottomRows.cpp * Output: \verbinclude MatrixBase_template_int_bottomRows.out @@ -442,16 +450,16 @@ inline ConstRowsBlockXpr bottomRows(Index n) const * \sa class Block, block(Index,Index,Index,Index) */ template -inline typename NRowsBlockXpr::Type bottomRows() +inline typename NRowsBlockXpr::Type bottomRows(Index n = N) { - return typename NRowsBlockXpr::Type(derived(), rows() - N, 0, N, cols()); + return typename NRowsBlockXpr::Type(derived(), rows() - n, 0, n, cols()); } /** This is the const version of bottomRows().*/ template -inline typename ConstNRowsBlockXpr::Type bottomRows() const +inline typename ConstNRowsBlockXpr::Type bottomRows(Index n = N) const { - return typename ConstNRowsBlockXpr::Type(derived(), rows() - N, 0, N, cols()); + return typename ConstNRowsBlockXpr::Type(derived(), rows() - n, 0, n, cols()); } @@ -459,28 +467,32 @@ inline typename ConstNRowsBlockXpr::Type bottomRows() const /** \returns a block consisting of a range of rows of *this. 
* * \param startRow the index of the first row in the block - * \param numRows the number of rows in the block + * \param n the number of rows in the block * * Example: \include DenseBase_middleRows_int.cpp * Output: \verbinclude DenseBase_middleRows_int.out * * \sa class Block, block(Index,Index,Index,Index) */ -inline RowsBlockXpr middleRows(Index startRow, Index numRows) +inline RowsBlockXpr middleRows(Index startRow, Index n) { - return RowsBlockXpr(derived(), startRow, 0, numRows, cols()); + return RowsBlockXpr(derived(), startRow, 0, n, cols()); } /** This is the const version of middleRows(Index,Index).*/ -inline ConstRowsBlockXpr middleRows(Index startRow, Index numRows) const +inline ConstRowsBlockXpr middleRows(Index startRow, Index n) const { - return ConstRowsBlockXpr(derived(), startRow, 0, numRows, cols()); + return ConstRowsBlockXpr(derived(), startRow, 0, n, cols()); } /** \returns a block consisting of a range of rows of *this. * - * \tparam N the number of rows in the block + * \tparam N the number of rows in the block as specified at compile-time * \param startRow the index of the first row in the block + * \param n the number of rows in the block as specified at run-time + * + * The compile-time and run-time information should not contradict. In other words, + * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include DenseBase_template_int_middleRows.cpp * Output: \verbinclude DenseBase_template_int_middleRows.out @@ -488,16 +500,16 @@ inline ConstRowsBlockXpr middleRows(Index startRow, Index numRows) const * \sa class Block, block(Index,Index,Index,Index) */ template -inline typename NRowsBlockXpr::Type middleRows(Index startRow) +inline typename NRowsBlockXpr::Type middleRows(Index startRow, Index n = N) { - return typename NRowsBlockXpr::Type(derived(), startRow, 0, N, cols()); + return typename NRowsBlockXpr::Type(derived(), startRow, 0, n, cols()); } /** This is the const version of middleRows().*/ template -inline typename ConstNRowsBlockXpr::Type middleRows(Index startRow) const +inline typename ConstNRowsBlockXpr::Type middleRows(Index startRow, Index n = N) const { - return typename ConstNRowsBlockXpr::Type(derived(), startRow, 0, N, cols()); + return typename ConstNRowsBlockXpr::Type(derived(), startRow, 0, n, cols()); } @@ -524,7 +536,11 @@ inline ConstColsBlockXpr leftCols(Index n) const /** \returns a block consisting of the left columns of *this. * - * \tparam N the number of columns in the block + * \tparam N the number of columns in the block as specified at compile-time + * \param n the number of columns in the block as specified at run-time + * + * The compile-time and run-time information should not contradict. In other words, + * \a n should equal \a N unless \a N is \a Dynamic. 
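The row and column block accessors above gain an optional run-time count next to the compile-time template parameter N; the sketch below follows the stated rule that n must equal N unless N is Dynamic. rowsSketch, M and n are illustrative, M is assumed to have enough rows, and the Dynamic line follows the documentation above rather than code in this patch.

#include <Eigen/Dense>

// Hypothetical helper, not from the patch.
void rowsSketch(Eigen::MatrixXd& M, int n)
{
    M.topRows<4>().setZero();                           // compile-time-only form, unchanged
    M.middleRows<2>(1).setOnes();                       // unchanged
    M.bottomRows<Eigen::Dynamic>(n).setConstant(0.5);   // run-time count; N is Dynamic as the rule above allows
}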
* * Example: \include MatrixBase_template_int_leftCols.cpp * Output: \verbinclude MatrixBase_template_int_leftCols.out @@ -532,16 +548,16 @@ inline ConstColsBlockXpr leftCols(Index n) const * \sa class Block, block(Index,Index,Index,Index) */ template -inline typename NColsBlockXpr::Type leftCols() +inline typename NColsBlockXpr::Type leftCols(Index n = N) { - return typename NColsBlockXpr::Type(derived(), 0, 0, rows(), N); + return typename NColsBlockXpr::Type(derived(), 0, 0, rows(), n); } /** This is the const version of leftCols().*/ template -inline typename ConstNColsBlockXpr::Type leftCols() const +inline typename ConstNColsBlockXpr::Type leftCols(Index n = N) const { - return typename ConstNColsBlockXpr::Type(derived(), 0, 0, rows(), N); + return typename ConstNColsBlockXpr::Type(derived(), 0, 0, rows(), n); } @@ -568,7 +584,11 @@ inline ConstColsBlockXpr rightCols(Index n) const /** \returns a block consisting of the right columns of *this. * - * \tparam N the number of columns in the block + * \tparam N the number of columns in the block as specified at compile-time + * \param n the number of columns in the block as specified at run-time + * + * The compile-time and run-time information should not contradict. In other words, + * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_rightCols.cpp * Output: \verbinclude MatrixBase_template_int_rightCols.out @@ -576,16 +596,16 @@ inline ConstColsBlockXpr rightCols(Index n) const * \sa class Block, block(Index,Index,Index,Index) */ template -inline typename NColsBlockXpr::Type rightCols() +inline typename NColsBlockXpr::Type rightCols(Index n = N) { - return typename NColsBlockXpr::Type(derived(), 0, cols() - N, rows(), N); + return typename NColsBlockXpr::Type(derived(), 0, cols() - n, rows(), n); } /** This is the const version of rightCols().*/ template -inline typename ConstNColsBlockXpr::Type rightCols() const +inline typename ConstNColsBlockXpr::Type rightCols(Index n = N) const { - return typename ConstNColsBlockXpr::Type(derived(), 0, cols() - N, rows(), N); + return typename ConstNColsBlockXpr::Type(derived(), 0, cols() - n, rows(), n); } @@ -613,8 +633,12 @@ inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const /** \returns a block consisting of a range of columns of *this. * - * \tparam N the number of columns in the block + * \tparam N the number of columns in the block as specified at compile-time * \param startCol the index of the first column in the block + * \param n the number of columns in the block as specified at run-time + * + * The compile-time and run-time information should not contradict. In other words, + * \a n should equal \a N unless \a N is \a Dynamic. 
* * Example: \include DenseBase_template_int_middleCols.cpp * Output: \verbinclude DenseBase_template_int_middleCols.out @@ -622,16 +646,16 @@ inline ConstColsBlockXpr middleCols(Index startCol, Index numCols) const * \sa class Block, block(Index,Index,Index,Index) */ template -inline typename NColsBlockXpr::Type middleCols(Index startCol) +inline typename NColsBlockXpr::Type middleCols(Index startCol, Index n = N) { - return typename NColsBlockXpr::Type(derived(), 0, startCol, rows(), N); + return typename NColsBlockXpr::Type(derived(), 0, startCol, rows(), n); } /** This is the const version of middleCols().*/ template -inline typename ConstNColsBlockXpr::Type middleCols(Index startCol) const +inline typename ConstNColsBlockXpr::Type middleCols(Index startCol, Index n = N) const { - return typename ConstNColsBlockXpr::Type(derived(), 0, startCol, rows(), N); + return typename ConstNColsBlockXpr::Type(derived(), 0, startCol, rows(), n); } @@ -667,15 +691,15 @@ inline const Block block(Index startRow, In /** \returns an expression of a block in *this. * - * \tparam BlockRows number of rows in block as specified at compile time - * \tparam BlockCols number of columns in block as specified at compile time + * \tparam BlockRows number of rows in block as specified at compile-time + * \tparam BlockCols number of columns in block as specified at compile-time * \param startRow the first row in the block * \param startCol the first column in the block - * \param blockRows number of rows in block as specified at run time - * \param blockCols number of columns in block as specified at run time + * \param blockRows number of rows in block as specified at run-time + * \param blockCols number of columns in block as specified at run-time * - * This function is mainly useful for blocks where the number of rows is specified at compile time - * and the number of columns is specified at run time, or vice versa. The compile-time and run-time + * This function is mainly useful for blocks where the number of rows is specified at compile-time + * and the number of columns is specified at run-time, or vice versa. The compile-time and run-time * information should not contradict. In other words, \a blockRows should equal \a BlockRows unless * \a BlockRows is \a Dynamic, and the same for the number of columns. * @@ -738,7 +762,7 @@ inline ConstRowXpr row(Index i) const * \only_for_vectors * * \param start the first coefficient in the segment - * \param vecSize the number of coefficients in the segment + * \param n the number of coefficients in the segment * * Example: \include MatrixBase_segment_int_int.cpp * Output: \verbinclude MatrixBase_segment_int_int.out @@ -749,25 +773,25 @@ inline ConstRowXpr row(Index i) const * * \sa class Block, segment(Index) */ -inline SegmentReturnType segment(Index start, Index vecSize) +inline SegmentReturnType segment(Index start, Index n) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return SegmentReturnType(derived(), start, vecSize); + return SegmentReturnType(derived(), start, n); } /** This is the const version of segment(Index,Index).*/ -inline ConstSegmentReturnType segment(Index start, Index vecSize) const +inline ConstSegmentReturnType segment(Index start, Index n) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return ConstSegmentReturnType(derived(), start, vecSize); + return ConstSegmentReturnType(derived(), start, n); } /** \returns a dynamic-size expression of the first coefficients of *this. 
* * \only_for_vectors * - * \param vecSize the number of coefficients in the block + * \param n the number of coefficients in the segment * * Example: \include MatrixBase_start_int.cpp * Output: \verbinclude MatrixBase_start_int.out @@ -778,25 +802,24 @@ inline ConstSegmentReturnType segment(Index start, Index vecSize) const * * \sa class Block, block(Index,Index) */ -inline SegmentReturnType head(Index vecSize) +inline SegmentReturnType head(Index n) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return SegmentReturnType(derived(), 0, vecSize); + return SegmentReturnType(derived(), 0, n); } /** This is the const version of head(Index).*/ -inline ConstSegmentReturnType - head(Index vecSize) const +inline ConstSegmentReturnType head(Index n) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return ConstSegmentReturnType(derived(), 0, vecSize); + return ConstSegmentReturnType(derived(), 0, n); } /** \returns a dynamic-size expression of the last coefficients of *this. * * \only_for_vectors * - * \param vecSize the number of coefficients in the block + * \param n the number of coefficients in the segment * * Example: \include MatrixBase_end_int.cpp * Output: \verbinclude MatrixBase_end_int.out @@ -807,95 +830,106 @@ inline ConstSegmentReturnType * * \sa class Block, block(Index,Index) */ -inline SegmentReturnType tail(Index vecSize) +inline SegmentReturnType tail(Index n) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return SegmentReturnType(derived(), this->size() - vecSize, vecSize); + return SegmentReturnType(derived(), this->size() - n, n); } /** This is the const version of tail(Index).*/ -inline ConstSegmentReturnType tail(Index vecSize) const +inline ConstSegmentReturnType tail(Index n) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return ConstSegmentReturnType(derived(), this->size() - vecSize, vecSize); + return ConstSegmentReturnType(derived(), this->size() - n, n); } /** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this * * \only_for_vectors * - * The template parameter \a Size is the number of coefficients in the block + * \tparam N the number of coefficients in the segment as specified at compile-time + * \param start the index of the first element in the segment + * \param n the number of coefficients in the segment as specified at compile-time * - * \param start the index of the first element of the sub-vector + * The compile-time and run-time information should not contradict. In other words, + * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_segment.cpp * Output: \verbinclude MatrixBase_template_int_segment.out * * \sa class Block */ -template -inline typename FixedSegmentReturnType::Type segment(Index start) +template +inline typename FixedSegmentReturnType::Type segment(Index start, Index n = N) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return typename FixedSegmentReturnType::Type(derived(), start); + return typename FixedSegmentReturnType::Type(derived(), start, n); } /** This is the const version of segment(Index).*/ -template -inline typename ConstFixedSegmentReturnType::Type segment(Index start) const +template +inline typename ConstFixedSegmentReturnType::Type segment(Index start, Index n = N) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return typename ConstFixedSegmentReturnType::Type(derived(), start); + return typename ConstFixedSegmentReturnType::Type(derived(), start, n); } /** \returns a fixed-size expression of the first coefficients of *this. 
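For the vector counterparts above (segment(), head(), tail()), only the parameter name changes in the dynamic forms, while the fixed-size forms gain the same optional run-time count; a tiny sketch follows. segmentSketch, v and n are illustrative, and v is assumed long enough.

#include <Eigen/Dense>

// Hypothetical helper, not from the patch.
void segmentSketch(Eigen::VectorXd& v, int n)
{
    v.head(n).setZero();               // run-time size; parameter renamed from vecSize to n
    v.tail(n).setOnes();               // idem
    v.segment<3>(1).setConstant(2.0);  // compile-time size, unchanged; segment<N>(start, n) is now also accepted
}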
* * \only_for_vectors * - * The template parameter \a Size is the number of coefficients in the block + * \tparam N the number of coefficients in the segment as specified at compile-time + * \param n the number of coefficients in the segment as specified at run-time + * + * The compile-time and run-time information should not contradict. In other words, + * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_start.cpp * Output: \verbinclude MatrixBase_template_int_start.out * * \sa class Block */ -template -inline typename FixedSegmentReturnType::Type head() +template +inline typename FixedSegmentReturnType::Type head(Index n = N) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return typename FixedSegmentReturnType::Type(derived(), 0); + return typename FixedSegmentReturnType::Type(derived(), 0, n); } /** This is the const version of head().*/ -template -inline typename ConstFixedSegmentReturnType::Type head() const +template +inline typename ConstFixedSegmentReturnType::Type head(Index n = N) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return typename ConstFixedSegmentReturnType::Type(derived(), 0); + return typename ConstFixedSegmentReturnType::Type(derived(), 0, n); } /** \returns a fixed-size expression of the last coefficients of *this. * * \only_for_vectors * - * The template parameter \a Size is the number of coefficients in the block + * \tparam N the number of coefficients in the segment as specified at compile-time + * \param n the number of coefficients in the segment as specified at run-time + * + * The compile-time and run-time information should not contradict. In other words, + * \a n should equal \a N unless \a N is \a Dynamic. * * Example: \include MatrixBase_template_int_end.cpp * Output: \verbinclude MatrixBase_template_int_end.out * * \sa class Block */ -template -inline typename FixedSegmentReturnType::Type tail() +template +inline typename FixedSegmentReturnType::Type tail(Index n = N) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return typename FixedSegmentReturnType::Type(derived(), size() - Size); + return typename FixedSegmentReturnType::Type(derived(), size() - n); } /** This is the const version of tail.*/ -template -inline typename ConstFixedSegmentReturnType::Type tail() const +template +inline typename ConstFixedSegmentReturnType::Type tail(Index n = N) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) - return typename ConstFixedSegmentReturnType::Type(derived(), size() - Size); + return typename ConstFixedSegmentReturnType::Type(derived(), size() - n); } diff --git a/ground/gcs/src/libs/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/ground/gcs/src/libs/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h index 3a737df7b..c4a042b70 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/plugins/MatrixCwiseBinaryOps.h @@ -83,7 +83,7 @@ cwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const ConstantReturnType> cwiseMin(const Scalar &other) const { - return cwiseMin(Derived::PlainObject::Constant(rows(), cols(), other)); + return cwiseMin(Derived::Constant(rows(), cols(), other)); } /** \returns an expression of the coefficient-wise max of *this and \a other @@ -107,7 +107,7 @@ cwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const EIGEN_STRONG_INLINE const CwiseBinaryOp, const Derived, const ConstantReturnType> cwiseMax(const Scalar &other) const { - return 
cwiseMax(Derived::PlainObject::Constant(rows(), cols(), other)); + return cwiseMax(Derived::Constant(rows(), cols(), other)); } @@ -124,3 +124,20 @@ cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS &other) const { return CwiseBinaryOp, const Derived, const OtherDerived>(derived(), other.derived()); } + +typedef CwiseBinaryOp, const Derived, const ConstantReturnType> CwiseScalarEqualReturnType; + +/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s + * + * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. + * In order to check for equality between two vectors or matrices with floating-point coefficients, it is + * generally a far better idea to use a fuzzy comparison as provided by isApprox() and + * isMuchSmallerThan(). + * + * \sa cwiseEqual(const MatrixBase &) const + */ +inline const CwiseScalarEqualReturnType +cwiseEqual(const Scalar& s) const +{ + return CwiseScalarEqualReturnType(derived(), Derived::Constant(rows(), cols(), s), internal::scalar_cmp_op()); +} diff --git a/ground/gcs/src/libs/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h b/ground/gcs/src/libs/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h index 0cf0640ba..8de10935d 100644 --- a/ground/gcs/src/libs/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h +++ b/ground/gcs/src/libs/eigen/Eigen/src/plugins/MatrixCwiseUnaryOps.h @@ -50,18 +50,3 @@ cwiseSqrt() const { return derived(); } inline const CwiseUnaryOp, const Derived> cwiseInverse() const { return derived(); } -/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s - * - * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. - * In order to check for equality between two vectors or matrices with floating-point coefficients, it is - * generally a far better idea to use a fuzzy comparison as provided by isApprox() and - * isMuchSmallerThan(). - * - * \sa cwiseEqual(const MatrixBase &) const - */ -inline const CwiseUnaryOp >, const Derived> -cwiseEqual(const Scalar& s) const -{ - return CwiseUnaryOp >,const Derived> - (derived(), std::bind1st(std::equal_to(), s)); -} diff --git a/ground/gcs/src/libs/eigen/blas/CMakeLists.txt b/ground/gcs/src/libs/eigen/blas/CMakeLists.txt index c35a2fdbe..a9bc05137 100644 --- a/ground/gcs/src/libs/eigen/blas/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/blas/CMakeLists.txt @@ -7,6 +7,9 @@ workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS) if(EIGEN_Fortran_COMPILER_WORKS) enable_language(Fortran OPTIONAL) + if(NOT CMAKE_Fortran_COMPILER) + set(EIGEN_Fortran_COMPILER_WORKS OFF) + endif() endif() add_custom_target(blas) diff --git a/ground/gcs/src/libs/eigen/blas/README.txt b/ground/gcs/src/libs/eigen/blas/README.txt index 07a8bd92a..63a5203b9 100644 --- a/ground/gcs/src/libs/eigen/blas/README.txt +++ b/ground/gcs/src/libs/eigen/blas/README.txt @@ -1,9 +1,6 @@ This directory contains a BLAS library built on top of Eigen. -This is currently a work in progress which is far to be ready for use, -but feel free to contribute to it if you wish. - This module is not built by default. In order to compile it, you need to type 'make blas' from within your build dir. 
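For illustration only (this snippet is not part of the patch): the two plugin hunks above move the scalar overload of cwiseEqual() from MatrixCwiseUnaryOps.h, where it was implemented with std::bind1st/std::equal_to (deprecated since C++11 and removed in C++17), to MatrixCwiseBinaryOps.h, where it is implemented with internal::scalar_cmp_op. The user-facing behaviour should be unchanged; below is a minimal usage sketch, assuming only the public cwiseEqual() overload shown above plus the standard DenseBase count().

\code
#include <iostream>
#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3i m;
  m << 1, 2, 3,
       2, 2, 2,
       3, 2, 1;

  // Coefficient-wise comparison against a scalar yields a boolean expression.
  std::cout << m.cwiseEqual(2) << "\n";

  // Counting matches is a typical use. Exact comparison is fine for integers;
  // for floating-point coefficients prefer isApprox() or isMuchSmallerThan(),
  // as the warning in the new documentation block above points out.
  std::cout << "number of 2s: " << m.cwiseEqual(2).count() << "\n";
}
\endcode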
diff --git a/ground/gcs/src/libs/eigen/blas/xerbla.cpp b/ground/gcs/src/libs/eigen/blas/xerbla.cpp index 0d57710fe..dd39a5244 100644 --- a/ground/gcs/src/libs/eigen/blas/xerbla.cpp +++ b/ground/gcs/src/libs/eigen/blas/xerbla.cpp @@ -1,7 +1,7 @@ #include -#if (defined __GNUC__) +#if (defined __GNUC__) && (!defined __MINGW32__) && (!defined __CYGWIN__) #define EIGEN_WEAK_LINKING __attribute__ ((weak)) #else #define EIGEN_WEAK_LINKING diff --git a/ground/gcs/src/libs/eigen/cmake/EigenConfigureTesting.cmake b/ground/gcs/src/libs/eigen/cmake/EigenConfigureTesting.cmake index 228d29e97..2b11d8360 100644 --- a/ground/gcs/src/libs/eigen/cmake/EigenConfigureTesting.cmake +++ b/ground/gcs/src/libs/eigen/cmake/EigenConfigureTesting.cmake @@ -26,31 +26,20 @@ include(CTest) set(EIGEN_TEST_BUILD_FLAGS " " CACHE STRING "Options passed to the build command of unit tests") -# overwrite default DartConfiguration.tcl -# The worarounds are different for each version of the MSVC IDE -if(MSVC_IDE) - if(CMAKE_MAKE_PROGRAM_SAVE MATCHES "devenv") # devenv - set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} Eigen.sln /build \"Release\" /project buildtests ${EIGEN_TEST_BUILD_FLAGS} \n# ") - else() # msbuild - set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} buildtests.vcxproj /p:Configuration=\${CTEST_CONFIGURATION_TYPE} ${EIGEN_TEST_BUILD_FLAGS}\n# ") - endif() -else() - # for make and nmake - set(EIGEN_MAKECOMMAND_PLACEHOLDER "${CMAKE_MAKE_PROGRAM_SAVE} buildtests ${EIGEN_TEST_BUILD_FLAGS}") +# Overwrite default DartConfiguration.tcl such that ctest can build our unit tests. +# Recall that our unit tests are not in the "all" target, so we have to explicitely ask ctest to build our custom 'buildtests' target. +# At this stage, we can also add custom flags to the build tool through the user defined EIGEN_TEST_BUILD_FLAGS variable. +file(READ "${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl" EIGEN_DART_CONFIG_FILE) +# try to grab the default flags +string(REGEX MATCH "MakeCommand:.*-- (.*)\nDefaultCTestConfigurationType" EIGEN_DUMMY ${EIGEN_DART_CONFIG_FILE}) +if(NOT CMAKE_MATCH_1) +string(REGEX MATCH "MakeCommand:.*[^c]make (.*)\nDefaultCTestConfigurationType" EIGEN_DUMMY ${EIGEN_DART_CONFIG_FILE}) endif() +string(REGEX REPLACE "MakeCommand:.*DefaultCTestConfigurationType" "MakeCommand: ${CMAKE_COMMAND} --build . --target buildtests --config \"\${CTEST_CONFIGURATION_TYPE}\" -- ${CMAKE_MATCH_1} ${EIGEN_TEST_BUILD_FLAGS}\nDefaultCTestConfigurationType" + EIGEN_DART_CONFIG_FILE2 ${EIGEN_DART_CONFIG_FILE}) +file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl" ${EIGEN_DART_CONFIG_FILE2}) -# copy ctest properties, which currently -# o raise the warning levels -configure_file(${CMAKE_BINARY_DIR}/DartConfiguration.tcl ${CMAKE_BINARY_DIR}/DartConfiguration.tcl) - -# restore default CMAKE_MAKE_PROGRAM -set(CMAKE_MAKE_PROGRAM ${CMAKE_MAKE_PROGRAM_SAVE}) -# un-set temporary variables so that it is like they never existed. -# CMake 2.6.3 introduces the more logical unset() syntax for this. 
-set(CMAKE_MAKE_PROGRAM_SAVE) -set(EIGEN_MAKECOMMAND_PLACEHOLDER) - -configure_file(${CMAKE_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_BINARY_DIR}/CTestCustom.cmake) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_BINARY_DIR}/CTestCustom.cmake) # some documentation of this function would be nice ei_init_testing() diff --git a/ground/gcs/src/libs/eigen/cmake/EigenTesting.cmake b/ground/gcs/src/libs/eigen/cmake/EigenTesting.cmake index d9e22ab1a..cbe12d51b 100644 --- a/ground/gcs/src/libs/eigen/cmake/EigenTesting.cmake +++ b/ground/gcs/src/libs/eigen/cmake/EigenTesting.cmake @@ -322,22 +322,21 @@ macro(ei_get_compilerver VAR) endif() else() # on all other system we rely on ${CMAKE_CXX_COMPILER} - # supporting a "--version" flag + # supporting a "--version" or "/version" flag - # check whether the head command exists - find_program(HEAD_EXE head NO_CMAKE_ENVIRONMENT_PATH NO_CMAKE_PATH NO_CMAKE_SYSTEM_PATH) - if(HEAD_EXE) - execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version - COMMAND head -n 1 - OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE) + if(WIN32 AND ${CMAKE_CXX_COMPILER_ID} EQUAL "Intel") + set(EIGEN_CXX_FLAG_VERSION "/version") else() - execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version - OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE) - string(REGEX REPLACE "[\n\r].*" "" eigen_cxx_compiler_version_string ${eigen_cxx_compiler_version_string}) + set(EIGEN_CXX_FLAG_VERSION "--version") endif() + execute_process(COMMAND ${CMAKE_CXX_COMPILER} ${EIGEN_CXX_FLAG_VERSION} + OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE) + string(REGEX REPLACE "[\n\r].*" "" eigen_cxx_compiler_version_string ${eigen_cxx_compiler_version_string}) + ei_get_compilerver_from_cxx_version_string("${eigen_cxx_compiler_version_string}" CNAME CVER) set(${VAR} "${CNAME}-${CVER}") + endif() endmacro(ei_get_compilerver) @@ -452,20 +451,12 @@ macro(ei_set_build_string) endmacro(ei_set_build_string) macro(ei_is_64bit_env VAR) - - file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/is64.cpp" - "int main() { return (sizeof(int*) == 8 ? 1 : 0); } - ") - try_run(run_res compile_res - ${CMAKE_CURRENT_BINARY_DIR} "${CMAKE_CURRENT_BINARY_DIR}/is64.cpp" - RUN_OUTPUT_VARIABLE run_output) - - if(compile_res AND run_res) - set(${VAR} ${run_res}) - elseif(CMAKE_CL_64) - set(${VAR} 1) - elseif("$ENV{Platform}" STREQUAL "X64") # nmake 64 bit + if(CMAKE_SIZEOF_VOID_P EQUAL 8) set(${VAR} 1) + elseif(CMAKE_SIZEOF_VOID_P EQUAL 4) + set(${VAR} 0) + else() + message(WARNING "Unsupported pointer size. 
Please contact the authors.") endif() endmacro(ei_is_64bit_env) diff --git a/ground/gcs/src/libs/eigen/cmake/FindCholmod.cmake b/ground/gcs/src/libs/eigen/cmake/FindCholmod.cmake index 7b3046d45..23239c300 100644 --- a/ground/gcs/src/libs/eigen/cmake/FindCholmod.cmake +++ b/ground/gcs/src/libs/eigen/cmake/FindCholmod.cmake @@ -86,4 +86,4 @@ include(FindPackageHandleStandardArgs) find_package_handle_standard_args(CHOLMOD DEFAULT_MSG CHOLMOD_INCLUDES CHOLMOD_LIBRARIES) -mark_as_advanced(CHOLMOD_INCLUDES CHOLMOD_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY SUITESPARSE_LIBRARY) +mark_as_advanced(CHOLMOD_INCLUDES CHOLMOD_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY SUITESPARSE_LIBRARY CAMD_LIBRARY CCOLAMD_LIBRARY CHOLMOD_METIS_LIBRARY) diff --git a/ground/gcs/src/libs/eigen/cmake/FindFFTW.cmake b/ground/gcs/src/libs/eigen/cmake/FindFFTW.cmake index a9e9925e7..6c4dc9ab4 100644 --- a/ground/gcs/src/libs/eigen/cmake/FindFFTW.cmake +++ b/ground/gcs/src/libs/eigen/cmake/FindFFTW.cmake @@ -115,5 +115,5 @@ include(FindPackageHandleStandardArgs) find_package_handle_standard_args(FFTW DEFAULT_MSG FFTW_INCLUDES FFTW_LIBRARIES) -mark_as_advanced(FFTW_INCLUDES FFTW_LIBRARIES) +mark_as_advanced(FFTW_INCLUDES FFTW_LIBRARIES FFTW_LIB FFTWF_LIB FFTWL_LIB) diff --git a/ground/gcs/src/libs/eigen/cmake/FindMetis.cmake b/ground/gcs/src/libs/eigen/cmake/FindMetis.cmake index 627c3e9ae..6a0ce790c 100644 --- a/ground/gcs/src/libs/eigen/cmake/FindMetis.cmake +++ b/ground/gcs/src/libs/eigen/cmake/FindMetis.cmake @@ -10,16 +10,50 @@ find_path(METIS_INCLUDES PATHS $ENV{METISDIR} ${INCLUDE_INSTALL_DIR} - PATH_SUFFIXES + PATH_SUFFIXES + . metis include ) +macro(_metis_check_version) + file(READ "${METIS_INCLUDES}/metis.h" _metis_version_header) + + string(REGEX MATCH "define[ \t]+METIS_VER_MAJOR[ \t]+([0-9]+)" _metis_major_version_match "${_metis_version_header}") + set(METIS_MAJOR_VERSION "${CMAKE_MATCH_1}") + string(REGEX MATCH "define[ \t]+METIS_VER_MINOR[ \t]+([0-9]+)" _metis_minor_version_match "${_metis_version_header}") + set(METIS_MINOR_VERSION "${CMAKE_MATCH_1}") + string(REGEX MATCH "define[ \t]+METIS_VER_SUBMINOR[ \t]+([0-9]+)" _metis_subminor_version_match "${_metis_version_header}") + set(METIS_SUBMINOR_VERSION "${CMAKE_MATCH_1}") + if(NOT METIS_MAJOR_VERSION) + message(STATUS "Could not determine Metis version. 
Assuming version 4.0.0") + set(METIS_VERSION 4.0.0) + else() + set(METIS_VERSION ${METIS_MAJOR_VERSION}.${METIS_MINOR_VERSION}.${METIS_SUBMINOR_VERSION}) + endif() + if(${METIS_VERSION} VERSION_LESS ${Metis_FIND_VERSION}) + set(METIS_VERSION_OK FALSE) + else() + set(METIS_VERSION_OK TRUE) + endif() + + if(NOT METIS_VERSION_OK) + message(STATUS "Metis version ${METIS_VERSION} found in ${METIS_INCLUDES}, " + "but at least version ${Metis_FIND_VERSION} is required") + endif(NOT METIS_VERSION_OK) +endmacro(_metis_check_version) + + if(METIS_INCLUDES AND Metis_FIND_VERSION) + _metis_check_version() + else() + set(METIS_VERSION_OK TRUE) + endif() + find_library(METIS_LIBRARIES metis PATHS $ENV{METISDIR} ${LIB_INSTALL_DIR} PATH_SUFFIXES lib) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(METIS DEFAULT_MSG - METIS_INCLUDES METIS_LIBRARIES) + METIS_INCLUDES METIS_LIBRARIES METIS_VERSION_OK) mark_as_advanced(METIS_INCLUDES METIS_LIBRARIES) diff --git a/ground/gcs/src/libs/eigen/cmake/FindSPQR.cmake b/ground/gcs/src/libs/eigen/cmake/FindSPQR.cmake index 794c212af..1e958c3c1 100644 --- a/ground/gcs/src/libs/eigen/cmake/FindSPQR.cmake +++ b/ground/gcs/src/libs/eigen/cmake/FindSPQR.cmake @@ -26,7 +26,12 @@ if(SPQR_LIBRARIES) find_library(SUITESPARSE_LIBRARY SuiteSparse PATHS $ENV{SPQRDIR} ${LIB_INSTALL_DIR}) if (SUITESPARSE_LIBRARY) set(SPQR_LIBRARIES ${SPQR_LIBRARIES} ${SUITESPARSE_LIBRARY}) - endif (SUITESPARSE_LIBRARY) + endif() + + find_library(CHOLMOD_LIBRARY cholmod PATHS $ENV{UMFPACK_LIBDIR} $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR}) + if(CHOLMOD_LIBRARY) + set(SPQR_LIBRARIES ${SPQR_LIBRARIES} ${CHOLMOD_LIBRARY}) + endif() endif(SPQR_LIBRARIES) diff --git a/ground/gcs/src/libs/eigen/cmake/FindUmfpack.cmake b/ground/gcs/src/libs/eigen/cmake/FindUmfpack.cmake index 16b046cd6..53cf0b49b 100644 --- a/ground/gcs/src/libs/eigen/cmake/FindUmfpack.cmake +++ b/ground/gcs/src/libs/eigen/cmake/FindUmfpack.cmake @@ -20,24 +20,29 @@ find_library(UMFPACK_LIBRARIES umfpack PATHS $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR} if(UMFPACK_LIBRARIES) - if (NOT UMFPACK_LIBDIR) + if(NOT UMFPACK_LIBDIR) get_filename_component(UMFPACK_LIBDIR ${UMFPACK_LIBRARIES} PATH) endif(NOT UMFPACK_LIBDIR) find_library(COLAMD_LIBRARY colamd PATHS ${UMFPACK_LIBDIR} $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR}) - if (COLAMD_LIBRARY) + if(COLAMD_LIBRARY) set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${COLAMD_LIBRARY}) - endif (COLAMD_LIBRARY) + endif () find_library(AMD_LIBRARY amd PATHS ${UMFPACK_LIBDIR} $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR}) - if (AMD_LIBRARY) + if(AMD_LIBRARY) set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${AMD_LIBRARY}) - endif (AMD_LIBRARY) + endif () find_library(SUITESPARSE_LIBRARY SuiteSparse PATHS ${UMFPACK_LIBDIR} $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR}) - if (SUITESPARSE_LIBRARY) + if(SUITESPARSE_LIBRARY) set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${SUITESPARSE_LIBRARY}) - endif (SUITESPARSE_LIBRARY) + endif () + + find_library(CHOLMOD_LIBRARY cholmod PATHS $ENV{UMFPACK_LIBDIR} $ENV{UMFPACKDIR} ${LIB_INSTALL_DIR}) + if(CHOLMOD_LIBRARY) + set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${CHOLMOD_LIBRARY}) + endif() endif(UMFPACK_LIBRARIES) @@ -45,4 +50,4 @@ include(FindPackageHandleStandardArgs) find_package_handle_standard_args(UMFPACK DEFAULT_MSG UMFPACK_INCLUDES UMFPACK_LIBRARIES) -mark_as_advanced(UMFPACK_INCLUDES UMFPACK_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY SUITESPARSE_LIBRARY) +mark_as_advanced(UMFPACK_INCLUDES UMFPACK_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY CHOLMOD_LIBRARY SUITESPARSE_LIBRARY) diff --git 
a/ground/gcs/src/libs/eigen/cmake/language_support.cmake b/ground/gcs/src/libs/eigen/cmake/language_support.cmake index 2ca303c92..231f7ff70 100644 --- a/ground/gcs/src/libs/eigen/cmake/language_support.cmake +++ b/ground/gcs/src/libs/eigen/cmake/language_support.cmake @@ -23,7 +23,7 @@ function(workaround_9220 language language_works) #message("DEBUG: language = ${language}") set(text "project(test NONE) - cmake_minimum_required(VERSION 2.6.0) + cmake_minimum_required(VERSION 2.8.0) set (CMAKE_Fortran_FLAGS \"${CMAKE_Fortran_FLAGS}\") set (CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS}\") enable_language(${language} OPTIONAL) @@ -33,7 +33,7 @@ function(workaround_9220 language language_works) file(WRITE ${CMAKE_BINARY_DIR}/language_tests/${language}/CMakeLists.txt ${text}) execute_process( - COMMAND ${CMAKE_COMMAND} . + COMMAND ${CMAKE_COMMAND} . -G "${CMAKE_GENERATOR}" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/language_tests/${language} RESULT_VARIABLE return_code OUTPUT_QUIET diff --git a/ground/gcs/src/libs/eigen/doc/A05_PortingFrom2To3.dox b/ground/gcs/src/libs/eigen/doc/A05_PortingFrom2To3.dox index d885b4f6d..4d5f3ae1f 100644 --- a/ground/gcs/src/libs/eigen/doc/A05_PortingFrom2To3.dox +++ b/ground/gcs/src/libs/eigen/doc/A05_PortingFrom2To3.dox @@ -2,6 +2,8 @@ namespace Eigen { /** \page Eigen2ToEigen3 Porting from Eigen2 to Eigen3 +
<div class="bigwarning">Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3.</div>
+ This page lists the most important API changes between Eigen2 and Eigen3, and gives tips to help porting your application from Eigen2 to Eigen3. diff --git a/ground/gcs/src/libs/eigen/doc/A10_Eigen2SupportModes.dox b/ground/gcs/src/libs/eigen/doc/A10_Eigen2SupportModes.dox index abfdb4683..f3df91515 100644 --- a/ground/gcs/src/libs/eigen/doc/A10_Eigen2SupportModes.dox +++ b/ground/gcs/src/libs/eigen/doc/A10_Eigen2SupportModes.dox @@ -2,6 +2,8 @@ namespace Eigen { /** \page Eigen2SupportModes Eigen 2 support modes +
<div class="bigwarning">Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3.</div>
+ This page documents the Eigen2 support modes, a powerful tool to help migrating your project from Eigen 2 to Eigen 3. Don't miss our page on \ref Eigen2ToEigen3 "API changes" between Eigen 2 and Eigen 3. diff --git a/ground/gcs/src/libs/eigen/doc/AsciiQuickReference.txt b/ground/gcs/src/libs/eigen/doc/AsciiQuickReference.txt index 6b0a7cd6a..b9f497f87 100644 --- a/ground/gcs/src/libs/eigen/doc/AsciiQuickReference.txt +++ b/ground/gcs/src/libs/eigen/doc/AsciiQuickReference.txt @@ -16,8 +16,8 @@ double s; // Basic usage // Eigen // Matlab // comments x.size() // length(x) // vector size -C.rows() // size(C)(1) // number of rows -C.cols() // size(C)(2) // number of columns +C.rows() // size(C,1) // number of rows +C.cols() // size(C,2) // number of columns x(i) // x(i+1) // Matlab is 1-based C(i,j) // C(i+1,j+1) // @@ -41,8 +41,8 @@ MatrixXd::Ones(rows,cols) // ones(rows,cols) C.setOnes(rows,cols) // C = ones(rows,cols) MatrixXd::Random(rows,cols) // rand(rows,cols)*2-1 // MatrixXd::Random returns uniform random numbers in (-1, 1). C.setRandom(rows,cols) // C = rand(rows,cols)*2-1 -VectorXd::LinSpace(size,low,high) // linspace(low,high,size)' -v.setLinSpace(size,low,high) // v = linspace(low,high,size)' +VectorXd::LinSpaced(size,low,high) // linspace(low,high,size)' +v.setLinSpaced(size,low,high) // v = linspace(low,high,size)' // Matrix slicing and blocks. All expressions listed here are read/write. @@ -51,20 +51,34 @@ v.setLinSpace(size,low,high) // v = linspace(low,high,size)' // Eigen // Matlab x.head(n) // x(1:n) x.head() // x(1:n) -x.tail(n) // N = rows(x); x(N - n: N) -x.tail() // N = rows(x); x(N - n: N) +x.tail(n) // x(end - n + 1: end) +x.tail() // x(end - n + 1: end) x.segment(i, n) // x(i+1 : i+n) x.segment(i) // x(i+1 : i+n) P.block(i, j, rows, cols) // P(i+1 : i+rows, j+1 : j+cols) P.block(i, j) // P(i+1 : i+rows, j+1 : j+cols) +P.row(i) // P(i+1, :) +P.col(j) // P(:, j+1) +P.leftCols() // P(:, 1:cols) +P.leftCols(cols) // P(:, 1:cols) +P.middleCols(j) // P(:, j+1:j+cols) +P.middleCols(j, cols) // P(:, j+1:j+cols) +P.rightCols() // P(:, end-cols+1:end) +P.rightCols(cols) // P(:, end-cols+1:end) +P.topRows() // P(1:rows, :) +P.topRows(rows) // P(1:rows, :) +P.middleRows(i) // P(i+1:i+rows, :) +P.middleRows(i, rows) // P(i+1:i+rows, :) +P.bottomRows() // P(end-rows+1:end, :) +P.bottomRows(rows) // P(end-rows+1:end, :) P.topLeftCorner(rows, cols) // P(1:rows, 1:cols) -P.topRightCorner(rows, cols) // [m n]=size(P); P(1:rows, n-cols+1:n) -P.bottomLeftCorner(rows, cols) // [m n]=size(P); P(m-rows+1:m, 1:cols) -P.bottomRightCorner(rows, cols) // [m n]=size(P); P(m-rows+1:m, n-cols+1:n) +P.topRightCorner(rows, cols) // P(1:rows, end-cols+1:end) +P.bottomLeftCorner(rows, cols) // P(end-rows+1:end, 1:cols) +P.bottomRightCorner(rows, cols) // P(end-rows+1:end, end-cols+1:end) P.topLeftCorner() // P(1:rows, 1:cols) -P.topRightCorner() // [m n]=size(P); P(1:rows, n-cols+1:n) -P.bottomLeftCorner() // [m n]=size(P); P(m-rows+1:m, 1:cols) -P.bottomRightCorner() // [m n]=size(P); P(m-rows+1:m, n-cols+1:n) +P.topRightCorner() // P(1:rows, end-cols+1:end) +P.bottomLeftCorner() // P(end-rows+1:end, 1:cols) +P.bottomRightCorner() // P(end-rows+1:end, end-cols+1:end) // Of particular note is Eigen's swap function which is highly optimized. // Eigen // Matlab @@ -77,6 +91,8 @@ R.adjoint() // R' R.transpose() // R.' 
or conj(R') R.diagonal() // diag(R) x.asDiagonal() // diag(x) +R.transpose().colwise().reverse(); // rot90(R) +R.conjugate() // conj(R) // All the same as Matlab, but matlab doesn't have *= style operators. // Matrix-vector. Matrix-matrix. Matrix-scalar. @@ -125,10 +141,8 @@ int r, c; // Eigen // Matlab R.minCoeff() // min(R(:)) R.maxCoeff() // max(R(:)) -s = R.minCoeff(&r, &c) // [aa, bb] = min(R); [cc, dd] = min(aa); - // r = bb(dd); c = dd; s = cc -s = R.maxCoeff(&r, &c) // [aa, bb] = max(R); [cc, dd] = max(aa); - // row = bb(dd); col = dd; s = cc +s = R.minCoeff(&r, &c) // [s, i] = min(R(:)); [r, c] = ind2sub(size(R), i); +s = R.maxCoeff(&r, &c) // [s, i] = max(R(:)); [r, c] = ind2sub(size(R), i); R.sum() // sum(R(:)) R.colwise().sum() // sum(R) R.rowwise().sum() // sum(R, 2) or sum(R')' @@ -150,13 +164,27 @@ x.squaredNorm() // dot(x, x) Note the equivalence is not true for co x.dot(y) // dot(x, y) x.cross(y) // cross(x, y) Requires #include +//// Type conversion +// Eigen // Matlab +A.cast(); // double(A) +A.cast(); // single(A) +A.cast(); // int32(A) +A.real(); // real(A) +A.imag(); // imag(A) +// if the original type equals destination type, no work is done + +// Note that for most operations Eigen requires all operands to have the same type: +MatrixXf F = MatrixXf::Zero(3,3); +A += F; // illegal in Eigen. In Matlab A = A+F is allowed +A += F.cast(); // F converted to double and then added (generally, conversion happens on-the-fly) + // Eigen can map existing memory into Eigen matrices. float array[3]; -Map(array, 3).fill(10); -int data[4] = 1, 2, 3, 4; -Matrix2i mat2x2(data); -MatrixXi mat2x2 = Map(data); -MatrixXi mat2x2 = Map(data, 2, 2); +Vector3f::Map(array).fill(10); // create a temporary Map over array and sets entries to 10 +int data[4] = {1, 2, 3, 4}; +Matrix2i mat2x2(data); // copies data into mat2x2 +Matrix2i::Map(data) = 2*mat2x2; // overwrite elements of data with 2*mat2x2 +MatrixXi::Map(data, 2, 2) += mat2x2; // adds mat2x2 to elements of data (alternative syntax if size is not know at compile time) // Solve Ax = b. Result stored in x. Matlab: x = A \ b. x = A.ldlt().solve(b)); // A sym. p.s.d. 
#include diff --git a/ground/gcs/src/libs/eigen/doc/CMakeLists.txt b/ground/gcs/src/libs/eigen/doc/CMakeLists.txt index 8a493031c..02790ee43 100644 --- a/ground/gcs/src/libs/eigen/doc/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/doc/CMakeLists.txt @@ -89,9 +89,11 @@ add_dependencies(doc-unsupported-prerequisites unsupported_snippets unsupported_ add_custom_target(doc ALL COMMAND doxygen COMMAND doxygen Doxyfile-unsupported + COMMAND ${CMAKE_COMMAND} -E copy ${Eigen_BINARY_DIR}/doc/html/group__TopicUnalignedArrayAssert.html ${Eigen_BINARY_DIR}/doc/html/TopicUnalignedArrayAssert.html COMMAND ${CMAKE_COMMAND} -E rename html eigen-doc COMMAND ${CMAKE_COMMAND} -E remove eigen-doc/eigen-doc.tgz - COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc/eigen-doc.tgz eigen-doc + COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc.tgz eigen-doc + COMMAND ${CMAKE_COMMAND} -E rename eigen-doc.tgz eigen-doc/eigen-doc.tgz COMMAND ${CMAKE_COMMAND} -E rename eigen-doc html WORKING_DIRECTORY ${Eigen_BINARY_DIR}/doc) diff --git a/ground/gcs/src/libs/eigen/doc/Doxyfile.in b/ground/gcs/src/libs/eigen/doc/Doxyfile.in index 1a2603b04..696dd2af1 100644 --- a/ground/gcs/src/libs/eigen/doc/Doxyfile.in +++ b/ground/gcs/src/libs/eigen/doc/Doxyfile.in @@ -222,7 +222,7 @@ ALIASES = "only_for_vectors=This is only for vectors (either row- "note_about_using_kernel_to_study_multiple_solutions=If you need a complete analysis of the space of solutions, take the one solution obtained by this method and add to it elements of the kernel, as determined by kernel()." \ "note_about_checking_solutions=This method just tries to find as good a solution as possible. If you want to check whether a solution exists or if it is accurate, just call this function to get a result and then compute the error of this result, or use MatrixBase::isApprox() directly, for instance like this: \code bool a_solution_exists = (A*result).isApprox(b, precision); \endcode This method avoids dividing by zero, so that the non-existence of a solution doesn't by itself mean that you'll get \c inf or \c nan values." \ "note_try_to_help_rvo=This function returns the result by value. In order to make that efficient, it is implemented as just a return statement using a special constructor, hopefully allowing the compiler to perform a RVO (return value optimization)." \ - "nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\" + "nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\"" ALIASES += "eigenAutoToc= " ALIASES += "eigenManualPage=\defgroup" @@ -315,7 +315,7 @@ IDL_PROPERTY_SUPPORT = YES # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. -DISTRIBUTE_GROUP_DOC = NO +DISTRIBUTE_GROUP_DOC = YES # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a @@ -365,7 +365,7 @@ TYPEDEF_HIDES_STRUCT = NO # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. -SYMBOL_CACHE_SIZE = 0 +# SYMBOL_CACHE_SIZE = 0 # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. 
This cache is used to resolve symbols given @@ -562,7 +562,7 @@ GENERATE_BUGLIST = NO # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. -GENERATE_DEPRECATEDLIST= NO +GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. @@ -1465,13 +1465,13 @@ XML_OUTPUT = xml # which can be used by a validating XML parser to check the # syntax of the XML files. -XML_SCHEMA = +# XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. -XML_DTD = +# XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting @@ -1699,7 +1699,7 @@ DOT_NUM_THREADS = 0 # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. -DOT_FONTNAME = FreeSans +DOT_FONTNAME = # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. diff --git a/ground/gcs/src/libs/eigen/doc/LinearLeastSquares.dox b/ground/gcs/src/libs/eigen/doc/LinearLeastSquares.dox deleted file mode 100644 index ab21a87ae..000000000 --- a/ground/gcs/src/libs/eigen/doc/LinearLeastSquares.dox +++ /dev/null @@ -1,27 +0,0 @@ -namespace Eigen { - -/** \eigenManualPage LinearLeastSquares Solving linear least squares problems - -lede - -\eigenAutoToc - -\section LinearLeastSquaresCopied Copied - -The best way to do least squares solving is with a SVD decomposition. Eigen provides one as the JacobiSVD class, and its solve() -is doing least-squares solving. - -Here is an example: - - - - - - -
Example:Output:
\include TutorialLinAlgSVDSolve.cpp \verbinclude TutorialLinAlgSVDSolve.out
- -For more information, including faster but less reliable methods, read our page concentrating on \ref LinearLeastSquares "linear least squares problems". - -*/ - -} diff --git a/ground/gcs/src/libs/eigen/doc/Manual.dox b/ground/gcs/src/libs/eigen/doc/Manual.dox index 3367982ca..52427f066 100644 --- a/ground/gcs/src/libs/eigen/doc/Manual.dox +++ b/ground/gcs/src/libs/eigen/doc/Manual.dox @@ -11,6 +11,7 @@ namespace Eigen { - \subpage TopicCustomizingEigen - \subpage TopicMultiThreading - \subpage TopicUsingIntelMKL + - \subpage TopicPitfalls - \subpage TopicTemplateKeyword - \subpage UserManual_UnderstandingEigen */ @@ -120,6 +121,8 @@ namespace Eigen { \ingroup Sparse_chapter */ /** \addtogroup TopicSparseSystems \ingroup Sparse_chapter */ +/** \addtogroup MatrixfreeSolverExample + \ingroup Sparse_chapter */ /** \addtogroup Sparse_Reference \ingroup Sparse_chapter */ diff --git a/ground/gcs/src/libs/eigen/doc/MatrixfreeSolverExample.dox b/ground/gcs/src/libs/eigen/doc/MatrixfreeSolverExample.dox new file mode 100644 index 000000000..9921c7261 --- /dev/null +++ b/ground/gcs/src/libs/eigen/doc/MatrixfreeSolverExample.dox @@ -0,0 +1,21 @@ + +namespace Eigen { + +/** + +\eigenManualPage MatrixfreeSolverExample Matrix-free solvers + +Iterative solvers such as ConjugateGradient and BiCGSTAB can be used in a matrix free context. To this end, user must provide a wrapper class inheriting EigenBase<> and implementing the following methods: + - Index rows() and Index cols(): returns number of rows and columns respectively + - operator* with and Eigen dense column vector + - resize(rows,cols): needed for source compatibility (can stay empty) + +Eigen::internal::traits<> must also be specialized for the wrapper type. + +For efficiency purpose, one might also want to provide a custom preconditioner. Here is an example using ConjugateGradient and implementing also a custom Jacobi preconditioner: +\include matrixfree_cg.cpp +Output: \verbinclude matrixfree_cg.out + +*/ + +} \ No newline at end of file diff --git a/ground/gcs/src/libs/eigen/doc/Pitfalls.dox b/ground/gcs/src/libs/eigen/doc/Pitfalls.dox new file mode 100644 index 000000000..cf42effef --- /dev/null +++ b/ground/gcs/src/libs/eigen/doc/Pitfalls.dox @@ -0,0 +1,38 @@ +namespace Eigen { + +/** \page TopicPitfalls Common pitfalls + +\section TopicPitfalls_template_keyword Compilation error with template methods + +See this \link TopicTemplateKeyword page \endlink. + +\section TopicPitfalls_auto_keyword C++11 and the auto keyword + +In short: do not use the auto keywords with Eigen's expressions, unless you are 100% sure about what you are doing. In particular, do not use the auto keyword as a replacement for a Matrix<> type. Here is an example: + +\code +MatrixXd A, B; +auto C = A*B; +for(...) { ... w = C * v; ...} +\endcode + +In this example, the type of C is not a MatrixXd but an abstract expression representing a matrix product and storing references to A and B. Therefore, the product of A*B will be carried out multiple times, once per iteration of the for loop. Moreover, if the coefficients of A or B change during the iteration, then C will evaluate to different values. + +Here is another example leading to a segfault: +\code +auto C = ((A+B).eval()).transpose(); +// do something with C +\endcode +The problem is that eval() returns a temporary object (in this case a MatrixXd) which is then referenced by the Transpose<> expression. 
However, this temporary is deleted right after the first line, and there the C expression reference a dead object. The same issue might occur when sub expressions are automatically evaluated by Eigen as in the following example: +\code +VectorXd u, v; +auto C = u + (A*v).normalized(); +// do something with C +\endcode +where the normalized() method has to evaluate the expensive product A*v to avoid evaluating it twice. On the other hand, the following example is perfectly fine: +\code +auto C = (u + (A*v).normalized()).eval(); +\endcode +In this case, C will be a regular VectorXd object. +*/ +} diff --git a/ground/gcs/src/libs/eigen/doc/PreprocessorDirectives.dox b/ground/gcs/src/libs/eigen/doc/PreprocessorDirectives.dox index eedd5524a..cfaba35d8 100644 --- a/ground/gcs/src/libs/eigen/doc/PreprocessorDirectives.dox +++ b/ground/gcs/src/libs/eigen/doc/PreprocessorDirectives.dox @@ -62,14 +62,19 @@ run time. However, these assertions do cost time and can thus be turned off. expect that any objects passed to it are aligned. This will turn off vectorization. Not defined by default. - \b EIGEN_DONT_ALIGN_STATICALLY - disables alignment of arrays on the stack. Not defined by default, unless \c EIGEN_DONT_ALIGN is defined. + - \b EIGEN_DONT_PARALLELIZE - if defined, this disables multi-threading. This is only relevant if you enabled OpenMP. + See \ref TopicMultiThreading for details. - \b EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless alignment is disabled by %Eigen's platform test or the user defining \c EIGEN_DONT_ALIGN. - - \b EIGEN_FAST_MATH - enables some optimizations which might affect the accuracy of the result. The only - optimization this currently includes is single precision sin() and cos() in the present of SSE - vectorization. Defined by default. + - \b EIGEN_FAST_MATH - enables some optimizations which might affect the accuracy of the result. This currently + enables the SSE vectorization of sin() and cos(), and speedups sqrt() for single precision. Defined to 1 by default. + Define it to 0 to disable. - \b EIGEN_UNROLLING_LIMIT - defines the size of a loop to enable meta unrolling. Set it to zero to disable unrolling. The size of a loop here is expressed in %Eigen's own notion of "number of FLOPS", it does not - correspond to the number of iterations or the number of instructions. The default is value 100. + correspond to the number of iterations or the number of instructions. The default is value 100. + - \b EIGEN_STACK_ALLOCATION_LIMIT - defines the maximum bytes for a buffer to be allocated on the stack. For internal + temporary buffers, dynamic memory allocation is employed as a fall back. For fixed-size matrices or arrays, exceeding + this threshold raises a compile time assertion. Use 0 to set no limit. Default is 128 KB. \section TopicPreprocessorDirectivesPlugins Plugins @@ -86,6 +91,7 @@ following macros are supported; none of them are defined by default. - \b EIGEN_MATRIX_PLUGIN - filename of plugin for extending the Matrix class. - \b EIGEN_MATRIXBASE_PLUGIN - filename of plugin for extending the MatrixBase class. - \b EIGEN_PLAINOBJECTBASE_PLUGIN - filename of plugin for extending the PlainObjectBase class. + - \b EIGEN_MAPBASE_PLUGIN - filename of plugin for extending the MapBase class. - \b EIGEN_QUATERNIONBASE_PLUGIN - filename of plugin for extending the QuaternionBase class. - \b EIGEN_SPARSEMATRIX_PLUGIN - filename of plugin for extending the SparseMatrix class. 
- \b EIGEN_SPARSEMATRIXBASE_PLUGIN - filename of plugin for extending the SparseMatrixBase class. diff --git a/ground/gcs/src/libs/eigen/doc/QuickReference.dox b/ground/gcs/src/libs/eigen/doc/QuickReference.dox index 5e0168649..a4be0f68a 100644 --- a/ground/gcs/src/libs/eigen/doc/QuickReference.dox +++ b/ground/gcs/src/libs/eigen/doc/QuickReference.dox @@ -405,19 +405,19 @@ array1 == array2 array1 != array2 array1 == scalar array1 != scalar array1.min(array2) array1.max(array2) array1.abs2() -array1.abs() std::abs(array1) -array1.sqrt() std::sqrt(array1) -array1.log() std::log(array1) -array1.exp() std::exp(array1) -array1.pow(exponent) std::pow(array1,exponent) +array1.abs() abs(array1) +array1.sqrt() sqrt(array1) +array1.log() log(array1) +array1.exp() exp(array1) +array1.pow(exponent) pow(array1,exponent) array1.square() array1.cube() array1.inverse() -array1.sin() std::sin(array1) -array1.cos() std::cos(array1) -array1.tan() std::tan(array1) -array1.asin() std::asin(array1) -array1.acos() std::acos(array1) +array1.sin() sin(array1) +array1.cos() cos(array1) +array1.tan() tan(array1) +array1.asin() asin(array1) +array1.acos() acos(array1) \endcode diff --git a/ground/gcs/src/libs/eigen/doc/SparseLinearSystems.dox b/ground/gcs/src/libs/eigen/doc/SparseLinearSystems.dox index c00be10d3..051a02ed9 100644 --- a/ground/gcs/src/libs/eigen/doc/SparseLinearSystems.dox +++ b/ground/gcs/src/libs/eigen/doc/SparseLinearSystems.dox @@ -35,17 +35,17 @@ They are summarized in the following table: Requires the PaStiX package, \b CeCILL-C optimized for tough problems and symmetric patterns CholmodSupernodalLLT\link CholmodSupport_Module CholmodSupport \endlinkDirect LLt factorizationSPDFill-in reducing, Leverage fast dense algebra - Requires the SuiteSparse package, \b GPL + Requires the SuiteSparse package, \b GPL UmfPackLU\link UmfPackSupport_Module UmfPackSupport \endlinkDirect LU factorizationSquareFill-in reducing, Leverage fast dense algebra - Requires the SuiteSparse package, \b GPL + Requires the SuiteSparse package, \b GPL SuperLU\link SuperLUSupport_Module SuperLUSupport \endlinkDirect LU factorizationSquareFill-in reducing, Leverage fast dense algebra Requires the SuperLU library, (BSD-like) SPQR\link SPQRSupport_Module SPQRSupport \endlink QR factorization Any, rectangularfill-in reducing, multithreaded, fast dense algebra - requires the SuiteSparse package, \b GPL recommended for linear least-squares problems, has a rank-revealing feature + requires the SuiteSparse package, \b GPL recommended for linear least-squares problems, has a rank-revealing feature Here \c SPD means symmetric positive definite. diff --git a/ground/gcs/src/libs/eigen/doc/SparseQuickReference.dox b/ground/gcs/src/libs/eigen/doc/SparseQuickReference.dox index 4a33d0cc9..e0a30edcc 100644 --- a/ground/gcs/src/libs/eigen/doc/SparseQuickReference.dox +++ b/ground/gcs/src/libs/eigen/doc/SparseQuickReference.dox @@ -21,7 +21,7 @@ i.e either row major or column major. The default is column major. Most arithmet Resize/Reserve \code - sm1.resize(m,n); //Change sm1 to a m x n matrix. + sm1.resize(m,n); // Change sm1 to a m x n matrix. sm1.reserve(nnz); // Allocate room for nnz nonzeros elements. \endcode @@ -71,11 +71,10 @@ i.e either row major or column major. The default is column major. Most arithmet Constant or Random Insertion \code -sm1.setZero(); // Set the matrix with zero elements -sm1.setConstant(val); //Replace all the nonzero values with val +sm1.setZero(); \endcode - The matrix sm1 should have been created before ??? 
+Remove all non-zero coefficients @@ -152,10 +151,10 @@ It is easy to perform arithmetic operations on sparse matrices provided that the Permutation \code -perm.indices(); // Reference to the vector of indices +perm.indices(); // Reference to the vector of indices sm1.twistedBy(perm); // Permute rows and columns -sm2 = sm1 * perm; //Permute the columns -sm2 = perm * sm1; // Permute the columns +sm2 = sm1 * perm; // Permute the columns +sm2 = perm * sm1; // Permute the columns \endcode @@ -182,9 +181,9 @@ sm2 = perm * sm1; // Permute the columns \section sparseotherops Other supported operations - + + - + - - + + - + + - - - + + + - - +
Operations Code Notes
Code Notes
Sub-matrices
Sub-matrices \code sm1.block(startRow, startCol, rows, cols); @@ -194,25 +193,31 @@ sm2 = perm * sm1; // Permute the columns sm1.bottomLeftCorner( rows, cols); sm1.bottomRightCorner( rows, cols); \endcode - +Contrary to dense matrices, here all these methods are read-only.\n +See \ref TutorialSparse_SubMatrices and below for read-write sub-matrices. +
Range
Range
\code - sm1.innerVector(outer); - sm1.innerVectors(start, size); - sm1.leftCols(size); - sm2.rightCols(size); - sm1.middleRows(start, numRows); - sm1.middleCols(start, numCols); - sm1.col(j); + sm1.innerVector(outer); // RW + sm1.innerVectors(start, size); // RW + sm1.leftCols(size); // RW + sm2.rightCols(size); // RO because sm2 is row-major + sm1.middleRows(start, numRows); // RO becasue sm1 is column-major + sm1.middleCols(start, numCols); // RW + sm1.col(j); // RW \endcode A inner vector is either a row (for row-major) or a column (for column-major). As stated earlier, the evaluation can be done in a matrix with different storage order +A inner vector is either a row (for row-major) or a column (for column-major).\n +As stated earlier, for a read-write sub-matrix (RW), the evaluation can be done in a matrix with different storage order. +
Triangular and selfadjoint views
Triangular and selfadjoint views \code sm2 = sm1.triangularview(); @@ -223,26 +228,30 @@ sm2 = perm * sm1; // Permute the columns \code \endcode
Triangular solve
Triangular solve
\code dv2 = sm1.triangularView().solve(dv1); - dv2 = sm1.topLeftCorner(size, size).triangularView().solve(dv1); + dv2 = sm1.topLeftCorner(size, size) + .triangularView().solve(dv1); \endcode For general sparse solve, Use any suitable module described at \ref TopicSparseSystems
Low-level API
Low-level API \code -sm1.valuePtr(); // Pointer to the values -sm1.innerIndextr(); // Pointer to the indices. -sm1.outerIndexPtr(); //Pointer to the beginning of each inner vector +sm1.valuePtr(); // Pointer to the values +sm1.innerIndextr(); // Pointer to the indices. +sm1.outerIndexPtr(); // Pointer to the beginning of each inner vector \endcode If the matrix is not in compressed form, makeCompressed() should be called before. Note that these functions are mostly provided for interoperability purposes with external libraries. A better access to the values of the matrix is done by using the InnerIterator class as described in \link TutorialSparse the Tutorial Sparse \endlink section +If the matrix is not in compressed form, makeCompressed() should be called before.\n +Note that these functions are mostly provided for interoperability purposes with external libraries.\n +A better access to the values of the matrix is done by using the InnerIterator class as described in \link TutorialSparse the Tutorial Sparse \endlink section
*/ diff --git a/ground/gcs/src/libs/eigen/doc/TopicMultithreading.dox b/ground/gcs/src/libs/eigen/doc/TopicMultithreading.dox index f7d082668..7db2b0e07 100644 --- a/ground/gcs/src/libs/eigen/doc/TopicMultithreading.dox +++ b/ground/gcs/src/libs/eigen/doc/TopicMultithreading.dox @@ -17,7 +17,7 @@ You can control the number of thread that will be used using either the OpenMP A Unless setNbThreads has been called, Eigen uses the number of threads specified by OpenMP. You can restore this bahavior by calling \code setNbThreads(0); \endcode You can query the number of threads that will be used with: \code -n = Eigen::nbThreads(n); +n = Eigen::nbThreads( ); \endcode You can disable Eigen's multi threading at compile time by defining the EIGEN_DONT_PARALLELIZE preprocessor token. diff --git a/ground/gcs/src/libs/eigen/doc/TutorialGeometry.dox b/ground/gcs/src/libs/eigen/doc/TutorialGeometry.dox index 81aeec978..372a275de 100644 --- a/ground/gcs/src/libs/eigen/doc/TutorialGeometry.dox +++ b/ground/gcs/src/libs/eigen/doc/TutorialGeometry.dox @@ -127,7 +127,7 @@ VectorNf vec1, vec2; vec2 = t.linear() * vec1;\endcode Apply a \em general transformation \n to a \b normal \b vector -(explanations)\code +(explanations)\code VectorNf n1, n2; MatrixNf normalMatrix = t.linear().inverse().transpose(); n2 = (normalMatrix * n1).normalized();\endcode diff --git a/ground/gcs/src/libs/eigen/doc/TutorialSparse.dox b/ground/gcs/src/libs/eigen/doc/TutorialSparse.dox index dbfb4a9eb..ee206cc42 100644 --- a/ground/gcs/src/libs/eigen/doc/TutorialSparse.dox +++ b/ground/gcs/src/libs/eigen/doc/TutorialSparse.dox @@ -83,7 +83,7 @@ There is no notion of compressed/uncompressed mode for a SparseVector. \section TutorialSparseExample First example -Before describing each individual class, let's start with the following typical example: solving the Lapace equation \f$ \nabla u = 0 \f$ on a regular 2D grid using a finite difference scheme and Dirichlet boundary conditions. +Before describing each individual class, let's start with the following typical example: solving the Laplace equation \f$ \nabla u = 0 \f$ on a regular 2D grid using a finite difference scheme and Dirichlet boundary conditions. Such problem can be mathematically expressed as a linear problem of the form \f$ Ax=b \f$ where \f$ x \f$ is the vector of \c m unknowns (in our case, the values of the pixels), \f$ b \f$ is the right hand side vector resulting from the boundary conditions, and \f$ A \f$ is an \f$ m \times m \f$ matrix containing only a few non-zero elements resulting from the discretization of the Laplacian operator. @@ -241,11 +241,11 @@ In the following \em sm denotes a sparse matrix, \em sv a sparse vector, \em dm sm1.real() sm1.imag() -sm1 0.5*sm1 sm1+sm2 sm1-sm2 sm1.cwiseProduct(sm2) \endcode -However, a strong restriction is that the storage orders must match. For instance, in the following example: +However, a strong restriction is that the storage orders must match. For instance, in the following example: \code sm4 = sm1 + sm2 + sm3; \endcode -sm1, sm2, and sm3 must all be row-major or all column major. +sm1, sm2, and sm3 must all be row-major or all column-major. On the other hand, there is no restriction on the target matrix sm4. 
For instance, this means that for computing \f$ A^T + A \f$, the matrix \f$ A^T \f$ must be evaluated into a temporary matrix of compatible storage order: \code @@ -253,12 +253,15 @@ SparseMatrix A, B; B = SparseMatrix(A.transpose()) + A; \endcode -Binary coefficient wise operators can also mix sparse and dense expressions: +Some binary coefficient-wise operators can also mix sparse and dense expressions: \code sm2 = sm1.cwiseProduct(dm1); -dm2 = sm1 + dm1; +dm1 += sm1; \endcode +However, it is not yet possible to add a sparse and a dense matrix as in dm2 = sm1 + dm1. +Please write this as the equivalent dm2 = dm1; dm2 += sm1 (we plan to lift this restriction +in the next release of %Eigen). %Sparse expressions also support transposition: \code @@ -304,6 +307,26 @@ sm2 = sm1.transpose() * P; \endcode +\subsection TutorialSparse_SubMatrices Block operations + +Regarding read-access, sparse matrices expose the same API than for dense matrices to access to sub-matrices such as blocks, columns, and rows. See \ref TutorialBlockOperations for a detailed introduction. +However, for performance reasons, writing to a sub-sparse-matrix is much more limited, and currently only contiguous sets of columns (resp. rows) of a column-major (resp. row-major) SparseMatrix are writable. Moreover, this information has to be known at compile-time, leaving out methods such as block(...) and corner*(...). The available API for write-access to a SparseMatrix are summarized below: +\code +SparseMatrix sm1; +sm1.col(j) = ...; +sm1.leftCols(ncols) = ...; +sm1.middleCols(j,ncols) = ...; +sm1.rightCols(ncols) = ...; + +SparseMatrix sm2; +sm2.row(i) = ...; +sm2.topRows(nrows) = ...; +sm2.middleRows(i,nrows) = ...; +sm2.bottomRows(nrows) = ...; +\endcode + +In addition, sparse matrices expose the SparseMatrixBase::innerVector() and SparseMatrixBase::innerVectors() methods, which are aliases to the col/middleCols methods for a column-major storage, and to the row/middleRows methods for a row-major storage. + \subsection TutorialSparse_TriangularSelfadjoint Triangular and selfadjoint views Just as with dense matrices, the triangularView() function can be used to address a triangular part of the matrix, and perform triangular solves with a dense right hand side: diff --git a/ground/gcs/src/libs/eigen/doc/UnalignedArrayAssert.dox b/ground/gcs/src/libs/eigen/doc/UnalignedArrayAssert.dox index 8c97d7874..b0d6e18f2 100644 --- a/ground/gcs/src/libs/eigen/doc/UnalignedArrayAssert.dox +++ b/ground/gcs/src/libs/eigen/doc/UnalignedArrayAssert.dox @@ -7,8 +7,8 @@ Hello! You are seeing this webpage because your program terminated on an asserti my_program: path/to/eigen/Eigen/src/Core/DenseStorage.h:44: Eigen::internal::matrix_array::internal::matrix_array() [with T = double, int Size = 2, int MatrixOptions = 2, bool Align = true]: -Assertion `(reinterpret_cast(array) & 0xf) == 0 && "this assertion -is explained here: http://eigen.tuxfamily.org/dox/UnalignedArrayAssert.html +Assertion `(reinterpret_cast(array) & (sizemask)) == 0 && "this assertion +is explained here: http://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html **** READ THIS WEB PAGE !!! ****"' failed. @@ -46,9 +46,9 @@ then you need to read this separate page: \ref TopicStructHavingEigenMembers "St Note that here, Eigen::Vector2d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types". 
-\section c2 Cause 2: STL Containers +\section c2 Cause 2: STL Containers or manual memory allocation -If you use STL Containers such as std::vector, std::map, ..., with Eigen objects, or with classes containing Eigen objects, like this, +If you use STL Containers such as std::vector, std::map, ..., with %Eigen objects, or with classes containing %Eigen objects, like this, \code std::vector my_vector; @@ -60,6 +60,8 @@ then you need to read this separate page: \ref TopicStlContainers "Using STL Con Note that here, Eigen::Matrix2f is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member". +The same issue will be exhibited by any classes/functions by-passing operator new to allocate memory, that is, by performing custom memory allocation followed by calls to the placement new operator. This is for instance typically the case of \c std::make_shared or \c std::allocate_shared for which is the solution is to use an \ref aligned_allocator "aligned allocator" as detailed in the \ref TopicStlContainers "solution for STL containers". + \section c3 Cause 3: Passing Eigen objects by value If some function in your code is getting an Eigen object passed by value, like this, @@ -107,7 +109,10 @@ Two possibilities: 128-bit alignment code and thus preserves ABI compatibility, but completely disables vectorization. -For more information, see this FAQ. +If you want to know why defining EIGEN_DONT_VECTORIZE does not by itself disable 128-bit alignment and the assertion, here's the explanation: + +It doesn't disable the assertion, because otherwise code that runs fine without vectorization would suddenly crash when enabling vectorization. +It doesn't disable 128bit alignment, because that would mean that vectorized and non-vectorized code are not mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, as for instance one may want to have in the same application a vectorized path and a non-vectorized path. */ diff --git a/ground/gcs/src/libs/eigen/doc/UsingIntelMKL.dox b/ground/gcs/src/libs/eigen/doc/UsingIntelMKL.dox index 4b624a156..84db992b6 100644 --- a/ground/gcs/src/libs/eigen/doc/UsingIntelMKL.dox +++ b/ground/gcs/src/libs/eigen/doc/UsingIntelMKL.dox @@ -40,7 +40,8 @@ Since Eigen version 3.1 and later, users can benefit from built-in Intel MKL opt Intel MKL provides highly optimized multi-threaded mathematical routines for x86-compatible architectures. Intel MKL is available on Linux, Mac and Windows for both Intel64 and IA32 architectures. -\warning Be aware that Intel® MKL is a proprietary software. It is the responsibility of the users to buy MKL licenses for their products. Moreover, the license of the user product has to allow linking to proprietary software that excludes any unmodified versions of the GPL. +\note +Intel® MKL is a proprietary software and it is the responsibility of users to buy or register for community (free) Intel MKL licenses for their products. Moreover, the license of the user product has to allow linking to proprietary software that excludes any unmodified versions of the GPL. 
Using Intel MKL through Eigen is easy: -# define the \c EIGEN_USE_MKL_ALL macro before including any Eigen's header diff --git a/ground/gcs/src/libs/eigen/doc/eigendoxy.css b/ground/gcs/src/libs/eigen/doc/eigendoxy.css index 60243d870..efaeb46dc 100644 --- a/ground/gcs/src/libs/eigen/doc/eigendoxy.css +++ b/ground/gcs/src/libs/eigen/doc/eigendoxy.css @@ -199,3 +199,13 @@ h3.version { td.width20em p.endtd { width: 20em; } + +.bigwarning { + font-size:2em; + font-weight:bold; + margin:1em; + padding:1em; + color:red; + border:solid; +} + diff --git a/ground/gcs/src/libs/eigen/doc/eigendoxy_footer.html.in b/ground/gcs/src/libs/eigen/doc/eigendoxy_footer.html.in index e62af8ed5..878244a19 100644 --- a/ground/gcs/src/libs/eigen/doc/eigendoxy_footer.html.in +++ b/ground/gcs/src/libs/eigen/doc/eigendoxy_footer.html.in @@ -17,8 +17,7 @@ $generatedby   - - ---> diff --git a/ground/gcs/src/libs/eigen/doc/examples/CMakeLists.txt b/ground/gcs/src/libs/eigen/doc/examples/CMakeLists.txt index 71b272a15..08cf8efd7 100644 --- a/ground/gcs/src/libs/eigen/doc/examples/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/doc/examples/CMakeLists.txt @@ -6,12 +6,10 @@ foreach(example_src ${examples_SRCS}) if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO) target_link_libraries(${example} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO}) endif() - get_target_property(example_executable - ${example} LOCATION) add_custom_command( TARGET ${example} POST_BUILD - COMMAND ${example_executable} + COMMAND ${example} ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out ) add_dependencies(all_examples ${example}) diff --git a/ground/gcs/src/libs/eigen/doc/examples/matrixfree_cg.cpp b/ground/gcs/src/libs/eigen/doc/examples/matrixfree_cg.cpp new file mode 100644 index 000000000..f0631c3a3 --- /dev/null +++ b/ground/gcs/src/libs/eigen/doc/examples/matrixfree_cg.cpp @@ -0,0 +1,180 @@ +#include +#include +#include +#include + +class MatrixReplacement; +template class MatrixReplacement_ProductReturnType; + +namespace Eigen { +namespace internal { + template<> + struct traits : Eigen::internal::traits > + {}; + + template + struct traits > { + // The equivalent plain objet type of the product. This type is used if the product needs to be evaluated into a temporary. + typedef Eigen::Matrix ReturnType; + }; +} +} + +// Inheriting EigenBase should not be needed in the future. +class MatrixReplacement : public Eigen::EigenBase { +public: + // Expose some compile-time information to Eigen: + typedef double Scalar; + typedef double RealScalar; + enum { + ColsAtCompileTime = Eigen::Dynamic, + RowsAtCompileTime = Eigen::Dynamic, + MaxColsAtCompileTime = Eigen::Dynamic, + MaxRowsAtCompileTime = Eigen::Dynamic + }; + + Index rows() const { return 4; } + Index cols() const { return 4; } + + void resize(Index a_rows, Index a_cols) + { + // This method should not be needed in the future. + assert(a_rows==0 && a_cols==0 || a_rows==rows() && a_cols==cols()); + } + + // In the future, the return type should be Eigen::Product + template + MatrixReplacement_ProductReturnType operator*(const Eigen::MatrixBase& x) const { + return MatrixReplacement_ProductReturnType(*this, x.derived()); + } + +}; + +// The proxy class representing the product of a MatrixReplacement with a MatrixBase<> +template +class MatrixReplacement_ProductReturnType : public Eigen::ReturnByValue > { +public: + typedef MatrixReplacement::Index Index; + + // The ctor store references to the matrix and right-hand-side object (usually a vector). 
+ MatrixReplacement_ProductReturnType(const MatrixReplacement& matrix, const Rhs& rhs) + : m_matrix(matrix), m_rhs(rhs) + {} + + Index rows() const { return m_matrix.rows(); } + Index cols() const { return m_rhs.cols(); } + + // This function is automatically called by Eigen. It must evaluate the product of matrix * rhs into y. + template + void evalTo(Dest& y) const + { + y.setZero(4); + + y(0) += 2 * m_rhs(0); y(1) += 1 * m_rhs(0); + y(0) += 1 * m_rhs(1); y(1) += 2 * m_rhs(1); y(2) += 1 * m_rhs(1); + y(1) += 1 * m_rhs(2); y(2) += 2 * m_rhs(2); y(3) += 1 * m_rhs(2); + y(2) += 1 * m_rhs(3); y(3) += 2 * m_rhs(3); + } + +protected: + const MatrixReplacement& m_matrix; + typename Rhs::Nested m_rhs; +}; + + +/*****/ + +// This class simply warp a diagonal matrix as a Jacobi preconditioner. +// In the future such simple and generic wrapper should be shipped within Eigen itsel. +template +class MyJacobiPreconditioner +{ + typedef _Scalar Scalar; + typedef Eigen::Matrix Vector; + typedef typename Vector::Index Index; + + public: + // this typedef is only to export the scalar type and compile-time dimensions to solve_retval + typedef Eigen::Matrix MatrixType; + + MyJacobiPreconditioner() : m_isInitialized(false) {} + + void setInvDiag(const Eigen::VectorXd &invdiag) { + m_invdiag=invdiag; + m_isInitialized=true; + } + + Index rows() const { return m_invdiag.size(); } + Index cols() const { return m_invdiag.size(); } + + template + MyJacobiPreconditioner& analyzePattern(const MatType& ) { return *this; } + + template + MyJacobiPreconditioner& factorize(const MatType& mat) { return *this; } + + template + MyJacobiPreconditioner& compute(const MatType& mat) { return *this; } + + template + void _solve(const Rhs& b, Dest& x) const + { + x = m_invdiag.array() * b.array() ; + } + + template inline const Eigen::internal::solve_retval + solve(const Eigen::MatrixBase& b) const + { + eigen_assert(m_isInitialized && "MyJacobiPreconditioner is not initialized."); + eigen_assert(m_invdiag.size()==b.rows() + && "MyJacobiPreconditioner::solve(): invalid number of rows of the right hand side matrix b"); + return Eigen::internal::solve_retval(*this, b.derived()); + } + + protected: + Vector m_invdiag; + bool m_isInitialized; +}; + +namespace Eigen { +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef MyJacobiPreconditioner<_MatrixType> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} +} + + +/*****/ + + +int main() +{ + MatrixReplacement A; + Eigen::VectorXd b(4), x; + b << 1, 1, 1, 1; + + // solve Ax = b using CG with matrix-free version: + Eigen::ConjugateGradient < MatrixReplacement, Eigen::Lower|Eigen::Upper, MyJacobiPreconditioner > cg; + + Eigen::VectorXd invdiag(4); + invdiag << 1./3., 1./4., 1./4., 1./3.; + + cg.preconditioner().setInvDiag(invdiag); + cg.compute(A); + x = cg.solve(b); + + std::cout << "#iterations: " << cg.iterations() << std::endl; + std::cout << "estimated error: " << cg.error() << std::endl; +} diff --git a/ground/gcs/src/libs/eigen/doc/snippets/CMakeLists.txt b/ground/gcs/src/libs/eigen/doc/snippets/CMakeLists.txt index 92a22ea61..1135900cf 100644 --- a/ground/gcs/src/libs/eigen/doc/snippets/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/doc/snippets/CMakeLists.txt @@ -14,12 +14,10 @@ foreach(snippet_src ${snippets_SRCS}) if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO) target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO}) endif() - 
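The matrix-free example above plugs a user-defined operator into ConjugateGradient; for comparison, here is a minimal sketch of the ordinary SparseMatrix-based usage. It is not part of the patch, but the Lower|Upper variant it uses is the one added to the conjugate_gradient test further below:

```cpp
#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;

  // Small SPD tridiagonal system, the same 4x4 operator the
  // matrix-free example above hard-codes in evalTo().
  SpMat A(4, 4);
  for (int i = 0; i < 4; ++i) {
    A.insert(i, i) = 2.0;
    if (i > 0) A.insert(i, i - 1) = 1.0;
    if (i < 3) A.insert(i, i + 1) = 1.0;
  }
  A.makeCompressed();

  Eigen::VectorXd b = Eigen::VectorXd::Ones(4);

  // Lower|Upper tells CG to use both triangular halves.
  Eigen::ConjugateGradient<SpMat, Eigen::Lower | Eigen::Upper> cg;
  cg.compute(A);
  Eigen::VectorXd x = cg.solve(b);

  std::cout << "#iterations: " << cg.iterations() << "\n"
            << "estimated error: " << cg.error() << std::endl;
  return 0;
}
```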
get_target_property(compile_snippet_executable - ${compile_snippet_target} LOCATION) add_custom_command( TARGET ${compile_snippet_target} POST_BUILD - COMMAND ${compile_snippet_executable} + COMMAND ${compile_snippet_target} ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out ) add_dependencies(all_snippets ${compile_snippet_target}) @@ -27,4 +25,4 @@ foreach(snippet_src ${snippets_SRCS}) PROPERTIES OBJECT_DEPENDS ${snippet_src}) endforeach(snippet_src) -ei_add_target_property(compile_tut_arithmetic_transpose_aliasing COMPILE_FLAGS -DEIGEN_NO_DEBUG) \ No newline at end of file +ei_add_target_property(compile_tut_arithmetic_transpose_aliasing COMPILE_FLAGS -DEIGEN_NO_DEBUG) diff --git a/ground/gcs/src/libs/eigen/doc/snippets/MatrixBase_applyOnTheLeft.cpp b/ground/gcs/src/libs/eigen/doc/snippets/MatrixBase_applyOnTheLeft.cpp new file mode 100644 index 000000000..6398c873a --- /dev/null +++ b/ground/gcs/src/libs/eigen/doc/snippets/MatrixBase_applyOnTheLeft.cpp @@ -0,0 +1,7 @@ +Matrix3f A = Matrix3f::Random(3,3), B; +B << 0,1,0, + 0,0,1, + 1,0,0; +cout << "At start, A = " << endl << A << endl; +A.applyOnTheLeft(B); +cout << "After applyOnTheLeft, A = " << endl << A << endl; diff --git a/ground/gcs/src/libs/eigen/doc/snippets/MatrixBase_applyOnTheRight.cpp b/ground/gcs/src/libs/eigen/doc/snippets/MatrixBase_applyOnTheRight.cpp new file mode 100644 index 000000000..e4b71b2d8 --- /dev/null +++ b/ground/gcs/src/libs/eigen/doc/snippets/MatrixBase_applyOnTheRight.cpp @@ -0,0 +1,9 @@ +Matrix3f A = Matrix3f::Random(3,3), B; +B << 0,1,0, + 0,0,1, + 1,0,0; +cout << "At start, A = " << endl << A << endl; +A *= B; +cout << "After A *= B, A = " << endl << A << endl; +A.applyOnTheRight(B); // equivalent to A *= B +cout << "After applyOnTheRight, A = " << endl << A << endl; diff --git a/ground/gcs/src/libs/eigen/doc/special_examples/CMakeLists.txt b/ground/gcs/src/libs/eigen/doc/special_examples/CMakeLists.txt index 138a2f6ef..3ab75dbfe 100644 --- a/ground/gcs/src/libs/eigen/doc/special_examples/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/doc/special_examples/CMakeLists.txt @@ -1,4 +1,3 @@ - if(NOT EIGEN_TEST_NOQT) find_package(Qt4) if(QT4_FOUND) @@ -6,16 +5,17 @@ if(NOT EIGEN_TEST_NOQT) endif() endif(NOT EIGEN_TEST_NOQT) - if(QT4_FOUND) add_executable(Tutorial_sparse_example Tutorial_sparse_example.cpp Tutorial_sparse_example_details.cpp) target_link_libraries(Tutorial_sparse_example ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO} ${QT_QTCORE_LIBRARY} ${QT_QTGUI_LIBRARY}) add_custom_command( - TARGET Tutorial_sparse_example - POST_BUILD - COMMAND Tutorial_sparse_example - ARGS ${CMAKE_CURRENT_BINARY_DIR}/../html/Tutorial_sparse_example.jpeg + TARGET Tutorial_sparse_example + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/../html/ + COMMAND Tutorial_sparse_example ARGS ${CMAKE_CURRENT_BINARY_DIR}/../html/Tutorial_sparse_example.jpeg ) + add_dependencies(all_examples Tutorial_sparse_example) endif(QT4_FOUND) + diff --git a/ground/gcs/src/libs/eigen/doc/special_examples/Tutorial_sparse_example_details.cpp b/ground/gcs/src/libs/eigen/doc/special_examples/Tutorial_sparse_example_details.cpp index 8c3020b63..7d820b44a 100644 --- a/ground/gcs/src/libs/eigen/doc/special_examples/Tutorial_sparse_example_details.cpp +++ b/ground/gcs/src/libs/eigen/doc/special_examples/Tutorial_sparse_example_details.cpp @@ -11,8 +11,8 @@ void insertCoefficient(int id, int i, int j, double w, std::vector& coeffs, int n = boundary.size(); int id1 = i+j*n; - if(i==-1 || i==n) b(id) -= w * boundary(j); // 
constrained coeffcieint - else if(j==-1 || j==n) b(id) -= w * boundary(i); // constrained coeffcieint + if(i==-1 || i==n) b(id) -= w * boundary(j); // constrained coefficient + else if(j==-1 || j==n) b(id) -= w * boundary(i); // constrained coefficient else coeffs.push_back(T(id,id1,w)); // unknown coefficient } diff --git a/ground/gcs/src/libs/eigen/eigen3.pc.in b/ground/gcs/src/libs/eigen/eigen3.pc.in index c5855de33..3368a3aa1 100644 --- a/ground/gcs/src/libs/eigen/eigen3.pc.in +++ b/ground/gcs/src/libs/eigen/eigen3.pc.in @@ -1,6 +1,9 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=${prefix} + Name: Eigen3 Description: A C++ template library for linear algebra: vectors, matrices, and related algorithms Requires: -Version: ${EIGEN_VERSION_NUMBER} +Version: @EIGEN_VERSION_NUMBER@ Libs: -Cflags: -I${INCLUDE_INSTALL_DIR} +Cflags: -I${prefix}/@INCLUDE_INSTALL_DIR@ diff --git a/ground/gcs/src/libs/eigen/failtest/CMakeLists.txt b/ground/gcs/src/libs/eigen/failtest/CMakeLists.txt index 5afa2ac82..cadc6a255 100644 --- a/ground/gcs/src/libs/eigen/failtest/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/failtest/CMakeLists.txt @@ -26,6 +26,23 @@ ei_add_failtest("block_on_const_type_actually_const_1") ei_add_failtest("transpose_on_const_type_actually_const") ei_add_failtest("diagonal_on_const_type_actually_const") +ei_add_failtest("ref_1") +ei_add_failtest("ref_2") +ei_add_failtest("ref_3") +ei_add_failtest("ref_4") +ei_add_failtest("ref_5") + +ei_add_failtest("partialpivlu_int") +ei_add_failtest("fullpivlu_int") +ei_add_failtest("llt_int") +ei_add_failtest("ldlt_int") +ei_add_failtest("qr_int") +ei_add_failtest("colpivqr_int") +ei_add_failtest("fullpivqr_int") +ei_add_failtest("jacobisvd_int") +ei_add_failtest("eigensolver_int") +ei_add_failtest("eigensolver_cplx") + if (EIGEN_FAILTEST_FAILURE_COUNT) message(FATAL_ERROR "${EIGEN_FAILTEST_FAILURE_COUNT} out of ${EIGEN_FAILTEST_COUNT} failtests FAILED. 
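The insertCoefficient() helper above pushes (row, column, value) triplets into a std::vector; as a hedged illustration of the surrounding Tutorial_sparse_example workflow (names and sizes here are illustrative, not taken from the patch), such a triplet list is normally turned into a SparseMatrix like this:

```cpp
#include <Eigen/Sparse>
#include <vector>

int main()
{
  typedef Eigen::Triplet<double> T;

  std::vector<T> coeffs;
  // (row, col, value) entries; duplicate positions are summed by setFromTriplets().
  coeffs.push_back(T(0, 0,  4.0));
  coeffs.push_back(T(0, 1, -1.0));
  coeffs.push_back(T(1, 0, -1.0));
  coeffs.push_back(T(1, 1,  4.0));

  Eigen::SparseMatrix<double> A(2, 2);
  A.setFromTriplets(coeffs.begin(), coeffs.end());
  return 0;
}
```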
" diff --git a/ground/gcs/src/libs/eigen/failtest/colpivqr_int.cpp b/ground/gcs/src/libs/eigen/failtest/colpivqr_int.cpp new file mode 100644 index 000000000..db11910d4 --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/colpivqr_int.cpp @@ -0,0 +1,14 @@ +#include "../Eigen/QR" + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +#define SCALAR int +#else +#define SCALAR float +#endif + +using namespace Eigen; + +int main() +{ + ColPivHouseholderQR > qr(Matrix::Random(10,10)); +} diff --git a/ground/gcs/src/libs/eigen/failtest/eigensolver_cplx.cpp b/ground/gcs/src/libs/eigen/failtest/eigensolver_cplx.cpp new file mode 100644 index 000000000..c2e21e189 --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/eigensolver_cplx.cpp @@ -0,0 +1,14 @@ +#include "../Eigen/Eigenvalues" + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +#define SCALAR std::complex +#else +#define SCALAR float +#endif + +using namespace Eigen; + +int main() +{ + EigenSolver > eig(Matrix::Random(10,10)); +} diff --git a/ground/gcs/src/libs/eigen/failtest/eigensolver_int.cpp b/ground/gcs/src/libs/eigen/failtest/eigensolver_int.cpp new file mode 100644 index 000000000..eda8dc20b --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/eigensolver_int.cpp @@ -0,0 +1,14 @@ +#include "../Eigen/Eigenvalues" + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +#define SCALAR int +#else +#define SCALAR float +#endif + +using namespace Eigen; + +int main() +{ + EigenSolver > eig(Matrix::Random(10,10)); +} diff --git a/ground/gcs/src/libs/eigen/failtest/fullpivlu_int.cpp b/ground/gcs/src/libs/eigen/failtest/fullpivlu_int.cpp new file mode 100644 index 000000000..e9d2c6eb3 --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/fullpivlu_int.cpp @@ -0,0 +1,14 @@ +#include "../Eigen/LU" + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +#define SCALAR int +#else +#define SCALAR float +#endif + +using namespace Eigen; + +int main() +{ + FullPivLU > lu(Matrix::Random(10,10)); +} diff --git a/ground/gcs/src/libs/eigen/failtest/fullpivqr_int.cpp b/ground/gcs/src/libs/eigen/failtest/fullpivqr_int.cpp new file mode 100644 index 000000000..d182a7b6b --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/fullpivqr_int.cpp @@ -0,0 +1,14 @@ +#include "../Eigen/QR" + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +#define SCALAR int +#else +#define SCALAR float +#endif + +using namespace Eigen; + +int main() +{ + FullPivHouseholderQR > qr(Matrix::Random(10,10)); +} diff --git a/ground/gcs/src/libs/eigen/failtest/jacobisvd_int.cpp b/ground/gcs/src/libs/eigen/failtest/jacobisvd_int.cpp new file mode 100644 index 000000000..12790aef1 --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/jacobisvd_int.cpp @@ -0,0 +1,14 @@ +#include "../Eigen/SVD" + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +#define SCALAR int +#else +#define SCALAR float +#endif + +using namespace Eigen; + +int main() +{ + JacobiSVD > qr(Matrix::Random(10,10)); +} diff --git a/ground/gcs/src/libs/eigen/failtest/ldlt_int.cpp b/ground/gcs/src/libs/eigen/failtest/ldlt_int.cpp new file mode 100644 index 000000000..243e45746 --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/ldlt_int.cpp @@ -0,0 +1,14 @@ +#include "../Eigen/Cholesky" + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +#define SCALAR int +#else +#define SCALAR float +#endif + +using namespace Eigen; + +int main() +{ + LDLT > ldlt(Matrix::Random(10,10)); +} diff --git a/ground/gcs/src/libs/eigen/failtest/llt_int.cpp b/ground/gcs/src/libs/eigen/failtest/llt_int.cpp new file mode 100644 index 000000000..cb020650d --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/llt_int.cpp @@ -0,0 +1,14 @@ 
+#include "../Eigen/Cholesky" + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +#define SCALAR int +#else +#define SCALAR float +#endif + +using namespace Eigen; + +int main() +{ + LLT > llt(Matrix::Random(10,10)); +} diff --git a/ground/gcs/src/libs/eigen/failtest/partialpivlu_int.cpp b/ground/gcs/src/libs/eigen/failtest/partialpivlu_int.cpp new file mode 100644 index 000000000..98ef282ea --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/partialpivlu_int.cpp @@ -0,0 +1,14 @@ +#include "../Eigen/LU" + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +#define SCALAR int +#else +#define SCALAR float +#endif + +using namespace Eigen; + +int main() +{ + PartialPivLU > lu(Matrix::Random(10,10)); +} diff --git a/ground/gcs/src/libs/eigen/failtest/qr_int.cpp b/ground/gcs/src/libs/eigen/failtest/qr_int.cpp new file mode 100644 index 000000000..ce200e818 --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/qr_int.cpp @@ -0,0 +1,14 @@ +#include "../Eigen/QR" + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +#define SCALAR int +#else +#define SCALAR float +#endif + +using namespace Eigen; + +int main() +{ + HouseholderQR > qr(Matrix::Random(10,10)); +} diff --git a/ground/gcs/src/libs/eigen/failtest/ref_1.cpp b/ground/gcs/src/libs/eigen/failtest/ref_1.cpp new file mode 100644 index 000000000..8b798d53d --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/ref_1.cpp @@ -0,0 +1,18 @@ +#include "../Eigen/Core" + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +#define CV_QUALIFIER const +#else +#define CV_QUALIFIER +#endif + +using namespace Eigen; + +void call_ref(Ref a) { } + +int main() +{ + VectorXf a(10); + CV_QUALIFIER VectorXf& ac(a); + call_ref(ac); +} diff --git a/ground/gcs/src/libs/eigen/failtest/ref_2.cpp b/ground/gcs/src/libs/eigen/failtest/ref_2.cpp new file mode 100644 index 000000000..0b779ccf5 --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/ref_2.cpp @@ -0,0 +1,15 @@ +#include "../Eigen/Core" + +using namespace Eigen; + +void call_ref(Ref a) { } + +int main() +{ + MatrixXf A(10,10); +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD + call_ref(A.row(3)); +#else + call_ref(A.col(3)); +#endif +} diff --git a/ground/gcs/src/libs/eigen/failtest/ref_3.cpp b/ground/gcs/src/libs/eigen/failtest/ref_3.cpp new file mode 100644 index 000000000..f46027d48 --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/ref_3.cpp @@ -0,0 +1,15 @@ +#include "../Eigen/Core" + +using namespace Eigen; + +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD +void call_ref(Ref a) { } +#else +void call_ref(const Ref &a) { } +#endif + +int main() +{ + VectorXf a(10); + call_ref(a+a); +} diff --git a/ground/gcs/src/libs/eigen/failtest/ref_4.cpp b/ground/gcs/src/libs/eigen/failtest/ref_4.cpp new file mode 100644 index 000000000..6c11fa4cb --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/ref_4.cpp @@ -0,0 +1,15 @@ +#include "../Eigen/Core" + +using namespace Eigen; + +void call_ref(Ref > a) {} + +int main() +{ + MatrixXf A(10,10); +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD + call_ref(A.transpose()); +#else + call_ref(A); +#endif +} diff --git a/ground/gcs/src/libs/eigen/failtest/ref_5.cpp b/ground/gcs/src/libs/eigen/failtest/ref_5.cpp new file mode 100644 index 000000000..846d52795 --- /dev/null +++ b/ground/gcs/src/libs/eigen/failtest/ref_5.cpp @@ -0,0 +1,16 @@ +#include "../Eigen/Core" + +using namespace Eigen; + +void call_ref(Ref a) { } + +int main() +{ + VectorXf a(10); + DenseBase &ac(a); +#ifdef EIGEN_SHOULD_FAIL_TO_BUILD + call_ref(ac); +#else + call_ref(ac.derived()); +#endif +} diff --git a/ground/gcs/src/libs/eigen/lapack/CMakeLists.txt 
b/ground/gcs/src/libs/eigen/lapack/CMakeLists.txt index 7e7444326..9883d4c72 100644 --- a/ground/gcs/src/libs/eigen/lapack/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/lapack/CMakeLists.txt @@ -7,6 +7,9 @@ workaround_9220(Fortran EIGEN_Fortran_COMPILER_WORKS) if(EIGEN_Fortran_COMPILER_WORKS) enable_language(Fortran OPTIONAL) + if(NOT CMAKE_Fortran_COMPILER) + set(EIGEN_Fortran_COMPILER_WORKS OFF) + endif() endif() add_custom_target(lapack) diff --git a/ground/gcs/src/libs/eigen/scripts/eigen_gen_docs b/ground/gcs/src/libs/eigen/scripts/eigen_gen_docs index 9b71cd8e0..0e6f9ada2 100644 --- a/ground/gcs/src/libs/eigen/scripts/eigen_gen_docs +++ b/ground/gcs/src/libs/eigen/scripts/eigen_gen_docs @@ -4,6 +4,7 @@ # You should call this script with USER set as you want, else some default # will be used USER=${USER:-'orzel'} +UPLOAD_DIR=dox #ulimit -v 1024000 @@ -14,10 +15,10 @@ mkdir build -p #step 2 : upload # (the '/' at the end of path is very important, see rsync documentation) -rsync -az --no-p --delete build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/dox-devel/ || { echo "upload failed"; exit 1; } +rsync -az --no-p --delete build/doc/html/ $USER@ssh.tuxfamily.org:eigen/eigen.tuxfamily.org-web/htdocs/$UPLOAD_DIR/ || { echo "upload failed"; exit 1; } #step 3 : fix the perm -ssh $USER@ssh.tuxfamily.org 'chmod -R g+w /home/eigen/eigen.tuxfamily.org-web/htdocs/dox-devel' || { echo "perm failed"; exit 1; } +ssh $USER@ssh.tuxfamily.org "chmod -R g+w /home/eigen/eigen.tuxfamily.org-web/htdocs/$UPLOAD_DIR" || { echo "perm failed"; exit 1; } echo "Uploaded successfully" diff --git a/ground/gcs/src/libs/eigen/test/CMakeLists.txt b/ground/gcs/src/libs/eigen/test/CMakeLists.txt index ccb0fc798..c0d8a4e28 100644 --- a/ground/gcs/src/libs/eigen/test/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/test/CMakeLists.txt @@ -66,7 +66,7 @@ endif() find_package(Pastix) find_package(Scotch) -find_package(Metis) +find_package(Metis 5.0 REQUIRED) if(PASTIX_FOUND AND BLAS_FOUND) add_definitions("-DEIGEN_PASTIX_SUPPORT") include_directories(${PASTIX_INCLUDES}) @@ -202,7 +202,9 @@ ei_add_test(geo_alignedbox) ei_add_test(stdvector) ei_add_test(stdvector_overload) ei_add_test(stdlist) +ei_add_test(stdlist_overload) ei_add_test(stddeque) +ei_add_test(stddeque_overload) ei_add_test(resize) ei_add_test(sparse_vector) ei_add_test(sparse_basic) @@ -222,6 +224,8 @@ ei_add_test(sizeoverflow) ei_add_test(prec_inverse_4x4) ei_add_test(vectorwiseop) ei_add_test(special_numbers) +ei_add_test(rvalue_types) +ei_add_test(mpl2only) ei_add_test(simplicial_cholesky) ei_add_test(conjugate_gradient) @@ -279,6 +283,13 @@ ei_add_property(EIGEN_TESTING_SUMMARY "CXX_FLAGS: ${CMAKE_CXX_FLAGS}\n") ei_add_property(EIGEN_TESTING_SUMMARY "Sparse lib flags: ${SPARSE_LIBS}\n") option(EIGEN_TEST_EIGEN2 "Run whole Eigen2 test suite against EIGEN2_SUPPORT" OFF) +mark_as_advanced(EIGEN_TEST_EIGEN2) if(EIGEN_TEST_EIGEN2) add_subdirectory(eigen2) endif() + + +option(EIGEN_TEST_BUILD_DOCUMENTATION "Test building the doxygen documentation" OFF) +IF(EIGEN_TEST_BUILD_DOCUMENTATION) + add_dependencies(buildtests doc) +ENDIF() diff --git a/ground/gcs/src/libs/eigen/test/array.cpp b/ground/gcs/src/libs/eigen/test/array.cpp index 8960e49f8..68f6b3d7a 100644 --- a/ground/gcs/src/libs/eigen/test/array.cpp +++ b/ground/gcs/src/libs/eigen/test/array.cpp @@ -109,6 +109,8 @@ template void comparisons(const ArrayType& m) VERIFY(! (m1 < m3).all() ); VERIFY(! 
(m1 > m3).all() ); } + VERIFY(!(m1 > m2 && m1 < m2).any()); + VERIFY((m1 <= m2 || m1 >= m2).all()); // comparisons to scalar VERIFY( (m1 != (m1(r,c)+1) ).any() ); @@ -167,21 +169,14 @@ template void array_real(const ArrayType& m) Scalar s1 = internal::random(); // these tests are mostly to check possible compilation issues. -// VERIFY_IS_APPROX(m1.sin(), std::sin(m1)); VERIFY_IS_APPROX(m1.sin(), sin(m1)); -// VERIFY_IS_APPROX(m1.cos(), std::cos(m1)); VERIFY_IS_APPROX(m1.cos(), cos(m1)); -// VERIFY_IS_APPROX(m1.asin(), std::asin(m1)); VERIFY_IS_APPROX(m1.asin(), asin(m1)); -// VERIFY_IS_APPROX(m1.acos(), std::acos(m1)); VERIFY_IS_APPROX(m1.acos(), acos(m1)); -// VERIFY_IS_APPROX(m1.tan(), std::tan(m1)); VERIFY_IS_APPROX(m1.tan(), tan(m1)); VERIFY_IS_APPROX(cos(m1+RealScalar(3)*m2), cos((m1+RealScalar(3)*m2).eval())); -// VERIFY_IS_APPROX(std::cos(m1+RealScalar(3)*m2), std::cos((m1+RealScalar(3)*m2).eval())); -// VERIFY_IS_APPROX(m1.abs().sqrt(), std::sqrt(std::abs(m1))); VERIFY_IS_APPROX(m1.abs().sqrt(), sqrt(abs(m1))); VERIFY_IS_APPROX(m1.abs(), sqrt(numext::abs2(m1))); @@ -190,9 +185,10 @@ template void array_real(const ArrayType& m) if(!NumTraits::IsComplex) VERIFY_IS_APPROX(numext::real(m1), m1); - VERIFY_IS_APPROX(m1.abs().log() , log(abs(m1))); + // shift argument of logarithm so that it is not zero + Scalar smallNumber = NumTraits::dummy_precision(); + VERIFY_IS_APPROX((m1.abs() + smallNumber).log() , log(abs(m1) + smallNumber)); -// VERIFY_IS_APPROX(m1.exp(), std::exp(m1)); VERIFY_IS_APPROX(m1.exp() * m2.exp(), exp(m1+m2)); VERIFY_IS_APPROX(m1.exp(), exp(m1)); VERIFY_IS_APPROX(m1.exp() / m2.exp(),(m1-m2).exp()); @@ -236,7 +232,6 @@ template void array_complex(const ArrayType& m) m2(i,j) = sqrt(m1(i,j)); VERIFY_IS_APPROX(m1.sqrt(), m2); -// VERIFY_IS_APPROX(m1.sqrt(), std::sqrt(m1)); VERIFY_IS_APPROX(m1.sqrt(), Eigen::sqrt(m1)); } diff --git a/ground/gcs/src/libs/eigen/test/array_for_matrix.cpp b/ground/gcs/src/libs/eigen/test/array_for_matrix.cpp index 1250c9ff5..9667e1f14 100644 --- a/ground/gcs/src/libs/eigen/test/array_for_matrix.cpp +++ b/ground/gcs/src/libs/eigen/test/array_for_matrix.cpp @@ -102,6 +102,7 @@ template void comparisons(const MatrixType& m) VERIFY( (m1.array() > (m1(r,c)-1) ).any() ); VERIFY( (m1.array() < (m1(r,c)+1) ).any() ); VERIFY( (m1.array() == m1(r,c) ).any() ); + VERIFY( m1.cwiseEqual(m1(r,c)).any() ); // test Select VERIFY_IS_APPROX( (m1.array() void cwise_min_max(const MatrixType& m) // min/max with scalar input VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, minM1), m1.cwiseMin( minM1)); - VERIFY_IS_APPROX(m1, m1.cwiseMin( maxM1)); + VERIFY_IS_APPROX(m1, m1.cwiseMin(maxM1)); + VERIFY_IS_APPROX(-m1, (-m1).cwiseMin(-minM1)); + VERIFY_IS_APPROX(-m1.array(), ((-m1).array().min)( -minM1)); VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, maxM1), m1.cwiseMax( maxM1)); - VERIFY_IS_APPROX(m1, m1.cwiseMax( minM1)); + VERIFY_IS_APPROX(m1, m1.cwiseMax(minM1)); + VERIFY_IS_APPROX(-m1, (-m1).cwiseMax(-maxM1)); + VERIFY_IS_APPROX(-m1.array(), ((-m1).array().max)(-maxM1)); VERIFY_IS_APPROX(MatrixType::Constant(rows,cols, minM1).array(), (m1.array().min)( minM1)); VERIFY_IS_APPROX(m1.array(), (m1.array().min)( maxM1)); @@ -202,6 +207,12 @@ template void resize(const MatrixTraits& t) VERIFY(a1.size()==cols); } +void regression_bug_654() +{ + ArrayXf a = RowVectorXf(3); + VectorXf v = Array(3); +} + void test_array_for_matrix() { for(int i = 0; i < g_repeat; i++) { @@ -239,4 +250,5 @@ void test_array_for_matrix() CALL_SUBTEST_5( 
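The new array.cpp and array_for_matrix.cpp checks above exercise coefficient-wise comparisons, boolean reductions and min/max; a small stand-alone sketch of the same API (illustrative, not from the patch):

```cpp
#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::ArrayXXf m1 = Eigen::ArrayXXf::Random(3, 3);
  Eigen::ArrayXXf m2 = Eigen::ArrayXXf::Random(3, 3);

  // Coefficient-wise comparisons return boolean arrays that can be reduced
  // with any() / all(), as in the new array.cpp checks.
  bool contradictory = ((m1 > m2) && (m1 < m2)).any();   // always false
  bool exhaustive    = ((m1 <= m2) || (m1 >= m2)).all(); // always true

  // Matrix-world equivalents use the cwise* prefix.
  Eigen::MatrixXf a = m1.matrix(), b = m2.matrix();
  Eigen::MatrixXf lo = a.cwiseMin(b);  // coefficient-wise minimum
  Eigen::MatrixXf hi = a.cwiseMax(b);  // coefficient-wise maximum

  std::cout << contradictory << " " << exhaustive << " "
            << lo.norm() << " " << hi.norm() << std::endl;
  return 0;
}
```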
resize(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_6( resize(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } + CALL_SUBTEST_6( regression_bug_654() ); } diff --git a/ground/gcs/src/libs/eigen/test/block.cpp b/ground/gcs/src/libs/eigen/test/block.cpp index 189fa5aba..9ed5d7bc5 100644 --- a/ground/gcs/src/libs/eigen/test/block.cpp +++ b/ground/gcs/src/libs/eigen/test/block.cpp @@ -10,6 +10,26 @@ #define EIGEN_NO_STATIC_ASSERT // otherwise we fail at compile time on unused paths #include "main.h" +template +typename Eigen::internal::enable_if::IsComplex,typename MatrixType::Scalar>::type +block_real_only(const MatrixType &m1, Index r1, Index r2, Index c1, Index c2, const Scalar& s1) { + // check cwise-Functions: + VERIFY_IS_APPROX(m1.row(r1).cwiseMax(s1), m1.cwiseMax(s1).row(r1)); + VERIFY_IS_APPROX(m1.col(c1).cwiseMin(s1), m1.cwiseMin(s1).col(c1)); + + VERIFY_IS_APPROX(m1.block(r1,c1,r2-r1+1,c2-c1+1).cwiseMin(s1), m1.cwiseMin(s1).block(r1,c1,r2-r1+1,c2-c1+1)); + VERIFY_IS_APPROX(m1.block(r1,c1,r2-r1+1,c2-c1+1).cwiseMax(s1), m1.cwiseMax(s1).block(r1,c1,r2-r1+1,c2-c1+1)); + + return Scalar(0); +} + +template +typename Eigen::internal::enable_if::IsComplex,typename MatrixType::Scalar>::type +block_real_only(const MatrixType &, Index, Index, Index, Index, const Scalar&) { + return Scalar(0); +} + + template void block(const MatrixType& m) { typedef typename MatrixType::Index Index; @@ -37,6 +57,8 @@ template void block(const MatrixType& m) Index c1 = internal::random(0,cols-1); Index c2 = internal::random(c1,cols-1); + block_real_only(m1, r1, r2, c1, c1, s1); + //check row() and col() VERIFY_IS_EQUAL(m1.col(c1).transpose(), m1.transpose().row(c1)); //check operator(), both constant and non-constant, on row() and col() @@ -51,7 +73,8 @@ template void block(const MatrixType& m) VERIFY_IS_APPROX(m1.col(c1), m1_copy.col(c1) + s1 * m1_copy.col(c2)); m1.col(c1).col(0) += s1 * m1_copy.col(c2); VERIFY_IS_APPROX(m1.col(c1), m1_copy.col(c1) + Scalar(2) * s1 * m1_copy.col(c2)); - + + //check block() Matrix b1(1,1); b1(0,0) = m1(r1,c1); diff --git a/ground/gcs/src/libs/eigen/test/cholesky.cpp b/ground/gcs/src/libs/eigen/test/cholesky.cpp index 378525a83..56885deb7 100644 --- a/ground/gcs/src/libs/eigen/test/cholesky.cpp +++ b/ground/gcs/src/libs/eigen/test/cholesky.cpp @@ -68,6 +68,7 @@ template void cholesky(const MatrixType& m) Index cols = m.cols(); typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; typedef Matrix SquareMatrixType; typedef Matrix VectorType; @@ -179,6 +180,57 @@ template void cholesky(const MatrixType& m) // restore if(sign == -1) symm = -symm; + + // check matrices coming from linear constraints with Lagrange multipliers + if(rows>=3) + { + SquareMatrixType A = symm; + int c = internal::random(0,rows-2); + A.bottomRightCorner(c,c).setZero(); + // Make sure a solution exists: + vecX.setRandom(); + vecB = A * vecX; + vecX.setZero(); + ldltlo.compute(A); + VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); + vecX = ldltlo.solve(vecB); + VERIFY_IS_APPROX(A * vecX, vecB); + } + + // check non-full rank matrices + if(rows>=3) + { + int r = internal::random(1,rows-1); + Matrix a = Matrix::Random(rows,r); + SquareMatrixType A = a * a.adjoint(); + // Make sure a solution exists: + vecX.setRandom(); + vecB = A * vecX; + vecX.setZero(); + ldltlo.compute(A); + VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); + vecX = ldltlo.solve(vecB); + 
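The added cholesky.cpp blocks solve systems whose matrix is only positive semidefinite (a zero trailing block or a rank-deficient product); a minimal sketch of that LDLT workflow on a deliberately rank-one matrix, assuming the upgraded LDLT behaviour these tests assert:

```cpp
#include <Eigen/Cholesky>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Rank-one positive semidefinite matrix, as in the "non-full rank" check.
  Eigen::Matrix2d A;
  A << 1, 1,
       1, 1;

  // Build a consistent right-hand side so that a solution exists.
  Eigen::Vector2d x_ref(1.0, 2.0);
  Eigen::Vector2d b = A * x_ref;   // = (3, 3)

  Eigen::LDLT<Eigen::Matrix2d> ldlt(A);
  Eigen::Vector2d x = ldlt.solve(b);

  std::cout << "residual: " << (A * x - b).norm() << "\n"
            << "reconstruction error: "
            << (ldlt.reconstructedMatrix() - A).norm() << std::endl;
  return 0;
}
```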
VERIFY_IS_APPROX(A * vecX, vecB); + } + + // check matrices with a wide spectrum + if(rows>=3) + { + RealScalar s = (std::min)(16,std::numeric_limits::max_exponent10/8); + Matrix a = Matrix::Random(rows,rows); + Matrix d = Matrix::Random(rows); + for(int k=0; k(-s,s)); + SquareMatrixType A = a * d.asDiagonal() * a.adjoint(); + // Make sure a solution exists: + vecX.setRandom(); + vecB = A * vecX; + vecX.setZero(); + ldltlo.compute(A); + VERIFY_IS_APPROX(A, ldltlo.reconstructedMatrix()); + vecX = ldltlo.solve(vecB); + VERIFY_IS_APPROX(A * vecX, vecB); + } } // update/downdate @@ -263,23 +315,43 @@ template void cholesky_bug241(const MatrixType& m) // LDLT is not guaranteed to work for indefinite matrices, but happens to work fine if matrix is diagonal. // This test checks that LDLT reports correctly that matrix is indefinite. -// See http://forum.kde.org/viewtopic.php?f=74&t=106942 -template void cholesky_indefinite(const MatrixType& m) +// See http://forum.kde.org/viewtopic.php?f=74&t=106942 and bug 736 +template void cholesky_definiteness(const MatrixType& m) { eigen_assert(m.rows() == 2 && m.cols() == 2); MatrixType mat; + LDLT ldlt(2); + { mat << 1, 0, 0, -1; - LDLT ldlt(mat); + ldlt.compute(mat); VERIFY(!ldlt.isNegative()); VERIFY(!ldlt.isPositive()); } { mat << 1, 2, 2, 1; - LDLT ldlt(mat); + ldlt.compute(mat); VERIFY(!ldlt.isNegative()); VERIFY(!ldlt.isPositive()); } + { + mat << 0, 0, 0, 0; + ldlt.compute(mat); + VERIFY(ldlt.isNegative()); + VERIFY(ldlt.isPositive()); + } + { + mat << 0, 0, 0, 1; + ldlt.compute(mat); + VERIFY(!ldlt.isNegative()); + VERIFY(ldlt.isPositive()); + } + { + mat << -1, 0, 0, 0; + ldlt.compute(mat); + VERIFY(ldlt.isNegative()); + VERIFY(!ldlt.isPositive()); + } } template void cholesky_verify_assert() @@ -309,7 +381,7 @@ void test_cholesky() CALL_SUBTEST_1( cholesky(Matrix()) ); CALL_SUBTEST_3( cholesky(Matrix2d()) ); CALL_SUBTEST_3( cholesky_bug241(Matrix2d()) ); - CALL_SUBTEST_3( cholesky_indefinite(Matrix2d()) ); + CALL_SUBTEST_3( cholesky_definiteness(Matrix2d()) ); CALL_SUBTEST_4( cholesky(Matrix3f()) ); CALL_SUBTEST_5( cholesky(Matrix4d()) ); s = internal::random(1,EIGEN_TEST_MAX_SIZE); diff --git a/ground/gcs/src/libs/eigen/test/conjugate_gradient.cpp b/ground/gcs/src/libs/eigen/test/conjugate_gradient.cpp index 869051b31..019cc4d64 100644 --- a/ground/gcs/src/libs/eigen/test/conjugate_gradient.cpp +++ b/ground/gcs/src/libs/eigen/test/conjugate_gradient.cpp @@ -12,13 +12,15 @@ template void test_conjugate_gradient_T() { - ConjugateGradient, Lower> cg_colmajor_lower_diag; - ConjugateGradient, Upper> cg_colmajor_upper_diag; + ConjugateGradient, Lower > cg_colmajor_lower_diag; + ConjugateGradient, Upper > cg_colmajor_upper_diag; + ConjugateGradient, Lower|Upper> cg_colmajor_loup_diag; ConjugateGradient, Lower, IdentityPreconditioner> cg_colmajor_lower_I; ConjugateGradient, Upper, IdentityPreconditioner> cg_colmajor_upper_I; CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_diag) ); CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_diag) ); + CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_loup_diag) ); CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_I) ); CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_I) ); } diff --git a/ground/gcs/src/libs/eigen/test/conservative_resize.cpp b/ground/gcs/src/libs/eigen/test/conservative_resize.cpp index 2d1ab3f03..498421b4c 100644 --- a/ground/gcs/src/libs/eigen/test/conservative_resize.cpp +++ b/ground/gcs/src/libs/eigen/test/conservative_resize.cpp @@ -60,34 +60,51 @@ void 
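The cholesky_definiteness() cases above query LDLT for semidefiniteness; a compact sketch of those queries (the expected flags follow the test's own assertions):

```cpp
#include <Eigen/Cholesky>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix2d mat;
  Eigen::LDLT<Eigen::Matrix2d> ldlt(2);  // preallocate for 2x2, then reuse

  mat << 0, 0,
         0, 1;            // positive semidefinite
  ldlt.compute(mat);
  std::cout << ldlt.isPositive() << " " << ldlt.isNegative() << "\n"; // 1 0

  mat << 1, 2,
         2, 1;            // indefinite: neither flag is set
  ldlt.compute(mat);
  std::cout << ldlt.isPositive() << " " << ldlt.isNegative() << "\n"; // 0 0
  return 0;
}
```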
run_matrix_tests() template void run_vector_tests() { - typedef Matrix MatrixType; + typedef Matrix VectorType; - MatrixType m, n; + VectorType m, n; // boundary cases ... - m = n = MatrixType::Random(50); + m = n = VectorType::Random(50); m.conservativeResize(1); VERIFY_IS_APPROX(m, n.segment(0,1)); - m = n = MatrixType::Random(50); + m = n = VectorType::Random(50); m.conservativeResize(50); VERIFY_IS_APPROX(m, n.segment(0,50)); + + m = n = VectorType::Random(50); + m.conservativeResize(m.rows(),1); + VERIFY_IS_APPROX(m, n.segment(0,1)); + + m = n = VectorType::Random(50); + m.conservativeResize(m.rows(),50); + VERIFY_IS_APPROX(m, n.segment(0,50)); // random shrinking ... for (int i=0; i<50; ++i) { const int size = internal::random(1,50); - m = n = MatrixType::Random(50); + m = n = VectorType::Random(50); m.conservativeResize(size); VERIFY_IS_APPROX(m, n.segment(0,size)); + + m = n = VectorType::Random(50); + m.conservativeResize(m.rows(), size); + VERIFY_IS_APPROX(m, n.segment(0,size)); } // random growing with zeroing ... for (int i=0; i<50; ++i) { const int size = internal::random(50,100); - m = n = MatrixType::Random(50); - m.conservativeResizeLike(MatrixType::Zero(size)); + m = n = VectorType::Random(50); + m.conservativeResizeLike(VectorType::Zero(size)); + VERIFY_IS_APPROX(m.segment(0,50), n); + VERIFY( size<=50 || m.segment(50,size-50).sum() == Scalar(0) ); + + m = n = VectorType::Random(50); + m.conservativeResizeLike(Matrix::Zero(1,size)); VERIFY_IS_APPROX(m.segment(0,50), n); VERIFY( size<=50 || m.segment(50,size-50).sum() == Scalar(0) ); } diff --git a/ground/gcs/src/libs/eigen/test/cwiseop.cpp b/ground/gcs/src/libs/eigen/test/cwiseop.cpp index 247fa2a09..e3361da17 100644 --- a/ground/gcs/src/libs/eigen/test/cwiseop.cpp +++ b/ground/gcs/src/libs/eigen/test/cwiseop.cpp @@ -9,6 +9,8 @@ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN2_SUPPORT +#define EIGEN_NO_EIGEN2_DEPRECATED_WARNING + #define EIGEN_NO_STATIC_ASSERT #include "main.h" #include diff --git a/ground/gcs/src/libs/eigen/test/dynalloc.cpp b/ground/gcs/src/libs/eigen/test/dynalloc.cpp index 8bbda1c94..ef92c0507 100644 --- a/ground/gcs/src/libs/eigen/test/dynalloc.cpp +++ b/ground/gcs/src/libs/eigen/test/dynalloc.cpp @@ -53,7 +53,7 @@ void check_aligned_new() void check_aligned_stack_alloc() { - for(int i = 1; i < 1000; i++) + for(int i = 1; i < 400; i++) { ei_declare_aligned_stack_constructed_variable(float,p,i,0); VERIFY(size_t(p)%ALIGNMENT==0); @@ -87,6 +87,32 @@ template void check_dynaligned() delete obj; } +template void check_custom_new_delete() +{ + { + T* t = new T; + delete t; + } + + { + std::size_t N = internal::random(1,10); + T* t = new T[N]; + delete[] t; + } + +#ifdef EIGEN_ALIGN + { + T* t = static_cast((T::operator new)(sizeof(T))); + (T::operator delete)(t, sizeof(T)); + } + + { + T* t = static_cast((T::operator new)(sizeof(T))); + (T::operator delete)(t); + } +#endif +} + void test_dynalloc() { // low level dynamic memory allocation @@ -94,7 +120,9 @@ void test_dynalloc() CALL_SUBTEST(check_aligned_malloc()); CALL_SUBTEST(check_aligned_new()); CALL_SUBTEST(check_aligned_stack_alloc()); - + + // check static allocation, who knows ? 
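The reworked conservative_resize vector tests above rely on conservativeResize()/conservativeResizeLike() keeping the leading coefficients and zero-filling growth; a short illustrative sketch:

```cpp
#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::VectorXd v = Eigen::VectorXd::Random(50);
  Eigen::VectorXd original = v;

  // Shrink while keeping the leading coefficients.
  v.conservativeResize(30);
  std::cout << (v - original.segment(0, 30)).norm() << "\n";  // 0

  // Grow and zero-fill the new coefficients via conservativeResizeLike.
  v.conservativeResizeLike(Eigen::VectorXd::Zero(70));
  std::cout << v.tail(40).norm() << std::endl;  // 0: the new tail is zeroed
  return 0;
}
```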
+ #if EIGEN_ALIGN_STATICALLY for (int i=0; i() ); @@ -102,10 +130,13 @@ void test_dynalloc() CALL_SUBTEST(check_dynaligned() ); CALL_SUBTEST(check_dynaligned() ); CALL_SUBTEST(check_dynaligned() ); + + CALL_SUBTEST( check_custom_new_delete() ); + CALL_SUBTEST( check_custom_new_delete() ); + CALL_SUBTEST( check_custom_new_delete() ); + CALL_SUBTEST( check_custom_new_delete() ); } - - // check static allocation, who knows ? - #if EIGEN_ALIGN_STATICALLY + { MyStruct foo0; VERIFY(size_t(foo0.avec.data())%ALIGNMENT==0); MyClassA fooA; VERIFY(size_t(fooA.avec.data())%ALIGNMENT==0); diff --git a/ground/gcs/src/libs/eigen/test/eigen2/CMakeLists.txt b/ground/gcs/src/libs/eigen/test/eigen2/CMakeLists.txt index 84931e037..9615a6026 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/test/eigen2/CMakeLists.txt @@ -4,6 +4,7 @@ add_dependencies(eigen2_check eigen2_buildtests) add_dependencies(buildtests eigen2_buildtests) add_definitions("-DEIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API") +add_definitions("-DEIGEN_NO_EIGEN2_DEPRECATED_WARNING") ei_add_test(eigen2_meta) ei_add_test(eigen2_sizeof) diff --git a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_adjoint.cpp b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_adjoint.cpp index 8ec9c9202..c0f811459 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_adjoint.cpp +++ b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_adjoint.cpp @@ -29,8 +29,6 @@ template void adjoint(const MatrixType& m) MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), m3(rows, cols), - mzero = MatrixType::Zero(rows, cols), - identity = SquareMatrixType::Identity(rows, rows), square = SquareMatrixType::Random(rows, rows); VectorType v1 = VectorType::Random(rows), v2 = VectorType::Random(rows), diff --git a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_basicstuff.cpp b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_basicstuff.cpp index 4fa16d5ae..dd2dec1ef 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_basicstuff.cpp +++ b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_basicstuff.cpp @@ -23,11 +23,8 @@ template void basicStuff(const MatrixType& m) m2 = MatrixType::Random(rows, cols), m3(rows, cols), mzero = MatrixType::Zero(rows, cols), - identity = Matrix - ::Identity(rows, rows), square = Matrix::Random(rows, rows); VectorType v1 = VectorType::Random(rows), - v2 = VectorType::Random(rows), vzero = VectorType::Zero(rows); Scalar x = ei_random(); diff --git a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_cwiseop.cpp b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_cwiseop.cpp index 4391d19b4..22e1cc342 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_cwiseop.cpp +++ b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_cwiseop.cpp @@ -35,11 +35,8 @@ template void cwiseops(const MatrixType& m) mzero = MatrixType::Zero(rows, cols), mones = MatrixType::Ones(rows, cols), identity = Matrix - ::Identity(rows, rows), - square = Matrix::Random(rows, rows); - VectorType v1 = VectorType::Random(rows), - v2 = VectorType::Random(rows), - vzero = VectorType::Zero(rows), + ::Identity(rows, rows); + VectorType vzero = VectorType::Zero(rows), vones = VectorType::Ones(rows), v3(rows); diff --git a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_geometry.cpp b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_geometry.cpp index faf1d7fdb..514040774 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_geometry.cpp +++ b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_geometry.cpp @@ -147,7 +147,7 @@ template void geometry(void) 
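check_custom_new_delete() and the MyStruct/MyClassA checks above assume the class provides an aligned operator new; a minimal sketch of the macro Eigen offers for that (the struct name here is illustrative, not from the patch):

```cpp
#include <Eigen/Core>
#include <cstddef>

// A struct holding a fixed-size vectorizable member must override
// operator new/delete so heap instances are 16-byte aligned; Eigen
// provides a macro for exactly that.
struct MyPayload
{
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  Eigen::Vector4f avec;   // 16 bytes, wants 16-byte alignment
  double weight;
};

int main()
{
  MyPayload* p = new MyPayload;     // goes through the aligned operator new
  bool aligned = (reinterpret_cast<std::size_t>(p->avec.data()) % 16) == 0;
  delete p;
  return aligned ? 0 : 1;
}
```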
t1.setIdentity(); t1.linear() = q1.toRotationMatrix(); - v0 << 50, 2, 1; // = ei_random_matrix().cwiseProduct(Vector3(10,2,0.5)); + v0 << 50, 2, 1;//= ei_random_matrix().cwiseProduct(Vector3(10,2,0.5)); t0.scale(v0); t1.prescale(v0); @@ -392,6 +392,7 @@ template void geometry(void) #define VERIFY_EULER(I,J,K, X,Y,Z) { \ Vector3 ea = m.eulerAngles(I,J,K); \ Matrix3 m1 = Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z())); \ + VERIFY_IS_APPROX(m, m1); \ VERIFY_IS_APPROX(m, Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z()))); \ } VERIFY_EULER(0,1,2, X,Y,Z); diff --git a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_geometry_with_eigen2_prefix.cpp b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_geometry_with_eigen2_prefix.cpp index 9ff9f4026..12d4a71c3 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_geometry_with_eigen2_prefix.cpp +++ b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_geometry_with_eigen2_prefix.cpp @@ -149,7 +149,7 @@ template void geometry(void) t1.setIdentity(); t1.linear() = q1.toRotationMatrix(); - v0 << 50, 2, 1; // = ei_random_matrix().cwiseProduct(Vector3(10,2,0.5)); + v0 << 50, 2, 1;//= ei_random_matrix().cwiseProduct(Vector3(10,2,0.5)); t0.scale(v0); t1.prescale(v0); @@ -394,6 +394,7 @@ template void geometry(void) #define VERIFY_EULER(I,J,K, X,Y,Z) { \ Vector3 ea = m.eulerAngles(I,J,K); \ Matrix3 m1 = Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z())); \ + VERIFY_IS_APPROX(m, m1); \ VERIFY_IS_APPROX(m, Matrix3(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z()))); \ } VERIFY_EULER(0,1,2, X,Y,Z); diff --git a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_inverse.cpp b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_inverse.cpp index 5de1dfefa..ccd24a194 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_inverse.cpp +++ b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_inverse.cpp @@ -25,7 +25,6 @@ template void inverse(const MatrixType& m) MatrixType m1 = MatrixType::Random(rows, cols), m2(rows, cols), - mzero = MatrixType::Zero(rows, cols), identity = MatrixType::Identity(rows, rows); while(ei_abs(m1.determinant()) < RealScalar(0.1) && rows <= 8) diff --git a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_linearstructure.cpp b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_linearstructure.cpp index 22f02c396..488f4c485 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_linearstructure.cpp +++ b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_linearstructure.cpp @@ -25,8 +25,7 @@ template void linearStructure(const MatrixType& m) // to test it, hence I consider that we will have tested Random.h MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), - m3(rows, cols), - mzero = MatrixType::Zero(rows, cols); + m3(rows, cols); Scalar s1 = ei_random(); while (ei_abs(s1)<1e-3) s1 = ei_random(); diff --git a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_nomalloc.cpp b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_nomalloc.cpp index e234abe4b..d34a69999 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_nomalloc.cpp +++ b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_nomalloc.cpp @@ -25,22 +25,12 @@ template void nomalloc(const MatrixType& m) */ typedef typename MatrixType::Scalar Scalar; - typedef Matrix VectorType; int rows = m.rows(); int cols = 
m.cols(); MatrixType m1 = MatrixType::Random(rows, cols), - m2 = MatrixType::Random(rows, cols), - m3(rows, cols), - mzero = MatrixType::Zero(rows, cols), - identity = Matrix - ::Identity(rows, rows), - square = Matrix - ::Random(rows, rows); - VectorType v1 = VectorType::Random(rows), - v2 = VectorType::Random(rows), - vzero = VectorType::Zero(rows); + m2 = MatrixType::Random(rows, cols); Scalar s1 = ei_random(); diff --git a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_submatrices.cpp b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_submatrices.cpp index c5d3f243d..dee970b63 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_submatrices.cpp +++ b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_submatrices.cpp @@ -51,16 +51,10 @@ template void submatrices(const MatrixType& m) MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), m3(rows, cols), - mzero = MatrixType::Zero(rows, cols), ones = MatrixType::Ones(rows, cols), - identity = Matrix - ::Identity(rows, rows), square = Matrix ::Random(rows, rows); - VectorType v1 = VectorType::Random(rows), - v2 = VectorType::Random(rows), - v3 = VectorType::Random(rows), - vzero = VectorType::Zero(rows); + VectorType v1 = VectorType::Random(rows); Scalar s1 = ei_random(); diff --git a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_triangular.cpp b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_triangular.cpp index 3748c7d71..6f17b7dff 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/eigen2_triangular.cpp +++ b/ground/gcs/src/libs/eigen/test/eigen2/eigen2_triangular.cpp @@ -13,7 +13,6 @@ template void triangular(const MatrixType& m) { typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits::Real RealScalar; - typedef Matrix VectorType; RealScalar largerEps = 10*test_precision(); @@ -25,16 +24,7 @@ template void triangular(const MatrixType& m) m3(rows, cols), m4(rows, cols), r1(rows, cols), - r2(rows, cols), - mzero = MatrixType::Zero(rows, cols), - mones = MatrixType::Ones(rows, cols), - identity = Matrix - ::Identity(rows, rows), - square = Matrix - ::Random(rows, rows); - VectorType v1 = VectorType::Random(rows), - v2 = VectorType::Random(rows), - vzero = VectorType::Zero(rows); + r2(rows, cols); MatrixType m1up = m1.template part(); MatrixType m2up = m2.template part(); diff --git a/ground/gcs/src/libs/eigen/test/eigen2/product.h b/ground/gcs/src/libs/eigen/test/eigen2/product.h index 2c9604d9a..ae1b4bae4 100644 --- a/ground/gcs/src/libs/eigen/test/eigen2/product.h +++ b/ground/gcs/src/libs/eigen/test/eigen2/product.h @@ -40,8 +40,7 @@ template void product(const MatrixType& m) // to test it, hence I consider that we will have tested Random.h MatrixType m1 = MatrixType::Random(rows, cols), m2 = MatrixType::Random(rows, cols), - m3(rows, cols), - mzero = MatrixType::Zero(rows, cols); + m3(rows, cols); RowSquareMatrixType identity = RowSquareMatrixType::Identity(rows, rows), square = RowSquareMatrixType::Random(rows, rows), @@ -49,9 +48,7 @@ template void product(const MatrixType& m) ColSquareMatrixType square2 = ColSquareMatrixType::Random(cols, cols), res2 = ColSquareMatrixType::Random(cols, cols); - RowVectorType v1 = RowVectorType::Random(rows), - v2 = RowVectorType::Random(rows), - vzero = RowVectorType::Zero(rows); + RowVectorType v1 = RowVectorType::Random(rows); ColVectorType vc2 = ColVectorType::Random(cols), vcres(cols); OtherMajorMatrixType tm1 = m1; diff --git a/ground/gcs/src/libs/eigen/test/eigen2support.cpp b/ground/gcs/src/libs/eigen/test/eigen2support.cpp index ad1d98091..1fa49a8c8 
100644 --- a/ground/gcs/src/libs/eigen/test/eigen2support.cpp +++ b/ground/gcs/src/libs/eigen/test/eigen2support.cpp @@ -8,6 +8,7 @@ // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN2_SUPPORT +#define EIGEN_NO_EIGEN2_DEPRECATED_WARNING #include "main.h" diff --git a/ground/gcs/src/libs/eigen/test/eigensolver_selfadjoint.cpp b/ground/gcs/src/libs/eigen/test/eigensolver_selfadjoint.cpp index 5c6ecd875..38689cfbf 100644 --- a/ground/gcs/src/libs/eigen/test/eigensolver_selfadjoint.cpp +++ b/ground/gcs/src/libs/eigen/test/eigensolver_selfadjoint.cpp @@ -29,7 +29,21 @@ template void selfadjointeigensolver(const MatrixType& m) MatrixType a = MatrixType::Random(rows,cols); MatrixType a1 = MatrixType::Random(rows,cols); MatrixType symmA = a.adjoint() * a + a1.adjoint() * a1; + MatrixType symmC = symmA; + + // randomly nullify some rows/columns + { + Index count = 1;//internal::random(-cols,cols); + for(Index k=0; k(0,cols-1); + symmA.row(i).setZero(); + symmA.col(i).setZero(); + } + } + symmA.template triangularView().setZero(); + symmC.template triangularView().setZero(); MatrixType b = MatrixType::Random(rows,cols); MatrixType b1 = MatrixType::Random(rows,cols); @@ -40,7 +54,7 @@ template void selfadjointeigensolver(const MatrixType& m) SelfAdjointEigenSolver eiDirect; eiDirect.computeDirect(symmA); // generalized eigen pb - GeneralizedSelfAdjointEigenSolver eiSymmGen(symmA, symmB); + GeneralizedSelfAdjointEigenSolver eiSymmGen(symmC, symmB); VERIFY_IS_EQUAL(eiSymm.info(), Success); VERIFY((symmA.template selfadjointView() * eiSymm.eigenvectors()).isApprox( @@ -57,27 +71,28 @@ template void selfadjointeigensolver(const MatrixType& m) VERIFY_IS_APPROX(eiSymm.eigenvalues(), eiSymmNoEivecs.eigenvalues()); // generalized eigen problem Ax = lBx - eiSymmGen.compute(symmA, symmB,Ax_lBx); + eiSymmGen.compute(symmC, symmB,Ax_lBx); VERIFY_IS_EQUAL(eiSymmGen.info(), Success); - VERIFY((symmA.template selfadjointView() * eiSymmGen.eigenvectors()).isApprox( + VERIFY((symmC.template selfadjointView() * eiSymmGen.eigenvectors()).isApprox( symmB.template selfadjointView() * (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); // generalized eigen problem BAx = lx - eiSymmGen.compute(symmA, symmB,BAx_lx); + eiSymmGen.compute(symmC, symmB,BAx_lx); VERIFY_IS_EQUAL(eiSymmGen.info(), Success); - VERIFY((symmB.template selfadjointView() * (symmA.template selfadjointView() * eiSymmGen.eigenvectors())).isApprox( + VERIFY((symmB.template selfadjointView() * (symmC.template selfadjointView() * eiSymmGen.eigenvectors())).isApprox( (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); // generalized eigen problem ABx = lx - eiSymmGen.compute(symmA, symmB,ABx_lx); + eiSymmGen.compute(symmC, symmB,ABx_lx); VERIFY_IS_EQUAL(eiSymmGen.info(), Success); - VERIFY((symmA.template selfadjointView() * (symmB.template selfadjointView() * eiSymmGen.eigenvectors())).isApprox( + VERIFY((symmC.template selfadjointView() * (symmB.template selfadjointView() * eiSymmGen.eigenvectors())).isApprox( (eiSymmGen.eigenvectors() * eiSymmGen.eigenvalues().asDiagonal()), largerEps)); + eiSymm.compute(symmC); MatrixType sqrtSymmA = eiSymm.operatorSqrt(); - VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView()), sqrtSymmA*sqrtSymmA); - VERIFY_IS_APPROX(sqrtSymmA, symmA.template selfadjointView()*eiSymm.operatorInverseSqrt()); + VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView()), sqrtSymmA*sqrtSymmA); + VERIFY_IS_APPROX(sqrtSymmA, symmC.template 
selfadjointView()*eiSymm.operatorInverseSqrt()); MatrixType id = MatrixType::Identity(rows, cols); VERIFY_IS_APPROX(id.template selfadjointView().operatorNorm(), RealScalar(1)); @@ -95,15 +110,15 @@ template void selfadjointeigensolver(const MatrixType& m) VERIFY_RAISES_ASSERT(eiSymmUninitialized.operatorInverseSqrt()); // test Tridiagonalization's methods - Tridiagonalization tridiag(symmA); + Tridiagonalization tridiag(symmC); // FIXME tridiag.matrixQ().adjoint() does not work - VERIFY_IS_APPROX(MatrixType(symmA.template selfadjointView()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint()); + VERIFY_IS_APPROX(MatrixType(symmC.template selfadjointView()), tridiag.matrixQ() * tridiag.matrixT().eval() * MatrixType(tridiag.matrixQ()).adjoint()); if (rows > 1) { // Test matrix with NaN - symmA(0,0) = std::numeric_limits::quiet_NaN(); - SelfAdjointEigenSolver eiSymmNaN(symmA); + symmC(0,0) = std::numeric_limits::quiet_NaN(); + SelfAdjointEigenSolver eiSymmNaN(symmC); VERIFY_IS_EQUAL(eiSymmNaN.info(), NoConvergence); } } @@ -113,8 +128,10 @@ void test_eigensolver_selfadjoint() int s = 0; for(int i = 0; i < g_repeat; i++) { // very important to test 3x3 and 2x2 matrices since we provide special paths for them + CALL_SUBTEST_1( selfadjointeigensolver(Matrix2f()) ); CALL_SUBTEST_1( selfadjointeigensolver(Matrix2d()) ); CALL_SUBTEST_1( selfadjointeigensolver(Matrix3f()) ); + CALL_SUBTEST_1( selfadjointeigensolver(Matrix3d()) ); CALL_SUBTEST_2( selfadjointeigensolver(Matrix4d()) ); s = internal::random(1,EIGEN_TEST_MAX_SIZE/4); CALL_SUBTEST_3( selfadjointeigensolver(MatrixXf(s,s)) ); diff --git a/ground/gcs/src/libs/eigen/test/geo_alignedbox.cpp b/ground/gcs/src/libs/eigen/test/geo_alignedbox.cpp index 8e36adbe3..84663ad1f 100644 --- a/ground/gcs/src/libs/eigen/test/geo_alignedbox.cpp +++ b/ground/gcs/src/libs/eigen/test/geo_alignedbox.cpp @@ -172,6 +172,8 @@ void test_geo_alignedbox() CALL_SUBTEST_9( alignedbox(AlignedBox1i()) ); CALL_SUBTEST_10( alignedbox(AlignedBox2i()) ); CALL_SUBTEST_11( alignedbox(AlignedBox3i()) ); + + CALL_SUBTEST_14( alignedbox(AlignedBox(4)) ); } CALL_SUBTEST_12( specificTest1() ); CALL_SUBTEST_13( specificTest2() ); diff --git a/ground/gcs/src/libs/eigen/test/geo_eulerangles.cpp b/ground/gcs/src/libs/eigen/test/geo_eulerangles.cpp index 5445cd81a..b4830bd41 100644 --- a/ground/gcs/src/libs/eigen/test/geo_eulerangles.cpp +++ b/ground/gcs/src/libs/eigen/test/geo_eulerangles.cpp @@ -12,36 +12,48 @@ #include #include -template void check_all_var(const Matrix& ea) + +template +void verify_euler(const Matrix& ea, int i, int j, int k) { typedef Matrix Matrix3; typedef Matrix Vector3; typedef AngleAxis AngleAxisx; using std::abs; + Matrix3 m(AngleAxisx(ea[0], Vector3::Unit(i)) * AngleAxisx(ea[1], Vector3::Unit(j)) * AngleAxisx(ea[2], Vector3::Unit(k))); + Vector3 eabis = m.eulerAngles(i, j, k); + Matrix3 mbis(AngleAxisx(eabis[0], Vector3::Unit(i)) * AngleAxisx(eabis[1], Vector3::Unit(j)) * AngleAxisx(eabis[2], Vector3::Unit(k))); + VERIFY_IS_APPROX(m, mbis); + /* If I==K, and ea[1]==0, then there no unique solution. */ + /* The remark apply in the case where I!=K, and |ea[1]| is close to pi/2. 
*/ + if( (i!=k || ea[1]!=0) && (i==k || !internal::isApprox(abs(ea[1]),Scalar(M_PI/2),test_precision())) ) + VERIFY((ea-eabis).norm() <= test_precision()); - #define VERIFY_EULER(I,J,K, X,Y,Z) { \ - Matrix3 m(AngleAxisx(ea[0], Vector3::Unit##X()) * AngleAxisx(ea[1], Vector3::Unit##Y()) * AngleAxisx(ea[2], Vector3::Unit##Z())); \ - Vector3 eabis = m.eulerAngles(I,J,K); \ - Matrix3 mbis(AngleAxisx(eabis[0], Vector3::Unit##X()) * AngleAxisx(eabis[1], Vector3::Unit##Y()) * AngleAxisx(eabis[2], Vector3::Unit##Z())); \ - VERIFY_IS_APPROX(m, mbis); \ - /* If I==K, and ea[1]==0, then there no unique solution. */ \ - /* The remark apply in the case where I!=K, and |ea[1]| is close to pi/2. */ \ - if( (I!=K || ea[1]!=0) && (I==K || !internal::isApprox(abs(ea[1]),Scalar(M_PI/2),test_precision())) ) VERIFY((ea-eabis).norm() <= test_precision()); \ - } - VERIFY_EULER(0,1,2, X,Y,Z); - VERIFY_EULER(0,1,0, X,Y,X); - VERIFY_EULER(0,2,1, X,Z,Y); - VERIFY_EULER(0,2,0, X,Z,X); + // approx_or_less_than does not work for 0 + VERIFY(0 < eabis[0] || test_isMuchSmallerThan(eabis[0], Scalar(1))); + VERIFY_IS_APPROX_OR_LESS_THAN(eabis[0], Scalar(M_PI)); + VERIFY_IS_APPROX_OR_LESS_THAN(-Scalar(M_PI), eabis[1]); + VERIFY_IS_APPROX_OR_LESS_THAN(eabis[1], Scalar(M_PI)); + VERIFY_IS_APPROX_OR_LESS_THAN(-Scalar(M_PI), eabis[2]); + VERIFY_IS_APPROX_OR_LESS_THAN(eabis[2], Scalar(M_PI)); +} - VERIFY_EULER(1,2,0, Y,Z,X); - VERIFY_EULER(1,2,1, Y,Z,Y); - VERIFY_EULER(1,0,2, Y,X,Z); - VERIFY_EULER(1,0,1, Y,X,Y); +template void check_all_var(const Matrix& ea) +{ + verify_euler(ea, 0,1,2); + verify_euler(ea, 0,1,0); + verify_euler(ea, 0,2,1); + verify_euler(ea, 0,2,0); - VERIFY_EULER(2,0,1, Z,X,Y); - VERIFY_EULER(2,0,2, Z,X,Z); - VERIFY_EULER(2,1,0, Z,Y,X); - VERIFY_EULER(2,1,2, Z,Y,Z); + verify_euler(ea, 1,2,0); + verify_euler(ea, 1,2,1); + verify_euler(ea, 1,0,2); + verify_euler(ea, 1,0,1); + + verify_euler(ea, 2,0,1); + verify_euler(ea, 2,0,2); + verify_euler(ea, 2,1,0); + verify_euler(ea, 2,1,2); } template void eulerangles() @@ -63,7 +75,16 @@ template void eulerangles() ea = m.eulerAngles(0,1,0); check_all_var(ea); - ea = (Array3::Random() + Array3(1,1,0))*Scalar(M_PI)*Array3(0.5,0.5,1); + // Check with purely random Quaternion: + q1.coeffs() = Quaternionx::Coefficients::Random().normalized(); + m = q1; + ea = m.eulerAngles(0,1,2); + check_all_var(ea); + ea = m.eulerAngles(0,1,0); + check_all_var(ea); + + // Check with random angles in range [0:pi]x[-pi:pi]x[-pi:pi]. 
+ ea = (Array3::Random() + Array3(1,0,0))*Scalar(M_PI)*Array3(0.5,1,1); check_all_var(ea); ea[2] = ea[0] = internal::random(0,Scalar(M_PI)); diff --git a/ground/gcs/src/libs/eigen/test/geo_hyperplane.cpp b/ground/gcs/src/libs/eigen/test/geo_hyperplane.cpp index f26fc1329..327537801 100644 --- a/ground/gcs/src/libs/eigen/test/geo_hyperplane.cpp +++ b/ground/gcs/src/libs/eigen/test/geo_hyperplane.cpp @@ -114,6 +114,32 @@ template void lines() } } +template void planes() +{ + using std::abs; + typedef Hyperplane Plane; + typedef Matrix Vector; + + for(int i = 0; i < 10; i++) + { + Vector v0 = Vector::Random(); + Vector v1(v0), v2(v0); + if(internal::random(0,1)>0.25) + v1 += Vector::Random(); + if(internal::random(0,1)>0.25) + v2 += v1 * std::pow(internal::random(0,1),internal::random(1,16)); + if(internal::random(0,1)>0.25) + v2 += Vector::Random() * std::pow(internal::random(0,1),internal::random(1,16)); + + Plane p0 = Plane::Through(v0, v1, v2); + + VERIFY_IS_APPROX(p0.normal().norm(), Scalar(1)); + VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v0), Scalar(1)); + VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v1), Scalar(1)); + VERIFY_IS_MUCH_SMALLER_THAN(p0.absDistance(v2), Scalar(1)); + } +} + template void hyperplane_alignment() { typedef Hyperplane Plane3a; @@ -153,5 +179,7 @@ void test_geo_hyperplane() CALL_SUBTEST_4( hyperplane(Hyperplane,5>()) ); CALL_SUBTEST_1( lines() ); CALL_SUBTEST_3( lines() ); + CALL_SUBTEST_2( planes() ); + CALL_SUBTEST_5( planes() ); } } diff --git a/ground/gcs/src/libs/eigen/test/geo_transformations.cpp b/ground/gcs/src/libs/eigen/test/geo_transformations.cpp index 35ae67ebe..547765714 100644 --- a/ground/gcs/src/libs/eigen/test/geo_transformations.cpp +++ b/ground/gcs/src/libs/eigen/test/geo_transformations.cpp @@ -98,11 +98,19 @@ template void transformations() Matrix3 matrot1, m; Scalar a = internal::random(-Scalar(M_PI), Scalar(M_PI)); - Scalar s0 = internal::random(); + Scalar s0 = internal::random(), + s1 = internal::random(); + + while(v0.norm() < test_precision()) v0 = Vector3::Random(); + while(v1.norm() < test_precision()) v1 = Vector3::Random(); + VERIFY_IS_APPROX(v0, AngleAxisx(a, v0.normalized()) * v0); VERIFY_IS_APPROX(-v0, AngleAxisx(Scalar(M_PI), v0.unitOrthogonal()) * v0); - VERIFY_IS_APPROX(cos(a)*v0.squaredNorm(), v0.dot(AngleAxisx(a, v0.unitOrthogonal()) * v0)); + if(abs(cos(a)) > test_precision()) + { + VERIFY_IS_APPROX(cos(a)*v0.squaredNorm(), v0.dot(AngleAxisx(a, v0.unitOrthogonal()) * v0)); + } m = AngleAxisx(a, v0.normalized()).toRotationMatrix().adjoint(); VERIFY_IS_APPROX(Matrix3::Identity(), m * AngleAxisx(a, v0.normalized())); VERIFY_IS_APPROX(Matrix3::Identity(), AngleAxisx(a, v0.normalized()) * m); @@ -123,11 +131,18 @@ template void transformations() // angle-axis conversion AngleAxisx aa = AngleAxisx(q1); VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1); - VERIFY_IS_NOT_APPROX(q1 * v1, Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1); + + if(abs(aa.angle()) > NumTraits::dummy_precision()) + { + VERIFY( !(q1 * v1).isApprox(Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1) ); + } aa.fromRotationMatrix(aa.toRotationMatrix()); VERIFY_IS_APPROX(q1 * v1, Quaternionx(aa) * v1); - VERIFY_IS_NOT_APPROX(q1 * v1, Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1); + if(abs(aa.angle()) > NumTraits::dummy_precision()) + { + VERIFY( !(q1 * v1).isApprox(Quaternionx(AngleAxisx(aa.angle()*2,aa.axis())) * v1) ); + } // AngleAxis VERIFY_IS_APPROX(AngleAxisx(a,v1.normalized()).toRotationMatrix(), @@ -279,6 +294,13 @@ template void 
transformations() t1 = Eigen::Scaling(s0,s0,s0) * t1; VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); + t0 = t3; + t0.scale(s0); + t1 = t3 * Eigen::Scaling(s0); + VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); + t0.prescale(s0); + t1 = Eigen::Scaling(s0) * t1; + VERIFY_IS_APPROX(t0.matrix(), t1.matrix()); t0.setIdentity(); t0.prerotate(q1).prescale(v0).pretranslate(v0); @@ -340,7 +362,9 @@ template void transformations() // test transform inversion t0.setIdentity(); t0.translate(v0); - t0.linear().setRandom(); + do { + t0.linear().setRandom(); + } while(t0.linear().jacobiSvd().singularValues()(2)()); Matrix4 t044 = Matrix4::Zero(); t044(3,3) = 1; t044.block(0,0,t0.matrix().rows(),4) = t0.matrix(); @@ -390,6 +414,29 @@ template void transformations() t20 = Translation2(v20) * (Rotation2D(s0) * Eigen::Scaling(s0)); t21 = Translation2(v20) * Rotation2D(s0) * Eigen::Scaling(s0); VERIFY_IS_APPROX(t20,t21); + + Rotation2D R0(s0), R1(s1); + t20 = Translation2(v20) * (R0 * Eigen::Scaling(s0)); + t21 = Translation2(v20) * R0 * Eigen::Scaling(s0); + VERIFY_IS_APPROX(t20,t21); + + t20 = Translation2(v20) * (R0 * R0.inverse() * Eigen::Scaling(s0)); + t21 = Translation2(v20) * Eigen::Scaling(s0); + VERIFY_IS_APPROX(t20,t21); + + VERIFY_IS_APPROX(s0, (R0.slerp(0, R1)).angle()); + VERIFY_IS_APPROX(s1, (R0.slerp(1, R1)).angle()); + VERIFY_IS_APPROX(s0, (R0.slerp(0.5, R0)).angle()); + VERIFY_IS_APPROX(Scalar(0), (R0.slerp(0.5, R0.inverse())).angle()); + + // check basic features + { + Rotation2D r1; // default ctor + r1 = Rotation2D(s0); // copy assignment + VERIFY_IS_APPROX(r1.angle(),s0); + Rotation2D r2(r1); // copy ctor + VERIFY_IS_APPROX(r2.angle(),s0); + } } template void transform_alignment() diff --git a/ground/gcs/src/libs/eigen/test/jacobisvd.cpp b/ground/gcs/src/libs/eigen/test/jacobisvd.cpp index 76157c30f..12c556e59 100644 --- a/ground/gcs/src/libs/eigen/test/jacobisvd.cpp +++ b/ground/gcs/src/libs/eigen/test/jacobisvd.cpp @@ -67,6 +67,7 @@ template void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions) { typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::Index Index; Index rows = m.rows(); Index cols = m.cols(); @@ -81,9 +82,90 @@ void jacobisvd_solve(const MatrixType& m, unsigned int computationOptions) RhsType rhs = RhsType::Random(rows, internal::random(1, cols)); JacobiSVD svd(m, computationOptions); + + if(internal::is_same::value) svd.setThreshold(1e-8); + else if(internal::is_same::value) svd.setThreshold(1e-4); + SolutionType x = svd.solve(rhs); + + RealScalar residual = (m*x-rhs).norm(); + // Check that there is no significantly better solution in the neighborhood of x + if(!test_isMuchSmallerThan(residual,rhs.norm())) + { + // If the residual is very small, then we have an exact solution, so we are already good. + for(int k=0;k::epsilon(); + RealScalar residual_y = (m*y-rhs).norm(); + VERIFY( test_isApprox(residual_y,residual) || residual < residual_y ); + + y.row(k) = x.row(k).array() - 2*NumTraits::epsilon(); + residual_y = (m*y-rhs).norm(); + VERIFY( test_isApprox(residual_y,residual) || residual < residual_y ); + } + } + // evaluate normal equation which works also for least-squares solutions - VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs); + if(internal::is_same::value) + { + // This test is not stable with single precision. + // This is probably because squaring m signicantly affects the precision. 
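The tightened jacobisvd_solve() checks above solve least-squares problems through JacobiSVD with an explicit threshold; a minimal stand-alone sketch of that usage (sizes and the threshold value are illustrative):

```cpp
#include <Eigen/SVD>
#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Overdetermined system: 6 equations, 3 unknowns.
  Eigen::MatrixXd m   = Eigen::MatrixXd::Random(6, 3);
  Eigen::VectorXd rhs = Eigen::VectorXd::Random(6);

  // Thin U/V are enough for solving; setThreshold() controls the rank
  // decision, as in the tightened jacobisvd_solve checks above.
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(m, Eigen::ComputeThinU | Eigen::ComputeThinV);
  svd.setThreshold(1e-8);
  Eigen::VectorXd x = svd.solve(rhs);  // least-squares / minimal-norm solution

  std::cout << "rank: " << svd.rank()
            << "  residual: " << (m * x - rhs).norm() << std::endl;
  return 0;
}
```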
+ VERIFY_IS_APPROX(m.adjoint()*m*x,m.adjoint()*rhs); + } + + // check minimal norm solutions + { + // generate a full-rank m x n problem with m MatrixType2; + typedef Matrix RhsType2; + typedef Matrix MatrixType2T; + Index rank = RankAtCompileTime2==Dynamic ? internal::random(1,cols) : Index(RankAtCompileTime2); + MatrixType2 m2(rank,cols); + int guard = 0; + do { + m2.setRandom(); + } while(m2.jacobiSvd().setThreshold(test_precision()).rank()!=rank && (++guard)<10); + VERIFY(guard<10); + RhsType2 rhs2 = RhsType2::Random(rank); + // use QR to find a reference minimal norm solution + HouseholderQR qr(m2.adjoint()); + Matrix tmp = qr.matrixQR().topLeftCorner(rank,rank).template triangularView().adjoint().solve(rhs2); + tmp.conservativeResize(cols); + tmp.tail(cols-rank).setZero(); + SolutionType x21 = qr.householderQ() * tmp; + // now check with SVD + JacobiSVD svd2(m2, computationOptions); + SolutionType x22 = svd2.solve(rhs2); + VERIFY_IS_APPROX(m2*x21, rhs2); + VERIFY_IS_APPROX(m2*x22, rhs2); + VERIFY_IS_APPROX(x21, x22); + + // Now check with a rank deficient matrix + typedef Matrix MatrixType3; + typedef Matrix RhsType3; + Index rows3 = RowsAtCompileTime3==Dynamic ? internal::random(rank+1,2*cols) : Index(RowsAtCompileTime3); + Matrix C = Matrix::Random(rows3,rank); + MatrixType3 m3 = C * m2; + RhsType3 rhs3 = C * rhs2; + JacobiSVD svd3(m3, computationOptions); + SolutionType x3 = svd3.solve(rhs3); + if(svd3.rank()!=rank) { + std::cout << m3 << "\n\n"; + std::cout << svd3.singularValues().transpose() << "\n"; + std::cout << svd3.rank() << " == " << rank << "\n"; + std::cout << x21.norm() << " == " << x3.norm() << "\n"; + } +// VERIFY_IS_APPROX(m3*x3, rhs3); + VERIFY_IS_APPROX(m3*x21, rhs3); + VERIFY_IS_APPROX(m2*x3, rhs2); + + VERIFY_IS_APPROX(x21, x3); + } } template @@ -92,10 +174,9 @@ void jacobisvd_test_all_computation_options(const MatrixType& m) if (QRPreconditioner == NoQRPreconditioner && m.rows() != m.cols()) return; JacobiSVD fullSvd(m, ComputeFullU|ComputeFullV); - - jacobisvd_check_full(m, fullSvd); - jacobisvd_solve(m, ComputeFullU | ComputeFullV); - + CALL_SUBTEST(( jacobisvd_check_full(m, fullSvd) )); + CALL_SUBTEST(( jacobisvd_solve(m, ComputeFullU | ComputeFullV) )); + #if defined __INTEL_COMPILER // remark #111: statement is unreachable #pragma warning disable 111 @@ -103,20 +184,20 @@ void jacobisvd_test_all_computation_options(const MatrixType& m) if(QRPreconditioner == FullPivHouseholderQRPreconditioner) return; - jacobisvd_compare_to_full(m, ComputeFullU, fullSvd); - jacobisvd_compare_to_full(m, ComputeFullV, fullSvd); - jacobisvd_compare_to_full(m, 0, fullSvd); + CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullU, fullSvd) )); + CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullV, fullSvd) )); + CALL_SUBTEST(( jacobisvd_compare_to_full(m, 0, fullSvd) )); if (MatrixType::ColsAtCompileTime == Dynamic) { // thin U/V are only available with dynamic number of columns - jacobisvd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd); - jacobisvd_compare_to_full(m, ComputeThinV, fullSvd); - jacobisvd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd); - jacobisvd_compare_to_full(m, ComputeThinU , fullSvd); - jacobisvd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd); - jacobisvd_solve(m, ComputeFullU | ComputeThinV); - jacobisvd_solve(m, ComputeThinU | ComputeFullV); - jacobisvd_solve(m, ComputeThinU | ComputeThinV); + CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeFullU|ComputeThinV, fullSvd) )); + CALL_SUBTEST(( jacobisvd_compare_to_full(m, 
ComputeThinV, fullSvd) )); + CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU|ComputeFullV, fullSvd) )); + CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU , fullSvd) )); + CALL_SUBTEST(( jacobisvd_compare_to_full(m, ComputeThinU|ComputeThinV, fullSvd) )); + CALL_SUBTEST(( jacobisvd_solve(m, ComputeFullU | ComputeThinV) )); + CALL_SUBTEST(( jacobisvd_solve(m, ComputeThinU | ComputeFullV) )); + CALL_SUBTEST(( jacobisvd_solve(m, ComputeThinU | ComputeThinV) )); // test reconstruction typedef typename MatrixType::Index Index; @@ -129,12 +210,29 @@ void jacobisvd_test_all_computation_options(const MatrixType& m) template void jacobisvd(const MatrixType& a = MatrixType(), bool pickrandom = true) { - MatrixType m = pickrandom ? MatrixType::Random(a.rows(), a.cols()) : a; + MatrixType m = a; + if(pickrandom) + { + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + Index diagSize = (std::min)(a.rows(), a.cols()); + RealScalar s = std::numeric_limits::max_exponent10/4; + s = internal::random(1,s); + Matrix d = Matrix::Random(diagSize); + for(Index k=0; k(-s,s)); + m = Matrix::Random(a.rows(),diagSize) * d.asDiagonal() * Matrix::Random(diagSize,a.cols()); + // cancel some coeffs + Index n = internal::random(0,m.size()-1); + for(Index i=0; i(0,m.rows()-1), internal::random(0,m.cols()-1)) = Scalar(0); + } - jacobisvd_test_all_computation_options(m); - jacobisvd_test_all_computation_options(m); - jacobisvd_test_all_computation_options(m); - jacobisvd_test_all_computation_options(m); + CALL_SUBTEST(( jacobisvd_test_all_computation_options(m) )); + CALL_SUBTEST(( jacobisvd_test_all_computation_options(m) )); + CALL_SUBTEST(( jacobisvd_test_all_computation_options(m) )); + CALL_SUBTEST(( jacobisvd_test_all_computation_options(m) )); } template void jacobisvd_verify_assert(const MatrixType& m) @@ -223,16 +321,23 @@ void jacobisvd_inf_nan() VERIFY(sub(some_inf, some_inf) != sub(some_inf, some_inf)); svd.compute(MatrixType::Constant(10,10,some_inf), ComputeFullU | ComputeFullV); - Scalar some_nan = zero() / zero(); - VERIFY(some_nan != some_nan); - svd.compute(MatrixType::Constant(10,10,some_nan), ComputeFullU | ComputeFullV); + Scalar nan = std::numeric_limits::quiet_NaN(); + VERIFY(nan != nan); + svd.compute(MatrixType::Constant(10,10,nan), ComputeFullU | ComputeFullV); MatrixType m = MatrixType::Zero(10,10); m(internal::random(0,9), internal::random(0,9)) = some_inf; svd.compute(m, ComputeFullU | ComputeFullV); m = MatrixType::Zero(10,10); - m(internal::random(0,9), internal::random(0,9)) = some_nan; + m(internal::random(0,9), internal::random(0,9)) = nan; + svd.compute(m, ComputeFullU | ComputeFullV); + + // regression test for bug 791 + m.resize(3,3); + m << 0, 2*NumTraits::epsilon(), 0.5, + 0, -0.5, 0, + nan, 0, 0; svd.compute(m, ComputeFullU | ComputeFullV); } @@ -328,6 +433,7 @@ void test_jacobisvd() TEST_SET_BUT_UNUSED_VARIABLE(r) TEST_SET_BUT_UNUSED_VARIABLE(c) + CALL_SUBTEST_10(( jacobisvd(MatrixXd(r,c)) )); CALL_SUBTEST_7(( jacobisvd(MatrixXf(r,c)) )); CALL_SUBTEST_8(( jacobisvd(MatrixXcd(r,c)) )); (void) r; @@ -335,6 +441,7 @@ void test_jacobisvd() // Test on inf/nan matrix CALL_SUBTEST_7( jacobisvd_inf_nan() ); + CALL_SUBTEST_10( jacobisvd_inf_nan() ); } CALL_SUBTEST_7(( jacobisvd(MatrixXf(internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2), internal::random(EIGEN_TEST_MAX_SIZE/4, EIGEN_TEST_MAX_SIZE/2))) )); diff --git a/ground/gcs/src/libs/eigen/test/lu.cpp 
b/ground/gcs/src/libs/eigen/test/lu.cpp index 25f86755a..374652694 100644 --- a/ground/gcs/src/libs/eigen/test/lu.cpp +++ b/ground/gcs/src/libs/eigen/test/lu.cpp @@ -100,7 +100,9 @@ template void lu_invertible() LU.h */ typedef typename NumTraits::Real RealScalar; - int size = internal::random(1,EIGEN_TEST_MAX_SIZE); + DenseIndex size = MatrixType::RowsAtCompileTime; + if( size==Dynamic) + size = internal::random(1,EIGEN_TEST_MAX_SIZE); MatrixType m1(size, size), m2(size, size), m3(size, size); FullPivLU lu; @@ -122,6 +124,10 @@ template void lu_invertible() m2 = lu.solve(m3); VERIFY_IS_APPROX(m3, m1*m2); VERIFY_IS_APPROX(m2, lu.inverse()*m3); + + // Regression test for Bug 302 + MatrixType m4 = MatrixType::Random(size,size); + VERIFY_IS_APPROX(lu.solve(m3*m4), lu.solve(m3)*m4); } template void lu_partial_piv() @@ -171,6 +177,7 @@ void test_lu() { for(int i = 0; i < g_repeat; i++) { CALL_SUBTEST_1( lu_non_invertible() ); + CALL_SUBTEST_1( lu_invertible() ); CALL_SUBTEST_1( lu_verify_assert() ); CALL_SUBTEST_2( (lu_non_invertible >()) ); diff --git a/ground/gcs/src/libs/eigen/test/main.h b/ground/gcs/src/libs/eigen/test/main.h index 14f0d2f78..664204866 100644 --- a/ground/gcs/src/libs/eigen/test/main.h +++ b/ground/gcs/src/libs/eigen/test/main.h @@ -17,13 +17,36 @@ #include #include #include + +// The following includes of STL headers have to be done _before_ the +// definition of macros min() and max(). The reason is that many STL +// implementations will not work properly as the min and max symbols collide +// with the STL functions std:min() and std::max(). The STL headers may check +// for the macro definition of min/max and issue a warning or undefine the +// macros. +// +// Still, Windows defines min() and max() in windef.h as part of the regular +// Windows system interfaces and many other Windows APIs depend on these +// macros being available. To prevent the macro expansion of min/max and to +// make Eigen compatible with the Windows environment all function calls of +// std::min() and std::max() have to be written with parenthesis around the +// function name. +// +// All STL headers used by Eigen should be included here. Because main.h is +// included before any Eigen header and because the STL headers are guarded +// against multiple inclusions, no STL header will see our own min/max macro +// definitions. #include #include -#include #include #include #include +#include +// To test that all calls from Eigen code to std::min() and std::max() are +// protected by parenthesis against macro expansion, the min()/max() macros +// are defined here and any not-parenthesized min/max call will cause a +// compiler error. 
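// --- Illustrative sketch (not part of the patch) -------------------------
// The comment block above explains why Eigen writes (std::min)(a,b) instead
// of std::min(a,b).  This minimal, self-contained example shows the idiom
// under the same assumption the test makes: a function-like macro named
// `min` is in scope (windef.h on Windows, or the test's own #define below).
// The helper name `smaller` is made up for illustration only.
#include <algorithm>

#define min(A, B) please_protect_your_min_with_parentheses

int smaller(int a, int b)
{
  // return std::min(a, b);   // would be macro-expanded and fail to compile
  return (std::min)(a, b);    // the extra parentheses suppress macro expansion
}

#undef min
// --------------------------------------------------------------------------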
#define min(A,B) please_protect_your_min_with_parentheses #define max(A,B) please_protect_your_max_with_parentheses @@ -383,6 +406,26 @@ void randomPermutationVector(PermutationVectorType& v, typename PermutationVecto } } +template bool isNotNaN(const T& x) +{ + return x==x; +} + +template bool isNaN(const T& x) +{ + return x!=x; +} + +template bool isInf(const T& x) +{ + return x > NumTraits::highest(); +} + +template bool isMinusInf(const T& x) +{ + return x < NumTraits::lowest(); +} + } // end namespace Eigen template struct GetDifferentType; diff --git a/ground/gcs/src/libs/eigen/test/mapped_matrix.cpp b/ground/gcs/src/libs/eigen/test/mapped_matrix.cpp index de9dbbde3..58904fa37 100644 --- a/ground/gcs/src/libs/eigen/test/mapped_matrix.cpp +++ b/ground/gcs/src/libs/eigen/test/mapped_matrix.cpp @@ -114,6 +114,28 @@ template void check_const_correctness(const PlainObjec VERIFY( !(Map::Flags & LvalueBit) ); } +template +void map_not_aligned_on_scalar() +{ + typedef Matrix MatrixType; + typedef typename MatrixType::Index Index; + Index size = 11; + Scalar* array1 = internal::aligned_new((size+1)*(size+1)+1); + Scalar* array2 = reinterpret_cast(sizeof(Scalar)/2+std::size_t(array1)); + Map > map2(array2, size, size, OuterStride<>(size+1)); + MatrixType m2 = MatrixType::Random(size,size); + map2 = m2; + VERIFY_IS_EQUAL(m2, map2); + + typedef Matrix VectorType; + Map map3(array2, size); + MatrixType v3 = VectorType::Random(size); + map3 = v3; + VERIFY_IS_EQUAL(v3, map3); + + internal::aligned_delete(array1, (size+1)*(size+1)+1); +} + void test_mapped_matrix() { for(int i = 0; i < g_repeat; i++) { @@ -137,5 +159,7 @@ void test_mapped_matrix() CALL_SUBTEST_8( map_static_methods(RowVector3d()) ); CALL_SUBTEST_9( map_static_methods(VectorXcd(8)) ); CALL_SUBTEST_10( map_static_methods(VectorXf(12)) ); + + CALL_SUBTEST_11( map_not_aligned_on_scalar() ); } } diff --git a/ground/gcs/src/libs/eigen/test/mpl2only.cpp b/ground/gcs/src/libs/eigen/test/mpl2only.cpp new file mode 100644 index 000000000..5ef0d2b2e --- /dev/null +++ b/ground/gcs/src/libs/eigen/test/mpl2only.cpp @@ -0,0 +1,20 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#define EIGEN_MPL2_ONLY +#include +#include +#include +#include +#include + +int main() +{ + return 0; +} diff --git a/ground/gcs/src/libs/eigen/test/nomalloc.cpp b/ground/gcs/src/libs/eigen/test/nomalloc.cpp index cbd02dd21..8e0402358 100644 --- a/ground/gcs/src/libs/eigen/test/nomalloc.cpp +++ b/ground/gcs/src/libs/eigen/test/nomalloc.cpp @@ -165,6 +165,38 @@ void ctms_decompositions() Eigen::JacobiSVD jSVD; jSVD.compute(A, ComputeFullU | ComputeFullV); } +void test_zerosized() { + // default constructors: + Eigen::MatrixXd A; + Eigen::VectorXd v; + // explicit zero-sized: + Eigen::ArrayXXd A0(0,0); + Eigen::ArrayXd v0(std::ptrdiff_t(0)); // FIXME ArrayXd(0) is ambiguous + + // assigning empty objects to each other: + A=A0; + v=v0; +} + +template void test_reference(const MatrixType& m) { + typedef typename MatrixType::Scalar Scalar; + enum { Flag = MatrixType::IsRowMajor ? Eigen::RowMajor : Eigen::ColMajor}; + enum { TransposeFlag = !MatrixType::IsRowMajor ? 
Eigen::RowMajor : Eigen::ColMajor}; + typename MatrixType::Index rows = m.rows(), cols=m.cols(); + // Dynamic reference: + typedef Eigen::Ref > Ref; + typedef Eigen::Ref > RefT; + + Ref r1(m); + Ref r2(m.block(rows/3, cols/4, rows/2, cols/2)); + RefT r3(m.transpose()); + RefT r4(m.topLeftCorner(rows/2, cols/2).transpose()); + + VERIFY_RAISES_ASSERT(RefT r5(m)); + VERIFY_RAISES_ASSERT(Ref r6(m.transpose())); + VERIFY_RAISES_ASSERT(Ref r7(Scalar(2) * m)); +} + void test_nomalloc() { // check that our operator new is indeed called: @@ -175,5 +207,6 @@ void test_nomalloc() // Check decomposition modules with dynamic matrices that have a known compile-time max size (ctms) CALL_SUBTEST_4(ctms_decompositions()); - + CALL_SUBTEST_5(test_zerosized()); + CALL_SUBTEST_6(test_reference(Matrix())); } diff --git a/ground/gcs/src/libs/eigen/test/nullary.cpp b/ground/gcs/src/libs/eigen/test/nullary.cpp index 5408d88b2..fbc721a1a 100644 --- a/ground/gcs/src/libs/eigen/test/nullary.cpp +++ b/ground/gcs/src/libs/eigen/test/nullary.cpp @@ -80,7 +80,9 @@ void testVectorType(const VectorType& base) Matrix col_vector(size); row_vector.setLinSpaced(size,low,high); col_vector.setLinSpaced(size,low,high); - VERIFY( row_vector.isApprox(col_vector.transpose(), NumTraits::epsilon())); + // when using the extended precision (e.g., FPU) the relative error might exceed 1 bit + // when computing the squared sum in isApprox, thus the 2x factor. + VERIFY( row_vector.isApprox(col_vector.transpose(), Scalar(2)*NumTraits::epsilon())); Matrix size_changer(size+50); size_changer.setLinSpaced(size,low,high); diff --git a/ground/gcs/src/libs/eigen/test/packetmath.cpp b/ground/gcs/src/libs/eigen/test/packetmath.cpp index 2c0519c41..38aa256ce 100644 --- a/ground/gcs/src/libs/eigen/test/packetmath.cpp +++ b/ground/gcs/src/libs/eigen/test/packetmath.cpp @@ -239,6 +239,12 @@ template void packetmath_real() data2[i] = internal::random(-87,88); } CHECK_CWISE1_IF(internal::packet_traits::HasExp, std::exp, internal::pexp); + { + data1[0] = std::numeric_limits::quiet_NaN(); + packet_helper::HasExp,Packet> h; + h.store(data2, internal::pexp(h.load(data1))); + VERIFY(isNaN(data2[0])); + } for (int i=0; i void packetmath_real() } if(internal::random(0,1)<0.1) data1[internal::random(0, PacketSize)] = 0; - CHECK_CWISE1_IF(internal::packet_traits::HasLog, std::log, internal::plog); CHECK_CWISE1_IF(internal::packet_traits::HasSqrt, std::sqrt, internal::psqrt); + CHECK_CWISE1_IF(internal::packet_traits::HasLog, std::log, internal::plog); + { + data1[0] = std::numeric_limits::quiet_NaN(); + packet_helper::HasLog,Packet> h; + h.store(data2, internal::plog(h.load(data1))); + VERIFY(isNaN(data2[0])); + data1[0] = -1.0f; + h.store(data2, internal::plog(h.load(data1))); + VERIFY(isNaN(data2[0])); +#if !EIGEN_FAST_MATH + h.store(data2, internal::psqrt(h.load(data1))); + VERIFY(isNaN(data2[0])); + VERIFY(isNaN(data2[1])); +#endif + } } template void packetmath_notcomplex() diff --git a/ground/gcs/src/libs/eigen/test/product.h b/ground/gcs/src/libs/eigen/test/product.h index 856b234ac..0d054ff46 100644 --- a/ground/gcs/src/libs/eigen/test/product.h +++ b/ground/gcs/src/libs/eigen/test/product.h @@ -136,7 +136,46 @@ template void product(const MatrixType& m) VERIFY_IS_APPROX(res.col(r).noalias() = square.adjoint() * square.col(r), (square.adjoint() * square.col(r)).eval()); VERIFY_IS_APPROX(res.col(r).noalias() = square * square.col(r), (square * square.col(r)).eval()); + // vector at runtime (see bug 1166) + { + RowSquareMatrixType ref(square); + 
ColSquareMatrixType ref2(square2); + ref = res = square; + VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.col(0).transpose() * square.transpose(), (ref.row(0) = m1.col(0).transpose() * square.transpose())); + VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.block(0,0,rows,1).transpose() * square.transpose(), (ref.row(0) = m1.col(0).transpose() * square.transpose())); + VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.col(0).transpose() * square, (ref.row(0) = m1.col(0).transpose() * square)); + VERIFY_IS_APPROX(res.block(0,0,1,rows).noalias() = m1.block(0,0,rows,1).transpose() * square, (ref.row(0) = m1.col(0).transpose() * square)); + ref2 = res2 = square2; + VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.row(0) * square2.transpose(), (ref2.row(0) = m1.row(0) * square2.transpose())); + VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.block(0,0,1,cols) * square2.transpose(), (ref2.row(0) = m1.row(0) * square2.transpose())); + VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.row(0) * square2, (ref2.row(0) = m1.row(0) * square2)); + VERIFY_IS_APPROX(res2.block(0,0,1,cols).noalias() = m1.block(0,0,1,cols) * square2, (ref2.row(0) = m1.row(0) * square2)); + } + // inner product - Scalar x = square2.row(c) * square2.col(c2); - VERIFY_IS_APPROX(x, square2.row(c).transpose().cwiseProduct(square2.col(c2)).sum()); + { + Scalar x = square2.row(c) * square2.col(c2); + VERIFY_IS_APPROX(x, square2.row(c).transpose().cwiseProduct(square2.col(c2)).sum()); + } + + // outer product + VERIFY_IS_APPROX(m1.col(c) * m1.row(r), m1.block(0,c,rows,1) * m1.block(r,0,1,cols)); + VERIFY_IS_APPROX(m1.row(r).transpose() * m1.col(c).transpose(), m1.block(r,0,1,cols).transpose() * m1.block(0,c,rows,1).transpose()); + VERIFY_IS_APPROX(m1.block(0,c,rows,1) * m1.row(r), m1.block(0,c,rows,1) * m1.block(r,0,1,cols)); + VERIFY_IS_APPROX(m1.col(c) * m1.block(r,0,1,cols), m1.block(0,c,rows,1) * m1.block(r,0,1,cols)); + VERIFY_IS_APPROX(m1.leftCols(1) * m1.row(r), m1.block(0,0,rows,1) * m1.block(r,0,1,cols)); + VERIFY_IS_APPROX(m1.col(c) * m1.topRows(1), m1.block(0,c,rows,1) * m1.block(0,0,1,cols)); + + // Aliasing + { + ColVectorType x(cols); x.setRandom(); + ColVectorType z(x); + ColVectorType y(cols); y.setZero(); + ColSquareMatrixType A(cols,cols); A.setRandom(); + // CwiseBinaryOp + VERIFY_IS_APPROX(x = y + A*x, A*z); + x = z; + // CwiseUnaryOp + VERIFY_IS_APPROX(x = Scalar(1.)*(A*x), A*z); + } } diff --git a/ground/gcs/src/libs/eigen/test/product_extra.cpp b/ground/gcs/src/libs/eigen/test/product_extra.cpp index 744a1ef7f..ea2486937 100644 --- a/ground/gcs/src/libs/eigen/test/product_extra.cpp +++ b/ground/gcs/src/libs/eigen/test/product_extra.cpp @@ -109,8 +109,67 @@ void mat_mat_scalar_scalar_product() double det = 6.0, wt = 0.5; VERIFY_IS_APPROX(dNdxy.transpose()*dNdxy*det*wt, det*wt*dNdxy.transpose()*dNdxy); } + +template +void zero_sized_objects(const MatrixType& m) +{ + typedef typename MatrixType::Scalar Scalar; + const int PacketSize = internal::packet_traits::size; + const int PacketSize1 = PacketSize>1 ? 
PacketSize-1 : 1; + DenseIndex rows = m.rows(); + DenseIndex cols = m.cols(); -void zero_sized_objects() + { + MatrixType res, a(rows,0), b(0,cols); + VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(rows,cols) ); + VERIFY_IS_APPROX( (res=a*a.transpose()), MatrixType::Zero(rows,rows) ); + VERIFY_IS_APPROX( (res=b.transpose()*b), MatrixType::Zero(cols,cols) ); + VERIFY_IS_APPROX( (res=b.transpose()*a.transpose()), MatrixType::Zero(cols,rows) ); + } + + { + MatrixType res, a(rows,cols), b(cols,0); + res = a*b; + VERIFY(res.rows()==rows && res.cols()==0); + b.resize(0,rows); + res = b*a; + VERIFY(res.rows()==0 && res.cols()==cols); + } + + { + Matrix a; + Matrix b; + Matrix res; + VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(PacketSize,1) ); + VERIFY_IS_APPROX( (res=a.lazyProduct(b)), MatrixType::Zero(PacketSize,1) ); + } + + { + Matrix a; + Matrix b; + Matrix res; + VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(PacketSize1,1) ); + VERIFY_IS_APPROX( (res=a.lazyProduct(b)), MatrixType::Zero(PacketSize1,1) ); + } + + { + Matrix a(PacketSize,0); + Matrix b(0,1); + Matrix res; + VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(PacketSize,1) ); + VERIFY_IS_APPROX( (res=a.lazyProduct(b)), MatrixType::Zero(PacketSize,1) ); + } + + { + Matrix a(PacketSize1,0); + Matrix b(0,1); + Matrix res; + VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(PacketSize1,1) ); + VERIFY_IS_APPROX( (res=a.lazyProduct(b)), MatrixType::Zero(PacketSize1,1) ); + } +} + +void bug_127() { // Bug 127 // @@ -171,7 +230,8 @@ void test_product_extra() CALL_SUBTEST_2( mat_mat_scalar_scalar_product() ); CALL_SUBTEST_3( product_extra(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); CALL_SUBTEST_4( product_extra(MatrixXcd(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); + CALL_SUBTEST_1( zero_sized_objects(MatrixXf(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); } - CALL_SUBTEST_5( zero_sized_objects() ); + CALL_SUBTEST_5( bug_127() ); CALL_SUBTEST_6( unaligned_objects() ); } diff --git a/ground/gcs/src/libs/eigen/test/product_large.cpp b/ground/gcs/src/libs/eigen/test/product_large.cpp index 03d7bd8ed..6bb4d1ad1 100644 --- a/ground/gcs/src/libs/eigen/test/product_large.cpp +++ b/ground/gcs/src/libs/eigen/test/product_large.cpp @@ -9,6 +9,27 @@ #include "product.h" +template +void test_aliasing() +{ + int rows = internal::random(1,12); + int cols = internal::random(1,12); + typedef Matrix MatrixType; + typedef Matrix VectorType; + VectorType x(cols); x.setRandom(); + VectorType z(x); + VectorType y(rows); y.setZero(); + MatrixType A(rows,cols); A.setRandom(); + // CwiseBinaryOp + VERIFY_IS_APPROX(x = y + A*x, A*z); + x = z; + // CwiseUnaryOp + VERIFY_IS_APPROX(x = T(1.)*(A*x), A*z); + x = z; + VERIFY_IS_APPROX(x = y+(-(A*x)), -A*z); + x = z; +} + void test_product_large() { for(int i = 0; i < g_repeat; i++) { @@ -17,6 +38,8 @@ void test_product_large() CALL_SUBTEST_3( product(MatrixXi(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); CALL_SUBTEST_4( product(MatrixXcf(internal::random(1,EIGEN_TEST_MAX_SIZE/2), internal::random(1,EIGEN_TEST_MAX_SIZE/2))) ); CALL_SUBTEST_5( product(Matrix(internal::random(1,EIGEN_TEST_MAX_SIZE), internal::random(1,EIGEN_TEST_MAX_SIZE))) ); + + CALL_SUBTEST_1( test_aliasing() ); } #if defined EIGEN_TEST_PART_6 diff --git a/ground/gcs/src/libs/eigen/test/product_mmtr.cpp b/ground/gcs/src/libs/eigen/test/product_mmtr.cpp index 7d6746800..aeba009f4 
100644 --- a/ground/gcs/src/libs/eigen/test/product_mmtr.cpp +++ b/ground/gcs/src/libs/eigen/test/product_mmtr.cpp @@ -24,8 +24,8 @@ template void mmtr(int size) DenseIndex othersize = internal::random(1,200); - MatrixColMaj matc(size, size); - MatrixRowMaj matr(size, size); + MatrixColMaj matc = MatrixColMaj::Zero(size, size); + MatrixRowMaj matr = MatrixRowMaj::Zero(size, size); MatrixColMaj ref1(size, size), ref2(size, size); MatrixColMaj soc(size,othersize); soc.setRandom(); diff --git a/ground/gcs/src/libs/eigen/test/product_notemporary.cpp b/ground/gcs/src/libs/eigen/test/product_notemporary.cpp index 258d238e2..5599d268d 100644 --- a/ground/gcs/src/libs/eigen/test/product_notemporary.cpp +++ b/ground/gcs/src/libs/eigen/test/product_notemporary.cpp @@ -58,10 +58,19 @@ template void product_notemporary(const MatrixType& m) r1 = internal::random(8,rows-r0); VERIFY_EVALUATION_COUNT( m3 = (m1 * m2.adjoint()), 1); + VERIFY_EVALUATION_COUNT( m3 = (m1 * m2.adjoint()).transpose(), 1); VERIFY_EVALUATION_COUNT( m3.noalias() = m1 * m2.adjoint(), 0); + VERIFY_EVALUATION_COUNT( m3 = s1 * (m1 * m2.transpose()), 1); + VERIFY_EVALUATION_COUNT( m3 = m3 + s1 * (m1 * m2.transpose()), 1); VERIFY_EVALUATION_COUNT( m3.noalias() = s1 * (m1 * m2.transpose()), 0); + VERIFY_EVALUATION_COUNT( m3 = m3 + (m1 * m2.adjoint()), 1); + VERIFY_EVALUATION_COUNT( m3 = m3 + (m1 * m2.adjoint()).transpose(), 1); + VERIFY_EVALUATION_COUNT( m3.noalias() = m3 + m1 * m2.transpose(), 1); // 0 in 3.3 + VERIFY_EVALUATION_COUNT( m3.noalias() += m3 + m1 * m2.transpose(), 1); // 0 in 3.3 + VERIFY_EVALUATION_COUNT( m3.noalias() -= m3 + m1 * m2.transpose(), 1); // 0 in 3.3 + VERIFY_EVALUATION_COUNT( m3.noalias() = s1 * m1 * s2 * m2.adjoint(), 0); VERIFY_EVALUATION_COUNT( m3.noalias() = s1 * m1 * s2 * (m1*s3+m2*s2).adjoint(), 1); VERIFY_EVALUATION_COUNT( m3.noalias() = (s1 * m1).adjoint() * s2 * m2, 0); diff --git a/ground/gcs/src/libs/eigen/test/product_trmm.cpp b/ground/gcs/src/libs/eigen/test/product_trmm.cpp index 506a1aeb9..d715b9a36 100644 --- a/ground/gcs/src/libs/eigen/test/product_trmm.cpp +++ b/ground/gcs/src/libs/eigen/test/product_trmm.cpp @@ -51,6 +51,7 @@ void trmm(int rows=internal::random(1,EIGEN_TEST_MAX_SIZE), ge_xs_save = ge_xs; VERIFY_IS_APPROX( (ge_xs_save + s1*triTr.conjugate() * (s2*ge_left.adjoint())).eval(), ge_xs.noalias() += (s1*mat.adjoint()).template triangularView() * (s2*ge_left.adjoint()) ); + ge_sx.setRandom(); ge_sx_save = ge_sx; VERIFY_IS_APPROX( ge_sx_save - (ge_right.adjoint() * (-s1 * triTr).conjugate()).eval(), ge_sx.noalias() -= (ge_right.adjoint() * (-s1 * mat).adjoint().template triangularView()).eval()); diff --git a/ground/gcs/src/libs/eigen/test/qr_fullpivoting.cpp b/ground/gcs/src/libs/eigen/test/qr_fullpivoting.cpp index 15d7299d7..511f2473f 100644 --- a/ground/gcs/src/libs/eigen/test/qr_fullpivoting.cpp +++ b/ground/gcs/src/libs/eigen/test/qr_fullpivoting.cpp @@ -130,4 +130,8 @@ void test_qr_fullpivoting() // Test problem size constructors CALL_SUBTEST_7(FullPivHouseholderQR(10, 20)); + CALL_SUBTEST_7((FullPivHouseholderQR >(10,20))); + CALL_SUBTEST_7((FullPivHouseholderQR >(Matrix::Random()))); + CALL_SUBTEST_7((FullPivHouseholderQR >(20,10))); + CALL_SUBTEST_7((FullPivHouseholderQR >(Matrix::Random()))); } diff --git a/ground/gcs/src/libs/eigen/test/real_qz.cpp b/ground/gcs/src/libs/eigen/test/real_qz.cpp index 7d743a734..a1766c6d9 100644 --- a/ground/gcs/src/libs/eigen/test/real_qz.cpp +++ b/ground/gcs/src/libs/eigen/test/real_qz.cpp @@ -25,6 +25,22 @@ template void real_qz(const 
MatrixType& m) MatrixType A = MatrixType::Random(dim,dim), B = MatrixType::Random(dim,dim); + + // Regression test for bug 985: Randomly set rows or columns to zero + Index k=internal::random(0, dim-1); + switch(internal::random(0,10)) { + case 0: + A.row(k).setZero(); break; + case 1: + A.col(k).setZero(); break; + case 2: + B.row(k).setZero(); break; + case 3: + B.col(k).setZero(); break; + default: + break; + } + RealQZ qz(A,B); VERIFY_IS_EQUAL(qz.info(), Success); diff --git a/ground/gcs/src/libs/eigen/test/redux.cpp b/ground/gcs/src/libs/eigen/test/redux.cpp index 0d176e500..50b473838 100644 --- a/ground/gcs/src/libs/eigen/test/redux.cpp +++ b/ground/gcs/src/libs/eigen/test/redux.cpp @@ -53,6 +53,14 @@ template void matrixRedux(const MatrixType& m) VERIFY_IS_APPROX(m1_for_prod.block(r0,c0,r1,c1).prod(), m1_for_prod.block(r0,c0,r1,c1).eval().prod()); VERIFY_IS_APPROX(m1.block(r0,c0,r1,c1).real().minCoeff(), m1.block(r0,c0,r1,c1).real().eval().minCoeff()); VERIFY_IS_APPROX(m1.block(r0,c0,r1,c1).real().maxCoeff(), m1.block(r0,c0,r1,c1).real().eval().maxCoeff()); + + // regression for bug 1090 + const int R1 = MatrixType::RowsAtCompileTime>=2 ? MatrixType::RowsAtCompileTime/2 : 6; + const int C1 = MatrixType::ColsAtCompileTime>=2 ? MatrixType::ColsAtCompileTime/2 : 6; + if(R1<=rows-r0 && C1<=cols-c0) + { + VERIFY_IS_APPROX( (m1.template block(r0,c0).sum()), m1.block(r0,c0,R1,C1).sum() ); + } // test empty objects VERIFY_IS_APPROX(m1.block(r0,c0,0,0).sum(), Scalar(0)); diff --git a/ground/gcs/src/libs/eigen/test/ref.cpp b/ground/gcs/src/libs/eigen/test/ref.cpp index 65b4f5a3e..8297e263a 100644 --- a/ground/gcs/src/libs/eigen/test/ref.cpp +++ b/ground/gcs/src/libs/eigen/test/ref.cpp @@ -14,9 +14,9 @@ static int nb_temporaries; -inline void on_temporary_creation(int size) { +inline void on_temporary_creation(int) { // here's a great place to set a breakpoint when debugging failures in this test! 
- if(size!=0) nb_temporaries++; + nb_temporaries++; } @@ -34,6 +34,18 @@ inline void on_temporary_creation(int size) { // test Ref.h +// Deal with i387 extended precision +#if EIGEN_ARCH_i386 && !(EIGEN_ARCH_x86_64) + +#if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_LEAST(4,4) +#pragma GCC optimize ("-ffloat-store") +#else +#undef VERIFY_IS_EQUAL +#define VERIFY_IS_EQUAL(X,Y) VERIFY_IS_APPROX(X,Y) +#endif + +#endif + template void ref_matrix(const MatrixType& m) { typedef typename MatrixType::Index Index; @@ -71,7 +83,6 @@ template void ref_matrix(const MatrixType& m) rm2 = m2.block(i,j,brows,bcols); VERIFY_IS_EQUAL(m1, m2); - ConstRefDynMat rm3 = m1.block(i,j,brows,bcols); m1.block(i,j,brows,bcols) *= 2; m2.block(i,j,brows,bcols) *= 2; @@ -154,59 +165,101 @@ template void check_const_correctness(const PlainObjec VERIFY( !(Ref::Flags & LvalueBit) ); } -EIGEN_DONT_INLINE void call_ref_1(Ref ) { } -EIGEN_DONT_INLINE void call_ref_2(const Ref& ) { } -EIGEN_DONT_INLINE void call_ref_3(Ref > ) { } -EIGEN_DONT_INLINE void call_ref_4(const Ref >& ) { } -EIGEN_DONT_INLINE void call_ref_5(Ref > ) { } -EIGEN_DONT_INLINE void call_ref_6(const Ref >& ) { } +template +EIGEN_DONT_INLINE void call_ref_1(Ref a, const B &b) { VERIFY_IS_EQUAL(a,b); } +template +EIGEN_DONT_INLINE void call_ref_2(const Ref& a, const B &b) { VERIFY_IS_EQUAL(a,b); } +template +EIGEN_DONT_INLINE void call_ref_3(Ref > a, const B &b) { VERIFY_IS_EQUAL(a,b); } +template +EIGEN_DONT_INLINE void call_ref_4(const Ref >& a, const B &b) { VERIFY_IS_EQUAL(a,b); } +template +EIGEN_DONT_INLINE void call_ref_5(Ref > a, const B &b) { VERIFY_IS_EQUAL(a,b); } +template +EIGEN_DONT_INLINE void call_ref_6(const Ref >& a, const B &b) { VERIFY_IS_EQUAL(a,b); } +template +EIGEN_DONT_INLINE void call_ref_7(Ref > a, const B &b) { VERIFY_IS_EQUAL(a,b); } void call_ref() { - VectorXcf ca(10); - VectorXf a(10); + VectorXcf ca = VectorXcf::Random(10); + VectorXf a = VectorXf::Random(10); + RowVectorXf b = RowVectorXf::Random(10); + MatrixXf A = MatrixXf::Random(10,10); + RowVector3f c = RowVector3f::Random(); const VectorXf& ac(a); VectorBlock ab(a,0,3); - MatrixXf A(10,10); const VectorBlock abc(a,0,3); + - VERIFY_EVALUATION_COUNT( call_ref_1(a), 0); - //call_ref_1(ac); // does not compile because ac is const - VERIFY_EVALUATION_COUNT( call_ref_1(ab), 0); - VERIFY_EVALUATION_COUNT( call_ref_1(a.head(4)), 0); - VERIFY_EVALUATION_COUNT( call_ref_1(abc), 0); - VERIFY_EVALUATION_COUNT( call_ref_1(A.col(3)), 0); - // call_ref_1(A.row(3)); // does not compile because innerstride!=1 - VERIFY_EVALUATION_COUNT( call_ref_3(A.row(3)), 0); - VERIFY_EVALUATION_COUNT( call_ref_4(A.row(3)), 0); - //call_ref_1(a+a); // does not compile for obvious reason + VERIFY_EVALUATION_COUNT( call_ref_1(a,a), 0); + VERIFY_EVALUATION_COUNT( call_ref_1(b,b.transpose()), 0); +// call_ref_1(ac,a RowMatrixXd; +int test_ref_overload_fun1(Ref ) { return 1; } +int test_ref_overload_fun1(Ref ) { return 2; } +int test_ref_overload_fun1(Ref ) { return 3; } + +int test_ref_overload_fun2(Ref ) { return 4; } +int test_ref_overload_fun2(Ref ) { return 5; } + +// See also bug 969 +void test_ref_overloads() +{ + MatrixXd Ad, Bd; + RowMatrixXd rAd, rBd; + VERIFY( test_ref_overload_fun1(Ad)==1 ); + VERIFY( test_ref_overload_fun1(rAd)==2 ); + + MatrixXf Af, Bf; + VERIFY( test_ref_overload_fun2(Ad)==4 ); + VERIFY( test_ref_overload_fun2(Ad+Bd)==4 ); + VERIFY( test_ref_overload_fun2(Af+Bf)==5 ); } void test_ref() @@ -229,4 +282,6 @@ void test_ref() CALL_SUBTEST_5( 
ref_matrix(MatrixXi(internal::random(1,10),internal::random(1,10))) ); CALL_SUBTEST_6( call_ref() ); } + + CALL_SUBTEST_7( test_ref_overloads() ); } diff --git a/ground/gcs/src/libs/eigen/test/rvalue_types.cpp b/ground/gcs/src/libs/eigen/test/rvalue_types.cpp new file mode 100644 index 000000000..b3c85652c --- /dev/null +++ b/ground/gcs/src/libs/eigen/test/rvalue_types.cpp @@ -0,0 +1,62 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#include "main.h" + +#include + +#ifdef EIGEN_HAVE_RVALUE_REFERENCES +template +void rvalue_copyassign(const MatrixType& m) +{ + + typedef typename internal::traits::Scalar Scalar; + + // create a temporary which we are about to destroy by moving + MatrixType tmp = m; + long src_address = reinterpret_cast(tmp.data()); + + // move the temporary to n + MatrixType n = std::move(tmp); + long dst_address = reinterpret_cast(n.data()); + + if (MatrixType::RowsAtCompileTime==Dynamic|| MatrixType::ColsAtCompileTime==Dynamic) + { + // verify that we actually moved the guts + VERIFY_IS_EQUAL(src_address, dst_address); + } + + // verify that the content did not change + Scalar abs_diff = (m-n).array().abs().sum(); + VERIFY_IS_EQUAL(abs_diff, Scalar(0)); +} +#else +template +void rvalue_copyassign(const MatrixType&) {} +#endif + +void test_rvalue_types() +{ + CALL_SUBTEST_1(rvalue_copyassign( MatrixXf::Random(50,50).eval() )); + CALL_SUBTEST_1(rvalue_copyassign( ArrayXXf::Random(50,50).eval() )); + + CALL_SUBTEST_1(rvalue_copyassign( Matrix::Random(50).eval() )); + CALL_SUBTEST_1(rvalue_copyassign( Array::Random(50).eval() )); + + CALL_SUBTEST_1(rvalue_copyassign( Matrix::Random(50).eval() )); + CALL_SUBTEST_1(rvalue_copyassign( Array::Random(50).eval() )); + + CALL_SUBTEST_2(rvalue_copyassign( Array::Random().eval() )); + CALL_SUBTEST_2(rvalue_copyassign( Array::Random().eval() )); + CALL_SUBTEST_2(rvalue_copyassign( Array::Random().eval() )); + + CALL_SUBTEST_2(rvalue_copyassign( Array::Random().eval() )); + CALL_SUBTEST_2(rvalue_copyassign( Array::Random().eval() )); + CALL_SUBTEST_2(rvalue_copyassign( Array::Random().eval() )); +} diff --git a/ground/gcs/src/libs/eigen/test/simplicial_cholesky.cpp b/ground/gcs/src/libs/eigen/test/simplicial_cholesky.cpp index e93a52e9c..786468421 100644 --- a/ground/gcs/src/libs/eigen/test/simplicial_cholesky.cpp +++ b/ground/gcs/src/libs/eigen/test/simplicial_cholesky.cpp @@ -11,26 +11,31 @@ template void test_simplicial_cholesky_T() { - SimplicialCholesky, Lower> chol_colmajor_lower; - SimplicialCholesky, Upper> chol_colmajor_upper; - SimplicialLLT, Lower> llt_colmajor_lower; - SimplicialLDLT, Upper> llt_colmajor_upper; - SimplicialLDLT, Lower> ldlt_colmajor_lower; - SimplicialLDLT, Upper> ldlt_colmajor_upper; + SimplicialCholesky, Lower> chol_colmajor_lower_amd; + SimplicialCholesky, Upper> chol_colmajor_upper_amd; + SimplicialLLT, Lower> llt_colmajor_lower_amd; + SimplicialLLT, Upper> llt_colmajor_upper_amd; + SimplicialLDLT, Lower> ldlt_colmajor_lower_amd; + SimplicialLDLT, Upper> ldlt_colmajor_upper_amd; + SimplicialLDLT, Lower, NaturalOrdering > ldlt_colmajor_lower_nat; + SimplicialLDLT, Upper, NaturalOrdering > ldlt_colmajor_upper_nat; - check_sparse_spd_solving(chol_colmajor_lower); - check_sparse_spd_solving(chol_colmajor_upper); - 
check_sparse_spd_solving(llt_colmajor_lower); - check_sparse_spd_solving(llt_colmajor_upper); - check_sparse_spd_solving(ldlt_colmajor_lower); - check_sparse_spd_solving(ldlt_colmajor_upper); + check_sparse_spd_solving(chol_colmajor_lower_amd); + check_sparse_spd_solving(chol_colmajor_upper_amd); + check_sparse_spd_solving(llt_colmajor_lower_amd); + check_sparse_spd_solving(llt_colmajor_upper_amd); + check_sparse_spd_solving(ldlt_colmajor_lower_amd); + check_sparse_spd_solving(ldlt_colmajor_upper_amd); - check_sparse_spd_determinant(chol_colmajor_lower); - check_sparse_spd_determinant(chol_colmajor_upper); - check_sparse_spd_determinant(llt_colmajor_lower); - check_sparse_spd_determinant(llt_colmajor_upper); - check_sparse_spd_determinant(ldlt_colmajor_lower); - check_sparse_spd_determinant(ldlt_colmajor_upper); + check_sparse_spd_determinant(chol_colmajor_lower_amd); + check_sparse_spd_determinant(chol_colmajor_upper_amd); + check_sparse_spd_determinant(llt_colmajor_lower_amd); + check_sparse_spd_determinant(llt_colmajor_upper_amd); + check_sparse_spd_determinant(ldlt_colmajor_lower_amd); + check_sparse_spd_determinant(ldlt_colmajor_upper_amd); + + check_sparse_spd_solving(ldlt_colmajor_lower_nat); + check_sparse_spd_solving(ldlt_colmajor_upper_nat); } void test_simplicial_cholesky() diff --git a/ground/gcs/src/libs/eigen/test/sparse.h b/ground/gcs/src/libs/eigen/test/sparse.h index a09c65e5f..e19a76316 100644 --- a/ground/gcs/src/libs/eigen/test/sparse.h +++ b/ground/gcs/src/libs/eigen/test/sparse.h @@ -154,16 +154,16 @@ initSparse(double density, sparseMat.finalize(); } -template void +template void initSparse(double density, Matrix& refVec, - SparseVector& sparseVec, + SparseVector& sparseVec, std::vector* zeroCoords = 0, std::vector* nonzeroCoords = 0) { sparseVec.reserve(int(refVec.size()*density)); sparseVec.setZero(); - for(int i=0; i(0,1) < density) ? 
internal::random() : Scalar(0); if (v!=Scalar(0)) @@ -178,10 +178,10 @@ initSparse(double density, } } -template void +template void initSparse(double density, Matrix& refVec, - SparseVector& sparseVec, + SparseVector& sparseVec, std::vector* zeroCoords = 0, std::vector* nonzeroCoords = 0) { diff --git a/ground/gcs/src/libs/eigen/test/sparse_basic.cpp b/ground/gcs/src/libs/eigen/test/sparse_basic.cpp index 498ecfe29..abe6a9d14 100644 --- a/ground/gcs/src/libs/eigen/test/sparse_basic.cpp +++ b/ground/gcs/src/libs/eigen/test/sparse_basic.cpp @@ -24,6 +24,7 @@ template void sparse_basic(const SparseMatrixType& re double density = (std::max)(8./(rows*cols), 0.01); typedef Matrix DenseMatrix; typedef Matrix DenseVector; + typedef Matrix RowDenseVector; Scalar eps = 1e-6; Scalar s1 = internal::random(); @@ -52,7 +53,7 @@ template void sparse_basic(const SparseMatrixType& re refMat.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5); VERIFY_IS_APPROX(m, refMat); - /* + // test InnerIterators and Block expressions for (int t=0; t<10; ++t) { @@ -61,23 +62,54 @@ template void sparse_basic(const SparseMatrixType& re int w = internal::random(1,cols-j-1); int h = internal::random(1,rows-i-1); - // VERIFY_IS_APPROX(m.block(i,j,h,w), refMat.block(i,j,h,w)); + VERIFY_IS_APPROX(m.block(i,j,h,w), refMat.block(i,j,h,w)); for(int c=0; c void sparse_basic(const SparseMatrixType& re VERIFY_IS_APPROX(m.row(r) + m.row(r), (m + m).row(r)); VERIFY_IS_APPROX(m.row(r) + m.row(r), refMat.row(r) + refMat.row(r)); } - */ + // test assertion VERIFY_RAISES_ASSERT( m.coeffRef(-1,1) = 0 ); @@ -274,6 +306,8 @@ template void sparse_basic(const SparseMatrixType& re refM4.setRandom(); // sparse cwise* dense VERIFY_IS_APPROX(m3.cwiseProduct(refM4), refM3.cwiseProduct(refM4)); + // dense cwise* sparse + VERIFY_IS_APPROX(refM4.cwiseProduct(m3), refM4.cwiseProduct(refM3)); // VERIFY_IS_APPROX(m3.cwise()/refM4, refM3.cwise()/refM4); // test aliasing @@ -326,6 +360,15 @@ template void sparse_basic(const SparseMatrixType& re refMat2.col(i) = refMat2.col(i) * s1; VERIFY_IS_APPROX(m2,refMat2); } + + VERIFY_IS_APPROX(DenseVector(m2.col(j0)), refMat2.col(j0)); + VERIFY_IS_APPROX(m2.col(j0), refMat2.col(j0)); + + VERIFY_IS_APPROX(RowDenseVector(m2.row(j0)), refMat2.row(j0)); + VERIFY_IS_APPROX(m2.row(j0), refMat2.row(j0)); + + VERIFY_IS_APPROX(m2.block(j0,j1,n0,n0), refMat2.block(j0,j1,n0,n0)); + VERIFY_IS_APPROX((2*m2).block(j0,j1,n0,n0), (2*refMat2).block(j0,j1,n0,n0)); } // test prune @@ -488,6 +531,20 @@ template void sparse_basic(const SparseMatrixType& re SparseMatrixType m1(rows, rows); m1.setIdentity(); VERIFY_IS_APPROX(m1, refMat1); + for(int k=0; k(0,rows-1); + Index j = internal::random(0,rows-1); + Scalar v = internal::random(); + m1.coeffRef(i,j) = v; + refMat1.coeffRef(i,j) = v; + VERIFY_IS_APPROX(m1, refMat1); + if(internal::random(0,10)<2) + m1.makeCompressed(); + } + m1.setIdentity(); + refMat1.setIdentity(); + VERIFY_IS_APPROX(m1, refMat1); } } diff --git a/ground/gcs/src/libs/eigen/test/sparse_product.cpp b/ground/gcs/src/libs/eigen/test/sparse_product.cpp index 664e33887..a2ea9d5b7 100644 --- a/ground/gcs/src/libs/eigen/test/sparse_product.cpp +++ b/ground/gcs/src/libs/eigen/test/sparse_product.cpp @@ -13,8 +13,9 @@ template struct test_outer { static void run(SparseMatrixType& m2, SparseMatrixType& m4, DenseMatrix& refMat2, DenseMatrix& refMat4) { - int c = internal::random(0,m2.cols()-1); - int c1 = internal::random(0,m2.cols()-1); + typedef typename SparseMatrixType::Index Index; + Index c = 
internal::random(0,m2.cols()-1); + Index c1 = internal::random(0,m2.cols()-1); VERIFY_IS_APPROX(m4=m2.col(c)*refMat2.col(c1).transpose(), refMat4=refMat2.col(c)*refMat2.col(c1).transpose()); VERIFY_IS_APPROX(m4=refMat2.col(c1)*m2.col(c).transpose(), refMat4=refMat2.col(c1)*refMat2.col(c).transpose()); } @@ -22,8 +23,9 @@ template struct test_outer struct test_outer { static void run(SparseMatrixType& m2, SparseMatrixType& m4, DenseMatrix& refMat2, DenseMatrix& refMat4) { - int r = internal::random(0,m2.rows()-1); - int c1 = internal::random(0,m2.cols()-1); + typedef typename SparseMatrixType::Index Index; + Index r = internal::random(0,m2.rows()-1); + Index c1 = internal::random(0,m2.cols()-1); VERIFY_IS_APPROX(m4=m2.row(r).transpose()*refMat2.col(c1).transpose(), refMat4=refMat2.row(r).transpose()*refMat2.col(c1).transpose()); VERIFY_IS_APPROX(m4=refMat2.col(c1)*m2.row(r), refMat4=refMat2.col(c1)*refMat2.row(r)); } @@ -37,9 +39,9 @@ template void sparse_product() { typedef typename SparseMatrixType::Index Index; Index n = 100; - const Index rows = internal::random(1,n); - const Index cols = internal::random(1,n); - const Index depth = internal::random(1,n); + const Index rows = internal::random(1,n); + const Index cols = internal::random(1,n); + const Index depth = internal::random(1,n); typedef typename SparseMatrixType::Scalar Scalar; enum { Flags = SparseMatrixType::Flags }; @@ -244,6 +246,7 @@ void test_sparse_product() CALL_SUBTEST_1( (sparse_product >()) ); CALL_SUBTEST_2( (sparse_product, ColMajor > >()) ); CALL_SUBTEST_2( (sparse_product, RowMajor > >()) ); + CALL_SUBTEST_3( (sparse_product >()) ); CALL_SUBTEST_4( (sparse_product_regression_test, Matrix >()) ); } } diff --git a/ground/gcs/src/libs/eigen/test/sparse_solver.h b/ground/gcs/src/libs/eigen/test/sparse_solver.h index d84aff070..e1619d62a 100644 --- a/ground/gcs/src/libs/eigen/test/sparse_solver.h +++ b/ground/gcs/src/libs/eigen/test/sparse_solver.h @@ -67,6 +67,22 @@ void check_sparse_solving(Solver& solver, const typename Solver::MatrixType& A, VERIFY(oldb.isApprox(db) && "sparse solver testing: the rhs should not be modified!"); VERIFY(x.isApprox(refX,test_precision())); } + + // if not too large, do some extra check: + if(A.rows()<2000) + { + + // test expression as input + { + solver.compute(0.5*(A+A)); + Rhs x = solver.solve(b); + VERIFY(x.isApprox(refX,test_precision())); + + Solver solver2(0.5*(A+A)); + Rhs x2 = solver2.solve(b); + VERIFY(x2.isApprox(refX,test_precision())); + } + } } template @@ -124,7 +140,23 @@ void check_sparse_determinant(Solver& solver, const typename Solver::MatrixType& Scalar refDet = dA.determinant(); VERIFY_IS_APPROX(refDet,solver.determinant()); } +template +void check_sparse_abs_determinant(Solver& solver, const typename Solver::MatrixType& A, const DenseMat& dA) +{ + using std::abs; + typedef typename Solver::MatrixType Mat; + typedef typename Mat::Scalar Scalar; + + solver.compute(A); + if (solver.info() != Success) + { + std::cerr << "sparse solver testing: factorization failed (check_sparse_abs_determinant)\n"; + return; + } + Scalar refDet = abs(dA.determinant()); + VERIFY_IS_APPROX(refDet,solver.absDeterminant()); +} template int generate_sparse_spd_problem(Solver& , typename Solver::MatrixType& A, typename Solver::MatrixType& halfA, DenseMat& dA, int maxSize = 300) @@ -145,7 +177,10 @@ int generate_sparse_spd_problem(Solver& , typename Solver::MatrixType& A, typena dA = dM * dM.adjoint(); halfA.resize(size,size); - halfA.template selfadjointView().rankUpdate(M); + 
if(Solver::UpLo==(Lower|Upper)) + halfA = A; + else + halfA.template selfadjointView().rankUpdate(M); return size; } @@ -258,7 +293,17 @@ int generate_sparse_square_problem(Solver&, typename Solver::MatrixType& A, Dens return size; } -template void check_sparse_square_solving(Solver& solver) + +struct prune_column { + int m_col; + prune_column(int col) : m_col(col) {} + template + bool operator()(int, int col, const Scalar&) const { + return col != m_col; + } +}; + +template void check_sparse_square_solving(Solver& solver, bool checkDeficient = false) { typedef typename Solver::MatrixType Mat; typedef typename Mat::Scalar Scalar; @@ -290,6 +335,13 @@ template void check_sparse_square_solving(Solver& solver) b = DenseVector::Zero(size); check_sparse_solving(solver, A, b, dA, b); } + // regression test for Bug 792 (structurally rank deficient matrices): + if(checkDeficient && size>1) { + int col = internal::random(0,size-1); + A.prune(prune_column(col)); + solver.compute(A); + VERIFY_IS_EQUAL(solver.info(), NumericalIssue); + } } // First, get the folder @@ -324,3 +376,20 @@ template void check_sparse_square_determinant(Solver& solver) check_sparse_determinant(solver, A, dA); } } + +template void check_sparse_square_abs_determinant(Solver& solver) +{ + typedef typename Solver::MatrixType Mat; + typedef typename Mat::Scalar Scalar; + typedef Matrix DenseMatrix; + + // generate the problem + Mat A; + DenseMatrix dA; + generate_sparse_square_problem(solver, A, dA, 30); + A.makeCompressed(); + for (int i = 0; i < g_repeat; i++) { + check_sparse_abs_determinant(solver, A, dA); + } +} + diff --git a/ground/gcs/src/libs/eigen/test/sparse_vector.cpp b/ground/gcs/src/libs/eigen/test/sparse_vector.cpp index ec5877b6a..0c9476803 100644 --- a/ground/gcs/src/libs/eigen/test/sparse_vector.cpp +++ b/ground/gcs/src/libs/eigen/test/sparse_vector.cpp @@ -9,14 +9,14 @@ #include "sparse.h" -template void sparse_vector(int rows, int cols) +template void sparse_vector(int rows, int cols) { double densityMat = (std::max)(8./(rows*cols), 0.01); double densityVec = (std::max)(8./float(rows), 0.1); typedef Matrix DenseMatrix; typedef Matrix DenseVector; - typedef SparseVector SparseVectorType; - typedef SparseMatrix SparseMatrixType; + typedef SparseVector SparseVectorType; + typedef SparseMatrix SparseMatrixType; Scalar eps = 1e-6; SparseMatrixType m1(rows,rows); @@ -101,9 +101,10 @@ template void sparse_vector(int rows, int cols) void test_sparse_vector() { for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST_1( sparse_vector(8, 8) ); - CALL_SUBTEST_2( sparse_vector >(16, 16) ); - CALL_SUBTEST_1( sparse_vector(299, 535) ); + CALL_SUBTEST_1(( sparse_vector(8, 8) )); + CALL_SUBTEST_2(( sparse_vector, int>(16, 16) )); + CALL_SUBTEST_1(( sparse_vector(299, 535) )); + CALL_SUBTEST_1(( sparse_vector(299, 535) )); } } diff --git a/ground/gcs/src/libs/eigen/test/sparselu.cpp b/ground/gcs/src/libs/eigen/test/sparselu.cpp index 37980defc..b3d67aea8 100644 --- a/ground/gcs/src/libs/eigen/test/sparselu.cpp +++ b/ground/gcs/src/libs/eigen/test/sparselu.cpp @@ -41,9 +41,15 @@ template void test_sparselu_T() SparseLU, AMDOrdering > sparselu_amd; SparseLU, NaturalOrdering > sparselu_natural; - check_sparse_square_solving(sparselu_colamd); - check_sparse_square_solving(sparselu_amd); - check_sparse_square_solving(sparselu_natural); + check_sparse_square_solving(sparselu_colamd, true); + check_sparse_square_solving(sparselu_amd, true); + check_sparse_square_solving(sparselu_natural,true); + + 
check_sparse_square_abs_determinant(sparselu_colamd); + check_sparse_square_abs_determinant(sparselu_amd); + + check_sparse_square_determinant(sparselu_colamd); + check_sparse_square_determinant(sparselu_amd); } void test_sparselu() diff --git a/ground/gcs/src/libs/eigen/test/sparseqr.cpp b/ground/gcs/src/libs/eigen/test/sparseqr.cpp index 6edba30b2..451c0e7f8 100644 --- a/ground/gcs/src/libs/eigen/test/sparseqr.cpp +++ b/ground/gcs/src/libs/eigen/test/sparseqr.cpp @@ -2,24 +2,23 @@ // for linear algebra. // // Copyright (C) 2012 Desire Nuentsa Wakam +// Copyright (C) 2014 Gael Guennebaud // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed #include "sparse.h" #include - template -int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows = 300, int maxCols = 300) +int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows = 300) { - eigen_assert(maxRows >= maxCols); typedef typename MatrixType::Scalar Scalar; int rows = internal::random(1,maxRows); int cols = internal::random(1,rows); double density = (std::max)(8./(rows*cols), 0.01); - A.resize(rows,rows); - dA.resize(rows,rows); + A.resize(rows,cols); + dA.resize(rows,cols); initSparse(density, dA, A,ForceNonZeroDiag); A.makeCompressed(); int nop = internal::random(0, internal::random(0,1) > 0.5 ? cols/2 : 0); @@ -31,6 +30,13 @@ int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows A.col(j0) = s * A.col(j1); dA.col(j0) = s * dA.col(j1); } + +// if(rows void test_sparseqr_scalar() MatrixType A; DenseMat dA; DenseVector refX,x,b; - SparseQR > solver; + SparseQR > solver; generate_sparse_rectangular_problem(A,dA); - int n = A.cols(); - b = DenseVector::Random(n); + b = dA * DenseVector::Random(A.cols()); solver.compute(A); + if(internal::random(0,1)>0.5) + solver.factorize(A); // this checks that calling analyzePattern is not needed if the pattern do not change. 
if (solver.info() != Success) { std::cerr << "sparse QR factorization failed\n"; @@ -60,17 +67,19 @@ template void test_sparseqr_scalar() std::cerr << "sparse QR factorization failed\n"; exit(0); return; - } + } + + VERIFY_IS_APPROX(A * x, b); + //Compare with a dense QR solver ColPivHouseholderQR dqr(dA); refX = dqr.solve(b); VERIFY_IS_EQUAL(dqr.rank(), solver.rank()); - - if(solver.rank() (A * x - b).norm() ); - else + if(solver.rank()==A.cols()) // full rank VERIFY_IS_APPROX(x, refX); +// else +// VERIFY((dA * refX - b).norm() * 2 > (A * x - b).norm() ); // Compute explicitly the matrix Q MatrixType Q, QtQ, idM; @@ -88,3 +97,4 @@ void test_sparseqr() CALL_SUBTEST_2(test_sparseqr_scalar >()); } } + diff --git a/ground/gcs/src/libs/eigen/test/stable_norm.cpp b/ground/gcs/src/libs/eigen/test/stable_norm.cpp index c09fc17b7..231dd9189 100644 --- a/ground/gcs/src/libs/eigen/test/stable_norm.cpp +++ b/ground/gcs/src/libs/eigen/test/stable_norm.cpp @@ -9,11 +9,6 @@ #include "main.h" -template bool isNotNaN(const T& x) -{ - return x==x; -} - // workaround aggressive optimization in ICC template EIGEN_DONT_INLINE T sub(T a, T b) { return a - b; } @@ -55,8 +50,16 @@ template void stable_norm(const MatrixType& m) Index rows = m.rows(); Index cols = m.cols(); - Scalar big = internal::random() * ((std::numeric_limits::max)() * RealScalar(1e-4)); - Scalar small = internal::random() * ((std::numeric_limits::min)() * RealScalar(1e4)); + // get a non-zero random factor + Scalar factor = internal::random(); + while(numext::abs2(factor)(); + Scalar big = factor * ((std::numeric_limits::max)() * RealScalar(1e-4)); + + factor = internal::random(); + while(numext::abs2(factor)(); + Scalar small = factor * ((std::numeric_limits::min)() * RealScalar(1e4)); MatrixType vzero = MatrixType::Zero(rows, cols), vrand = MatrixType::Random(rows, cols), @@ -91,7 +94,7 @@ template void stable_norm(const MatrixType& m) VERIFY_IS_APPROX(vsmall.blueNorm(), sqrt(size)*abs(small)); VERIFY_IS_APPROX(vsmall.hypotNorm(), sqrt(size)*abs(small)); -// Test compilation of cwise() version + // Test compilation of cwise() version VERIFY_IS_APPROX(vrand.colwise().stableNorm(), vrand.colwise().norm()); VERIFY_IS_APPROX(vrand.colwise().blueNorm(), vrand.colwise().norm()); VERIFY_IS_APPROX(vrand.colwise().hypotNorm(), vrand.colwise().norm()); diff --git a/ground/gcs/src/libs/eigen/test/stddeque_overload.cpp b/ground/gcs/src/libs/eigen/test/stddeque_overload.cpp new file mode 100644 index 000000000..4da618bbf --- /dev/null +++ b/ground/gcs/src/libs/eigen/test/stddeque_overload.cpp @@ -0,0 +1,158 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Benoit Jacob +// Copyright (C) 2010 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" + +#include +#include + +EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Vector4f) + +EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Matrix2f) +EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Matrix4f) +EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Matrix4d) + +EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Affine3f) +EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Affine3d) + +EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Quaternionf) +EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(Quaterniond) + +template +void check_stddeque_matrix(const MatrixType& m) +{ + typename MatrixType::Index rows = m.rows(); + typename MatrixType::Index cols = m.cols(); + MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols); + std::deque v(10, MatrixType(rows,cols)), w(20, y); + v[5] = x; + w[6] = v[5]; + VERIFY_IS_APPROX(w[6], v[5]); + v = w; + for(int i = 0; i < 20; i++) + { + VERIFY_IS_APPROX(w[i], v[i]); + } + + v.resize(21); + v[20] = x; + VERIFY_IS_APPROX(v[20], x); + v.resize(22,y); + VERIFY_IS_APPROX(v[21], y); + v.push_back(x); + VERIFY_IS_APPROX(v[22], x); + + // do a lot of push_back such that the deque gets internally resized + // (with memory reallocation) + MatrixType* ref = &w[0]; + for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i) + v.push_back(w[i%w.size()]); + for(unsigned int i=23; i +void check_stddeque_transform(const TransformType&) +{ + typedef typename TransformType::MatrixType MatrixType; + TransformType x(MatrixType::Random()), y(MatrixType::Random()); + std::deque v(10), w(20, y); + v[5] = x; + w[6] = v[5]; + VERIFY_IS_APPROX(w[6], v[5]); + v = w; + for(int i = 0; i < 20; i++) + { + VERIFY_IS_APPROX(w[i], v[i]); + } + + v.resize(21); + v[20] = x; + VERIFY_IS_APPROX(v[20], x); + v.resize(22,y); + VERIFY_IS_APPROX(v[21], y); + v.push_back(x); + VERIFY_IS_APPROX(v[22], x); + + // do a lot of push_back such that the deque gets internally resized + // (with memory reallocation) + TransformType* ref = &w[0]; + for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i) + v.push_back(w[i%w.size()]); + for(unsigned int i=23; i +void check_stddeque_quaternion(const QuaternionType&) +{ + typedef typename QuaternionType::Coefficients Coefficients; + QuaternionType x(Coefficients::Random()), y(Coefficients::Random()); + std::deque v(10), w(20, y); + v[5] = x; + w[6] = v[5]; + VERIFY_IS_APPROX(w[6], v[5]); + v = w; + for(int i = 0; i < 20; i++) + { + VERIFY_IS_APPROX(w[i], v[i]); + } + + v.resize(21); + v[20] = x; + VERIFY_IS_APPROX(v[20], x); + v.resize(22,y); + VERIFY_IS_APPROX(v[21], y); + v.push_back(x); + VERIFY_IS_APPROX(v[22], x); + + // do a lot of push_back such that the deque gets internally resized + // (with memory reallocation) + QuaternionType* ref = &w[0]; + for(int i=0; i<30 || ((ref==&w[0]) && i<300); ++i) + v.push_back(w[i%w.size()]); + for(unsigned int i=23; i +// Copyright (C) 2010 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#include "main.h" + +#include +#include + +EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Vector4f) + +EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Matrix2f) +EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Matrix4f) +EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Matrix4d) + +EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Affine3f) +EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Affine3d) + +EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Quaternionf) +EIGEN_DEFINE_STL_LIST_SPECIALIZATION(Quaterniond) + +template +typename Container::iterator get(Container & c, Position position) +{ + typename Container::iterator it = c.begin(); + std::advance(it, position); + return it; +} + +template +void set(Container & c, Position position, const Value & value) +{ + typename Container::iterator it = c.begin(); + std::advance(it, position); + *it = value; +} + +template +void check_stdlist_matrix(const MatrixType& m) +{ + typename MatrixType::Index rows = m.rows(); + typename MatrixType::Index cols = m.cols(); + MatrixType x = MatrixType::Random(rows,cols), y = MatrixType::Random(rows,cols); + std::list v(10, MatrixType(rows,cols)), w(20, y); + typename std::list::iterator itv = get(v, 5); + typename std::list::iterator itw = get(w, 6); + *itv = x; + *itw = *itv; + VERIFY_IS_APPROX(*itw, *itv); + v = w; + itv = v.begin(); + itw = w.begin(); + for(int i = 0; i < 20; i++) + { + VERIFY_IS_APPROX(*itw, *itv); + ++itv; + ++itw; + } + + v.resize(21); + set(v, 20, x); + VERIFY_IS_APPROX(*get(v, 20), x); + v.resize(22,y); + VERIFY_IS_APPROX(*get(v, 21), y); + v.push_back(x); + VERIFY_IS_APPROX(*get(v, 22), x); + + // do a lot of push_back such that the list gets internally resized + // (with memory reallocation) + MatrixType* ref = &(*get(w, 0)); + for(int i=0; i<30 || ((ref==&(*get(w, 0))) && i<300); ++i) + v.push_back(*get(w, i%w.size())); + for(unsigned int i=23; i +void check_stdlist_transform(const TransformType&) +{ + typedef typename TransformType::MatrixType MatrixType; + TransformType x(MatrixType::Random()), y(MatrixType::Random()); + std::list v(10), w(20, y); + typename std::list::iterator itv = get(v, 5); + typename std::list::iterator itw = get(w, 6); + *itv = x; + *itw = *itv; + VERIFY_IS_APPROX(*itw, *itv); + v = w; + itv = v.begin(); + itw = w.begin(); + for(int i = 0; i < 20; i++) + { + VERIFY_IS_APPROX(*itw, *itv); + ++itv; + ++itw; + } + + v.resize(21); + set(v, 20, x); + VERIFY_IS_APPROX(*get(v, 20), x); + v.resize(22,y); + VERIFY_IS_APPROX(*get(v, 21), y); + v.push_back(x); + VERIFY_IS_APPROX(*get(v, 22), x); + + // do a lot of push_back such that the list gets internally resized + // (with memory reallocation) + TransformType* ref = &(*get(w, 0)); + for(int i=0; i<30 || ((ref==&(*get(w, 0))) && i<300); ++i) + v.push_back(*get(w, i%w.size())); + for(unsigned int i=23; imatrix()==get(w, (i-23)%w.size())->matrix()); + } +} + +template +void check_stdlist_quaternion(const QuaternionType&) +{ + typedef typename QuaternionType::Coefficients Coefficients; + QuaternionType x(Coefficients::Random()), y(Coefficients::Random()); + std::list v(10), w(20, y); + typename std::list::iterator itv = get(v, 5); + typename std::list::iterator itw = get(w, 6); + *itv = x; + *itw = *itv; + VERIFY_IS_APPROX(*itw, *itv); + v = w; + itv = v.begin(); + itw = w.begin(); + for(int i = 0; i < 20; i++) + { + VERIFY_IS_APPROX(*itw, *itv); + ++itv; + ++itw; + } + + v.resize(21); + set(v, 20, x); + VERIFY_IS_APPROX(*get(v, 20), x); + v.resize(22,y); + VERIFY_IS_APPROX(*get(v, 21), y); + v.push_back(x); + VERIFY_IS_APPROX(*get(v, 22), x); + + // do a lot of push_back such that the 
list gets internally resized + // (with memory reallocation) + QuaternionType* ref = &(*get(w, 0)); + for(int i=0; i<30 || ((ref==&(*get(w, 0))) && i<300); ++i) + v.push_back(*get(w, i%w.size())); + for(unsigned int i=23; icoeffs()==get(w, (i-23)%w.size())->coeffs()); + } +} + +void test_stdlist_overload() +{ + // some non vectorizable fixed sizes + CALL_SUBTEST_1(check_stdlist_matrix(Vector2f())); + CALL_SUBTEST_1(check_stdlist_matrix(Matrix3f())); + CALL_SUBTEST_2(check_stdlist_matrix(Matrix3d())); + + // some vectorizable fixed sizes + CALL_SUBTEST_1(check_stdlist_matrix(Matrix2f())); + CALL_SUBTEST_1(check_stdlist_matrix(Vector4f())); + CALL_SUBTEST_1(check_stdlist_matrix(Matrix4f())); + CALL_SUBTEST_2(check_stdlist_matrix(Matrix4d())); + + // some dynamic sizes + CALL_SUBTEST_3(check_stdlist_matrix(MatrixXd(1,1))); + CALL_SUBTEST_3(check_stdlist_matrix(VectorXd(20))); + CALL_SUBTEST_3(check_stdlist_matrix(RowVectorXf(20))); + CALL_SUBTEST_3(check_stdlist_matrix(MatrixXcf(10,10))); + + // some Transform + CALL_SUBTEST_4(check_stdlist_transform(Affine2f())); // does not need the specialization (2+1)^2 = 9 + CALL_SUBTEST_4(check_stdlist_transform(Affine3f())); + CALL_SUBTEST_4(check_stdlist_transform(Affine3d())); + + // some Quaternion + CALL_SUBTEST_5(check_stdlist_quaternion(Quaternionf())); + CALL_SUBTEST_5(check_stdlist_quaternion(Quaterniond())); +} diff --git a/ground/gcs/src/libs/eigen/test/umeyama.cpp b/ground/gcs/src/libs/eigen/test/umeyama.cpp index 738d0af70..2e8092434 100644 --- a/ground/gcs/src/libs/eigen/test/umeyama.cpp +++ b/ground/gcs/src/libs/eigen/test/umeyama.cpp @@ -130,10 +130,11 @@ void run_fixed_size_test(int num_elements) // MUST be positive because in any other case det(cR_t) may become negative for // odd dimensions! - const Scalar c = abs(internal::random()); + // Also if c is to small compared to t.norm(), problem is ill-posed (cf. 
Bug 744) + const Scalar c = internal::random(0.5, 2.0); FixedMatrix R = randMatrixSpecialUnitary(dim); - FixedVector t = Scalar(50)*FixedVector::Random(dim,1); + FixedVector t = Scalar(32)*FixedVector::Random(dim,1); HomMatrix cR_t = HomMatrix::Identity(dim+1,dim+1); cR_t.block(0,0,dim,dim) = c*R; @@ -149,9 +150,9 @@ void run_fixed_size_test(int num_elements) HomMatrix cR_t_umeyama = umeyama(src_block, dst_block); - const Scalar error = ( cR_t_umeyama*src - dst ).array().square().sum(); + const Scalar error = ( cR_t_umeyama*src - dst ).squaredNorm(); - VERIFY(error < Scalar(10)*std::numeric_limits::epsilon()); + VERIFY(error < Scalar(16)*std::numeric_limits::epsilon()); } void test_umeyama() diff --git a/ground/gcs/src/libs/eigen/test/vectorwiseop.cpp b/ground/gcs/src/libs/eigen/test/vectorwiseop.cpp index 9d60b0388..d32fd10cc 100644 --- a/ground/gcs/src/libs/eigen/test/vectorwiseop.cpp +++ b/ground/gcs/src/libs/eigen/test/vectorwiseop.cpp @@ -101,6 +101,28 @@ template void vectorwiseop_array(const ArrayType& m) VERIFY_RAISES_ASSERT(m2.rowwise() /= rowvec.transpose()); VERIFY_RAISES_ASSERT(m1.rowwise() / rowvec.transpose()); + + m2 = m1; + // yes, there might be an aliasing issue there but ".rowwise() /=" + // is suppposed to evaluate " m2.colwise().sum()" into to temporary to avoid + // evaluating the reducions multiple times + if(ArrayType::RowsAtCompileTime>2 || ArrayType::RowsAtCompileTime==Dynamic) + { + m2.rowwise() /= m2.colwise().sum(); + VERIFY_IS_APPROX(m2, m1.rowwise() / m1.colwise().sum()); + } + + // all/any + Array mb(rows,cols); + mb = (m1.real()<=0.7).colwise().all(); + VERIFY( (mb.col(c) == (m1.real().col(c)<=0.7).all()).all() ); + mb = (m1.real()<=0.7).rowwise().all(); + VERIFY( (mb.row(r) == (m1.real().row(r)<=0.7).all()).all() ); + + mb = (m1.real()>=0.7).colwise().any(); + VERIFY( (mb.col(c) == (m1.real().col(c)>=0.7).any()).all() ); + mb = (m1.real()>=0.7).rowwise().any(); + VERIFY( (mb.row(r) == (m1.real().row(r)>=0.7).any()).all() ); } template void vectorwiseop_matrix(const MatrixType& m) diff --git a/ground/gcs/src/libs/eigen/test/visitor.cpp b/ground/gcs/src/libs/eigen/test/visitor.cpp index 39a5d6b5f..844170ec6 100644 --- a/ground/gcs/src/libs/eigen/test/visitor.cpp +++ b/ground/gcs/src/libs/eigen/test/visitor.cpp @@ -55,6 +55,11 @@ template void matrixVisitor(const MatrixType& p) VERIFY_IS_APPROX(maxc, eigen_maxc); VERIFY_IS_APPROX(minc, m.minCoeff()); VERIFY_IS_APPROX(maxc, m.maxCoeff()); + + eigen_maxc = (m.adjoint()*m).maxCoeff(&eigen_maxrow,&eigen_maxcol); + eigen_maxc = (m.adjoint()*m).eval().maxCoeff(&maxrow,&maxcol); + VERIFY(maxrow == eigen_maxrow); + VERIFY(maxcol == eigen_maxcol); } template void vectorVisitor(const VectorType& w) diff --git a/ground/gcs/src/libs/eigen/test/zerosized.cpp b/ground/gcs/src/libs/eigen/test/zerosized.cpp index 6905e584e..da7dd0481 100644 --- a/ground/gcs/src/libs/eigen/test/zerosized.cpp +++ b/ground/gcs/src/libs/eigen/test/zerosized.cpp @@ -9,12 +9,26 @@ #include "main.h" + +template void zeroReduction(const MatrixType& m) { + // Reductions that must hold for zero sized objects + VERIFY(m.all()); + VERIFY(!m.any()); + VERIFY(m.prod()==1); + VERIFY(m.sum()==0); + VERIFY(m.count()==0); + VERIFY(m.allFinite()); + VERIFY(!m.hasNaN()); +} + + template void zeroSizedMatrix() { MatrixType t1; - if (MatrixType::SizeAtCompileTime == Dynamic) + if (MatrixType::SizeAtCompileTime == Dynamic || MatrixType::SizeAtCompileTime == 0) { + zeroReduction(t1); if (MatrixType::RowsAtCompileTime == Dynamic) VERIFY(t1.rows() == 0); if 
(MatrixType::ColsAtCompileTime == Dynamic) @@ -22,9 +36,13 @@ template void zeroSizedMatrix() if (MatrixType::RowsAtCompileTime == Dynamic && MatrixType::ColsAtCompileTime == Dynamic) { + MatrixType t2(0, 0); VERIFY(t2.rows() == 0); VERIFY(t2.cols() == 0); + + zeroReduction(t2); + VERIFY(t1==t2); } } } @@ -33,11 +51,15 @@ template void zeroSizedVector() { VectorType t1; - if (VectorType::SizeAtCompileTime == Dynamic) + if (VectorType::SizeAtCompileTime == Dynamic || VectorType::SizeAtCompileTime==0) { + zeroReduction(t1); VERIFY(t1.size() == 0); VectorType t2(DenseIndex(0)); // DenseIndex disambiguates with 0-the-null-pointer (error with gcc 4.4 and MSVC8) VERIFY(t2.size() == 0); + zeroReduction(t2); + + VERIFY(t1==t2); } } @@ -51,9 +73,12 @@ void test_zerosized() zeroSizedMatrix >(); zeroSizedMatrix >(); zeroSizedMatrix >(); - + zeroSizedMatrix >(); + zeroSizedMatrix >(); + zeroSizedVector(); zeroSizedVector(); zeroSizedVector(); zeroSizedVector >(); + zeroSizedVector >(); } diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/AlignedVector3 b/ground/gcs/src/libs/eigen/unsupported/Eigen/AlignedVector3 index 7b45e6cce..29d5c90fb 100644 --- a/ground/gcs/src/libs/eigen/unsupported/Eigen/AlignedVector3 +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/AlignedVector3 @@ -177,7 +177,7 @@ template class AlignedVector3 } template - inline bool isApprox(const MatrixBase& other, RealScalar eps=NumTraits::dummy_precision()) const + inline bool isApprox(const MatrixBase& other, const RealScalar& eps=NumTraits::dummy_precision()) const { return m_coeffs.template head<3>().isApprox(other,eps); } diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/CMakeLists.txt b/ground/gcs/src/libs/eigen/unsupported/Eigen/CMakeLists.txt index e06f1238b..e1fbf97e2 100644 --- a/ground/gcs/src/libs/eigen/unsupported/Eigen/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/CMakeLists.txt @@ -1,6 +1,6 @@ -set(Eigen_HEADERS AdolcForward BVH IterativeSolvers MatrixFunctions MoreVectorization AutoDiff AlignedVector3 Polynomials - FFT NonLinearOptimization SparseExtra IterativeSolvers - NumericalDiff Skyline MPRealSupport OpenGLSupport KroneckerProduct Splines LevenbergMarquardt +set(Eigen_HEADERS AdolcForward AlignedVector3 ArpackSupport AutoDiff BVH FFT IterativeSolvers KroneckerProduct LevenbergMarquardt + MatrixFunctions MoreVectorization MPRealSupport NonLinearOptimization NumericalDiff OpenGLSupport Polynomials + Skyline SparseExtra Splines ) install(FILES diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/OpenGLSupport b/ground/gcs/src/libs/eigen/unsupported/Eigen/OpenGLSupport index 4454bf820..e2769449c 100644 --- a/ground/gcs/src/libs/eigen/unsupported/Eigen/OpenGLSupport +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/OpenGLSupport @@ -11,7 +11,12 @@ #define EIGEN_OPENGL_MODULE #include -#include + +#if defined(__APPLE_CC__) + #include +#else + #include +#endif namespace Eigen { @@ -173,11 +178,11 @@ template void glLoadMatrix(const Transform& t) template void glLoadMatrix(const Transform& t) { glLoadMatrix(t.matrix()); } template void glLoadMatrix(const Transform& t) { glLoadMatrix(Transform(t).matrix()); } -static void glRotate(const Rotation2D& rot) +inline void glRotate(const Rotation2D& rot) { glRotatef(rot.angle()*180.f/float(M_PI), 0.f, 0.f, 1.f); } -static void glRotate(const Rotation2D& rot) +inline void glRotate(const Rotation2D& rot) { glRotated(rot.angle()*180.0/M_PI, 0.0, 0.0, 1.0); } @@ -241,18 +246,18 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,double, 
4,4,Doublev) #ifdef GL_VERSION_2_0 -static void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); } -static void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); } +inline void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); } +inline void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); } -static void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); } -static void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); } +inline void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); } +inline void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); } -static void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); } -static void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); } +inline void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); } +inline void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); } -static void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); } -static void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); } -static void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); } +inline void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); } +inline void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); } +inline void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); } EIGEN_GL_FUNC1_DECLARATION (glUniform,GLint,const) @@ -289,9 +294,9 @@ EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,3,Matrix #ifdef GL_VERSION_3_0 -static void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); } -static void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); } -static void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); } +inline void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); } +inline void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); } +inline void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); } EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 2,2uiv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 3,3uiv_ei) @@ -300,9 +305,9 @@ EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 4,4uiv_ei) #endif #ifdef GL_ARB_gpu_shader_fp64 -static void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); } -static void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); } -static void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); } +inline void glUniform2dv_ei (GLint loc, const double* v) { glUniform2dv(loc,1,v); } +inline void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); } +inline void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); } EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 2,2dv_ei) EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 3,3dv_ei) diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h index 8d42e69b9..fde3ff61a 100644 --- 
a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h @@ -631,7 +631,7 @@ template struct NumTraits > { typedef AutoDiffScalar::Real,DerType::RowsAtCompileTime,DerType::ColsAtCompileTime> > Real; typedef AutoDiffScalar NonInteger; - typedef AutoDiffScalar& Nested; + typedef AutoDiffScalar Nested; enum{ RequireInitialization = 1 }; diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/CMakeLists.txt b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/CMakeLists.txt index f3180b52b..d8b9f4068 100644 --- a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/CMakeLists.txt @@ -1,7 +1,10 @@ ADD_SUBDIRECTORY(AutoDiff) ADD_SUBDIRECTORY(BVH) +ADD_SUBDIRECTORY(Eigenvalues) ADD_SUBDIRECTORY(FFT) ADD_SUBDIRECTORY(IterativeSolvers) +ADD_SUBDIRECTORY(KroneckerProduct) +ADD_SUBDIRECTORY(LevenbergMarquardt) ADD_SUBDIRECTORY(MatrixFunctions) ADD_SUBDIRECTORY(MoreVectorization) ADD_SUBDIRECTORY(NonLinearOptimization) @@ -9,5 +12,4 @@ ADD_SUBDIRECTORY(NumericalDiff) ADD_SUBDIRECTORY(Polynomials) ADD_SUBDIRECTORY(Skyline) ADD_SUBDIRECTORY(SparseExtra) -ADD_SUBDIRECTORY(KroneckerProduct) ADD_SUBDIRECTORY(Splines) diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/Eigenvalues/CMakeLists.txt b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/Eigenvalues/CMakeLists.txt new file mode 100644 index 000000000..1d4387c82 --- /dev/null +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/Eigenvalues/CMakeLists.txt @@ -0,0 +1,6 @@ +FILE(GLOB Eigen_Eigenvalues_SRCS "*.h") + +INSTALL(FILES + ${Eigen_Eigenvalues_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/Eigenvalues COMPONENT Devel + ) diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h index 3f18beeeb..dc0093eb9 100644 --- a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h @@ -58,7 +58,9 @@ void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV) Scalar rho, rho_1, alpha; d.setZero(); - CINV.startFill(); // FIXME estimate the number of non-zeros + typedef Triplet T; + std::vector tripletList; + for (Index i = 0; i < rows; ++i) { d[i] = 1.0; @@ -84,11 +86,12 @@ void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV) // FIXME add a generic "prune/filter" expression for both dense and sparse object to sparse for (Index j=0; j TmpVec; diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/DGMRES.h index 9fcc8a8d9..68fc997f7 100644 --- a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/DGMRES.h +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/DGMRES.h @@ -133,8 +133,8 @@ class DGMRES : public IterativeSolverBase > * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. 
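In the ConstrainedConjGrad hunk above, the removed CINV.startFill()/fill() calls belong to the Eigen 2-era sparse filling API; the replacement collects Triplet entries and builds the matrix in one pass. A minimal sketch of that pattern (illustrative indices and values, double scalars assumed, not code from the patch):

#include <vector>
#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> buildExample()
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> tripletList;
  tripletList.reserve(3);                 // optional: rough estimate of non-zeros
  tripletList.push_back(T(0, 0,  1.0));   // (row, column, value)
  tripletList.push_back(T(1, 2, -2.5));
  tripletList.push_back(T(3, 1,  4.0));

  Eigen::SparseMatrix<double> m(4, 4);
  m.setFromTriplets(tripletList.begin(), tripletList.end()); // duplicate entries are summed
  return m;
}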
*/ - DGMRES(const MatrixType& A) : Base(A),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) - {} + template + explicit DGMRES(const EigenBase& A) : Base(A.derived()), m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {} ~DGMRES() {} diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h index 073367506..ea5deb26d 100644 --- a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/GMRES.h @@ -2,7 +2,7 @@ // for linear algebra. // // Copyright (C) 2011 Gael Guennebaud -// Copyright (C) 2012 Kolja Brix +// Copyright (C) 2012, 2014 Kolja Brix // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed @@ -72,16 +72,20 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition VectorType p0 = rhs - mat*x; VectorType r0 = precond.solve(p0); -// RealScalar r0_sqnorm = r0.squaredNorm(); + + // is initial guess already good enough? + if(abs(r0.norm()) < tol) { + return true; + } VectorType w = VectorType::Zero(restart + 1); - FMatrixType H = FMatrixType::Zero(m, restart + 1); + FMatrixType H = FMatrixType::Zero(m, restart + 1); // Hessenberg matrix VectorType tau = VectorType::Zero(restart + 1); std::vector < JacobiRotation < Scalar > > G(restart); // generate first Householder vector - VectorType e; + VectorType e(m-1); RealScalar beta; r0.makeHouseholder(e, tau.coeffRef(0), beta); w(0)=(Scalar) beta; @@ -242,20 +246,7 @@ struct traits > * \endcode * * By default the iterations start with x=0 as an initial guess of the solution. - * One can control the start using the solveWithGuess() method. Here is a step by - * step execution example starting with a random guess and printing the evolution - * of the estimated error: - * * \code - * x = VectorXd::Random(n); - * solver.setMaxIterations(1); - * int i = 0; - * do { - * x = solver.solveWithGuess(b,x); - * std::cout << i << " : " << solver.error() << std::endl; - * ++i; - * } while (solver.info()!=Success && i<100); - * \endcode - * Note that such a step by step excution is slightly slower. + * One can control the start using the solveWithGuess() method. * * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ @@ -294,7 +285,8 @@ public: * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. 
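The GMRES documentation above loses its step-by-step solveWithGuess() walkthrough (the same text is dropped from MINRES below), but the pattern it described still applies. A minimal sketch of that loop, with a hypothetical helper function and assuming the unsupported IterativeSolvers header and a sparse double system:

#include <iostream>
#include <Eigen/SparseCore>
#include <unsupported/Eigen/IterativeSolvers>

void gmresWithGuess(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b)
{
  Eigen::GMRES<Eigen::SparseMatrix<double> > solver(A);
  Eigen::VectorXd x = Eigen::VectorXd::Zero(A.cols());     // starting guess

  solver.setMaxIterations(1);                               // advance at most one iteration per call
  int i = 0;
  do {
    x = solver.solveWithGuess(b, x);
    std::cout << i << " : " << solver.error() << std::endl;
    ++i;
  } while (solver.info() != Eigen::Success && i < 100);
}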
*/ - GMRES(const MatrixType& A) : Base(A), m_restart(30) {} + template + explicit GMRES(const EigenBase& A) : Base(A.derived()), m_restart(30) {} ~GMRES() {} diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/MINRES.h index 0e56342a8..670f274bb 100644 --- a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/MINRES.h +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/IterativeSolvers/MINRES.h @@ -37,22 +37,31 @@ namespace Eigen { typedef typename Dest::Scalar Scalar; typedef Matrix VectorType; + // Check for zero rhs + const RealScalar rhsNorm2(rhs.squaredNorm()); + if(rhsNorm2 == 0) + { + x.setZero(); + iters = 0; + tol_error = 0; + return; + } + // initialize const int maxIters(iters); // initialize maxIters to iters const int N(mat.cols()); // the size of the matrix - const RealScalar rhsNorm2(rhs.squaredNorm()); const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2) // Initialize preconditioned Lanczos -// VectorType v_old(N); // will be initialized inside loop + VectorType v_old(N); // will be initialized inside loop VectorType v( VectorType::Zero(N) ); //initialize v VectorType v_new(rhs-mat*x); //initialize v_new RealScalar residualNorm2(v_new.squaredNorm()); -// VectorType w(N); // will be initialized inside loop + VectorType w(N); // will be initialized inside loop VectorType w_new(precond.solve(v_new)); // initialize w_new // RealScalar beta; // will be initialized inside loop RealScalar beta_new2(v_new.dot(w_new)); - eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); + eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); RealScalar beta_new(sqrt(beta_new2)); const RealScalar beta_one(beta_new); v_new /= beta_new; @@ -62,14 +71,14 @@ namespace Eigen { RealScalar c_old(1.0); RealScalar s(0.0); // the sine of the Givens rotation RealScalar s_old(0.0); // the sine of the Givens rotation -// VectorType p_oold(N); // will be initialized in loop + VectorType p_oold(N); // will be initialized in loop VectorType p_old(VectorType::Zero(N)); // initialize p_old=0 VectorType p(p_old); // initialize p=0 RealScalar eta(1.0); iters = 0; // reset iters - while ( iters < maxIters ){ - + while ( iters < maxIters ) + { // Preconditioned Lanczos /* Note that there are 4 variants on the Lanczos algorithm. These are * described in Paige, C. C. (1972). Computational variants of @@ -81,17 +90,17 @@ namespace Eigen { * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987). 
*/ const RealScalar beta(beta_new); -// v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter - const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT + v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter +// const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT v = v_new; // update -// w = w_new; // update - const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT + w = w_new; // update +// const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT v_new.noalias() = mat*w - beta*v_old; // compute v_new const RealScalar alpha = v_new.dot(w); v_new -= alpha*v; // overwrite v_new w_new = precond.solve(v_new); // overwrite w_new beta_new2 = v_new.dot(w_new); // compute beta_new - eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); + eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); beta_new = sqrt(beta_new2); // compute beta_new v_new /= beta_new; // overwrite v_new for next iteration w_new /= beta_new; // overwrite w_new for next iteration @@ -107,21 +116,28 @@ namespace Eigen { s=beta_new/r1; // new sine // Update solution -// p_oold = p_old; - const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT + p_oold = p_old; +// const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT p_old = p; p.noalias()=(w-r2*p_old-r3*p_oold) /r1; // IS NOALIAS REQUIRED? x += beta_one*c*eta*p; + + /* Update the squared residual. Note that this is the estimated residual. + The real residual |Ax-b|^2 may be slightly larger */ residualNorm2 *= s*s; - if ( residualNorm2 < threshold2){ + if ( residualNorm2 < threshold2) + { break; } eta=-s*eta; // update eta iters++; // increment iteration number (for output purposes) } - tol_error = std::sqrt(residualNorm2 / rhsNorm2); // return error. Note that this is the estimated error. The real error |Ax-b|/|b| may be slightly larger + + /* Compute error. Note that this is the estimated error. The real + error |Ax-b|/|b| may be slightly larger */ + tol_error = std::sqrt(residualNorm2 / rhsNorm2); } } @@ -174,20 +190,7 @@ namespace Eigen { * \endcode * * By default the iterations start with x=0 as an initial guess of the solution. - * One can control the start using the solveWithGuess() method. Here is a step by - * step execution example starting with a random guess and printing the evolution - * of the estimated error: - * * \code - * x = VectorXd::Random(n); - * mr.setMaxIterations(1); - * int i = 0; - * do { - * x = mr.solveWithGuess(b,x); - * std::cout << i << " : " << mr.error() << std::endl; - * ++i; - * } while (mr.info()!=Success && i<100); - * \endcode - * Note that such a step by step excution is slightly slower. + * One can control the start using the solveWithGuess() method. * * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ @@ -225,7 +228,8 @@ namespace Eigen { * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ - MINRES(const MatrixType& A) : Base(A) {} + template + explicit MINRES(const EigenBase& A) : Base(A.derived()) {} /** Destructor. 
*/ ~MINRES(){} @@ -250,6 +254,11 @@ namespace Eigen { template void _solveWithGuess(const Rhs& b, Dest& x) const { + typedef typename internal::conditional + >::type MatrixWrapperType; + m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; @@ -259,7 +268,7 @@ namespace Eigen { m_error = Base::m_tolerance; typename Dest::ColXpr xj(x,j); - internal::minres(mp_matrix->template selfadjointView(), b.col(j), xj, + internal::minres(MatrixWrapperType(*mp_matrix), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error); } diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt index 8513803ce..d9690854d 100644 --- a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt @@ -2,5 +2,5 @@ FILE(GLOB Eigen_LevenbergMarquardt_SRCS "*.h") INSTALL(FILES ${Eigen_LevenbergMarquardt_SRCS} - DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/LevenbergMarquardt COMPONENT Devel + DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/LevenbergMarquardt COMPONENT Devel ) diff --git a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h index c32437281..78a307e96 100644 --- a/ground/gcs/src/libs/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h +++ b/ground/gcs/src/libs/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h @@ -110,7 +110,6 @@ void MatrixPowerAtomic::compute2x2(MatrixType& res, RealScalar p) co using std::abs; using std::pow; - ArrayType logTdiag = m_A.diagonal().array().log(); res.coeffRef(0,0) = pow(m_A.coeff(0,0), p); for (Index i=1; i < m_A.cols(); ++i) { diff --git a/ground/gcs/src/libs/eigen/unsupported/doc/examples/CMakeLists.txt b/ground/gcs/src/libs/eigen/unsupported/doc/examples/CMakeLists.txt index 978f9afd8..c47646dfc 100644 --- a/ground/gcs/src/libs/eigen/unsupported/doc/examples/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/unsupported/doc/examples/CMakeLists.txt @@ -10,12 +10,10 @@ FOREACH(example_src ${examples_SRCS}) if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO) target_link_libraries(example_${example} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO}) endif() - GET_TARGET_PROPERTY(example_executable - example_${example} LOCATION) ADD_CUSTOM_COMMAND( TARGET example_${example} POST_BUILD - COMMAND ${example_executable} + COMMAND example_${example} ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out ) ADD_DEPENDENCIES(unsupported_examples example_${example}) diff --git a/ground/gcs/src/libs/eigen/unsupported/doc/snippets/CMakeLists.txt b/ground/gcs/src/libs/eigen/unsupported/doc/snippets/CMakeLists.txt index 4a4157933..f0c5cc2a8 100644 --- a/ground/gcs/src/libs/eigen/unsupported/doc/snippets/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/unsupported/doc/snippets/CMakeLists.txt @@ -14,12 +14,10 @@ FOREACH(snippet_src ${snippets_SRCS}) if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO) target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO}) endif() - GET_TARGET_PROPERTY(compile_snippet_executable - ${compile_snippet_target} LOCATION) ADD_CUSTOM_COMMAND( TARGET ${compile_snippet_target} POST_BUILD - COMMAND ${compile_snippet_executable} + COMMAND ${compile_snippet_target} ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out ) ADD_DEPENDENCIES(unsupported_snippets ${compile_snippet_target}) diff --git 
a/ground/gcs/src/libs/eigen/unsupported/test/CMakeLists.txt b/ground/gcs/src/libs/eigen/unsupported/test/CMakeLists.txt index a94a3b5e5..2e4cfdb2e 100644 --- a/ground/gcs/src/libs/eigen/unsupported/test/CMakeLists.txt +++ b/ground/gcs/src/libs/eigen/unsupported/test/CMakeLists.txt @@ -29,11 +29,7 @@ ei_add_test(NonLinearOptimization) ei_add_test(NumericalDiff) ei_add_test(autodiff) - -if (NOT CMAKE_CXX_COMPILER MATCHES "clang\\+\\+$") ei_add_test(BVH) -endif() - ei_add_test(matrix_exponential) ei_add_test(matrix_function) ei_add_test(matrix_power) @@ -73,8 +69,9 @@ if(NOT EIGEN_TEST_NO_OPENGL) find_package(GLUT) find_package(GLEW) if(OPENGL_FOUND AND GLUT_FOUND AND GLEW_FOUND) + include_directories(${OPENGL_INCLUDE_DIR} ${GLUT_INCLUDE_DIR} ${GLEW_INCLUDE_DIRS}) ei_add_property(EIGEN_TESTED_BACKENDS "OpenGL, ") - set(EIGEN_GL_LIB ${GLUT_LIBRARIES} ${GLEW_LIBRARIES}) + set(EIGEN_GL_LIB ${GLUT_LIBRARIES} ${GLEW_LIBRARIES} ${OPENGL_LIBRARIES}) ei_add_test(openglsupport "" "${EIGEN_GL_LIB}" ) else() ei_add_property(EIGEN_MISSING_BACKENDS "OpenGL, ") diff --git a/ground/gcs/src/libs/eigen/unsupported/test/FFTW.cpp b/ground/gcs/src/libs/eigen/unsupported/test/FFTW.cpp index a07bf274b..d3718e2d2 100644 --- a/ground/gcs/src/libs/eigen/unsupported/test/FFTW.cpp +++ b/ground/gcs/src/libs/eigen/unsupported/test/FFTW.cpp @@ -16,9 +16,6 @@ std::complex RandomCpx() { return std::complex( (T)(rand()/(T)RAND_MAX - . using namespace std; using namespace Eigen; -float norm(float x) {return x*x;} -double norm(double x) {return x*x;} -long double norm(long double x) {return x*x;} template < typename T> complex promote(complex x) { return complex(x.real(),x.imag()); } @@ -40,11 +37,11 @@ complex promote(long double x) { return complex( x); for (size_t k1=0;k1<(size_t)timebuf.size();++k1) { acc += promote( timebuf[k1] ) * exp( complex(0,k1*phinc) ); } - totalpower += norm(acc); + totalpower += numext::abs2(acc); complex x = promote(fftbuf[k0]); complex dif = acc - x; - difpower += norm(dif); - //cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(norm(dif)) << endl; + difpower += numext::abs2(dif); + //cerr << k0 << "\t" << acc << "\t" << x << "\t" << sqrt(numext::abs2(dif)) << endl; } cerr << "rmse:" << sqrt(difpower/totalpower) << endl; return sqrt(difpower/totalpower); @@ -57,8 +54,8 @@ complex promote(long double x) { return complex( x); long double difpower=0; size_t n = (min)( buf1.size(),buf2.size() ); for (size_t k=0;k void forward_jacobian(const Func& f) VERIFY_IS_APPROX(j, jref); } + +// TODO also check actual derivatives! void test_autodiff_scalar() { - std::cerr << foo(1,2) << "\n"; + Vector2f p = Vector2f::Random(); typedef AutoDiffScalar AD; - AD ax(1,Vector2f::UnitX()); - AD ay(2,Vector2f::UnitY()); + AD ax(p.x(),Vector2f::UnitX()); + AD ay(p.y(),Vector2f::UnitY()); AD res = foo(ax,ay); - std::cerr << res.value() << " <> " - << res.derivatives().transpose() << "\n\n"; + VERIFY_IS_APPROX(res.value(), foo(p.x(),p.y())); } +// TODO also check actual derivatives! 
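The rewritten autodiff checks above compare AutoDiffScalar results against a plain evaluation of the same function. For orientation, a self-contained sketch of the forward-mode API (made-up function and seed values, assuming the unsupported AutoDiff module):

#include <iostream>
#include <Eigen/Core>
#include <unsupported/Eigen/AutoDiff>

int main()
{
  typedef Eigen::AutoDiffScalar<Eigen::Vector2f> AD;   // value plus (d/dx, d/dy)

  // Seed each input with the matching unit derivative vector.
  AD x(1.5f,  Eigen::Vector2f::UnitX());
  AD y(-2.0f, Eigen::Vector2f::UnitY());

  AD f = x * y + 3.0f * x;                             // f(x,y) = x*y + 3x

  std::cout << "f            = " << f.value() << "\n";                   // 1.5
  std::cout << "df/dx, df/dy = " << f.derivatives().transpose() << "\n"; // 1  1.5
  return 0;
}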
void test_autodiff_vector() { - std::cerr << foo(Vector2f(1,2)) << "\n"; + Vector2f p = Vector2f::Random(); typedef AutoDiffScalar AD; typedef Matrix VectorAD; - VectorAD p(AD(1),AD(-1)); - p.x().derivatives() = Vector2f::UnitX(); - p.y().derivatives() = Vector2f::UnitY(); + VectorAD ap = p.cast(); + ap.x().derivatives() = Vector2f::UnitX(); + ap.y().derivatives() = Vector2f::UnitY(); - AD res = foo(p); - std::cerr << res.value() << " <> " - << res.derivatives().transpose() << "\n\n"; + AD res = foo(ap); + VERIFY_IS_APPROX(res.value(), foo(p)); } void test_autodiff_jacobian() { - for(int i = 0; i < g_repeat; i++) { - CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); - CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); - CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); - CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); - CALL_SUBTEST(( forward_jacobian(TestFunc1(3,3)) )); - } + CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( forward_jacobian(TestFunc1()) )); + CALL_SUBTEST(( forward_jacobian(TestFunc1(3,3)) )); } void test_autodiff() { - test_autodiff_scalar(); - test_autodiff_vector(); -// test_autodiff_jacobian(); + for(int i = 0; i < g_repeat; i++) { + CALL_SUBTEST_1( test_autodiff_scalar() ); + CALL_SUBTEST_2( test_autodiff_vector() ); + CALL_SUBTEST_3( test_autodiff_jacobian() ); + } } diff --git a/ground/gcs/src/libs/eigen/unsupported/test/minres.cpp b/ground/gcs/src/libs/eigen/unsupported/test/minres.cpp index fd12da548..509ebe09a 100644 --- a/ground/gcs/src/libs/eigen/unsupported/test/minres.cpp +++ b/ground/gcs/src/libs/eigen/unsupported/test/minres.cpp @@ -14,15 +14,32 @@ template void test_minres_T() { - MINRES, Lower, DiagonalPreconditioner > minres_colmajor_diag; - MINRES, Lower, IdentityPreconditioner > minres_colmajor_I; + MINRES, Lower|Upper, DiagonalPreconditioner > minres_colmajor_diag; + MINRES, Lower, IdentityPreconditioner > minres_colmajor_lower_I; + MINRES, Upper, IdentityPreconditioner > minres_colmajor_upper_I; // MINRES, Lower, IncompleteLUT > minres_colmajor_ilut; //minres, SSORPreconditioner > minres_colmajor_ssor; - CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_diag) ); - CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_I) ); + +// CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_diag) ); // CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ilut) ); //CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ssor) ); + + // Diagonal preconditioner + MINRES, Lower, DiagonalPreconditioner > minres_colmajor_lower_diag; + MINRES, Upper, DiagonalPreconditioner > minres_colmajor_upper_diag; + MINRES, Upper|Lower, DiagonalPreconditioner > minres_colmajor_uplo_diag; + + // call tests for SPD matrix + CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_I) ); + CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_upper_I) ); + + CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_diag) ); + CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_upper_diag) ); +// CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_uplo_diag) ); + + // TO DO: symmetric semi-definite matrix + // TO DO: symmetric indefinite matrix } void test_minres() diff --git a/ground/gcs/src/libs/eigen/unsupported/test/mpreal/mpreal.h b/ground/gcs/src/libs/eigen/unsupported/test/mpreal/mpreal.h index 0a31dc1a8..f83e52d09 100644 --- a/ground/gcs/src/libs/eigen/unsupported/test/mpreal/mpreal.h +++ 
b/ground/gcs/src/libs/eigen/unsupported/test/mpreal/mpreal.h @@ -1,59 +1,47 @@ /* - Multi-precision floating point number class for C++. + MPFR C++: Multi-precision floating point number class for C++. Based on MPFR library: http://mpfr.org Project homepage: http://www.holoborodko.com/pavel/mpfr Contact e-mail: pavel@holoborodko.com - Copyright (c) 2008-2012 Pavel Holoborodko + Copyright (c) 2008-2014 Pavel Holoborodko Contributors: Dmitriy Gubanov, Konstantin Holoborodko, Brian Gladman, Helmut Jarausch, Fokko Beekhof, Ulrich Mutze, Heinz van Saanen, Pere Constans, Peter van Hoof, Gael Guennebaud, Tsai Chia Cheng, - Alexei Zubanov, Jauhien Piatlicki, Victor Berger, John Westwood. + Alexei Zubanov, Jauhien Piatlicki, Victor Berger, John Westwood, + Petr Aleksandrov, Orion Poplawski, Charles Karney. - **************************************************************************** - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. + Licensing: + (A) MPFR C++ is under GNU General Public License ("GPL"). + + (B) Non-free licenses may also be purchased from the author, for users who + do not want their programs protected by the GPL. - This library is distributed in the hope that it will be useful, + The non-free licenses are for users that wish to use MPFR C++ in + their products but are unwilling to release their software + under the GPL (which would require them to release source code + and allow free redistribution). + + Such users can purchase an unlimited-use license from the author. + Contact us for more details. + + GNU General Public License ("GPL") copyright permissions statement: + ************************************************************************** + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. - You should have received a copy of the GNU Lesser General Public - License along with this library; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - - **************************************************************************** - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - SUCH DAMAGE. + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ #ifndef __MPREAL_H__ @@ -65,11 +53,22 @@ #include #include #include +#include #include // Options -#define MPREAL_HAVE_INT64_SUPPORT // Enable int64_t support if possible. Available only for MSVC 2010 & GCC. +// FIXME HAVE_INT64_SUPPORT leads to clashes with long int and int64_t on some systems. +//#define MPREAL_HAVE_INT64_SUPPORT // Enable int64_t support if possible. Available only for MSVC 2010 & GCC. #define MPREAL_HAVE_MSVC_DEBUGVIEW // Enable Debugger Visualizer for "Debug" builds in MSVC. +#define MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS // Enable extended std::numeric_limits specialization. + // Meaning that "digits", "round_style" and similar members are defined as functions, not constants. + // See std::numeric_limits at the end of the file for more information. + +// Library version +#define MPREAL_VERSION_MAJOR 3 +#define MPREAL_VERSION_MINOR 5 +#define MPREAL_VERSION_PATCHLEVEL 9 +#define MPREAL_VERSION_STRING "3.5.9" // Detect compiler using signatures from http://predef.sourceforge.net/ #if defined(__GNUC__) && defined(__INTEL_COMPILER) @@ -82,6 +81,32 @@ #define IsInf(x) std::isinf(x) // GNU C/C++ (and/or other compilers), just hope for C99 conformance #endif +// A Clang feature extension to determine compiler features. +#ifndef __has_feature + #define __has_feature(x) 0 +#endif + +// Detect support for r-value references (move semantic). Borrowed from Eigen. +#if (__has_feature(cxx_rvalue_references) || \ + defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \ + (defined(_MSC_VER) && _MSC_VER >= 1600)) + + #define MPREAL_HAVE_MOVE_SUPPORT + + // Use fields in mpfr_t structure to check if it was initialized / set dummy initialization + #define mpfr_is_initialized(x) (0 != (x)->_mpfr_d) + #define mpfr_set_uninitialized(x) ((x)->_mpfr_d = 0 ) +#endif + +// Detect support for explicit converters. 
+#if (__has_feature(cxx_explicit_conversions) || \ + defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \ + (defined(_MSC_VER) && _MSC_VER >= 1800)) + + #define MPREAL_HAVE_EXPLICIT_CONVERTERS +#endif + +// Detect available 64-bit capabilities #if defined(MPREAL_HAVE_INT64_SUPPORT) #define MPFR_USE_INTMAX_T // Should be defined before mpfr.h @@ -97,7 +122,7 @@ #endif #elif defined (__GNUC__) && defined(__linux__) - #if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) + #if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) || defined (__PPC64__) #undef MPREAL_HAVE_INT64_SUPPORT // Remove all shaman dances for x64 builds since #undef MPFR_USE_INTMAX_T // GCC already supports x64 as of "long int" is 64-bit integer, nothing left to do #else @@ -111,7 +136,7 @@ #endif #if defined(MPREAL_HAVE_MSVC_DEBUGVIEW) && defined(_MSC_VER) && defined(_DEBUG) -#define MPREAL_MSVC_DEBUGVIEW_CODE DebugView = toString(); + #define MPREAL_MSVC_DEBUGVIEW_CODE DebugView = toString(); #define MPREAL_MSVC_DEBUGVIEW_DATA std::string DebugView; #else #define MPREAL_MSVC_DEBUGVIEW_CODE @@ -149,7 +174,6 @@ public: // Constructors && type conversions mpreal(); mpreal(const mpreal& u); - mpreal(const mpfr_t u); mpreal(const mpf_t u); mpreal(const mpz_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd()); mpreal(const mpq_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd()); @@ -159,6 +183,10 @@ public: mpreal(const unsigned int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd()); mpreal(const long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd()); mpreal(const int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd()); + + // Construct mpreal from mpfr_t structure. + // shared = true allows to avoid deep copy, so that mpreal and 'u' share the same data & pointers. 
+ mpreal(const mpfr_t u, bool shared = false); #if defined (MPREAL_HAVE_INT64_SUPPORT) mpreal(const uint64_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd()); @@ -170,6 +198,11 @@ public: ~mpreal(); +#ifdef MPREAL_HAVE_MOVE_SUPPORT + mpreal& operator=(mpreal&& v); + mpreal(mpreal&& u); +#endif + // Operations // = // +, -, *, /, ++, --, <<, >> @@ -292,14 +325,34 @@ public: friend bool operator == (const mpreal& a, const double b); // Type Conversion operators + bool toBool (mp_rnd_t mode = GMP_RNDZ) const; long toLong (mp_rnd_t mode = GMP_RNDZ) const; unsigned long toULong (mp_rnd_t mode = GMP_RNDZ) const; + float toFloat (mp_rnd_t mode = GMP_RNDN) const; double toDouble (mp_rnd_t mode = GMP_RNDN) const; long double toLDouble (mp_rnd_t mode = GMP_RNDN) const; +#if defined (MPREAL_HAVE_EXPLICIT_CONVERTERS) + explicit operator bool () const { return toBool(); } + explicit operator int () const { return toLong(); } + explicit operator long () const { return toLong(); } + explicit operator long long () const { return toLong(); } + explicit operator unsigned () const { return toULong(); } + explicit operator unsigned long () const { return toULong(); } + explicit operator unsigned long long () const { return toULong(); } + explicit operator float () const { return toFloat(); } + explicit operator double () const { return toDouble(); } + explicit operator long double () const { return toLDouble(); } +#endif + #if defined (MPREAL_HAVE_INT64_SUPPORT) int64_t toInt64 (mp_rnd_t mode = GMP_RNDZ) const; uint64_t toUInt64 (mp_rnd_t mode = GMP_RNDZ) const; + + #if defined (MPREAL_HAVE_EXPLICIT_CONVERTERS) + explicit operator int64_t () const { return toInt64(); } + explicit operator uint64_t () const { return toUInt64(); } + #endif #endif // Get raw pointers so that mpreal can be directly used in raw mpfr_* functions @@ -308,121 +361,125 @@ public: ::mpfr_srcptr mpfr_srcptr() const; // Convert mpreal to string with n significant digits in base b - // n = 0 -> convert with the maximum available digits - std::string toString(int n = 0, int b = 10, mp_rnd_t mode = mpreal::get_default_rnd()) const; + // n = -1 -> convert with the maximum available digits + std::string toString(int n = -1, int b = 10, mp_rnd_t mode = mpreal::get_default_rnd()) const; #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) - std::string toString(const std::string& format) const; + std::string toString(const std::string& format) const; #endif - // Math Functions - friend const mpreal sqr (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal sqrt(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal sqrt(const unsigned long int v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal cbrt(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal root(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal pow (const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal pow (const mpreal& a, const mpz_t b, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal pow (const mpreal& a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal pow (const mpreal& a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal pow (const unsigned long int a, const mpreal& b, mp_rnd_t rnd_mode = 
mpreal::get_default_rnd()); - friend const mpreal pow (const unsigned long int a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal fabs(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + std::ostream& output(std::ostream& os) const; - friend const mpreal abs(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal dim(const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend inline const mpreal mul_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend inline const mpreal mul_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend inline const mpreal div_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + // Math Functions + friend const mpreal sqr (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal sqrt(const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal sqrt(const unsigned long int v, mp_rnd_t rnd_mode); + friend const mpreal cbrt(const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal root(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode); + friend const mpreal pow (const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode); + friend const mpreal pow (const mpreal& a, const mpz_t b, mp_rnd_t rnd_mode); + friend const mpreal pow (const mpreal& a, const unsigned long int b, mp_rnd_t rnd_mode); + friend const mpreal pow (const mpreal& a, const long int b, mp_rnd_t rnd_mode); + friend const mpreal pow (const unsigned long int a, const mpreal& b, mp_rnd_t rnd_mode); + friend const mpreal pow (const unsigned long int a, const unsigned long int b, mp_rnd_t rnd_mode); + friend const mpreal fabs(const mpreal& v, mp_rnd_t rnd_mode); + + friend const mpreal abs(const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal dim(const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode); + friend inline const mpreal mul_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode); + friend inline const mpreal mul_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode); + friend inline const mpreal div_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode); + friend inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode); friend int cmpabs(const mpreal& a,const mpreal& b); - friend const mpreal log (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal log2 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal log10(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal exp (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal exp2 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal exp10(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal log1p(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal expm1(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend const mpreal log (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal log2 (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal log10(const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal exp (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal exp2 (const mpreal& v, mp_rnd_t rnd_mode); 
+ friend const mpreal exp10(const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal log1p(const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal expm1(const mpreal& v, mp_rnd_t rnd_mode); - friend const mpreal cos(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal sin(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal tan(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal sec(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal csc(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal cot(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend int sin_cos(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend const mpreal cos(const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal sin(const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal tan(const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal sec(const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal csc(const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal cot(const mpreal& v, mp_rnd_t rnd_mode); + friend int sin_cos(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode); - friend const mpreal acos (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal asin (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal atan (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal atan2 (const mpreal& y, const mpreal& x, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal acot (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal asec (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal acsc (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend const mpreal acos (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal asin (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal atan (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal atan2 (const mpreal& y, const mpreal& x, mp_rnd_t rnd_mode); + friend const mpreal acot (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal asec (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal acsc (const mpreal& v, mp_rnd_t rnd_mode); - friend const mpreal cosh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal sinh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal tanh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal sech (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal csch (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal coth (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal acosh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal asinh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal atanh (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal acoth (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal asech (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal acsch (const mpreal& v, mp_rnd_t rnd_mode = 
mpreal::get_default_rnd()); + friend const mpreal cosh (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal sinh (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal tanh (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal sech (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal csch (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal coth (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal acosh (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal asinh (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal atanh (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal acoth (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal asech (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal acsch (const mpreal& v, mp_rnd_t rnd_mode); - friend const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); - friend const mpreal fac_ui (unsigned long int v, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal eint (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend const mpreal fac_ui (unsigned long int v, mp_prec_t prec, mp_rnd_t rnd_mode); + friend const mpreal eint (const mpreal& v, mp_rnd_t rnd_mode); - friend const mpreal gamma (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal lngamma (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal lgamma (const mpreal& v, int *signp = 0, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal zeta (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal erf (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal erfc (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal besselj0 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal besselj1 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal besseljn (long n, const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal bessely0 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal bessely1 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal besselyn (long n, const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend const mpreal gamma (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal lngamma (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal lgamma (const mpreal& v, int *signp, mp_rnd_t rnd_mode); + friend const mpreal zeta (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal erf (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal erfc (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal besselj0 (const mpreal& v, mp_rnd_t rnd_mode); + friend const 
mpreal besselj1 (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal besseljn (long n, const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal bessely0 (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal bessely1 (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal besselyn (long n, const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode); + friend const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode); + friend const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode); + friend const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode); friend int sgn(const mpreal& v); // returns -1 or +1 // MPFR 2.4.0 Specifics #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) - friend int sinh_cosh (mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal li2 (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal fmod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal rec_sqrt (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend int sinh_cosh (mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal li2 (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal fmod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); + friend const mpreal rec_sqrt (const mpreal& v, mp_rnd_t rnd_mode); // MATLAB's semantic equivalents - friend const mpreal rem (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); // Remainder after division - friend const mpreal mod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); // Modulus after division + friend const mpreal rem (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); // Remainder after division + friend const mpreal mod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); // Modulus after division #endif // MPFR 3.0.0 Specifics #if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0)) - friend const mpreal digamma (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal ai (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal urandom (gmp_randstate_t& state, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); // use gmp_randinit_default() to init state, gmp_randclear() to clear + friend const mpreal digamma (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal ai (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal urandom (gmp_randstate_t& state, mp_rnd_t rnd_mode); // use gmp_randinit_default() to init state, gmp_randclear() to clear + friend const mpreal grandom (gmp_randstate_t& state, mp_rnd_t rnd_mode); // use gmp_randinit_default() to init state, gmp_randclear() to clear + friend const mpreal grandom (unsigned int seed); #endif // Uniformly distributed random number generation in [0,1] using // Mersenne-Twister algorithm by default. // Use parameter to setup seed, e.g.: random((unsigned)time(NULL)) // Check urandom() for more precise control. 
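The recurring edit in this hunk moves the default arguments (mpreal::get_default_rnd(), mpreal::get_default_prec()) off the in-class friend declarations and onto the namespace-scope inline definitions further down in the file; in ISO C++ a friend declaration that supplies a default argument must be the only declaration of that function, so carrying the defaults on the single out-of-class definition keeps existing call sites compiling unchanged. A minimal illustrative sketch of that idiom — Widget, scale and default_mode are hypothetical stand-ins, not names from mpreal.h:

    // Illustrative sketch only: hypothetical Widget/scale pair standing in
    // for mpreal and one of its friend functions.
    #include <iostream>

    class Widget
    {
    public:
        explicit Widget(double x) : v(x) {}
        static int default_mode() { return 0; }

        // The friend declaration no longer carries a default argument...
        friend const Widget scale(const Widget& w, int mode);

    private:
        double v;
    };

    // ...the default is supplied once, on the namespace-scope definition,
    // so call sites such as scale(w) keep working without the second argument.
    inline const Widget scale(const Widget& w, int mode = Widget::default_mode())
    {
        return Widget(w.v * (mode == 0 ? 2.0 : 1.0));   // friend: may touch w.v
    }

    int main()
    {
        Widget w(1.5);
        std::cout << scale(w).value() << "\n";   // hypothetical accessor omitted above; see note
        return 0;
    }

(The main() above only shows the call shape; a real build would need an accessor for v or direct output of a public member.)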
- friend const mpreal random(unsigned int seed = 0); - + friend const mpreal random(unsigned int seed); + // Exponent and mantissa manipulation friend const mpreal frexp(const mpreal& v, mp_exp_t* exp); friend const mpreal ldexp(const mpreal& v, mp_exp_t exp); @@ -433,31 +490,31 @@ public: // Constants // don't forget to call mpfr_free_cache() for every thread where you are using const-functions - friend const mpreal const_log2 (mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal const_pi (mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal const_euler (mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal const_catalan (mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend const mpreal const_log2 (mp_prec_t prec, mp_rnd_t rnd_mode); + friend const mpreal const_pi (mp_prec_t prec, mp_rnd_t rnd_mode); + friend const mpreal const_euler (mp_prec_t prec, mp_rnd_t rnd_mode); + friend const mpreal const_catalan (mp_prec_t prec, mp_rnd_t rnd_mode); // returns +inf iff sign>=0 otherwise -inf - friend const mpreal const_infinity(int sign = 1, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend const mpreal const_infinity(int sign, mp_prec_t prec); // Output/ Input friend std::ostream& operator<<(std::ostream& os, const mpreal& v); friend std::istream& operator>>(std::istream& is, mpreal& v); // Integer Related Functions - friend const mpreal rint (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend const mpreal rint (const mpreal& v, mp_rnd_t rnd_mode); friend const mpreal ceil (const mpreal& v); friend const mpreal floor(const mpreal& v); friend const mpreal round(const mpreal& v); friend const mpreal trunc(const mpreal& v); - friend const mpreal rint_ceil (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal rint_floor (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal rint_round (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal rint_trunc (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal frac (const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal remainder ( const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - friend const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend const mpreal rint_ceil (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal rint_floor (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal rint_round (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal rint_trunc (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal frac (const mpreal& v, mp_rnd_t rnd_mode); + friend const mpreal remainder ( const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); + friend const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); // Miscellaneous Functions friend const mpreal nexttoward (const mpreal& x, const mpreal& y); @@ -524,19 +581,22 @@ public: // Efficient swapping of two mpreal values - needed for std algorithms friend void swap(mpreal& x, mpreal& y); - friend const mpreal fmax(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); - 
friend const mpreal fmin(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + friend const mpreal fmax(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); + friend const mpreal fmin(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); private: // Human friendly Debug Preview in Visual Studio. // Put one of these lines: // - // mpfr::mpreal= ; Show value only + // mpfr::mpreal= ; Show value only // mpfr::mpreal=, bits ; Show value & precision // // at the beginning of // [Visual Studio Installation Folder]\Common7\Packages\Debugger\autoexp.dat MPREAL_MSVC_DEBUGVIEW_DATA + + // "Smart" resources deallocation. Checks if instance initialized before deletion. + void clear(::mpfr_ptr); }; ////////////////////////////////////////////////////////////////////////// @@ -551,64 +611,89 @@ public: // Default constructor: creates mp number and initializes it to 0. inline mpreal::mpreal() { - mpfr_init2(mp,mpreal::get_default_prec()); - mpfr_set_ui(mp,0,mpreal::get_default_rnd()); + mpfr_init2 (mpfr_ptr(), mpreal::get_default_prec()); + mpfr_set_ui(mpfr_ptr(), 0, mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; } inline mpreal::mpreal(const mpreal& u) { - mpfr_init2(mp,mpfr_get_prec(u.mp)); - mpfr_set(mp,u.mp,mpreal::get_default_rnd()); + mpfr_init2(mpfr_ptr(),mpfr_get_prec(u.mpfr_srcptr())); + mpfr_set (mpfr_ptr(),u.mpfr_srcptr(),mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; } -inline mpreal::mpreal(const mpfr_t u) +#ifdef MPREAL_HAVE_MOVE_SUPPORT +inline mpreal::mpreal(mpreal&& other) { - mpfr_init2(mp,mpfr_get_prec(u)); - mpfr_set(mp,u,mpreal::get_default_rnd()); + mpfr_set_uninitialized(mpfr_ptr()); // make sure "other" holds no pinter to actual data + mpfr_swap(mpfr_ptr(), other.mpfr_ptr()); + + MPREAL_MSVC_DEBUGVIEW_CODE; +} + +inline mpreal& mpreal::operator=(mpreal&& other) +{ + mpfr_swap(mpfr_ptr(), other.mpfr_ptr()); + + MPREAL_MSVC_DEBUGVIEW_CODE; + return *this; +} +#endif + +inline mpreal::mpreal(const mpfr_t u, bool shared) +{ + if(shared) + { + std::memcpy(mpfr_ptr(), u, sizeof(mpfr_t)); + } + else + { + mpfr_init2(mpfr_ptr(), mpfr_get_prec(u)); + mpfr_set (mpfr_ptr(), u, mpreal::get_default_rnd()); + } MPREAL_MSVC_DEBUGVIEW_CODE; } inline mpreal::mpreal(const mpf_t u) { - mpfr_init2(mp,(mp_prec_t) mpf_get_prec(u)); // (gmp: mp_bitcnt_t) unsigned long -> long (mpfr: mp_prec_t) - mpfr_set_f(mp,u,mpreal::get_default_rnd()); + mpfr_init2(mpfr_ptr(),(mp_prec_t) mpf_get_prec(u)); // (gmp: mp_bitcnt_t) unsigned long -> long (mpfr: mp_prec_t) + mpfr_set_f(mpfr_ptr(),u,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; } inline mpreal::mpreal(const mpz_t u, mp_prec_t prec, mp_rnd_t mode) { - mpfr_init2(mp,prec); - mpfr_set_z(mp,u,mode); + mpfr_init2(mpfr_ptr(), prec); + mpfr_set_z(mpfr_ptr(), u, mode); MPREAL_MSVC_DEBUGVIEW_CODE; } inline mpreal::mpreal(const mpq_t u, mp_prec_t prec, mp_rnd_t mode) { - mpfr_init2(mp,prec); - mpfr_set_q(mp,u,mode); + mpfr_init2(mpfr_ptr(), prec); + mpfr_set_q(mpfr_ptr(), u, mode); MPREAL_MSVC_DEBUGVIEW_CODE; } inline mpreal::mpreal(const double u, mp_prec_t prec, mp_rnd_t mode) { - mpfr_init2(mp, prec); + mpfr_init2(mpfr_ptr(), prec); #if (MPREAL_DOUBLE_BITS_OVERFLOW > -1) - if(fits_in_bits(u, MPREAL_DOUBLE_BITS_OVERFLOW)) - { - mpfr_set_d(mp, u, mode); - }else - throw conversion_overflow(); + if(fits_in_bits(u, MPREAL_DOUBLE_BITS_OVERFLOW)) + { + mpfr_set_d(mpfr_ptr(), u, mode); + }else + throw conversion_overflow(); #else - mpfr_set_d(mp, u, mode); + mpfr_set_d(mpfr_ptr(), u, mode); #endif 
MPREAL_MSVC_DEBUGVIEW_CODE; @@ -616,40 +701,40 @@ inline mpreal::mpreal(const double u, mp_prec_t prec, mp_rnd_t mode) inline mpreal::mpreal(const long double u, mp_prec_t prec, mp_rnd_t mode) { - mpfr_init2(mp,prec); - mpfr_set_ld(mp,u,mode); + mpfr_init2 (mpfr_ptr(), prec); + mpfr_set_ld(mpfr_ptr(), u, mode); MPREAL_MSVC_DEBUGVIEW_CODE; } inline mpreal::mpreal(const unsigned long int u, mp_prec_t prec, mp_rnd_t mode) { - mpfr_init2(mp,prec); - mpfr_set_ui(mp,u,mode); + mpfr_init2 (mpfr_ptr(), prec); + mpfr_set_ui(mpfr_ptr(), u, mode); MPREAL_MSVC_DEBUGVIEW_CODE; } inline mpreal::mpreal(const unsigned int u, mp_prec_t prec, mp_rnd_t mode) { - mpfr_init2(mp,prec); - mpfr_set_ui(mp,u,mode); + mpfr_init2 (mpfr_ptr(), prec); + mpfr_set_ui(mpfr_ptr(), u, mode); MPREAL_MSVC_DEBUGVIEW_CODE; } inline mpreal::mpreal(const long int u, mp_prec_t prec, mp_rnd_t mode) { - mpfr_init2(mp,prec); - mpfr_set_si(mp,u,mode); + mpfr_init2 (mpfr_ptr(), prec); + mpfr_set_si(mpfr_ptr(), u, mode); MPREAL_MSVC_DEBUGVIEW_CODE; } inline mpreal::mpreal(const int u, mp_prec_t prec, mp_rnd_t mode) { - mpfr_init2(mp,prec); - mpfr_set_si(mp,u,mode); + mpfr_init2 (mpfr_ptr(), prec); + mpfr_set_si(mpfr_ptr(), u, mode); MPREAL_MSVC_DEBUGVIEW_CODE; } @@ -657,16 +742,16 @@ inline mpreal::mpreal(const int u, mp_prec_t prec, mp_rnd_t mode) #if defined (MPREAL_HAVE_INT64_SUPPORT) inline mpreal::mpreal(const uint64_t u, mp_prec_t prec, mp_rnd_t mode) { - mpfr_init2(mp,prec); - mpfr_set_uj(mp, u, mode); + mpfr_init2 (mpfr_ptr(), prec); + mpfr_set_uj(mpfr_ptr(), u, mode); MPREAL_MSVC_DEBUGVIEW_CODE; } inline mpreal::mpreal(const int64_t u, mp_prec_t prec, mp_rnd_t mode) { - mpfr_init2(mp,prec); - mpfr_set_sj(mp, u, mode); + mpfr_init2 (mpfr_ptr(), prec); + mpfr_set_sj(mpfr_ptr(), u, mode); MPREAL_MSVC_DEBUGVIEW_CODE; } @@ -674,23 +759,31 @@ inline mpreal::mpreal(const int64_t u, mp_prec_t prec, mp_rnd_t mode) inline mpreal::mpreal(const char* s, mp_prec_t prec, int base, mp_rnd_t mode) { - mpfr_init2(mp, prec); - mpfr_set_str(mp, s, base, mode); + mpfr_init2 (mpfr_ptr(), prec); + mpfr_set_str(mpfr_ptr(), s, base, mode); MPREAL_MSVC_DEBUGVIEW_CODE; } inline mpreal::mpreal(const std::string& s, mp_prec_t prec, int base, mp_rnd_t mode) { - mpfr_init2(mp, prec); - mpfr_set_str(mp, s.c_str(), base, mode); + mpfr_init2 (mpfr_ptr(), prec); + mpfr_set_str(mpfr_ptr(), s.c_str(), base, mode); MPREAL_MSVC_DEBUGVIEW_CODE; } +inline void mpreal::clear(::mpfr_ptr x) +{ +#ifdef MPREAL_HAVE_MOVE_SUPPORT + if(mpfr_is_initialized(x)) +#endif + mpfr_clear(x); +} + inline mpreal::~mpreal() { - mpfr_clear(mp); + clear(mpfr_ptr()); } // internal namespace needed for template magic @@ -761,6 +854,9 @@ const mpreal sqrt(const int v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); const mpreal sqrt(const long double v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); const mpreal sqrt(const double v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); +// abs +inline const mpreal abs(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()); + ////////////////////////////////////////////////////////////////////////// // pow const mpreal pow(const mpreal& a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); @@ -813,6 +909,11 @@ const mpreal pow(const double a, const unsigned int b, mp_rnd_t rnd_mode = mprea const mpreal pow(const double a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); const mpreal pow(const double a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); +inline const mpreal mul_2ui(const mpreal& v, 
unsigned long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); +inline const mpreal mul_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); +inline const mpreal div_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); +inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd()); + ////////////////////////////////////////////////////////////////////////// // Estimate machine epsilon for the given precision // Returns smallest eps such that 1.0 + eps != 1.0 @@ -860,15 +961,15 @@ inline mpreal& mpreal::operator=(const mpreal& v) { if (this != &v) { - mp_prec_t tp = mpfr_get_prec(mp); - mp_prec_t vp = mpfr_get_prec(v.mp); + mp_prec_t tp = mpfr_get_prec( mpfr_srcptr()); + mp_prec_t vp = mpfr_get_prec(v.mpfr_srcptr()); - if(tp != vp){ - mpfr_clear(mp); - mpfr_init2(mp, vp); - } + if(tp != vp){ + clear(mpfr_ptr()); + mpfr_init2(mpfr_ptr(), vp); + } - mpfr_set(mp, v.mp, mpreal::get_default_rnd()); + mpfr_set(mpfr_ptr(), v.mpfr_srcptr(), mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; } @@ -877,7 +978,7 @@ inline mpreal& mpreal::operator=(const mpreal& v) inline mpreal& mpreal::operator=(const mpf_t v) { - mpfr_set_f(mp, v, mpreal::get_default_rnd()); + mpfr_set_f(mpfr_ptr(), v, mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; @@ -885,7 +986,7 @@ inline mpreal& mpreal::operator=(const mpf_t v) inline mpreal& mpreal::operator=(const mpz_t v) { - mpfr_set_z(mp, v, mpreal::get_default_rnd()); + mpfr_set_z(mpfr_ptr(), v, mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; @@ -893,7 +994,7 @@ inline mpreal& mpreal::operator=(const mpz_t v) inline mpreal& mpreal::operator=(const mpq_t v) { - mpfr_set_q(mp, v, mpreal::get_default_rnd()); + mpfr_set_q(mpfr_ptr(), v, mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; @@ -901,7 +1002,7 @@ inline mpreal& mpreal::operator=(const mpq_t v) inline mpreal& mpreal::operator=(const long double v) { - mpfr_set_ld(mp, v, mpreal::get_default_rnd()); + mpfr_set_ld(mpfr_ptr(), v, mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; @@ -910,22 +1011,22 @@ inline mpreal& mpreal::operator=(const long double v) inline mpreal& mpreal::operator=(const double v) { #if (MPREAL_DOUBLE_BITS_OVERFLOW > -1) - if(fits_in_bits(v, MPREAL_DOUBLE_BITS_OVERFLOW)) - { - mpfr_set_d(mp,v,mpreal::get_default_rnd()); - }else - throw conversion_overflow(); + if(fits_in_bits(v, MPREAL_DOUBLE_BITS_OVERFLOW)) + { + mpfr_set_d(mpfr_ptr(),v,mpreal::get_default_rnd()); + }else + throw conversion_overflow(); #else - mpfr_set_d(mp,v,mpreal::get_default_rnd()); + mpfr_set_d(mpfr_ptr(),v,mpreal::get_default_rnd()); #endif - MPREAL_MSVC_DEBUGVIEW_CODE; + MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator=(const unsigned long int v) { - mpfr_set_ui(mp, v, mpreal::get_default_rnd()); + mpfr_set_ui(mpfr_ptr(), v, mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; @@ -933,7 +1034,7 @@ inline mpreal& mpreal::operator=(const unsigned long int v) inline mpreal& mpreal::operator=(const unsigned int v) { - mpfr_set_ui(mp, v, mpreal::get_default_rnd()); + mpfr_set_ui(mpfr_ptr(), v, mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; @@ -941,7 +1042,7 @@ inline mpreal& mpreal::operator=(const unsigned int v) inline mpreal& mpreal::operator=(const long int v) { - mpfr_set_si(mp, v, mpreal::get_default_rnd()); + mpfr_set_si(mpfr_ptr(), v, 
mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; @@ -949,7 +1050,7 @@ inline mpreal& mpreal::operator=(const long int v) inline mpreal& mpreal::operator=(const int v) { - mpfr_set_si(mp, v, mpreal::get_default_rnd()); + mpfr_set_si(mpfr_ptr(), v, mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; @@ -966,15 +1067,15 @@ inline mpreal& mpreal::operator=(const char* s) mpfr_t t; - mpfr_init2(t, mpfr_get_prec(mp)); + mpfr_init2(t, mpfr_get_prec(mpfr_srcptr())); if(0 == mpfr_set_str(t, s, 10, mpreal::get_default_rnd())) { - mpfr_set(mp, t, mpreal::get_default_rnd()); + mpfr_set(mpfr_ptr(), t, mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; } - mpfr_clear(t); + clear(t); return *this; } @@ -989,15 +1090,15 @@ inline mpreal& mpreal::operator=(const std::string& s) mpfr_t t; - mpfr_init2(t, mpfr_get_prec(mp)); + mpfr_init2(t, mpfr_get_prec(mpfr_srcptr())); if(0 == mpfr_set_str(t, s.c_str(), 10, mpreal::get_default_rnd())) { - mpfr_set(mp, t, mpreal::get_default_rnd()); + mpfr_set(mpfr_ptr(), t, mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; } - mpfr_clear(t); + clear(t); return *this; } @@ -1006,7 +1107,7 @@ inline mpreal& mpreal::operator=(const std::string& s) // + Addition inline mpreal& mpreal::operator+=(const mpreal& v) { - mpfr_add(mp,mp,v.mp,mpreal::get_default_rnd()); + mpfr_add(mpfr_ptr(), mpfr_srcptr(), v.mpfr_srcptr(), mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -1020,14 +1121,14 @@ inline mpreal& mpreal::operator+=(const mpf_t u) inline mpreal& mpreal::operator+=(const mpz_t u) { - mpfr_add_z(mp,mp,u,mpreal::get_default_rnd()); + mpfr_add_z(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator+=(const mpq_t u) { - mpfr_add_q(mp,mp,u,mpreal::get_default_rnd()); + mpfr_add_q(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -1042,7 +1143,7 @@ inline mpreal& mpreal::operator+= (const long double u) inline mpreal& mpreal::operator+= (const double u) { #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) - mpfr_add_d(mp,mp,u,mpreal::get_default_rnd()); + mpfr_add_d(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd()); #else *this += mpreal(u); #endif @@ -1053,28 +1154,28 @@ inline mpreal& mpreal::operator+= (const double u) inline mpreal& mpreal::operator+=(const unsigned long int u) { - mpfr_add_ui(mp,mp,u,mpreal::get_default_rnd()); + mpfr_add_ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator+=(const unsigned int u) { - mpfr_add_ui(mp,mp,u,mpreal::get_default_rnd()); + mpfr_add_ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator+=(const long int u) { - mpfr_add_si(mp,mp,u,mpreal::get_default_rnd()); + mpfr_add_si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator+=(const int u) { - mpfr_add_si(mp,mp,u,mpreal::get_default_rnd()); + mpfr_add_si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -1094,9 +1195,9 @@ inline const mpreal mpreal::operator+()const { return mpreal(*this); } inline const mpreal operator+(const mpreal& a, const mpreal& b) { - mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr()))); - mpfr_add(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), 
mpreal::get_default_rnd()); - return c; + mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr()))); + mpfr_add(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd()); + return c; } inline mpreal& mpreal::operator++() @@ -1127,21 +1228,21 @@ inline const mpreal mpreal::operator-- (int) // - Subtraction inline mpreal& mpreal::operator-=(const mpreal& v) { - mpfr_sub(mp,mp,v.mp,mpreal::get_default_rnd()); + mpfr_sub(mpfr_ptr(),mpfr_srcptr(),v.mpfr_srcptr(),mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator-=(const mpz_t v) { - mpfr_sub_z(mp,mp,v,mpreal::get_default_rnd()); + mpfr_sub_z(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator-=(const mpq_t v) { - mpfr_sub_q(mp,mp,v,mpreal::get_default_rnd()); + mpfr_sub_q(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -1156,7 +1257,7 @@ inline mpreal& mpreal::operator-=(const long double v) inline mpreal& mpreal::operator-=(const double v) { #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) - mpfr_sub_d(mp,mp,v,mpreal::get_default_rnd()); + mpfr_sub_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); #else *this -= mpreal(v); #endif @@ -1167,28 +1268,28 @@ inline mpreal& mpreal::operator-=(const double v) inline mpreal& mpreal::operator-=(const unsigned long int v) { - mpfr_sub_ui(mp,mp,v,mpreal::get_default_rnd()); + mpfr_sub_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator-=(const unsigned int v) { - mpfr_sub_ui(mp,mp,v,mpreal::get_default_rnd()); + mpfr_sub_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator-=(const long int v) { - mpfr_sub_si(mp,mp,v,mpreal::get_default_rnd()); + mpfr_sub_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator-=(const int v) { - mpfr_sub_si(mp,mp,v,mpreal::get_default_rnd()); + mpfr_sub_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -1196,15 +1297,15 @@ inline mpreal& mpreal::operator-=(const int v) inline const mpreal mpreal::operator-()const { mpreal u(*this); - mpfr_neg(u.mp,u.mp,mpreal::get_default_rnd()); + mpfr_neg(u.mpfr_ptr(),u.mpfr_srcptr(),mpreal::get_default_rnd()); return u; } inline const mpreal operator-(const mpreal& a, const mpreal& b) { - mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr()))); - mpfr_sub(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd()); - return c; + mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr()))); + mpfr_sub(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd()); + return c; } inline const mpreal operator-(const double b, const mpreal& a) @@ -1252,21 +1353,21 @@ inline const mpreal operator-(const int b, const mpreal& a) // * Multiplication inline mpreal& mpreal::operator*= (const mpreal& v) { - mpfr_mul(mp,mp,v.mp,mpreal::get_default_rnd()); + mpfr_mul(mpfr_ptr(),mpfr_srcptr(),v.mpfr_srcptr(),mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator*=(const mpz_t v) { - mpfr_mul_z(mp,mp,v,mpreal::get_default_rnd()); + mpfr_mul_z(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); 
MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator*=(const mpq_t v) { - mpfr_mul_q(mp,mp,v,mpreal::get_default_rnd()); + mpfr_mul_q(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -1281,7 +1382,7 @@ inline mpreal& mpreal::operator*=(const long double v) inline mpreal& mpreal::operator*=(const double v) { #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) - mpfr_mul_d(mp,mp,v,mpreal::get_default_rnd()); + mpfr_mul_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); #else *this *= mpreal(v); #endif @@ -1291,58 +1392,58 @@ inline mpreal& mpreal::operator*=(const double v) inline mpreal& mpreal::operator*=(const unsigned long int v) { - mpfr_mul_ui(mp,mp,v,mpreal::get_default_rnd()); + mpfr_mul_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator*=(const unsigned int v) { - mpfr_mul_ui(mp,mp,v,mpreal::get_default_rnd()); + mpfr_mul_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator*=(const long int v) { - mpfr_mul_si(mp,mp,v,mpreal::get_default_rnd()); + mpfr_mul_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator*=(const int v) { - mpfr_mul_si(mp,mp,v,mpreal::get_default_rnd()); + mpfr_mul_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline const mpreal operator*(const mpreal& a, const mpreal& b) { - mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr()))); - mpfr_mul(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd()); - return c; + mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr()))); + mpfr_mul(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd()); + return c; } ////////////////////////////////////////////////////////////////////////// // / Division inline mpreal& mpreal::operator/=(const mpreal& v) { - mpfr_div(mp,mp,v.mp,mpreal::get_default_rnd()); + mpfr_div(mpfr_ptr(),mpfr_srcptr(),v.mpfr_srcptr(),mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator/=(const mpz_t v) { - mpfr_div_z(mp,mp,v,mpreal::get_default_rnd()); + mpfr_div_z(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator/=(const mpq_t v) { - mpfr_div_q(mp,mp,v,mpreal::get_default_rnd()); + mpfr_div_q(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -1357,7 +1458,7 @@ inline mpreal& mpreal::operator/=(const long double v) inline mpreal& mpreal::operator/=(const double v) { #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) - mpfr_div_d(mp,mp,v,mpreal::get_default_rnd()); + mpfr_div_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); #else *this /= mpreal(v); #endif @@ -1367,72 +1468,72 @@ inline mpreal& mpreal::operator/=(const double v) inline mpreal& mpreal::operator/=(const unsigned long int v) { - mpfr_div_ui(mp,mp,v,mpreal::get_default_rnd()); + mpfr_div_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator/=(const unsigned int v) { - mpfr_div_ui(mp,mp,v,mpreal::get_default_rnd()); + mpfr_div_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } 
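The arithmetic-operator changes in these hunks are mechanical: every raw access to the mp member becomes the mpfr_ptr()/mpfr_srcptr() accessor, and the binary operators keep building their result at the wider of the two operand precisions before a single MPFR call fills it in. A compressed sketch of that allocate-at-max-precision idiom against the plain MPFR C API (assuming MPFR 3.x for MPFR_RNDN; the wrapper itself passes mpreal::get_default_rnd() instead):

    // Sketch of the shape of operator/(const mpreal&, const mpreal&),
    // expressed with the raw MPFR C API rather than the wrapper.
    #include <mpfr.h>
    #include <algorithm>

    // Divide a by b into 'out', sizing 'out' to the wider operand precision.
    // The caller owns 'out' and must mpfr_clear() it afterwards.
    void divide_at_max_prec(mpfr_t out, const mpfr_t a, const mpfr_t b)
    {
        const mpfr_prec_t p = std::max(mpfr_get_prec(a), mpfr_get_prec(b));
        mpfr_init2(out, p);                 // result carries max(prec(a), prec(b))
        mpfr_div(out, a, b, MPFR_RNDN);     // mpreal passes get_default_rnd() here
    }

Sizing the result to the wider operand means mixed-precision expressions never silently drop bits of the more precise input; the rounding mode only decides the final bit of the quotient.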
inline mpreal& mpreal::operator/=(const long int v) { - mpfr_div_si(mp,mp,v,mpreal::get_default_rnd()); + mpfr_div_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator/=(const int v) { - mpfr_div_si(mp,mp,v,mpreal::get_default_rnd()); + mpfr_div_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline const mpreal operator/(const mpreal& a, const mpreal& b) { - mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr()))); - mpfr_div(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd()); - return c; + mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_srcptr()), mpfr_get_prec(b.mpfr_srcptr()))); + mpfr_div(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd()); + return c; } inline const mpreal operator/(const unsigned long int b, const mpreal& a) { - mpreal x(0, mpfr_get_prec(a.mpfr_ptr())); + mpreal x(0, mpfr_get_prec(a.mpfr_srcptr())); mpfr_ui_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd()); return x; } inline const mpreal operator/(const unsigned int b, const mpreal& a) { - mpreal x(0, mpfr_get_prec(a.mpfr_ptr())); + mpreal x(0, mpfr_get_prec(a.mpfr_srcptr())); mpfr_ui_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd()); return x; } inline const mpreal operator/(const long int b, const mpreal& a) { - mpreal x(0, mpfr_get_prec(a.mpfr_ptr())); - mpfr_si_div(x.mpfr_ptr(), b, a.mpfr_srcptr(),mpreal::get_default_rnd()); + mpreal x(0, mpfr_get_prec(a.mpfr_srcptr())); + mpfr_si_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd()); return x; } inline const mpreal operator/(const int b, const mpreal& a) { - mpreal x(0, mpfr_get_prec(a.mpfr_ptr())); - mpfr_si_div(x.mpfr_ptr(), b, a.mpfr_srcptr(),mpreal::get_default_rnd()); + mpreal x(0, mpfr_get_prec(a.mpfr_srcptr())); + mpfr_si_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd()); return x; } inline const mpreal operator/(const double b, const mpreal& a) { #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) - mpreal x(0, mpfr_get_prec(a.mpfr_ptr())); - mpfr_d_div(x.mpfr_ptr(), b, a.mpfr_srcptr(),mpreal::get_default_rnd()); + mpreal x(0, mpfr_get_prec(a.mpfr_srcptr())); + mpfr_d_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd()); return x; #else mpreal x(0, mpfr_get_prec(a.mpfr_ptr())); @@ -1445,56 +1546,56 @@ inline const mpreal operator/(const double b, const mpreal& a) // Shifts operators - Multiplication/Division by power of 2 inline mpreal& mpreal::operator<<=(const unsigned long int u) { - mpfr_mul_2ui(mp,mp,u,mpreal::get_default_rnd()); + mpfr_mul_2ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator<<=(const unsigned int u) { - mpfr_mul_2ui(mp,mp,static_cast(u),mpreal::get_default_rnd()); + mpfr_mul_2ui(mpfr_ptr(),mpfr_srcptr(),static_cast(u),mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator<<=(const long int u) { - mpfr_mul_2si(mp,mp,u,mpreal::get_default_rnd()); + mpfr_mul_2si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator<<=(const int u) { - mpfr_mul_2si(mp,mp,static_cast(u),mpreal::get_default_rnd()); + mpfr_mul_2si(mpfr_ptr(),mpfr_srcptr(),static_cast(u),mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator>>=(const unsigned 
long int u) { - mpfr_div_2ui(mp,mp,u,mpreal::get_default_rnd()); + mpfr_div_2ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator>>=(const unsigned int u) { - mpfr_div_2ui(mp,mp,static_cast(u),mpreal::get_default_rnd()); + mpfr_div_2ui(mpfr_ptr(),mpfr_srcptr(),static_cast(u),mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator>>=(const long int u) { - mpfr_div_2si(mp,mp,u,mpreal::get_default_rnd()); + mpfr_div_2si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::operator>>=(const int u) { - mpfr_div_2si(mp,mp,static_cast(u),mpreal::get_default_rnd()); + mpfr_div_2si(mpfr_ptr(),mpfr_srcptr(),static_cast(u),mpreal::get_default_rnd()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -1543,7 +1644,7 @@ inline const mpreal operator>>(const mpreal& v, const int k) inline const mpreal mul_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode) { mpreal x(v); - mpfr_mul_2ui(x.mp,v.mp,k,rnd_mode); + mpfr_mul_2ui(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode); return x; } @@ -1551,61 +1652,63 @@ inline const mpreal mul_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_m inline const mpreal mul_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode) { mpreal x(v); - mpfr_mul_2si(x.mp,v.mp,k,rnd_mode); + mpfr_mul_2si(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode); return x; } inline const mpreal div_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode) { mpreal x(v); - mpfr_div_2ui(x.mp,v.mp,k,rnd_mode); + mpfr_div_2ui(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode); return x; } inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode) { mpreal x(v); - mpfr_div_2si(x.mp,v.mp,k,rnd_mode); + mpfr_div_2si(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode); return x; } ////////////////////////////////////////////////////////////////////////// //Boolean operators -inline bool operator > (const mpreal& a, const mpreal& b){ return (mpfr_greater_p(a.mp,b.mp) !=0); } -inline bool operator >= (const mpreal& a, const mpreal& b){ return (mpfr_greaterequal_p(a.mp,b.mp) !=0); } -inline bool operator < (const mpreal& a, const mpreal& b){ return (mpfr_less_p(a.mp,b.mp) !=0); } -inline bool operator <= (const mpreal& a, const mpreal& b){ return (mpfr_lessequal_p(a.mp,b.mp) !=0); } -inline bool operator == (const mpreal& a, const mpreal& b){ return (mpfr_equal_p(a.mp,b.mp) !=0); } -inline bool operator != (const mpreal& a, const mpreal& b){ return (mpfr_lessgreater_p(a.mp,b.mp) !=0); } +inline bool operator > (const mpreal& a, const mpreal& b){ return (mpfr_greater_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); } +inline bool operator >= (const mpreal& a, const mpreal& b){ return (mpfr_greaterequal_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); } +inline bool operator < (const mpreal& a, const mpreal& b){ return (mpfr_less_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); } +inline bool operator <= (const mpreal& a, const mpreal& b){ return (mpfr_lessequal_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); } +inline bool operator == (const mpreal& a, const mpreal& b){ return (mpfr_equal_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); } +inline bool operator != (const mpreal& a, const mpreal& b){ return (mpfr_lessgreater_p (a.mpfr_srcptr(),b.mpfr_srcptr()) !=0 ); } -inline bool operator == (const mpreal& a, const unsigned long int b ){ return (mpfr_cmp_ui(a.mp,b) == 0); } -inline bool operator == (const mpreal& a, const unsigned int b ){ return 
(mpfr_cmp_ui(a.mp,b) == 0); } -inline bool operator == (const mpreal& a, const long int b ){ return (mpfr_cmp_si(a.mp,b) == 0); } -inline bool operator == (const mpreal& a, const int b ){ return (mpfr_cmp_si(a.mp,b) == 0); } -inline bool operator == (const mpreal& a, const long double b ){ return (mpfr_cmp_ld(a.mp,b) == 0); } -inline bool operator == (const mpreal& a, const double b ){ return (mpfr_cmp_d(a.mp,b) == 0); } +inline bool operator == (const mpreal& a, const unsigned long int b ){ return (mpfr_cmp_ui(a.mpfr_srcptr(),b) == 0 ); } +inline bool operator == (const mpreal& a, const unsigned int b ){ return (mpfr_cmp_ui(a.mpfr_srcptr(),b) == 0 ); } +inline bool operator == (const mpreal& a, const long int b ){ return (mpfr_cmp_si(a.mpfr_srcptr(),b) == 0 ); } +inline bool operator == (const mpreal& a, const int b ){ return (mpfr_cmp_si(a.mpfr_srcptr(),b) == 0 ); } +inline bool operator == (const mpreal& a, const long double b ){ return (mpfr_cmp_ld(a.mpfr_srcptr(),b) == 0 ); } +inline bool operator == (const mpreal& a, const double b ){ return (mpfr_cmp_d (a.mpfr_srcptr(),b) == 0 ); } -inline bool isnan (const mpreal& v){ return (mpfr_nan_p(v.mp) != 0); } -inline bool isinf (const mpreal& v){ return (mpfr_inf_p(v.mp) != 0); } -inline bool isfinite (const mpreal& v){ return (mpfr_number_p(v.mp) != 0); } -inline bool iszero (const mpreal& v){ return (mpfr_zero_p(v.mp) != 0); } -inline bool isint (const mpreal& v){ return (mpfr_integer_p(v.mp) != 0); } +inline bool isnan (const mpreal& op){ return (mpfr_nan_p (op.mpfr_srcptr()) != 0 ); } +inline bool isinf (const mpreal& op){ return (mpfr_inf_p (op.mpfr_srcptr()) != 0 ); } +inline bool isfinite (const mpreal& op){ return (mpfr_number_p (op.mpfr_srcptr()) != 0 ); } +inline bool iszero (const mpreal& op){ return (mpfr_zero_p (op.mpfr_srcptr()) != 0 ); } +inline bool isint (const mpreal& op){ return (mpfr_integer_p(op.mpfr_srcptr()) != 0 ); } #if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0)) -inline bool isregular(const mpreal& v){ return (mpfr_regular_p(v.mp));} +inline bool isregular(const mpreal& op){ return (mpfr_regular_p(op.mpfr_srcptr()));} #endif ////////////////////////////////////////////////////////////////////////// // Type Converters -inline long mpreal::toLong (mp_rnd_t mode) const { return mpfr_get_si(mp, mode); } -inline unsigned long mpreal::toULong (mp_rnd_t mode) const { return mpfr_get_ui(mp, mode); } -inline double mpreal::toDouble (mp_rnd_t mode) const { return mpfr_get_d (mp, mode); } -inline long double mpreal::toLDouble(mp_rnd_t mode) const { return mpfr_get_ld(mp, mode); } +inline bool mpreal::toBool (mp_rnd_t /*mode*/) const { return mpfr_zero_p (mpfr_srcptr()) == 0; } +inline long mpreal::toLong (mp_rnd_t mode) const { return mpfr_get_si (mpfr_srcptr(), mode); } +inline unsigned long mpreal::toULong (mp_rnd_t mode) const { return mpfr_get_ui (mpfr_srcptr(), mode); } +inline float mpreal::toFloat (mp_rnd_t mode) const { return mpfr_get_flt(mpfr_srcptr(), mode); } +inline double mpreal::toDouble (mp_rnd_t mode) const { return mpfr_get_d (mpfr_srcptr(), mode); } +inline long double mpreal::toLDouble(mp_rnd_t mode) const { return mpfr_get_ld (mpfr_srcptr(), mode); } #if defined (MPREAL_HAVE_INT64_SUPPORT) -inline int64_t mpreal::toInt64 (mp_rnd_t mode) const{ return mpfr_get_sj(mp, mode); } -inline uint64_t mpreal::toUInt64(mp_rnd_t mode) const{ return mpfr_get_uj(mp, mode); } +inline int64_t mpreal::toInt64 (mp_rnd_t mode) const{ return mpfr_get_sj(mpfr_srcptr(), mode); } +inline uint64_t mpreal::toUInt64(mp_rnd_t mode) 
const{ return mpfr_get_uj(mpfr_srcptr(), mode); } #endif inline ::mpfr_ptr mpreal::mpfr_ptr() { return mp; } @@ -1629,7 +1732,7 @@ inline std::string mpreal::toString(const std::string& format) const if( !format.empty() ) { - if(!(mpfr_asprintf(&s,format.c_str(),mp) < 0)) + if(!(mpfr_asprintf(&s, format.c_str(), mpfr_srcptr()) < 0)) { out = std::string(s); @@ -1644,20 +1747,19 @@ inline std::string mpreal::toString(const std::string& format) const inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const { + // TODO: Add extended format specification (f, e, rounding mode) as it done in output operator (void)b; (void)mode; #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0)) - // Use MPFR native function for output - char format[128]; - int digits; + std::ostringstream format; - digits = n > 0 ? n : bits2digits(mpfr_get_prec(mp)); + int digits = (n >= 0) ? n : bits2digits(mpfr_get_prec(mpfr_srcptr())); + + format << "%." << digits << "RNg"; - sprintf(format,"%%.%dRNg",digits); // Default format - - return toString(std::string(format)); + return toString(format.str()); #else @@ -1675,8 +1777,8 @@ inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const if(mpfr_zero_p(mp)) return "0"; if(mpfr_nan_p(mp)) return "NaN"; - s = mpfr_get_str(NULL,&exp,b,0,mp,mode); - ns = mpfr_get_str(NULL,&exp,b,n,mp,mode); + s = mpfr_get_str(NULL, &exp, b, 0, mp, mode); + ns = mpfr_get_str(NULL, &exp, b, (std::max)(0,n), mp, mode); if(s!=NULL && ns!=NULL) { @@ -1761,17 +1863,43 @@ inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const ////////////////////////////////////////////////////////////////////////// // I/O +inline std::ostream& mpreal::output(std::ostream& os) const +{ + std::ostringstream format; + const std::ios::fmtflags flags = os.flags(); + + format << ((flags & std::ios::showpos) ? "%+" : "%"); + if (os.precision() >= 0) + format << '.' << os.precision() << "R*" + << ((flags & std::ios::floatfield) == std::ios::fixed ? 'f' : + (flags & std::ios::floatfield) == std::ios::scientific ? 'e' : + 'g'); + else + format << "R*e"; + + char *s = NULL; + if(!(mpfr_asprintf(&s, format.str().c_str(), + mpfr::mpreal::get_default_rnd(), + mpfr_srcptr()) + < 0)) + { + os << std::string(s); + mpfr_free_str(s); + } + return os; +} + inline std::ostream& operator<<(std::ostream& os, const mpreal& v) { - return os<(os.precision())); + return v.output(os); } inline std::istream& operator>>(std::istream &is, mpreal& v) { - // ToDo, use cout::hexfloat and other flags to setup base + // TODO: use cout::hexfloat and other flags to setup base std::string tmp; is >> tmp; - mpfr_set_str(v.mp, tmp.c_str(), 10, mpreal::get_default_rnd()); + mpfr_set_str(v.mpfr_ptr(), tmp.c_str(), 10, mpreal::get_default_rnd()); return is; } @@ -1784,53 +1912,53 @@ inline mp_prec_t digits2bits(int d) { const double LOG2_10 = 3.3219280948873624; - return (mp_prec_t) std::ceil( d * LOG2_10 ); + return mp_prec_t(std::ceil( d * LOG2_10 )); } inline int bits2digits(mp_prec_t b) { const double LOG10_2 = 0.30102999566398119; - return (int) std::floor( b * LOG10_2 ); + return int(std::floor( b * LOG10_2 )); } ////////////////////////////////////////////////////////////////////////// // Set/Get number properties -inline int sgn(const mpreal& v) +inline int sgn(const mpreal& op) { - int r = mpfr_signbit(v.mp); - return (r>0?-1:1); + int r = mpfr_signbit(op.mpfr_srcptr()); + return (r > 0? -1 : 1); } inline mpreal& mpreal::setSign(int sign, mp_rnd_t RoundingMode) { - mpfr_setsign(mp,mp,(sign < 0 ? 
1 : 0),RoundingMode); + mpfr_setsign(mpfr_ptr(), mpfr_srcptr(), (sign < 0 ? 1 : 0), RoundingMode); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline int mpreal::getPrecision() const { - return mpfr_get_prec(mp); + return int(mpfr_get_prec(mpfr_srcptr())); } inline mpreal& mpreal::setPrecision(int Precision, mp_rnd_t RoundingMode) { - mpfr_prec_round(mp, Precision, RoundingMode); + mpfr_prec_round(mpfr_ptr(), Precision, RoundingMode); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::setInf(int sign) { - mpfr_set_inf(mp,sign); + mpfr_set_inf(mpfr_ptr(), sign); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } inline mpreal& mpreal::setNan() { - mpfr_set_nan(mp); + mpfr_set_nan(mpfr_ptr()); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; } @@ -1839,9 +1967,9 @@ inline mpreal& mpreal::setZero(int sign) { #if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0)) - mpfr_set_zero(mp, sign); + mpfr_set_zero(mpfr_ptr(), sign); #else - mpfr_set_si(mp, 0, (mpfr_get_default_rounding_mode)()); + mpfr_set_si(mpfr_ptr(), 0, (mpfr_get_default_rounding_mode)()); setSign(sign); #endif @@ -1851,23 +1979,23 @@ inline mpreal& mpreal::setZero(int sign) inline mp_prec_t mpreal::get_prec() const { - return mpfr_get_prec(mp); + return mpfr_get_prec(mpfr_srcptr()); } inline void mpreal::set_prec(mp_prec_t prec, mp_rnd_t rnd_mode) { - mpfr_prec_round(mp,prec,rnd_mode); + mpfr_prec_round(mpfr_ptr(),prec,rnd_mode); MPREAL_MSVC_DEBUGVIEW_CODE; } inline mp_exp_t mpreal::get_exp () { - return mpfr_get_exp(mp); + return mpfr_get_exp(mpfr_srcptr()); } inline int mpreal::set_exp (mp_exp_t e) { - int x = mpfr_set_exp(mp, e); + int x = mpfr_set_exp(mpfr_ptr(), e); MPREAL_MSVC_DEBUGVIEW_CODE; return x; } @@ -1885,7 +2013,7 @@ inline const mpreal ldexp(const mpreal& v, mp_exp_t exp) mpreal x(v); // rounding is not important since we just increasing the exponent - mpfr_mul_2si(x.mp,x.mp,exp,mpreal::get_default_rnd()); + mpfr_mul_2si(x.mpfr_ptr(), x.mpfr_srcptr(), exp, mpreal::get_default_rnd()); return x; } @@ -1900,9 +2028,9 @@ inline mpreal machine_epsilon(const mpreal& x) /* the smallest eps such that x + eps != x */ if( x < 0) { - return nextabove(-x)+x; + return nextabove(-x) + x; }else{ - return nextabove(x)-x; + return nextabove( x) - x; } } @@ -1922,37 +2050,37 @@ inline mpreal maxval(mp_prec_t prec) inline bool isEqualUlps(const mpreal& a, const mpreal& b, int maxUlps) { - return abs(a - b) <= machine_epsilon((max)(abs(a), abs(b))) * maxUlps; + return abs(a - b) <= machine_epsilon((max)(abs(a), abs(b))) * maxUlps; } inline bool isEqualFuzzy(const mpreal& a, const mpreal& b, const mpreal& eps) { - return abs(a - b) <= (min)(abs(a), abs(b)) * eps; + return abs(a - b) <= eps; } inline bool isEqualFuzzy(const mpreal& a, const mpreal& b) { - return isEqualFuzzy(a, b, machine_epsilon((min)(abs(a), abs(b)))); + return isEqualFuzzy(a, b, machine_epsilon((max)(1, (min)(abs(a), abs(b))))); } inline const mpreal modf(const mpreal& v, mpreal& n) { - mpreal frac(v); + mpreal f(v); // rounding is not important since we are using the same number - mpfr_frac(frac.mp,frac.mp,mpreal::get_default_rnd()); - mpfr_trunc(n.mp,v.mp); - return frac; + mpfr_frac (f.mpfr_ptr(),f.mpfr_srcptr(),mpreal::get_default_rnd()); + mpfr_trunc(n.mpfr_ptr(),v.mpfr_srcptr()); + return f; } inline int mpreal::check_range (int t, mp_rnd_t rnd_mode) { - return mpfr_check_range(mp,t,rnd_mode); + return mpfr_check_range(mpfr_ptr(),t,rnd_mode); } inline int mpreal::subnormalize (int t,mp_rnd_t rnd_mode) { - int r = mpfr_subnormalize(mp,t,rnd_mode); + int r = 
mpfr_subnormalize(mpfr_ptr(),t,rnd_mode); MPREAL_MSVC_DEBUGVIEW_CODE; return r; } @@ -2005,8 +2133,11 @@ inline mp_exp_t mpreal::get_emax_max (void) mpfr_##f(y.mpfr_ptr(), x.mpfr_srcptr(), r); \ return y; -inline const mpreal sqr (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(sqr ); } -inline const mpreal sqrt (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(sqrt); } +inline const mpreal sqr (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) +{ MPREAL_UNARY_MATH_FUNCTION_BODY(sqr ); } + +inline const mpreal sqrt (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) +{ MPREAL_UNARY_MATH_FUNCTION_BODY(sqrt); } inline const mpreal sqrt(const unsigned long int x, mp_rnd_t r) { @@ -2032,14 +2163,14 @@ inline const mpreal sqrt(const int v, mp_rnd_t rnd_mode) else return mpreal().setNan(); // NaN } -inline const mpreal root(const mpreal& x, unsigned long int k, mp_rnd_t r) +inline const mpreal root(const mpreal& x, unsigned long int k, mp_rnd_t r = mpreal::get_default_rnd()) { mpreal y(0, mpfr_get_prec(x.mpfr_srcptr())); mpfr_root(y.mpfr_ptr(), x.mpfr_srcptr(), k, r); return y; } -inline const mpreal dim(const mpreal& a, const mpreal& b, mp_rnd_t r) +inline const mpreal dim(const mpreal& a, const mpreal& b, mp_rnd_t r = mpreal::get_default_rnd()) { mpreal y(0, mpfr_get_prec(a.mpfr_srcptr())); mpfr_dim(y.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), r); @@ -2048,161 +2179,130 @@ inline const mpreal dim(const mpreal& a, const mpreal& b, mp_rnd_t r) inline int cmpabs(const mpreal& a,const mpreal& b) { - return mpfr_cmpabs(a.mp,b.mp); + return mpfr_cmpabs(a.mpfr_ptr(), b.mpfr_srcptr()); } -inline int sin_cos(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode) +inline int sin_cos(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd()) { - return mpfr_sin_cos(s.mp,c.mp,v.mp,rnd_mode); + return mpfr_sin_cos(s.mpfr_ptr(), c.mpfr_ptr(), v.mpfr_srcptr(), rnd_mode); } inline const mpreal sqrt (const long double v, mp_rnd_t rnd_mode) { return sqrt(mpreal(v),rnd_mode); } inline const mpreal sqrt (const double v, mp_rnd_t rnd_mode) { return sqrt(mpreal(v),rnd_mode); } -inline const mpreal cbrt (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(cbrt ); } -inline const mpreal fabs (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(abs ); } -inline const mpreal abs (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(abs ); } -inline const mpreal log (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(log ); } -inline const mpreal log2 (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(log2 ); } -inline const mpreal log10 (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(log10); } -inline const mpreal exp (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp ); } -inline const mpreal exp2 (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp2 ); } -inline const mpreal exp10 (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp10); } -inline const mpreal cos (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(cos ); } -inline const mpreal sin (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(sin ); } -inline const mpreal tan (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(tan ); } -inline const mpreal sec (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(sec ); } -inline const mpreal csc (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(csc ); } 
-inline const mpreal cot (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(cot ); } -inline const mpreal acos (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(acos); } -inline const mpreal asin (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(asin); } -inline const mpreal atan (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(atan); } +inline const mpreal cbrt (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cbrt ); } +inline const mpreal fabs (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(abs ); } +inline const mpreal abs (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(abs ); } +inline const mpreal log (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log ); } +inline const mpreal log2 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log2 ); } +inline const mpreal log10 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log10); } +inline const mpreal exp (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp ); } +inline const mpreal exp2 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp2 ); } +inline const mpreal exp10 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp10); } +inline const mpreal cos (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cos ); } +inline const mpreal sin (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sin ); } +inline const mpreal tan (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(tan ); } +inline const mpreal sec (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sec ); } +inline const mpreal csc (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(csc ); } +inline const mpreal cot (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cot ); } +inline const mpreal acos (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(acos ); } +inline const mpreal asin (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(asin ); } +inline const mpreal atan (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(atan ); } -inline const mpreal acot (const mpreal& v, mp_rnd_t r) { return atan (1/v, r); } -inline const mpreal asec (const mpreal& v, mp_rnd_t r) { return acos (1/v, r); } -inline const mpreal acsc (const mpreal& v, mp_rnd_t r) { return asin (1/v, r); } -inline const mpreal acoth (const mpreal& v, mp_rnd_t r) { return atanh(1/v, r); } -inline const mpreal asech (const mpreal& v, mp_rnd_t r) { return acosh(1/v, r); } -inline const mpreal acsch (const mpreal& v, mp_rnd_t r) { return asinh(1/v, r); } +inline const mpreal acot (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return atan (1/v, r); } +inline const mpreal asec (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return acos (1/v, r); } +inline const mpreal acsc (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return asin (1/v, r); } +inline const mpreal acoth (const mpreal& v, mp_rnd_t r = 
mpreal::get_default_rnd()) { return atanh(1/v, r); } +inline const mpreal asech (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return acosh(1/v, r); } +inline const mpreal acsch (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return asinh(1/v, r); } -inline const mpreal cosh (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(cosh ); } -inline const mpreal sinh (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(sinh ); } -inline const mpreal tanh (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(tanh ); } -inline const mpreal sech (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(sech ); } -inline const mpreal csch (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(csch ); } -inline const mpreal coth (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(coth ); } -inline const mpreal acosh (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(acosh); } -inline const mpreal asinh (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(asinh); } -inline const mpreal atanh (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(atanh); } +inline const mpreal cosh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cosh ); } +inline const mpreal sinh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sinh ); } +inline const mpreal tanh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(tanh ); } +inline const mpreal sech (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sech ); } +inline const mpreal csch (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(csch ); } +inline const mpreal coth (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(coth ); } +inline const mpreal acosh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(acosh); } +inline const mpreal asinh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(asinh); } +inline const mpreal atanh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(atanh); } -inline const mpreal log1p (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(log1p ); } -inline const mpreal expm1 (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(expm1 ); } -inline const mpreal eint (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(eint ); } -inline const mpreal gamma (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(gamma ); } -inline const mpreal lngamma (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(lngamma); } -inline const mpreal zeta (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(zeta ); } -inline const mpreal erf (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(erf ); } -inline const mpreal erfc (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(erfc ); } -inline const mpreal besselj0(const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(j0 ); } -inline const mpreal besselj1(const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(j1 ); } -inline const mpreal bessely0(const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(y0 ); } -inline const mpreal bessely1(const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(y1 ); } +inline 
const mpreal log1p (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log1p ); } +inline const mpreal expm1 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(expm1 ); } +inline const mpreal eint (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(eint ); } +inline const mpreal gamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(gamma ); } +inline const mpreal lngamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(lngamma); } +inline const mpreal zeta (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(zeta ); } +inline const mpreal erf (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(erf ); } +inline const mpreal erfc (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(erfc ); } +inline const mpreal besselj0(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(j0 ); } +inline const mpreal besselj1(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(j1 ); } +inline const mpreal bessely0(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(y0 ); } +inline const mpreal bessely1(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(y1 ); } -inline const mpreal atan2 (const mpreal& y, const mpreal& x, mp_rnd_t rnd_mode) +inline const mpreal atan2 (const mpreal& y, const mpreal& x, mp_rnd_t rnd_mode = mpreal::get_default_rnd()) { - mpreal a; - mp_prec_t yp, xp; - - yp = y.get_prec(); - xp = x.get_prec(); - - a.set_prec(yp>xp?yp:xp); - - mpfr_atan2(a.mp, y.mp, x.mp, rnd_mode); - + mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision())); + mpfr_atan2(a.mpfr_ptr(), y.mpfr_srcptr(), x.mpfr_srcptr(), rnd_mode); return a; } -inline const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode) +inline const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd()) { - mpreal a; - mp_prec_t yp, xp; - - yp = y.get_prec(); - xp = x.get_prec(); - - a.set_prec(yp>xp?yp:xp); - - mpfr_hypot(a.mp, x.mp, y.mp, rnd_mode); - + mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision())); + mpfr_hypot(a.mpfr_ptr(), x.mpfr_srcptr(), y.mpfr_srcptr(), rnd_mode); return a; } -inline const mpreal remainder (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode) +inline const mpreal remainder (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd()) { - mpreal a; - mp_prec_t yp, xp; - - yp = y.get_prec(); - xp = x.get_prec(); - - a.set_prec(yp>xp?yp:xp); - - mpfr_remainder(a.mp, x.mp, y.mp, rnd_mode); - + mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision())); + mpfr_remainder(a.mpfr_ptr(), x.mpfr_srcptr(), y.mpfr_srcptr(), rnd_mode); return a; } -inline const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode) +inline const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd()) { - mpreal a; - mp_prec_t yp, xp; - - yp = y.get_prec(); - xp = x.get_prec(); - - a.set_prec(yp>xp?yp:xp); - - mpfr_remquo(a.mp,q, x.mp, y.mp, rnd_mode); - + mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision())); + mpfr_remquo(a.mpfr_ptr(),q, x.mpfr_srcptr(), y.mpfr_srcptr(), rnd_mode); return a; } -inline const 
-inline const mpreal fac_ui (unsigned long int v, mp_prec_t prec, mp_rnd_t rnd_mode)
+inline const mpreal fac_ui (unsigned long int v, mp_prec_t prec = mpreal::get_default_prec(),
+                                                 mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal x(0, prec);
-    mpfr_fac_ui(x.mp,v,rnd_mode);
+    mpfr_fac_ui(x.mpfr_ptr(),v,rnd_mode);
     return x;
 }
-inline const mpreal lgamma (const mpreal& v, int *signp, mp_rnd_t rnd_mode)
+inline const mpreal lgamma (const mpreal& v, int *signp = 0, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal x(v);
     int tsignp;
-    if(signp) mpfr_lgamma(x.mp,signp,v.mp,rnd_mode);
-    else      mpfr_lgamma(x.mp,&tsignp,v.mp,rnd_mode);
+    if(signp) mpfr_lgamma(x.mpfr_ptr(),    signp,v.mpfr_srcptr(),rnd_mode);
+    else      mpfr_lgamma(x.mpfr_ptr(),&tsignp,v.mpfr_srcptr(),rnd_mode);
     return x;
 }
-inline const mpreal besseljn (long n, const mpreal& x, mp_rnd_t r)
+inline const mpreal besseljn (long n, const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
 {
-    mpreal y(0, mpfr_get_prec(x.mpfr_srcptr()));
+    mpreal y(0, x.getPrecision());
     mpfr_jn(y.mpfr_ptr(), n, x.mpfr_srcptr(), r);
     return y;
 }
-inline const mpreal besselyn (long n, const mpreal& x, mp_rnd_t r)
+inline const mpreal besselyn (long n, const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
 {
-    mpreal y(0, mpfr_get_prec(x.mpfr_srcptr()));
+    mpreal y(0, x.getPrecision());
     mpfr_yn(y.mpfr_ptr(), n, x.mpfr_srcptr(), r);
     return y;
 }
-inline const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode)
+inline const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal a;
     mp_prec_t p1, p2, p3;
@@ -2217,7 +2317,7 @@ inline const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, m
     return a;
 }
-inline const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode)
+inline const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal a;
     mp_prec_t p1, p2, p3;
@@ -2232,7 +2332,7 @@ inline const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, m
     return a;
 }
-inline const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode)
+inline const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal a;
     mp_prec_t p1, p2;
@@ -2247,7 +2347,7 @@ inline const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode)
     return a;
 }
-inline const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode)
+inline const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal x;
     mpfr_ptr* t;
@@ -2264,23 +2364,23 @@ inline const mpreal sum (const mpreal tab[], unsigned long int n, mp_rnd_t rnd_m
 // MPFR 2.4.0 Specifics
 #if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
-inline int sinh_cosh(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode)
+inline int sinh_cosh(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     return mpfr_sinh_cosh(s.mp,c.mp,v.mp,rnd_mode);
 }
-inline const mpreal li2 (const mpreal& x, mp_rnd_t r)
+inline const mpreal li2 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
 {
     MPREAL_UNARY_MATH_FUNCTION_BODY(li2);
 }
-inline const mpreal rem (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode)
+inline const mpreal rem (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     /*  R = rem(X,Y) if Y != 0, returns X - n * Y where n = trunc(X/Y). */
     return fmod(x, y, rnd_mode);
 }
-inline const mpreal mod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode)
+inline const mpreal mod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     (void)rnd_mode;
@@ -2305,7 +2405,7 @@ inline const mpreal mod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode)
     return m;
 }
-inline const mpreal fmod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode)
+inline const mpreal fmod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal a;
     mp_prec_t yp, xp;
@@ -2320,7 +2420,7 @@ inline const mpreal fmod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode)
     return a;
 }
-inline const mpreal rec_sqrt(const mpreal& v, mp_rnd_t rnd_mode)
+inline const mpreal rec_sqrt(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal x(v);
     mpfr_rec_sqrt(x.mp,v.mp,rnd_mode);
@@ -2331,41 +2431,41 @@ inline const mpreal rec_sqrt(const mpreal& v, mp_rnd_t rnd_mode)
 //////////////////////////////////////////////////////////////////////////
 // MPFR 3.0.0 Specifics
 #if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
-inline const mpreal digamma (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(digamma); }
-inline const mpreal ai      (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(ai); }
+inline const mpreal digamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(digamma); }
+inline const mpreal ai      (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(ai); }
 #endif // MPFR 3.0.0 Specifics
 //////////////////////////////////////////////////////////////////////////
 // Constants
-inline const mpreal const_log2 (mp_prec_t p, mp_rnd_t r)
+inline const mpreal const_log2 (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
 {
     mpreal x(0, p);
     mpfr_const_log2(x.mpfr_ptr(), r);
     return x;
 }
-inline const mpreal const_pi (mp_prec_t p, mp_rnd_t r)
+inline const mpreal const_pi (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
 {
     mpreal x(0, p);
     mpfr_const_pi(x.mpfr_ptr(), r);
     return x;
 }
-inline const mpreal const_euler (mp_prec_t p, mp_rnd_t r)
+inline const mpreal const_euler (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
 {
     mpreal x(0, p);
     mpfr_const_euler(x.mpfr_ptr(), r);
     return x;
 }
-inline const mpreal const_catalan (mp_prec_t p, mp_rnd_t r)
+inline const mpreal const_catalan (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
 {
     mpreal x(0, p);
     mpfr_const_catalan(x.mpfr_ptr(), r);
     return x;
 }
-inline const mpreal const_infinity (int sign, mp_prec_t p, mp_rnd_t /*r*/)
+inline const mpreal const_infinity (int sign = 1, mp_prec_t p = mpreal::get_default_prec())
 {
     mpreal x(0, p);
     mpfr_set_inf(x.mpfr_ptr(), sign);
@@ -2402,12 +2502,12 @@ inline const mpreal trunc(const mpreal& v)
     return x;
 }
-inline const mpreal rint       (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint ); }
-inline const mpreal rint_ceil  (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_ceil ); }
-inline const mpreal rint_floor (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_floor); }
-inline const mpreal rint_round (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_round); }
-inline const mpreal rint_trunc (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_trunc); }
-inline const mpreal frac       (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(frac ); }
+inline const mpreal rint       (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint ); }
+inline const mpreal rint_ceil  (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_ceil ); }
+inline const mpreal rint_floor (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_floor); }
+inline const mpreal rint_round (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_round); }
+inline const mpreal rint_trunc (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_trunc); }
+inline const mpreal frac       (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(frac ); }
 //////////////////////////////////////////////////////////////////////////
 // Miscellaneous Functions
@@ -2415,14 +2515,14 @@ inline void swap (mpreal& a, mpreal& b) { mpfr_swap(a.mp,b
 inline const mpreal (max)(const mpreal& x, const mpreal& y){ return (x>y?x:y); }
 inline const mpreal (min)(const mpreal& x, const mpreal& y){ return (x<y?x:y); }
-#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,1,0))
 // use gmp_randinit_default() to init state, gmp_randclear() to clear
-inline const mpreal urandom (gmp_randstate_t& state, mp_rnd_t rnd_mode)
+inline const mpreal urandom (gmp_randstate_t& state, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal x;
     mpfr_urandom(x.mp,state,rnd_mode);
     return x;
 }
+
+inline const mpreal grandom (gmp_randstate_t& state, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
+{
+    mpreal x;
+    mpfr_grandom(x.mp, NULL, state, rnd_mode);
+    return x;
+}
+
 #endif
 #if (MPFR_VERSION <= MPFR_VERSION_NUM(2,4,2))
@@ -2480,7 +2588,7 @@ inline const mpreal random2 (mp_size_t size, mp_exp_t exp)
 // a = random(seed); <- initialization & first random number generation
 // a = random();     <- next random numbers generation
 // seed != 0
-inline const mpreal random(unsigned int seed)
+inline const mpreal random(unsigned int seed = 0)
 {
 #if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
@@ -2504,6 +2612,25 @@ inline const mpreal random(unsigned int seed)
 }
+#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
+inline const mpreal grandom(unsigned int seed = 0)
+{
+    static gmp_randstate_t state;
+    static bool isFirstTime = true;
+
+    if(isFirstTime)
+    {
+        gmp_randinit_default(state);
+        gmp_randseed_ui(state,0);
+        isFirstTime = false;
+    }
+
+    if(seed != 0) gmp_randseed_ui(state,seed);
+
+    return mpfr::grandom(state);
+}
+#endif
+
 //////////////////////////////////////////////////////////////////////////
 // Set/Get global properties
 inline void mpreal::set_default_prec(mp_prec_t prec)
@@ -2523,21 +2650,21 @@ inline bool mpreal::fits_in_bits(double x, int n)
     return IsInf(x) || (std::modf ( std::ldexp ( std::frexp ( x, &i ), n ), &t ) == 0.0);
 }
-inline const mpreal pow(const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode)
+inline const mpreal pow(const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal x(a);
     mpfr_pow(x.mp,x.mp,b.mp,rnd_mode);
     return x;
 }
-inline const mpreal pow(const mpreal& a, const mpz_t b, mp_rnd_t rnd_mode)
+inline const mpreal pow(const mpreal& a, const mpz_t b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal x(a);
     mpfr_pow_z(x.mp,x.mp,b,rnd_mode);
     return x;
 }
-inline const mpreal pow(const mpreal& a, const unsigned long int b, mp_rnd_t rnd_mode)
+inline const mpreal pow(const mpreal& a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal x(a);
     mpfr_pow_ui(x.mp,x.mp,b,rnd_mode);
@@ -2549,7 +2676,7 @@ inline const mpreal pow(const mpreal& a, const unsigned int b, mp_rnd_t rnd_mode
     return pow(a,static_cast<unsigned long int>(b),rnd_mode);
 }
-inline const mpreal pow(const mpreal& a, const long int b, mp_rnd_t rnd_mode)
+inline const mpreal pow(const mpreal& a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal x(a);
     mpfr_pow_si(x.mp,x.mp,b,rnd_mode);
@@ -2571,7 +2698,7 @@ inline const mpreal pow(const mpreal& a, const double b, mp_rnd_t rnd_mode)
     return pow(a,mpreal(b),rnd_mode);
 }
-inline const mpreal pow(const unsigned long int a, const mpreal& b, mp_rnd_t rnd_mode)
+inline const mpreal pow(const unsigned long int a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
 {
     mpreal x(a);
     mpfr_ui_pow(x.mp,a,b.mp,rnd_mode);
@@ -2586,13 +2713,13 @@ inline const mpreal pow(const unsigned int a, const mpreal& b, mp_rnd_t rnd_mode
 inline const mpreal pow(const long int a, const mpreal& b, mp_rnd_t rnd_mode)
 {
     if (a>=0) return pow(static_cast<unsigned long int>(a),b,rnd_mode);
-    else      return pow(mpreal(a),b,rnd_mode);
+    else      return pow(mpreal(a),b,rnd_mode);
 }
 inline const mpreal pow(const int a, const mpreal& b, mp_rnd_t rnd_mode)
 {
     if (a>=0) return pow(static_cast<unsigned long int>(a),b,rnd_mode);
-    else      return pow(mpreal(a),b,rnd_mode);
+    else      return pow(mpreal(a),b,rnd_mode);
 }
 inline const mpreal pow(const long double a, const mpreal& b, mp_rnd_t rnd_mode)
@@ -2621,13 +2748,13 @@ inline const mpreal pow(const unsigned long int a, const unsigned int b, mp_rnd_
 inline const mpreal pow(const unsigned long int a, const long int b, mp_rnd_t rnd_mode)
 {
     if(b>0) return pow(a,static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
-    else    return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
+    else    return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
 }
 inline const mpreal pow(const unsigned long int a, const int b, mp_rnd_t rnd_mode)
 {
     if(b>0) return pow(a,static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
-    else    return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
+    else    return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
 }
 inline const mpreal pow(const unsigned long int a, const long double b, mp_rnd_t rnd_mode)
@@ -2824,7 +2951,7 @@ inline const mpreal pow(const double a, const int b, mp_rnd_t rnd_mode)
 // Non-throwing swap C++ idiom: http://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Non-throwing_swap
 namespace std
 {
-    // only allowed to extend namespace std with specializations
+    // we are allowed to extend namespace std with specializations only
     template <>
     inline void swap(mpfr::mpreal& x, mpfr::mpreal& y)
     {
@@ -2852,20 +2979,6 @@ namespace std
        static const bool tinyness_before = true;
        static const float_denorm_style has_denorm = denorm_absent;
-
-       inline static float_round_style round_style()
-       {
-           mp_rnd_t r = mpfr::mpreal::get_default_rnd();
-
-           switch (r)
-           {
-               case MPFR_RNDN: return round_to_nearest;
-               case MPFR_RNDZ: return round_toward_zero;
-               case MPFR_RNDU: return round_toward_infinity;
-               case MPFR_RNDD: return round_toward_neg_infinity;
-               default: return round_indeterminate;
-           }
-       }
        inline static mpfr::mpreal (min) (mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return mpfr::minval(precision); }
        inline static mpfr::mpreal (max) (mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return mpfr::maxval(precision); }
@@ -2873,7 +2986,7 @@ namespace std
        // Returns smallest eps such that 1 + eps != 1 (classic machine epsilon)
        inline static mpfr::mpreal epsilon(mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(precision); }
-
+
        // Returns smallest eps such that x + eps != x (relative machine epsilon)
        inline static mpfr::mpreal epsilon(const mpfr::mpreal& x) { return mpfr::machine_epsilon(x); }
@@ -2881,7 +2994,7 @@ namespace std
        {
            mp_rnd_t r = mpfr::mpreal::get_default_rnd();
-           if(r == MPFR_RNDN) return mpfr::mpreal(0.5, precision);
+           if(r == GMP_RNDN)  return mpfr::mpreal(0.5, precision);
            else               return mpfr::mpreal(1.0, precision);
        }
@@ -2896,10 +3009,30 @@ namespace std
        MPREAL_PERMISSIVE_EXPR static const int min_exponent10 = (int) (MPFR_EMIN_DEFAULT * 0.3010299956639811);
        MPREAL_PERMISSIVE_EXPR static const int max_exponent10 = (int) (MPFR_EMAX_DEFAULT * 0.3010299956639811);
-       // Should be constant according to standard, but 'digits' depends on precision in MPFR
+#ifdef MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS
-       inline static int digits() { return mpfr::mpreal::get_default_prec(); }
-       inline static int digits(const mpfr::mpreal& x) { return x.getPrecision(); }
+       // Following members should be constant according to standard, but they can be variable in MPFR
+       // So we define them as functions here.
+       //
+       // This is preferable way for std::numeric_limits specialization.
+       // But it is incompatible with standard std::numeric_limits and might not work with other libraries, e.g. boost.
+       // See below for compatible implementation.
+       inline static float_round_style round_style()
+       {
+           mp_rnd_t r = mpfr::mpreal::get_default_rnd();
+
+           switch (r)
+           {
+               case GMP_RNDN: return round_to_nearest;
+               case GMP_RNDZ: return round_toward_zero;
+               case GMP_RNDU: return round_toward_infinity;
+               case GMP_RNDD: return round_toward_neg_infinity;
+               default: return round_indeterminate;
+           }
+       }
+
+       inline static int digits()                      { return int(mpfr::mpreal::get_default_prec()); }
+       inline static int digits(const mpfr::mpreal& x) { return x.getPrecision(); }
        inline static int digits10(mp_prec_t precision = mpfr::mpreal::get_default_prec())
        {
@@ -2915,6 +3048,25 @@ namespace std
        {
            return digits10(precision);
        }
+#else
+       // Digits and round_style are NOT constants when it comes to mpreal.
+       // If possible, please use functions digits() and round_style() defined above.
+       //
+       // These (default) values are preserved for compatibility with existing libraries, e.g. boost.
+       // Change them accordingly to your application.
+       //
+       // For example, if you use 256 bits of precision uniformly in your program, then:
+       //    digits       = 256
+       //    digits10     = 77
+       //    max_digits10 = 78
+       //
+       // Approximate formula for decimal digits is: digits10 = floor(log10(2) * digits). See bits2digits() for more details.
+
+       static const std::float_round_style round_style = round_to_nearest;
+       static const int digits       = 53;
+       static const int digits10     = 15;
+       static const int max_digits10 = 16;
+#endif
 };
 }
diff --git a/ground/gcs/src/libs/eigen/unsupported/test/polynomialsolver.cpp b/ground/gcs/src/libs/eigen/unsupported/test/polynomialsolver.cpp
index 13f92169e..de79f1538 100644
--- a/ground/gcs/src/libs/eigen/unsupported/test/polynomialsolver.cpp
+++ b/ground/gcs/src/libs/eigen/unsupported/test/polynomialsolver.cpp
@@ -104,9 +104,7 @@ void evalSolverSugarFunction( const POLYNOMIAL& pols, const ROOTS& roots, const
   // 1) the roots found are correct
   // 2) the roots have distinct moduli
-  typedef typename POLYNOMIAL::Scalar Scalar;
   typedef typename REAL_ROOTS::Scalar Real;
-  typedef PolynomialSolver PolynomialSolverType;
   //Test realRoots
   std::vector< Real > calc_realRoots;