Mirror of https://bitbucket.org/librepilot/librepilot.git

Merged in alessiomorale/librepilot/Qt-5.6_Support_Win (pull request #220)

Qt 5.6 support for Windows

Commit 1adcc36d2c
@@ -1,4 +1,4 @@
 repo: 8a21fd850624c931e448cbcfb38168cb2717c790
-node: ffa86ffb557094721ca71dcea6aed2651b9fd610
+node: 07105f7124f9aef00a68c85e0fc606e65d3d6c15
 branch: 3.2
-tag: 3.2.0
+tag: 3.2.8
@@ -23,3 +23,11 @@ bf4cb8c934fa3a79f45f1e629610f0225e93e493 3.1.0-rc2
 da195914abcc1d739027cbee7c52077aab30b336 3.2-beta1
 4b687cad1d23066f66863f4f87298447298443df 3.2-rc1
 1eeda7b1258bcd306018c0738e2b6a8543661141 3.2-rc2
+ffa86ffb557094721ca71dcea6aed2651b9fd610 3.2.0
+6b38706d90a9fe182e66ab88477b3dbde34b9f66 3.2.1
+1306d75b4a21891e59ff9bd96678882cf831e39f 3.2.2
+36fd1ba04c120cfdd90f3e4cede47f43b21d19ad 3.2.3
+10219c95fe653d4962aa9db4946f6fbea96dd740 3.2.4
+bdd17ee3b1b3a166cd5ec36dcad4fc1f3faf774a 3.2.5
+c58038c56923e0fd86de3ded18e03df442e66dfb 3.2.6
+b30b87236a1b1552af32ac34075ee5696a9b5a33 3.2.7
@@ -1,6 +1,5 @@
 project(Eigen)
 
-cmake_minimum_required(VERSION 2.8.2)
-
+cmake_minimum_required(VERSION 2.8.5)
 
 # guard against in-source builds
@@ -55,6 +54,7 @@ endif(EIGEN_HG_CHANGESET)
 
 include(CheckCXXCompilerFlag)
+include(GNUInstallDirs)
 
 set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
@@ -204,7 +204,7 @@ if(NOT MSVC)
 
   option(EIGEN_TEST_NEON "Enable/Disable Neon in tests/examples" OFF)
   if(EIGEN_TEST_NEON)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon -mcpu=cortex-a"8)
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon -mcpu=cortex-a8")
     message(STATUS "Enabling NEON in tests/examples")
   endif()
@@ -288,25 +288,26 @@ option(EIGEN_TEST_C++0x "Enables all C++0x features." OFF)
 
 include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
 
-# the user modifiable install path for header files
-set(EIGEN_INCLUDE_INSTALL_DIR ${EIGEN_INCLUDE_INSTALL_DIR} CACHE PATH "The directory where we install the header files (optional)")
-
-# set the internal install path for header files which depends on wether the user modifiable
-# EIGEN_INCLUDE_INSTALL_DIR has been set by the user or not.
-if(EIGEN_INCLUDE_INSTALL_DIR)
-  set(INCLUDE_INSTALL_DIR
-    ${EIGEN_INCLUDE_INSTALL_DIR}
-    CACHE INTERNAL
-    "The directory where we install the header files (internal)"
-  )
+# Backward compatibility support for EIGEN_INCLUDE_INSTALL_DIR
+if(EIGEN_INCLUDE_INSTALL_DIR AND NOT INCLUDE_INSTALL_DIR)
+  set(INCLUDE_INSTALL_DIR ${EIGEN_INCLUDE_INSTALL_DIR}
+      CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed")
 else()
   set(INCLUDE_INSTALL_DIR
-    "${CMAKE_INSTALL_PREFIX}/include/eigen3"
-    CACHE INTERNAL
-    "The directory where we install the header files (internal)"
-  )
+    "${CMAKE_INSTALL_INCLUDEDIR}/eigen3"
+    CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed"
+  )
 endif()
+
+set(CMAKEPACKAGE_INSTALL_DIR
+  "${CMAKE_INSTALL_LIBDIR}/cmake/eigen3"
+  CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen3Config.cmake is installed"
+  )
+set(PKGCONFIG_INSTALL_DIR
+  "${CMAKE_INSTALL_DATADIR}/pkgconfig"
+  CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where eigen3.pc is installed"
+  )
 
 # similar to set_target_properties but append the property instead of overwriting it
 macro(ei_add_target_property target prop value)
@@ -324,21 +325,9 @@ install(FILES
   )
 
 if(EIGEN_BUILD_PKGCONFIG)
-  SET(path_separator ":")
-  STRING(REPLACE ${path_separator} ";" pkg_config_libdir_search "$ENV{PKG_CONFIG_LIBDIR}")
-  message(STATUS "searching for 'pkgconfig' directory in PKG_CONFIG_LIBDIR ( $ENV{PKG_CONFIG_LIBDIR} ), ${CMAKE_INSTALL_PREFIX}/share, and ${CMAKE_INSTALL_PREFIX}/lib")
-  FIND_PATH(pkg_config_libdir pkgconfig ${pkg_config_libdir_search} ${CMAKE_INSTALL_PREFIX}/share ${CMAKE_INSTALL_PREFIX}/lib ${pkg_config_libdir_search})
-  if(pkg_config_libdir)
-    SET(pkg_config_install_dir ${pkg_config_libdir})
-    message(STATUS "found ${pkg_config_libdir}/pkgconfig" )
-  else(pkg_config_libdir)
-    SET(pkg_config_install_dir ${CMAKE_INSTALL_PREFIX}/share)
-    message(STATUS "pkgconfig not found; installing in ${pkg_config_install_dir}" )
-  endif(pkg_config_libdir)
-
-  configure_file(eigen3.pc.in eigen3.pc)
+  configure_file(eigen3.pc.in eigen3.pc @ONLY)
   install(FILES ${CMAKE_CURRENT_BINARY_DIR}/eigen3.pc
-    DESTINATION ${pkg_config_install_dir}/pkgconfig
+    DESTINATION ${PKGCONFIG_INSTALL_DIR}
     )
 endif(EIGEN_BUILD_PKGCONFIG)
@@ -401,12 +390,15 @@ if(cmake_generator_tolower MATCHES "makefile")
 message(STATUS "--------------+--------------------------------------------------------------")
 message(STATUS "Command       | Description")
 message(STATUS "--------------+--------------------------------------------------------------")
-message(STATUS "make install  | Install to ${CMAKE_INSTALL_PREFIX}. To change that:")
-message(STATUS "              |   cmake . -DCMAKE_INSTALL_PREFIX=yourpath")
-message(STATUS "              | Eigen headers will then be installed to:")
-message(STATUS "              |   ${INCLUDE_INSTALL_DIR}")
-message(STATUS "              | To install Eigen headers to a separate location, do:")
-message(STATUS "              |   cmake . -DEIGEN_INCLUDE_INSTALL_DIR=yourpath")
+message(STATUS "make install  | Install Eigen. Headers will be installed to:")
+message(STATUS "              |   <CMAKE_INSTALL_PREFIX>/<INCLUDE_INSTALL_DIR>")
+message(STATUS "              | Using the following values:")
+message(STATUS "              |   CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
+message(STATUS "              |   INCLUDE_INSTALL_DIR:  ${INCLUDE_INSTALL_DIR}")
+message(STATUS "              | Change the install location of Eigen headers using:")
+message(STATUS "              |   cmake . -DCMAKE_INSTALL_PREFIX=yourprefix")
+message(STATUS "              | Or:")
+message(STATUS "              |   cmake . -DINCLUDE_INSTALL_DIR=yourdir")
 message(STATUS "make doc      | Generate the API documentation, requires Doxygen & LaTeX")
 message(STATUS "make check    | Build and run the unit-tests. Read this page:")
 message(STATUS "              |   http://eigen.tuxfamily.org/index.php?title=Tests")
@@ -4,14 +4,10 @@
 ## # The following are required to uses Dart and the Cdash dashboard
 ##   ENABLE_TESTING()
 ##   INCLUDE(CTest)
-set(CTEST_PROJECT_NAME "Eigen")
+set(CTEST_PROJECT_NAME "Eigen3.2")
 set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC")
 
 set(CTEST_DROP_METHOD "http")
 set(CTEST_DROP_SITE "manao.inria.fr")
-set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen")
+set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen3.2")
 set(CTEST_DROP_SITE_CDASH TRUE)
-set(CTEST_PROJECT_SUBPROJECTS
-  Official
-  Unsupported
-  )
@@ -12,7 +12,7 @@ extern "C" {
 /** \ingroup Support_modules
   * \defgroup CholmodSupport_Module CholmodSupport module
   *
-  * This module provides an interface to the Cholmod library which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">suitesparse</a> package.
+  * This module provides an interface to the Cholmod library which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
   * It provides the two following main factorization classes:
   * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization.
   * - class CholmodDecomposiiton: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial).
@@ -95,7 +95,7 @@
 extern "C" {
   // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
   // Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus:
-  #ifdef __INTEL_COMPILER
+  #if defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1110
     #include <immintrin.h>
   #else
     #include <emmintrin.h>
@@ -123,7 +123,7 @@
   #undef bool
   #undef vector
   #undef pixel
-#elif defined __ARM_NEON__
+#elif defined __ARM_NEON
   #define EIGEN_VECTORIZE
   #define EIGEN_VECTORIZE_NEON
   #include <arm_neon.h>
@@ -165,7 +165,7 @@
 #endif
 
 // required for __cpuid, needs to be included after cmath
-#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64))
+#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64)) && (!defined(_WIN32_WCE))
   #include <intrin.h>
 #endif
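
For context: these three hunks adjust Eigen's platform detection. Newer Intel compilers (>= 11.1) now get immintrin.h, NEON is detected via __ARM_NEON rather than __ARM_NEON__ (AArch64 compilers predefine only the spelling without trailing underscores), and intrin.h is skipped on Windows CE. A small C++ sketch for checking which path a given build actually selected, using the EIGEN_VECTORIZE_* macros this logic defines (the printed labels are illustrative):

    #include <cstdio>
    #include <Eigen/Core>

    int main()
    {
        // Each branch mirrors a detection path in the patched code above.
    #if defined(EIGEN_VECTORIZE_NEON)
        std::printf("NEON paths enabled\n");
    #elif defined(EIGEN_VECTORIZE_SSE2)
        std::printf("SSE2 (or better) paths enabled\n");
    #elif defined(EIGEN_VECTORIZE_ALTIVEC)
        std::printf("AltiVec paths enabled\n");
    #elif defined(EIGEN_VECTORIZE)
        std::printf("vectorized, other ISA\n");
    #else
        std::printf("scalar paths only\n");
    #endif
        return 0;
    }
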
@@ -14,12 +14,25 @@
 #error Eigen2 support must be enabled by defining EIGEN2_SUPPORT before including any Eigen header
 #endif
 
+#ifndef EIGEN_NO_EIGEN2_DEPRECATED_WARNING
+
+#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
+#warning "Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3. (Define EIGEN_NO_EIGEN2_DEPRECATED_WARNING to disable this warning)"
+#else
+#pragma message ("Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3. (Define EIGEN_NO_EIGEN2_DEPRECATED_WARNING to disable this warning)")
+#endif
+
+#endif // EIGEN_NO_EIGEN2_DEPRECATED_WARNING
+
 #include "src/Core/util/DisableStupidWarnings.h"
 
 /** \ingroup Support_modules
   * \defgroup Eigen2Support_Module Eigen2 support module
-  * This module provides a couple of deprecated functions improving the compatibility with Eigen2.
+  *
+  * \warning Eigen2 support is deprecated in Eigen 3.2.x and it will be removed in Eigen 3.3.
+  *
+  * This module provides a couple of deprecated functions improving the compatibility with Eigen2.
   *
   * To use it, define EIGEN2_SUPPORT before including any Eigen header
   * \code
   * #define EIGEN2_SUPPORT
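
For context: the block added above makes any translation unit that enables the Eigen2 compatibility layer emit a deprecation diagnostic, via #warning on GCC/ICC/Clang and #pragma message elsewhere, unless it opts out first. A minimal sketch of both macros in use, assuming Eigen 3.2 headers (the matrix code is only illustrative):

    // Must be defined before any Eigen header: silences the deprecation
    // diagnostic introduced in this change.
    #define EIGEN_NO_EIGEN2_DEPRECATED_WARNING
    // Enables the deprecated Eigen2 compatibility layer.
    #define EIGEN2_SUPPORT
    #include <Eigen/Dense>

    int main()
    {
        // Eigen2-era code keeps compiling under 3.2.x, but the layer is
        // scheduled for removal in Eigen 3.3.
        Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
        return m(0, 0) == 1.0 ? 0 : 1;
    }
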
@@ -10,7 +10,7 @@
 /** \ingroup Support_modules
   * \defgroup SPQRSupport_Module SuiteSparseQR module
   *
-  * This module provides an interface to the SPQR library, which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">suitesparse</a> package.
+  * This module provides an interface to the SPQR library, which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
   *
   * \code
   * #include <Eigen/SPQRSupport>
@@ -14,7 +14,7 @@
 /**
   * \defgroup SparseCore_Module SparseCore module
   *
-  * This module provides a sparse matrix representation, and basic associatd matrix manipulations
+  * This module provides a sparse matrix representation, and basic associated matrix manipulations
   * and operations.
   *
   * See the \ref TutorialSparse "Sparse tutorial"
@@ -12,7 +12,7 @@ extern "C" {
 /** \ingroup Support_modules
   * \defgroup UmfPackSupport_Module UmfPackSupport module
   *
-  * This module provides an interface to the UmfPack library which is part of the <a href="http://www.cise.ufl.edu/research/sparse/SuiteSparse/">suitesparse</a> package.
+  * This module provides an interface to the UmfPack library which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
   * It provides the following factorization class:
   * - class UmfPackLU: a multifrontal sequential LU factorization.
   *
@@ -16,7 +16,10 @@
 namespace Eigen {
 
 namespace internal {
 template<typename MatrixType, int UpLo> struct LDLT_Traits;
+
+// PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef
+enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite };
 }
 
 /** \ingroup Cholesky_Module
@@ -69,7 +72,12 @@ template<typename _MatrixType, int _UpLo> class LDLT
       * The default constructor is useful in cases in which the user intends to
       * perform decompositions via LDLT::compute(const MatrixType&).
       */
-    LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {}
+    LDLT()
+      : m_matrix(),
+        m_transpositions(),
+        m_sign(internal::ZeroSign),
+        m_isInitialized(false)
+    {}
 
     /** \brief Default Constructor with memory preallocation
       *
@@ -81,6 +89,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
       : m_matrix(size, size),
         m_transpositions(size),
         m_temporary(size),
+        m_sign(internal::ZeroSign),
         m_isInitialized(false)
     {}
 
@@ -93,6 +102,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
       : m_matrix(matrix.rows(), matrix.cols()),
         m_transpositions(matrix.rows()),
         m_temporary(matrix.rows()),
+        m_sign(internal::ZeroSign),
         m_isInitialized(false)
     {
       compute(matrix);
@@ -139,7 +149,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
     inline bool isPositive() const
     {
       eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return m_sign == 1;
+      return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign;
     }
 
 #ifdef EIGEN2_SUPPORT
@@ -153,7 +163,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
     inline bool isNegative(void) const
     {
       eigen_assert(m_isInitialized && "LDLT is not initialized.");
-      return m_sign == -1;
+      return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign;
     }
 
     /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A.
@@ -225,6 +235,11 @@ template<typename _MatrixType, int _UpLo> class LDLT
     }
 
   protected:
+
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
 
     /** \internal
       * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
@@ -235,7 +250,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
     MatrixType m_matrix;
     TranspositionType m_transpositions;
     TmpMatrixType m_temporary;
-    int m_sign;
+    internal::SignMatrix m_sign;
     bool m_isInitialized;
 };
 
@@ -246,7 +261,7 @@ template<int UpLo> struct ldlt_inplace;
 template<> struct ldlt_inplace<Lower>
 {
   template<typename MatrixType, typename TranspositionType, typename Workspace>
-  static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+  static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
   {
     using std::abs;
     typedef typename MatrixType::Scalar Scalar;
@@ -258,36 +273,19 @@ template<> struct ldlt_inplace<Lower>
     if (size <= 1)
     {
       transpositions.setIdentity();
-      if(sign)
-        *sign = numext::real(mat.coeff(0,0))>0 ? 1:-1;
+      if (numext::real(mat.coeff(0,0)) > 0) sign = PositiveSemiDef;
+      else if (numext::real(mat.coeff(0,0)) < 0) sign = NegativeSemiDef;
+      else sign = ZeroSign;
       return true;
     }
 
-    RealScalar cutoff(0), biggest_in_corner;
-
     for (Index k = 0; k < size; ++k)
     {
       // Find largest diagonal element
       Index index_of_biggest_in_corner;
-      biggest_in_corner = mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
+      mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
       index_of_biggest_in_corner += k;
 
-      if(k == 0)
-      {
-        // The biggest overall is the point of reference to which further diagonals
-        // are compared; if any diagonal is negligible compared
-        // to the largest overall, the algorithm bails.
-        cutoff = abs(NumTraits<Scalar>::epsilon() * biggest_in_corner);
-      }
-
-      // Finish early if the matrix is not full rank.
-      if(biggest_in_corner < cutoff)
-      {
-        for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i;
-        if(sign) *sign = 0;
-        break;
-      }
-
       transpositions.coeffRef(k) = index_of_biggest_in_corner;
       if(k != index_of_biggest_in_corner)
       {
@@ -318,22 +316,27 @@ template<> struct ldlt_inplace<Lower>
 
       if(k>0)
       {
-        temp.head(k) = mat.diagonal().head(k).asDiagonal() * A10.adjoint();
+        temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint();
         mat.coeffRef(k,k) -= (A10 * temp.head(k)).value();
         if(rs>0)
          A21.noalias() -= A20 * temp.head(k);
       }
-      if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff))
-        A21 /= mat.coeffRef(k,k);
-
-      if(sign)
-      {
-        // LDLT is not guaranteed to work for indefinite matrices, but let's try to get the sign right
-        int newSign = numext::real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0;
-        if(k == 0)
-          *sign = newSign;
-        else if(*sign != newSign)
-          *sign = 0;
-      }
+
+      // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot
+      // was smaller than the cutoff value. However, since LDLT is not rank-revealing
+      // we should only make sure we do not introduce INF or NaN values.
+      // LAPACK also uses 0 as the cutoff value.
+      RealScalar realAkk = numext::real(mat.coeffRef(k,k));
+      if((rs>0) && (abs(realAkk) > RealScalar(0)))
+        A21 /= realAkk;
+
+      if (sign == PositiveSemiDef) {
+        if (realAkk < 0) sign = Indefinite;
+      } else if (sign == NegativeSemiDef) {
+        if (realAkk > 0) sign = Indefinite;
+      } else if (sign == ZeroSign) {
+        if (realAkk > 0) sign = PositiveSemiDef;
+        else if (realAkk < 0) sign = NegativeSemiDef;
+      }
 
@@ -399,7 +402,7 @@ template<> struct ldlt_inplace<Lower>
 template<> struct ldlt_inplace<Upper>
 {
   template<typename MatrixType, typename TranspositionType, typename Workspace>
-  static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0)
+  static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)
   {
     Transpose<MatrixType> matt(mat);
     return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);
@@ -436,6 +439,8 @@ template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
 template<typename MatrixType, int _UpLo>
 LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
 {
+  check_template_parameters();
+
   eigen_assert(a.rows()==a.cols());
   const Index size = a.rows();
 
@@ -444,8 +449,9 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
   m_transpositions.resize(size);
   m_isInitialized = false;
   m_temporary.resize(size);
+  m_sign = internal::ZeroSign;
 
-  internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign);
+  internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);
 
   m_isInitialized = true;
   return *this;
@@ -458,7 +464,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
   */
 template<typename MatrixType, int _UpLo>
 template<typename Derived>
-LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename NumTraits<typename MatrixType::Scalar>::Real& sigma)
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename LDLT<MatrixType,_UpLo>::RealScalar& sigma)
 {
   const Index size = w.rows();
   if (m_isInitialized)
@@ -473,7 +479,7 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Deri
     for (Index i = 0; i < size; i++)
       m_transpositions.coeffRef(i) = i;
     m_temporary.resize(size);
-    m_sign = sigma>=0 ? 1 : -1;
+    m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef;
     m_isInitialized = true;
   }
 
@@ -504,16 +510,21 @@ struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
     using std::abs;
     using std::max;
     typedef typename LDLTType::MatrixType MatrixType;
-    typedef typename LDLTType::Scalar Scalar;
     typedef typename LDLTType::RealScalar RealScalar;
-    const Diagonal<const MatrixType> vectorD = dec().vectorD();
-    RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits<Scalar>::epsilon(),
-                                 RealScalar(1) / NumTraits<RealScalar>::highest()); // motivated by LAPACK's xGELSS
+    const typename Diagonal<const MatrixType>::RealReturnType vectorD(dec().vectorD());
+    // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
+    // as motivated by LAPACK's xGELSS:
+    // RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits<RealScalar>::epsilon(), RealScalar(1) / NumTraits<RealScalar>::highest());
+    // However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest
+    // diagonal element is not well justified and leads to numerical issues in some cases.
+    // Moreover, LAPACK's xSYTRS routines use 0 for the tolerance.
+    RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();
 
     for (Index i = 0; i < vectorD.size(); ++i) {
       if(abs(vectorD(i)) > tolerance)
        dst.row(i) /= vectorD(i);
      else
        dst.row(i).setZero();
    }
 
    // dst = L^-T (D^-1 L^-1 P b)
@@ -566,7 +577,7 @@ MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const
   // L^* P
   res = matrixU() * res;
   // D(L^*P)
-  res = vectorD().asDiagonal() * res;
+  res = vectorD().real().asDiagonal() * res;
   // L(DL^*P)
   res = matrixL() * res;
   // P^T (LDL^*P)
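
Taken together, the LDLT changes replace the old int sign flag (1, -1, 0) with the four-state internal::SignMatrix enum, initialize it in every constructor, and drop the early-exit pivot cutoff so rank-deficient systems are still processed (matching LAPACK, which uses 0 as the cutoff). The public interface is unchanged; a short usage sketch against the documented API (the example matrix is arbitrary):

    #include <cstdio>
    #include <Eigen/Dense>

    int main()
    {
        // A symmetric positive definite matrix: A = M * M^T + I.
        Eigen::MatrixXd M = Eigen::MatrixXd::Random(4, 4);
        Eigen::MatrixXd A = M * M.transpose()
                          + Eigen::MatrixXd::Identity(4, 4);

        // Factor A; the sign of D is now tracked with the SignMatrix enum
        // instead of a bare int, but isPositive() reads the same as before.
        Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
        std::printf("positive semidefinite: %s\n",
                    ldlt.isPositive() ? "yes" : "no");

        // Solve A x = b and check the residual.
        Eigen::VectorXd b = Eigen::VectorXd::Ones(4);
        Eigen::VectorXd x = ldlt.solve(b);
        std::printf("residual: %g\n", (A * x - b).norm());
        return 0;
    }
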
@@ -174,6 +174,12 @@ template<typename _MatrixType, int _UpLo> class LLT
     LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
 
   protected:
+
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
 
     /** \internal
       * Used to compute and store L
      * The strict upper part is not used and even not initialized.
@@ -283,7 +289,7 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
         return k;
       mat.coeffRef(k,k) = x = sqrt(x);
       if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint();
-      if (rs>0) A21 *= RealScalar(1)/x;
+      if (rs>0) A21 /= x;
     }
     return -1;
   }
@@ -384,6 +390,8 @@ template<typename MatrixType> struct LLT_Traits<MatrixType,Upper>
 template<typename MatrixType, int _UpLo>
 LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a)
 {
+  check_template_parameters();
+
   eigen_assert(a.rows()==a.cols());
   const Index size = a.rows();
   m_matrix.resize(size, size);
@@ -60,7 +60,7 @@ template<> struct mkl_llt<EIGTYPE> \
     lda = m.outerStride(); \
 \
     info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \
-    info = (info==0) ? Success : NumericalIssue; \
+    info = (info==0) ? -1 : info>0 ? info-1 : size; \
     return info; \
   } \
 }; \
@@ -58,10 +58,12 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
   res.p = mat.outerIndexPtr();
   res.i = mat.innerIndexPtr();
   res.x = mat.valuePtr();
   res.z = 0;
+  res.sorted = 1;
   if(mat.isCompressed())
   {
     res.packed = 1;
+    res.nz = 0;
   }
   else
   {
@@ -76,7 +78,7 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
   {
     res.itype = CHOLMOD_INT;
   }
-  else if (internal::is_same<_Index,UF_long>::value)
+  else if (internal::is_same<_Index,SuiteSparse_long>::value)
   {
     res.itype = CHOLMOD_LONG;
   }
@@ -170,6 +172,7 @@ class CholmodBase : internal::noncopyable
     CholmodBase()
       : m_cholmodFactor(0), m_info(Success), m_isInitialized(false)
     {
+      m_shiftOffset[0] = m_shiftOffset[1] = RealScalar(0.0);
       cholmod_start(&m_cholmod);
     }
 
@@ -241,7 +244,7 @@ class CholmodBase : internal::noncopyable
       return internal::sparse_solve_retval<CholmodBase, Rhs>(*this, b.derived());
     }
 
-    /** Performs a symbolic decomposition on the sparcity of \a matrix.
+    /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
      *
      * This function is particularly useful when solving for several problems having the same structure.
      *
@@ -265,7 +268,7 @@ class CholmodBase : internal::noncopyable
 
     /** Performs a numeric decomposition of \a matrix
      *
-      * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed.
+      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
      *
      * \sa analyzePattern()
      */
@@ -302,7 +305,7 @@ class CholmodBase : internal::noncopyable
     {
       this->m_info = NumericalIssue;
     }
-    // TODO optimize this copy by swapping when possible (be carreful with alignment, etc.)
+    // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
     dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
     cholmod_free_dense(&x_cd, &m_cholmod);
   }
@@ -323,7 +326,7 @@ class CholmodBase : internal::noncopyable
     {
       this->m_info = NumericalIssue;
     }
-    // TODO optimize this copy by swapping when possible (be carreful with alignment, etc.)
+    // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
     dest = viewAsEigen<DestScalar,DestOptions,DestIndex>(*x_cs);
     cholmod_free_sparse(&x_cs, &m_cholmod);
   }
@@ -365,8 +368,8 @@ class CholmodBase : internal::noncopyable
   *
   * This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization
   * using the Cholmod library.
-  * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Thefore, it has little practical interest.
-  * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
+  * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest.
+  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
   * X and B can be either dense or sparse.
   *
   * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
@@ -392,7 +395,7 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
     CholmodSimplicialLLT(const MatrixType& matrix) : Base()
     {
       init();
-      compute(matrix);
+      Base::compute(matrix);
     }
 
     ~CholmodSimplicialLLT() {}
@@ -412,8 +415,8 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
   *
   * This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization
   * using the Cholmod library.
-  * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Thefore, it has little practical interest.
-  * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
+  * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical interest.
+  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
   * X and B can be either dense or sparse.
   *
   * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
@@ -439,7 +442,7 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
     CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
     {
       init();
-      compute(matrix);
+      Base::compute(matrix);
     }
 
     ~CholmodSimplicialLDLT() {}
@@ -458,7 +461,7 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
   * This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization
   * using the Cholmod library.
   * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM.
-  * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
+  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
   * X and B can be either dense or sparse.
   *
   * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>
@@ -484,7 +487,7 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
     CholmodSupernodalLLT(const MatrixType& matrix) : Base()
     {
      init();
-      compute(matrix);
+      Base::compute(matrix);
    }
 
    ~CholmodSupernodalLLT() {}
@@ -501,7 +504,7 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
   * \brief A general Cholesky factorization and solver based on Cholmod
   *
   * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization
-  * using the Cholmod library. The sparse matrix A must be selfajoint and positive definite. The vectors or matrices
+  * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices
   * X and B can be either dense or sparse.
   *
   * This variant permits to change the underlying Cholesky method at runtime.
@@ -531,7 +534,7 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
     CholmodDecomposition(const MatrixType& matrix) : Base()
     {
      init();
-      compute(matrix);
+      Base::compute(matrix);
    }
 
    ~CholmodDecomposition() {}
@@ -124,6 +124,21 @@ class Array
     }
 #endif
 
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+    Array(Array&& other)
+      : Base(std::move(other))
+    {
+      Base::_check_template_params();
+      if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
+        Base::_set_noalias(other);
+    }
+    Array& operator=(Array&& other)
+    {
+      other.swap(*this);
+      return *this;
+    }
+#endif
+
     /** Constructs a vector or row-vector with given dimension. \only_for_vectors
       *
      * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
@@ -210,7 +225,7 @@ class Array
       : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
     {
       Base::_check_template_params();
-      Base::resize(other.rows(), other.cols());
+      Base::_resize_to_match(other);
       *this = other;
     }
 
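
The new constructor pair gives Array genuine move semantics whenever EIGEN_HAVE_RVALUE_REFERENCES is defined, i.e. when Eigen detects C++11 support. A sketch of the effect one would expect for a heap-backed, dynamic-size array; the pointer comparison is illustrative rather than an API guarantee:

    #include <utility>
    #include <Eigen/Core>

    int main()
    {
        Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(1000000, 0.0, 1.0);
        const double* buf = a.data();

        // In a C++11 build the move constructor above transfers the heap
        // buffer instead of copying a million doubles.
        Eigen::ArrayXd b = std::move(a);

        return b.data() == buf ? 0 : 1;
    }
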
@@ -46,9 +46,6 @@ template<typename Derived> class ArrayBase
 
     typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;
 
-    using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
-                 typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
-
     typedef typename internal::traits<Derived>::StorageKind StorageKind;
     typedef typename internal::traits<Derived>::Index Index;
     typedef typename internal::traits<Derived>::Scalar Scalar;
@@ -56,6 +53,7 @@ template<typename Derived> class ArrayBase
     typedef typename NumTraits<Scalar>::Real RealScalar;
 
     typedef DenseBase<Derived> Base;
+    using Base::operator*;
     using Base::RowsAtCompileTime;
     using Base::ColsAtCompileTime;
     using Base::SizeAtCompileTime;
@@ -29,6 +29,11 @@ struct traits<ArrayWrapper<ExpressionType> >
   : public traits<typename remove_all<typename ExpressionType::Nested>::type >
 {
   typedef ArrayXpr XprKind;
+  // Let's remove NestByRefBit
+  enum {
+    Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
+    Flags = Flags0 & ~NestByRefBit
+  };
 };
 }
 
@@ -149,6 +154,11 @@ struct traits<MatrixWrapper<ExpressionType> >
   : public traits<typename remove_all<typename ExpressionType::Nested>::type >
 {
   typedef MatrixXpr XprKind;
+  // Let's remove NestByRefBit
+  enum {
+    Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,
+    Flags = Flags0 & ~NestByRefBit
+  };
 };
 }
 
@@ -439,19 +439,26 @@ struct assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling, Ve
   typedef typename Derived1::Index Index;
   static inline void run(Derived1 &dst, const Derived2 &src)
   {
-    typedef packet_traits<typename Derived1::Scalar> PacketTraits;
+    typedef typename Derived1::Scalar Scalar;
+    typedef packet_traits<Scalar> PacketTraits;
     enum {
       packetSize = PacketTraits::size,
       alignable = PacketTraits::AlignedOnScalar,
-      dstAlignment = alignable ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
+      dstIsAligned = assign_traits<Derived1,Derived2>::DstIsAligned,
+      dstAlignment = alignable ? Aligned : int(dstIsAligned),
       srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
     };
+    const Scalar *dst_ptr = &dst.coeffRef(0,0);
+    if((!bool(dstIsAligned)) && (size_t(dst_ptr) % sizeof(Scalar))>0)
+    {
+      // the pointer is not aligned-on scalar, so alignment is not possible
+      return assign_impl<Derived1,Derived2,DefaultTraversal,NoUnrolling>::run(dst, src);
+    }
     const Index packetAlignedMask = packetSize - 1;
     const Index innerSize = dst.innerSize();
     const Index outerSize = dst.outerSize();
     const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
-    Index alignedStart = ((!alignable) || assign_traits<Derived1,Derived2>::DstIsAligned) ? 0
-                       : internal::first_aligned(&dst.coeffRef(0,0), innerSize);
+    Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 0 : internal::first_aligned(dst_ptr, innerSize);
 
     for(Index outer = 0; outer < outerSize; ++outer)
     {
@@ -66,8 +66,9 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
                        : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
                        : int(traits<XprType>::MaxColsAtCompileTime),
     XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
-    IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
-               : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
+    IsDense = is_same<StorageKind,Dense>::value,
+    IsRowMajor = (IsDense&&MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
+               : (IsDense&&MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
                : XprTypeIsRowMajor,
     HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
     InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
@@ -81,7 +82,7 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
                        && (InnerStrideAtCompileTime == 1)
                        ? PacketAccessBit : 0,
     MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0,
-    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
+    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (traits<XprType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
     FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
     FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
     Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
@@ -29,9 +29,9 @@ struct all_unroller
 };
 
 template<typename Derived>
-struct all_unroller<Derived, 1>
+struct all_unroller<Derived, 0>
 {
-  static inline bool run(const Derived &mat) { return mat.coeff(0, 0); }
+  static inline bool run(const Derived &/*mat*/) { return true; }
 };
 
 template<typename Derived>
@@ -55,9 +55,9 @@ struct any_unroller
 };
 
 template<typename Derived>
-struct any_unroller<Derived, 1>
+struct any_unroller<Derived, 0>
 {
-  static inline bool run(const Derived &mat) { return mat.coeff(0, 0); }
+  static inline bool run(const Derived & /*mat*/) { return false; }
 };
 
 template<typename Derived>
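
The unroller base case moves from size 1 to size 0, which pins down the semantics of reductions over empty fixed-size expressions: all() becomes vacuously true and any() vacuously false, without ever reading coeff(0,0) of a zero-element object. A small sketch of exactly this code path:

    #include <cassert>
    #include <Eigen/Core>

    int main()
    {
        // Fixed-size, zero-element array: the <Derived, 0> specializations
        // above make these reductions well defined.
        Eigen::Array<float, 0, 1> empty;
        assert((empty > 0.0f).all());   // vacuously true
        assert(!(empty > 0.0f).any());  // vacuously false

        Eigen::Array3f v(1.0f, 2.0f, 3.0f);
        assert((v > 0.0f).all());
        assert((v > 2.5f).any());
        return 0;
    }
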
@@ -43,6 +43,17 @@ struct CommaInitializer
     m_xpr.block(0, 0, other.rows(), other.cols()) = other;
   }
 
+  /* Copy/Move constructor which transfers ownership. This is crucial in
+   * absence of return value optimization to avoid assertions during destruction. */
+  // FIXME in C++11 mode this could be replaced by a proper RValue constructor
+  inline CommaInitializer(const CommaInitializer& o)
+  : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {
+    // Mark original object as finished. In absence of R-value references we need to const_cast:
+    const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();
+    const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();
+    const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;
+  }
+
   /* inserts a scalar value in the target matrix */
   CommaInitializer& operator,(const Scalar& s)
   {
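
The new copy constructor matters because operator<< hands a CommaInitializer back by value; when the compiler does not elide that copy, the temporary's destructor used to assert on a half-initialized matrix. Ownership now transfers to the copy, so ordinary comma-initialization is safe either way:

    #include <Eigen/Core>

    int main()
    {
        // operator<< returns a CommaInitializer by value; the constructor
        // added above marks the copied-from temporary as finished so its
        // destructor does not assert mid-initialization.
        Eigen::Matrix2d m;
        m << 1.0, 2.0,
             3.0, 4.0;
        return m(1, 1) == 4.0 ? 0 : 1;
    }
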
@@ -81,7 +81,8 @@ struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
           )
         ),
     Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
-    CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits<BinaryOp>::Cost
+    Cost0 = EIGEN_ADD_COST(LhsCoeffReadCost,RhsCoeffReadCost),
+    CoeffReadCost = EIGEN_ADD_COST(Cost0,functor_traits<BinaryOp>::Cost)
   };
 };
 } // end namespace internal
@@ -47,7 +47,7 @@ struct traits<CwiseUnaryOp<UnaryOp, XprType> >
     Flags = _XprTypeNested::Flags & (
       HereditaryBits | LinearAccessBit | AlignedBit
       | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
-    CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits<UnaryOp>::Cost
+    CoeffReadCost = EIGEN_ADD_COST(_XprTypeNested::CoeffReadCost, functor_traits<UnaryOp>::Cost)
   };
 };
 }
@@ -38,7 +38,7 @@ struct traits<CwiseUnaryView<ViewOp, MatrixType> >
   typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested;
   enum {
     Flags = (traits<_MatrixTypeNested>::Flags & (HereditaryBits | LvalueBit | LinearAccessBit | DirectAccessBit)),
-    CoeffReadCost = traits<_MatrixTypeNested>::CoeffReadCost + functor_traits<ViewOp>::Cost,
+    CoeffReadCost = EIGEN_ADD_COST(traits<_MatrixTypeNested>::CoeffReadCost, functor_traits<ViewOp>::Cost),
     MatrixTypeInnerStride = inner_stride_at_compile_time<MatrixType>::ret,
     // need to cast the sizeof's from size_t to int explicitly, otherwise:
     // "error: no integral type can represent all of the enumerator values
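
All three traits hunks replace a plain + with EIGEN_ADD_COST when summing per-coefficient read costs. The motivation, as I read it, is that a cost can be the sentinel value Dynamic, and naive integer addition of sentinels overflows or yields a meaningless total; the macro propagates the sentinel instead. A standalone sketch of that behavior (an illustration of the idea, not a copy of Eigen's actual macro body):

    #include <cstdio>

    // Stand-in for the idea behind EIGEN_ADD_COST: treat the "unknown"
    // sentinel as absorbing so cost sums never overflow or lose meaning.
    const int Dynamic = -1;

    inline int add_cost(int a, int b)
    {
        return (a == Dynamic || b == Dynamic) ? Dynamic : a + b;
    }

    int main()
    {
        std::printf("%d\n", add_cost(3, 5));        // 8
        std::printf("%d\n", add_cost(3, Dynamic));  // Dynamic: unknown cost
        return 0;
    }
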
@@ -40,15 +40,14 @@ static inline void check_DenseIndex_is_signed() {
   */
 template<typename Derived> class DenseBase
 #ifndef EIGEN_PARSED_BY_DOXYGEN
-  : public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
-                                            typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>
+  : public internal::special_scalar_op_base<Derived, typename internal::traits<Derived>::Scalar,
+                                            typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
+                                            DenseCoeffsBase<Derived> >
 #else
   : public DenseCoeffsBase<Derived>
 #endif // not EIGEN_PARSED_BY_DOXYGEN
 {
   public:
-    using internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
-                  typename NumTraits<typename internal::traits<Derived>::Scalar>::Real>::operator*;
 
     class InnerIterator;
 
@@ -63,8 +62,9 @@ template<typename Derived> class DenseBase
     typedef typename internal::traits<Derived>::Scalar Scalar;
     typedef typename internal::packet_traits<Scalar>::type PacketScalar;
     typedef typename NumTraits<Scalar>::Real RealScalar;
+    typedef internal::special_scalar_op_base<Derived,Scalar,RealScalar, DenseCoeffsBase<Derived> > Base;
 
-    typedef DenseCoeffsBase<Derived> Base;
+    using Base::operator*;
     using Base::derived;
     using Base::const_cast_derived;
     using Base::rows;
@@ -183,10 +183,6 @@ template<typename Derived> class DenseBase
     /** \returns the number of nonzero coefficients which is in practice the number
       * of stored coefficients. */
     inline Index nonZeros() const { return size(); }
-    /** \returns true if either the number of rows or the number of columns is equal to 1.
-      * In other words, this function returns
-      * \code rows()==1 || cols()==1 \endcode
-      * \sa rows(), cols(), IsVectorAtCompileTime. */
 
     /** \returns the outer size.
       *
@@ -266,11 +262,13 @@ template<typename Derived> class DenseBase
     template<typename OtherDerived>
     Derived& operator=(const ReturnByValue<OtherDerived>& func);
 
-#ifndef EIGEN_PARSED_BY_DOXYGEN
-    /** Copies \a other into *this without evaluating other. \returns a reference to *this. */
+    /** \internal Copies \a other into *this without evaluating other. \returns a reference to *this. */
     template<typename OtherDerived>
     Derived& lazyAssign(const DenseBase<OtherDerived>& other);
-#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+    /** \internal Evaluates \a other into *this. \returns a reference to *this. */
+    template<typename OtherDerived>
+    Derived& lazyAssign(const ReturnByValue<OtherDerived>& other);
 
     CommaInitializer<Derived> operator<< (const Scalar& s);
 
@@ -462,8 +460,10 @@ template<typename Derived> class DenseBase
     template<int p> RealScalar lpNorm() const;
 
     template<int RowFactor, int ColFactor>
-    const Replicate<Derived,RowFactor,ColFactor> replicate() const;
-    const Replicate<Derived,Dynamic,Dynamic> replicate(Index rowFacor,Index colFactor) const;
+    inline const Replicate<Derived,RowFactor,ColFactor> replicate() const;
+
+    typedef Replicate<Derived,Dynamic,Dynamic> ReplicateReturnType;
+    inline const ReplicateReturnType replicate(Index rowFacor,Index colFactor) const;
 
     typedef Reverse<Derived, BothDirections> ReverseReturnType;
     typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
@@ -24,6 +24,14 @@ namespace internal {
 
 struct constructor_without_unaligned_array_assert {};
 
+template<typename T, int Size> void check_static_allocation_size()
+{
+  // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit
+  #if EIGEN_STACK_ALLOCATION_LIMIT
+  EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+  #endif
+}
+
 /** \internal
   * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned:
   * to 16 bytes boundary if the total size is a multiple of 16 bytes.
@@ -38,12 +46,12 @@ struct plain_array
 
   plain_array()
   {
-    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+    check_static_allocation_size<T,Size>();
   }
 
   plain_array(constructor_without_unaligned_array_assert)
  {
-    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+    check_static_allocation_size<T,Size>();
  }
};
@@ -76,12 +84,12 @@ struct plain_array<T, Size, MatrixOrArrayOptions, 16>
   plain_array()
   {
     EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf);
-    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+    check_static_allocation_size<T,Size>();
   }
 
   plain_array(constructor_without_unaligned_array_assert)
   {
-    EIGEN_STATIC_ASSERT(Size * sizeof(T) <= 128 * 128 * 8, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
+    check_static_allocation_size<T,Size>();
   }
 };
 
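
The two plain_array hunks route the old hard-coded bound (128 * 128 * 8 = 131072 bytes) through check_static_allocation_size<T,Size>(), which honors EIGEN_STACK_ALLOCATION_LIMIT and skips the check entirely when that limit is defined to 0. Assuming the default definition is guarded so user code can override it (as in later Eigen releases), a build can raise the cap before including Eigen:

    // Raise the compile-time cap on stack-allocated fixed-size objects;
    // defining it to 0 would disable the check entirely.
    #define EIGEN_STACK_ALLOCATION_LIMIT (1024 * 1024)
    #include <Eigen/Core>

    int main()
    {
        // 200*200*8 = 320000 bytes of automatic storage: over the old
        // hard-coded limit, but under the limit configured above, so the
        // static assert stays quiet. (Make sure the thread stack is big
        // enough at runtime.)
        Eigen::Matrix<double, 200, 200> m;
        m.setZero();
        return static_cast<int>(m(0, 0));
    }
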
@@ -114,33 +122,41 @@ template<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseSt
 {
     internal::plain_array<T,Size,_Options> m_data;
   public:
-    inline DenseStorage() {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+    DenseStorage() {}
+    DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(internal::constructor_without_unaligned_array_assert()) {}
-    inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
-    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
-    static inline DenseIndex rows(void) {return _Rows;}
-    static inline DenseIndex cols(void) {return _Cols;}
-    inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
-    inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
-    inline const T *data() const { return m_data.array; }
-    inline T *data() { return m_data.array; }
+    DenseStorage(const DenseStorage& other) : m_data(other.m_data) {}
+    DenseStorage& operator=(const DenseStorage& other)
+    {
+      if (this != &other) m_data = other.m_data;
+      return *this;
+    }
+    DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+    void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }
+    static DenseIndex rows(void) {return _Rows;}
+    static DenseIndex cols(void) {return _Cols;}
+    void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+    void resize(DenseIndex,DenseIndex,DenseIndex) {}
+    const T *data() const { return m_data.array; }
+    T *data() { return m_data.array; }
 };
 
 // null matrix
 template<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>
 {
   public:
-    inline DenseStorage() {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert) {}
-    inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
-    inline void swap(DenseStorage& ) {}
-    static inline DenseIndex rows(void) {return _Rows;}
-    static inline DenseIndex cols(void) {return _Cols;}
-    inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
-    inline void resize(DenseIndex,DenseIndex,DenseIndex) {}
-    inline const T *data() const { return 0; }
-    inline T *data() { return 0; }
+    DenseStorage() {}
+    DenseStorage(internal::constructor_without_unaligned_array_assert) {}
+    DenseStorage(const DenseStorage&) {}
+    DenseStorage& operator=(const DenseStorage&) { return *this; }
+    DenseStorage(DenseIndex,DenseIndex,DenseIndex) {}
+    void swap(DenseStorage& ) {}
+    static DenseIndex rows(void) {return _Rows;}
+    static DenseIndex cols(void) {return _Cols;}
+    void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {}
+    void resize(DenseIndex,DenseIndex,DenseIndex) {}
+    const T *data() const { return 0; }
+    T *data() { return 0; }
 };
 
 // more specializations for null matrices; these are necessary to resolve ambiguities
@@ -160,18 +176,29 @@ template<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic
     DenseIndex m_rows;
     DenseIndex m_cols;
   public:
-    inline DenseStorage() : m_rows(0), m_cols(0) {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+    DenseStorage() : m_rows(0), m_cols(0) {}
+    DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {}
-    inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
-    inline void swap(DenseStorage& other)
+    DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {}
+    DenseStorage& operator=(const DenseStorage& other)
+    {
+      if (this != &other)
+      {
+        m_data = other.m_data;
+        m_rows = other.m_rows;
+        m_cols = other.m_cols;
+      }
+      return *this;
+    }
+    DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) : m_rows(nbRows), m_cols(nbCols) {}
+    void swap(DenseStorage& other)
     { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
-    inline DenseIndex rows() const {return m_rows;}
-    inline DenseIndex cols() const {return m_cols;}
-    inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
-    inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
-    inline const T *data() const { return m_data.array; }
-    inline T *data() { return m_data.array; }
+    DenseIndex rows() const {return m_rows;}
+    DenseIndex cols() const {return m_cols;}
+    void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+    void resize(DenseIndex, DenseIndex nbRows, DenseIndex nbCols) { m_rows = nbRows; m_cols = nbCols; }
+    const T *data() const { return m_data.array; }
+    T *data() { return m_data.array; }
 };
 
 // dynamic-size matrix with fixed-size storage and fixed width
@@ -180,17 +207,27 @@ template<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Si
     internal::plain_array<T,Size,_Options> m_data;
     DenseIndex m_rows;
   public:
-    inline DenseStorage() : m_rows(0) {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+    DenseStorage() : m_rows(0) {}
+    DenseStorage(internal::constructor_without_unaligned_array_assert)
      : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}
-    inline DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
-    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
-    inline DenseIndex rows(void) const {return m_rows;}
-    inline DenseIndex cols(void) const {return _Cols;}
-    inline void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
-    inline void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
-    inline const T *data() const { return m_data.array; }
-    inline T *data() { return m_data.array; }
+    DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows) {}
+    DenseStorage& operator=(const DenseStorage& other)
+    {
+      if (this != &other)
+      {
+        m_data = other.m_data;
+        m_rows = other.m_rows;
+      }
+      return *this;
+    }
+    DenseStorage(DenseIndex, DenseIndex nbRows, DenseIndex) : m_rows(nbRows) {}
+    void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
+    DenseIndex rows(void) const {return m_rows;}
+    DenseIndex cols(void) const {return _Cols;}
+    void conservativeResize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+    void resize(DenseIndex, DenseIndex nbRows, DenseIndex) { m_rows = nbRows; }
+    const T *data() const { return m_data.array; }
+    T *data() { return m_data.array; }
 };
 
 // dynamic-size matrix with fixed-size storage and fixed height
@@ -199,17 +236,27 @@ template<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Si
     internal::plain_array<T,Size,_Options> m_data;
     DenseIndex m_cols;
   public:
-    inline DenseStorage() : m_cols(0) {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+    DenseStorage() : m_cols(0) {}
+    DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}
-    inline DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
-    inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
-    inline DenseIndex rows(void) const {return _Rows;}
-    inline DenseIndex cols(void) const {return m_cols;}
-    inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
-    inline void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
-    inline const T *data() const { return m_data.array; }
-    inline T *data() { return m_data.array; }
+    DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_cols(other.m_cols) {}
+    DenseStorage& operator=(const DenseStorage& other)
+    {
+      if (this != &other)
+      {
+        m_data = other.m_data;
+        m_cols = other.m_cols;
+      }
+      return *this;
+    }
+    DenseStorage(DenseIndex, DenseIndex, DenseIndex nbCols) : m_cols(nbCols) {}
+    void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
+    DenseIndex rows(void) const {return _Rows;}
+    DenseIndex cols(void) const {return m_cols;}
+    void conservativeResize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+    void resize(DenseIndex, DenseIndex, DenseIndex nbCols) { m_cols = nbCols; }
+    const T *data() const { return m_data.array; }
+    T *data() { return m_data.array; }
 };
 
 // purely dynamic matrix.
@@ -219,18 +266,35 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
     DenseIndex m_rows;
     DenseIndex m_cols;
   public:
-    inline DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
-    inline DenseStorage(internal::constructor_without_unaligned_array_assert)
+    DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}
+    DenseStorage(internal::constructor_without_unaligned_array_assert)
       : m_data(0), m_rows(0), m_cols(0) {}
-    inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+    DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
       : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows), m_cols(nbCols)
     { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
-    inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
-    inline void swap(DenseStorage& other)
+#ifdef EIGEN_HAVE_RVALUE_REFERENCES
+    DenseStorage(DenseStorage&& other)
+      : m_data(std::move(other.m_data))
+      , m_rows(std::move(other.m_rows))
+      , m_cols(std::move(other.m_cols))
+    {
+      other.m_data = nullptr;
+    }
+    DenseStorage& operator=(DenseStorage&& other)
+    {
+      using std::swap;
+      swap(m_data, other.m_data);
+      swap(m_rows, other.m_rows);
+      swap(m_cols, other.m_cols);
+      return *this;
+    }
+#endif
+    ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }
+    void swap(DenseStorage& other)
     { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
-    inline DenseIndex rows(void) const {return m_rows;}
-    inline DenseIndex cols(void) const {return m_cols;}
-    inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
+    DenseIndex rows(void) const {return m_rows;}
+    DenseIndex cols(void) const {return m_cols;}
+    void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex nbCols)
     {
       m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);
       m_rows = nbRows;
@@ -250,8 +314,11 @@ template<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynam
       m_rows = nbRows;
       m_cols = nbCols;
     }
-    inline const T *data() const { return m_data; }
-    inline T *data() { return m_data; }
+    const T *data() const { return m_data; }
+    T *data() { return m_data; }
+  private:
+    DenseStorage(const DenseStorage&);
+    DenseStorage& operator=(const DenseStorage&);
 };
 
 // matrix with dynamic width and fixed height (so that matrix has dynamic size).
@ -260,15 +327,30 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
|
||||
T *m_data;
|
||||
DenseIndex m_cols;
|
||||
public:
|
||||
inline DenseStorage() : m_data(0), m_cols(0) {}
|
||||
inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
|
||||
inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
|
||||
DenseStorage() : m_data(0), m_cols(0) {}
|
||||
DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}
|
||||
DenseStorage(DenseIndex size, DenseIndex, DenseIndex nbCols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(nbCols)
|
||||
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
|
||||
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
|
||||
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
|
||||
static inline DenseIndex rows(void) {return _Rows;}
|
||||
inline DenseIndex cols(void) const {return m_cols;}
|
||||
inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
|
||||
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
|
||||
DenseStorage(DenseStorage&& other)
|
||||
: m_data(std::move(other.m_data))
|
||||
, m_cols(std::move(other.m_cols))
|
||||
{
|
||||
other.m_data = nullptr;
|
||||
}
|
||||
DenseStorage& operator=(DenseStorage&& other)
|
||||
{
|
||||
using std::swap;
|
||||
swap(m_data, other.m_data);
|
||||
swap(m_cols, other.m_cols);
|
||||
return *this;
|
||||
}
|
||||
#endif
|
||||
~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }
|
||||
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
|
||||
static DenseIndex rows(void) {return _Rows;}
|
||||
DenseIndex cols(void) const {return m_cols;}
|
||||
void conservativeResize(DenseIndex size, DenseIndex, DenseIndex nbCols)
|
||||
{
|
||||
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);
|
||||
m_cols = nbCols;
|
||||
@ -286,8 +368,11 @@ template<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Ro
|
||||
}
|
||||
m_cols = nbCols;
|
||||
}
|
||||
inline const T *data() const { return m_data; }
|
||||
inline T *data() { return m_data; }
|
||||
const T *data() const { return m_data; }
|
||||
T *data() { return m_data; }
|
||||
private:
|
||||
DenseStorage(const DenseStorage&);
|
||||
DenseStorage& operator=(const DenseStorage&);
|
||||
};
|
||||
|
||||
// matrix with dynamic height and fixed width (so that matrix has dynamic size).
|
||||
@ -296,15 +381,30 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
|
||||
T *m_data;
|
||||
DenseIndex m_rows;
|
||||
public:
|
||||
inline DenseStorage() : m_data(0), m_rows(0) {}
|
||||
inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
|
||||
inline DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
|
||||
DenseStorage() : m_data(0), m_rows(0) {}
|
||||
DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}
|
||||
DenseStorage(DenseIndex size, DenseIndex nbRows, DenseIndex) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(nbRows)
|
||||
{ EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN }
|
||||
inline ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
|
||||
inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
|
||||
inline DenseIndex rows(void) const {return m_rows;}
|
||||
static inline DenseIndex cols(void) {return _Cols;}
|
||||
inline void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
|
||||
#ifdef EIGEN_HAVE_RVALUE_REFERENCES
|
||||
DenseStorage(DenseStorage&& other)
|
||||
: m_data(std::move(other.m_data))
|
||||
, m_rows(std::move(other.m_rows))
|
||||
{
|
||||
other.m_data = nullptr;
|
||||
}
|
||||
DenseStorage& operator=(DenseStorage&& other)
|
||||
{
|
||||
using std::swap;
|
||||
swap(m_data, other.m_data);
|
||||
swap(m_rows, other.m_rows);
|
||||
return *this;
|
||||
}
|
||||
#endif
|
||||
~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }
|
||||
void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
|
||||
DenseIndex rows(void) const {return m_rows;}
|
||||
static DenseIndex cols(void) {return _Cols;}
|
||||
void conservativeResize(DenseIndex size, DenseIndex nbRows, DenseIndex)
|
||||
{
|
||||
m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);
|
||||
m_rows = nbRows;
|
||||
@ -322,8 +422,11 @@ template<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dyn
|
||||
}
|
||||
m_rows = nbRows;
|
||||
}
|
||||
inline const T *data() const { return m_data; }
|
||||
inline T *data() { return m_data; }
|
||||
const T *data() const { return m_data; }
|
||||
T *data() { return m_data; }
|
||||
private:
|
||||
DenseStorage(const DenseStorage&);
|
||||
DenseStorage& operator=(const DenseStorage&);
|
||||
};
|
||||
|
||||
} // end namespace Eigen
|
||||
|
@ -190,18 +190,18 @@ MatrixBase<Derived>::diagonal() const
|
||||
*
|
||||
* \sa MatrixBase::diagonal(), class Diagonal */
|
||||
template<typename Derived>
|
||||
inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<DynamicIndex>::Type
|
||||
inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
|
||||
MatrixBase<Derived>::diagonal(Index index)
|
||||
{
|
||||
return typename DiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
|
||||
return DiagonalDynamicIndexReturnType(derived(), index);
|
||||
}
|
||||
|
||||
/** This is the const version of diagonal(Index). */
|
||||
template<typename Derived>
|
||||
inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<DynamicIndex>::Type
|
||||
inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
|
||||
MatrixBase<Derived>::diagonal(Index index) const
|
||||
{
|
||||
return typename ConstDiagonalIndexReturnType<DynamicIndex>::Type(derived(), index);
|
||||
return ConstDiagonalDynamicIndexReturnType(derived(), index);
|
||||
}
|
||||
|
||||
/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
|
||||
|
@ -34,8 +34,9 @@ struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
|
||||
_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
|
||||
_LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0,
|
||||
|
||||
Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0) | AlignedBit,//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
|
||||
CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
|
||||
Flags = ((HereditaryBits|_LinearAccessMask|AlignedBit) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
|
||||
Cost0 = EIGEN_ADD_COST(NumTraits<Scalar>::MulCost, MatrixType::CoeffReadCost),
|
||||
CoeffReadCost = EIGEN_ADD_COST(Cost0,DiagonalType::DiagonalVectorType::CoeffReadCost)
|
||||
};
|
||||
};
|
||||
}
|
||||
|
@ -126,36 +126,6 @@ Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** replaces \c *this by \c *this * \a other.
|
||||
*
|
||||
* \returns a reference to \c *this
|
||||
*/
|
||||
template<typename Derived>
|
||||
template<typename OtherDerived>
|
||||
inline Derived&
|
||||
MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
|
||||
{
|
||||
other.derived().applyThisOnTheRight(derived());
|
||||
return derived();
|
||||
}
|
||||
|
||||
/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
|
||||
*/
|
||||
template<typename Derived>
|
||||
template<typename OtherDerived>
|
||||
inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
|
||||
{
|
||||
other.derived().applyThisOnTheRight(derived());
|
||||
}
|
||||
|
||||
/** replaces \c *this by \c *this * \a other. */
|
||||
template<typename Derived>
|
||||
template<typename OtherDerived>
|
||||
inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
|
||||
{
|
||||
other.derived().applyThisOnTheLeft(derived());
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_EIGENBASE_H
|
||||
|
@ -259,6 +259,47 @@ template<> struct functor_traits<scalar_boolean_or_op> {
|
||||
};
|
||||
};
|
||||
|
||||
/** \internal
|
||||
* \brief Template functors for comparison of two scalars
|
||||
* \todo Implement packet-comparisons
|
||||
*/
|
||||
template<typename Scalar, ComparisonName cmp> struct scalar_cmp_op;
|
||||
|
||||
template<typename Scalar, ComparisonName cmp>
|
||||
struct functor_traits<scalar_cmp_op<Scalar, cmp> > {
|
||||
enum {
|
||||
Cost = NumTraits<Scalar>::AddCost,
|
||||
PacketAccess = false
|
||||
};
|
||||
};
|
||||
|
||||
template<ComparisonName Cmp, typename Scalar>
|
||||
struct result_of<scalar_cmp_op<Scalar, Cmp>(Scalar,Scalar)> {
|
||||
typedef bool type;
|
||||
};
|
||||
|
||||
|
||||
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_EQ> {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
|
||||
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a==b;}
|
||||
};
|
||||
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LT> {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
|
||||
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<b;}
|
||||
};
|
||||
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LE> {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
|
||||
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<=b;}
|
||||
};
|
||||
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_UNORD> {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
|
||||
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return !(a<=b || b<=a);}
|
||||
};
|
||||
template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_NEQ> {
|
||||
EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
|
||||
EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a!=b;}
|
||||
};
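
A minimal sketch of how these comparison functors behave; scalar_cmp_op and the cmp_* names live in Eigen::internal, so this exercises internal API and is illustration only:

#include <Eigen/Core>

int main()
{
  Eigen::internal::scalar_cmp_op<float, Eigen::internal::cmp_LT>  lt;   // a < b
  Eigen::internal::scalar_cmp_op<float, Eigen::internal::cmp_NEQ> neq;  // a != b
  bool a = lt(1.0f, 2.0f);   // true
  bool b = neq(3.0f, 3.0f);  // false
  return (a && !b) ? 0 : 1;
}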

// unary functors:

/** \internal
@ -589,7 +630,7 @@ struct linspaced_op_impl<Scalar,true>

template<typename Index>
EIGEN_STRONG_INLINE const Packet packetOp(Index i) const
{ return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(i),m_interPacket))); }
{ return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1<Packet>(Scalar(i)),m_interPacket))); }

const Scalar m_low;
const Scalar m_step;
@ -609,7 +650,7 @@ template <typename Scalar, bool RandomAccess> struct functor_traits< linspaced_o
template <typename Scalar, bool RandomAccess> struct linspaced_op
{
typedef typename packet_traits<Scalar>::type Packet;
linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {}
linspaced_op(const Scalar& low, const Scalar& high, DenseIndex num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1))) {}

template<typename Index>
EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); }
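
The Scalar(i) and Scalar(num_steps-1) casts fix mixed integer/floating-point arithmetic when Index is an integral type. The public entry point for this functor is LinSpaced; a minimal sketch:

#include <Eigen/Core>
#include <iostream>

int main()
{
  // five evenly spaced values: 0 0.25 0.5 0.75 1
  Eigen::VectorXf v = Eigen::VectorXf::LinSpaced(5, 0.0f, 1.0f);
  std::cout << v.transpose() << std::endl;
  return 0;
}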

@ -232,7 +232,7 @@ EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest&
// FIXME not very good if rhs is real and lhs complex while alpha is real too
const Index cols = dest.cols();
for (Index j=0; j<cols; ++j)
func(dest.col(j), prod.rhs().coeff(j) * prod.lhs());
func(dest.col(j), prod.rhs().coeff(0,j) * prod.lhs());
}

// Row major
@ -243,7 +243,7 @@ EIGEN_DONT_INLINE void outer_product_selector_run(const ProductType& prod, Dest&
// FIXME not very good if lhs is real and rhs complex while alpha is real too
const Index rows = dest.rows();
for (Index i=0; i<rows; ++i)
func(dest.row(i), prod.lhs().coeff(i) * prod.rhs());
func(dest.row(i), prod.lhs().coeff(i,0) * prod.rhs());
}

template<typename Lhs, typename Rhs>
@ -257,7 +257,7 @@ template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, OuterProduct>
: public ProductBase<GeneralProduct<Lhs,Rhs,OuterProduct>, Lhs, Rhs>
{
template<typename T> struct IsRowMajor : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};
template<typename T> struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};

public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
@ -281,22 +281,22 @@ class GeneralProduct<Lhs, Rhs, OuterProduct>

template<typename Dest>
inline void evalTo(Dest& dest) const {
internal::outer_product_selector_run(*this, dest, set(), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, set(), is_row_major<Dest>());
}

template<typename Dest>
inline void addTo(Dest& dest) const {
internal::outer_product_selector_run(*this, dest, add(), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, add(), is_row_major<Dest>());
}

template<typename Dest>
inline void subTo(Dest& dest) const {
internal::outer_product_selector_run(*this, dest, sub(), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, sub(), is_row_major<Dest>());
}

template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
{
internal::outer_product_selector_run(*this, dest, adds(alpha), IsRowMajor<Dest>());
internal::outer_product_selector_run(*this, dest, adds(alpha), is_row_major<Dest>());
}
};

@ -425,15 +425,18 @@ template<> struct gemv_selector<OnTheRight,ColMajor,true>
ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs())
* RhsBlasTraits::extractScalarFactor(prod.rhs());

// make sure Dest is a compile-time vector type (bug 1166)
typedef typename conditional<Dest::IsVectorAtCompileTime, Dest, typename Dest::ColXpr>::type ActualDest;

enum {
// FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
// on the other hand, it is good for the cache to pack the vector anyway...
EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime==1),
ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
MightCannotUseDest = (ActualDest::InnerStrideAtCompileTime!=1) || ComplexByReal
};

gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;
gemv_static_vector_if<ResScalar,ActualDest::SizeAtCompileTime,ActualDest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;

bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;
@ -522,7 +525,7 @@ template<> struct gemv_selector<OnTheRight,RowMajor,true>
actualLhs.rows(), actualLhs.cols(),
actualLhs.data(), actualLhs.outerStride(),
actualRhsPtr, 1,
dest.data(), dest.innerStride(),
dest.data(), dest.col(0).innerStride(), //NOTE if dest is not a vector at compile-time, then dest.innerStride() might be wrong. (bug 1166)
actualAlpha);
}
};

@ -185,21 +185,22 @@ std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat&
explicit_precision = fmt.precision;
}

std::streamsize old_precision = 0;
if(explicit_precision) old_precision = s.precision(explicit_precision);

bool align_cols = !(fmt.flags & DontAlignCols);
if(align_cols)
{
// compute the largest width
for(Index j = 1; j < m.cols(); ++j)
for(Index j = 0; j < m.cols(); ++j)
for(Index i = 0; i < m.rows(); ++i)
{
std::stringstream sstr;
if(explicit_precision) sstr.precision(explicit_precision);
sstr.copyfmt(s);
sstr << m.coeff(i,j);
width = std::max<Index>(width, Index(sstr.str().length()));
}
}
std::streamsize old_precision = 0;
if(explicit_precision) old_precision = s.precision(explicit_precision);
s << fmt.matPrefix;
for(Index i = 0; i < m.rows(); ++i)
{
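
Starting the width scan at j = 0 means the first column now participates in the alignment computation. A short usage sketch of the IOFormat front end that drives print_matrix:

#include <Eigen/Core>
#include <iostream>

int main()
{
  Eigen::MatrixXd m(2, 2);
  m << 1, 22.5, 333, 4;
  // 3-digit precision, aligned columns, bracketed rows
  Eigen::IOFormat fmt(3, 0, ", ", "\n", "[", "]");
  std::cout << m.format(fmt) << std::endl;
  return 0;
}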

@ -123,7 +123,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());
}

inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
{
EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
checkSanity();
@ -149,6 +149,10 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
checkSanity();
}

#ifdef EIGEN_MAPBASE_PLUGIN
#include EIGEN_MAPBASE_PLUGIN
#endif

protected:

void checkSanity() const
@ -157,7 +161,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
internal::inner_stride_at_compile_time<Derived>::ret==1),
PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % 16) == 0)
&& "data is not aligned");
&& "input pointer is not aligned on a 16 byte boundary");
}
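
The reworded assertion fires when an Aligned Map is built over a pointer that is not 16-byte aligned; a minimal sketch of the passing case:

#include <Eigen/Core>

int main()
{
  EIGEN_ALIGN16 float data[4] = {1.f, 2.f, 3.f, 4.f};
  // data is 16-byte aligned, so this Aligned Map passes checkSanity()
  Eigen::Map<Eigen::Vector4f, Eigen::Aligned> v(data);
  return v.size() == 4 ? 0 : 1;
}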

PointerType m_data;
@ -168,6 +172,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
template<typename Derived> class MapBase<Derived, WriteAccessors>
: public MapBase<Derived, ReadOnlyAccessors>
{
typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;
public:

typedef MapBase<Derived, ReadOnlyAccessors> Base;
@ -230,13 +235,17 @@ template<typename Derived> class MapBase<Derived, WriteAccessors>

Derived& operator=(const MapBase& other)
{
Base::Base::operator=(other);
ReadOnlyMapBase::Base::operator=(other);
return derived();
}

using Base::Base::operator=;
// In theory we could simply refer to Base::Base::operator=, but MSVC does not like Base::Base,
// see bugs 821 and 920.
using ReadOnlyMapBase::Base::operator=;
};

#undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS

} // end namespace Eigen

#endif // EIGEN_MAPBASE_H

@ -294,7 +294,7 @@ struct hypot_impl
RealScalar _x = abs(x);
RealScalar _y = abs(y);
RealScalar p = (max)(_x, _y);
if(p==RealScalar(0)) return 0;
if(p==RealScalar(0)) return RealScalar(0);
RealScalar q = (min)(_x, _y);
RealScalar qp = q/p;
return p * sqrt(RealScalar(1) + qp*qp);
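
For reference, the scaling here is what keeps the computation overflow-safe: with p = max(|x|,|y|) and q = min(|x|,|y|), hypot(x,y) = p*sqrt(1 + (q/p)^2), and the only squared quantity is q/p <= 1. A standalone sketch of the same idea:

#include <algorithm>
#include <cmath>

// overflow-safe hypot, same scaling as hypot_impl above (sketch)
double safe_hypot(double x, double y)
{
  double ax = std::abs(x), ay = std::abs(y);
  double p = std::max(ax, ay);
  if (p == 0.0) return 0.0;          // both inputs are zero
  double qp = std::min(ax, ay) / p;  // qp <= 1, so qp*qp cannot overflow
  return p * std::sqrt(1.0 + qp * qp);
}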

@ -707,21 +707,21 @@ struct scalar_fuzzy_impl : scalar_fuzzy_default_impl<Scalar, NumTraits<Scalar>::

template<typename Scalar, typename OtherScalar>
inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y,
typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision())
{
return scalar_fuzzy_impl<Scalar>::template isMuchSmallerThan<OtherScalar>(x, y, precision);
}

template<typename Scalar>
inline bool isApprox(const Scalar& x, const Scalar& y,
typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision())
{
return scalar_fuzzy_impl<Scalar>::isApprox(x, y, precision);
}

template<typename Scalar>
inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y,
typename NumTraits<Scalar>::Real precision = NumTraits<Scalar>::dummy_precision())
const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision())
{
return scalar_fuzzy_impl<Scalar>::isApproxOrLessThan(x, y, precision);
}

@ -211,6 +211,21 @@ class Matrix
: Base(internal::constructor_without_unaligned_array_assert())
{ Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }

#ifdef EIGEN_HAVE_RVALUE_REFERENCES
Matrix(Matrix&& other)
: Base(std::move(other))
{
Base::_check_template_params();
if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)
Base::_set_noalias(other);
}
Matrix& operator=(Matrix&& other)
{
other.swap(*this);
return *this;
}
#endif
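
With EIGEN_HAVE_RVALUE_REFERENCES defined (C++11), a dynamic-size Matrix can now hand over its heap buffer instead of copying it; a minimal sketch:

#include <Eigen/Core>
#include <utility>

int main()
{
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(100, 100);
  Eigen::MatrixXd b = std::move(a);  // steals a's buffer; no 100x100 copy
  return b.rows() == 100 ? 0 : 1;    // (fixed-size matrices still copy)
}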

/** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors
*
* Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
@ -304,7 +319,7 @@ class Matrix
: Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
{
Base::_check_template_params();
Base::resize(other.rows(), other.cols());
Base::_resize_to_match(other);
// FIXME/CHECK: isn't *this = other.derived() more efficient. it allows to
// go for pure _set() implementations, right?
*this = other;

@ -159,13 +159,11 @@ template<typename Derived> class MatrixBase
template<typename OtherDerived>
Derived& operator=(const ReturnByValue<OtherDerived>& other);

#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ProductDerived, typename Lhs, typename Rhs>
Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);

template<typename MatrixPower, typename Lhs, typename Rhs>
Derived& lazyAssign(const MatrixPowerProduct<MatrixPower, Lhs,Rhs>& other);
#endif // not EIGEN_PARSED_BY_DOXYGEN

template<typename OtherDerived>
Derived& operator+=(const MatrixBase<OtherDerived>& other);
@ -215,7 +213,7 @@ template<typename Derived> class MatrixBase

typedef Diagonal<Derived> DiagonalReturnType;
DiagonalReturnType diagonal();
typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;
typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;
ConstDiagonalReturnType diagonal() const;

template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };
@ -223,16 +221,12 @@ template<typename Derived> class MatrixBase

template<int Index> typename DiagonalIndexReturnType<Index>::Type diagonal();
template<int Index> typename ConstDiagonalIndexReturnType<Index>::Type diagonal() const;

typedef Diagonal<Derived,DynamicIndex> DiagonalDynamicIndexReturnType;
typedef typename internal::add_const<Diagonal<const Derived,DynamicIndex> >::type ConstDiagonalDynamicIndexReturnType;

// Note: The "MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations.
// On the other hand they confuse MSVC8...
#if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later
typename MatrixBase::template DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
typename MatrixBase::template ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
#else
typename DiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index);
typename ConstDiagonalIndexReturnType<DynamicIndex>::Type diagonal(Index index) const;
#endif
DiagonalDynamicIndexReturnType diagonal(Index index);
ConstDiagonalDynamicIndexReturnType diagonal(Index index) const;

#ifdef EIGEN2_SUPPORT
template<unsigned int Mode> typename internal::eigen2_part_return_type<Derived, Mode>::type part();
@ -446,6 +440,15 @@ template<typename Derived> class MatrixBase
template<typename OtherScalar>
void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);

///////// SparseCore module /////////

template<typename OtherDerived>
EIGEN_STRONG_INLINE const typename SparseMatrixBase<OtherDerived>::template CwiseProductDenseReturnType<Derived>::Type
cwiseProduct(const SparseMatrixBase<OtherDerived> &other) const
{
return other.cwiseProduct(derived());
}

///////// MatrixFunctions module /////////

typedef typename internal::stem_function<Scalar>::type StemFunction;
@ -510,6 +513,51 @@ template<typename Derived> class MatrixBase
{EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}
};


/***************************************************************************
* Implementation of matrix base methods
***************************************************************************/

/** replaces \c *this by \c *this * \a other.
*
* \returns a reference to \c *this
*
* Example: \include MatrixBase_applyOnTheRight.cpp
* Output: \verbinclude MatrixBase_applyOnTheRight.out
*/
template<typename Derived>
template<typename OtherDerived>
inline Derived&
MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
return derived();
}

/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=().
*
* Example: \include MatrixBase_applyOnTheRight.cpp
* Output: \verbinclude MatrixBase_applyOnTheRight.out
*/
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheRight(derived());
}

/** replaces \c *this by \a other * \c *this.
*
* Example: \include MatrixBase_applyOnTheLeft.cpp
* Output: \verbinclude MatrixBase_applyOnTheLeft.out
*/
template<typename Derived>
template<typename OtherDerived>
inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
{
other.derived().applyThisOnTheLeft(derived());
}
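
A minimal sketch of the three entry points implemented above (operator*= and applyOnTheRight multiply on the right; applyOnTheLeft replaces *this by other * *this):

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix2d a, b;
  a << 1, 2, 3, 4;
  b << 0, 1, 1, 0;
  a *= b;                // a = a * b
  a.applyOnTheRight(b);  // equivalent to a *= b
  a.applyOnTheLeft(b);   // a = b * a
  return 0;
}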

} // end namespace Eigen

#endif // EIGEN_MATRIXBASE_H

@ -250,6 +250,35 @@ class PermutationBase : public EigenBase<Derived>
template<typename Other> friend
inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other, const PermutationBase& perm)
{ return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }

/** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation.
*
* This is an O(\c n) procedure allocating a buffer of \c n booleans.
*/
Index determinant() const
{
Index res = 1;
Index n = size();
Matrix<bool,RowsAtCompileTime,1,0,MaxRowsAtCompileTime> mask(n);
mask.fill(false);
Index r = 0;
while(r < n)
{
// search for the next seed
while(r<n && mask[r]) r++;
if(r>=n)
break;
// we got one, let's follow it until we are back to the seed
Index k0 = r++;
mask.coeffRef(k0) = true;
for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k))
{
mask.coeffRef(k) = true;
res = -res;
}
}
return res;
}
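
Since the result is only ever +1 or -1, this is far cheaper than densifying the permutation and taking a matrix determinant. A sketch assuming this patched PermutationBase:

#include <Eigen/Dense>

int main()
{
  Eigen::PermutationMatrix<Eigen::Dynamic> p(3);
  p.setIdentity();                       // even permutation: determinant +1
  p.applyTranspositionOnTheRight(0, 2);  // one swap flips the parity
  return p.determinant() == -1 ? 0 : 1;
}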

protected:

@ -553,8 +582,12 @@ struct permut_matrix_product_retval
template<typename Dest> inline void evalTo(Dest& dst) const
{
const Index n = Side==OnTheLeft ? rows() : cols();

if(is_same<MatrixTypeNestedCleaned,Dest>::value && extract_data(dst) == extract_data(m_matrix))
// FIXME we need an is_same for expression that is not sensitive to constness. For instance
// is_same_xpr<Block<const Matrix>, Block<Matrix> >::value should be true.
if( is_same<MatrixTypeNestedCleaned,Dest>::value
&& blas_traits<MatrixTypeNestedCleaned>::HasUsableDirectAccess
&& blas_traits<Dest>::HasUsableDirectAccess
&& extract_data(dst) == extract_data(m_matrix))
{
// apply the permutation inplace
Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(m_permutation.size());

@ -47,7 +47,10 @@ template<> struct check_rows_cols_for_overflow<Dynamic> {
}
};

template <typename Derived, typename OtherDerived = Derived, bool IsVector = bool(Derived::IsVectorAtCompileTime)> struct conservative_resize_like_impl;
template <typename Derived,
typename OtherDerived = Derived,
bool IsVector = bool(Derived::IsVectorAtCompileTime) && bool(OtherDerived::IsVectorAtCompileTime)>
struct conservative_resize_like_impl;

template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl;

@ -434,6 +437,36 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
}
#endif

#ifdef EIGEN_HAVE_RVALUE_REFERENCES
PlainObjectBase(PlainObjectBase&& other)
: m_storage( std::move(other.m_storage) )
{
}

PlainObjectBase& operator=(PlainObjectBase&& other)
{
using std::swap;
swap(m_storage, other.m_storage);
return *this;
}
#endif

/** Copy constructor */
EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& other)
: m_storage()
{
_check_template_params();
lazyAssign(other);
}

template<typename OtherDerived>
EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase<OtherDerived> &other)
: m_storage()
{
_check_template_params();
lazyAssign(other);
}

EIGEN_STRONG_INLINE PlainObjectBase(Index a_size, Index nbRows, Index nbCols)
: m_storage(a_size, nbRows, nbCols)
{
@ -570,6 +603,8 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
: (rows() == other.rows() && cols() == other.cols())))
&& "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
EIGEN_ONLY_USED_FOR_DEBUG(other);
if(this->size()==0)
resizeLike(other);
#else
resizeLike(other);
#endif
@ -668,8 +703,10 @@private:
enum { ThisConstantIsPrivateInPlainObjectBase };
};

namespace internal {

template <typename Derived, typename OtherDerived, bool IsVector>
struct internal::conservative_resize_like_impl
struct conservative_resize_like_impl
{
typedef typename Derived::Index Index;
static void run(DenseBase<Derived>& _this, Index rows, Index cols)
@ -729,11 +766,14 @@ struct internal::conservative_resize_like_impl
}
};

namespace internal {

// Here, the specialization for vectors inherits from the general matrix case
// to allow calling .conservativeResize(rows,cols) on vectors.
template <typename Derived, typename OtherDerived>
struct conservative_resize_like_impl<Derived,OtherDerived,true>
: conservative_resize_like_impl<Derived,OtherDerived,false>
{
using conservative_resize_like_impl<Derived,OtherDerived,false>::run;

typedef typename Derived::Index Index;
static void run(DenseBase<Derived>& _this, Index size)
{

@ -85,7 +85,14 @@ class ProductBase : public MatrixBase<Derived>

public:

#ifndef EIGEN_NO_MALLOC
typedef typename Base::PlainObject BasePlainObject;
typedef Matrix<Scalar,RowsAtCompileTime==1?1:Dynamic,ColsAtCompileTime==1?1:Dynamic,BasePlainObject::Options> DynPlainObject;
typedef typename internal::conditional<(BasePlainObject::SizeAtCompileTime==Dynamic) || (BasePlainObject::SizeAtCompileTime*int(sizeof(Scalar)) < int(EIGEN_STACK_ALLOCATION_LIMIT)),
BasePlainObject, DynPlainObject>::type PlainObject;
#else
typedef typename Base::PlainObject PlainObject;
#endif

ProductBase(const Lhs& a_lhs, const Rhs& a_rhs)
: m_lhs(a_lhs), m_rhs(a_rhs)
@ -180,7 +187,12 @@ namespace internal {
template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
struct nested<GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
{
typedef PlainObject const& type;
typedef typename GeneralProduct<Lhs,Rhs,Mode>::PlainObject const& type;
};
template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
struct nested<const GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
{
typedef typename GeneralProduct<Lhs,Rhs,Mode>::PlainObject const& type;
};
}


@ -247,8 +247,9 @@ struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
}
};

template<typename Func, typename Derived>
struct redux_impl<Func, Derived, SliceVectorizedTraversal, NoUnrolling>
// NOTE: for SliceVectorizedTraversal we simply bypass unrolling
template<typename Func, typename Derived, int Unrolling>
struct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling>
{
typedef typename Derived::Scalar Scalar;
typedef typename packet_traits<Scalar>::type PacketScalar;

@ -94,24 +94,26 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
typedef _PlainObjectType PlainObjectType;
typedef _StrideType StrideType;
enum {
Options = _Options
Options = _Options,
Flags = traits<Map<_PlainObjectType, _Options, _StrideType> >::Flags | NestByRefBit
};

template<typename Derived> struct match {
enum {
HasDirectAccess = internal::has_direct_access<Derived>::ret,
StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic)
|| int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime)
|| (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1),
OuterStrideMatch = Derived::IsVectorAtCompileTime
|| int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits<Derived>::Flags&AlignedBit)==AlignedBit),
MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch
ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,
MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch
};
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
};


};
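
In practice these match rules are what let one Ref signature bind, without copying, to any expression with direct access, compatible strides, and the same scalar type; a sketch:

#include <Eigen/Core>

// accepts VectorXf, Map<VectorXf>, matrix columns, ... without a copy
float first(const Eigen::Ref<const Eigen::VectorXf>& v) { return v(0); }

int main()
{
  Eigen::MatrixXf m = Eigen::MatrixXf::Random(4, 4);
  Eigen::VectorXf v = Eigen::VectorXf::Ones(4);
  return (first(m.col(1)) == m(0, 1) && first(v) == 1.0f) ? 0 : 1;
}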

template<typename Derived>
@ -171,8 +173,12 @@protected:
}
else
::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols());
::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());

if(Expression::IsVectorAtCompileTime && (!PlainObjectType::IsVectorAtCompileTime) && ((Expression::Flags&RowMajorBit)!=(PlainObjectType::Flags&RowMajorBit)))
::new (&m_stride) StrideBase(expr.innerStride(), StrideType::InnerStrideAtCompileTime==0?0:1);
else
::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),
StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());
}

StrideBase m_stride;
@ -182,7 +188,11 @@protected:
template<typename PlainObjectType, int Options, typename StrideType> class Ref
: public RefBase<Ref<PlainObjectType, Options, StrideType> >
{
private:
typedef internal::traits<Ref> Traits;
template<typename Derived>
inline Ref(const PlainObjectBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0);
public:

typedef RefBase<Ref> Base;
@ -194,17 +204,20 @@ template<typename PlainObjectType, int Options, typename StrideType> class Ref
inline Ref(PlainObjectBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
{
Base::construct(expr);
EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
Base::construct(expr.derived());
}
template<typename Derived>
inline Ref(const DenseBase<Derived>& expr,
typename internal::enable_if<bool(internal::is_lvalue<Derived>::value&&bool(Traits::template match<Derived>::MatchAtCompileTime)),Derived>::type* = 0,
int = Derived::ThisConstantIsPrivateInPlainObjectBase)
typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
#else
template<typename Derived>
inline Ref(DenseBase<Derived>& expr)
#endif
{
EIGEN_STATIC_ASSERT(static_cast<bool>(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
enum { THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY = Derived::ThisConstantIsPrivateInPlainObjectBase};
Base::construct(expr.const_cast_derived());
}

@ -223,13 +236,23 @@ template<typename TPlainObjectType, int Options, typename StrideType> class Ref<
EIGEN_DENSE_PUBLIC_INTERFACE(Ref)

template<typename Derived>
inline Ref(const DenseBase<Derived>& expr)
inline Ref(const DenseBase<Derived>& expr,
typename internal::enable_if<bool(Traits::template match<Derived>::ScalarTypeMatch),Derived>::type* = 0)
{
// std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n";
// std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
// std::cout << int(StrideType::InnerStrideAtCompileTime) << " - " << int(Derived::InnerStrideAtCompileTime) << "\n";
construct(expr.derived(), typename Traits::template match<Derived>::type());
}

inline Ref(const Ref& other) : Base(other) {
// copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
}

template<typename OtherRef>
inline Ref(const RefBase<OtherRef>& other) {
construct(other.derived(), typename Traits::template match<OtherRef>::type());
}

protected:


@ -135,7 +135,7 @@ template<typename MatrixType,int RowFactor,int ColFactor> class Replicate
*/
template<typename Derived>
template<int RowFactor, int ColFactor>
inline const Replicate<Derived,RowFactor,ColFactor>
const Replicate<Derived,RowFactor,ColFactor>
DenseBase<Derived>::replicate() const
{
return Replicate<Derived,RowFactor,ColFactor>(derived());
@ -150,7 +150,7 @@ DenseBase<Derived>::replicate() const
* \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
*/
template<typename Derived>
inline const Replicate<Derived,Dynamic,Dynamic>
const typename DenseBase<Derived>::ReplicateReturnType
DenseBase<Derived>::replicate(Index rowFactor,Index colFactor) const
{
return Replicate<Derived,Dynamic,Dynamic>(derived(),rowFactor,colFactor);

@ -72,6 +72,8 @@ template<typename Derived> class ReturnByValue
const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }
Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }
template<int LoadMode> Unusable& packet(Index) const;
template<int LoadMode> Unusable& packet(Index, Index) const;
#endif
};

@ -83,6 +85,15 @@ Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
return derived();
}

template<typename Derived>
template<typename OtherDerived>
Derived& DenseBase<Derived>::lazyAssign(const ReturnByValue<OtherDerived>& other)
{
other.evalTo(derived());
return derived();
}


} // end namespace Eigen

#endif // EIGEN_RETURNBYVALUE_H

@ -180,15 +180,9 @@ inline Derived& DenseBase<Derived>::operator*=(const Scalar& other)
template<typename Derived>
inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
{
typedef typename internal::conditional<NumTraits<Scalar>::IsInteger,
internal::scalar_quotient_op<Scalar>,
internal::scalar_product_op<Scalar> >::type BinOp;
typedef typename Derived::PlainObject PlainObject;
SelfCwiseBinaryOp<BinOp, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
Scalar actual_other;
if(NumTraits<Scalar>::IsInteger) actual_other = other;
else actual_other = Scalar(1)/other;
tmp = PlainObject::Constant(rows(),cols(), actual_other);
SelfCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, Derived, typename PlainObject::ConstantReturnType> tmp(derived());
tmp = PlainObject::Constant(rows(),cols(), other);
return derived();
}


@ -116,17 +116,17 @@ template<typename Lhs, typename Rhs, int Mode, int Index, int Size>
struct triangular_solver_unroller<Lhs,Rhs,Mode,Index,Size,false> {
enum {
IsLower = ((Mode&Lower)==Lower),
I = IsLower ? Index : Size - Index - 1,
S = IsLower ? 0 : I+1
RowIndex = IsLower ? Index : Size - Index - 1,
S = IsLower ? 0 : RowIndex+1
};
static void run(const Lhs& lhs, Rhs& rhs)
{
if (Index>0)
rhs.coeffRef(I) -= lhs.row(I).template segment<Index>(S).transpose()
rhs.coeffRef(RowIndex) -= lhs.row(RowIndex).template segment<Index>(S).transpose()
.cwiseProduct(rhs.template segment<Index>(S)).sum();

if(!(Mode & UnitDiag))
rhs.coeffRef(I) /= lhs.coeff(I,I);
rhs.coeffRef(RowIndex) /= lhs.coeff(RowIndex,RowIndex);

triangular_solver_unroller<Lhs,Rhs,Mode,Index+1,Size>::run(lhs,rhs);
}
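
This unroller is the fixed-size forward/back substitution kernel; the public route into it is triangularView().solve(). A sketch:

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3d a = Eigen::Matrix3d::Random();
  a.diagonal().array() += 4.0;  // keep the triangular system well-conditioned
  Eigen::Vector3d b = Eigen::Vector3d::Random();
  // solve L*x = b using only the lower triangle of a
  Eigen::Vector3d x = a.triangularView<Eigen::Lower>().solve(b);
  return ((a.triangularView<Eigen::Lower>() * x - b).norm() < 1e-9) ? 0 : 1;
}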

@ -17,16 +17,29 @@ namespace internal {
template<typename ExpressionType, typename Scalar>
inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)
{
Scalar max = bl.cwiseAbs().maxCoeff();
if (max>scale)
using std::max;
Scalar maxCoeff = bl.cwiseAbs().maxCoeff();

if (maxCoeff>scale)
{
ssq = ssq * numext::abs2(scale/max);
scale = max;
invScale = Scalar(1)/scale;
ssq = ssq * numext::abs2(scale/maxCoeff);
Scalar tmp = Scalar(1)/maxCoeff;
if(tmp > NumTraits<Scalar>::highest())
{
invScale = NumTraits<Scalar>::highest();
scale = Scalar(1)/invScale;
}
else
{
scale = maxCoeff;
invScale = tmp;
}
}
// TODO if the max is much much smaller than the current scale,

// TODO if the maxCoeff is much much smaller than the current scale,
// then we can neglect this sub vector
ssq += (bl*invScale).squaredNorm();
if(scale>Scalar(0)) // if scale==0, then bl is 0
ssq += (bl*invScale).squaredNorm();
}
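
The clamping of invScale guards the case where maxCoeff is denormal and 1/maxCoeff would overflow. The public entry point is stableNorm(); a sketch of the behavior it buys:

#include <Eigen/Dense>
#include <cmath>

int main()
{
  Eigen::Vector2d v(1e200, 1e200);
  double plain  = v.norm();        // sqrt(squaredNorm()): overflows to inf here
  double stable = v.stableNorm();  // rescaled accumulation stays finite
  return (std::isinf(plain) && !std::isinf(stable)) ? 0 : 1;
}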

template<typename Derived>

@ -284,7 +284,8 @@ struct inplace_transpose_selector<MatrixType,false> { // non square matrix
* Notice however that this method is only useful if you want to replace a matrix by its own transpose.
* If you just need the transpose of a matrix, use transpose().
*
* \note if the matrix is not square, then \c *this must be a resizable matrix.
* \note if the matrix is not square, then \c *this must be a resizable matrix.
* This excludes (non-square) fixed-size matrices, block-expressions and maps.
*
* \sa transpose(), adjoint(), adjointInPlace() */
template<typename Derived>
@ -315,6 +316,7 @@ inline void DenseBase<Derived>::transposeInPlace()
* If you just need the adjoint of a matrix, use adjoint().
*
* \note if the matrix is not square, then \c *this must be a resizable matrix.
* This excludes (non-square) fixed-size matrices, block-expressions and maps.
*
* \sa transpose(), adjoint(), transposeInPlace() */
template<typename Derived>
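
The added sentence is the operative constraint; a sketch of the resizable case it allows and the fixed-size case it rules out:

#include <Eigen/Core>

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(2, 3);
  m.transposeInPlace();  // OK: MatrixXd is resizable, m is now 3x2
  // Eigen::Matrix<double,2,3> f; f.transposeInPlace();  // would assert: not resizable
  return (m.rows() == 3 && m.cols() == 2) ? 0 : 1;
}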

@ -278,21 +278,21 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView

/** Efficient triangular matrix times vector/matrix product */
template<typename OtherDerived>
TriangularProduct<Mode,true,MatrixType,false,OtherDerived, OtherDerived::IsVectorAtCompileTime>
TriangularProduct<Mode, true, MatrixType, false, OtherDerived, OtherDerived::ColsAtCompileTime==1>
operator*(const MatrixBase<OtherDerived>& rhs) const
{
return TriangularProduct
<Mode,true,MatrixType,false,OtherDerived,OtherDerived::IsVectorAtCompileTime>
<Mode, true, MatrixType, false, OtherDerived, OtherDerived::ColsAtCompileTime==1>
(m_matrix, rhs.derived());
}

/** Efficient vector/matrix times triangular matrix product */
template<typename OtherDerived> friend
TriangularProduct<Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
TriangularProduct<Mode, false, OtherDerived, OtherDerived::RowsAtCompileTime==1, MatrixType, false>
operator*(const MatrixBase<OtherDerived>& lhs, const TriangularView& rhs)
{
return TriangularProduct
<Mode,false,OtherDerived,OtherDerived::IsVectorAtCompileTime,MatrixType,false>
<Mode, false, OtherDerived, OtherDerived::RowsAtCompileTime==1, MatrixType, false>
(lhs.derived(),rhs.m_matrix);
}
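
Switching the flag from IsVectorAtCompileTime to ColsAtCompileTime==1 / RowsAtCompileTime==1 distinguishes column vectors on the right from row vectors on the left; usage is unchanged:

#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3d a = Eigen::Matrix3d::Random();
  Eigen::Vector3d x = Eigen::Vector3d::Random();
  Eigen::Vector3d y = a.triangularView<Eigen::Upper>() * x;                 // member operator*
  Eigen::RowVector3d z = x.transpose() * a.triangularView<Eigen::Upper>();  // friend operator*
  return (y.size() == 3 && z.size() == 3) ? 0 : 1;
}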

@ -380,19 +380,19 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
EIGEN_STRONG_INLINE TriangularView& operator=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
{
setZero();
return assignProduct(other,1);
return assignProduct(other.derived(),1);
}

template<typename ProductDerived, typename Lhs, typename Rhs>
EIGEN_STRONG_INLINE TriangularView& operator+=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
{
return assignProduct(other,1);
return assignProduct(other.derived(),1);
}

template<typename ProductDerived, typename Lhs, typename Rhs>
EIGEN_STRONG_INLINE TriangularView& operator-=(const ProductBase<ProductDerived, Lhs,Rhs>& other)
{
return assignProduct(other,-1);
return assignProduct(other.derived(),-1);
}


@ -400,25 +400,34 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
EIGEN_STRONG_INLINE TriangularView& operator=(const ScaledProduct<ProductDerived>& other)
{
setZero();
return assignProduct(other,other.alpha());
return assignProduct(other.derived(),other.alpha());
}

template<typename ProductDerived>
EIGEN_STRONG_INLINE TriangularView& operator+=(const ScaledProduct<ProductDerived>& other)
{
return assignProduct(other,other.alpha());
return assignProduct(other.derived(),other.alpha());
}

template<typename ProductDerived>
EIGEN_STRONG_INLINE TriangularView& operator-=(const ScaledProduct<ProductDerived>& other)
{
return assignProduct(other,-other.alpha());
return assignProduct(other.derived(),-other.alpha());
}

protected:

template<typename ProductDerived, typename Lhs, typename Rhs>
EIGEN_STRONG_INLINE TriangularView& assignProduct(const ProductBase<ProductDerived, Lhs,Rhs>& prod, const Scalar& alpha);

template<int Mode, bool LhsIsTriangular,
typename Lhs, bool LhsIsVector,
typename Rhs, bool RhsIsVector>
EIGEN_STRONG_INLINE TriangularView& assignProduct(const TriangularProduct<Mode, LhsIsTriangular, Lhs, LhsIsVector, Rhs, RhsIsVector>& prod, const Scalar& alpha)
{
lazyAssign(alpha*prod.eval());
return *this;
}

MatrixTypeNested m_matrix;
};

@ -50,7 +50,7 @@ struct traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >
MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime,
Flags0 = (unsigned int)_MatrixTypeNested::Flags & HereditaryBits,
Flags = (Flags0 & ~RowMajorBit) | (RowsAtCompileTime == 1 ? RowMajorBit : 0),
TraversalSize = Direction==Vertical ? RowsAtCompileTime : ColsAtCompileTime
TraversalSize = Direction==Vertical ? MatrixType::RowsAtCompileTime : MatrixType::ColsAtCompileTime
};
#if EIGEN_GNUC_AT_LEAST(3,4)
typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
@ -58,7 +58,8 @@ struct traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >
typedef typename MemberOp::template Cost<InputScalar,TraversalSize> CostOpType;
#endif
enum {
CoeffReadCost = TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value)
CoeffReadCost = TraversalSize==Dynamic ? Dynamic
: TraversalSize * traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value)
};
};
}

@ -76,14 +76,17 @@ template<typename Derived>
template<typename Visitor>
void DenseBase<Derived>::visit(Visitor& visitor) const
{
typedef typename internal::remove_all<typename Derived::Nested>::type ThisNested;
typename Derived::Nested thisNested(derived());

enum { unroll = SizeAtCompileTime != Dynamic
&& CoeffReadCost != Dynamic
&& (SizeAtCompileTime == 1 || internal::functor_traits<Visitor>::Cost != Dynamic)
&& SizeAtCompileTime * CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits<Visitor>::Cost
<= EIGEN_UNROLLING_LIMIT };
return internal::visitor_impl<Visitor, Derived,
return internal::visitor_impl<Visitor, ThisNested,
unroll ? int(SizeAtCompileTime) : Dynamic
>::run(derived(), visitor);
>::run(thisNested, visitor);
}
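
visit() calls visitor.init(value, i, j) for coefficient (0,0) and visitor(value, i, j) for every other coefficient; a minimal max-tracking visitor:

#include <Eigen/Core>

// tracks the largest coefficient and its position
struct MaxVisitor
{
  double big;
  Eigen::DenseIndex row, col;
  void init(const double& v, Eigen::DenseIndex i, Eigen::DenseIndex j)
  { big = v; row = i; col = j; }
  void operator()(const double& v, Eigen::DenseIndex i, Eigen::DenseIndex j)
  { if (v > big) { big = v; row = i; col = j; } }
};

int main()
{
  Eigen::MatrixXd m = Eigen::MatrixXd::Random(3, 3);
  MaxVisitor vis;
  m.visit(vis);
  return (m(vis.row, vis.col) == vis.big) ? 0 : 1;
}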

namespace internal {

@ -110,7 +110,7 @@ template<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<
template<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
template<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> * to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }

template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { __pld((float *)addr); }
template<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr) { EIGEN_ARM_PREFETCH((float *)addr); }

template<> EIGEN_STRONG_INLINE std::complex<float> pfirst<Packet2cf>(const Packet2cf& a)
{

@ -48,9 +48,18 @@ typedef uint32x4_t Packet4ui;
#define EIGEN_INIT_NEON_PACKET2(X, Y) {X, Y}
#define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {X, Y, Z, W}
#endif

#ifndef __pld
#define __pld(x) asm volatile ( " pld [%[addr]]\n" :: [addr] "r" (x) : "cc" );

// arm64 does have the pld instruction. If available, let's trust the __builtin_prefetch built-in function
// which is available on LLVM and GCC (at least)
#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
#define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
#elif defined __pld
#define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
#elif !defined(__aarch64__)
#define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ( " pld [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
#else
// by default no explicit prefetching
#define EIGEN_ARM_PREFETCH(ADDR)
#endif
|
||||
|
||||
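The EIGEN_ARM_PREFETCH chain above prefers the compiler built-in, then a pre-existing __pld, then raw pld inline assembly on 32-bit ARM, and finally a no-op. A minimal sketch of the same dispatch idea; the MY_PREFETCH and warm_cache names are illustrative:

#if defined(__GNUC__) || defined(__clang__)
#define MY_PREFETCH(p) __builtin_prefetch(p)      /* portable GCC/Clang built-in */
#elif defined(__arm__)
#define MY_PREFETCH(p) __asm__ __volatile__ ("pld [%[addr]]\n" :: [addr] "r" (p) : "cc")
#else
#define MY_PREFETCH(p) ((void)0)                  /* no-op fallback */
#endif

static inline void warm_cache(const float* data)
{
  MY_PREFETCH(data); // hint the cache before the actual loads
}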
template<> struct packet_traits<float> : default_packet_traits
@ -209,8 +218,8 @@ template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& f
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }

template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { __pld(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { __pld(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ARM_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { EIGEN_ARM_PREFETCH(addr); }

// FIXME only store the first 2 elements?
template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
@ -375,6 +384,7 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
a_lo = vget_low_s32(a);
a_hi = vget_high_s32(a);
max = vpmax_s32(a_lo, a_hi);
max = vpmax_s32(max, max);

return vget_lane_s32(max, 0);
}
@ -52,7 +52,7 @@ Packet4f plog<Packet4f>(const Packet4f& _x)

Packet4i emm0;

Packet4f invalid_mask = _mm_cmplt_ps(x, _mm_setzero_ps());
Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // not greater equal is true if x is NaN
Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps());

x = pmax(x, p4f_min_norm_pos);  /* cut off denormalized stuff */
@ -126,7 +126,7 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
_EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
_EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);

Packet4f tmp = _mm_setzero_ps(), fx;
Packet4f tmp, fx;
Packet4i emm0;

// clamp x
@ -166,7 +166,7 @@ Packet4f pexp<Packet4f>(const Packet4f& _x)
emm0 = _mm_cvttps_epi32(fx);
emm0 = _mm_add_epi32(emm0, p4i_0x7f);
emm0 = _mm_slli_epi32(emm0, 23);
return pmul(y, _mm_castsi128_ps(emm0));
return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x);
}
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet2d pexp<Packet2d>(const Packet2d& _x)
@ -195,7 +195,7 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
_EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);

Packet2d tmp = _mm_setzero_pd(), fx;
Packet2d tmp, fx;
Packet4i emm0;

// clamp x
@ -239,7 +239,7 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
emm0 = _mm_add_epi32(emm0, p4i_1023_0);
emm0 = _mm_slli_epi32(emm0, 20);
emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
return pmul(x, _mm_castsi128_pd(emm0));
return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
}
/* evaluation of 4 sines at once, using SSE2 intrinsics.
@ -279,7 +279,7 @@ Packet4f psin<Packet4f>(const Packet4f& _x)
_EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
_EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI

Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
Packet4f xmm1, xmm2, xmm3, sign_bit, y;

Packet4i emm0, emm2;
sign_bit = x;
@ -378,7 +378,7 @@ Packet4f pcos<Packet4f>(const Packet4f& _x)
_EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
_EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI

Packet4f xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
Packet4f xmm1, xmm2, xmm3, y;
Packet4i emm0, emm2;

x = pabs(x);
@ -442,8 +442,11 @@ Packet4f pcos<Packet4f>(const Packet4f& _x)
return _mm_xor_ps(y, sign_bit);
}
#if EIGEN_FAST_MATH

// This is based on Quake3's fast inverse square root.
// For details see here: http://www.beyond3d.com/content/articles/8/
// It lacks 1 bit (or 2 bits in some rare cases) of precision, and does not handle negative, +inf, or denormalized numbers correctly.
template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED
Packet4f psqrt<Packet4f>(const Packet4f& _x)
{
@ -457,6 +460,14 @@ Packet4f psqrt<Packet4f>(const Packet4f& _x)
return pmul(_x,x);
}

#else

template<> EIGEN_STRONG_INLINE Packet4f psqrt<Packet4f>(const Packet4f& x) { return _mm_sqrt_ps(x); }

#endif

template<> EIGEN_STRONG_INLINE Packet2d psqrt<Packet2d>(const Packet2d& x) { return _mm_sqrt_pd(x); }

} // end namespace internal

} // end namespace Eigen

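The EIGEN_FAST_MATH psqrt above is the classic fast reciprocal square root: start from the hardware estimate and refine. A self-contained sketch of the underlying SSE pattern with one Newton-Raphson step; fast_sqrt_ps is an illustrative name, and like the code above it ignores zero and denormal inputs:

#include <xmmintrin.h>

// sqrt(x) computed as x * rsqrt(x); the ~12-bit _mm_rsqrt_ps estimate r
// is refined once via r' = 0.5 * r * (3 - x*r*r).
static inline __m128 fast_sqrt_ps(__m128 x)
{
  const __m128 half  = _mm_set1_ps(0.5f);
  const __m128 three = _mm_set1_ps(3.0f);
  __m128 r = _mm_rsqrt_ps(x);
  r = _mm_mul_ps(_mm_mul_ps(half, r),
                 _mm_sub_ps(three, _mm_mul_ps(x, _mm_mul_ps(r, r))));
  return _mm_mul_ps(x, r);
}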
@ -14,9 +14,6 @@ namespace Eigen {

namespace internal {

// Deprecated in C++11.
#define register

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif
@ -86,7 +83,8 @@ template<> struct packet_traits<double> : default_packet_traits
size=2,

HasDiv = 1,
HasExp = 1
HasExp = 1,
HasSqrt = 1
};
};
template<> struct packet_traits<int> : default_packet_traits
@ -237,63 +235,27 @@ template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { E
return _mm_loadu_ps(from);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from)); }
#else
// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
// require pointer casting to incompatible pointer types and leads to invalid code
// because of the strict aliasing rule. The "dummy" stuff is required to enforce
// a correct instruction dependency.
// TODO: do the same for MSVC (ICC is compatible)
// NOTE: with the code below, MSVC's compiler crashes!

#if defined(__GNUC__) && defined(__i386__)
// bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
#define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#elif defined(__clang__)
// bug 201: Segfaults in __mm_loadh_pd with clang 2.8
#define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#else
#define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0
#endif

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
return _mm_loadu_ps(from);
#else
__m128d res;
res =  _mm_load_sd((const double*)(from)) ;
res =  _mm_loadh_pd(res, (const double*)(from+2)) ;
return _mm_castpd_ps(res);
#endif
}
#endif

template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
return _mm_loadu_pd(from);
#else
__m128d res;
res = _mm_load_sd(from) ;
res = _mm_loadh_pd(res,from+1);
return res;
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from));
#else
__m128d res;
res =  _mm_load_sd((const double*)(from)) ;
res =  _mm_loadh_pd(res, (const double*)(from+2)) ;
return _mm_castpd_si128(res);
#endif
return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}
#endif


template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
@ -510,8 +472,8 @@ template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
// for GCC (eg., it does not like using std::min after the pstore !!)
EIGEN_ALIGN16 int aux[4];
pstore(aux, a);
register int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
register int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
return aux0<aux2 ? aux0 : aux2;
}

@ -531,8 +493,8 @@ template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
// for GCC (eg., it does not like using std::min after the pstore !!)
EIGEN_ALIGN16 int aux[4];
pstore(aux, a);
register int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
register int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
return aux0>aux2 ? aux0 : aux2;
}

@ -90,6 +90,7 @@ struct traits<CoeffBasedProduct<LhsNested,RhsNested,NestingFlags> >
| (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0),

CoeffReadCost = InnerSize == Dynamic ? Dynamic
: InnerSize == 0 ? 0
: InnerSize * (NumTraits<Scalar>::MulCost + LhsCoeffReadCost + RhsCoeffReadCost)
+ (InnerSize - 1) * NumTraits<Scalar>::AddCost,

@ -133,7 +134,7 @@ class CoeffBasedProduct
};

typedef internal::product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
Unroll ? InnerSize-1 : Dynamic,
Unroll ? InnerSize : Dynamic,
_LhsNested, _RhsNested, Scalar> ScalarCoeffImpl;

typedef CoeffBasedProduct<LhsNested,RhsNested,NestByRefBit> LazyCoeffBasedProductType;
@ -184,7 +185,7 @@ class CoeffBasedProduct
{
PacketScalar res;
internal::product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
Unroll ? InnerSize-1 : Dynamic,
Unroll ? InnerSize : Dynamic,
_LhsNested, _RhsNested, PacketScalar, LoadMode>
::run(row, col, m_lhs, m_rhs, res);
return res;
@ -242,12 +243,12 @@ struct product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
res += lhs.coeff(row, UnrollingIndex-1) * rhs.coeff(UnrollingIndex-1, col);
}
};

template<typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
struct product_coeff_impl<DefaultTraversal, 1, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
@ -256,16 +257,23 @@ struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
}
};

template<typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
{
res = RetScalar(0);
}
};

template<typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar& res)
{
eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
res = lhs.coeff(row, 0) * rhs.coeff(0, col);
for(Index i = 1; i < lhs.cols(); ++i)
res += lhs.coeff(row, i) * rhs.coeff(i, col);
res = (lhs.row(row).transpose().cwiseProduct( rhs.col(col) )).sum();
}
};

@ -295,6 +303,16 @@ struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet>
}
};

template<typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<InnerVectorizedTraversal, 0, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
{
res = 0;
}
};

template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
{
@ -304,8 +322,7 @@ struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, Re
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
Packet pres;
product_coeff_vectorized_unroller<UnrollingIndex+1-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
product_coeff_impl<DefaultTraversal,UnrollingIndex,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res);
product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
res = predux(pres);
}
};
@ -373,7 +390,7 @@ struct product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
{
product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex)), rhs.template packet<LoadMode>(UnrollingIndex, col), res);
res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex-1)), rhs.template packet<LoadMode>(UnrollingIndex-1, col), res);
}
};

@ -384,12 +401,12 @@ struct product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
{
product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex), pset1<Packet>(rhs.coeff(UnrollingIndex, col)), res);
res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex-1), pset1<Packet>(rhs.coeff(UnrollingIndex-1, col)), res);
}
};

template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
struct product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
@ -399,7 +416,7 @@ struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
};

template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
struct product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
@ -408,16 +425,35 @@ struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
}
};

template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
{
res = pset1<Packet>(0);
}
};

template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
{
res = pset1<Packet>(0);
}
};

template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
{
eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
for(Index i = 1; i < lhs.cols(); ++i)
res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
res = pset1<Packet>(0);
for(Index i = 0; i < lhs.cols(); ++i)
res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
}
};

@ -427,10 +463,9 @@ struct product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
{
eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
for(Index i = 1; i < lhs.cols(); ++i)
res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
res = pset1<Packet>(0);
for(Index i = 0; i < lhs.cols(); ++i)
res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
}
};

@ -1128,6 +1128,8 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder,
enum { PacketSize = packet_traits<Scalar>::size };

EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
EIGEN_UNUSED_VARIABLE(stride)
EIGEN_UNUSED_VARIABLE(offset)
eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) );
conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
@ -1215,6 +1217,8 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, Pan
::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset)
{
EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
EIGEN_UNUSED_VARIABLE(stride)
EIGEN_UNUSED_VARIABLE(offset)
eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
Index packet_cols = (cols/nr) * nr;
@ -1266,6 +1270,8 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, Pan
::operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride, Index depth, Index cols, Index stride, Index offset)
{
EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
EIGEN_UNUSED_VARIABLE(stride)
EIGEN_UNUSED_VARIABLE(offset)
eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
Index packet_cols = (cols/nr) * nr;

@ -140,8 +140,10 @@ static void run(Index rows, Index cols, Index depth,
// Release all the sub blocks B'_j of B' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index j=0; j<threads; ++j)
{
#pragma omp atomic
--(info[j].users);
info[j].users -= 1;
}
}
}
else
@ -390,13 +392,17 @@ class GeneralProduct<Lhs, Rhs, GemmProduct>

GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
{
#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))
typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
#endif
}

template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
{
eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
if(m_lhs.cols()==0 || m_lhs.rows()==0 || m_rhs.cols()==0)
return;

typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);

@ -52,11 +52,7 @@ EIGEN_DONT_INLINE static void run(
Index rows, Index cols,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsIncr,
ResScalar* res, Index
#ifdef EIGEN_INTERNAL_DEBUGGING
resIncr
#endif
, RhsScalar alpha);
ResScalar* res, Index resIncr, RhsScalar alpha);
};

template<typename Index, typename LhsScalar, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs, int Version>
@ -64,12 +60,9 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
Index rows, Index cols,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsIncr,
ResScalar* res, Index
#ifdef EIGEN_INTERNAL_DEBUGGING
resIncr
#endif
, RhsScalar alpha)
ResScalar* res, Index resIncr, RhsScalar alpha)
{
EIGEN_UNUSED_VARIABLE(resIncr)
eigen_internal_assert(resIncr==1);
#ifdef _EIGEN_ACCUMULATE_PACKETS
#error _EIGEN_ACCUMULATE_PACKETS has already been defined
@ -265,7 +258,7 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,ColMajor,Co
// process aligned result's coeffs
if ((size_t(lhs0+alignedStart)%sizeof(LhsPacket))==0)
for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
pstore(&res[i], pcj.pmadd(pload<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));
else
for (Index i = alignedStart;i<alignedSize;i+=ResPacketSize)
pstore(&res[i], pcj.pmadd(ploadu<LhsPacket>(&lhs0[i]), ptmp0, pload<ResPacket>(&res[i])));

@ -125,19 +125,22 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos
if(transpose)
std::swap(rows,cols);

Index blockCols = (cols / threads) & ~Index(0x3);
Index blockRows = (rows / threads) & ~Index(0x7);

GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];

#pragma omp parallel for schedule(static,1) num_threads(threads)
for(Index i=0; i<threads; ++i)
#pragma omp parallel num_threads(threads)
{
Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of requested ones.
Index actual_threads = omp_get_num_threads();

Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads) & ~Index(0x7);

Index r0 = i*blockRows;
Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

Index c0 = i*blockCols;
Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

info[i].rhs_start = c0;
info[i].rhs_length = actualBlockCols;

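The parallelize_gemm change above is the essential fix: block sizes are now derived from omp_get_num_threads() inside the parallel region, because OpenMP may grant fewer threads than requested. A stand-alone sketch of the pattern; process_rows and the work placeholder are illustrative:

#include <omp.h>

void process_rows(int rows)
{
  #pragma omp parallel
  {
    int i      = omp_get_thread_num();
    int actual = omp_get_num_threads();  // may be smaller than the requested count
    int block  = rows / actual;
    int r0     = i * block;
    int len    = (i + 1 == actual) ? rows - r0 : block; // last thread takes the remainder
    // ... work on rows [r0, r0 + len) ...
    (void)len;
  }
}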
@ -79,8 +79,8 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrd
for (Index j=FirstTriangular ? bound : 0;
j<(FirstTriangular ? size : bound);j+=2)
{
register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
register const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;
const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;

Scalar t0 = cjAlpha * rhs[j];
Packet ptmp0 = pset1<Packet>(t0);
@ -147,7 +147,7 @@ EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrd
}
for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
{
register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;

Scalar t1 = cjAlpha * rhs[j];
Scalar t2(0);

@ -109,7 +109,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \
/* Non-square case - doesn't fit MKL ?TRMM. Fall back to the default triangular product or call MKL ?GEMM */ \
if (rows != depth) { \
\
int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
\
if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \
/* Most likely no benefit to call TRMM or GEMM from MKL */ \
@ -223,7 +223,7 @@ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \
/* Non-square case - doesn't fit MKL ?TRMM. Fall back to the default triangular product or call MKL ?GEMM */ \
if (cols != depth) { \
\
int nthr = mkl_domain_get_max_threads(MKL_BLAS); \
int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); \
\
if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \
/* Most likely no benefit to call TRMM or GEMM from MKL */ \

@ -115,8 +115,9 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
{
// TODO write a small kernel handling this (can be shared with trsv)
Index i  = IsLower ? k2+k1+k : k2-k1-k-1;
Index s  = IsLower ? k2+k1 : i+1;
Index rs = actualPanelWidth - k - 1; // remaining size
Index s  = TriStorageOrder==RowMajor ? (IsLower ? k2+k1 : i+1)
: IsLower ? i+1 : i-rs;

Scalar a = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(tri(i,i));
for (Index j=j2; j<j2+actual_cols; ++j)
@ -133,7 +134,6 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conju
}
else
{
Index s = IsLower ? i+1 : i-rs;
Scalar b = (other(i,j) *= a);
Scalar* r = &other(s,j);
const Scalar* l = &tri(s,i);
@ -302,9 +302,12 @@ EIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conj
for (Index i=0; i<actual_mc; ++i)
r[i] -= a[i] * b;
}
Scalar b = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(rhs(j,j));
for (Index i=0; i<actual_mc; ++i)
r[i] *= b;
if((Mode & UnitDiag)==0)
{
Scalar b = conj(rhs(j,j));
for (Index i=0; i<actual_mc; ++i)
r[i] /= b;
}
}

// pack the just computed part of lhs to A

@ -433,6 +433,19 @@ struct MatrixXpr {};
/** The type used to identify an array expression */
struct ArrayXpr {};

namespace internal {
/** \internal
* Constants for comparison functors
*/
enum ComparisonName {
cmp_EQ = 0,
cmp_LT = 1,
cmp_LE = 2,
cmp_UNORD = 3,
cmp_NEQ = 4
};
}

} // end namespace Eigen

#endif // EIGEN_CONSTANTS_H

@ -235,6 +235,9 @@ template<typename Scalar> class Rotation2D;
template<typename Scalar> class AngleAxis;
template<typename Scalar,int Dim> class Translation;

// Sparse module:
template<typename Derived> class SparseMatrixBase;

#ifdef EIGEN2_SUPPORT
template<typename Derived, int _Dim> class eigen2_RotationBase;
template<typename Lhs, typename Rhs> class eigen2_Cross;

@ -54,11 +54,60 @@
#endif

#if defined EIGEN_USE_MKL
# include <mkl.h>
/*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/
# ifndef INTEL_MKL_VERSION
#  undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */
# elif INTEL_MKL_VERSION < 100305 /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/
#  undef EIGEN_USE_MKL
# endif
# ifndef EIGEN_USE_MKL
/*If the MKL version is too old, undef everything*/
#  undef EIGEN_USE_MKL_ALL
#  undef EIGEN_USE_BLAS
#  undef EIGEN_USE_LAPACKE
#  undef EIGEN_USE_MKL_VML
#  undef EIGEN_USE_LAPACKE_STRICT
#  undef EIGEN_USE_LAPACKE
# endif
#endif

#include <mkl.h>
#if defined EIGEN_USE_MKL
#include <mkl_lapacke.h>
#define EIGEN_MKL_VML_THRESHOLD 128

/* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */
/* MKL_BLAS, etc are not defined in 11.2 */
#ifdef MKL_DOMAIN_ALL
#define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL
#else
#define EIGEN_MKL_DOMAIN_ALL MKL_ALL
#endif

#ifdef MKL_DOMAIN_BLAS
#define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS
#else
#define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS
#endif

#ifdef MKL_DOMAIN_FFT
#define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT
#else
#define EIGEN_MKL_DOMAIN_FFT MKL_FFT
#endif

#ifdef MKL_DOMAIN_VML
#define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML
#else
#define EIGEN_MKL_DOMAIN_VML MKL_VML
#endif

#ifdef MKL_DOMAIN_PARDISO
#define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO
#else
#define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO
#endif

namespace Eigen {

typedef std::complex<double> dcomplex;

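These EIGEN_MKL_DOMAIN_* aliases exist because the MKL_DOMAIN_* constants only appeared in MKL 10.3 update 7, while the old MKL_* spellings were dropped in 11.2; the alias compiles against either generation of headers. The TRMM hunks earlier in this patch use it exactly like this one-liner, shown here as a sketch assuming <mkl.h> is included:

int nthr = mkl_domain_get_max_threads(EIGEN_MKL_DOMAIN_BLAS); // thread count for the BLAS domain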
@ -13,23 +13,292 @@

#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 2
#define EIGEN_MINOR_VERSION 0
#define EIGEN_MINOR_VERSION 8

#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
EIGEN_MINOR_VERSION>=z))))

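A typical client-side use of these version macros, as a sketch: compile code conditionally on the Eigen release being at least a given version.

#include <Eigen/Core>

#if EIGEN_VERSION_AT_LEAST(3,2,8)
// rely on behaviour introduced in 3.2.8
#else
// fallback for older releases
#endif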
// Compiler identification, EIGEN_COMP_*

/// \internal EIGEN_COMP_GNUC set to 1 for all compilers compatible with GCC
#ifdef __GNUC__
#define EIGEN_COMP_GNUC 1
#else
#define EIGEN_COMP_GNUC 0
#endif

/// \internal EIGEN_COMP_CLANG set to 1 if the compiler is clang (alias for __clang__)
#if defined(__clang__)
#define EIGEN_COMP_CLANG 1
#else
#define EIGEN_COMP_CLANG 0
#endif

/// \internal EIGEN_COMP_LLVM set to 1 if the compiler backend is llvm
#if defined(__llvm__)
#define EIGEN_COMP_LLVM 1
#else
#define EIGEN_COMP_LLVM 0
#endif

/// \internal EIGEN_COMP_ICC set to __INTEL_COMPILER if the compiler is Intel compiler, 0 otherwise
#if defined(__INTEL_COMPILER)
#define EIGEN_COMP_ICC __INTEL_COMPILER
#else
#define EIGEN_COMP_ICC 0
#endif

/// \internal EIGEN_COMP_MINGW set to 1 if the compiler is mingw
#if defined(__MINGW32__)
#define EIGEN_COMP_MINGW 1
#else
#define EIGEN_COMP_MINGW 0
#endif

/// \internal EIGEN_COMP_SUNCC set to 1 if the compiler is Solaris Studio
#if defined(__SUNPRO_CC)
#define EIGEN_COMP_SUNCC 1
#else
#define EIGEN_COMP_SUNCC 0
#endif

/// \internal EIGEN_COMP_MSVC set to _MSC_VER if the compiler is Microsoft Visual C++, 0 otherwise.
#if defined(_MSC_VER)
#define EIGEN_COMP_MSVC _MSC_VER
#else
#define EIGEN_COMP_MSVC 0
#endif

/// \internal EIGEN_COMP_MSVC_STRICT set to 1 if the compiler is really Microsoft Visual C++ and not, e.g., ICC
#if EIGEN_COMP_MSVC && !(EIGEN_COMP_ICC)
#define EIGEN_COMP_MSVC_STRICT _MSC_VER
#else
#define EIGEN_COMP_MSVC_STRICT 0
#endif

/// \internal EIGEN_COMP_IBM set to 1 if the compiler is IBM XL C++
#if defined(__IBMCPP__) || defined(__xlc__)
#define EIGEN_COMP_IBM 1
#else
#define EIGEN_COMP_IBM 0
#endif

/// \internal EIGEN_COMP_PGI set to 1 if the compiler is Portland Group Compiler
#if defined(__PGI)
#define EIGEN_COMP_PGI 1
#else
#define EIGEN_COMP_PGI 0
#endif

/// \internal EIGEN_COMP_ARM set to 1 if the compiler is ARM Compiler
#if defined(__CC_ARM) || defined(__ARMCC_VERSION)
#define EIGEN_COMP_ARM 1
#else
#define EIGEN_COMP_ARM 0
#endif

/// \internal EIGEN_COMP_GNUC_STRICT set to 1 if the compiler is really GCC and not a compatible compiler (e.g., ICC, clang, mingw, etc.)
#if EIGEN_COMP_GNUC && !(EIGEN_COMP_CLANG || EIGEN_COMP_ICC || EIGEN_COMP_MINGW || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM )
#define EIGEN_COMP_GNUC_STRICT 1
#else
#define EIGEN_COMP_GNUC_STRICT 0
#endif

#if EIGEN_COMP_GNUC
#define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)
#define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)
#define EIGEN_GNUC_AT(x,y) ( __GNUC__==x && __GNUC_MINOR__==y )
#else
#define EIGEN_GNUC_AT_LEAST(x,y) 0
#define EIGEN_GNUC_AT_MOST(x,y) 0
#define EIGEN_GNUC_AT(x,y) 0
#endif

#ifdef __GNUC__
#define EIGEN_GNUC_AT_MOST(x,y) ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)

// FIXME: could probably be removed as we do not support gcc 3.x anymore
#if EIGEN_COMP_GNUC && (__GNUC__ <= 3)
#define EIGEN_GCC3_OR_OLDER 1
#else
#define EIGEN_GNUC_AT_MOST(x,y) 0
#define EIGEN_GCC3_OR_OLDER 0
#endif

// Architecture identification, EIGEN_ARCH_*

#if defined(__x86_64__) || defined(_M_X64) || defined(__amd64)
#define EIGEN_ARCH_x86_64 1
#else
#define EIGEN_ARCH_x86_64 0
#endif

#if defined(__i386__) || defined(_M_IX86) || defined(_X86_) || defined(__i386)
#define EIGEN_ARCH_i386 1
#else
#define EIGEN_ARCH_i386 0
#endif

#if EIGEN_ARCH_x86_64 || EIGEN_ARCH_i386
#define EIGEN_ARCH_i386_OR_x86_64 1
#else
#define EIGEN_ARCH_i386_OR_x86_64 0
#endif

/// \internal EIGEN_ARCH_ARM set to 1 if the architecture is ARM
#if defined(__arm__)
#define EIGEN_ARCH_ARM 1
#else
#define EIGEN_ARCH_ARM 0
#endif

/// \internal EIGEN_ARCH_ARM64 set to 1 if the architecture is ARM64
#if defined(__aarch64__)
#define EIGEN_ARCH_ARM64 1
#else
#define EIGEN_ARCH_ARM64 0
#endif

#if EIGEN_ARCH_ARM || EIGEN_ARCH_ARM64
#define EIGEN_ARCH_ARM_OR_ARM64 1
#else
#define EIGEN_ARCH_ARM_OR_ARM64 0
#endif

/// \internal EIGEN_ARCH_MIPS set to 1 if the architecture is MIPS
#if defined(__mips__) || defined(__mips)
#define EIGEN_ARCH_MIPS 1
#else
#define EIGEN_ARCH_MIPS 0
#endif

/// \internal EIGEN_ARCH_SPARC set to 1 if the architecture is SPARC
#if defined(__sparc__) || defined(__sparc)
#define EIGEN_ARCH_SPARC 1
#else
#define EIGEN_ARCH_SPARC 0
#endif

/// \internal EIGEN_ARCH_IA64 set to 1 if the architecture is Intel Itanium
#if defined(__ia64__)
#define EIGEN_ARCH_IA64 1
#else
#define EIGEN_ARCH_IA64 0
#endif

/// \internal EIGEN_ARCH_PPC set to 1 if the architecture is PowerPC
#if defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC)
#define EIGEN_ARCH_PPC 1
#else
#define EIGEN_ARCH_PPC 0
#endif


// Operating system identification, EIGEN_OS_*

/// \internal EIGEN_OS_UNIX set to 1 if the OS is a unix variant
#if defined(__unix__) || defined(__unix)
#define EIGEN_OS_UNIX 1
#else
#define EIGEN_OS_UNIX 0
#endif

/// \internal EIGEN_OS_LINUX set to 1 if the OS is based on the Linux kernel
#if defined(__linux__)
#define EIGEN_OS_LINUX 1
#else
#define EIGEN_OS_LINUX 0
#endif

/// \internal EIGEN_OS_ANDROID set to 1 if the OS is Android
// note: ANDROID is defined when using ndk_build, __ANDROID__ is defined when using a standalone toolchain.
#if defined(__ANDROID__) || defined(ANDROID)
#define EIGEN_OS_ANDROID 1
#else
#define EIGEN_OS_ANDROID 0
#endif

/// \internal EIGEN_OS_GNULINUX set to 1 if the OS is GNU Linux and not another Linux-based variant (e.g., not Android)
#if defined(__gnu_linux__) && !(EIGEN_OS_ANDROID)
#define EIGEN_OS_GNULINUX 1
#else
#define EIGEN_OS_GNULINUX 0
#endif

/// \internal EIGEN_OS_BSD set to 1 if the OS is a BSD variant
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__) || defined(__DragonFly__)
#define EIGEN_OS_BSD 1
#else
#define EIGEN_OS_BSD 0
#endif

/// \internal EIGEN_OS_MAC set to 1 if the OS is MacOS
#if defined(__APPLE__)
#define EIGEN_OS_MAC 1
#else
#define EIGEN_OS_MAC 0
#endif

/// \internal EIGEN_OS_QNX set to 1 if the OS is QNX
#if defined(__QNX__)
#define EIGEN_OS_QNX 1
#else
#define EIGEN_OS_QNX 0
#endif

/// \internal EIGEN_OS_WIN set to 1 if the OS is Windows based
#if defined(_WIN32)
#define EIGEN_OS_WIN 1
#else
#define EIGEN_OS_WIN 0
#endif

/// \internal EIGEN_OS_WIN64 set to 1 if the OS is 64-bit Windows
#if defined(_WIN64)
#define EIGEN_OS_WIN64 1
#else
#define EIGEN_OS_WIN64 0
#endif

/// \internal EIGEN_OS_WINCE set to 1 if the OS is Windows CE
#if defined(_WIN32_WCE)
#define EIGEN_OS_WINCE 1
#else
#define EIGEN_OS_WINCE 0
#endif

/// \internal EIGEN_OS_CYGWIN set to 1 if the OS is Windows/Cygwin
#if defined(__CYGWIN__)
#define EIGEN_OS_CYGWIN 1
#else
#define EIGEN_OS_CYGWIN 0
#endif

/// \internal EIGEN_OS_WIN_STRICT set to 1 if the OS is really Windows and not some variant (e.g., WinCE or Cygwin)
#if EIGEN_OS_WIN && !( EIGEN_OS_WINCE || EIGEN_OS_CYGWIN )
#define EIGEN_OS_WIN_STRICT 1
#else
#define EIGEN_OS_WIN_STRICT 0
#endif

/// \internal EIGEN_OS_SUN set to 1 if the OS is SunOS
#if (defined(sun) || defined(__sun)) && !(defined(__SVR4) || defined(__svr4__))
#define EIGEN_OS_SUN 1
#else
#define EIGEN_OS_SUN 0
#endif

/// \internal EIGEN_OS_SOLARIS set to 1 if the OS is Solaris
#if (defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))
#define EIGEN_OS_SOLARIS 1
#else
#define EIGEN_OS_SOLARIS 0
#endif

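Since every EIGEN_COMP_*, EIGEN_ARCH_* and EIGEN_OS_* macro above is always defined to 0 or 1, they compose in ordinary preprocessor expressions instead of #ifdef chains. A hedged sketch of typical use:

#if EIGEN_ARCH_ARM_OR_ARM64 && EIGEN_OS_LINUX
// e.g. enable a NEON-specific code path on Linux/ARM targets
#endif

#if EIGEN_COMP_MSVC_STRICT
// genuine MSVC only: ICC defining _MSC_VER does not take this branch
#endif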
#if EIGEN_GNUC_AT_MOST(4,3) && !defined(__clang__)
// see bug 89
#define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0
@ -37,12 +306,6 @@
#define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1
#endif

#if defined(__GNUC__) && (__GNUC__ <= 3)
#define EIGEN_GCC3_OR_OLDER 1
#else
#define EIGEN_GCC3_OR_OLDER 0
#endif

// 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable
// 16 byte alignment on all platforms where vectorization might be enabled. In theory we could always
// enable alignment, but it can be a cause of problems on some platforms, so we just disable it in
@ -96,6 +359,27 @@
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t
#endif

// A Clang feature extension to determine compiler features.
// We use it to determine 'cxx_rvalue_references'
#ifndef __has_feature
# define __has_feature(x) 0
#endif

// Do we support r-value references?
#if (__has_feature(cxx_rvalue_references) || \
(defined(__cplusplus) && __cplusplus >= 201103L) || \
(defined(_MSC_VER) && _MSC_VER >= 1600))
#define EIGEN_HAVE_RVALUE_REFERENCES
#endif


// Cross compiler wrapper around LLVM's __has_builtin
#ifdef __has_builtin
# define EIGEN_HAS_BUILTIN(x) __has_builtin(x)
#else
# define EIGEN_HAS_BUILTIN(x) 0
#endif

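The EIGEN_HAS_BUILTIN wrapper is what allows the ARM prefetch hunk earlier in this patch to write "#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || defined(__GNUC__)" and still preprocess cleanly on compilers lacking __has_builtin. The same guard works for any built-in; a sketch with an illustrative macro name:

#if EIGEN_HAS_BUILTIN(__builtin_expect)
#define MY_LIKELY(x) __builtin_expect(!!(x), 1)
#else
#define MY_LIKELY(x) (x)
#endif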
/** Allows one to disable some optimizations which might affect the accuracy of the result.
* Such optimizations are enabled by default; set EIGEN_FAST_MATH to 0 to disable them.
* They currently include:
@ -238,11 +522,16 @@
#endif

// Suppresses 'unused variable' warnings.
#define EIGEN_UNUSED_VARIABLE(var) (void)var;
namespace Eigen {
namespace internal {
template<typename T> void ignore_unused_variable(const T&) {}
}
}
#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var);

#if !defined(EIGEN_ASM_COMMENT)
#if (defined __GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
#define EIGEN_ASM_COMMENT(X) asm("#" X)
#define EIGEN_ASM_COMMENT(X) __asm__("#" X)
#else
#define EIGEN_ASM_COMMENT(X)
#endif
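The new EIGEN_UNUSED_VARIABLE hands the variable to an empty function template instead of casting it to void; the call inlines away, and it reportedly avoids the warnings some compilers still emit for the (void)var form. A minimal sketch of the trick outside Eigen, with illustrative names:

namespace detail {
template<typename T> inline void ignore(const T&) {} // body intentionally empty
}
#define MY_UNUSED(var) detail::ignore(var)

int compute(int x)
{
  int debug_only = x * 2; // kept around for debugging
  MY_UNUSED(debug_only);  // no 'unused variable' warning, no generated code
  return x;
}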
@ -266,6 +555,7 @@
#error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
#endif

#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)

#if EIGEN_ALIGN_STATICALLY
@ -284,7 +574,8 @@
#endif

#ifndef EIGEN_STACK_ALLOCATION_LIMIT
#define EIGEN_STACK_ALLOCATION_LIMIT 20000
// 131072 == 128 KB
#define EIGEN_STACK_ALLOCATION_LIMIT 131072
#endif

#ifndef EIGEN_DEFAULT_IO_FORMAT
@ -300,7 +591,7 @@
// just an empty macro !
#define EIGEN_EMPTY

#if defined(_MSC_VER) && (!defined(__INTEL_COMPILER))
#if defined(_MSC_VER) && (_MSC_VER < 1900) && (!defined(__INTEL_COMPILER))
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =;
#elif defined(__clang__) // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
@ -319,8 +610,11 @@
}
#endif

#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
/** \internal
* \brief Macro to manually inherit assignment operators.
* This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined.
*/
#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)

/**
* Just a side note. Commenting within defines works only by documenting
@ -392,6 +686,8 @@
#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \
: ((int)a >= (int)b) ? (int)a : (int)b)

#define EIGEN_ADD_COST(a,b) int(a)==Dynamic || int(b)==Dynamic ? Dynamic : int(a)+int(b)

#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))

#define EIGEN_IMPLIES(a,b) (!(a) || (b))

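EIGEN_ADD_COST encodes the same rule as the CoeffReadCost hunks earlier in this patch: Dynamic, the unknown-at-compile-time sentinel, is absorbing under addition. A sketch of its behaviour, assuming the macro from this patch is in scope:

#include <Eigen/Core>
using Eigen::Dynamic;

enum {
  KnownSum   = (EIGEN_ADD_COST(3, 4)),      // 7
  UnknownSum = (EIGEN_ADD_COST(3, Dynamic)) // Dynamic: an unknown operand keeps the sum unknown
};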
@ -63,7 +63,7 @@
// Currently, let's include it only on unix systems:
#if defined(__unix__) || defined(__unix)
#include <unistd.h>
#if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
#if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || (defined __PGI) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
#define EIGEN_HAS_POSIX_MEMALIGN 1
#endif
#endif
@ -272,12 +272,12 @@ inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
// The defined(_mm_free) is just here to verify that this MSVC version
// implements _mm_malloc/_mm_free based on the corresponding _aligned_
// functions. This may not always be the case and we just try to be safe.
#if defined(_MSC_VER) && defined(_mm_free)
#if defined(_MSC_VER) && (!defined(_WIN32_WCE)) && defined(_mm_free)
result = _aligned_realloc(ptr,new_size,16);
#else
result = generic_aligned_realloc(ptr,new_size,old_size);
#endif
#elif defined(_MSC_VER)
#elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
result = _aligned_realloc(ptr,new_size,16);
#else
result = handmade_aligned_realloc(ptr,new_size,old_size);
@ -417,6 +417,8 @@ template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pt

template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t size)
{
if(size==0)
return 0; // short-cut. Also fixes Bug 884
check_size_for_overflow<T>(size);
T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
if(NumTraits<T>::RequireInitialization)
@ -464,9 +466,8 @@ template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *
template<typename Scalar, typename Index>
static inline Index first_aligned(const Scalar* array, Index size)
{
enum { PacketSize = packet_traits<Scalar>::size,
PacketAlignedMask = PacketSize-1
};
static const Index PacketSize = packet_traits<Scalar>::size;
static const Index PacketAlignedMask = PacketSize-1;

if(PacketSize==1)
{
|
||||
// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
|
||||
// to the appropriate stack allocation function
|
||||
#ifndef EIGEN_ALLOCA
|
||||
#if (defined __linux__)
|
||||
#if (defined __linux__) || (defined __APPLE__) || (defined alloca)
|
||||
#define EIGEN_ALLOCA alloca
|
||||
#elif defined(_MSC_VER)
|
||||
#define EIGEN_ALLOCA _alloca
|
||||
@ -578,7 +579,7 @@ template<typename T> class aligned_stack_memory_handler
|
||||
*/
|
||||
#ifdef EIGEN_ALLOCA
|
||||
|
||||
#ifdef __arm__
|
||||
#if defined(__arm__) || defined(_WIN32)
|
||||
#define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+16)) & ~(size_t(15))) + 16)
|
||||
#else
|
||||
#define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
|
||||
@ -612,7 +613,6 @@ template<typename T> class aligned_stack_memory_handler
|
||||
void* operator new(size_t size, const std::nothrow_t&) throw() { \
|
||||
try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
|
||||
catch (...) { return 0; } \
|
||||
return 0; \
|
||||
}
|
||||
#else
|
||||
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
|
||||
@ -630,11 +630,15 @@ template<typename T> class aligned_stack_memory_handler
|
||||
} \
|
||||
void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
|
||||
void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
|
||||
void operator delete(void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
|
||||
void operator delete[](void * ptr, std::size_t /* sz */) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
|
||||
/* in-place new and delete. since (at least afaik) there is no actual */ \
|
||||
/* memory allocated we can safely let the default implementation handle */ \
|
||||
/* this particular case. */ \
|
||||
static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \
|
||||
static void *operator new[](size_t size, void* ptr) { return ::operator new[](size,ptr); } \
|
||||
void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \
|
||||
void operator delete[](void * memory, void *ptr) throw() { return ::operator delete[](memory,ptr); } \
|
||||
/* nothrow-new (returns zero instead of std::bad_alloc) */ \
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
|
||||
void operator delete(void *ptr, const std::nothrow_t&) throw() { \
|
||||
@ -729,15 +733,6 @@ public:
|
||||
::new( p ) T( value );
|
||||
}
|
||||
|
||||
// Support for c++11
|
||||
#if (__cplusplus >= 201103L)
|
||||
template<typename... Args>
|
||||
void construct(pointer p, Args&&... args)
|
||||
{
|
||||
::new(p) T(std::forward<Args>(args)...);
|
||||
}
|
||||
#endif
|
||||
|
||||
void destroy( pointer p )
|
||||
{
|
||||
p->~T();
|
||||
@ -784,9 +779,9 @@ namespace internal {

#ifdef EIGEN_CPUID

inline bool cpuid_is_vendor(int abcd[4], const char* vendor)
inline bool cpuid_is_vendor(int abcd[4], const int vendor[3])
{
return abcd[1]==(reinterpret_cast<const int*>(vendor))[0] && abcd[3]==(reinterpret_cast<const int*>(vendor))[1] && abcd[2]==(reinterpret_cast<const int*>(vendor))[2];
return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2];
}

inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
@ -928,13 +923,16 @@ inline void queryCacheSizes(int& l1, int& l2, int& l3)
{
#ifdef EIGEN_CPUID
int abcd[4];
const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"

// identify the CPU vendor
EIGEN_CPUID(abcd,0x0,0);
int max_std_funcs = abcd[1];
if(cpuid_is_vendor(abcd,"GenuineIntel"))
if(cpuid_is_vendor(abcd,GenuineIntel))
queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
else if(cpuid_is_vendor(abcd,"AuthenticAMD") || cpuid_is_vendor(abcd,"AMDisbetter!"))
else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
queryCacheSizes_amd(l1,l2,l3);
else
// by default let's use Intel's API
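The vendor arrays above are just the ASCII vendor string packed into the three little-endian 32-bit words that CPUID leaf 0 returns in EBX, EDX and ECX; passing int[3] instead of a char* removes the strict-aliasing reinterpret_cast of the old signature. A sketch showing where 0x756e6547 comes from:

#include <cstdio>

int main()
{
  const unsigned char text[4] = {'G','e','n','u'}; // first word of "GenuineIntel" (EBX)
  unsigned w = text[0] | (text[1] << 8) | (text[2] << 16) | ((unsigned)text[3] << 24);
  std::printf("0x%08x\n", w); // prints 0x756e6547
  return 0;
}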
@ -26,7 +26,7 @@

#ifndef EIGEN_NO_STATIC_ASSERT

#if defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(_MSC_VER) && (_MSC_VER >= 1600))
#if __has_feature(cxx_static_assert) || (defined(__cplusplus) && __cplusplus >= 201103L) || (EIGEN_COMP_MSVC >= 1600)

// if native static_assert is enabled, let's use it
#define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG);
@ -90,7 +90,9 @@
YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,
THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,
THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH,
OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG
OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG,
IMPLICIT_CONVERSION_TO_SCALAR_IS_FOR_INNER_PRODUCT_ONLY,
STORAGE_LAYOUT_DOES_NOT_MATCH
};
};

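The two identifiers added to the enum become static-assert messages: EIGEN_STATIC_ASSERT stringifies them into the diagnostic when native static_assert is available. An illustrative use of one of the new entries; check_layout is a hypothetical helper, not part of the patch:

template<typename Lhs, typename Rhs>
void check_layout()
{
  EIGEN_STATIC_ASSERT(int(Lhs::Flags & Eigen::RowMajorBit) == int(Rhs::Flags & Eigen::RowMajorBit),
                      STORAGE_LAYOUT_DOES_NOT_MATCH)
}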
@ -341,7 +341,7 @@ template<typename T, int n=1, typename PlainObject = typename eval<T>::type> str
};

template<typename T>
T* const_cast_ptr(const T* ptr)
inline T* const_cast_ptr(const T* ptr)
{
return const_cast<T*>(ptr);
}
@ -366,17 +366,17 @@ struct dense_xpr_base<Derived, ArrayXpr>

/** \internal Helper base class to add a scalar multiple operator
* overloads for complex types */
template<typename Derived,typename Scalar,typename OtherScalar,
template<typename Derived, typename Scalar, typename OtherScalar, typename BaseType,
bool EnableIt = !is_same<Scalar,OtherScalar>::value >
struct special_scalar_op_base : public DenseCoeffsBase<Derived>
struct special_scalar_op_base : public BaseType
{
// dummy operator* so that the
// "using special_scalar_op_base::operator*" compiles
void operator*() const;
};

template<typename Derived,typename Scalar,typename OtherScalar>
struct special_scalar_op_base<Derived,Scalar,OtherScalar,true> : public DenseCoeffsBase<Derived>
template<typename Derived,typename Scalar,typename OtherScalar, typename BaseType>
struct special_scalar_op_base<Derived,Scalar,OtherScalar,BaseType,true> : public BaseType
{
const CwiseUnaryOp<scalar_multiple2_op<Scalar,OtherScalar>, Derived>
operator*(const OtherScalar& scalar) const

@ -147,7 +147,6 @@ void fitHyperplane(int numPoints,

// compute the covariance matrix
CovMatrixType covMat = CovMatrixType::Zero(size, size);
VectorType remean = VectorType::Zero(size);
for(int i = 0; i < numPoints; ++i)
{
VectorType diff = (*(points[i]) - mean).conjugate();

@ -512,8 +512,7 @@ template<typename MatrixType>
template<typename OtherDerived, typename ResultType>
bool SVD<MatrixType>::solve(const MatrixBase<OtherDerived> &b, ResultType* result) const
{
const int rows = m_matU.rows();
ei_assert(b.rows() == rows);
ei_assert(b.rows() == m_matU.rows());

Scalar maxVal = m_sigma.cwise().abs().maxCoeff();
for (int j=0; j<b.cols(); ++j)

|
||||
}
|
||||
|
||||
protected:
|
||||
|
||||
static void check_template_parameters()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
|
||||
}
|
||||
|
||||
EigenvectorType m_eivec;
|
||||
EigenvalueType m_eivalues;
|
||||
ComplexSchur<MatrixType> m_schur;
|
||||
@ -251,6 +257,8 @@ template<typename MatrixType>
|
||||
ComplexEigenSolver<MatrixType>&
|
||||
ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
|
||||
{
|
||||
check_template_parameters();
|
||||
|
||||
// this code is inspired from Jampack
|
||||
eigen_assert(matrix.cols() == matrix.rows());
|
||||
|
||||
|
@ -45,7 +45,6 @@ ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
ComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, bool computeU) \
{ \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> MatrixType; \
typedef MatrixType::Scalar Scalar; \
typedef MatrixType::RealScalar RealScalar; \
typedef std::complex<RealScalar> ComplexScalar; \
\

@ -298,6 +298,13 @@ template<typename _MatrixType> class EigenSolver
void doComputeEigenvectors();

protected:

static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
}

MatrixType m_eivec;
EigenvalueType m_eivalues;
bool m_isInitialized;

@ -364,6 +371,8 @@ template<typename MatrixType>
EigenSolver<MatrixType>&
EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
{
check_template_parameters();

using std::sqrt;
using std::abs;
eigen_assert(matrix.cols() == matrix.rows());
@ -263,6 +263,13 @@ template<typename _MatrixType> class GeneralizedEigenSolver
}

protected:

static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
}

MatrixType m_eivec;
ComplexVectorType m_alphas;
VectorType m_betas;

@ -290,6 +297,8 @@ template<typename MatrixType>
GeneralizedEigenSolver<MatrixType>&
GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
{
check_template_parameters();

using std::sqrt;
using std::abs;
eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());
@ -240,10 +240,10 @@ namespace Eigen {
m_S.coeffRef(i,j) = Scalar(0.0);
m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
// update Q
if (m_computeQZ)
m_Q.applyOnTheRight(i-1,i,G);
}
// update Q
if (m_computeQZ)
m_Q.applyOnTheRight(i-1,i,G);
// kill T(i,i-1)
if(m_T.coeff(i,i-1)!=Scalar(0))
{

@ -251,10 +251,10 @@ namespace Eigen {
m_T.coeffRef(i,i-1) = Scalar(0.0);
m_S.applyOnTheRight(i,i-1,G);
m_T.topRows(i).applyOnTheRight(i,i-1,G);
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(i,i-1,G.adjoint());
}
// update Z
if (m_computeQZ)
m_Z.applyOnTheLeft(i,i-1,G.adjoint());
}
}
}
@ -313,7 +313,7 @@ namespace Eigen {
using std::abs;
using std::sqrt;
const Index dim=m_S.cols();
if (abs(m_S.coeff(i+1,i)==Scalar(0)))
if (abs(m_S.coeff(i+1,i))==Scalar(0))
return;
Index z = findSmallDiagEntry(i,i+1);
if (z==i-1)
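Editor's note: the one-character fix above moves a parenthesis so abs() is applied to the matrix entry rather than to the result of the comparison. A tiny illustration of the difference in intent, assuming only <cmath>:

    #include <cmath>

    int main()
    {
      double x = -3.0;
      bool oldForm = std::abs(x == 0.0) != 0;  // abs() of a bool, i.e. 0 or 1
      bool newForm = (std::abs(x) == 0.0);     // abs() of the value, as intended
      // For built-in types both tests happen to agree, since abs() preserves a
      // bool's truth value; only the second form states the intended check.
      return (oldForm == newForm) ? 0 : 1;
    }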
@ -234,7 +234,7 @@ template<typename _MatrixType> class RealSchur
typedef Matrix<Scalar,3,1> Vector3s;

Scalar computeNormOfT();
Index findSmallSubdiagEntry(Index iu, const Scalar& norm);
Index findSmallSubdiagEntry(Index iu);
void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);

@ -286,7 +286,7 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
{
while (iu >= 0)
{
Index il = findSmallSubdiagEntry(iu, norm);
Index il = findSmallSubdiagEntry(iu);

// Check for convergence
if (il == iu) // One root found

@ -343,16 +343,14 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()

/** \internal Look for single small sub-diagonal element and returns its index */
template<typename MatrixType>
inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& norm)
inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
{
using std::abs;
Index res = iu;
while (res > 0)
{
Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
if (s == 0.0)
s = norm;
if (abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
if (abs(m_matT.coeff(res,res-1)) <= NumTraits<Scalar>::epsilon() * s)
break;
res--;
}
@ -457,9 +455,7 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));
if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
{
break;
}
}
}

@ -44,10 +44,6 @@ template<> inline \
RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \
RealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW>& matrix, bool computeU) \
{ \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> MatrixType; \
typedef MatrixType::Scalar Scalar; \
typedef MatrixType::RealScalar RealScalar; \
\
eigen_assert(matrix.cols() == matrix.rows()); \
\
lapack_int n = matrix.cols(), sdim, info; \
@ -80,6 +80,8 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
/** \brief Scalar type for matrices of type \p _MatrixType. */
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;

typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;

/** \brief Real scalar type for \p _MatrixType.
*

@ -225,7 +227,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
*
* \sa eigenvalues()
*/
const MatrixType& eigenvectors() const
const EigenvectorsType& eigenvectors() const
{
eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
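Editor's note: eigenvectors() now returns the dedicated EigenvectorsType introduced above instead of plain MatrixType. A minimal usage sketch, not part of the diff:

    #include <Eigen/Eigenvalues>
    #include <iostream>

    int main()
    {
      Eigen::Matrix2f a;
      a << 2, 1,
           1, 2;
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix2f> es(a);
      // eigenvalues come out in ascending order; the columns of
      // eigenvectors() are the matching unit eigenvectors.
      std::cout << es.eigenvalues().transpose() << "\n"
                << es.eigenvectors()            << "\n";
      return es.info() == Eigen::Success ? 0 : 1;
    }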
@ -351,7 +353,12 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
#endif // EIGEN2_SUPPORT

protected:
MatrixType m_eivec;
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}

EigenvectorsType m_eivec;
RealVectorType m_eivalues;
typename TridiagonalizationType::SubDiagonalType m_subdiag;
ComputationInfo m_info;

@ -376,7 +383,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
* "implicit symmetric QR step with Wilkinson shift"
*/
namespace internal {
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
template<typename RealScalar, typename Scalar, typename Index>
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
}

@ -384,6 +391,8 @@ template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::compute(const MatrixType& matrix, int options)
{
check_template_parameters();

using std::abs;
eigen_assert(matrix.cols() == matrix.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0

@ -406,7 +415,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>

// declare some aliases
RealVectorType& diag = m_eivalues;
MatrixType& mat = m_eivec;
EigenvectorsType& mat = m_eivec;

// map the matrix coefficients to [-1:1] to avoid over- and underflow.
mat = matrix.template triangularView<Lower>();

@ -442,7 +451,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
while (start>0 && m_subdiag[start-1]!=0)
start--;

internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
}

if (iter <= m_maxIterations * n)
@ -490,7 +499,13 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
typedef typename SolverType::MatrixType MatrixType;
typedef typename SolverType::RealVectorType VectorType;
typedef typename SolverType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef typename SolverType::EigenvectorsType EigenvectorsType;

/** \internal
* Computes the roots of the characteristic polynomial of \a m.
* For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized.
*/
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;

@ -510,148 +525,123 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
// Construct the parameters used in classifying the roots of the equation
// and in solving the equation for the roots in closed form.
Scalar c2_over_3 = c2*s_inv3;
Scalar a_over_3 = (c1 - c2*c2_over_3)*s_inv3;
if (a_over_3 > Scalar(0))
Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
if(a_over_3<Scalar(0))
a_over_3 = Scalar(0);

Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));

Scalar q = half_b*half_b + a_over_3*a_over_3*a_over_3;
if (q > Scalar(0))
Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
if(q<Scalar(0))
q = Scalar(0);

// Compute the eigenvalues by solving for the roots of the polynomial.
Scalar rho = sqrt(-a_over_3);
Scalar theta = atan2(sqrt(-q),half_b)*s_inv3;
Scalar rho = sqrt(a_over_3);
Scalar theta = atan2(sqrt(q),half_b)*s_inv3; // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
Scalar cos_theta = cos(theta);
Scalar sin_theta = sin(theta);
roots(0) = c2_over_3 + Scalar(2)*rho*cos_theta;
roots(1) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta);
roots(2) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta);

// Sort in increasing order.
if (roots(0) >= roots(1))
std::swap(roots(0),roots(1));
if (roots(1) >= roots(2))
{
std::swap(roots(1),roots(2));
if (roots(0) >= roots(1))
std::swap(roots(0),roots(1));
}
// roots are already sorted, since cos is monotonically decreasing on [0, pi]
roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
}

static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
{
using std::abs;
Index i0;
// Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):
mat.diagonal().cwiseAbs().maxCoeff(&i0);
// mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
// so let's save it:
representative = mat.col(i0);
Scalar n0, n1;
VectorType c0, c1;
n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
if(n0>n1) res = c0/std::sqrt(n0);
else res = c1/std::sqrt(n1);

return true;
}

static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
using std::sqrt;
eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;

MatrixType& eivecs = solver.m_eivec;
EigenvectorsType& eivecs = solver.m_eivec;
VectorType& eivals = solver.m_eivalues;

// map the matrix coefficients to [-1:1] to avoid over- and underflow.
Scalar scale = mat.cwiseAbs().maxCoeff();
MatrixType scaledMat = mat / scale;
// Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
Scalar shift = mat.trace() / Scalar(3);
// TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
MatrixType scaledMat = mat.template selfadjointView<Lower>();
scaledMat.diagonal().array() -= shift;
Scalar scale = scaledMat.cwiseAbs().maxCoeff();
if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations

// compute the eigenvalues
computeRoots(scaledMat,eivals);

// compute the eigen vectors
// compute the eigenvectors
if(computeEigenvectors)
{
Scalar safeNorm2 = Eigen::NumTraits<Scalar>::epsilon();
safeNorm2 *= safeNorm2;
if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
{
// All three eigenvalues are numerically the same
eivecs.setIdentity();
}
else
{
scaledMat = scaledMat.template selfadjointView<Lower>();
MatrixType tmp;
tmp = scaledMat;

// Compute the eigenvector of the most distinct eigenvalue
Scalar d0 = eivals(2) - eivals(1);
Scalar d1 = eivals(1) - eivals(0);
int k = d0 > d1 ? 2 : 0;
d0 = d0 > d1 ? d1 : d0;

tmp.diagonal().array () -= eivals(k);
VectorType cross;
Scalar n;
n = (cross = tmp.row(0).cross(tmp.row(1))).squaredNorm();

if(n>safeNorm2)
eivecs.col(k) = cross / sqrt(n);
else
Index k(0), l(2);
if(d0 > d1)
{
n = (cross = tmp.row(0).cross(tmp.row(2))).squaredNorm();

if(n>safeNorm2)
eivecs.col(k) = cross / sqrt(n);
else
{
n = (cross = tmp.row(1).cross(tmp.row(2))).squaredNorm();

if(n>safeNorm2)
eivecs.col(k) = cross / sqrt(n);
else
{
// the input matrix and/or the eigenvaues probably contains some inf/NaN,
// => exit
// scale back to the original size.
eivals *= scale;

solver.m_info = NumericalIssue;
solver.m_isInitialized = true;
solver.m_eigenvectorsOk = computeEigenvectors;
return;
}
}
std::swap(k,l);
d0 = d1;
}

tmp = scaledMat;
tmp.diagonal().array() -= eivals(1);

if(d0<=Eigen::NumTraits<Scalar>::epsilon())
eivecs.col(1) = eivecs.col(k).unitOrthogonal();
else
// Compute the eigenvector of index k
{
n = (cross = eivecs.col(k).cross(tmp.row(0).normalized())).squaredNorm();
if(n>safeNorm2)
eivecs.col(1) = cross / sqrt(n);
else
{
n = (cross = eivecs.col(k).cross(tmp.row(1))).squaredNorm();
if(n>safeNorm2)
eivecs.col(1) = cross / sqrt(n);
else
{
n = (cross = eivecs.col(k).cross(tmp.row(2))).squaredNorm();
if(n>safeNorm2)
eivecs.col(1) = cross / sqrt(n);
else
{
// we should never reach this point,
// if so the last two eigenvalues are likely to ve very closed to each other
eivecs.col(1) = eivecs.col(k).unitOrthogonal();
}
}
}

// make sure that eivecs[1] is orthogonal to eivecs[2]
Scalar d = eivecs.col(1).dot(eivecs.col(k));
eivecs.col(1) = (eivecs.col(1) - d * eivecs.col(k)).normalized();
tmp.diagonal().array () -= eivals(k);
// By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
}

eivecs.col(k==2 ? 0 : 2) = eivecs.col(k).cross(eivecs.col(1)).normalized();
// Compute eigenvector of index l
if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
{
// If d0 is too small, then the two other eigenvalues are numerically the same,
// and thus we only have to ortho-normalize the near orthogonal vector we saved above.
eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l);
eivecs.col(l).normalize();
}
else
{
tmp = scaledMat;
tmp.diagonal().array () -= eivals(l);

VectorType dummy;
extract_kernel(tmp, eivecs.col(l), dummy);
}

// Compute last eigenvector from the other two
eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
}
}

// Rescale back to the original size.
eivals *= scale;
eivals.array() += shift;

solver.m_info = Success;
solver.m_isInitialized = true;
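Editor's note: the rewritten computeRoots drops the explicit sort by evaluating the trigonometric (Viète) root formula with an ordered angle. In LaTeX, writing the characteristic polynomial as det(lambda*I - m) = lambda^3 - c2*lambda^2 + c1*lambda - c0 to match the code's c0, c1, c2:

    \lambda_k = \frac{c_2}{3} + 2\rho\,\cos\!\left(\frac{\theta + 2\pi k}{3}\right), \quad k = 0,1,2,
    \qquad \rho = \sqrt{a/3}, \qquad \theta = \operatorname{atan2}\!\left(\sqrt{q},\, b/2\right) \in [0,\pi],

where a/3 and b/2 are the code's a_over_3 and half_b, and q = (a/3)^3 - (b/2)^2. Because cos is monotonically decreasing on [0, pi], the three branches come out already sorted, exactly as the inline comments note.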
@ -665,11 +655,12 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
typedef typename SolverType::MatrixType MatrixType;
typedef typename SolverType::RealVectorType VectorType;
typedef typename SolverType::Scalar Scalar;
typedef typename SolverType::EigenvectorsType EigenvectorsType;

static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;
const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
roots(0) = t1 - t0;
roots(1) = t1 + t0;

@ -678,13 +669,15 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
using std::sqrt;
using std::abs;

eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
&& "invalid option parameter");
bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;

MatrixType& eivecs = solver.m_eivec;
EigenvectorsType& eivecs = solver.m_eivec;
VectorType& eivals = solver.m_eivalues;

// map the matrix coefficients to [-1:1] to avoid over- and underflow.

@ -698,22 +691,29 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
// compute the eigen vectors
if(computeEigenvectors)
{
scaledMat.diagonal().array () -= eivals(1);
Scalar a2 = numext::abs2(scaledMat(0,0));
Scalar c2 = numext::abs2(scaledMat(1,1));
Scalar b2 = numext::abs2(scaledMat(1,0));
if(a2>c2)
if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
{
eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
eivecs.col(1) /= sqrt(a2+b2);
eivecs.setIdentity();
}
else
{
eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
eivecs.col(1) /= sqrt(c2+b2);
}
scaledMat.diagonal().array () -= eivals(1);
Scalar a2 = numext::abs2(scaledMat(0,0));
Scalar c2 = numext::abs2(scaledMat(1,1));
Scalar b2 = numext::abs2(scaledMat(1,0));
if(a2>c2)
{
eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
eivecs.col(1) /= sqrt(a2+b2);
}
else
{
eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
eivecs.col(1) /= sqrt(c2+b2);
}

eivecs.col(0) << eivecs.col(1).unitOrthogonal();
eivecs.col(0) << eivecs.col(1).unitOrthogonal();
}
}

// Rescale back to the original size.
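Editor's note: these 2x2 and 3x3 specializations back the solver's closed-form path. A minimal sketch of exercising it (toy values; computeDirect() is the public entry point that dispatches to these specializations):

    #include <Eigen/Eigenvalues>

    int main()
    {
      Eigen::Matrix3f a;
      a << 4, 1, 0,
           1, 3, 1,
           0, 1, 2;
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> es;
      es.computeDirect(a);  // closed-form 3x3 branch patched above
      return es.info() == Eigen::Success ? 0 : 1;
    }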
@ -736,7 +736,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
}

namespace internal {
template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
template<typename RealScalar, typename Scalar, typename Index>
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
using std::abs;

@ -788,8 +788,7 @@ static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index sta
// apply the givens rotation to the unit matrix Q = Q * G
if (matrixQ)
{
// FIXME if StorageOrder == RowMajor this operation is not very efficient
Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
Map<Matrix<Scalar,Dynamic,Dynamic,ColMajor> > q(matrixQ,n,n);
q.applyOnTheRight(k,k+1,rot);
}
}

@ -19,10 +19,12 @@ namespace Eigen {
*
* \brief An axis aligned box
*
* \param _Scalar the type of the scalar coefficients
* \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
* \tparam _Scalar the type of the scalar coefficients
* \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
*
* This class represents an axis aligned box as a pair of the minimal and maximal corners.
* \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty().
* \sa alignedboxtypedefs
*/
template <typename _Scalar, int _AmbientDim>
class AlignedBox

@ -40,18 +42,21 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
enum CornerType
{
/** 1D names */
/** 1D names @{ */
Min=0, Max=1,
/** @} */

/** Added names for 2D */
/** Identifier for 2D corner @{ */
BottomLeft=0, BottomRight=1,
TopLeft=2, TopRight=3,
/** @} */

/** Added names for 3D */
/** Identifier for 3D corner @{ */
BottomLeftFloor=0, BottomRightFloor=1,
TopLeftFloor=2, TopRightFloor=3,
BottomLeftCeil=4, BottomRightCeil=5,
TopLeftCeil=6, TopRightCeil=7
/** @} */
};
@ -63,34 +68,33 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
{ setEmpty(); }

/** Constructs a box with extremities \a _min and \a _max. */
/** Constructs a box with extremities \a _min and \a _max.
* \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */
template<typename OtherVectorType1, typename OtherVectorType2>
inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}

/** Constructs a box containing a single point \a p. */
template<typename Derived>
inline explicit AlignedBox(const MatrixBase<Derived>& a_p)
{
typename internal::nested<Derived,2>::type p(a_p.derived());
m_min = p;
m_max = p;
}
inline explicit AlignedBox(const MatrixBase<Derived>& p) : m_min(p), m_max(m_min)
{ }

~AlignedBox() {}

/** \returns the dimension in which the box holds */
inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }

/** \deprecated use isEmpty */
/** \deprecated use isEmpty() */
inline bool isNull() const { return isEmpty(); }

/** \deprecated use setEmpty */
/** \deprecated use setEmpty() */
inline void setNull() { setEmpty(); }

/** \returns true if the box is empty. */
/** \returns true if the box is empty.
* \sa setEmpty */
inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }

/** Makes \c *this an empty box. */
/** Makes \c *this an empty box.
* \sa isEmpty */
inline void setEmpty()
{
m_min.setConstant( ScalarTraits::highest() );

@ -159,7 +163,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
* a uniform distribution */
inline VectorType sample() const
{
VectorType r;
VectorType r(dim());
for(Index d=0; d<dim(); ++d)
{
if(!ScalarTraits::IsInteger)

@ -175,27 +179,34 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)

/** \returns true if the point \a p is inside the box \c *this. */
template<typename Derived>
inline bool contains(const MatrixBase<Derived>& a_p) const
inline bool contains(const MatrixBase<Derived>& p) const
{
typename internal::nested<Derived,2>::type p(a_p.derived());
return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all();
typename internal::nested<Derived,2>::type p_n(p.derived());
return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all();
}

/** \returns true if the box \a b is entirely inside the box \c *this. */
inline bool contains(const AlignedBox& b) const
{ return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }

/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
/** \returns true if the box \a b is intersecting the box \c *this.
* \sa intersection, clamp */
inline bool intersects(const AlignedBox& b) const
{ return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); }

/** Extends \c *this such that it contains the point \a p and returns a reference to \c *this.
* \sa extend(const AlignedBox&) */
template<typename Derived>
inline AlignedBox& extend(const MatrixBase<Derived>& a_p)
inline AlignedBox& extend(const MatrixBase<Derived>& p)
{
typename internal::nested<Derived,2>::type p(a_p.derived());
m_min = m_min.cwiseMin(p);
m_max = m_max.cwiseMax(p);
typename internal::nested<Derived,2>::type p_n(p.derived());
m_min = m_min.cwiseMin(p_n);
m_max = m_max.cwiseMax(p_n);
return *this;
}

/** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
/** Extends \c *this such that it contains the box \a b and returns a reference to \c *this.
* \sa merged, extend(const MatrixBase&) */
inline AlignedBox& extend(const AlignedBox& b)
{
m_min = m_min.cwiseMin(b.m_min);
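Editor's note: with the additions above, intersects() joins intersection(), merged() and the extend() overloads. A small usage sketch (toy values, assuming the AlignedBox2f typedef; not part of the diff):

    #include <Eigen/Geometry>

    int main()
    {
      typedef Eigen::AlignedBox2f Box;
      Box a(Eigen::Vector2f(0, 0), Eigen::Vector2f(2, 2));
      Box b(Eigen::Vector2f(1, 1), Eigen::Vector2f(3, 3));

      bool hit  = a.intersects(b);    // true: the boxes overlap
      Box inter = a.intersection(b);  // [1,2] x [1,2]
      Box grown = Box(a).extend(b);   // [0,3] x [0,3], same result as a.merged(b)

      return (hit && !inter.isEmpty() && !grown.isEmpty()) ? 0 : 1;
    }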
@ -203,7 +214,9 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
return *this;
}

/** Clamps \c *this by the box \a b and returns a reference to \c *this. */
/** Clamps \c *this by the box \a b and returns a reference to \c *this.
* \note If the boxes don't intersect, the resulting box is empty.
* \sa intersection(), intersects() */
inline AlignedBox& clamp(const AlignedBox& b)
{
m_min = m_min.cwiseMax(b.m_min);

@ -211,11 +224,15 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
return *this;
}

/** Returns an AlignedBox that is the intersection of \a b and \c *this */
/** Returns an AlignedBox that is the intersection of \a b and \c *this
* \note If the boxes don't intersect, the resulting box is empty.
* \sa intersects(), clamp, contains() */
inline AlignedBox intersection(const AlignedBox& b) const
{return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }

/** Returns an AlignedBox that is the union of \a b and \c *this */
/** Returns an AlignedBox that is the union of \a b and \c *this.
* \note Merging with an empty box may result in a box bigger than \c *this.
* \sa extend(const AlignedBox&) */
inline AlignedBox merged(const AlignedBox& b) const
{ return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }

@ -231,20 +248,20 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)

/** \returns the squared distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
* \sa exteriorDistance()
* \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&)
*/
template<typename Derived>
inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& a_p) const;
inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& p) const;

/** \returns the squared distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
* \sa exteriorDistance()
* \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&)
*/
inline Scalar squaredExteriorDistance(const AlignedBox& b) const;

/** \returns the distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
* \sa squaredExteriorDistance()
* \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&)
*/
template<typename Derived>
inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const

@ -252,7 +269,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)

/** \returns the distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
* \sa squaredExteriorDistance()
* \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&)
*/
inline NonInteger exteriorDistance(const AlignedBox& b) const
{ using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); }
@ -83,10 +83,17 @@ public:
template<typename Derived>
inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }

/** \returns the value of the rotation angle in radian */
Scalar angle() const { return m_angle; }
/** \returns a read-write reference to the stored angle in radian */
Scalar& angle() { return m_angle; }

/** \returns the rotation axis */
const Vector3& axis() const { return m_axis; }
/** \returns a read-write reference to the stored rotation axis.
*
* \warning The rotation axis must remain a \b unit vector.
*/
Vector3& axis() { return m_axis; }

/** Concatenates two rotations */

@ -131,7 +138,7 @@ public:
m_angle = Scalar(other.angle());
}

static inline const AngleAxis Identity() { return AngleAxis(0, Vector3::UnitX()); }
static inline const AngleAxis Identity() { return AngleAxis(Scalar(0), Vector3::UnitX()); }

/** \returns \c true if \c *this is approximately equal to \a other, within the precision
* determined by \a prec.
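Editor's note: the new non-const angle()/axis() accessors allow in-place edits. A minimal usage sketch (toy values, not part of the diff):

    #include <Eigen/Geometry>

    int main()
    {
      Eigen::AngleAxisf aa(0.5f, Eigen::Vector3f::UnitZ());
      aa.angle() += 0.25f;                     // read-write angle access
      aa.axis()   = Eigen::Vector3f::UnitY();  // must remain a unit vector
      Eigen::Vector3f v = aa * Eigen::Vector3f::UnitX();
      return v.norm() > 0.f ? 0 : 1;
    }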
@ -165,8 +172,8 @@ AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived
Scalar n2 = q.vec().squaredNorm();
if (n2 < NumTraits<Scalar>::dummy_precision()*NumTraits<Scalar>::dummy_precision())
{
m_angle = 0;
m_axis << 1, 0, 0;
m_angle = Scalar(0);
m_axis << Scalar(1), Scalar(0), Scalar(0);
}
else
{

@ -28,7 +28,7 @@ namespace Eigen {
* * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode
* This corresponds to the right-multiply conventions (with right hand side frames).
*
* The returned angles are in the ranges [0:pi]x[0:pi]x[-pi:pi].
* The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi].
*
* \sa class AngleAxis
*/

@ -79,7 +79,7 @@ template<typename MatrixType,int _Direction> class Homogeneous
{
if( (int(Direction)==Vertical && row==m_matrix.rows())
|| (int(Direction)==Horizontal && col==m_matrix.cols()))
return 1;
return Scalar(1);
return m_matrix.coeff(row, col);
}

@ -100,7 +100,17 @@ public:
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)
Hyperplane result(p0.size());
result.normal() = (p2 - p0).cross(p1 - p0).normalized();
VectorType v0(p2 - p0), v1(p1 - p0);
result.normal() = v0.cross(v1);
RealScalar norm = result.normal().norm();
if(norm <= v0.norm() * v1.norm() * NumTraits<RealScalar>::epsilon())
{
Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
result.normal() = svd.matrixV().col(2);
}
else
result.normal() /= norm;
result.offset() = -p0.dot(result.normal());
return result;
}
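Editor's note: the SVD fallback above keeps Hyperplane::Through well defined for (near-)collinear input, where the old normalized() divided a zero cross product by zero. A minimal sketch (toy values, not part of the diff):

    #include <Eigen/Geometry>

    int main()
    {
      typedef Eigen::Hyperplane<float, 3> Plane3;
      Eigen::Vector3f p0(0, 0, 0), p1(1, 0, 0), p2(2, 0, 0);  // collinear points
      // Previously the normal came out as NaN; with the fallback it is still a
      // unit vector orthogonal to the common direction of the three points.
      Plane3 pl = Plane3::Through(p0, p1, p2);
      return pl.normal().norm() > 0.5f ? 0 : 1;
    }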
@ -129,7 +129,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
bool isApprox(const ParametrizedLine& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
bool isApprox(const ParametrizedLine& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }

protected:

@ -102,11 +102,11 @@ public:
/** \returns a quaternion representing an identity rotation
* \sa MatrixBase::Identity()
*/
static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(1, 0, 0, 0); }
static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(Scalar(1), Scalar(0), Scalar(0), Scalar(0)); }

/** \sa QuaternionBase::Identity(), MatrixBase::setIdentity()
*/
inline QuaternionBase& setIdentity() { coeffs() << 0, 0, 0, 1; return *this; }
inline QuaternionBase& setIdentity() { coeffs() << Scalar(0), Scalar(0), Scalar(0), Scalar(1); return *this; }

/** \returns the squared norm of the quaternion's coefficients
* \sa QuaternionBase::norm(), MatrixBase::squaredNorm()

@ -150,10 +150,6 @@ public:
/** \returns the conjugated quaternion */
Quaternion<Scalar> conjugate() const;

/** \returns an interpolation for a constant motion between \a other and \c *this
* \a t in [0;1]
* see http://en.wikipedia.org/wiki/Slerp
*/
template<class OtherDerived> Quaternion<Scalar> slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const;

/** \returns \c true if \c *this is approximately equal to \a other, within the precision

@ -165,7 +161,7 @@ public:
{ return coeffs().isApprox(other.coeffs(), prec); }

/** return the result vector of \a v through the rotation*/
EIGEN_STRONG_INLINE Vector3 _transformVector(Vector3 v) const;
EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const;

/** \returns \c *this with scalar type casted to \a NewScalarType
*
@ -194,11 +190,11 @@ public:
* \brief The quaternion class used to represent 3D orientations and rotations
*
* \tparam _Scalar the scalar type, i.e., the type of the coefficients
* \tparam _Options controls the memory alignement of the coeffecients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign.
* \tparam _Options controls the memory alignment of the coefficients. Can be \# AutoAlign or \# DontAlign. Default is AutoAlign.
*
* This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
* orientations and rotations of objects in three dimensions. Compared to other representations
* like Euler angles or 3x3 matrices, quatertions offer the following advantages:
* like Euler angles or 3x3 matrices, quaternions offer the following advantages:
* \li \b compact storage (4 scalars)
* \li \b efficient to compose (28 flops),
* \li \b stable spherical interpolation

@ -207,6 +203,8 @@ public:
* \li \c Quaternionf for \c float
* \li \c Quaterniond for \c double
*
* \warning Operations interpreting the quaternion as rotation have undefined behavior if the quaternion is not normalized.
*
* \sa class AngleAxis, class Transform
*/

@ -233,7 +231,7 @@ class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >
public:
typedef _Scalar Scalar;

EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion)
using Base::operator*=;

typedef typename internal::traits<Quaternion>::Coefficients Coefficients;

@ -343,12 +341,12 @@ class Map<const Quaternion<_Scalar>, _Options >
public:
typedef _Scalar Scalar;
typedef typename internal::traits<Map>::Coefficients Coefficients;
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
using Base::operator*=;

/** Constructs a Mapped Quaternion object from the pointer \a coeffs
*
* The pointer \a coeffs must reference the four coeffecients of Quaternion in the following order:
* The pointer \a coeffs must reference the four coefficients of Quaternion in the following order:
* \code *coeffs == {x, y, z, w} \endcode
*
* If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */

@ -380,12 +378,12 @@ class Map<Quaternion<_Scalar>, _Options >
public:
typedef _Scalar Scalar;
typedef typename internal::traits<Map>::Coefficients Coefficients;
EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
using Base::operator*=;

/** Constructs a Mapped Quaternion object from the pointer \a coeffs
*
* The pointer \a coeffs must reference the four coeffecients of Quaternion in the following order:
* The pointer \a coeffs must reference the four coefficients of Quaternion in the following order:
* \code *coeffs == {x, y, z, w} \endcode
*
* If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */

@ -399,16 +397,16 @@ class Map<Quaternion<_Scalar>, _Options >
};

/** \ingroup Geometry_Module
* Map an unaligned array of single precision scalar as a quaternion */
* Map an unaligned array of single precision scalars as a quaternion */
typedef Map<Quaternion<float>, 0> QuaternionMapf;
/** \ingroup Geometry_Module
* Map an unaligned array of double precision scalar as a quaternion */
* Map an unaligned array of double precision scalars as a quaternion */
typedef Map<Quaternion<double>, 0> QuaternionMapd;
/** \ingroup Geometry_Module
* Map a 16-bits aligned array of double precision scalars as a quaternion */
* Map a 16-byte aligned array of single precision scalars as a quaternion */
typedef Map<Quaternion<float>, Aligned> QuaternionMapAlignedf;
/** \ingroup Geometry_Module
* Map a 16-bits aligned array of double precision scalars as a quaternion */
* Map a 16-byte aligned array of double precision scalars as a quaternion */
typedef Map<Quaternion<double>, Aligned> QuaternionMapAlignedd;

/***************************************************************************
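Editor's note: a minimal sketch of the quaternion Map typedefs documented above, viewing external storage without a copy (toy values, not part of the diff):

    #include <Eigen/Geometry>

    int main()
    {
      float buf[4] = { 0.f, 0.f, 0.f, 1.f };  // x, y, z, w: identity rotation
      Eigen::QuaternionMapf q(buf);           // view onto buf, no copy
      q.normalize();                          // writes straight through to buf
      return buf[3] == 1.f ? 0 : 1;
    }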
@ -463,12 +461,12 @@ EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const Quaterni
*/
template <class Derived>
EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3
QuaternionBase<Derived>::_transformVector(Vector3 v) const
QuaternionBase<Derived>::_transformVector(const Vector3& v) const
{
// Note that this algorithm comes from the optimization by hand
// of the conversion to a Matrix followed by a Matrix/Vector product.
// It appears to be much faster than the common algorithm found
// in the litterature (30 versus 39 flops). It also requires two
// in the literature (30 versus 39 flops). It also requires two
// Vector3 as temporaries.
Vector3 uv = this->vec().cross(v);
uv += uv;
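Editor's note: the hand-optimized product the comment refers to is, in LaTeX, for a unit quaternion q = (w, u) acting on a vector v:

    t = 2\,(u \times v), \qquad q\,v\,q^{-1} = v + w\,t + u \times t

which costs two cross products and one scaled add (the quoted 30 flops) instead of first building the 3x3 rotation matrix.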
@ -579,7 +577,7 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Deri
Scalar c = v1.dot(v0);

// if dot == -1, vectors are nearly opposites
// => accuraletly compute the rotation axis by computing the
// => accurately compute the rotation axis by computing the
// intersection of the two planes. This is done by solving:
// x^T v0 = 0
// x^T v1 = 0

@ -588,7 +586,7 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Deri
// which yields a singular value problem
if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
{
c = max<Scalar>(c,-1);
c = (max)(c,Scalar(-1));
Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
Vector3 axis = svd.matrixV().col(2);

@ -639,7 +637,7 @@ inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Der
{
// FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ??
Scalar n2 = this->squaredNorm();
if (n2 > 0)
if (n2 > Scalar(0))
return Quaternion<Scalar>(conjugate().coeffs() / n2);
else
{

@ -669,16 +667,19 @@ template <class OtherDerived>
inline typename internal::traits<Derived>::Scalar
QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
{
using std::acos;
using std::atan2;
using std::abs;
double d = abs(this->dot(other));
if (d>=1.0)
return Scalar(0);
return static_cast<Scalar>(2 * acos(d));
Quaternion<Scalar> d = (*this) * other.conjugate();
return Scalar(2) * atan2( d.vec().norm(), abs(d.w()) );
}

/** \returns the spherical linear interpolation between the two quaternions
* \c *this and \a other at the parameter \a t
* \c *this and \a other at the parameter \a t in [0;1].
*
* This represents an interpolation for a constant motion between \c *this and \a other,
* see also http://en.wikipedia.org/wiki/Slerp.
*/
template <class Derived>
template <class OtherDerived>

@ -709,7 +710,7 @@ QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerive
scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;
scale1 = sin( ( t * theta) ) / sinTheta;
}
if(d<0) scale1 = -scale1;
if(d<Scalar(0)) scale1 = -scale1;

return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
}
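Editor's note: a minimal slerp usage sketch matching the signature documented above (toy values, not part of the diff):

    #include <Eigen/Geometry>

    int main()
    {
      Eigen::Quaternionf a = Eigen::Quaternionf::Identity();
      Eigen::Quaternionf b(Eigen::AngleAxisf(1.5f, Eigen::Vector3f::UnitY()));
      Eigen::Quaternionf mid = a.slerp(0.5f, b);  // halfway along the arc
      return mid.norm() > 0.99f ? 0 : 1;          // result stays unit-norm
    }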
@ -60,6 +60,9 @@ public:

/** Construct a 2D counter clock wise rotation from the angle \a a in radian. */
inline Rotation2D(const Scalar& a) : m_angle(a) {}

/** Default constructor wihtout initialization. The represented rotation is undefined. */
Rotation2D() {}

/** \returns the rotation angle */
inline Scalar angle() const { return m_angle; }

@ -81,10 +84,10 @@ public:
/** Applies the rotation to a 2D vector */
Vector2 operator* (const Vector2& vec) const
{ return toRotationMatrix() * vec; }

template<typename Derived>
Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
Matrix2 toRotationMatrix(void) const;
Matrix2 toRotationMatrix() const;

/** \returns the spherical interpolation between \c *this and \a other using
* parameter \a t. It is in fact equivalent to a linear interpolation.

@ -62,6 +62,8 @@ struct transform_construct_from_matrix;

template<typename TransformType> struct transform_take_affine_part;

template<int Mode> struct transform_make_affine;

} // end namespace internal

/** \geometry_module \ingroup Geometry_Module

@ -100,15 +102,15 @@ template<typename TransformType> struct transform_take_affine_part;
*
* However, unlike a plain matrix, the Transform class provides many features
* simplifying both its assembly and usage. In particular, it can be composed
* with any other transformations (Transform,Translation,RotationBase,Matrix)
* with any other transformations (Transform,Translation,RotationBase,DiagonalMatrix)
* and can be directly used to transform implicit homogeneous vectors. All these
* operations are handled via the operator*. For the composition of transformations,
* its principle consists to first convert the right/left hand sides of the product
* to a compatible (Dim+1)^2 matrix and then perform a pure matrix product.
* Of course, internally, operator* tries to perform the minimal number of operations
* according to the nature of each terms. Likewise, when applying the transform
* to non homogeneous vectors, the latters are automatically promoted to homogeneous
* one before doing the matrix product. The convertions to homogeneous representations
* to points, the latters are automatically promoted to homogeneous vectors
* before doing the matrix product. The conventions to homogeneous representations
* are performed as follow:
*
* \b Translation t (Dim)x(1):

@ -122,7 +124,7 @@ template<typename TransformType> struct transform_take_affine_part;
* R & 0\\
* 0\,...\,0 & 1
* \end{array} \right) \f$
*
*<!--
* \b Linear \b Matrix L (Dim)x(Dim):
* \f$ \left( \begin{array}{cc}
* L & 0\\

@ -134,14 +136,20 @@ template<typename TransformType> struct transform_take_affine_part;
* A\\
* 0\,...\,0\,1
* \end{array} \right) \f$
*-->
* \b Scaling \b DiagonalMatrix S (Dim)x(Dim):
* \f$ \left( \begin{array}{cc}
* S & 0\\
* 0\,...\,0 & 1
* \end{array} \right) \f$
*
* \b Column \b vector v (Dim)x(1):
* \b Column \b point v (Dim)x(1):
* \f$ \left( \begin{array}{c}
* v\\
* 1
* \end{array} \right) \f$
*
* \b Set \b of \b column \b vectors V1...Vn (Dim)x(n):
* \b Set \b of \b column \b points V1...Vn (Dim)x(n):
* \f$ \left( \begin{array}{ccc}
* v_1 & ... & v_n\\
* 1 & ... & 1
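Editor's note: the conventions documented above are what make mixed products compose. A minimal sketch (toy values, not part of the diff) combining the Translation, RotationBase and DiagonalMatrix/UniformScaling branches:

    #include <Eigen/Geometry>

    int main()
    {
      Eigen::Affine3f T = Eigen::Translation3f(1, 2, 3)
                        * Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitZ())
                        * Eigen::Scaling(2.0f);
      // A point of size Dim: the linear part applies first, then the translation.
      Eigen::Vector3f p = T * Eigen::Vector3f(1, 0, 0);
      return p.norm() > 0.f ? 0 : 1;
    }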
@ -194,9 +202,9 @@ public:
/** type of the matrix used to represent the linear part of the transformation */
typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
/** type of read/write reference to the linear part of the transformation */
typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact)> LinearPart;
typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart;
/** type of read reference to the linear part of the transformation */
typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact)> ConstLinearPart;
typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> ConstLinearPart;
/** type of read/write reference to the affine part of the transformation */
typedef typename internal::conditional<int(Mode)==int(AffineCompact),
MatrixType&,

@ -230,8 +238,7 @@ public:
inline Transform()
{
check_template_params();
if (int(Mode)==Affine)
makeAffine();
internal::transform_make_affine<(int(Mode)==Affine) ? Affine : AffineCompact>::run(m_matrix);
}

inline Transform(const Transform& other)

@ -383,26 +390,39 @@ public:
/** \returns a writable expression of the translation vector of the transformation */
inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); }

/** \returns an expression of the product between the transform \c *this and a matrix expression \a other
/** \returns an expression of the product between the transform \c *this and a matrix expression \a other.
*
* The right hand side \a other might be either:
* \li a vector of size Dim,
* The right-hand-side \a other can be either:
* \li an homogeneous vector of size Dim+1,
* \li a set of vectors of size Dim x Dynamic,
* \li a set of homogeneous vectors of size Dim+1 x Dynamic,
* \li a linear transformation matrix of size Dim x Dim,
* \li an affine transformation matrix of size Dim x Dim+1,
* \li a set of homogeneous vectors of size Dim+1 x N,
* \li a transformation matrix of size Dim+1 x Dim+1.
*
* Moreover, if \c *this represents an affine transformation (i.e., Mode!=Projective), then \a other can also be:
* \li a point of size Dim (computes: \code this->linear() * other + this->translation()\endcode),
* \li a set of N points as a Dim x N matrix (computes: \code (this->linear() * other).colwise() + this->translation()\endcode),
*
* In all cases, the return type is a matrix or vector of same sizes as the right-hand-side \a other.
*
* If you want to interpret \a other as a linear or affine transformation, then first convert it to a Transform<> type,
* or do your own cooking.
*
* Finally, if you want to apply Affine transformations to vectors, then explicitly apply the linear part only:
* \code
* Affine3f A;
* Vector3f v1, v2;
* v2 = A.linear() * v1;
* \endcode
*
*/
// note: this function is defined here because some compilers cannot find the respective declaration
template<typename OtherDerived>
EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl<Transform, OtherDerived>::ResultType
EIGEN_STRONG_INLINE const typename OtherDerived::PlainObject
operator * (const EigenBase<OtherDerived> &other) const
{ return internal::transform_right_product_impl<Transform, OtherDerived>::run(*this,other.derived()); }

/** \returns the product expression of a transformation matrix \a a times a transform \a b
*
* The left hand side \a other might be either:
* The left hand side \a other can be either:
* \li a linear transformation matrix of size Dim x Dim,
* \li an affine transformation matrix of size Dim x Dim+1,
* \li a general transformation matrix of size Dim+1 x Dim+1.
@ -530,9 +550,9 @@ public:

inline Transform& operator=(const UniformScaling<Scalar>& t);
inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }
inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry)> operator*(const UniformScaling<Scalar>& s) const
inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?int(Affine):int(Mode))> operator*(const UniformScaling<Scalar>& s) const
{
Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Isometry),Options> res = *this;
Transform<Scalar,Dim,(int(Mode)==int(Isometry)?int(Affine):int(Mode)),Options> res = *this;
res.scale(s.factor());
return res;
}

@ -591,11 +611,7 @@ public:
*/
void makeAffine()
{
if(int(Mode)!=int(AffineCompact))
{
matrix().template block<1,Dim>(Dim,0).setZero();
matrix().coeffRef(Dim,Dim) = Scalar(1);
}
internal::transform_make_affine<int(Mode)>::run(m_matrix);
}

/** \internal

@ -1079,6 +1095,24 @@ Transform<Scalar,Dim,Mode,Options>::fromPositionOrientationScale(const MatrixBas

namespace internal {

template<int Mode>
struct transform_make_affine
{
template<typename MatrixType>
static void run(MatrixType &mat)
{
static const int Dim = MatrixType::ColsAtCompileTime-1;
mat.template block<1,Dim>(Dim,0).setZero();
mat.coeffRef(Dim,Dim) = typename MatrixType::Scalar(1);
}
};

template<>
struct transform_make_affine<AffineCompact>
{
template<typename MatrixType> static void run(MatrixType &) { }
};

// selector needed to avoid taking the inverse of a 3x4 matrix
template<typename TransformType, int Mode=TransformType::Mode>
struct projective_transform_inverse

@ -162,7 +162,7 @@ public:
* determined by \a prec.
*
* \sa MatrixBase::isApprox() */
bool isApprox(const Translation& other, typename NumTraits<Scalar>::Real prec = NumTraits<Scalar>::dummy_precision()) const
bool isApprox(const Translation& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const
{ return m_coeffs.isApprox(other.m_coeffs, prec); }

};

@ -113,7 +113,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
const Index n = src.cols(); // number of measurements

// required for demeaning ...
const RealScalar one_over_n = 1 / static_cast<RealScalar>(n);
const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n);

// computation of mean
const VectorType src_mean = src.rowwise().sum() * one_over_n;

@ -136,16 +136,16 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo

// Eq. (39)
VectorType S = VectorType::Ones(m);
if (sigma.determinant()<0) S(m-1) = -1;
if (sigma.determinant()<Scalar(0)) S(m-1) = Scalar(-1);

// Eq. (40) and (43)
const VectorType& d = svd.singularValues();
Index rank = 0; for (Index i=0; i<m; ++i) if (!internal::isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank;
if (rank == m-1) {
if ( svd.matrixU().determinant() * svd.matrixV().determinant() > 0 ) {
if ( svd.matrixU().determinant() * svd.matrixV().determinant() > Scalar(0) ) {
Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose();
} else {
const Scalar s = S(m-1); S(m-1) = -1;
const Scalar s = S(m-1); S(m-1) = Scalar(-1);
Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
S(m-1) = s;
}

@ -156,7 +156,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
if (with_scaling)
{
// Eq. (42)
const Scalar c = 1/src_var * svd.singularValues().dot(S);
const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S);

// Eq. (41)
Rt.col(m).head(m) = dst_mean;
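Editor's note: a minimal usage sketch of the umeyama() routine patched above, recovering a known similarity transform from point correspondences (toy check, not part of the diff):

    #include <Eigen/Geometry>

    int main()
    {
      Eigen::Matrix<double, 3, Eigen::Dynamic> src = Eigen::MatrixXd::Random(3, 10);
      Eigen::Matrix<double, 3, Eigen::Dynamic> dst = 2.0 * src;  // pure scaling
      Eigen::Matrix4d T = Eigen::umeyama(src, dst, true);        // with_scaling
      return T(0, 0) > 1.5 ? 0 : 1;  // recovered scale c (Eq. 42) is ~2
    }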
@@ -48,7 +48,7 @@ void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vec
   typedef typename MatrixType::Index Index;
   enum { TFactorSize = MatrixType::ColsAtCompileTime };
   Index nbVecs = vectors.cols();
-  Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize> T(nbVecs,nbVecs);
+  Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize, ColMajor> T(nbVecs,nbVecs);
   make_block_householder_triangular_factor(T, vectors, hCoeffs);
 
   const TriangularView<const VectorsType, UnitLower>& V(vectors);
@@ -65,10 +65,10 @@ class DiagonalPreconditioner
       {
         typename MatType::InnerIterator it(mat,j);
         while(it && it.index()!=j) ++it;
-        if(it && it.index()==j)
+        if(it && it.index()==j && it.value()!=Scalar(0))
           m_invdiag(j) = Scalar(1)/it.value();
         else
-          m_invdiag(j) = 0;
+          m_invdiag(j) = Scalar(1);
       }
       m_isInitialized = true;
       return *this;
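The guard added here keeps a structurally present but numerically zero diagonal entry from producing an infinite scaling factor; such rows now fall back to the identity, i.e. no preconditioning. A standalone dense sketch of the same rule (function name ours, not Eigen's):

    #include <Eigen/Dense>

    Eigen::VectorXd jacobi_inverse_diagonal(const Eigen::MatrixXd& A)
    {
      Eigen::VectorXd invdiag(A.cols());
      for (int j = 0; j < A.cols(); ++j)
        invdiag(j) = (A(j, j) != 0.0) ? 1.0 / A(j, j)
                                      : 1.0;  // zero diagonal: leave the row unscaled
      return invdiag;
    }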
@@ -39,7 +39,6 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
   int maxIters = iters;
 
   int n = mat.cols();
-  x = precond.solve(x);
   VectorType r  = rhs - mat * x;
   VectorType r0 = r;
@@ -61,6 +60,7 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
   VectorType s(n), t(n);
 
   RealScalar tol2 = tol*tol;
+  RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();
   int i = 0;
   int restarts = 0;
@@ -69,7 +69,7 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
     Scalar rho_old = rho;
 
     rho = r0.dot(r);
-    if (internal::isMuchSmallerThan(rho,r0_sqnorm))
+    if (abs(rho) < eps2*r0_sqnorm)
     {
       // The new residual vector became too orthogonal to the arbitrarily chosen direction r0
       // Let's restart with a new r0:
@@ -142,7 +142,7 @@ struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
   * SparseMatrix<double> A(n,n);
   * // fill A and b
   * BiCGSTAB<SparseMatrix<double> > solver;
-  * solver(A);
+  * solver.compute(A);
   * x = solver.solve(b);
   * std::cout << "#iterations:     " << solver.iterations() << std::endl;
   * std::cout << "estimated error: " << solver.error()      << std::endl;
@@ -151,20 +151,7 @@ struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
   * \endcode
   *
   * By default the iterations start with x=0 as an initial guess of the solution.
-  * One can control the start using the solveWithGuess() method. Here is a step by
-  * step execution example starting with a random guess and printing the evolution
-  * of the estimated error:
-  * \code
-  * x = VectorXd::Random(n);
-  * solver.setMaxIterations(1);
-  * int i = 0;
-  * do {
-  *   x = solver.solveWithGuess(b,x);
-  *   std::cout << i << " : " << solver.error() << std::endl;
-  *   ++i;
-  * } while (solver.info()!=Success && i<100);
-  * \endcode
-  * Note that such a step by step excution is slightly slower.
+  * One can control the start using the solveWithGuess() method.
   *
   * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
   */
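With the doc fix above (solver.compute(A) instead of the ill-formed solver(A)), the documented pattern compiles as written; here is a self-contained version, with a toy diagonal system standing in for the real A and b:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    #include <iostream>

    int main()
    {
      const int n = 100;
      Eigen::SparseMatrix<double> A(n, n);
      for (int i = 0; i < n; ++i) A.insert(i, i) = 2.0;  // toy system
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

      Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver;
      solver.compute(A);                                 // note: compute(A), not solver(A)
      Eigen::VectorXd x = solver.solve(b);
      std::cout << "#iterations:     " << solver.iterations() << "\n"
                << "estimated error: " << solver.error() << "\n";
    }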
@@ -199,7 +186,8 @@ public:
     * this class becomes invalid. Call compute() to update it with the new
     * matrix A, or modify a copy of A.
     */
-  BiCGSTAB(const MatrixType& A) : Base(A) {}
+  template<typename MatrixDerived>
+  explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
 
   ~BiCGSTAB() {}
@@ -112,9 +112,9 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
   * This class allows to solve for A.x = b sparse linear problems using a conjugate gradient algorithm.
   * The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse.
   *
-  * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
-  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
-  *               or Upper. Default is Lower.
+  * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
+  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
+  *               Upper, or Lower|Upper in which the full matrix entries will be considered. Default is Lower.
   * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
   *
   * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
@@ -137,21 +137,10 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
   * \endcode
   *
   * By default the iterations start with x=0 as an initial guess of the solution.
-  * One can control the start using the solveWithGuess() method. Here is a step by
-  * step execution example starting with a random guess and printing the evolution
-  * of the estimated error:
-  * \code
-  * x = VectorXd::Random(n);
-  * cg.setMaxIterations(1);
-  * int i = 0;
-  * do {
-  *   x = cg.solveWithGuess(b,x);
-  *   std::cout << i << " : " << cg.error() << std::endl;
-  *   ++i;
-  * } while (cg.info()!=Success && i<100);
-  * \endcode
-  * Note that such a step by step excution is slightly slower.
+  * One can control the start using the solveWithGuess() method.
+  *
+  * ConjugateGradient can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
   *
   * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
   */
 template< typename _MatrixType, int _UpLo, typename _Preconditioner>
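The step-by-step example dropped from the docs above remains a useful illustration of solveWithGuess(); a compilable variant, also exercising the new Lower|Upper mode (toy SPD system assumed):

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>
    #include <iostream>

    int main()
    {
      const int n = 50;
      Eigen::SparseMatrix<double> A(n, n);
      for (int i = 0; i < n; ++i) A.insert(i, i) = 4.0;  // toy SPD system
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

      Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower | Eigen::Upper> cg;
      cg.compute(A);

      Eigen::VectorXd x = Eigen::VectorXd::Random(n);   // random initial guess
      cg.setMaxIterations(1);
      int i = 0;
      do {
        x = cg.solveWithGuess(b, x);                    // one iteration per call
        std::cout << i << " : " << cg.error() << std::endl;
        ++i;
      } while (cg.info() != Eigen::Success && i < 100);
    }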
@@ -189,7 +178,8 @@ public:
     * this class becomes invalid. Call compute() to update it with the new
     * matrix A, or modify a copy of A.
     */
-  ConjugateGradient(const MatrixType& A) : Base(A) {}
+  template<typename MatrixDerived>
+  explicit ConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}
 
   ~ConjugateGradient() {}
@@ -213,6 +203,10 @@ public:
   template<typename Rhs,typename Dest>
   void _solveWithGuess(const Rhs& b, Dest& x) const
   {
+    typedef typename internal::conditional<UpLo==(Lower|Upper),
+                                           const MatrixType&,
+                                           SparseSelfAdjointView<const MatrixType, UpLo>
+                                          >::type MatrixWrapperType;
     m_iterations = Base::maxIterations();
     m_error = Base::m_tolerance;
@@ -222,8 +216,7 @@ public:
       m_error = Base::m_tolerance;
 
       typename Dest::ColXpr xj(x,j);
-      internal::conjugate_gradient(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
-                                   Base::m_preconditioner, m_iterations, m_error);
+      internal::conjugate_gradient(MatrixWrapperType(*mp_matrix), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
     }
 
     m_isInitialized = true;
@@ -234,7 +227,7 @@ public:
   template<typename Rhs,typename Dest>
   void _solve(const Rhs& b, Dest& x) const
   {
-    x.setOnes();
+    x.setZero();
     _solveWithGuess(b,x);
   }
@@ -150,7 +150,6 @@ class IncompleteLUT : internal::noncopyable
     {
       analyzePattern(amat);
       factorize(amat);
-      m_isInitialized = m_factorizationIsOk;
       return *this;
     }
@@ -160,7 +159,7 @@ class IncompleteLUT : internal::noncopyable
     template<typename Rhs, typename Dest>
     void _solve(const Rhs& b, Dest& x) const
     {
-      x = m_Pinv * b;
+      x = m_Pinv * b;
       x = m_lu.template triangularView<UnitLower>().solve(x);
       x = m_lu.template triangularView<Upper>().solve(x);
       x = m_P * x;
@@ -223,18 +222,29 @@ template<typename _MatrixType>
 void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
 {
   // Compute the Fill-reducing permutation
+  // Since ILUT does not perform any numerical pivoting,
+  // it is highly preferable to keep the diagonal through symmetric permutations.
+#ifndef EIGEN_MPL2_ONLY
+  // To this end, let's symmetrize the pattern and perform AMD on it.
   SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
   SparseMatrix<Scalar,ColMajor, Index> mat2 = amat.transpose();
-  // Symmetrize the pattern
   // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
   //       on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred...
   SparseMatrix<Scalar,ColMajor, Index> AtA = mat2 + mat1;
   AtA.prune(keep_diag());
-  internal::minimum_degree_ordering<Scalar, Index>(AtA, m_P);  // Then compute the AMD ordering...
-
-  m_Pinv  = m_P.inverse(); // ... and the inverse permutation
+  AMDOrdering<Index> ordering;
+  ordering(AtA,m_P);
+  m_Pinv  = m_P.inverse(); // cache the inverse permutation
+#else
+  // If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine.
+  SparseMatrix<Scalar,ColMajor, Index> mat1 = amat;
+  COLAMDOrdering<Index> ordering;
+  ordering(mat1,m_Pinv);
+  m_P = m_Pinv.inverse();
+#endif
 
   m_analysisIsOk = true;
+  m_factorizationIsOk = false;
+  m_isInitialized = false;
 }
 
 template <typename Scalar>
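The analyzePattern() rework above routes the fill-reducing permutation through AMDOrdering, with a COLAMDOrdering fallback under EIGEN_MPL2_ONLY; from user code this stays transparent. A hedged sketch of IncompleteLUT as a BiCGSTAB preconditioner (function name ours):

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>

    Eigen::VectorXd solve_with_ilut(const Eigen::SparseMatrix<double>& A,
                                    const Eigen::VectorXd& b)
    {
      Eigen::BiCGSTAB<Eigen::SparseMatrix<double>,
                      Eigen::IncompleteLUT<double> > solver;
      solver.preconditioner().setDroptol(1e-6);  // drop tolerance of the ILUT factors
      solver.compute(A);                         // triggers analyzePattern + factorize
      return solver.solve(b);
    }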
@@ -442,6 +452,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
   m_lu.makeCompressed();
 
   m_factorizationIsOk = true;
+  m_isInitialized = m_factorizationIsOk;
   m_info = Success;
 }
@@ -49,10 +49,11 @@ public:
     * this class becomes invalid. Call compute() to update it with the new
     * matrix A, or modify a copy of A.
     */
-  IterativeSolverBase(const MatrixType& A)
+  template<typename InputDerived>
+  IterativeSolverBase(const EigenBase<InputDerived>& A)
   {
     init();
-    compute(A);
+    compute(A.derived());
   }
 
   ~IterativeSolverBase() {}
@@ -62,9 +63,11 @@ public:
     * Currently, this function mostly calls analyzePattern on the preconditioner. In the future
     * we might, for instance, implement column reordering for faster matrix vector products.
     */
-  Derived& analyzePattern(const MatrixType& A)
+  template<typename InputDerived>
+  Derived& analyzePattern(const EigenBase<InputDerived>& A)
   {
-    m_preconditioner.analyzePattern(A);
+    grabInput(A.derived());
+    m_preconditioner.analyzePattern(*mp_matrix);
     m_isInitialized = true;
     m_analysisIsOk = true;
     m_info = Success;
@@ -80,11 +83,12 @@ public:
     * this class becomes invalid. Call compute() to update it with the new
     * matrix A, or modify a copy of A.
     */
-  Derived& factorize(const MatrixType& A)
+  template<typename InputDerived>
+  Derived& factorize(const EigenBase<InputDerived>& A)
   {
+    grabInput(A.derived());
     eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
-    mp_matrix = &A;
-    m_preconditioner.factorize(A);
+    m_preconditioner.factorize(*mp_matrix);
     m_factorizationIsOk = true;
     m_info = Success;
     return derived();
@@ -100,10 +104,11 @@ public:
     * this class becomes invalid. Call compute() to update it with the new
     * matrix A, or modify a copy of A.
     */
-  Derived& compute(const MatrixType& A)
+  template<typename InputDerived>
+  Derived& compute(const EigenBase<InputDerived>& A)
   {
-    mp_matrix = &A;
-    m_preconditioner.compute(A);
+    grabInput(A.derived());
+    m_preconditioner.compute(*mp_matrix);
     m_isInitialized = true;
     m_analysisIsOk = true;
     m_factorizationIsOk = true;
@@ -212,6 +217,28 @@ public:
   }
 
 protected:
 
+  template<typename InputDerived>
+  void grabInput(const EigenBase<InputDerived>& A)
+  {
+    // we const cast to prevent the creation of a MatrixType temporary by the compiler.
+    grabInput_impl(A.const_cast_derived());
+  }
+
+  template<typename InputDerived>
+  void grabInput_impl(const EigenBase<InputDerived>& A)
+  {
+    m_copyMatrix = A;
+    mp_matrix = &m_copyMatrix;
+  }
+
+  void grabInput_impl(MatrixType& A)
+  {
+    if(MatrixType::RowsAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==Dynamic)
+      m_copyMatrix.resize(0,0);
+    mp_matrix = &A;
+  }
+
   void init()
   {
     m_isInitialized = false;
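The grabInput machinery added above implements a reference-or-copy policy: a plain lvalue of the solver's MatrixType is kept by pointer, while any other expression is first evaluated into m_copyMatrix. A generic standalone sketch of the same dispatch (all names ours, not Eigen's):

    #include <Eigen/Dense>

    struct Holder {
      Eigen::MatrixXd copy;         // plays the role of m_copyMatrix
      const Eigen::MatrixXd* ptr;   // plays the role of mp_matrix

      // Exact-type lvalue: no copy, just point at the caller's matrix.
      void grab(Eigen::MatrixXd& m) { copy.resize(0, 0); ptr = &m; }

      // Anything else: materialize the expression into the member copy.
      template<typename Derived>
      void grab(const Eigen::EigenBase<Derived>& expr)
      { copy = expr.derived(); ptr = &copy; }
    };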
@@ -220,6 +247,7 @@ protected:
     m_maxIterations = -1;
     m_tolerance = NumTraits<Scalar>::epsilon();
   }
+  MatrixType m_copyMatrix;
   const MatrixType* mp_matrix;
   Preconditioner m_preconditioner;
@@ -20,10 +20,11 @@ namespace Eigen {
   *
   * \param MatrixType the type of the matrix of which we are computing the LU decomposition
   *
-  * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A
-  * is decomposed as A = PLUQ where L is unit-lower-triangular, U is upper-triangular, and P and Q
-  * are permutation matrices. This is a rank-revealing LU decomposition. The eigenvalues (diagonal
-  * coefficients) of U are sorted in such a way that any zeros are at the end.
+  * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A is
+  * decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is
+  * upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU
+  * decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any
+  * zeros are at the end.
   *
   * This decomposition provides the generic approach to solving systems of linear equations, computing
   * the rank, invertibility, inverse, kernel, and determinant.
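The reformulated description above (A = P^{-1} L U Q^{-1}) can be checked directly with rank() and reconstructedMatrix(); an illustrative sketch:

    #include <Eigen/LU>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
      Eigen::FullPivLU<Eigen::MatrixXd> lu(A);

      std::cout << "rank: " << lu.rank() << "\n";
      std::cout << "|P^-1 L U Q^-1 - A| = "
                << (lu.reconstructedMatrix() - A).norm() << "\n";  // ~ machine epsilon

      Eigen::VectorXd b = Eigen::VectorXd::Ones(4);
      Eigen::VectorXd x = lu.solve(b);
      std::cout << "residual: " << (A * x - b).norm() << "\n";
    }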
@@ -373,6 +374,12 @@ template<typename _MatrixType> class FullPivLU
     inline Index cols() const { return m_lu.cols(); }
 
   protected:
+
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+
     MatrixType m_lu;
     PermutationPType m_p;
     PermutationQType m_q;
@@ -417,6 +424,8 @@ FullPivLU<MatrixType>::FullPivLU(const MatrixType& matrix)
 template<typename MatrixType>
 FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
 {
+  check_template_parameters();
+
   // the permutations are stored as int indices, so just to be sure:
   eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());
@@ -511,8 +520,8 @@ typename internal::traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant
 }
 
 /** \returns the matrix represented by the decomposition,
-  * i.e., it returns the product: P^{-1} L U Q^{-1}.
-  * This function is provided for debug purpose. */
+  * i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$.
+  * This function is provided for debug purposes. */
 template<typename MatrixType>
 MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
 {
@@ -679,7 +688,7 @@ struct solve_retval<FullPivLU<_MatrixType>, Rhs>
     */
 
     const Index rows = dec().rows(), cols = dec().cols(),
-                nonzero_pivots = dec().nonzeroPivots();
+                nonzero_pivots = dec().rank();
     eigen_assert(rhs().rows() == rows);
     const Index smalldim = (std::min)(rows, cols);
@@ -171,6 +171,12 @@ template<typename _MatrixType> class PartialPivLU
     inline Index cols() const { return m_lu.cols(); }
 
   protected:
+
+    static void check_template_parameters()
+    {
+      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+    }
+
     MatrixType m_lu;
     PermutationType m_p;
     TranspositionType m_rowsTranspositions;
@@ -386,6 +392,8 @@ void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, t
 template<typename MatrixType>
 PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& matrix)
 {
+  check_template_parameters();
+
   // the row permutation is stored as int indices, so just to be sure:
   eigen_assert(matrix.rows()<NumTraits<int>::highest());
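The check_template_parameters() calls added to both LU classes turn a meaningless instantiation into a compile-time error instead of silently wrong results; LU pivoting assumes a field, not a ring of integers. Sketch (function names ours):

    #include <Eigen/LU>

    void ok() { Eigen::PartialPivLU<Eigen::MatrixXd> lu(Eigen::MatrixXd::Identity(3, 3)); }
    // void bad() { Eigen::PartialPivLU<Eigen::MatrixXi> lu; }
    // ^ would now trip EIGEN_STATIC_ASSERT_NON_INTEGER at compile time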
@@ -8,7 +8,7 @@
 
 NOTE: this routine has been adapted from the CSparse library:
 
 Copyright (c) 2006, Timothy A. Davis.
-http://www.cise.ufl.edu/research/sparse/CSparse
+http://www.suitesparse.com
 
 CSparse is free software; you can redistribute it and/or
 modify it under the terms of the GNU Lesser General Public
@@ -137,22 +137,27 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
     degree[i] = len[i];                    // degree of node i
   }
   mark = internal::cs_wclear<Index>(0, 0, w, n); /* clear w */
-  elen[n] = -2;                            /* n is a dead element */
-  Cp[n] = -1;                              /* n is a root of assembly tree */
-  w[n] = 0;                                /* n is a dead element */
 
   /* --- Initialize degree lists ------------------------------------------ */
   for(i = 0; i < n; i++)
   {
+    bool has_diag = false;
+    for(p = Cp[i]; p<Cp[i+1]; ++p)
+      if(Ci[p]==i)
+      {
+        has_diag = true;
+        break;
+      }
+
     d = degree[i];
-    if(d == 0)                             /* node i is empty */
+    if(d == 1 && has_diag)                 /* node i is empty */
     {
       elen[i] = -2;                        /* element i is dead */
       nel++;
       Cp[i] = -1;                          /* i is a root of assembly tree */
       w[i] = 0;
     }
-    else if(d > dense)                     /* node i is dense */
+    else if(d > dense || !has_diag)        /* node i is dense or has no structural diagonal element */
     {
       nv[i] = 0;                           /* absorb i into element n */
       elen[i] = -1;                        /* node i is dead */
@@ -168,6 +173,10 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
     }
   }
 
+  elen[n] = -2;                            /* n is a dead element */
+  Cp[n] = -1;                              /* n is a root of assembly tree */
+  w[n] = 0;                                /* n is a dead element */
+
   while (nel < n)                          /* while (selecting pivots) do */
   {
     /* --- Select node of minimum approximate degree -------------------- */
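With the two hunks above, nodes lacking a structural diagonal entry are absorbed into the dead element n, whose bookkeeping now happens after the degree lists are built. User code reaches this routine through AMDOrdering, which symmetrizes the pattern itself; a minimal sketch (function name ours):

    #include <Eigen/OrderingMethods>
    #include <Eigen/Sparse>

    void amd_permutation(const Eigen::SparseMatrix<double>& A)
    {
      Eigen::AMDOrdering<int> ordering;
      Eigen::AMDOrdering<int>::PermutationType perm;
      ordering(A, perm);  // internally orders the symmetrized pattern of A
    }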
@@ -41,12 +41,8 @@
 //
 // The colamd/symamd library is available at
 //
-//  http://www.cise.ufl.edu/research/sparse/colamd/
-
-// This is the http://www.cise.ufl.edu/research/sparse/colamd/colamd.h
-// file.  It is required by the colamd.c, colamdmex.c, and symamdmex.c
-// files, and by any C code that calls the routines whose prototypes are
-// listed below, or that uses the colamd/symamd definitions listed below.
+//  http://www.suitesparse.com
 
 #ifndef EIGEN_COLAMD_H
 #define EIGEN_COLAMD_H
@@ -102,9 +98,6 @@ namespace internal {
 /* === Definitions ========================================================== */
 /* ========================================================================== */
 
-#define COLAMD_MAX(a,b) (((a) > (b)) ? (a) : (b))
-#define COLAMD_MIN(a,b) (((a) < (b)) ? (a) : (b))
-
 #define ONES_COMPLEMENT(r) (-(r)-1)
 
 /* -------------------------------------------------------------------------- */
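Dropping the COLAMD_MAX/COLAMD_MIN macros in favour of (std::max) and (std::min), as the hunks below do, has one subtlety: the extra parentheses around the function name suppress function-like macro expansion, so the code survives headers (notoriously <windows.h>) that define min/max macros. Sketch (function name ours):

    #include <algorithm>

    int clamp_score(int score, int n_col)
    {
      return (std::min)(score, n_col);  // calls std::min even if a min(a,b) macro is in scope
    }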
@@ -516,7 +509,7 @@ static Index init_rows_cols /* returns true if OK, or false otherwise */
     Col [col].start = p [col] ;
     Col [col].length = p [col+1] - p [col] ;
 
-    if (Col [col].length < 0)
+    if ((Col [col].length) < 0) // extra parentheses to work-around gcc bug 10200
     {
       /* column pointers must be non-decreasing */
       stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ;
@@ -739,8 +732,8 @@ static void init_scoring
 
   /* === Extract knobs ==================================================== */
 
-  dense_row_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_ROW] * n_col, n_col)) ;
-  dense_col_count = COLAMD_MAX (0, COLAMD_MIN (knobs [COLAMD_DENSE_COL] * n_row, n_row)) ;
+  dense_row_count = std::max<Index>(0, (std::min)(Index(knobs [COLAMD_DENSE_ROW] * n_col), n_col)) ;
+  dense_col_count = std::max<Index>(0, (std::min)(Index(knobs [COLAMD_DENSE_COL] * n_row), n_row)) ;
   COLAMD_DEBUG1 (("colamd: densecount: %d %d\n", dense_row_count, dense_col_count)) ;
   max_deg = 0 ;
   n_col2 = n_col ;
@@ -804,7 +797,7 @@ static void init_scoring
     else
     {
       /* keep track of max degree of remaining rows */
-      max_deg = COLAMD_MAX (max_deg, deg) ;
+      max_deg = (std::max)(max_deg, deg) ;
     }
   }
   COLAMD_DEBUG1 (("colamd: Dense and null rows killed: %d\n", n_row - n_row2)) ;
@@ -842,7 +835,7 @@ static void init_scoring
       /* add row's external degree */
       score += Row [row].shared1.degree - 1 ;
       /* guard against integer overflow */
-      score = COLAMD_MIN (score, n_col) ;
+      score = (std::min)(score, n_col) ;
     }
     /* determine pruned column length */
     col_length = (Index) (new_cp - &A [Col [c].start]) ;
@@ -914,7 +907,7 @@ static void init_scoring
     head [score] = c ;
 
     /* see if this score is less than current min */
-    min_score = COLAMD_MIN (min_score, score) ;
+    min_score = (std::min)(min_score, score) ;
 
   }
@@ -1040,7 +1033,7 @@ static Index find_ordering /* return the number of garbage collections */
 
     /* === Garbage_collection, if necessary ============================= */
 
-    needed_memory = COLAMD_MIN (pivot_col_score, n_col - k) ;
+    needed_memory = (std::min)(pivot_col_score, n_col - k) ;
     if (pfree + needed_memory >= Alen)
     {
       pfree = Eigen::internal::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ;
@@ -1099,7 +1092,7 @@ static Index find_ordering /* return the number of garbage collections */
 
     /* clear tag on pivot column */
     Col [pivot_col].shared1.thickness = pivot_col_thickness ;
-    max_deg = COLAMD_MAX (max_deg, pivot_row_degree) ;
+    max_deg = (std::max)(max_deg, pivot_row_degree) ;
 
 
    /* === Kill all rows used to construct pivot row ==================== */
@@ -1273,7 +1266,7 @@ static Index find_ordering /* return the number of garbage collections */
         /* add set difference */
         cur_score += row_mark - tag_mark ;
         /* integer overflow... */
-        cur_score = COLAMD_MIN (cur_score, n_col) ;
+        cur_score = (std::min)(cur_score, n_col) ;
       }
 
       /* recompute the column's length */
@@ -1386,7 +1379,7 @@ static Index find_ordering /* return the number of garbage collections */
       cur_score -= Col [col].shared1.thickness ;
 
       /* make sure score is less or equal than the max score */
-      cur_score = COLAMD_MIN (cur_score, max_score) ;
+      cur_score = (std::min)(cur_score, max_score) ;
       COLAMD_ASSERT (cur_score >= 0) ;
 
       /* store updated score */
@@ -1409,7 +1402,7 @@ static Index find_ordering /* return the number of garbage collections */
       head [cur_score] = col ;
 
       /* see if this score is less than current min */
-      min_score = COLAMD_MIN (min_score, cur_score) ;
+      min_score = (std::min)(min_score, cur_score) ;
 
     }
Some files were not shown because too many files have changed in this diff.