Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Showing 1767 additions and 341 deletions
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decompositions_sparse_sparse_solver_base_hpp__
#define __eigenpy_decompositions_sparse_sparse_solver_base_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/eigen/EigenBase.hpp"
#include <Eigen/SparseCholesky>
namespace eigenpy {
template <typename SimplicialDerived>
struct SparseSolverBaseVisitor
: public boost::python::def_visitor<
SparseSolverBaseVisitor<SimplicialDerived> > {
typedef SimplicialDerived Solver;
typedef typename SimplicialDerived::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1, MatrixType::Options>
DenseVectorXs;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic,
MatrixType::Options>
DenseMatrixXs;
template <class PyClass>
void visit(PyClass &cl) const {
cl.def("solve", &solve<DenseVectorXs>, bp::args("self", "b"),
"Returns the solution x of A x = b using the current "
"decomposition of A.")
.def("solve", &solve<DenseMatrixXs>, bp::args("self", "B"),
"Returns the solution X of A X = B using the current "
"decomposition of A where B is a right hand side matrix.")
.def("solve", &solve<MatrixType>, bp::args("self", "B"),
"Returns the solution X of A X = B using the current "
"decomposition of A where B is a right hand side matrix.");
}
private:
template <typename MatrixOrVector>
static MatrixOrVector solve(const Solver &self, const MatrixOrVector &vec) {
return self.solve(vec);
}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_decompositions_sparse_sparse_solver_base_hpp__
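The visitor above only injects the three solve() overloads; a concrete solver still has to be declared as a Boost.Python class. The following is a minimal, illustrative sketch, not part of this changeset (the module name, exposed class name, and the choice of Eigen::SimplicialLDLT are assumptions):

#include <Eigen/SparseCholesky>
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/decompositions/sparse/SparseSolverBase.hpp"

typedef Eigen::SparseMatrix<double> SparseMatrixd;
typedef Eigen::SimplicialLDLT<SparseMatrixd> Solver;

BOOST_PYTHON_MODULE(example_sparse_solver) {
  eigenpy::enableEigenPy();
  // SparseSolverBaseVisitor adds the solve() overloads for dense vectors,
  // dense matrices and sparse right-hand sides defined above.
  boost::python::class_<Solver, boost::noncopyable>("ExampleSimplicialLDLT",
                                                    boost::python::no_init)
      .def(boost::python::init<const SparseMatrixd &>(
          boost::python::args("self", "matrix")))
      .def(eigenpy::SparseSolverBaseVisitor<Solver>());
}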
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_accelerate_accelerate_hpp__
#define __eigenpy_decomposition_sparse_accelerate_accelerate_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/eigen/EigenBase.hpp"
#include "eigenpy/decompositions/sparse/SparseSolverBase.hpp"
#include <Eigen/AccelerateSupport>
namespace eigenpy {
template <typename AccelerateDerived>
struct AccelerateImplVisitor : public boost::python::def_visitor<
AccelerateImplVisitor<AccelerateDerived> > {
typedef AccelerateDerived Solver;
typedef typename AccelerateDerived::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef MatrixType CholMatrixType;
typedef typename MatrixType::StorageIndex StorageIndex;
template <class PyClass>
void visit(PyClass &cl) const {
cl
.def("analyzePattern", &Solver::analyzePattern,
bp::args("self", "matrix"),
"Performs a symbolic decomposition on the sparcity of matrix.\n"
"This function is particularly useful when solving for several "
"problems having the same structure.")
.def(EigenBaseVisitor<Solver>())
.def(SparseSolverBaseVisitor<Solver>())
.def("compute",
(Solver & (Solver::*)(const MatrixType &matrix)) & Solver::compute,
bp::args("self", "matrix"),
"Computes the sparse Cholesky decomposition of a given matrix.",
bp::return_self<>())
.def("factorize", &Solver::factorize, bp::args("self", "matrix"),
"Performs a numeric decomposition of a given matrix.\n"
"The given matrix must has the same sparcity than the matrix on "
"which the symbolic decomposition has been performed.\n"
"See also analyzePattern().")
.def("info", &Solver::info, bp::arg("self"),
"NumericalIssue if the input contains INF or NaN values or "
"overflow occured. Returns Success otherwise.")
.def("setOrder", &Solver::setOrder, bp::arg("self"), "Set order");
}
static void expose(const std::string &name, const std::string &doc = "") {
bp::class_<Solver, boost::noncopyable>(name.c_str(), doc.c_str(),
bp::no_init)
.def(AccelerateImplVisitor())
.def(bp::init<>(bp::arg("self"), "Default constructor"))
.def(bp::init<MatrixType>(bp::args("self", "matrix"),
"Constructs and performs the "
"factorization from a given matrix."));
}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_decomposition_sparse_accelerate_accelerate_hpp__
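As a hedged illustration only, this visitor can be instantiated for one of the Accelerate-based solvers shipped with Eigen's AccelerateSupport module (Apple platforms only; the module name is made up and the header path is inferred from the include guard):

#include "eigenpy/decompositions/sparse/accelerate/accelerate.hpp"

BOOST_PYTHON_MODULE(example_accelerate) {
  eigenpy::enableEigenPy();
  typedef Eigen::SparseMatrix<double, Eigen::ColMajor> ColMajorSparseMatrix;
  // Exposes a Python class wrapping Eigen::AccelerateLLT, with compute(),
  // analyzePattern(), factorize(), solve(), etc. provided by the visitors.
  eigenpy::AccelerateImplVisitor<Eigen::AccelerateLLT<ColMajorSparseMatrix> >::
      expose("AccelerateLLT",
             "Direct LLT factorization backed by Apple's Accelerate framework.");
}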
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_base_hpp__
#define __eigenpy_decomposition_sparse_cholmod_cholmod_base_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/eigen/EigenBase.hpp"
#include "eigenpy/decompositions/sparse/SparseSolverBase.hpp"
#include <Eigen/CholmodSupport>
namespace eigenpy {
template <typename CholdmodDerived>
struct CholmodBaseVisitor
: public boost::python::def_visitor<CholmodBaseVisitor<CholdmodDerived> > {
typedef CholdmodDerived Solver;
typedef typename CholdmodDerived::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef MatrixType CholMatrixType;
typedef typename MatrixType::StorageIndex StorageIndex;
template <class PyClass>
void visit(PyClass &cl) const {
cl.def("analyzePattern", &Solver::analyzePattern,
bp::args("self", "matrix"),
"Performs a symbolic decomposition on the sparcity of matrix.\n"
"This function is particularly useful when solving for several "
"problems having the same structure.")
.def(EigenBaseVisitor<Solver>())
.def(SparseSolverBaseVisitor<Solver>())
.def("compute",
(Solver & (Solver::*)(const MatrixType &matrix)) & Solver::compute,
bp::args("self", "matrix"),
"Computes the sparse Cholesky decomposition of a given matrix.",
bp::return_self<>())
.def("determinant", &Solver::determinant, bp::arg("self"),
"Returns the determinant of the underlying matrix from the "
"current factorization.")
.def("factorize", &Solver::factorize, bp::args("self", "matrix"),
"Performs a numeric decomposition of a given matrix.\n"
"The given matrix must has the same sparcity than the matrix on "
"which the symbolic decomposition has been performed.\n"
"See also analyzePattern().")
.def("info", &Solver::info, bp::arg("self"),
"NumericalIssue if the input contains INF or NaN values or "
"overflow occured. Returns Success otherwise.")
.def("logDeterminant", &Solver::logDeterminant, bp::arg("self"),
"Returns the log determinant of the underlying matrix from the "
"current factorization.")
.def("setShift", &Solver::setShift, (bp::args("self", "offset")),
"Sets the shift parameters that will be used to adjust the "
"diagonal coefficients during the numerical factorization.\n"
"During the numerical factorization, the diagonal coefficients "
"are transformed by the following linear model: d_ii = offset + "
"d_ii.\n"
"The default is the identity transformation with offset=0.",
bp::return_self<>());
}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_base_hpp__
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_decomposition_hpp__
#define __eigenpy_decomposition_sparse_cholmod_cholmod_decomposition_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/decompositions/sparse/cholmod/CholmodBase.hpp"
namespace eigenpy {
template <typename CholdmodDerived>
struct CholmodDecompositionVisitor
: public boost::python::def_visitor<
CholmodDecompositionVisitor<CholdmodDerived> > {
typedef CholdmodDerived Solver;
template <class PyClass>
void visit(PyClass &cl) const {
cl
.def(CholmodBaseVisitor<Solver>())
.def("setMode", &Solver::setMode, bp::args("self", "mode"),
"Set the mode for the Cholesky decomposition.");
}
};
} // namespace eigenpy
#endif // ifndef
// __eigenpy_decomposition_sparse_cholmod_cholmod_decomposition_hpp__
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_ldlt_hpp__
#define __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_ldlt_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/decompositions/sparse/cholmod/CholmodDecomposition.hpp"
#include "eigenpy/utils/scalar-name.hpp"
namespace eigenpy {
template <typename MatrixType_, int UpLo_ = Eigen::Lower>
struct CholmodSimplicialLDLTVisitor
: public boost::python::def_visitor<
CholmodSimplicialLDLTVisitor<MatrixType_, UpLo_> > {
typedef MatrixType_ MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::CholmodSimplicialLDLT<MatrixType_, UpLo_> Solver;
template <class PyClass>
void visit(PyClass &cl) const {
cl
.def(CholmodBaseVisitor<Solver>())
.def(bp::init<>(bp::arg("self"), "Default constructor"))
.def(bp::init<MatrixType>(bp::args("self", "matrix"),
"Constructs and performs the LDLT "
"factorization from a given matrix."))
;
}
static void expose() {
static const std::string classname =
"CholmodSimplicialLDLT_" + scalar_name<Scalar>::shortname();
expose(classname);
}
static void expose(const std::string &name) {
bp::class_<Solver, boost::noncopyable>(
name.c_str(),
"A simplicial direct Cholesky (LDLT) factorization and solver based on "
"Cholmod.\n\n"
"This class allows to solve for A.X = B sparse linear problems via a "
"simplicial LL^T Cholesky factorization using the Cholmod library."
"This simplicial variant is equivalent to Eigen's built-in "
"SimplicialLDLT class."
"Therefore, it has little practical interest. The sparse matrix A must "
"be selfadjoint and positive definite."
"The vectors or matrices X and B can be either dense or sparse.",
bp::no_init)
.def(CholmodSimplicialLDLTVisitor());
}
};
} // namespace eigenpy
#endif // ifndef
// __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_ldlt_hpp__
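For reference, a minimal sketch of registering this decomposition for double scalars; the module name is illustrative, and the generated Python class name depends on scalar_name<Scalar>::shortname() (typically CholmodSimplicialLDLT_d for double):

#include "eigenpy/decompositions/sparse/cholmod/CholmodSimplicialLDLT.hpp"

BOOST_PYTHON_MODULE(example_cholmod) {
  eigenpy::enableEigenPy();
  // expose() builds the class name from the scalar type and applies
  // CholmodSimplicialLDLTVisitor, which itself pulls in CholmodBaseVisitor.
  eigenpy::CholmodSimplicialLDLTVisitor<Eigen::SparseMatrix<double> >::expose();
}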
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_llt_hpp__
#define __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_llt_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/decompositions/sparse/cholmod/CholmodDecomposition.hpp"
#include "eigenpy/utils/scalar-name.hpp"
namespace eigenpy {
template <typename MatrixType_, int UpLo_ = Eigen::Lower>
struct CholmodSimplicialLLTVisitor
: public boost::python::def_visitor<
CholmodSimplicialLLTVisitor<MatrixType_, UpLo_> > {
typedef MatrixType_ MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::CholmodSimplicialLLT<MatrixType_, UpLo_> Solver;
template <class PyClass>
void visit(PyClass &cl) const {
cl
.def(CholmodBaseVisitor<Solver>())
.def(bp::init<>(bp::arg("self"), "Default constructor"))
.def(bp::init<MatrixType>(bp::args("self", "matrix"),
"Constructs and performs the LLT "
"factorization from a given matrix."))
;
}
static void expose() {
static const std::string classname =
"CholmodSimplicialLLT_" + scalar_name<Scalar>::shortname();
expose(classname);
}
static void expose(const std::string &name) {
bp::class_<Solver, boost::noncopyable>(
name.c_str(),
"A simplicial direct Cholesky (LLT) factorization and solver based on "
"Cholmod.\n\n"
"This class allows to solve for A.X = B sparse linear problems via a "
"simplicial LL^T Cholesky factorization using the Cholmod library."
"This simplicial variant is equivalent to Eigen's built-in "
"SimplicialLLT class."
"Therefore, it has little practical interest. The sparse matrix A must "
"be selfadjoint and positive definite."
"The vectors or matrices X and B can be either dense or sparse.",
bp::no_init)
.def(CholmodSimplicialLLTVisitor());
}
};
} // namespace eigenpy
#endif // ifndef
// __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_llt_hpp__
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_supernodal_llt_hpp__
#define __eigenpy_decomposition_sparse_cholmod_cholmod_supernodal_llt_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/decompositions/sparse/cholmod/CholmodDecomposition.hpp"
#include "eigenpy/utils/scalar-name.hpp"
namespace eigenpy {
template <typename MatrixType_, int UpLo_ = Eigen::Lower>
struct CholmodSupernodalLLTVisitor
: public boost::python::def_visitor<
CholmodSupernodalLLTVisitor<MatrixType_, UpLo_> > {
typedef MatrixType_ MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::CholmodSupernodalLLT<MatrixType_, UpLo_> Solver;
template <class PyClass>
void visit(PyClass &cl) const {
cl
.def(CholmodBaseVisitor<Solver>())
.def(bp::init<>(bp::arg("self"), "Default constructor"))
.def(bp::init<MatrixType>(bp::args("self", "matrix"),
"Constructs and performs the LLT "
"factorization from a given matrix."))
;
}
static void expose() {
static const std::string classname =
"CholmodSupernodalLLT_" + scalar_name<Scalar>::shortname();
expose(classname);
}
static void expose(const std::string &name) {
bp::class_<Solver, boost::noncopyable>(
name.c_str(),
"A supernodal direct Cholesky (LLT) factorization and solver based on "
"Cholmod.\n\n"
"This class allows to solve for A.X = B sparse linear problems via a "
"supernodal LL^T Cholesky factorization using the Cholmod library."
"This supernodal variant performs best on dense enough problems, e.g., "
"3D FEM, or very high order 2D FEM."
"The sparse matrix A must be selfadjoint and positive definite. The "
"vectors or matrices X and B can be either dense or sparse.",
bp::no_init)
.def(CholmodSupernodalLLTVisitor());
}
};
} // namespace eigenpy
#endif // ifndef
// __eigenpy_decomposition_sparse_cholmod_cholmod_supernodal_llt_hpp__
//
// Copyright (C) 2020 INRIA
// Copyright (C) 2024 LAAS-CNRS, INRIA
//
#ifndef __eigenpy_deprecation_hpp__
#define __eigenpy_deprecation_hpp__
#include "eigenpy/fwd.hpp"
namespace eigenpy {
enum class DeprecationType { DEPRECATION, FUTURE };
namespace detail {
inline PyObject *deprecationTypeToPyObj(DeprecationType dep) {
switch (dep) {
case DeprecationType::DEPRECATION:
return PyExc_DeprecationWarning;
case DeprecationType::FUTURE:
return PyExc_FutureWarning;
default: // The switch handles all cases explicitly, this should never be
// triggered.
throw std::invalid_argument(
"Undefined DeprecationType - this should never be triggered.");
}
}
} // namespace detail
/// @brief A Boost.Python call policy which triggers a Python warning on
/// precall.
template <DeprecationType deprecation_type = DeprecationType::DEPRECATION,
class BasePolicy = bp::default_call_policies>
struct deprecation_warning_policy : BasePolicy {
using result_converter = typename BasePolicy::result_converter;
using argument_package = typename BasePolicy::argument_package;
deprecation_warning_policy(const std::string &warning_msg)
: BasePolicy(), m_what(warning_msg) {}
std::string what() const { return m_what; }
const BasePolicy *derived() const {
return static_cast<const BasePolicy *>(this);
}
template <class ArgPackage>
bool precall(const ArgPackage &args) const {
PyErr_WarnEx(detail::deprecationTypeToPyObj(deprecation_type),
m_what.c_str(), 1);
return derived()->precall(args);
}
protected:
const std::string m_what;
};
template <DeprecationType deprecation_type = DeprecationType::DEPRECATION,
class BasePolicy = bp::default_call_policies>
struct deprecated_function
: deprecation_warning_policy<deprecation_type, BasePolicy> {
deprecated_function(const std::string &msg =
"This function has been marked as deprecated, and "
"will be removed in the future.")
: deprecation_warning_policy<deprecation_type, BasePolicy>(msg) {}
};
template <DeprecationType deprecation_type = DeprecationType::DEPRECATION,
class BasePolicy = bp::default_call_policies>
struct deprecated_member
: deprecation_warning_policy<deprecation_type, BasePolicy> {
deprecated_member(const std::string &msg =
"This attribute or method has been marked as "
"deprecated, and will be removed in the future.")
: deprecation_warning_policy<deprecation_type, BasePolicy>(msg) {}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_deprecation_hpp__
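A short usage sketch of these call policies in a binding; the wrapped function and module are made up for illustration, and the header path is inferred from the include guard:

#include <boost/python.hpp>
#include "eigenpy/deprecation.hpp"  // assumed path, from the include guard

namespace bp = boost::python;

double old_norm(double x) { return x < 0. ? -x : x; }

BOOST_PYTHON_MODULE(example_deprecation) {
  // Each call to old_norm() from Python first raises a DeprecationWarning
  // carrying the custom message, then forwards to the wrapped function.
  bp::def("old_norm", &old_norm,
          eigenpy::deprecated_function<>(
              "old_norm() is deprecated; use abs() instead."));
}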
/*
 * Copyright 2014-2019, CNRS
- * Copyright 2018-2023, INRIA
 * Copyright 2018-2024, INRIA
 */
#ifndef __eigenpy_details_hpp__
@@ -17,24 +17,69 @@
#include "eigenpy/scalar-conversion.hpp"

namespace eigenpy {

-template <typename MatType, typename EigenEquivalentType>
-EIGENPY_DEPRECATED void enableEigenPySpecific() {
-  enableEigenPySpecific<MatType>();
-}
-
-template <typename MatType>
-void enableEigenPySpecific() {
-  if (check_registration<MatType>()) return;
template <typename EigenType,
          typename BaseType = typename get_eigen_base_type<EigenType>::type,
          typename Scalar = typename EigenType::Scalar>
struct expose_eigen_type_impl;

template <typename MatType, typename Scalar>
struct expose_eigen_type_impl<MatType, Eigen::MatrixBase<MatType>, Scalar> {
  static void run() {
    if (check_registration<MatType>()) return;

    // to-python
    EigenToPyConverter<MatType>::registration();
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
    EigenToPyConverter<Eigen::Ref<MatType> >::registration();
    EigenToPyConverter<const Eigen::Ref<const MatType> >::registration();
#endif

    // from-python
    EigenFromPyConverter<MatType>::registration();
  }
};

template <typename MatType, typename Scalar>
struct expose_eigen_type_impl<MatType, Eigen::SparseMatrixBase<MatType>,
                              Scalar> {
  static void run() {
    if (check_registration<MatType>()) return;

    // to-python
    EigenToPyConverter<MatType>::registration();
    // #if EIGEN_VERSION_AT_LEAST(3, 2, 0)
    //   EigenToPyConverter<Eigen::Ref<MatType> >::registration();
    //   EigenToPyConverter<const Eigen::Ref<const MatType> >::registration();
    // #endif

    // from-python
    EigenFromPyConverter<MatType>::registration();
  }
};

#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType, typename Scalar>
struct expose_eigen_type_impl<TensorType, Eigen::TensorBase<TensorType>,
                              Scalar> {
  static void run() {
    if (check_registration<TensorType>()) return;

    // to-python
    EigenToPyConverter<TensorType>::registration();
    EigenToPyConverter<Eigen::TensorRef<TensorType> >::registration();
    EigenToPyConverter<
        const Eigen::TensorRef<const TensorType> >::registration();

    // from-python
    EigenFromPyConverter<TensorType>::registration();
  }
};
#endif

-  // from-python
-  EigenFromPyConverter<MatType>::registration();
-}
template <typename MatType>
void enableEigenPySpecific() {
  expose_eigen_type_impl<MatType>::run();
}

}  // namespace eigenpy
...
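With the refactor above, enableEigenPySpecific() simply forwards to the expose_eigen_type_impl specialization matching the Eigen base type. A hedged sketch of registering converters for a type outside the default set (the 5x5 matrix and module name are arbitrary examples):

#include "eigenpy/eigenpy.hpp"

BOOST_PYTHON_MODULE(example_custom_matrix) {
  eigenpy::enableEigenPy();
  // check_registration() inside expose_eigen_type_impl makes this a no-op
  // if converters for this type were already registered.
  eigenpy::enableEigenPySpecific<Eigen::Matrix<double, 5, 5> >();
}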
//
-// Copyright (c) 2014-2020 CNRS INRIA
// Copyright (c) 2014-2023 CNRS INRIA
//
#ifndef __eigenpy_eigen_allocator_hpp__
@@ -70,31 +70,114 @@ struct init_matrix_or_array<MatType, true> {
  }
};
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename Tensor>
struct init_tensor {
static Tensor *run(PyArrayObject *pyArray, void *storage = NULL) {
enum { Rank = Tensor::NumDimensions };
assert(PyArray_NDIM(pyArray) == Rank);
typedef typename Tensor::Index Index;
Eigen::array<Index, Rank> dimensions;
for (int k = 0; k < PyArray_NDIM(pyArray); ++k)
dimensions[k] = PyArray_DIMS(pyArray)[k];
if (storage)
return new (storage) Tensor(dimensions);
else
return new Tensor(dimensions);
}
};
#endif
-template <typename MatType>
-bool check_swap(PyArrayObject *pyArray, const Eigen::MatrixBase<MatType> &mat) {
-  if (PyArray_NDIM(pyArray) == 0) return false;
-  if (mat.rows() == PyArray_DIMS(pyArray)[0])
-    return false;
-  else
-    return true;
-}
template <typename MatType>
struct check_swap_impl_matrix;

template <typename EigenType,
          typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct check_swap_impl;

template <typename MatType>
struct check_swap_impl<MatType, Eigen::MatrixBase<MatType> >
    : check_swap_impl_matrix<MatType> {};

template <typename MatType>
struct check_swap_impl_matrix {
  static bool run(PyArrayObject *pyArray,
                  const Eigen::MatrixBase<MatType> &mat) {
    if (PyArray_NDIM(pyArray) == 0) return false;
    if (mat.rows() == PyArray_DIMS(pyArray)[0])
      return false;
    else
      return true;
  }
};

template <typename EigenType>
bool check_swap(PyArrayObject *pyArray, const EigenType &mat) {
  return check_swap_impl<EigenType>::run(pyArray, mat);
}
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct check_swap_impl_tensor {
static bool run(PyArrayObject * /*pyArray*/, const TensorType & /*tensor*/) {
return false;
}
};
template <typename TensorType>
struct check_swap_impl<TensorType, Eigen::TensorBase<TensorType> >
: check_swap_impl_tensor<TensorType> {};
#endif
// template <typename MatType>
// struct cast_impl_matrix;
//
// template <typename EigenType,
// typename BaseType = typename get_eigen_base_type<EigenType>::type>
// struct cast_impl;
//
// template <typename MatType>
// struct cast_impl<MatType, Eigen::MatrixBase<MatType> >
// : cast_impl_matrix<MatType> {};
//
// template <typename MatType>
// struct cast_impl_matrix
//{
// template <typename NewScalar, typename MatrixIn, typename MatrixOut>
// static void run(const Eigen::MatrixBase<MatrixIn> &input,
// const Eigen::MatrixBase<MatrixOut> &dest) {
// dest.const_cast_derived() = input.template cast<NewScalar>();
// }
// };
template <typename Scalar, typename NewScalar,
          template <typename D> class EigenBase = Eigen::MatrixBase,
          bool cast_is_valid = FromTypeToType<Scalar, NewScalar>::value>
-struct cast_matrix_or_array {
struct cast {
  template <typename MatrixIn, typename MatrixOut>
  static void run(const Eigen::MatrixBase<MatrixIn> &input,
                  const Eigen::MatrixBase<MatrixOut> &dest) {
-    MatrixOut &dest_ = const_cast<MatrixOut &>(dest.derived());
-    dest_ = input.template cast<NewScalar>();
    dest.const_cast_derived() = input.template cast<NewScalar>();
  }
};

#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename Scalar, typename NewScalar>
struct cast<Scalar, NewScalar, Eigen::TensorRef, true> {
  template <typename TensorIn, typename TensorOut>
  static void run(const TensorIn &input, TensorOut &dest) {
    dest = input.template cast<NewScalar>();
  }
};
#endif

-template <typename Scalar, typename NewScalar>
-struct cast_matrix_or_array<Scalar, NewScalar, false> {
template <typename Scalar, typename NewScalar,
          template <typename D> class EigenBase>
struct cast<Scalar, NewScalar, EigenBase, false> {
  template <typename MatrixIn, typename MatrixOut>
-  static void run(const Eigen::MatrixBase<MatrixIn> & /*input*/,
-                  const Eigen::MatrixBase<MatrixOut> & /*dest*/) {
  static void run(const MatrixIn /*input*/, const MatrixOut /*dest*/) {
    // do nothing
    assert(false && "Must never happened");
  }
@@ -104,19 +187,122 @@ struct cast_matrix_or_array<Scalar, NewScalar, false> {
#define EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, Scalar, NewScalar, \
                                                  pyArray, mat)               \
-  details::cast_matrix_or_array<Scalar, NewScalar>::run(                     \
  details::cast<Scalar, NewScalar>::run(                                      \
      NumpyMap<MatType, Scalar>::map(pyArray,                                 \
                                     details::check_swap(pyArray, mat)),      \
      mat)

#define EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, NewScalar, \
                                                  mat, pyArray)               \
-  details::cast_matrix_or_array<Scalar, NewScalar>::run(                     \
  details::cast<Scalar, NewScalar>::run(                                      \
      mat, NumpyMap<MatType, NewScalar>::map(                                 \
               pyArray, details::check_swap(pyArray, mat)))
// Define specific cast for Windows and Mac
#if defined _WIN32 || defined __CYGWIN__
// Manage NPY_INT on Windows (NPY_INT32 is NPY_LONG).
// See https://github.com/stack-of-tasks/eigenpy/pull/455
#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
MatType, Scalar, pyArray, mat, CAST_MACRO) \
case NPY_INT: \
CAST_MACRO(MatType, int32_t, Scalar, pyArray, mat); \
break; \
case NPY_UINT: \
CAST_MACRO(MatType, uint32_t, Scalar, pyArray, mat); \
break;
#elif defined __APPLE__
// Manage NPY_LONGLONG on Mac (NPY_INT64 is NPY_LONG).
// long long and long are both the same type
// but NPY_LONGLONG and NPY_LONG are different dtype.
// See https://github.com/stack-of-tasks/eigenpy/pull/455
#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
MatType, Scalar, pyArray, mat, CAST_MACRO) \
case NPY_LONGLONG: \
CAST_MACRO(MatType, int64_t, Scalar, pyArray, mat); \
break; \
case NPY_ULONGLONG: \
CAST_MACRO(MatType, uint64_t, Scalar, pyArray, mat); \
break;
#else
#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
MatType, Scalar, pyArray, mat, CAST_MACRO)
#endif
/// Define casting between Numpy matrix type to Eigen type.
#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH( \
pyArray_type_code, MatType, Scalar, pyArray, mat, CAST_MACRO) \
switch (pyArray_type_code) { \
case NPY_BOOL: \
CAST_MACRO(MatType, bool, Scalar, pyArray, mat); \
break; \
case NPY_INT8: \
CAST_MACRO(MatType, int8_t, Scalar, pyArray, mat); \
break; \
case NPY_INT16: \
CAST_MACRO(MatType, int16_t, Scalar, pyArray, mat); \
break; \
case NPY_INT32: \
CAST_MACRO(MatType, int32_t, Scalar, pyArray, mat); \
break; \
case NPY_INT64: \
CAST_MACRO(MatType, int64_t, Scalar, pyArray, mat); \
break; \
case NPY_UINT8: \
CAST_MACRO(MatType, uint8_t, Scalar, pyArray, mat); \
break; \
case NPY_UINT16: \
CAST_MACRO(MatType, uint16_t, Scalar, pyArray, mat); \
break; \
case NPY_UINT32: \
CAST_MACRO(MatType, uint32_t, Scalar, pyArray, mat); \
break; \
case NPY_UINT64: \
CAST_MACRO(MatType, uint64_t, Scalar, pyArray, mat); \
break; \
case NPY_FLOAT: \
CAST_MACRO(MatType, float, Scalar, pyArray, mat); \
break; \
case NPY_CFLOAT: \
CAST_MACRO(MatType, std::complex<float>, Scalar, pyArray, mat); \
break; \
case NPY_DOUBLE: \
CAST_MACRO(MatType, double, Scalar, pyArray, mat); \
break; \
case NPY_CDOUBLE: \
CAST_MACRO(MatType, std::complex<double>, Scalar, pyArray, mat); \
break; \
case NPY_LONGDOUBLE: \
CAST_MACRO(MatType, long double, Scalar, pyArray, mat); \
break; \
case NPY_CLONGDOUBLE: \
CAST_MACRO(MatType, std::complex<long double>, Scalar, pyArray, mat); \
break; \
EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
MatType, Scalar, pyArray, mat, CAST_MACRO) \
default: \
throw Exception("You asked for a conversion which is not implemented."); \
}
template <typename EigenType>
struct EigenAllocator;
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct eigen_allocator_impl;
template <typename MatType>
struct eigen_allocator_impl_matrix;
template <typename MatType>
struct eigen_allocator_impl<MatType, Eigen::MatrixBase<MatType> >
: eigen_allocator_impl_matrix<MatType> {};
template <typename MatType>
struct eigen_allocator_impl<const MatType, const Eigen::MatrixBase<MatType> >
: eigen_allocator_impl_matrix<const MatType> {};
template <typename MatType>
-struct EigenAllocator {
struct eigen_allocator_impl_matrix {
  typedef MatType Type;
  typedef typename MatType::Scalar Scalar;
@@ -130,50 +316,25 @@ struct EigenAllocator {
    Type *mat_ptr = details::init_matrix_or_array<Type>::run(pyArray, raw_ptr);
    Type &mat = *mat_ptr;

    copy(pyArray, mat);
  }

  /// \brief Copy Python array into the input matrix mat.
  template <typename MatrixDerived>
  static void copy(PyArrayObject *pyArray,
                   const Eigen::MatrixBase<MatrixDerived> &mat_) {
    MatrixDerived &mat = mat_.const_cast_derived();
    const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
    const int Scalar_type_code = Register::getTypeCode<Scalar>();

    if (pyArray_type_code == Scalar_type_code) {
      mat = NumpyMap<MatType, Scalar>::map(
          pyArray, details::check_swap(pyArray, mat));  // avoid useless cast
      return;
    }

-    switch (pyArray_type_code) {
-      case NPY_INT:
-        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, int, Scalar, pyArray,
-                                                  mat);
-        break;
-      case NPY_LONG:
-        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, long, Scalar,
-                                                  pyArray, mat);
-        break;
-      case NPY_FLOAT:
-        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, float, Scalar,
-                                                  pyArray, mat);
-        break;
-      case NPY_CFLOAT:
-        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, std::complex<float>,
-                                                  Scalar, pyArray, mat);
-        break;
-      case NPY_DOUBLE:
-        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, double, Scalar,
-                                                  pyArray, mat);
-        break;
-      case NPY_CDOUBLE:
-        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, std::complex<double>,
-                                                  Scalar, pyArray, mat);
-        break;
-      case NPY_LONGDOUBLE:
-        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, long double, Scalar,
-                                                  pyArray, mat);
-        break;
-      case NPY_CLONGDOUBLE:
-        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(
-            MatType, std::complex<long double>, Scalar, pyArray, mat);
-        break;
-      default:
-        throw Exception("You asked for a conversion which is not implemented.");
-    }
    EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH(
        pyArray_type_code, MatType, Scalar, pyArray, mat,
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX);
  }

  /// \brief Copy mat into the Python array using Eigen::Map
@@ -185,54 +346,102 @@ struct EigenAllocator {
    const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
    const int Scalar_type_code = Register::getTypeCode<Scalar>();
-    typedef typename NumpyMap<MatType, Scalar>::EigenMap MapType;

    if (pyArray_type_code == Scalar_type_code)  // no cast needed
    {
-      MapType map_pyArray = NumpyMap<MatType, Scalar>::map(
-          pyArray, details::check_swap(pyArray, mat));
-      map_pyArray = mat;
      NumpyMap<MatType, Scalar>::map(pyArray,
                                     details::check_swap(pyArray, mat)) = mat;
      return;
    }

-    switch (pyArray_type_code) {
-      case NPY_INT:
-        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, int, mat,
-                                                  pyArray);
-        break;
-      case NPY_LONG:
-        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, long, mat,
-                                                  pyArray);
-        break;
-      case NPY_FLOAT:
-        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, float, mat,
-                                                  pyArray);
-        break;
-      case NPY_CFLOAT:
-        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(
-            MatType, Scalar, std::complex<float>, mat, pyArray);
-        break;
-      case NPY_DOUBLE:
-        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, double, mat,
-                                                  pyArray);
-        break;
-      case NPY_CDOUBLE:
-        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(
-            MatType, Scalar, std::complex<double>, mat, pyArray);
-        break;
-      case NPY_LONGDOUBLE:
-        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, long double,
-                                                  mat, pyArray);
-        break;
-      case NPY_CLONGDOUBLE:
-        EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(
-            MatType, Scalar, std::complex<long double>, mat, pyArray);
-        break;
-      default:
-        throw Exception("You asked for a conversion which is not implemented.");
-    }
    throw Exception(
        "Scalar conversion from Eigen to Numpy is not implemented.");
  }
};

#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct eigen_allocator_impl_tensor;

template <typename TensorType>
struct eigen_allocator_impl<TensorType, Eigen::TensorBase<TensorType> >
    : eigen_allocator_impl_tensor<TensorType> {};

template <typename TensorType>
struct eigen_allocator_impl<const TensorType,
                            const Eigen::TensorBase<TensorType> >
    : eigen_allocator_impl_tensor<const TensorType> {};

template <typename TensorType>
struct eigen_allocator_impl_tensor {
  typedef typename TensorType::Scalar Scalar;

  static void allocate(
      PyArrayObject *pyArray,
      boost::python::converter::rvalue_from_python_storage<TensorType>
          *storage) {
    void *raw_ptr = storage->storage.bytes;
    assert(is_aligned(raw_ptr, EIGENPY_DEFAULT_ALIGN_BYTES) &&
           "The pointer is not aligned.");

    TensorType *tensor_ptr =
        details::init_tensor<TensorType>::run(pyArray, raw_ptr);
    TensorType &tensor = *tensor_ptr;

    copy(pyArray, tensor);
  }

#define EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(TensorType, Scalar,         \
                                                  NewScalar, pyArray, tensor) \
  {                                                                           \
    typename NumpyMap<TensorType, Scalar>::EigenMap pyArray_map =             \
        NumpyMap<TensorType, Scalar>::map(                                    \
            pyArray, details::check_swap(pyArray, tensor));                   \
    details::cast<Scalar, NewScalar, Eigen::TensorRef>::run(pyArray_map,      \
                                                            tensor);          \
  }

  /// \brief Copy Python array into the input matrix mat.
  template <typename TensorDerived>
  static void copy(PyArrayObject *pyArray, TensorDerived &tensor) {
    const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
    const int Scalar_type_code = Register::getTypeCode<Scalar>();

    if (pyArray_type_code == Scalar_type_code) {
      tensor = NumpyMap<TensorType, Scalar>::map(
          pyArray, details::check_swap(pyArray, tensor));  // avoid useless cast
      return;
    }

    EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH(
        pyArray_type_code, TensorType, Scalar, pyArray, tensor,
        EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR);
  }

#define EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(TensorType, Scalar,         \
                                                  NewScalar, tensor, pyArray) \
  {                                                                           \
    typename NumpyMap<TensorType, NewScalar>::EigenMap pyArray_map =          \
        NumpyMap<TensorType, NewScalar>::map(                                 \
            pyArray, details::check_swap(pyArray, tensor));                   \
    details::cast<Scalar, NewScalar, Eigen::TensorRef>::run(tensor,           \
                                                            pyArray_map);     \
  }

  /// \brief Copy mat into the Python array using Eigen::Map
  static void copy(const TensorType &tensor, PyArrayObject *pyArray) {
    const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
    const int Scalar_type_code = Register::getTypeCode<Scalar>();

    if (pyArray_type_code == Scalar_type_code)  // no cast needed
    {
      NumpyMap<TensorType, Scalar>::map(
          pyArray, details::check_swap(pyArray, tensor)) = tensor;
      return;
    }

    throw Exception(
        "Scalar conversion from Eigen to Numpy is not implemented.");
  }
};
#endif
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
/// @brief Check if we need to allocate @tparam MatType to convert @param
@@ -253,7 +462,7 @@ inline bool is_arr_layout_compatible_with_mat_type(PyArrayObject *pyArray) {
}

template <typename MatType, int Options, typename Stride>
-struct EigenAllocator<Eigen::Ref<MatType, Options, Stride> > {
struct eigen_allocator_impl_matrix<Eigen::Ref<MatType, Options, Stride> > {
  typedef Eigen::Ref<MatType, Options, Stride> RefType;
  typedef typename MatType::Scalar Scalar;
@@ -296,49 +505,7 @@ struct EigenAllocator<Eigen::Ref<MatType, Options, Stride> > {
      new (raw_ptr) StorageType(mat_ref, pyArray, mat_ptr);

      RefType &mat = *reinterpret_cast<RefType *>(raw_ptr);
-      if (pyArray_type_code == Scalar_type_code) {
-        mat = NumpyMap<MatType, Scalar>::map(
-            pyArray, details::check_swap(pyArray, mat));  // avoid useless cast
-        return;
-      }
-
-      switch (pyArray_type_code) {
-        case NPY_INT:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, int, Scalar,
-                                                    pyArray, mat);
-          break;
-        case NPY_LONG:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, long, Scalar,
-                                                    pyArray, mat);
-          break;
-        case NPY_FLOAT:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, float, Scalar,
-                                                    pyArray, mat);
-          break;
-        case NPY_CFLOAT:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(
-              MatType, std::complex<float>, Scalar, pyArray, mat);
-          break;
-        case NPY_DOUBLE:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, double, Scalar,
-                                                    pyArray, mat);
-          break;
-        case NPY_CDOUBLE:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(
-              MatType, std::complex<double>, Scalar, pyArray, mat);
-          break;
-        case NPY_LONGDOUBLE:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, long double,
-                                                    Scalar, pyArray, mat);
-          break;
-        case NPY_CLONGDOUBLE:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(
-              MatType, std::complex<long double>, Scalar, pyArray, mat);
-          break;
-        default:
-          throw Exception(
-              "You asked for a conversion which is not implemented.");
-      }
      EigenAllocator<MatType>::copy(pyArray, mat);
    } else {
      assert(pyArray_type_code == Scalar_type_code);
      typename NumpyMap<MatType, Scalar, Options, NumpyMapStride>::EigenMap
@@ -355,7 +522,8 @@ struct EigenAllocator<Eigen::Ref<MatType, Options, Stride> > {
};

template <typename MatType, int Options, typename Stride>
-struct EigenAllocator<const Eigen::Ref<const MatType, Options, Stride> > {
struct eigen_allocator_impl_matrix<
    const Eigen::Ref<const MatType, Options, Stride> > {
  typedef const Eigen::Ref<const MatType, Options, Stride> RefType;
  typedef typename MatType::Scalar Scalar;
@@ -399,49 +567,7 @@ struct EigenAllocator<const Eigen::Ref<const MatType, Options, Stride> > {
      new (raw_ptr) StorageType(mat_ref, pyArray, mat_ptr);

      MatType &mat = *mat_ptr;
-      if (pyArray_type_code == Scalar_type_code) {
-        mat = NumpyMap<MatType, Scalar>::map(
-            pyArray, details::check_swap(pyArray, mat));  // avoid useless cast
-        return;
-      }
-
-      switch (pyArray_type_code) {
-        case NPY_INT:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, int, Scalar,
-                                                    pyArray, mat);
-          break;
-        case NPY_LONG:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, long, Scalar,
-                                                    pyArray, mat);
-          break;
-        case NPY_FLOAT:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, float, Scalar,
-                                                    pyArray, mat);
-          break;
-        case NPY_CFLOAT:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(
-              MatType, std::complex<float>, Scalar, pyArray, mat);
-          break;
-        case NPY_DOUBLE:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, double, Scalar,
-                                                    pyArray, mat);
-          break;
-        case NPY_CDOUBLE:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(
-              MatType, std::complex<double>, Scalar, pyArray, mat);
-          break;
-        case NPY_LONGDOUBLE:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, long double,
-                                                    Scalar, pyArray, mat);
-          break;
-        case NPY_CLONGDOUBLE:
-          EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(
-              MatType, std::complex<long double>, Scalar, pyArray, mat);
-          break;
-        default:
-          throw Exception(
-              "You asked for a conversion which is not implemented.");
-      }
      EigenAllocator<MatType>::copy(pyArray, mat);
    } else {
      assert(pyArray_type_code == Scalar_type_code);
      typename NumpyMap<MatType, Scalar, Options, NumpyMapStride>::EigenMap
@@ -457,6 +583,90 @@ struct EigenAllocator<const Eigen::Ref<const MatType, Options, Stride> > {
  }
};
#endif
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType, typename TensorRef>
struct eigen_allocator_impl_tensor_ref;
template <typename TensorType>
struct eigen_allocator_impl_tensor<Eigen::TensorRef<TensorType> >
: eigen_allocator_impl_tensor_ref<TensorType,
Eigen::TensorRef<TensorType> > {};
template <typename TensorType>
struct eigen_allocator_impl_tensor<const Eigen::TensorRef<const TensorType> >
: eigen_allocator_impl_tensor_ref<
const TensorType, const Eigen::TensorRef<const TensorType> > {};
template <typename TensorType, typename RefType>
struct eigen_allocator_impl_tensor_ref {
typedef typename TensorType::Scalar Scalar;
typedef
typename ::boost::python::detail::referent_storage<RefType &>::StorageType
StorageType;
static void allocate(
PyArrayObject *pyArray,
::boost::python::converter::rvalue_from_python_storage<RefType>
*storage) {
// typedef typename StrideType<
// MatType,
// Eigen::internal::traits<RefType>::StrideType::InnerStrideAtCompileTime,
// Eigen::internal::traits<RefType>::StrideType::
// OuterStrideAtCompileTime>::type NumpyMapStride;
static const int Options = Eigen::internal::traits<TensorType>::Options;
bool need_to_allocate = false;
const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
const int Scalar_type_code = Register::getTypeCode<Scalar>();
if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;
// bool incompatible_layout =
// !is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
// need_to_allocate |= incompatible_layout;
// if (Options !=
// Eigen::Unaligned) // we need to check whether the memory is
// correctly
// // aligned and composed of a continuous segment
// {
// void *data_ptr = PyArray_DATA(pyArray);
// if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr,
// Options))
// need_to_allocate |= true;
// }
void *raw_ptr = storage->storage.bytes;
if (need_to_allocate) {
typedef typename boost::remove_const<TensorType>::type TensorTypeNonConst;
TensorTypeNonConst *tensor_ptr;
tensor_ptr = details::init_tensor<TensorTypeNonConst>::run(pyArray);
RefType tensor_ref(*tensor_ptr);
new (raw_ptr) StorageType(tensor_ref, pyArray, tensor_ptr);
TensorTypeNonConst &tensor = *tensor_ptr;
EigenAllocator<TensorTypeNonConst>::copy(pyArray, tensor);
} else {
assert(pyArray_type_code == Scalar_type_code);
typename NumpyMap<TensorType, Scalar, Options>::EigenMap numpyMap =
NumpyMap<TensorType, Scalar, Options>::map(pyArray);
RefType tensor_ref(numpyMap);
new (raw_ptr) StorageType(tensor_ref, pyArray);
}
}
static void copy(RefType const &ref, PyArrayObject *pyArray) {
EigenAllocator<TensorType>::copy(ref, pyArray);
}
};
#endif
template <typename EigenType>
struct EigenAllocator : eigen_allocator_impl<EigenType> {};
}  // namespace eigenpy

#endif  // __eigenpy_eigen_allocator_hpp__
...@@ -12,13 +12,12 @@ ...@@ -12,13 +12,12 @@
namespace eigenpy { namespace eigenpy {
template <typename C> template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct expected_pytype_for_arg {}; struct expected_pytype_for_arg {};
template <typename Scalar, int Rows, int Cols, int Options, int MaxRows, template <typename MatType>
int MaxCols> struct expected_pytype_for_arg<MatType, Eigen::MatrixBase<MatType> > {
struct expected_pytype_for_arg<
Eigen::Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > {
static PyTypeObject const *get_pytype() { static PyTypeObject const *get_pytype() {
PyTypeObject const *py_type = eigenpy::getPyArrayType(); PyTypeObject const *py_type = eigenpy::getPyArrayType();
return py_type; return py_type;
...@@ -59,44 +58,43 @@ struct copy_if_non_const<const MatType, true> { ...@@ -59,44 +58,43 @@ struct copy_if_non_const<const MatType, true> {
}; };
#if EIGEN_VERSION_AT_LEAST(3, 2, 0) #if EIGEN_VERSION_AT_LEAST(3, 2, 0)
template <typename MatType, int Options, typename Stride>
struct referent_storage_eigen_ref;
template <typename MatType, int Options, typename Stride> template <typename _RefType>
struct referent_storage_eigen_ref { struct referent_storage_eigen_ref {
typedef Eigen::Ref<MatType, Options, Stride> RefType; typedef _RefType RefType;
typedef typename get_eigen_plain_type<RefType>::type PlainObjectType;
typedef typename ::eigenpy::aligned_storage< typedef typename ::eigenpy::aligned_storage<
::boost::python::detail::referent_size<RefType &>::value>::type ::boost::python::detail::referent_size<RefType &>::value>::type
AlignedStorage; AlignedStorage;
referent_storage_eigen_ref() referent_storage_eigen_ref()
: pyArray(NULL), : pyArray(NULL),
mat_ptr(NULL), plain_ptr(NULL),
ref_ptr(reinterpret_cast<RefType *>(ref_storage.bytes)) {} ref_ptr(reinterpret_cast<RefType *>(ref_storage.bytes)) {}
referent_storage_eigen_ref(const RefType &ref, PyArrayObject *pyArray, referent_storage_eigen_ref(const RefType &ref, PyArrayObject *pyArray,
MatType *mat_ptr = NULL) PlainObjectType *plain_ptr = NULL)
: pyArray(pyArray), : pyArray(pyArray),
mat_ptr(mat_ptr), plain_ptr(plain_ptr),
ref_ptr(reinterpret_cast<RefType *>(ref_storage.bytes)) { ref_ptr(reinterpret_cast<RefType *>(ref_storage.bytes)) {
Py_INCREF(pyArray); Py_INCREF(pyArray);
new (ref_storage.bytes) RefType(ref); new (ref_storage.bytes) RefType(ref);
} }
~referent_storage_eigen_ref() { ~referent_storage_eigen_ref() {
if (mat_ptr != NULL && PyArray_ISWRITEABLE(pyArray)) if (plain_ptr != NULL && PyArray_ISWRITEABLE(pyArray))
copy_if_non_const<MatType>::run(*mat_ptr, pyArray); copy_if_non_const<PlainObjectType>::run(*plain_ptr, pyArray);
Py_DECREF(pyArray); Py_DECREF(pyArray);
if (mat_ptr != NULL) mat_ptr->~MatType(); if (plain_ptr != NULL) plain_ptr->~PlainObjectType();
ref_ptr->~RefType(); ref_ptr->~RefType();
} }
AlignedStorage ref_storage; AlignedStorage ref_storage;
PyArrayObject *pyArray; PyArrayObject *pyArray;
MatType *mat_ptr; PlainObjectType *plain_ptr;
RefType *ref_ptr; RefType *ref_ptr;
}; };
#endif #endif
...@@ -110,18 +108,16 @@ namespace detail { ...@@ -110,18 +108,16 @@ namespace detail {
#if EIGEN_VERSION_AT_LEAST(3, 2, 0) #if EIGEN_VERSION_AT_LEAST(3, 2, 0)
template <typename MatType, int Options, typename Stride> template <typename MatType, int Options, typename Stride>
struct referent_storage<Eigen::Ref<MatType, Options, Stride> &> { struct referent_storage<Eigen::Ref<MatType, Options, Stride> &> {
typedef ::eigenpy::details::referent_storage_eigen_ref<MatType, Options, typedef Eigen::Ref<MatType, Options, Stride> RefType;
Stride> typedef ::eigenpy::details::referent_storage_eigen_ref<RefType> StorageType;
StorageType;
typedef typename ::eigenpy::aligned_storage< typedef typename ::eigenpy::aligned_storage<
referent_size<StorageType &>::value>::type type; referent_size<StorageType &>::value>::type type;
}; };
template <typename MatType, int Options, typename Stride> template <typename MatType, int Options, typename Stride>
struct referent_storage<const Eigen::Ref<const MatType, Options, Stride> &> { struct referent_storage<const Eigen::Ref<const MatType, Options, Stride> &> {
typedef ::eigenpy::details::referent_storage_eigen_ref<const MatType, Options, typedef Eigen::Ref<const MatType, Options, Stride> RefType;
Stride> typedef ::eigenpy::details::referent_storage_eigen_ref<RefType> StorageType;
StorageType;
typedef typename ::eigenpy::aligned_storage< typedef typename ::eigenpy::aligned_storage<
referent_size<StorageType &>::value>::type type; referent_size<StorageType &>::value>::type type;
}; };
...@@ -173,7 +169,7 @@ struct rvalue_from_python_data<Eigen::PlainObjectBase<Derived> const &> ...@@ -173,7 +169,7 @@ struct rvalue_from_python_data<Eigen::PlainObjectBase<Derived> const &>
template <typename MatType, int Options, typename Stride> template <typename MatType, int Options, typename Stride>
struct rvalue_from_python_data<Eigen::Ref<MatType, Options, Stride> &> struct rvalue_from_python_data<Eigen::Ref<MatType, Options, Stride> &>
: rvalue_from_python_storage<Eigen::Ref<MatType, Options, Stride> &> { : rvalue_from_python_storage<Eigen::Ref<MatType, Options, Stride> &> {
typedef Eigen::Ref<MatType, Options, Stride> T; typedef Eigen::Ref<MatType, Options, Stride> RefType;
#if (!defined(__MWERKS__) || __MWERKS__ >= 0x3000) && \ #if (!defined(__MWERKS__) || __MWERKS__ >= 0x3000) && \
(!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 245) && \ (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 245) && \
...@@ -181,7 +177,7 @@ struct rvalue_from_python_data<Eigen::Ref<MatType, Options, Stride> &> ...@@ -181,7 +177,7 @@ struct rvalue_from_python_data<Eigen::Ref<MatType, Options, Stride> &>
!defined(BOOST_PYTHON_SYNOPSIS) /* Synopsis' OpenCXX has trouble parsing \ !defined(BOOST_PYTHON_SYNOPSIS) /* Synopsis' OpenCXX has trouble parsing \
this */ this */
// This must always be a POD struct with m_data its first member. // This must always be a POD struct with m_data its first member.
BOOST_STATIC_ASSERT(BOOST_PYTHON_OFFSETOF(rvalue_from_python_storage<T>, BOOST_STATIC_ASSERT(BOOST_PYTHON_OFFSETOF(rvalue_from_python_storage<RefType>,
stage1) == 0); stage1) == 0);
#endif #endif
...@@ -199,9 +195,7 @@ struct rvalue_from_python_data<Eigen::Ref<MatType, Options, Stride> &> ...@@ -199,9 +195,7 @@ struct rvalue_from_python_data<Eigen::Ref<MatType, Options, Stride> &>
// Destroys any object constructed in the storage. // Destroys any object constructed in the storage.
~rvalue_from_python_data() { ~rvalue_from_python_data() {
typedef ::eigenpy::details::referent_storage_eigen_ref<MatType, Options, typedef ::eigenpy::details::referent_storage_eigen_ref<RefType> StorageType;
Stride>
StorageType;
if (this->stage1.convertible == this->storage.bytes) if (this->stage1.convertible == this->storage.bytes)
static_cast<StorageType *>((void *)this->storage.bytes)->~StorageType(); static_cast<StorageType *>((void *)this->storage.bytes)->~StorageType();
} }
...@@ -212,7 +206,7 @@ struct rvalue_from_python_data< ...@@ -212,7 +206,7 @@ struct rvalue_from_python_data<
const Eigen::Ref<const MatType, Options, Stride> &> const Eigen::Ref<const MatType, Options, Stride> &>
: rvalue_from_python_storage< : rvalue_from_python_storage<
const Eigen::Ref<const MatType, Options, Stride> &> { const Eigen::Ref<const MatType, Options, Stride> &> {
typedef const Eigen::Ref<const MatType, Options, Stride> T; typedef Eigen::Ref<const MatType, Options, Stride> RefType;
#if (!defined(__MWERKS__) || __MWERKS__ >= 0x3000) && \ #if (!defined(__MWERKS__) || __MWERKS__ >= 0x3000) && \
(!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 245) && \ (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 245) && \
...@@ -220,7 +214,7 @@ struct rvalue_from_python_data< ...@@ -220,7 +214,7 @@ struct rvalue_from_python_data<
!defined(BOOST_PYTHON_SYNOPSIS) /* Synopsis' OpenCXX has trouble parsing \ !defined(BOOST_PYTHON_SYNOPSIS) /* Synopsis' OpenCXX has trouble parsing \
this */ this */
// This must always be a POD struct with m_data its first member. // This must always be a POD struct with m_data its first member.
BOOST_STATIC_ASSERT(BOOST_PYTHON_OFFSETOF(rvalue_from_python_storage<T>, BOOST_STATIC_ASSERT(BOOST_PYTHON_OFFSETOF(rvalue_from_python_storage<RefType>,
stage1) == 0); stage1) == 0);
#endif #endif
...@@ -238,9 +232,7 @@ struct rvalue_from_python_data< ...@@ -238,9 +232,7 @@ struct rvalue_from_python_data<
// Destroys any object constructed in the storage. // Destroys any object constructed in the storage.
~rvalue_from_python_data() { ~rvalue_from_python_data() {
typedef ::eigenpy::details::referent_storage_eigen_ref<const MatType, typedef ::eigenpy::details::referent_storage_eigen_ref<RefType> StorageType;
Options, Stride>
StorageType;
if (this->stage1.convertible == this->storage.bytes) if (this->stage1.convertible == this->storage.bytes)
static_cast<StorageType *>((void *)this->storage.bytes)->~StorageType(); static_cast<StorageType *>((void *)this->storage.bytes)->~StorageType();
} }
...@@ -269,8 +261,23 @@ void eigen_from_py_construct( ...@@ -269,8 +261,23 @@ void eigen_from_py_construct(
memory->convertible = storage->storage.bytes; memory->convertible = storage->storage.bytes;
} }
template <typename MatType, typename _Scalar> template <typename EigenType,
struct EigenFromPy { typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct eigen_from_py_impl {
typedef typename EigenType::Scalar Scalar;
/// \brief Determine if pyObj can be converted into a MatType object
static void *convertible(PyObject *pyObj);
/// \brief Allocate memory and copy pyObj in the new storage
static void construct(PyObject *pyObj,
bp::converter::rvalue_from_python_stage1_data *memory);
static void registration();
};
template <typename MatType>
struct eigen_from_py_impl<MatType, Eigen::MatrixBase<MatType> > {
typedef typename MatType::Scalar Scalar; typedef typename MatType::Scalar Scalar;
/// \brief Determine if pyObj can be converted into a MatType object /// \brief Determine if pyObj can be converted into a MatType object
...@@ -283,8 +290,14 @@ struct EigenFromPy { ...@@ -283,8 +290,14 @@ struct EigenFromPy {
static void registration(); static void registration();
}; };
template <typename MatType, typename _Scalar> template <typename EigenType,
void *EigenFromPy<MatType, _Scalar>::convertible(PyObject *pyObj) { typename Scalar =
typename boost::remove_reference<EigenType>::type::Scalar>
struct EigenFromPy : eigen_from_py_impl<EigenType> {};
template <typename MatType>
void *eigen_from_py_impl<MatType, Eigen::MatrixBase<MatType> >::convertible(
PyObject *pyObj) {
if (!call_PyArray_Check(reinterpret_cast<PyObject *>(pyObj))) return 0; if (!call_PyArray_Check(reinterpret_cast<PyObject *>(pyObj))) return 0;
PyArrayObject *pyArray = reinterpret_cast<PyArrayObject *>(pyObj); PyArrayObject *pyArray = reinterpret_cast<PyArrayObject *>(pyObj);
...@@ -384,26 +397,33 @@ void *EigenFromPy<MatType, _Scalar>::convertible(PyObject *pyObj) { ...@@ -384,26 +397,33 @@ void *EigenFromPy<MatType, _Scalar>::convertible(PyObject *pyObj) {
return pyArray; return pyArray;
} }
template <typename MatType, typename _Scalar> template <typename MatType>
void EigenFromPy<MatType, _Scalar>::construct( void eigen_from_py_impl<MatType, Eigen::MatrixBase<MatType> >::construct(
PyObject *pyObj, bp::converter::rvalue_from_python_stage1_data *memory) { PyObject *pyObj, bp::converter::rvalue_from_python_stage1_data *memory) {
eigen_from_py_construct<MatType>(pyObj, memory); eigen_from_py_construct<MatType>(pyObj, memory);
} }
template <typename MatType, typename _Scalar> template <typename MatType>
void EigenFromPy<MatType, _Scalar>::registration() { void eigen_from_py_impl<MatType, Eigen::MatrixBase<MatType> >::registration() {
bp::converter::registry::push_back( bp::converter::registry::push_back(
reinterpret_cast<void *(*)(_object *)>(&EigenFromPy::convertible), reinterpret_cast<void *(*)(_object *)>(&eigen_from_py_impl::convertible),
&EigenFromPy::construct, bp::type_id<MatType>() &eigen_from_py_impl::construct, bp::type_id<MatType>()
#ifndef BOOST_PYTHON_NO_PY_SIGNATURES #ifndef BOOST_PYTHON_NO_PY_SIGNATURES
, ,
&eigenpy::expected_pytype_for_arg<MatType>::get_pytype &eigenpy::expected_pytype_for_arg<MatType>::get_pytype
#endif #endif
); );
} }
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct eigen_from_py_converter_impl;
template <typename EigenType>
struct EigenFromPyConverter : eigen_from_py_converter_impl<EigenType> {};
template <typename MatType> template <typename MatType>
struct EigenFromPyConverter { struct eigen_from_py_converter_impl<MatType, Eigen::MatrixBase<MatType> > {
static void registration() { static void registration() {
EigenFromPy<MatType>::registration(); EigenFromPy<MatType>::registration();
...@@ -535,4 +555,10 @@ struct EigenFromPy<const Eigen::Ref<const MatType, Options, Stride> > { ...@@ -535,4 +555,10 @@ struct EigenFromPy<const Eigen::Ref<const MatType, Options, Stride> > {
} // namespace eigenpy } // namespace eigenpy
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
#include "eigenpy/tensor/eigen-from-python.hpp"
#endif
#include "eigenpy/sparse/eigen-from-python.hpp"
#endif  // __eigenpy_eigen_from_python_hpp__
// //
// Copyright (c) 2014-2020 CNRS INRIA // Copyright (c) 2014-2024 CNRS INRIA
// //
#ifndef __eigenpy_eigen_to_python_hpp__ #ifndef __eigenpy_eigen_to_python_hpp__
...@@ -11,58 +11,40 @@ ...@@ -11,58 +11,40 @@
#include "eigenpy/eigen-allocator.hpp" #include "eigenpy/eigen-allocator.hpp"
#include "eigenpy/numpy-allocator.hpp" #include "eigenpy/numpy-allocator.hpp"
#include "eigenpy/scipy-allocator.hpp"
#include "eigenpy/numpy-type.hpp" #include "eigenpy/numpy-type.hpp"
#include "eigenpy/scipy-type.hpp"
#include "eigenpy/registration.hpp"
namespace eigenpy {

EIGENPY_DOCUMENTATION_START_IGNORE

template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct eigen_to_py_impl;

template <typename MatType>
struct eigen_to_py_impl_matrix;

template <typename MatType>
struct eigen_to_py_impl<MatType, Eigen::MatrixBase<MatType> >
: eigen_to_py_impl_matrix<MatType> {};

template <typename MatType>
struct eigen_to_py_impl<MatType&, Eigen::MatrixBase<MatType> >
: eigen_to_py_impl_matrix<MatType&> {};

template <typename MatType>
struct eigen_to_py_impl<const MatType, const Eigen::MatrixBase<MatType> >
: eigen_to_py_impl_matrix<const MatType> {};

template <typename MatType>
struct eigen_to_py_impl<const MatType&, const Eigen::MatrixBase<MatType> >
: eigen_to_py_impl_matrix<const MatType&> {};

template <typename MatType>
struct eigen_to_py_impl_matrix {
static PyObject* convert(
typename boost::add_reference<
typename boost::add_const<MatType>::type>::type mat) {
@@ -76,9 +58,8 @@ struct EigenToPy {
PyArrayObject* pyArray;
// Allocate Python memory
if ((((!(C == 1) != !(R == 1)) && !MatrixDerived::IsVectorAtCompileTime) ||
MatrixDerived::IsVectorAtCompileTime)) // Handle array with a single
// dimension
{
npy_intp shape[1] = {C == 1 ? R : C};
pyArray = NumpyAllocator<MatType>::allocate(
@@ -96,30 +77,75 @@ struct EigenToPy {
static PyTypeObject const* get_pytype() { return getPyArrayType(); }
};
template <typename MatType>
struct eigen_to_py_impl_sparse_matrix;

template <typename MatType>
struct eigen_to_py_impl<MatType, Eigen::SparseMatrixBase<MatType> >
: eigen_to_py_impl_sparse_matrix<MatType> {};

template <typename MatType>
struct eigen_to_py_impl<MatType&, Eigen::SparseMatrixBase<MatType> >
: eigen_to_py_impl_sparse_matrix<MatType&> {};

template <typename MatType>
struct eigen_to_py_impl<const MatType, const Eigen::SparseMatrixBase<MatType> >
: eigen_to_py_impl_sparse_matrix<const MatType> {};

template <typename MatType>
struct eigen_to_py_impl<const MatType&, const Eigen::SparseMatrixBase<MatType> >
: eigen_to_py_impl_sparse_matrix<const MatType&> {};

template <typename MatType>
struct eigen_to_py_impl_sparse_matrix {
enum { IsRowMajor = MatType::IsRowMajor };
static PyObject* convert(
typename boost::add_reference<
typename boost::add_const<MatType>::type>::type mat) {
typedef typename boost::remove_const<
typename boost::remove_reference<MatType>::type>::type MatrixDerived;
// Allocate and perform the copy
PyObject* pyArray =
ScipyAllocator<MatType>::allocate(const_cast<MatrixDerived&>(mat));
return pyArray;
}
static PyTypeObject const* get_pytype() {
return IsRowMajor ? ScipyType::getScipyCSRMatrixType()
: ScipyType::getScipyCSCMatrixType();
}
};
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct eigen_to_py_impl_tensor;
template <typename TensorType>
struct eigen_to_py_impl<TensorType, Eigen::TensorBase<TensorType> >
: eigen_to_py_impl_tensor<TensorType> {};
template <typename TensorType>
struct eigen_to_py_impl<const TensorType, const Eigen::TensorBase<TensorType> >
: eigen_to_py_impl_tensor<const TensorType> {};
template <typename TensorType>
struct eigen_to_py_impl_tensor {
static PyObject* convert(
typename boost::add_reference<
typename boost::add_const<TensorType>::type>::type tensor) {
// typedef typename boost::remove_const<
// typename boost::remove_reference<Tensor>::type>::type
// TensorDerived;
static const int NumIndices = TensorType::NumIndices;
npy_intp shape[NumIndices];
for (int k = 0; k < NumIndices; ++k) shape[k] = tensor.dimension(k);
PyArrayObject* pyArray = NumpyAllocator<TensorType>::allocate(
const_cast<TensorType&>(tensor), NumIndices, shape);
// Create an instance (either np.array or np.matrix)
return NumpyType::make(pyArray).ptr();
@@ -127,6 +153,14 @@ struct EigenToPy<Eigen::Ref<MatType, Options, Stride>, _Scalar> {
static PyTypeObject const* get_pytype() { return getPyArrayType(); }
};
#endif
EIGENPY_DOCUMENTATION_END_IGNORE
template <typename EigenType,
typename Scalar =
typename boost::remove_reference<EigenType>::type::Scalar>
struct EigenToPy : eigen_to_py_impl<EigenType> {};
template <typename MatType>
struct EigenToPyConverter {
@@ -134,6 +168,52 @@ struct EigenToPyConverter {
bp::to_python_converter<MatType, EigenToPy<MatType>, true>();
}
};
} // namespace eigenpy
namespace boost {
namespace python {
template <typename MatrixRef, class MakeHolder>
struct to_python_indirect_eigen {
template <class U>
inline PyObject* operator()(U const& mat) const {
return eigenpy::EigenToPy<MatrixRef>::convert(const_cast<U&>(mat));
}
#ifndef BOOST_PYTHON_NO_PY_SIGNATURES
inline PyTypeObject const* get_pytype() const {
return converter::registered_pytype<MatrixRef>::get_pytype();
}
#endif
};
template <typename Scalar, int RowsAtCompileTime, int ColsAtCompileTime,
int Options, int MaxRowsAtCompileTime, int MaxColsAtCompileTime,
class MakeHolder>
struct to_python_indirect<
Eigen::Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, Options,
MaxRowsAtCompileTime, MaxColsAtCompileTime>&,
MakeHolder>
: to_python_indirect_eigen<
Eigen::Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, Options,
MaxRowsAtCompileTime, MaxColsAtCompileTime>&,
MakeHolder> {};
template <typename Scalar, int RowsAtCompileTime, int ColsAtCompileTime,
int Options, int MaxRowsAtCompileTime, int MaxColsAtCompileTime,
class MakeHolder>
struct to_python_indirect<
const Eigen::Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, Options,
MaxRowsAtCompileTime, MaxColsAtCompileTime>&,
MakeHolder>
: to_python_indirect_eigen<
const Eigen::Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime,
Options, MaxRowsAtCompileTime,
MaxColsAtCompileTime>&,
MakeHolder> {};
} // namespace python
} // namespace boost
#endif // __eigenpy_eigen_to_python_hpp__
//
// Copyright (c) 2020-2023 INRIA
//
#ifndef __eigenpy_eigen_typedef_hpp__
@@ -31,6 +31,8 @@
EIGENPY_MAKE_TYPEDEFS(Type, Options, TypeSuffix, Eigen::Dynamic, X) \
EIGENPY_MAKE_FIXED_TYPEDEFS(Type, Options, TypeSuffix, 2) \
EIGENPY_MAKE_FIXED_TYPEDEFS(Type, Options, TypeSuffix, 3) \
EIGENPY_MAKE_FIXED_TYPEDEFS(Type, Options, TypeSuffix, 4) \
EIGENPY_MAKE_TYPEDEFS(Type, Options, TypeSuffix, 1, 1) \
typedef Eigen::SparseMatrix<Scalar, Options> SparseMatrixX##TypeSuffix
#endif // ifndef __eigenpy_eigen_typedef_hpp__
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_eigen_eigen_base_hpp__
#define __eigenpy_eigen_eigen_base_hpp__
#include "eigenpy/eigenpy.hpp"
namespace eigenpy {
template <typename Derived>
struct EigenBaseVisitor
: public boost::python::def_visitor<EigenBaseVisitor<Derived> > {
template <class PyClass>
void visit(PyClass &cl) const {
cl.def("cols", &Derived::cols, bp::arg("self"),
"Returns the number of columns.")
.def("rows", &Derived::rows, bp::arg("self"),
"Returns the number of rows.")
.def("size", &Derived::rows, bp::arg("self"),
"Returns the number of coefficients, which is rows()*cols().");
}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_eigen_eigen_base_hpp__
/*
* Copyright 2014-2019, CNRS
* Copyright 2018-2024, INRIA
*/
#ifndef __eigenpy_eigenpy_hpp__
#define __eigenpy_eigenpy_hpp__

#include "eigenpy/fwd.hpp"
#include "eigenpy/deprecated.hpp"
#include "eigenpy/eigen-typedef.hpp"
#include "eigenpy/expose.hpp"
/// Custom CallPolicies
#include "eigenpy/std-unique-ptr.hpp"
#define ENABLE_SPECIFIC_MATRIX_TYPE(TYPE) \
::eigenpy::enableEigenPySpecific<TYPE>();
@@ -19,22 +22,20 @@ namespace eigenpy {
*/
void EIGENPY_DLLAPI enableEigenPy();

bool EIGENPY_DLLAPI withTensorSupport();

/* Enable the Eigen--Numpy serialization for the templated MatType class.*/
template <typename MatType>
void enableEigenPySpecific();
/* Enable the Eigen--Numpy serialization for the templated MatrixBase class.
* The second template argument is used for inheritance of Eigen classes. If
* using a native Eigen::MatrixBase, simply repeat the same arg twice. */
template <typename MatType, typename EigenEquivalentType>
EIGENPY_DEPRECATED void enableEigenPySpecific();
template <typename Scalar, int Options>
EIGEN_DONT_INLINE void exposeType() {
EIGENPY_MAKE_TYPEDEFS_ALL_SIZES(Scalar, Options, s);
EIGENPY_UNUSED_TYPE(Vector1s);
EIGENPY_UNUSED_TYPE(RowVector1s);
ENABLE_SPECIFIC_MATRIX_TYPE(Matrix1s);
ENABLE_SPECIFIC_MATRIX_TYPE(Vector2s);
ENABLE_SPECIFIC_MATRIX_TYPE(RowVector2s);
ENABLE_SPECIFIC_MATRIX_TYPE(Matrix2s);
@@ -56,11 +57,19 @@ EIGEN_DONT_INLINE void exposeType() {
ENABLE_SPECIFIC_MATRIX_TYPE(VectorXs);
ENABLE_SPECIFIC_MATRIX_TYPE(RowVectorXs);
ENABLE_SPECIFIC_MATRIX_TYPE(MatrixXs);
enableEigenPySpecific<SparseMatrixXs>();
}

template <typename Scalar>
EIGEN_DONT_INLINE void exposeType() {
exposeType<Scalar, 0>();
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
enableEigenPySpecific<Eigen::Tensor<Scalar, 1> >();
enableEigenPySpecific<Eigen::Tensor<Scalar, 2> >();
enableEigenPySpecific<Eigen::Tensor<Scalar, 3> >();
#endif
}

} // namespace eigenpy

...
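As a point of reference, here is a minimal sketch of how a downstream module might call the entry points declared above; the module name and the 6x1 vector typedef are illustrative and not part of this header:

#include <eigenpy/eigenpy.hpp>

BOOST_PYTHON_MODULE(example_module) {
  // Register the NumPy converters for the default matrix/vector typedefs.
  eigenpy::enableEigenPy();

  // Additionally register a fixed-size type not covered by the default set.
  typedef Eigen::Matrix<double, 6, 1> Vector6d;
  ENABLE_SPECIFIC_MATRIX_TYPE(Vector6d);
}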
/*
* Copyright 2014-2024 CNRS INRIA
*/
#ifndef __eigenpy_fwd_hpp__
@@ -13,6 +13,18 @@
#define EIGENPY_MSVC_COMPILER
#endif
#if (__cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703))
#define EIGENPY_WITH_CXX17_SUPPORT
#endif
#if (__cplusplus >= 201402L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201403))
#define EIGENPY_WITH_CXX14_SUPPORT
#endif
#if (__cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1600))
#define EIGENPY_WITH_CXX11_SUPPORT
#endif
#define EIGENPY_STRING_LITERAL(string) #string
#define EIGENPY_STRINGIZE(string) EIGENPY_STRING_LITERAL(string)
#define _EIGENPY_PPCAT(A, B) A##B
@@ -31,9 +43,8 @@
EIGENPY_PRAGMA_WARNING(Deprecated : the_message)
#define EIGENPY_PRAGMA_DEPRECATED_HEADER(old_header, new_header) \
EIGENPY_PRAGMA_WARNING( \
Deprecated header file : #old_header has been replaced \
by #new_header.\n Please use #new_header instead of #old_header.)
#elif defined(WIN32)
#define EIGENPY_PRAGMA(x) __pragma(#x)
#define EIGENPY_PRAGMA_MESSAGE(the_message) \
@@ -49,7 +60,11 @@
EIGENPY_PRAGMA_WARNING( \
EIGENPY_STRINGCAT("this file is deprecated: ", the_message))
#define EIGENPY_DOCUMENTATION_START_IGNORE /// \cond
#define EIGENPY_DOCUMENTATION_END_IGNORE /// \endcond
#include "eigenpy/config.hpp" #include "eigenpy/config.hpp"
#include <boost/type_traits/is_base_of.hpp>
// Silence a warning about a deprecated use of boost bind by boost python // Silence a warning about a deprecated use of boost bind by boost python
// at least fo boost 1.73 to 1.75 // at least fo boost 1.73 to 1.75
...@@ -58,6 +73,15 @@ ...@@ -58,6 +73,15 @@
#include <boost/python.hpp> #include <boost/python.hpp>
#include <boost/python/scope.hpp> #include <boost/python/scope.hpp>
#include <type_traits>
#include <utility>
namespace eigenpy {
namespace bp = boost::python;
}
#define NO_IMPORT_ARRAY
#include "eigenpy/numpy.hpp"
#undef NO_IMPORT_ARRAY
@@ -65,8 +89,14 @@
#undef BOOST_BIND_GLOBAL_PLACEHOLDERS

#include <Eigen/Core>
#include <Eigen/Sparse>
#include <Eigen/Geometry>
#ifdef EIGENPY_WITH_CXX11_SUPPORT
#include <unsupported/Eigen/CXX11/Tensor>
#define EIGENPY_WITH_TENSOR_SUPPORT
#endif
#if EIGEN_VERSION_AT_LEAST(3, 2, 90)
#define EIGENPY_DEFAULT_ALIGNMENT_VALUE Eigen::Aligned16
#else
@@ -78,20 +108,97 @@
#define EIGENPY_NO_ALIGNMENT_VALUE Eigen::Unaligned

#define EIGENPY_UNUSED_VARIABLE(var) (void)(var)
#define EIGENPY_UNUSED_TYPE(type) EIGENPY_UNUSED_VARIABLE((type *)(NULL))
#ifndef NDEBUG
#define EIGENPY_USED_VARIABLE_ONLY_IN_DEBUG_MODE(var)
#else
#define EIGENPY_USED_VARIABLE_ONLY_IN_DEBUG_MODE(var) \
EIGENPY_UNUSED_VARIABLE(var)
#endif
#include "eigenpy/expose.hpp" #ifdef EIGENPY_WITH_CXX11_SUPPORT
#include <memory>
#define EIGENPY_SHARED_PTR_HOLDER_TYPE(T) ::std::shared_ptr<T>
#else
#include <boost/shared_ptr.hpp>
#define EIGENPY_SHARED_PTR_HOLDER_TYPE(T) ::boost::shared_ptr<T>
#endif
namespace eigenpy {

// Default Scalar value can't be defined in the declaration
// because of a CL bug.
// See https://github.com/stack-of-tasks/eigenpy/pull/462
template <typename MatType, typename Scalar>
struct EigenToPy;

template <typename MatType, typename Scalar>
struct EigenFromPy;
template <typename T>
struct remove_const_reference {
typedef typename boost::remove_const<
typename boost::remove_reference<T>::type>::type type;
};
template <typename EigenType>
struct get_eigen_base_type {
typedef typename remove_const_reference<EigenType>::type EigenType_;
typedef typename boost::mpl::if_<
boost::is_base_of<Eigen::MatrixBase<EigenType_>, EigenType_>,
Eigen::MatrixBase<EigenType_>,
typename boost::mpl::if_<
boost::is_base_of<Eigen::SparseMatrixBase<EigenType_>, EigenType_>,
Eigen::SparseMatrixBase<EigenType_>
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
,
typename boost::mpl::if_<
boost::is_base_of<Eigen::TensorBase<EigenType_>, EigenType_>,
Eigen::TensorBase<EigenType_>, void>::type
#else
,
void
#endif
>::type>::type _type;
typedef typename boost::mpl::if_<
boost::is_const<typename boost::remove_reference<EigenType>::type>,
const _type, _type>::type type;
};
template <typename EigenType>
struct get_eigen_plain_type;
template <typename MatType, int Options, typename Stride>
struct get_eigen_plain_type<Eigen::Ref<MatType, Options, Stride> > {
typedef typename Eigen::internal::traits<
Eigen::Ref<MatType, Options, Stride> >::PlainObjectType type;
};
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct get_eigen_plain_type<Eigen::TensorRef<TensorType> > {
typedef TensorType type;
};
#endif
namespace internal {
template <class T1, class T2>
struct has_operator_equal_impl {
template <class U, class V>
static auto check(U *) -> decltype(std::declval<U>() == std::declval<V>());
template <typename, typename>
static auto check(...) -> std::false_type;
using type = typename std::is_same<bool, decltype(check<T1, T2>(0))>::type;
};
} // namespace internal
template <class T1, class T2 = T1>
struct has_operator_equal : internal::has_operator_equal_impl<T1, T2>::type {};
} // namespace eigenpy

#include "eigenpy/alignment.hpp"
#include "eigenpy/id.hpp"

#endif // ifndef __eigenpy_fwd_hpp__
@@ -10,8 +10,6 @@
namespace eigenpy {
namespace bp = boost::python;
template <typename Scalar, int Options = 0>
struct EulerAnglesConvertor {
typedef typename Eigen::Matrix<Scalar, 3, 1, Options> Vector3;
...
//
// Copyright (c) 2024 INRIA
//
#ifndef __eigenpy_id_hpp__
#define __eigenpy_id_hpp__
#include <boost/python.hpp>
#include <boost/cstdint.hpp>
namespace eigenpy {
///
/// \brief Add the Python method id for retrieving a unique id of a given object
/// exposed with Boost.Python
///
template <class C>
struct IdVisitor : public bp::def_visitor<IdVisitor<C> > {
template <class PyClass>
void visit(PyClass& cl) const {
cl.def("id", &id, bp::arg("self"),
"Returns the unique identity of an object.\n"
"For object held in C++, it corresponds to its memory address.");
}
private:
static boost::int64_t id(const C& self) {
return boost::int64_t(reinterpret_cast<const void*>(&self));
}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_id_hpp__
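A hedged usage sketch follows; the MyType class and the expose function are placeholders, not part of eigenpy:

#include <boost/python.hpp>
#include "eigenpy/id.hpp"

struct MyType {};

void exposeMyType() {
  // Instances of MyType gain an id() method returning their memory address.
  boost::python::class_<MyType>("MyType").def(eigenpy::IdVisitor<MyType>());
}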
/// Copyright (c) 2016-2024 CNRS INRIA
/// This file was originally taken from Pinocchio (header
/// <pinocchio/bindings/python/utils/std-vector.hpp>)
///
#ifndef __eigenpy_map_hpp__
#define __eigenpy_map_hpp__
#include "eigenpy/pickle-vector.hpp"
#include "eigenpy/registration.hpp"
#include "eigenpy/utils/empty-visitor.hpp"
#include <boost/python/suite/indexing/map_indexing_suite.hpp>
#include <boost/python/stl_iterator.hpp>
#include <boost/python/to_python_converter.hpp>
namespace eigenpy {
/// \brief Change the behavior of indexing (method __getitem__ in Python).
/// This is suitable e.g. for container of Eigen matrix objects if you want to
/// mutate them.
/// \sa overload_base_get_item_for_std_vector
template <typename Container>
struct overload_base_get_item_for_map
: public boost::python::def_visitor<
overload_base_get_item_for_map<Container> > {
typedef typename Container::value_type value_type;
typedef typename Container::value_type::second_type data_type;
typedef typename Container::key_type key_type;
typedef typename Container::key_type index_type;
template <class Class>
void visit(Class& cl) const {
cl.def("__getitem__", &base_get_item);
}
private:
static boost::python::object base_get_item(
boost::python::back_reference<Container&> container, PyObject* i_) {
index_type idx = convert_index(container.get(), i_);
typename Container::iterator i = container.get().find(idx);
if (i == container.get().end()) {
PyErr_SetString(PyExc_KeyError, "Invalid key");
boost::python::throw_error_already_set();
}
typename boost::python::to_python_indirect<
data_type&, boost::python::detail::make_reference_holder>
convert;
return boost::python::object(boost::python::handle<>(convert(i->second)));
}
static index_type convert_index(Container& /*container*/, PyObject* i_) {
boost::python::extract<key_type const&> i(i_);
if (i.check()) {
return i();
} else {
boost::python::extract<key_type> i(i_);
if (i.check()) return i();
}
PyErr_SetString(PyExc_TypeError, "Invalid index type");
boost::python::throw_error_already_set();
return index_type();
}
};
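A sketch of the intended use, assuming a map of Eigen vectors exposed through the standard indexing suite; the map type and Python class name are illustrative:

#include <map>
#include <string>
#include <Eigen/Core>
#include <boost/python.hpp>
#include "eigenpy/map.hpp"

typedef std::map<std::string, Eigen::VectorXd> VectorMap;

void exposeVectorMap() {
  boost::python::class_<VectorMap>("VectorMap")
      .def(boost::python::map_indexing_suite<VectorMap>())
      // Override __getitem__ so entries are returned by reference and can be
      // mutated in place from Python.
      .def(eigenpy::overload_base_get_item_for_map<VectorMap>());
}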
///////////////////////////////////////////////////////////////////////////////
// The following snippet of code has been taken from the header
// https://github.com/loco-3d/crocoddyl/blob/v2.1.0/bindings/python/crocoddyl/utils/map-converter.hpp
// The Crocoddyl library is written by Carlos Mastalli, Nicolas Mansard and
// Rohan Budhiraja.
///////////////////////////////////////////////////////////////////////////////
namespace bp = boost::python;
/**
* @brief Create a pickle interface for the map type
*
* @param[in] Container Map type to be pickled
* \sa Pickle
*/
template <typename Container>
struct PickleMap : public PickleVector<Container> {
static void setstate(bp::object op, bp::tuple tup) {
Container& o = bp::extract<Container&>(op)();
bp::stl_input_iterator<typename Container::value_type> begin(tup[0]), end;
o.insert(begin, end);
}
};
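As a hedged sketch (the map type and helper function are illustrative), the pickle interface is attached when the container class is exposed:

#include <map>
#include <string>
#include <boost/python.hpp>
#include "eigenpy/map.hpp"

typedef std::map<std::string, double> StringDoubleMap;

void addPickleSupport(boost::python::class_<StringDoubleMap>& cl) {
  // Enables pickle.dumps/pickle.loads and copy.deepcopy on the exposed type.
  cl.def_pickle(eigenpy::PickleMap<StringDoubleMap>());
}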
/// Conversion from dict to map solution proposed in
/// https://stackoverflow.com/questions/6116345/boostpython-possible-to-automatically-convert-from-dict-stdmap
/// This template encapsulates the conversion machinery.
template <typename Container>
struct dict_to_map {
static void register_converter() {
bp::converter::registry::push_back(&dict_to_map::convertible,
&dict_to_map::construct,
bp::type_id<Container>());
}
/// Check if conversion is possible
static void* convertible(PyObject* object) {
// Check if it is a list
if (!PyObject_GetIter(object)) return 0;
return object;
}
/// Perform the conversion
static void construct(PyObject* object,
bp::converter::rvalue_from_python_stage1_data* data) {
// convert the PyObject pointed to by `object` to a bp::dict
bp::handle<> handle(bp::borrowed(object)); // "smart ptr"
bp::dict dict(handle);
// get a pointer to memory into which we construct the map
// this is provided by the Python runtime
typedef bp::converter::rvalue_from_python_storage<Container> storage_type;
void* storage = reinterpret_cast<storage_type*>(data)->storage.bytes;
// placement-new allocate the result
new (storage) Container();
// iterate over the dictionary `dict`, fill up the map `map`
Container& map(*(static_cast<Container*>(storage)));
bp::list keys(dict.keys());
int keycount(static_cast<int>(bp::len(keys)));
for (int i = 0; i < keycount; ++i) {
// get the key
bp::object keyobj(keys[i]);
bp::extract<typename Container::key_type> keyproxy(keyobj);
if (!keyproxy.check()) {
PyErr_SetString(PyExc_KeyError, "Bad key type");
bp::throw_error_already_set();
}
typename Container::key_type key = keyproxy();
// get the corresponding value
bp::object valobj(dict[keyobj]);
bp::extract<typename Container::mapped_type> valproxy(valobj);
if (!valproxy.check()) {
PyErr_SetString(PyExc_ValueError, "Bad value type");
bp::throw_error_already_set();
}
typename Container::mapped_type val = valproxy();
map.emplace(key, val);
}
// remember the location for later
data->convertible = storage;
}
static bp::dict todict(Container& self) {
bp::dict dict;
typename Container::const_iterator it;
for (it = self.begin(); it != self.end(); ++it) {
dict.setdefault(it->first, it->second);
}
return dict;
}
};
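A minimal sketch of wiring this converter up on its own (GenericMapVisitor below performs the same registration automatically); the map type and function name are illustrative:

#include <map>
#include <string>
#include "eigenpy/map.hpp"

typedef std::map<std::string, double> Params;

void registerParamsConversion() {
  // dict -> std::map: registered once, then used implicitly whenever a
  // wrapped C++ function expects a Params argument.
  eigenpy::dict_to_map<Params>::register_converter();
  // std::map -> dict goes the other way and is exposed explicitly, e.g. by
  // binding a "todict" method to eigenpy::dict_to_map<Params>::todict.
}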
/// Policies which handle the non-default constructible case
/// and set_item() using emplace().
template <class Container, bool NoProxy>
struct emplace_set_derived_policies
: bp::map_indexing_suite<
Container, NoProxy,
emplace_set_derived_policies<Container, NoProxy> > {
typedef typename Container::key_type index_type;
typedef typename Container::value_type::second_type data_type;
typedef typename Container::value_type value_type;
using DerivedPolicies =
bp::detail::final_map_derived_policies<Container, NoProxy>;
template <class Class>
static void extension_def(Class& cl) {
// Wrap the map's element (value_type)
std::string elem_name = "map_indexing_suite_";
bp::object class_name(cl.attr("__name__"));
bp::extract<std::string> class_name_extractor(class_name);
elem_name += class_name_extractor();
elem_name += "_entry";
namespace mpl = boost::mpl;
typedef typename mpl::if_<
mpl::and_<boost::is_class<data_type>, mpl::bool_<!NoProxy> >,
bp::return_internal_reference<>, bp::default_call_policies>::type
get_data_return_policy;
bp::class_<value_type>(elem_name.c_str(), bp::no_init)
.def("__repr__", &DerivedPolicies::print_elem)
.def("data", &DerivedPolicies::get_data, get_data_return_policy())
.def("key", &DerivedPolicies::get_key);
}
static void set_item(Container& container, index_type i, data_type const& v) {
container.emplace(i, v);
}
};
/**
* @brief Expose the map-like container, e.g. (std::map).
*
* @param[in] Container Container to expose.
* @param[in] NoProxy When set to false, the elements will be copied when
* returned to Python.
*/
template <class Container, bool NoProxy = false>
struct GenericMapVisitor
: public emplace_set_derived_policies<Container, NoProxy>,
public dict_to_map<Container> {
typedef dict_to_map<Container> FromPythonDictConverter;
template <typename DerivedVisitor>
static void expose(const std::string& class_name,
const std::string& doc_string,
const bp::def_visitor<DerivedVisitor>& visitor) {
if (!register_symbolic_link_to_registered_type<Container>()) {
bp::class_<Container>(class_name.c_str(), doc_string.c_str())
.def(GenericMapVisitor())
.def("todict", &FromPythonDictConverter::todict, bp::arg("self"),
"Returns the map type as a Python dictionary.")
.def_pickle(PickleMap<Container>())
.def(visitor);
// Register conversion
FromPythonDictConverter::register_converter();
}
}
static void expose(const std::string& class_name,
const std::string& doc_string = "") {
expose(class_name, doc_string, EmptyPythonVisitor());
}
template <typename DerivedVisitor>
static void expose(const std::string& class_name,
const bp::def_visitor<DerivedVisitor>& visitor) {
expose(class_name, "", visitor);
}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_map_hpp__
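For illustration, a sketch of exposing one concrete map type with this visitor; the Python class name and the map type are arbitrary choices:

#include <map>
#include <string>
#include "eigenpy/map.hpp"

void exposeStdMapOfDouble() {
  // Creates the Python class and adds todict(), pickling and the dict-to-map
  // converter in a single call.
  eigenpy::GenericMapVisitor<std::map<std::string, double> >::expose(
      "StdMap_double", "std::map<std::string,double> exposed as a Python class.");
}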
/*
* Copyright 2020-2023 INRIA
*/

#ifndef __eigenpy_numpy_allocator_hpp__
#define __eigenpy_numpy_allocator_hpp__

#include "eigenpy/fwd.hpp"
#include "eigenpy/eigen-allocator.hpp"
#include "eigenpy/numpy-type.hpp"
#include "eigenpy/register.hpp"

namespace eigenpy {
template <typename EigenType, typename BaseType>
struct numpy_allocator_impl;
template <typename EigenType>
struct numpy_allocator_impl_matrix;
template <typename MatType>
struct numpy_allocator_impl<
MatType, Eigen::MatrixBase<typename remove_const_reference<MatType>::type> >
: numpy_allocator_impl_matrix<MatType> {};
template <typename MatType>
struct numpy_allocator_impl<
const MatType,
const Eigen::MatrixBase<typename remove_const_reference<MatType>::type> >
: numpy_allocator_impl_matrix<const MatType> {};
// template <typename MatType>
// struct numpy_allocator_impl<MatType &, Eigen::MatrixBase<MatType> > :
// numpy_allocator_impl_matrix<MatType &>
//{};
template <typename MatType>
struct numpy_allocator_impl<const MatType &, const Eigen::MatrixBase<MatType> >
: numpy_allocator_impl_matrix<const MatType &> {};
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct NumpyAllocator : numpy_allocator_impl<EigenType, BaseType> {};
template <typename MatType>
struct numpy_allocator_impl_matrix {
template <typename SimilarMatrixType>
static PyArrayObject *allocate(
const Eigen::MatrixBase<SimilarMatrixType> &mat, npy_intp nd,
@@ -31,8 +61,40 @@ struct NumpyAllocator {
}
};
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct numpy_allocator_impl_tensor;
template <typename TensorType>
struct numpy_allocator_impl<TensorType, Eigen::TensorBase<TensorType> >
: numpy_allocator_impl_tensor<TensorType> {};
template <typename TensorType>
struct numpy_allocator_impl<const TensorType,
const Eigen::TensorBase<TensorType> >
: numpy_allocator_impl_tensor<const TensorType> {};
template <typename TensorType>
struct numpy_allocator_impl_tensor {
template <typename TensorDerived>
static PyArrayObject *allocate(const TensorDerived &tensor, npy_intp nd,
npy_intp *shape) {
const int code = Register::getTypeCode<typename TensorDerived::Scalar>();
PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_SimpleNew(
static_cast<int>(nd), shape, code);
// Copy data
EigenAllocator<TensorDerived>::copy(
static_cast<const TensorDerived &>(tensor), pyArray);
return pyArray;
}
};
#endif
template <typename MatType>
struct numpy_allocator_impl_matrix<MatType &> {
template <typename SimilarMatrixType>
static PyArrayObject *allocate(Eigen::PlainObjectBase<SimilarMatrixType> &mat,
npy_intp nd, npy_intp *shape) {
@@ -58,7 +120,7 @@ struct NumpyAllocator<MatType &> {
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
template <typename MatType, int Options, typename Stride>
struct numpy_allocator_impl_matrix<Eigen::Ref<MatType, Options, Stride> > {
typedef Eigen::Ref<MatType, Options, Stride> RefType;

static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape) {
@@ -76,7 +138,12 @@ struct NumpyAllocator<Eigen::Ref<MatType, Options, Stride> > {
outer_stride = reverse_strides ? mat.innerStride()
: mat.outerStride();
#if NPY_ABI_VERSION < 0x02000000
const int elsize = call_PyArray_DescrFromType(Scalar_type_code)->elsize;
#else
const int elsize =
PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
#endif
npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};

PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
@@ -93,7 +160,7 @@ struct NumpyAllocator<Eigen::Ref<MatType, Options, Stride> > {
#endif

template <typename MatType>
struct numpy_allocator_impl_matrix<const MatType &> {
template <typename SimilarMatrixType>
static PyArrayObject *allocate(
const Eigen::PlainObjectBase<SimilarMatrixType> &mat, npy_intp nd,
@@ -122,7 +189,8 @@ struct NumpyAllocator<const MatType &> {
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)

template <typename MatType, int Options, typename Stride>
struct numpy_allocator_impl_matrix<
const Eigen::Ref<const MatType, Options, Stride> > {
typedef const Eigen::Ref<const MatType, Options, Stride> RefType;

static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape) {
@@ -141,7 +209,12 @@ struct NumpyAllocator<const Eigen::Ref<const MatType, Options, Stride> > {
outer_stride = reverse_strides ? mat.innerStride()
: mat.outerStride();
#if NPY_ABI_VERSION < 0x02000000
const int elsize = call_PyArray_DescrFromType(Scalar_type_code)->elsize;
#else
const int elsize =
PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
#endif
npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};

PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
@@ -156,6 +229,70 @@ struct NumpyAllocator<const Eigen::Ref<const MatType, Options, Stride> > {
}
};
#endif
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct numpy_allocator_impl_tensor<Eigen::TensorRef<TensorType> > {
typedef Eigen::TensorRef<TensorType> RefType;
static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
npy_intp *shape) {
typedef typename RefType::Scalar Scalar;
static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
enum {
NPY_ARRAY_MEMORY_CONTIGUOUS =
IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
};
if (NumpyType::sharedMemory()) {
const int Scalar_type_code = Register::getTypeCode<Scalar>();
// static const Index NumIndices = TensorType::NumIndices;
// const int elsize =
// call_PyArray_DescrFromType(Scalar_type_code)->elsize; npy_intp
// strides[NumIndices];
PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code, NULL,
const_cast<Scalar *>(tensor.data()),
NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
return pyArray;
} else {
return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
}
}
};
template <typename TensorType>
struct numpy_allocator_impl_tensor<const Eigen::TensorRef<const TensorType> > {
typedef const Eigen::TensorRef<const TensorType> RefType;
static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
npy_intp *shape) {
typedef typename RefType::Scalar Scalar;
static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
enum {
NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
};
if (NumpyType::sharedMemory()) {
const int Scalar_type_code = Register::getTypeCode<Scalar>();
PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code, NULL,
const_cast<Scalar *>(tensor.data()),
NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
return pyArray;
} else {
return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
}
}
};
#endif

} // namespace eigenpy
...