Compare revisions

Showing with 2414 additions and 606 deletions
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decompositions_sparse_ldlt_hpp__
#define __eigenpy_decompositions_sparse_ldlt_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/decompositions/sparse/SimplicialCholesky.hpp"
#include "eigenpy/utils/scalar-name.hpp"
namespace eigenpy {
template <typename _MatrixType, int _UpLo = Eigen::Lower,
typename _Ordering =
Eigen::AMDOrdering<typename _MatrixType::StorageIndex> >
struct SimplicialLDLTVisitor
: public boost::python::def_visitor<
SimplicialLDLTVisitor<_MatrixType, _UpLo, _Ordering> > {
typedef SimplicialLDLTVisitor<_MatrixType, _UpLo, _Ordering> Visitor;
typedef _MatrixType MatrixType;
typedef Eigen::SimplicialLDLT<MatrixType> Solver;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1, MatrixType::Options>
DenseVectorXs;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic,
MatrixType::Options>
DenseMatrixXs;
template <class PyClass>
void visit(PyClass &cl) const {
cl.def(bp::init<>(bp::arg("self"), "Default constructor"))
.def(bp::init<MatrixType>(bp::args("self", "matrix"),
"Constructs and performs the LDLT "
"factorization from a given matrix."))
.def("vectorD", &vectorD, bp::arg("self"),
"Returns the diagonal vector D.")
.def(SimplicialCholeskyVisitor<Solver>());
}
static void expose() {
static const std::string classname =
"SimplicialLDLT_" + scalar_name<Scalar>::shortname();
expose(classname);
}
static void expose(const std::string &name) {
bp::class_<Solver, boost::noncopyable>(
name.c_str(),
"A direct sparse LDLT Cholesky factorizations.\n\n"
"This class provides a LDL^T Cholesky factorizations of sparse "
"matrices that are selfadjoint and positive definite."
"The factorization allows for solving A.X = B where X and B can be "
"either dense or sparse.\n\n"
"In order to reduce the fill-in, a symmetric permutation P is applied "
"prior to the factorization such that the factorized matrix is P A "
"P^-1.",
bp::no_init)
.def(SimplicialLDLTVisitor())
.def(IdVisitor<Solver>());
}
private:
static DenseVectorXs vectorD(const Solver &self) { return self.vectorD(); }
};
} // namespace eigenpy
#endif // ifndef __eigenpy_decompositions_sparse_ldlt_hpp__
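For reference, a minimal C++ sketch (not part of the diff) of the Eigen::SimplicialLDLT calls that this visitor mirrors on the Python side: compute(), vectorD(), solve() and info(). The matrix values are illustrative only.

#include <Eigen/SparseCholesky>
#include <Eigen/SparseCore>
#include <iostream>

int main() {
  typedef Eigen::SparseMatrix<double> SpMat;
  // Build a small selfadjoint positive-definite matrix.
  SpMat A(3, 3);
  A.insert(0, 0) = 4.0;
  A.insert(1, 1) = 3.0;
  A.insert(2, 2) = 2.0;
  A.insert(0, 1) = 1.0;
  A.insert(1, 0) = 1.0;
  A.makeCompressed();

  Eigen::SimplicialLDLT<SpMat> ldlt;
  ldlt.compute(A);  // analyzePattern() + factorize()
  if (ldlt.info() != Eigen::Success) return 1;

  const Eigen::VectorXd b = Eigen::VectorXd::Ones(3);
  const Eigen::VectorXd x = ldlt.solve(b);          // bound as solve()
  std::cout << ldlt.vectorD().transpose() << "\n";  // bound as vectorD()
  std::cout << x.transpose() << "\n";
  return 0;
}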
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decompositions_sparse_llt_hpp__
#define __eigenpy_decompositions_sparse_llt_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/decompositions/sparse/SimplicialCholesky.hpp"
#include "eigenpy/utils/scalar-name.hpp"
namespace eigenpy {
template <typename _MatrixType, int _UpLo = Eigen::Lower,
typename _Ordering =
Eigen::AMDOrdering<typename _MatrixType::StorageIndex> >
struct SimplicialLLTVisitor
: public boost::python::def_visitor<
SimplicialLLTVisitor<_MatrixType, _UpLo, _Ordering> > {
typedef SimplicialLLTVisitor<_MatrixType, _UpLo, _Ordering> Visitor;
typedef _MatrixType MatrixType;
typedef Eigen::SimplicialLLT<MatrixType> Solver;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1, MatrixType::Options>
DenseVectorXs;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic,
MatrixType::Options>
DenseMatrixXs;
template <class PyClass>
void visit(PyClass &cl) const {
cl.def(bp::init<>(bp::arg("self"), "Default constructor"))
.def(bp::init<MatrixType>(bp::args("self", "matrix"),
"Constructs and performs the LLT "
"factorization from a given matrix."))
.def(SimplicialCholeskyVisitor<Solver>());
}
static void expose() {
static const std::string classname =
"SimplicialLLT_" + scalar_name<Scalar>::shortname();
expose(classname);
}
static void expose(const std::string &name) {
bp::class_<Solver, boost::noncopyable>(
name.c_str(),
"A direct sparse LLT Cholesky factorizations.\n\n"
"This class provides a LL^T Cholesky factorizations of sparse matrices "
"that are selfadjoint and positive definite."
"The factorization allows for solving A.X = B where X and B can be "
"either dense or sparse.\n\n"
"In order to reduce the fill-in, a symmetric permutation P is applied "
"prior to the factorization such that the factorized matrix is P A "
"P^-1.",
bp::no_init)
.def(SimplicialLLTVisitor())
.def(IdVisitor<Solver>());
}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_decompositions_sparse_llt_hpp__
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decompositions_sparse_simplicial_cholesky_hpp__
#define __eigenpy_decompositions_sparse_simplicial_cholesky_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/eigen/EigenBase.hpp"
#include "eigenpy/decompositions/sparse/SparseSolverBase.hpp"
#include <Eigen/SparseCholesky>
namespace eigenpy {
template <typename SimplicialDerived>
struct SimplicialCholeskyVisitor
: public boost::python::def_visitor<
SimplicialCholeskyVisitor<SimplicialDerived> > {
typedef SimplicialDerived Solver;
typedef typename SimplicialDerived::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1, MatrixType::Options>
DenseVectorXs;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic,
MatrixType::Options>
DenseMatrixXs;
template <class PyClass>
void visit(PyClass &cl) const {
cl.def("analyzePattern", &Solver::analyzePattern,
bp::args("self", "matrix"),
"Performs a symbolic decomposition on the sparcity of matrix.\n"
"This function is particularly useful when solving for several "
"problems having the same structure.")
.def(EigenBaseVisitor<Solver>())
.def(SparseSolverBaseVisitor<Solver>())
.def("matrixL", &matrixL, bp::arg("self"),
"Returns the lower triangular matrix L.")
.def("matrixU", &matrixU, bp::arg("self"),
"Returns the upper triangular matrix U.")
.def("compute",
(Solver & (Solver::*)(const MatrixType &matrix)) & Solver::compute,
bp::args("self", "matrix"),
"Computes the sparse Cholesky decomposition of a given matrix.",
bp::return_self<>())
.def("determinant", &Solver::determinant, bp::arg("self"),
"Returns the determinant of the underlying matrix from the "
"current factorization.")
.def("factorize", &Solver::factorize, bp::args("self", "matrix"),
"Performs a numeric decomposition of a given matrix.\n"
"The given matrix must has the same sparcity than the matrix on "
"which the symbolic decomposition has been performed.\n"
"See also analyzePattern().")
.def("info", &Solver::info, bp::arg("self"),
"NumericalIssue if the input contains INF or NaN values or "
"overflow occured. Returns Success otherwise.")
.def("setShift", &Solver::setShift,
(bp::args("self", "offset"), bp::arg("scale") = RealScalar(1)),
"Sets the shift parameters that will be used to adjust the "
"diagonal coefficients during the numerical factorization.\n"
"During the numerical factorization, the diagonal coefficients "
"are transformed by the following linear model: d_ii = offset + "
"scale * d_ii.\n"
"The default is the identity transformation with offset=0, and "
"scale=1.",
bp::return_self<>())
.def("permutationP", &Solver::permutationP, bp::arg("self"),
"Returns the permutation P.",
bp::return_value_policy<bp::copy_const_reference>())
.def("permutationPinv", &Solver::permutationPinv, bp::arg("self"),
"Returns the inverse P^-1 of the permutation P.",
bp::return_value_policy<bp::copy_const_reference>());
}
private:
static MatrixType matrixL(const Solver &self) { return self.matrixL(); }
static MatrixType matrixU(const Solver &self) { return self.matrixU(); }
};
} // namespace eigenpy
#endif // ifndef __eigenpy_decompositions_sparse_simplicial_cholesky_hpp__
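The analyzePattern()/factorize() split bound above is what makes reuse across matrices with an identical sparsity pattern cheap. A small C++ sketch, assuming A1, A2 and b are provided by the caller and that A1 and A2 are SPD with the same pattern:

#include <Eigen/SparseCholesky>
#include <Eigen/SparseCore>

// Factorize two SPD matrices that share one sparsity pattern, reusing the
// symbolic decomposition computed by analyzePattern().
Eigen::VectorXd solve_both(const Eigen::SparseMatrix<double> &A1,
                           const Eigen::SparseMatrix<double> &A2,
                           const Eigen::VectorXd &b) {
  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > solver;
  solver.analyzePattern(A1);  // symbolic step, depends only on the pattern
  solver.factorize(A1);       // numeric step for A1
  Eigen::VectorXd x1 = solver.solve(b);
  solver.factorize(A2);       // same pattern: no new symbolic step
  Eigen::VectorXd x2 = solver.solve(b);
  return x1 + x2;
}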
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decompositions_sparse_sparse_solver_base_hpp__
#define __eigenpy_decompositions_sparse_sparse_solver_base_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/eigen/EigenBase.hpp"
#include <Eigen/SparseCholesky>
namespace eigenpy {
template <typename SimplicialDerived>
struct SparseSolverBaseVisitor
: public boost::python::def_visitor<
SparseSolverBaseVisitor<SimplicialDerived> > {
typedef SimplicialDerived Solver;
typedef typename SimplicialDerived::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1, MatrixType::Options>
DenseVectorXs;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic,
MatrixType::Options>
DenseMatrixXs;
template <class PyClass>
void visit(PyClass &cl) const {
cl.def("solve", &solve<DenseVectorXs>, bp::args("self", "b"),
"Returns the solution x of A x = b using the current "
"decomposition of A.")
.def("solve", &solve<DenseMatrixXs>, bp::args("self", "B"),
"Returns the solution X of A X = B using the current "
"decomposition of A where B is a right hand side matrix.")
.def("solve", &solve<MatrixType>, bp::args("self", "B"),
"Returns the solution X of A X = B using the current "
"decomposition of A where B is a right hand side matrix.");
}
private:
template <typename MatrixOrVector>
static MatrixOrVector solve(const Solver &self, const MatrixOrVector &vec) {
return self.solve(vec);
}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_decompositions_sparse_sparse_solver_base_hpp__
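The three solve() overloads bound above accept a dense vector, a dense matrix, or a sparse matrix right-hand side. A hedged C++ sketch of the corresponding Eigen calls, assuming A is SPD and the right-hand sides are conforming:

#include <Eigen/SparseCholesky>
#include <Eigen/SparseCore>

void solve_overloads(const Eigen::SparseMatrix<double> &A,
                     const Eigen::VectorXd &b,
                     const Eigen::MatrixXd &B_dense,
                     const Eigen::SparseMatrix<double> &B_sparse) {
  Eigen::SimplicialLLT<Eigen::SparseMatrix<double> > llt(A);
  Eigen::VectorXd x = llt.solve(b);                      // dense vector rhs
  Eigen::MatrixXd X = llt.solve(B_dense);                // dense matrix rhs
  Eigen::SparseMatrix<double> Xs = llt.solve(B_sparse);  // sparse rhs, sparse result
  (void)x; (void)X; (void)Xs;
}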
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_accelerate_accelerate_hpp__
#define __eigenpy_decomposition_sparse_accelerate_accelerate_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/eigen/EigenBase.hpp"
#include "eigenpy/decompositions/sparse/SparseSolverBase.hpp"
#include <Eigen/AccelerateSupport>
namespace eigenpy {
template <typename AccelerateDerived>
struct AccelerateImplVisitor : public boost::python::def_visitor<
AccelerateImplVisitor<AccelerateDerived> > {
typedef AccelerateDerived Solver;
typedef typename AccelerateDerived::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef MatrixType CholMatrixType;
typedef typename MatrixType::StorageIndex StorageIndex;
template <class PyClass>
void visit(PyClass &cl) const {
cl
.def("analyzePattern", &Solver::analyzePattern,
bp::args("self", "matrix"),
"Performs a symbolic decomposition on the sparcity of matrix.\n"
"This function is particularly useful when solving for several "
"problems having the same structure.")
.def(EigenBaseVisitor<Solver>())
.def(SparseSolverBaseVisitor<Solver>())
.def("compute",
(Solver & (Solver::*)(const MatrixType &matrix)) & Solver::compute,
bp::args("self", "matrix"),
"Computes the sparse Cholesky decomposition of a given matrix.",
bp::return_self<>())
.def("factorize", &Solver::factorize, bp::args("self", "matrix"),
"Performs a numeric decomposition of a given matrix.\n"
"The given matrix must has the same sparcity than the matrix on "
"which the symbolic decomposition has been performed.\n"
"See also analyzePattern().")
.def("info", &Solver::info, bp::arg("self"),
"NumericalIssue if the input contains INF or NaN values or "
"overflow occured. Returns Success otherwise.")
.def("setOrder", &Solver::setOrder, bp::arg("self"), "Set order");
}
static void expose(const std::string &name, const std::string &doc = "") {
bp::class_<Solver, boost::noncopyable>(name.c_str(), doc.c_str(),
bp::no_init)
.def(AccelerateImplVisitor())
.def(bp::init<>(bp::arg("self"), "Default constructor"))
.def(bp::init<MatrixType>(bp::args("self", "matrix"),
"Constructs and performs the "
"factorization from a given matrix."));
}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_decomposition_sparse_accelerate_accelerate_hpp__
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_base_hpp__
#define __eigenpy_decomposition_sparse_cholmod_cholmod_base_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/eigen/EigenBase.hpp"
#include "eigenpy/decompositions/sparse/SparseSolverBase.hpp"
#include <Eigen/CholmodSupport>
namespace eigenpy {
template <typename CholdmodDerived>
struct CholmodBaseVisitor
: public boost::python::def_visitor<CholmodBaseVisitor<CholdmodDerived> > {
typedef CholdmodDerived Solver;
typedef typename CholdmodDerived::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef MatrixType CholMatrixType;
typedef typename MatrixType::StorageIndex StorageIndex;
template <class PyClass>
void visit(PyClass &cl) const {
cl.def("analyzePattern", &Solver::analyzePattern,
bp::args("self", "matrix"),
"Performs a symbolic decomposition on the sparcity of matrix.\n"
"This function is particularly useful when solving for several "
"problems having the same structure.")
.def(EigenBaseVisitor<Solver>())
.def(SparseSolverBaseVisitor<Solver>())
.def("compute",
(Solver & (Solver::*)(const MatrixType &matrix)) & Solver::compute,
bp::args("self", "matrix"),
"Computes the sparse Cholesky decomposition of a given matrix.",
bp::return_self<>())
.def("determinant", &Solver::determinant, bp::arg("self"),
"Returns the determinant of the underlying matrix from the "
"current factorization.")
.def("factorize", &Solver::factorize, bp::args("self", "matrix"),
"Performs a numeric decomposition of a given matrix.\n"
"The given matrix must has the same sparcity than the matrix on "
"which the symbolic decomposition has been performed.\n"
"See also analyzePattern().")
.def("info", &Solver::info, bp::arg("self"),
"NumericalIssue if the input contains INF or NaN values or "
"overflow occured. Returns Success otherwise.")
.def("logDeterminant", &Solver::logDeterminant, bp::arg("self"),
"Returns the log determinant of the underlying matrix from the "
"current factorization.")
.def("setShift", &Solver::setShift, (bp::args("self", "offset")),
"Sets the shift parameters that will be used to adjust the "
"diagonal coefficients during the numerical factorization.\n"
"During the numerical factorization, the diagonal coefficients "
"are transformed by the following linear model: d_ii = offset + "
"d_ii.\n"
"The default is the identity transformation with offset=0.",
bp::return_self<>());
}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_base_hpp__
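A short C++ sketch of the CholmodBase methods bound above (setShift(), compute(), info(), logDeterminant()). It assumes Eigen's CholmodSupport module is available and the program links against CHOLMOD; the shift value is illustrative only.

#include <Eigen/CholmodSupport>
#include <Eigen/SparseCore>
#include <stdexcept>

double log_det_with_shift(const Eigen::SparseMatrix<double> &A, double offset) {
  Eigen::CholmodSimplicialLDLT<Eigen::SparseMatrix<double> > solver;
  solver.setShift(offset);  // d_ii becomes offset + d_ii during factorization
  solver.compute(A);        // analyzePattern() + factorize()
  if (solver.info() != Eigen::Success)
    throw std::runtime_error("Cholmod factorization failed");
  return solver.logDeterminant();
}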
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_decomposition_hpp__
#define __eigenpy_decomposition_sparse_cholmod_cholmod_decomposition_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/decompositions/sparse/cholmod/CholmodBase.hpp"
namespace eigenpy {
template <typename CholdmodDerived>
struct CholmodDecompositionVisitor
: public boost::python::def_visitor<
CholmodDecompositionVisitor<CholdmodDerived> > {
typedef CholdmodDerived Solver;
template <class PyClass>
void visit(PyClass &cl) const {
cl
.def(CholmodBaseVisitor<Solver>())
.def("setMode", &Solver::setMode, bp::args("self", "mode"),
"Set the mode for the Cholesky decomposition.");
}
};
} // namespace eigenpy
#endif // ifndef
// __eigenpy_decomposition_sparse_cholmod_cholmod_decomposition_hpp__
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_ldlt_hpp__
#define __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_ldlt_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/decompositions/sparse/cholmod/CholmodDecomposition.hpp"
#include "eigenpy/utils/scalar-name.hpp"
namespace eigenpy {
template <typename MatrixType_, int UpLo_ = Eigen::Lower>
struct CholmodSimplicialLDLTVisitor
: public boost::python::def_visitor<
CholmodSimplicialLDLTVisitor<MatrixType_, UpLo_> > {
typedef MatrixType_ MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::CholmodSimplicialLDLT<MatrixType_, UpLo_> Solver;
template <class PyClass>
void visit(PyClass &cl) const {
cl
.def(CholmodBaseVisitor<Solver>())
.def(bp::init<>(bp::arg("self"), "Default constructor"))
.def(bp::init<MatrixType>(bp::args("self", "matrix"),
"Constructs and performs the LDLT "
"factorization from a given matrix."))
;
}
static void expose() {
static const std::string classname =
"CholmodSimplicialLDLT_" + scalar_name<Scalar>::shortname();
expose(classname);
}
static void expose(const std::string &name) {
bp::class_<Solver, boost::noncopyable>(
name.c_str(),
"A simplicial direct Cholesky (LDLT) factorization and solver based on "
"Cholmod.\n\n"
"This class allows to solve for A.X = B sparse linear problems via a "
"simplicial LL^T Cholesky factorization using the Cholmod library."
"This simplicial variant is equivalent to Eigen's built-in "
"SimplicialLDLT class."
"Therefore, it has little practical interest. The sparse matrix A must "
"be selfadjoint and positive definite."
"The vectors or matrices X and B can be either dense or sparse.",
bp::no_init)
.def(CholmodSimplicialLDLTVisitor());
}
};
} // namespace eigenpy
#endif // ifndef
// __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_ldlt_hpp__
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_llt_hpp__
#define __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_llt_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/decompositions/sparse/cholmod/CholmodDecomposition.hpp"
#include "eigenpy/utils/scalar-name.hpp"
namespace eigenpy {
template <typename MatrixType_, int UpLo_ = Eigen::Lower>
struct CholmodSimplicialLLTVisitor
: public boost::python::def_visitor<
CholmodSimplicialLLTVisitor<MatrixType_, UpLo_> > {
typedef MatrixType_ MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::CholmodSimplicialLLT<MatrixType_, UpLo_> Solver;
template <class PyClass>
void visit(PyClass &cl) const {
cl
.def(CholmodBaseVisitor<Solver>())
.def(bp::init<>(bp::arg("self"), "Default constructor"))
.def(bp::init<MatrixType>(bp::args("self", "matrix"),
"Constructs and performs the LLT "
"factorization from a given matrix."))
;
}
static void expose() {
static const std::string classname =
"CholmodSimplicialLLT_" + scalar_name<Scalar>::shortname();
expose(classname);
}
static void expose(const std::string &name) {
bp::class_<Solver, boost::noncopyable>(
name.c_str(),
"A simplicial direct Cholesky (LLT) factorization and solver based on "
"Cholmod.\n\n"
"This class allows to solve for A.X = B sparse linear problems via a "
"simplicial LL^T Cholesky factorization using the Cholmod library."
"This simplicial variant is equivalent to Eigen's built-in "
"SimplicialLLT class."
"Therefore, it has little practical interest. The sparse matrix A must "
"be selfadjoint and positive definite."
"The vectors or matrices X and B can be either dense or sparse.",
bp::no_init)
.def(CholmodSimplicialLLTVisitor());
}
};
} // namespace eigenpy
#endif // ifndef
// __eigenpy_decomposition_sparse_cholmod_cholmod_simplicial_llt_hpp__
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_decomposition_sparse_cholmod_cholmod_supernodal_llt_hpp__
#define __eigenpy_decomposition_sparse_cholmod_cholmod_supernodal_llt_hpp__
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/decompositions/sparse/cholmod/CholmodDecomposition.hpp"
#include "eigenpy/utils/scalar-name.hpp"
namespace eigenpy {
template <typename MatrixType_, int UpLo_ = Eigen::Lower>
struct CholmodSupernodalLLTVisitor
: public boost::python::def_visitor<
CholmodSupernodalLLTVisitor<MatrixType_, UpLo_> > {
typedef MatrixType_ MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::CholmodSupernodalLLT<MatrixType_, UpLo_> Solver;
template <class PyClass>
void visit(PyClass &cl) const {
cl
.def(CholmodBaseVisitor<Solver>())
.def(bp::init<>(bp::arg("self"), "Default constructor"))
.def(bp::init<MatrixType>(bp::args("self", "matrix"),
"Constructs and performs the LLT "
"factorization from a given matrix."))
;
}
static void expose() {
static const std::string classname =
"CholmodSupernodalLLT_" + scalar_name<Scalar>::shortname();
expose(classname);
}
static void expose(const std::string &name) {
bp::class_<Solver, boost::noncopyable>(
name.c_str(),
"A supernodal direct Cholesky (LLT) factorization and solver based on "
"Cholmod.\n\n"
"This class allows to solve for A.X = B sparse linear problems via a "
"supernodal LL^T Cholesky factorization using the Cholmod library."
"This supernodal variant performs best on dense enough problems, e.g., "
"3D FEM, or very high order 2D FEM."
"The sparse matrix A must be selfadjoint and positive definite. The "
"vectors or matrices X and B can be either dense or sparse.",
bp::no_init)
.def(CholmodSupernodalLLTVisitor());
}
};
} // namespace eigenpy
#endif // ifndef
// __eigenpy_decomposition_sparse_cholmod_cholmod_supernodal_llt_hpp__
//
// Copyright (C) 2020 INRIA
// Copyright (C) 2024 LAAS-CNRS, INRIA
//
#ifndef __eigenpy_deprecation_hpp__
#define __eigenpy_deprecation_hpp__
#include "eigenpy/fwd.hpp"
namespace eigenpy {
enum class DeprecationType { DEPRECATION, FUTURE };
namespace detail {
inline PyObject *deprecationTypeToPyObj(DeprecationType dep) {
switch (dep) {
case DeprecationType::DEPRECATION:
return PyExc_DeprecationWarning;
case DeprecationType::FUTURE:
return PyExc_FutureWarning;
default: // The switch handles all cases explicitly, this should never be
// triggered.
throw std::invalid_argument(
"Undefined DeprecationType - this should never be triggered.");
}
}
} // namespace detail
/// @brief A Boost.Python call policy which triggers a Python warning on
/// precall.
template <DeprecationType deprecation_type = DeprecationType::DEPRECATION,
class BasePolicy = bp::default_call_policies>
struct deprecation_warning_policy : BasePolicy {
using result_converter = typename BasePolicy::result_converter;
using argument_package = typename BasePolicy::argument_package;
deprecation_warning_policy(const std::string &warning_msg)
: BasePolicy(), m_what(warning_msg) {}
std::string what() const { return m_what; }
const BasePolicy *derived() const {
return static_cast<const BasePolicy *>(this);
}
template <class ArgPackage>
bool precall(const ArgPackage &args) const {
PyErr_WarnEx(detail::deprecationTypeToPyObj(deprecation_type),
m_what.c_str(), 1);
return derived()->precall(args);
}
protected:
const std::string m_what;
};
template <DeprecationType deprecation_type = DeprecationType::DEPRECATION,
class BasePolicy = bp::default_call_policies>
struct deprecated_function
: deprecation_warning_policy<deprecation_type, BasePolicy> {
deprecated_function(const std::string &msg =
"This function has been marked as deprecated, and "
"will be removed in the future.")
: deprecation_warning_policy<deprecation_type, BasePolicy>(msg) {}
};
template <DeprecationType deprecation_type = DeprecationType::DEPRECATION,
class BasePolicy = bp::default_call_policies>
struct deprecated_member
: deprecation_warning_policy<deprecation_type, BasePolicy> {
deprecated_member(const std::string &msg =
"This attribute or method has been marked as "
"deprecated, and will be removed in the future.")
: deprecation_warning_policy<deprecation_type, BasePolicy>(msg) {}
};
} // namespace eigenpy
#endif // ifndef __eigenpy_deprecation_hpp__
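A sketch of how these call policies are meant to be attached to a binding so that calling it from Python raises a DeprecationWarning. The module name, function and message are hypothetical, and the include path is an assumption based on this header.

#include <boost/python.hpp>
#include "eigenpy/deprecation-policy.hpp"  // assumed path of the header above

double legacy_norm(double x) { return x < 0 ? -x : x; }

BOOST_PYTHON_MODULE(deprecation_example) {
  boost::python::def("legacy_norm", &legacy_norm,
                     eigenpy::deprecated_function<>(
                         "legacy_norm is deprecated; use abs() instead."));
}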
/*
* Copyright 2014-2019, CNRS
* Copyright 2018-2024, INRIA
*/
#ifndef __eigenpy_details_hpp__
#define __eigenpy_details_hpp__
#include "eigenpy/details/rvalue_from_python_data.hpp"
#include "eigenpy/fwd.hpp"
#include <patchlevel.h> // For PY_MAJOR_VERSION
#include <numpy/arrayobject.h>
#include <iostream>
#include "eigenpy/eigen-allocator.hpp"
#include "eigenpy/eigen-from-python.hpp"
#include "eigenpy/eigen-to-python.hpp"
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/exception.hpp"
#include "eigenpy/numpy-type.hpp"
#include "eigenpy/registration.hpp"
#include "eigenpy/map.hpp"
#define GET_PY_ARRAY_TYPE(array) PyArray_ObjectType(reinterpret_cast<PyObject *>(array), 0)
namespace eigenpy
{
template <typename SCALAR> struct NumpyEquivalentType {};
template <> struct NumpyEquivalentType<double> { enum { type_code = NPY_DOUBLE };};
template <> struct NumpyEquivalentType<int> { enum { type_code = NPY_INT };};
template <> struct NumpyEquivalentType<long> { enum { type_code = NPY_LONG };};
template <> struct NumpyEquivalentType<float> { enum { type_code = NPY_FLOAT };};
template <typename SCALAR1, typename SCALAR2>
struct FromTypeToType : public boost::false_type {};
template <typename SCALAR>
struct FromTypeToType<SCALAR,SCALAR> : public boost::true_type {};
template <> struct FromTypeToType<int,long> : public boost::true_type {};
template <> struct FromTypeToType<int,float> : public boost::true_type {};
template <> struct FromTypeToType<int,double> : public boost::true_type {};
template <> struct FromTypeToType<long,float> : public boost::true_type {};
template <> struct FromTypeToType<long,double> : public boost::true_type {};
template <> struct FromTypeToType<float,double> : public boost::true_type {};
#include "eigenpy/scalar-conversion.hpp"
namespace bp = boost::python;
namespace eigenpy {
enum NP_TYPE
{
DEFAULT_TYPE,
MATRIX_TYPE,
ARRAY_TYPE
};
struct NumpyType
{
static NumpyType & getInstance()
{
static NumpyType instance;
return instance;
}
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type,
typename Scalar = typename EigenType::Scalar>
struct expose_eigen_type_impl;
operator bp::object () { return CurrentNumpyType; }
bp::object make(PyArrayObject* pyArray, bool copy = false)
{ return make((PyObject*)pyArray,copy); }
bp::object make(PyObject* pyObj, bool copy = false)
{
if (getType() == DEFAULT_TYPE) {
std::cerr <<
"eigenpy warning: you use the deprecated class numpy.matrix without explicily asking for it. "
"The default behaviour will change to numpy.array at next major release.\n"
"- Either call eigenpy.switchToNumpyMatrix() before using eigenpy to suppress this warning\n"
"- or call eigenpy.switchToNumpyArray() and adapt your code accordingly.\n"
"See https://github.com/stack-of-tasks/eigenpy/issues/87 for further details."
<< std::endl;
switchToNumpyMatrix();
}
bp::object m;
if(PyType_IsSubtype(reinterpret_cast<PyTypeObject*>(CurrentNumpyType.ptr()),NumpyMatrixType))
m = NumpyMatrixObject(bp::object(bp::handle<>(pyObj)), bp::object(), copy);
// m = NumpyAsMatrixObject(bp::object(bp::handle<>(pyObj)));
else if(PyType_IsSubtype(reinterpret_cast<PyTypeObject*>(CurrentNumpyType.ptr()),NumpyArrayType))
m = bp::object(bp::handle<>(pyObj)); // nothing to do here
Py_INCREF(m.ptr());
return m;
}
static void setNumpyType(bp::object & obj)
{
PyTypeObject * obj_type = PyType_Check(obj.ptr()) ? reinterpret_cast<PyTypeObject*>(obj.ptr()) : obj.ptr()->ob_type;
if(PyType_IsSubtype(obj_type,getInstance().NumpyMatrixType))
switchToNumpyMatrix();
else if(PyType_IsSubtype(obj_type,getInstance().NumpyArrayType))
switchToNumpyArray();
}
static void switchToNumpyArray()
{
getInstance().CurrentNumpyType = getInstance().NumpyArrayObject;
getType() = ARRAY_TYPE;
}
static void switchToNumpyMatrix()
{
getInstance().CurrentNumpyType = getInstance().NumpyMatrixObject;
getType() = MATRIX_TYPE;
}
static NP_TYPE & getType()
{
static NP_TYPE np_type;
return np_type;
}
protected:
NumpyType()
{
pyModule = bp::import("numpy");
#if PY_MAJOR_VERSION >= 3
// TODO I don't know why this Py_INCREF is necessary.
// Without it, the destructor of NumpyType sometimes segfaults.
Py_INCREF(pyModule.ptr());
#endif
NumpyMatrixObject = pyModule.attr("matrix");
NumpyMatrixType = reinterpret_cast<PyTypeObject*>(NumpyMatrixObject.ptr());
NumpyArrayObject = pyModule.attr("ndarray");
NumpyArrayType = reinterpret_cast<PyTypeObject*>(NumpyArrayObject.ptr());
//NumpyAsMatrixObject = pyModule.attr("asmatrix");
//NumpyAsMatrixType = reinterpret_cast<PyTypeObject*>(NumpyAsMatrixObject.ptr());
CurrentNumpyType = NumpyMatrixObject; // default conversion
getType() = DEFAULT_TYPE;
}
template <typename MatType, typename Scalar>
struct expose_eigen_type_impl<MatType, Eigen::MatrixBase<MatType>, Scalar> {
static void run() {
if (check_registration<MatType>()) return;
bp::object CurrentNumpyType;
bp::object pyModule;
// Numpy types
bp::object NumpyMatrixObject; PyTypeObject * NumpyMatrixType;
//bp::object NumpyAsMatrixObject; PyTypeObject * NumpyAsMatrixType;
bp::object NumpyArrayObject; PyTypeObject * NumpyArrayType;
};
template<typename MatType>
struct EigenObjectAllocator
{
typedef MatType Type;
typedef typename MatType::Scalar Scalar;
static void allocate(PyArrayObject * pyArray, void * storage)
{
const int rows = (int)PyArray_DIMS(pyArray)[0];
const int cols = (int)PyArray_DIMS(pyArray)[1];
Type * mat_ptr = new (storage) Type(rows,cols);
if(NumpyEquivalentType<Scalar>::type_code == GET_PY_ARRAY_TYPE(pyArray))
{
*mat_ptr = MapNumpy<MatType,Scalar>::map(pyArray); // avoid useless cast
return;
}
if(GET_PY_ARRAY_TYPE(pyArray) == NPY_INT)
{
*mat_ptr = MapNumpy<MatType,int>::map(pyArray).template cast<Scalar>();
return;
}
if(GET_PY_ARRAY_TYPE(pyArray) == NPY_LONG)
{
*mat_ptr = MapNumpy<MatType,long>::map(pyArray).template cast<Scalar>();
return;
}
if(GET_PY_ARRAY_TYPE(pyArray) == NPY_FLOAT)
{
*mat_ptr = MapNumpy<MatType,float>::map(pyArray).template cast<Scalar>();
return;
}
if(GET_PY_ARRAY_TYPE(pyArray) == NPY_DOUBLE)
{
*mat_ptr = MapNumpy<MatType,double>::map(pyArray).template cast<Scalar>();
return;
}
}
/// \brief Copy mat into the Python array using Eigen::Map
template<typename MatrixDerived>
static void copy(const Eigen::MatrixBase<MatrixDerived> & mat_,
PyArrayObject * pyArray)
{
const MatrixDerived & mat = const_cast<const MatrixDerived &>(mat_.derived());
if(NumpyEquivalentType<Scalar>::type_code == GET_PY_ARRAY_TYPE(pyArray))
{
MapNumpy<MatType,Scalar>::map(pyArray) = mat; // no cast needed
return;
}
if(GET_PY_ARRAY_TYPE(pyArray) == NPY_INT)
{
MapNumpy<MatType,int>::map(pyArray) = mat.template cast<int>();
return;
}
if(GET_PY_ARRAY_TYPE(pyArray) == NPY_LONG)
{
MapNumpy<MatType,long>::map(pyArray) = mat.template cast<long>();
return;
}
if(GET_PY_ARRAY_TYPE(pyArray) == NPY_FLOAT)
{
MapNumpy<MatType,float>::map(pyArray) = mat.template cast<float>();
return;
}
if(GET_PY_ARRAY_TYPE(pyArray) == NPY_DOUBLE)
{
MapNumpy<MatType,double>::map(pyArray) = mat.template cast<double>();
return;
}
}
};
#if EIGEN_VERSION_AT_LEAST(3,2,0)
template<typename MatType>
struct EigenObjectAllocator< eigenpy::Ref<MatType> >
{
typedef eigenpy::Ref<MatType> Type;
typedef typename MatType::Scalar Scalar;
static void allocate(PyArrayObject * pyArray, void * storage)
{
typename MapNumpy<MatType,Scalar>::EigenMap numpyMap = MapNumpy<MatType,Scalar>::map(pyArray);
new (storage) Type(numpyMap);
}
static void copy(Type const & mat, PyArrayObject * pyArray)
{
EigenObjectAllocator<MatType>::copy(mat,pyArray);
}
};
// to-python
EigenToPyConverter<MatType>::registration();
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
EigenToPyConverter<Eigen::Ref<MatType> >::registration();
EigenToPyConverter<const Eigen::Ref<const MatType> >::registration();
#endif
/* --- TO PYTHON -------------------------------------------------------------- */
template<typename MatType>
struct EigenToPy
{
static PyObject* convert(MatType const & mat)
{
typedef typename MatType::Scalar Scalar;
assert( (mat.rows()<INT_MAX) && (mat.cols()<INT_MAX)
&& "Matrix range larger than int ... should never happen." );
const int R = (int)mat.rows(), C = (int)mat.cols();
PyArrayObject* pyArray;
// Allocate Python memory
if(C == 1 && NumpyType::getType() == ARRAY_TYPE) // Handle array with a single dimension
{
npy_intp shape[1] = { R };
pyArray = (PyArrayObject*) PyArray_SimpleNew(1, shape,
NumpyEquivalentType<Scalar>::type_code);
}
else
{
npy_intp shape[2] = { R,C };
pyArray = (PyArrayObject*) PyArray_SimpleNew(2, shape,
NumpyEquivalentType<Scalar>::type_code);
}
// Allocate memory
EigenObjectAllocator<MatType>::copy(mat,pyArray);
// Create an instance (either np.array or np.matrix)
return NumpyType::getInstance().make(pyArray).ptr();
}
};
/* --- FROM PYTHON ------------------------------------------------------------ */
template<typename MatType>
struct EigenFromPy
{
/// \brief Determine if pyObj can be converted into a MatType object
static void* convertible(PyArrayObject* pyArray)
{
if(!PyArray_Check(pyArray))
return 0;
if(MatType::IsVectorAtCompileTime)
{
// Special care for scalar matrices of dimension 1x1.
if(PyArray_DIMS(pyArray)[0] == 1 && PyArray_DIMS(pyArray)[1] == 1)
return pyArray;
if(PyArray_DIMS(pyArray)[0] > 1 && PyArray_DIMS(pyArray)[1] > 1)
{
#ifndef NDEBUG
std::cerr << "The number of dimension of the object does not correspond to a vector" << std::endl;
#endif
return 0;
}
if(((PyArray_DIMS(pyArray)[0] == 1) && (MatType::ColsAtCompileTime == 1))
|| ((PyArray_DIMS(pyArray)[1] == 1) && (MatType::RowsAtCompileTime == 1)))
{
#ifndef NDEBUG
if(MatType::ColsAtCompileTime == 1)
std::cerr << "The object is not a column vector" << std::endl;
else
std::cerr << "The object is not a row vector" << std::endl;
#endif
return 0;
}
}
if(PyArray_NDIM(pyArray) != 2)
{
if ( (PyArray_NDIM(pyArray) !=1) || (! MatType::IsVectorAtCompileTime) )
{
#ifndef NDEBUG
std::cerr << "The number of dimension of the object is not correct." << std::endl;
#endif
return 0;
}
}
if(PyArray_NDIM(pyArray) == 2)
{
const int R = (int)PyArray_DIMS(pyArray)[0];
const int C = (int)PyArray_DIMS(pyArray)[1];
if( (MatType::RowsAtCompileTime!=R)
&& (MatType::RowsAtCompileTime!=Eigen::Dynamic) )
return 0;
if( (MatType::ColsAtCompileTime!=C)
&& (MatType::ColsAtCompileTime!=Eigen::Dynamic) )
return 0;
}
// Check if the Scalar type of the obj_ptr is compatible with the Scalar type of MatType
if(GET_PY_ARRAY_TYPE(pyArray) == NPY_INT)
{
if(!FromTypeToType<int,typename MatType::Scalar>::value)
{
#ifndef NDEBUG
std::cerr << "The Python matrix scalar type (int) cannot be converted into the scalar type of the Eigen matrix. Loss of arithmetic precision" << std::endl;
#endif
return 0;
}
}
else if(GET_PY_ARRAY_TYPE(pyArray) == NPY_LONG)
{
if(!FromTypeToType<long,typename MatType::Scalar>::value)
{
#ifndef NDEBUG
std::cerr << "The Python matrix scalar type (long) cannot be converted into the scalar type of the Eigen matrix. Loss of arithmetic precision" << std::endl;
#endif
return 0;
}
}
else if(GET_PY_ARRAY_TYPE(pyArray) == NPY_FLOAT)
{
if(!FromTypeToType<float,typename MatType::Scalar>::value)
{
#ifndef NDEBUG
std::cerr << "The Python matrix scalar type (float) cannot be converted into the scalar type of the Eigen matrix. Loss of arithmetic precision" << std::endl;
#endif
return 0;
}
}
else if(GET_PY_ARRAY_TYPE(pyArray) == NPY_DOUBLE)
{
if(!FromTypeToType<double,typename MatType::Scalar>::value)
{
#ifndef NDEBUG
std::cerr << "The Python matrix scalar (double) type cannot be converted into the scalar type of the Eigen matrix. Loss of arithmetic precision." << std::endl;
#endif
return 0;
}
}
else if(GET_PY_ARRAY_TYPE(pyArray) != NumpyEquivalentType<typename MatType::Scalar>::type_code)
{
#ifndef NDEBUG
std::cerr << "The internal type as no Eigen equivalent." << std::endl;
#endif
return 0;
}
#ifdef NPY_1_8_API_VERSION
if(!(PyArray_FLAGS(pyArray)))
#else
if(!(PyArray_FLAGS(pyArray) & NPY_ALIGNED))
#endif
{
#ifndef NDEBUG
std::cerr << "NPY non-aligned matrices are not implemented." << std::endl;
#endif
return 0;
}
return pyArray;
}
/// \brief Allocate memory and copy pyObj in the new storage
static void construct(PyObject* pyObj,
bp::converter::rvalue_from_python_stage1_data* memory)
{
PyArrayObject * pyArray = reinterpret_cast<PyArrayObject*>(pyObj);
assert((PyArray_DIMS(pyArray)[0]<INT_MAX) && (PyArray_DIMS(pyArray)[1]<INT_MAX));
void* storage = ((bp::converter::rvalue_from_python_storage<MatType>*)
((void*)memory))->storage.bytes;
EigenObjectAllocator<MatType>::allocate(pyArray,storage);
memory->convertible = storage;
}
static void registration()
{
bp::converter::registry::push_back
(reinterpret_cast<void *(*)(_object *)>(&EigenFromPy::convertible),
&EigenFromPy::construct,bp::type_id<MatType>());
}
};
template<typename MatType>
struct EigenFromPy< Eigen::MatrixBase<MatType> >
{
typedef EigenFromPy<MatType> EigenFromPyDerived;
typedef Eigen::MatrixBase<MatType> Base;
/// \brief Determine if pyObj can be converted into a MatType object
static void* convertible(PyArrayObject* pyObj)
{
return EigenFromPyDerived::convertible(pyObj);
}
/// \brief Allocate memory and copy pyObj in the new storage
static void construct(PyObject* pyObj,
bp::converter::rvalue_from_python_stage1_data* memory)
{
EigenFromPyDerived::construct(pyObj,memory);
}
static void registration()
{
bp::converter::registry::push_back
(reinterpret_cast<void *(*)(_object *)>(&EigenFromPy::convertible),
&EigenFromPy::construct,bp::type_id<Base>());
}
};
#define numpy_import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } }
template<typename MatType,typename EigenEquivalentType>
void enableEigenPySpecific()
{
enableEigenPySpecific<MatType>();
// from-python
EigenFromPyConverter<MatType>::registration();
}
template<typename MatType>
struct EigenFromPyConverter
{
static void registration()
{
EigenFromPy<MatType>::registration();
// Add also conversion to Eigen::MatrixBase<MatType>
typedef Eigen::MatrixBase<MatType> MatTypeBase;
EigenFromPy<MatTypeBase>::registration();
}
};
#if EIGEN_VERSION_AT_LEAST(3,2,0)
/// Template specialization for Eigen::Ref
template<typename MatType>
struct EigenFromPyConverter< eigenpy::Ref<MatType> >
{
static void registration()
{
bp::converter::registry::push_back
(reinterpret_cast<void *(*)(_object *)>(&EigenFromPy<MatType>::convertible),
&EigenFromPy<MatType>::construct,bp::type_id<MatType>());
}
};
#endif
template<typename MatType>
void enableEigenPySpecific()
{
numpy_import_array();
if(check_registration<MatType>()) return;
bp::to_python_converter<MatType,EigenToPy<MatType> >();
};
template <typename MatType, typename Scalar>
struct expose_eigen_type_impl<MatType, Eigen::SparseMatrixBase<MatType>,
Scalar> {
static void run() {
if (check_registration<MatType>()) return;
// to-python
EigenToPyConverter<MatType>::registration();
// #if EIGEN_VERSION_AT_LEAST(3, 2, 0)
// EigenToPyConverter<Eigen::Ref<MatType> >::registration();
// EigenToPyConverter<const Eigen::Ref<const MatType> >::registration();
// #endif
// from-python
EigenFromPyConverter<MatType>::registration();
}
};
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType, typename Scalar>
struct expose_eigen_type_impl<TensorType, Eigen::TensorBase<TensorType>,
Scalar> {
static void run() {
if (check_registration<TensorType>()) return;
// to-python
EigenToPyConverter<TensorType>::registration();
EigenToPyConverter<Eigen::TensorRef<TensorType> >::registration();
EigenToPyConverter<
const Eigen::TensorRef<const TensorType> >::registration();
// from-python
EigenFromPyConverter<TensorType>::registration();
}
};
#endif
template <typename MatType>
void enableEigenPySpecific() {
expose_eigen_type_impl<MatType>::run();
}
} // namespace eigenpy
} // namespace eigenpy
#endif // ifndef __eigenpy_details_hpp__
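A sketch of how the registration entry points defined in this header are typically used inside a Boost.Python extension module. The module name and the extra matrix type are illustrative, and it assumes eigenpy/eigenpy.hpp pulls in the definitions above.

#include <boost/python.hpp>
#include "eigenpy/eigenpy.hpp"

BOOST_PYTHON_MODULE(my_module) {
  eigenpy::enableEigenPy();  // registers converters for the standard dense types
  // Register converters for an additional fixed-size matrix type through
  // expose_eigen_type_impl / enableEigenPySpecific defined above.
  eigenpy::enableEigenPySpecific<Eigen::Matrix<double, 6, 6> >();
}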
#ifndef __eigenpy_details_rvalue_from_python_data_hpp__
#define __eigenpy_details_rvalue_from_python_data_hpp__
#include <boost/python/converter/rvalue_from_python_data.hpp>
#include <Eigen/Core>
namespace boost
{
namespace python
{
namespace converter
{
/// \brief Template specialization of rvalue_from_python_data
template<typename Derived>
struct rvalue_from_python_data<Eigen::MatrixBase<Derived> const & >
: rvalue_from_python_storage<Eigen::MatrixBase<Derived> const & >
{
typedef Eigen::MatrixBase<Derived> const & T;
# if (!defined(__MWERKS__) || __MWERKS__ >= 0x3000) \
&& (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 245) \
&& (!defined(__DECCXX_VER) || __DECCXX_VER > 60590014) \
&& !defined(BOOST_PYTHON_SYNOPSIS) /* Synopsis' OpenCXX has trouble parsing this */
// This must always be a POD struct with m_data its first member.
BOOST_STATIC_ASSERT(BOOST_PYTHON_OFFSETOF(rvalue_from_python_storage<T>,stage1) == 0);
# endif
// The usual constructor
rvalue_from_python_data(rvalue_from_python_stage1_data const & _stage1)
{
this->stage1 = _stage1;
}
// This constructor just sets m_convertible -- used by
// implicitly_convertible<> to perform the final step of the
// conversion, where the construct() function is already known.
rvalue_from_python_data(void* convertible)
{
this->stage1.convertible = convertible;
}
// Destroys any object constructed in the storage.
~rvalue_from_python_data()
{
if (this->stage1.convertible == this->storage.bytes)
static_cast<Derived *>((void *)this->storage.bytes)->~Derived();
}
};
}
}
} // namespace boost::python::converter
#endif // ifndef __eigenpy_details_rvalue_from_python_data_hpp__
//
// Copyright (c) 2014-2023 CNRS INRIA
//
#ifndef __eigenpy_eigen_allocator_hpp__
#define __eigenpy_eigen_allocator_hpp__
#include "eigenpy/fwd.hpp"
#include "eigenpy/numpy-map.hpp"
#include "eigenpy/register.hpp"
#include "eigenpy/scalar-conversion.hpp"
#include "eigenpy/utils/is-aligned.hpp"
namespace eigenpy {
namespace details {
template <typename MatType,
bool IsVectorAtCompileTime = MatType::IsVectorAtCompileTime>
struct init_matrix_or_array {
static MatType *run(int rows, int cols, void *storage) {
if (storage)
return new (storage) MatType(rows, cols);
else
return new MatType(rows, cols);
}
static MatType *run(PyArrayObject *pyArray, void *storage = NULL) {
assert(PyArray_NDIM(pyArray) == 1 || PyArray_NDIM(pyArray) == 2);
int rows = -1, cols = -1;
const int ndim = PyArray_NDIM(pyArray);
if (ndim == 2) {
rows = (int)PyArray_DIMS(pyArray)[0];
cols = (int)PyArray_DIMS(pyArray)[1];
} else if (ndim == 1) {
rows = (int)PyArray_DIMS(pyArray)[0];
cols = 1;
}
return run(rows, cols, storage);
}
};
template <typename MatType>
struct init_matrix_or_array<MatType, true> {
static MatType *run(int rows, int cols, void *storage) {
if (storage)
return new (storage) MatType(rows, cols);
else
return new MatType(rows, cols);
}
static MatType *run(int size, void *storage) {
if (storage)
return new (storage) MatType(size);
else
return new MatType(size);
}
static MatType *run(PyArrayObject *pyArray, void *storage = NULL) {
const int ndim = PyArray_NDIM(pyArray);
if (ndim == 1) {
const int size = (int)PyArray_DIMS(pyArray)[0];
return run(size, storage);
} else {
const int rows = (int)PyArray_DIMS(pyArray)[0];
const int cols = (int)PyArray_DIMS(pyArray)[1];
return run(rows, cols, storage);
}
}
};
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename Tensor>
struct init_tensor {
static Tensor *run(PyArrayObject *pyArray, void *storage = NULL) {
enum { Rank = Tensor::NumDimensions };
assert(PyArray_NDIM(pyArray) == Rank);
typedef typename Tensor::Index Index;
Eigen::array<Index, Rank> dimensions;
for (int k = 0; k < PyArray_NDIM(pyArray); ++k)
dimensions[k] = PyArray_DIMS(pyArray)[k];
if (storage)
return new (storage) Tensor(dimensions);
else
return new Tensor(dimensions);
}
};
#endif
template <typename MatType>
struct check_swap_impl_matrix;
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct check_swap_impl;
template <typename MatType>
struct check_swap_impl<MatType, Eigen::MatrixBase<MatType> >
: check_swap_impl_matrix<MatType> {};
template <typename MatType>
struct check_swap_impl_matrix {
static bool run(PyArrayObject *pyArray,
const Eigen::MatrixBase<MatType> &mat) {
if (PyArray_NDIM(pyArray) == 0) return false;
if (mat.rows() == PyArray_DIMS(pyArray)[0])
return false;
else
return true;
}
};
template <typename EigenType>
bool check_swap(PyArrayObject *pyArray, const EigenType &mat) {
return check_swap_impl<EigenType>::run(pyArray, mat);
}
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct check_swap_impl_tensor {
static bool run(PyArrayObject * /*pyArray*/, const TensorType & /*tensor*/) {
return false;
}
};
template <typename TensorType>
struct check_swap_impl<TensorType, Eigen::TensorBase<TensorType> >
: check_swap_impl_tensor<TensorType> {};
#endif
// template <typename MatType>
// struct cast_impl_matrix;
//
// template <typename EigenType,
// typename BaseType = typename get_eigen_base_type<EigenType>::type>
// struct cast_impl;
//
// template <typename MatType>
// struct cast_impl<MatType, Eigen::MatrixBase<MatType> >
// : cast_impl_matrix<MatType> {};
//
// template <typename MatType>
// struct cast_impl_matrix
//{
// template <typename NewScalar, typename MatrixIn, typename MatrixOut>
// static void run(const Eigen::MatrixBase<MatrixIn> &input,
// const Eigen::MatrixBase<MatrixOut> &dest) {
// dest.const_cast_derived() = input.template cast<NewScalar>();
// }
// };
template <typename Scalar, typename NewScalar,
template <typename D> class EigenBase = Eigen::MatrixBase,
bool cast_is_valid = FromTypeToType<Scalar, NewScalar>::value>
struct cast {
template <typename MatrixIn, typename MatrixOut>
static void run(const Eigen::MatrixBase<MatrixIn> &input,
const Eigen::MatrixBase<MatrixOut> &dest) {
dest.const_cast_derived() = input.template cast<NewScalar>();
}
};
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename Scalar, typename NewScalar>
struct cast<Scalar, NewScalar, Eigen::TensorRef, true> {
template <typename TensorIn, typename TensorOut>
static void run(const TensorIn &input, TensorOut &dest) {
dest = input.template cast<NewScalar>();
}
};
#endif
template <typename Scalar, typename NewScalar,
template <typename D> class EigenBase>
struct cast<Scalar, NewScalar, EigenBase, false> {
template <typename MatrixIn, typename MatrixOut>
static void run(const MatrixIn /*input*/, const MatrixOut /*dest*/) {
// do nothing
assert(false && "Must never happened");
}
};
} // namespace details
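On plain Eigen types, details::cast boils down to a copy with a scalar conversion, guarded by FromTypeToType. A small sketch; the include path follows the #include directives shown earlier in the diff, and the values are illustrative only.

#include <Eigen/Core>
#include "eigenpy/eigen-allocator.hpp"

void cast_example() {
  const Eigen::MatrixXi src = Eigen::MatrixXi::Constant(2, 2, 3);
  Eigen::MatrixXd dst(2, 2);
  // Equivalent to dst = src.cast<double>(); int -> double is a valid
  // conversion according to FromTypeToType, so the generic overload runs.
  eigenpy::details::cast<int, double>::run(src, dst);
}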
#define EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX(MatType, Scalar, NewScalar, \
pyArray, mat) \
details::cast<Scalar, NewScalar>::run( \
NumpyMap<MatType, Scalar>::map(pyArray, \
details::check_swap(pyArray, mat)), \
mat)
#define EIGENPY_CAST_FROM_EIGEN_MATRIX_TO_PYARRAY(MatType, Scalar, NewScalar, \
mat, pyArray) \
details::cast<Scalar, NewScalar>::run( \
mat, NumpyMap<MatType, NewScalar>::map( \
pyArray, details::check_swap(pyArray, mat)))
// Define specific cast for Windows and Mac
#if defined _WIN32 || defined __CYGWIN__
// Manage NPY_INT on Windows (NPY_INT32 is NPY_LONG).
// See https://github.com/stack-of-tasks/eigenpy/pull/455
#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
MatType, Scalar, pyArray, mat, CAST_MACRO) \
case NPY_INT: \
CAST_MACRO(MatType, int32_t, Scalar, pyArray, mat); \
break; \
case NPY_UINT: \
CAST_MACRO(MatType, uint32_t, Scalar, pyArray, mat); \
break;
#elif defined __APPLE__
// Manage NPY_LONGLONG on Mac (NPY_INT64 is NPY_LONG).
// long long and long are the same type on this platform,
// but NPY_LONGLONG and NPY_LONG are different dtypes.
// See https://github.com/stack-of-tasks/eigenpy/pull/455
#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
MatType, Scalar, pyArray, mat, CAST_MACRO) \
case NPY_LONGLONG: \
CAST_MACRO(MatType, int64_t, Scalar, pyArray, mat); \
break; \
case NPY_ULONGLONG: \
CAST_MACRO(MatType, uint64_t, Scalar, pyArray, mat); \
break;
#else
#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
MatType, Scalar, pyArray, mat, CAST_MACRO)
#endif
/// Define casting between Numpy matrix type to Eigen type.
#define EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH( \
pyArray_type_code, MatType, Scalar, pyArray, mat, CAST_MACRO) \
switch (pyArray_type_code) { \
case NPY_BOOL: \
CAST_MACRO(MatType, bool, Scalar, pyArray, mat); \
break; \
case NPY_INT8: \
CAST_MACRO(MatType, int8_t, Scalar, pyArray, mat); \
break; \
case NPY_INT16: \
CAST_MACRO(MatType, int16_t, Scalar, pyArray, mat); \
break; \
case NPY_INT32: \
CAST_MACRO(MatType, int32_t, Scalar, pyArray, mat); \
break; \
case NPY_INT64: \
CAST_MACRO(MatType, int64_t, Scalar, pyArray, mat); \
break; \
case NPY_UINT8: \
CAST_MACRO(MatType, uint8_t, Scalar, pyArray, mat); \
break; \
case NPY_UINT16: \
CAST_MACRO(MatType, uint16_t, Scalar, pyArray, mat); \
break; \
case NPY_UINT32: \
CAST_MACRO(MatType, uint32_t, Scalar, pyArray, mat); \
break; \
case NPY_UINT64: \
CAST_MACRO(MatType, uint64_t, Scalar, pyArray, mat); \
break; \
case NPY_FLOAT: \
CAST_MACRO(MatType, float, Scalar, pyArray, mat); \
break; \
case NPY_CFLOAT: \
CAST_MACRO(MatType, std::complex<float>, Scalar, pyArray, mat); \
break; \
case NPY_DOUBLE: \
CAST_MACRO(MatType, double, Scalar, pyArray, mat); \
break; \
case NPY_CDOUBLE: \
CAST_MACRO(MatType, std::complex<double>, Scalar, pyArray, mat); \
break; \
case NPY_LONGDOUBLE: \
CAST_MACRO(MatType, long double, Scalar, pyArray, mat); \
break; \
case NPY_CLONGDOUBLE: \
CAST_MACRO(MatType, std::complex<long double>, Scalar, pyArray, mat); \
break; \
EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH_OS_SPECIFIC( \
MatType, Scalar, pyArray, mat, CAST_MACRO) \
default: \
throw Exception("You asked for a conversion which is not implemented."); \
}
template <typename EigenType>
struct EigenAllocator;
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct eigen_allocator_impl;
template <typename MatType>
struct eigen_allocator_impl_matrix;
template <typename MatType>
struct eigen_allocator_impl<MatType, Eigen::MatrixBase<MatType> >
: eigen_allocator_impl_matrix<MatType> {};
template <typename MatType>
struct eigen_allocator_impl<const MatType, const Eigen::MatrixBase<MatType> >
: eigen_allocator_impl_matrix<const MatType> {};
template <typename MatType>
struct eigen_allocator_impl_matrix {
typedef MatType Type;
typedef typename MatType::Scalar Scalar;
static void allocate(
PyArrayObject *pyArray,
boost::python::converter::rvalue_from_python_storage<MatType> *storage) {
void *raw_ptr = storage->storage.bytes;
assert(is_aligned(raw_ptr, EIGENPY_DEFAULT_ALIGN_BYTES) &&
"The pointer is not aligned.");
Type *mat_ptr = details::init_matrix_or_array<Type>::run(pyArray, raw_ptr);
Type &mat = *mat_ptr;
copy(pyArray, mat);
}
/// \brief Copy Python array into the input matrix mat.
template <typename MatrixDerived>
static void copy(PyArrayObject *pyArray,
const Eigen::MatrixBase<MatrixDerived> &mat_) {
MatrixDerived &mat = mat_.const_cast_derived();
const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
const int Scalar_type_code = Register::getTypeCode<Scalar>();
if (pyArray_type_code == Scalar_type_code) {
mat = NumpyMap<MatType, Scalar>::map(
pyArray, details::check_swap(pyArray, mat)); // avoid useless cast
return;
}
EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH(
pyArray_type_code, MatType, Scalar, pyArray, mat,
EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_MATRIX);
}
/// \brief Copy mat into the Python array using Eigen::Map
template <typename MatrixDerived>
static void copy(const Eigen::MatrixBase<MatrixDerived> &mat_,
PyArrayObject *pyArray) {
const MatrixDerived &mat =
const_cast<const MatrixDerived &>(mat_.derived());
const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
const int Scalar_type_code = Register::getTypeCode<Scalar>();
if (pyArray_type_code == Scalar_type_code) // no cast needed
{
NumpyMap<MatType, Scalar>::map(pyArray,
details::check_swap(pyArray, mat)) = mat;
return;
}
throw Exception(
"Scalar conversion from Eigen to Numpy is not implemented.");
}
};
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct eigen_allocator_impl_tensor;
template <typename TensorType>
struct eigen_allocator_impl<TensorType, Eigen::TensorBase<TensorType> >
: eigen_allocator_impl_tensor<TensorType> {};
template <typename TensorType>
struct eigen_allocator_impl<const TensorType,
const Eigen::TensorBase<TensorType> >
: eigen_allocator_impl_tensor<const TensorType> {};
template <typename TensorType>
struct eigen_allocator_impl_tensor {
typedef typename TensorType::Scalar Scalar;
static void allocate(
PyArrayObject *pyArray,
boost::python::converter::rvalue_from_python_storage<TensorType>
*storage) {
void *raw_ptr = storage->storage.bytes;
assert(is_aligned(raw_ptr, EIGENPY_DEFAULT_ALIGN_BYTES) &&
"The pointer is not aligned.");
TensorType *tensor_ptr =
details::init_tensor<TensorType>::run(pyArray, raw_ptr);
TensorType &tensor = *tensor_ptr;
copy(pyArray, tensor);
}
#define EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR(TensorType, Scalar, \
NewScalar, pyArray, tensor) \
{ \
typename NumpyMap<TensorType, Scalar>::EigenMap pyArray_map = \
NumpyMap<TensorType, Scalar>::map( \
pyArray, details::check_swap(pyArray, tensor)); \
details::cast<Scalar, NewScalar, Eigen::TensorRef>::run(pyArray_map, \
tensor); \
}
/// \brief Copy Python array into the input matrix mat.
template <typename TensorDerived>
static void copy(PyArrayObject *pyArray, TensorDerived &tensor) {
const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
const int Scalar_type_code = Register::getTypeCode<Scalar>();
if (pyArray_type_code == Scalar_type_code) {
tensor = NumpyMap<TensorType, Scalar>::map(
pyArray, details::check_swap(pyArray, tensor)); // avoid useless cast
return;
}
EIGENPY_CAST_FROM_NUMPY_TO_EIGEN_SWITCH(
pyArray_type_code, TensorType, Scalar, pyArray, tensor,
EIGENPY_CAST_FROM_PYARRAY_TO_EIGEN_TENSOR);
}
#define EIGENPY_CAST_FROM_EIGEN_TENSOR_TO_PYARRAY(TensorType, Scalar, \
NewScalar, tensor, pyArray) \
{ \
typename NumpyMap<TensorType, NewScalar>::EigenMap pyArray_map = \
NumpyMap<TensorType, NewScalar>::map( \
pyArray, details::check_swap(pyArray, tensor)); \
details::cast<Scalar, NewScalar, Eigen::TensorRef>::run(tensor, \
pyArray_map); \
}
/// \brief Copy mat into the Python array using Eigen::Map
static void copy(const TensorType &tensor, PyArrayObject *pyArray) {
const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
const int Scalar_type_code = Register::getTypeCode<Scalar>();
if (pyArray_type_code == Scalar_type_code) // no cast needed
{
NumpyMap<TensorType, Scalar>::map(
pyArray, details::check_swap(pyArray, tensor)) = tensor;
return;
}
throw Exception(
"Scalar conversion from Eigen to Numpy is not implemented.");
}
};
#endif
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
/// @brief Check if we need to allocate @tparam MatType to convert @param
/// pyArray.
/// @details do not allocate if:
/// want row-major & data C-contiguous OR
/// want col-major & data F-contiguous OR
/// you want a compile-time vector
/// in these cases, data layout fits desired view layout
template <typename MatType>
inline bool is_arr_layout_compatible_with_mat_type(PyArrayObject *pyArray) {
bool is_array_C_cont = PyArray_IS_C_CONTIGUOUS(pyArray);
bool is_array_F_cont = PyArray_IS_F_CONTIGUOUS(pyArray);
return (MatType::IsRowMajor && is_array_C_cont) ||
(!MatType::IsRowMajor && is_array_F_cont) ||
(MatType::IsVectorAtCompileTime &&
(is_array_C_cont || is_array_F_cont));
}
template <typename MatType, int Options, typename Stride>
struct eigen_allocator_impl_matrix<Eigen::Ref<MatType, Options, Stride> > {
typedef Eigen::Ref<MatType, Options, Stride> RefType;
typedef typename MatType::Scalar Scalar;
typedef
typename ::boost::python::detail::referent_storage<RefType &>::StorageType
StorageType;
static void allocate(
PyArrayObject *pyArray,
::boost::python::converter::rvalue_from_python_storage<RefType>
*storage) {
typedef typename StrideType<
MatType,
Eigen::internal::traits<RefType>::StrideType::InnerStrideAtCompileTime,
Eigen::internal::traits<RefType>::StrideType::
OuterStrideAtCompileTime>::type NumpyMapStride;
bool need_to_allocate = false;
const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
const int Scalar_type_code = Register::getTypeCode<Scalar>();
if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;
bool incompatible_layout =
!is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
need_to_allocate |= incompatible_layout;
if (Options !=
Eigen::Unaligned) // we need to check whether the memory is correctly
// aligned and composed of a contiguous segment
{
void *data_ptr = PyArray_DATA(pyArray);
if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr, Options))
need_to_allocate |= true;
}
void *raw_ptr = storage->storage.bytes;
if (need_to_allocate) {
MatType *mat_ptr;
mat_ptr = details::init_matrix_or_array<MatType>::run(pyArray);
RefType mat_ref(*mat_ptr);
new (raw_ptr) StorageType(mat_ref, pyArray, mat_ptr);
RefType &mat = *reinterpret_cast<RefType *>(raw_ptr);
EigenAllocator<MatType>::copy(pyArray, mat);
} else {
assert(pyArray_type_code == Scalar_type_code);
typename NumpyMap<MatType, Scalar, Options, NumpyMapStride>::EigenMap
numpyMap =
NumpyMap<MatType, Scalar, Options, NumpyMapStride>::map(pyArray);
RefType mat_ref(numpyMap);
new (raw_ptr) StorageType(mat_ref, pyArray);
}
}
static void copy(RefType const &ref, PyArrayObject *pyArray) {
EigenAllocator<MatType>::copy(ref, pyArray);
}
};
template <typename MatType, int Options, typename Stride>
struct eigen_allocator_impl_matrix<
const Eigen::Ref<const MatType, Options, Stride> > {
typedef const Eigen::Ref<const MatType, Options, Stride> RefType;
typedef typename MatType::Scalar Scalar;
typedef
typename ::boost::python::detail::referent_storage<RefType &>::StorageType
StorageType;
static void allocate(
PyArrayObject *pyArray,
::boost::python::converter::rvalue_from_python_storage<RefType>
*storage) {
typedef typename StrideType<
MatType,
Eigen::internal::traits<RefType>::StrideType::InnerStrideAtCompileTime,
Eigen::internal::traits<RefType>::StrideType::
OuterStrideAtCompileTime>::type NumpyMapStride;
bool need_to_allocate = false;
const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
const int Scalar_type_code = Register::getTypeCode<Scalar>();
if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;
bool incompatible_layout =
!is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
need_to_allocate |= incompatible_layout;
if (Options !=
Eigen::Unaligned) // we need to check whether the memory is correctly
// aligned and composed of a continuous segment
{
void *data_ptr = PyArray_DATA(pyArray);
if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr, Options))
need_to_allocate |= true;
}
void *raw_ptr = storage->storage.bytes;
if (need_to_allocate) {
MatType *mat_ptr;
mat_ptr = details::init_matrix_or_array<MatType>::run(pyArray);
RefType mat_ref(*mat_ptr);
new (raw_ptr) StorageType(mat_ref, pyArray, mat_ptr);
MatType &mat = *mat_ptr;
EigenAllocator<MatType>::copy(pyArray, mat);
} else {
assert(pyArray_type_code == Scalar_type_code);
typename NumpyMap<MatType, Scalar, Options, NumpyMapStride>::EigenMap
numpyMap =
NumpyMap<MatType, Scalar, Options, NumpyMapStride>::map(pyArray);
RefType mat_ref(numpyMap);
new (raw_ptr) StorageType(mat_ref, pyArray);
}
}
static void copy(RefType const &ref, PyArrayObject *pyArray) {
EigenAllocator<MatType>::copy(ref, pyArray);
}
};
#endif
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType, typename TensorRef>
struct eigen_allocator_impl_tensor_ref;
template <typename TensorType>
struct eigen_allocator_impl_tensor<Eigen::TensorRef<TensorType> >
: eigen_allocator_impl_tensor_ref<TensorType,
Eigen::TensorRef<TensorType> > {};
template <typename TensorType>
struct eigen_allocator_impl_tensor<const Eigen::TensorRef<const TensorType> >
: eigen_allocator_impl_tensor_ref<
const TensorType, const Eigen::TensorRef<const TensorType> > {};
template <typename TensorType, typename RefType>
struct eigen_allocator_impl_tensor_ref {
typedef typename TensorType::Scalar Scalar;
typedef
typename ::boost::python::detail::referent_storage<RefType &>::StorageType
StorageType;
static void allocate(
PyArrayObject *pyArray,
::boost::python::converter::rvalue_from_python_storage<RefType>
*storage) {
// typedef typename StrideType<
// MatType,
// Eigen::internal::traits<RefType>::StrideType::InnerStrideAtCompileTime,
// Eigen::internal::traits<RefType>::StrideType::
// OuterStrideAtCompileTime>::type NumpyMapStride;
static const int Options = Eigen::internal::traits<TensorType>::Options;
bool need_to_allocate = false;
const int pyArray_type_code = EIGENPY_GET_PY_ARRAY_TYPE(pyArray);
const int Scalar_type_code = Register::getTypeCode<Scalar>();
if (pyArray_type_code != Scalar_type_code) need_to_allocate |= true;
// bool incompatible_layout =
// !is_arr_layout_compatible_with_mat_type<MatType>(pyArray);
// need_to_allocate |= incompatible_layout;
// if (Options !=
// Eigen::Unaligned) // we need to check whether the memory is
// correctly
// // aligned and composed of a continuous segment
// {
// void *data_ptr = PyArray_DATA(pyArray);
// if (!PyArray_ISONESEGMENT(pyArray) || !is_aligned(data_ptr,
// Options))
// need_to_allocate |= true;
// }
void *raw_ptr = storage->storage.bytes;
if (need_to_allocate) {
typedef typename boost::remove_const<TensorType>::type TensorTypeNonConst;
TensorTypeNonConst *tensor_ptr;
tensor_ptr = details::init_tensor<TensorTypeNonConst>::run(pyArray);
RefType tensor_ref(*tensor_ptr);
new (raw_ptr) StorageType(tensor_ref, pyArray, tensor_ptr);
TensorTypeNonConst &tensor = *tensor_ptr;
EigenAllocator<TensorTypeNonConst>::copy(pyArray, tensor);
} else {
assert(pyArray_type_code == Scalar_type_code);
typename NumpyMap<TensorType, Scalar, Options>::EigenMap numpyMap =
NumpyMap<TensorType, Scalar, Options>::map(pyArray);
RefType tensor_ref(numpyMap);
new (raw_ptr) StorageType(tensor_ref, pyArray);
}
}
static void copy(RefType const &ref, PyArrayObject *pyArray) {
EigenAllocator<TensorType>::copy(ref, pyArray);
}
};
#endif
template <typename EigenType>
struct EigenAllocator : eigen_allocator_impl<EigenType> {};
} // namespace eigenpy
#endif // __eigenpy_eigen_allocator_hpp__
//
// Copyright (c) 2014-2023 CNRS INRIA
//
#ifndef __eigenpy_eigen_from_python_hpp__
#define __eigenpy_eigen_from_python_hpp__
#include "eigenpy/fwd.hpp"
#include "eigenpy/eigen-allocator.hpp"
#include "eigenpy/numpy-type.hpp"
#include "eigenpy/scalar-conversion.hpp"
namespace eigenpy {
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct expected_pytype_for_arg {};
template <typename MatType>
struct expected_pytype_for_arg<MatType, Eigen::MatrixBase<MatType> > {
static PyTypeObject const *get_pytype() {
PyTypeObject const *py_type = eigenpy::getPyArrayType();
return py_type;
}
};
} // namespace eigenpy
namespace boost {
namespace python {
namespace converter {
template <typename Scalar, int Rows, int Cols, int Options, int MaxRows,
int MaxCols>
struct expected_pytype_for_arg<
Eigen::Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
: eigenpy::expected_pytype_for_arg<
Eigen::Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > {};
} // namespace converter
} // namespace python
} // namespace boost
namespace eigenpy {
namespace details {
template <typename MatType, bool is_const = boost::is_const<MatType>::value>
struct copy_if_non_const {
static void run(const Eigen::MatrixBase<MatType> &input,
PyArrayObject *pyArray) {
EigenAllocator<MatType>::copy(input, pyArray);
}
};
template <typename MatType>
struct copy_if_non_const<const MatType, true> {
static void run(const Eigen::MatrixBase<MatType> & /*input*/,
PyArrayObject * /*pyArray*/) {}
};
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
template <typename _RefType>
struct referent_storage_eigen_ref {
typedef _RefType RefType;
typedef typename get_eigen_plain_type<RefType>::type PlainObjectType;
typedef typename ::eigenpy::aligned_storage<
::boost::python::detail::referent_size<RefType &>::value>::type
AlignedStorage;
referent_storage_eigen_ref()
: pyArray(NULL),
plain_ptr(NULL),
ref_ptr(reinterpret_cast<RefType *>(ref_storage.bytes)) {}
referent_storage_eigen_ref(const RefType &ref, PyArrayObject *pyArray,
PlainObjectType *plain_ptr = NULL)
: pyArray(pyArray),
plain_ptr(plain_ptr),
ref_ptr(reinterpret_cast<RefType *>(ref_storage.bytes)) {
Py_INCREF(pyArray);
new (ref_storage.bytes) RefType(ref);
}
~referent_storage_eigen_ref() {
if (plain_ptr != NULL && PyArray_ISWRITEABLE(pyArray))
copy_if_non_const<PlainObjectType>::run(*plain_ptr, pyArray);
Py_DECREF(pyArray);
if (plain_ptr != NULL) plain_ptr->~PlainObjectType();
ref_ptr->~RefType();
}
AlignedStorage ref_storage;
PyArrayObject *pyArray;
PlainObjectType *plain_ptr;
RefType *ref_ptr;
};
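// Reading guide (informal): when the converter had to allocate a plain
// object (plain_ptr != NULL), the destructor above writes the result back
// into the numpy array, provided that array is writeable. For one call of a
// bound function taking Eigen::Ref<Eigen::MatrixXd> on a mismatching array:
//   1. allocate() fills a temporary MatrixXd from the numpy data;
//   2. the wrapped C++ function works on the Eigen::Ref;
//   3. ~referent_storage_eigen_ref() copies the result back, then Py_DECREF
//      releases the reference taken on the array in the constructor.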
#endif
} // namespace details
} // namespace eigenpy
namespace boost {
namespace python {
namespace detail {
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
template <typename MatType, int Options, typename Stride>
struct referent_storage<Eigen::Ref<MatType, Options, Stride> &> {
typedef Eigen::Ref<MatType, Options, Stride> RefType;
typedef ::eigenpy::details::referent_storage_eigen_ref<RefType> StorageType;
typedef typename ::eigenpy::aligned_storage<
referent_size<StorageType &>::value>::type type;
};
template <typename MatType, int Options, typename Stride>
struct referent_storage<const Eigen::Ref<const MatType, Options, Stride> &> {
typedef Eigen::Ref<const MatType, Options, Stride> RefType;
typedef ::eigenpy::details::referent_storage_eigen_ref<RefType> StorageType;
typedef typename ::eigenpy::aligned_storage<
referent_size<StorageType &>::value>::type type;
};
#endif
} // namespace detail
} // namespace python
} // namespace boost
namespace boost {
namespace python {
namespace converter {
#define EIGENPY_RVALUE_FROM_PYTHON_DATA_INIT(type) \
typedef ::eigenpy::rvalue_from_python_data<type> Base; \
\
rvalue_from_python_data(rvalue_from_python_stage1_data const &_stage1) \
: Base(_stage1) {} \
\
rvalue_from_python_data(void *convertible) : Base(convertible){};
template <typename Scalar, int Rows, int Cols, int Options, int MaxRows,
int MaxCols>
struct rvalue_from_python_data<
Eigen::Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> const &>
: ::eigenpy::rvalue_from_python_data<Eigen::Matrix<
Scalar, Rows, Cols, Options, MaxRows, MaxCols> const &> {
typedef Eigen::Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> T;
EIGENPY_RVALUE_FROM_PYTHON_DATA_INIT(T const &)
};
template <typename Derived>
struct rvalue_from_python_data<Eigen::MatrixBase<Derived> const &>
: ::eigenpy::rvalue_from_python_data<Derived const &> {
EIGENPY_RVALUE_FROM_PYTHON_DATA_INIT(Derived const &)
};
template <typename Derived>
struct rvalue_from_python_data<Eigen::EigenBase<Derived> const &>
: ::eigenpy::rvalue_from_python_data<Derived const &> {
EIGENPY_RVALUE_FROM_PYTHON_DATA_INIT(Derived const &)
};
template <typename Derived>
struct rvalue_from_python_data<Eigen::PlainObjectBase<Derived> const &>
: ::eigenpy::rvalue_from_python_data<Derived const &> {
EIGENPY_RVALUE_FROM_PYTHON_DATA_INIT(Derived const &)
};
template <typename MatType, int Options, typename Stride>
struct rvalue_from_python_data<Eigen::Ref<MatType, Options, Stride> &>
: rvalue_from_python_storage<Eigen::Ref<MatType, Options, Stride> &> {
typedef Eigen::Ref<MatType, Options, Stride> RefType;
#if (!defined(__MWERKS__) || __MWERKS__ >= 0x3000) && \
(!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 245) && \
(!defined(__DECCXX_VER) || __DECCXX_VER > 60590014) && \
!defined(BOOST_PYTHON_SYNOPSIS) /* Synopsis' OpenCXX has trouble parsing \
this */
// This must always be a POD struct with m_data its first member.
BOOST_STATIC_ASSERT(BOOST_PYTHON_OFFSETOF(rvalue_from_python_storage<RefType>,
stage1) == 0);
#endif
// The usual constructor
rvalue_from_python_data(rvalue_from_python_stage1_data const &_stage1) {
this->stage1 = _stage1;
}
// This constructor just sets m_convertible -- used by
// implicitly_convertible<> to perform the final step of the
// conversion, where the construct() function is already known.
rvalue_from_python_data(void *convertible) {
this->stage1.convertible = convertible;
}
// Destroys any object constructed in the storage.
~rvalue_from_python_data() {
typedef ::eigenpy::details::referent_storage_eigen_ref<RefType> StorageType;
if (this->stage1.convertible == this->storage.bytes)
static_cast<StorageType *>((void *)this->storage.bytes)->~StorageType();
}
};
template <typename MatType, int Options, typename Stride>
struct rvalue_from_python_data<
const Eigen::Ref<const MatType, Options, Stride> &>
: rvalue_from_python_storage<
const Eigen::Ref<const MatType, Options, Stride> &> {
typedef Eigen::Ref<const MatType, Options, Stride> RefType;
#if (!defined(__MWERKS__) || __MWERKS__ >= 0x3000) && \
(!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 245) && \
(!defined(__DECCXX_VER) || __DECCXX_VER > 60590014) && \
!defined(BOOST_PYTHON_SYNOPSIS) /* Synopsis' OpenCXX has trouble parsing \
this */
// This must always be a POD struct with m_data its first member.
BOOST_STATIC_ASSERT(BOOST_PYTHON_OFFSETOF(rvalue_from_python_storage<RefType>,
stage1) == 0);
#endif
// The usual constructor
rvalue_from_python_data(rvalue_from_python_stage1_data const &_stage1) {
this->stage1 = _stage1;
}
// This constructor just sets m_convertible -- used by
// implicitly_convertible<> to perform the final step of the
// conversion, where the construct() function is already known.
rvalue_from_python_data(void *convertible) {
this->stage1.convertible = convertible;
}
// Destroys any object constructed in the storage.
~rvalue_from_python_data() {
typedef ::eigenpy::details::referent_storage_eigen_ref<RefType> StorageType;
if (this->stage1.convertible == this->storage.bytes)
static_cast<StorageType *>((void *)this->storage.bytes)->~StorageType();
}
};
} // namespace converter
} // namespace python
} // namespace boost
namespace eigenpy {
template <typename MatOrRefType>
void eigen_from_py_construct(
PyObject *pyObj, bp::converter::rvalue_from_python_stage1_data *memory) {
PyArrayObject *pyArray = reinterpret_cast<PyArrayObject *>(pyObj);
assert((PyArray_DIMS(pyArray)[0] < INT_MAX) &&
(PyArray_DIMS(pyArray)[1] < INT_MAX));
bp::converter::rvalue_from_python_storage<MatOrRefType> *storage =
reinterpret_cast<
bp::converter::rvalue_from_python_storage<MatOrRefType> *>(
reinterpret_cast<void *>(memory));
EigenAllocator<MatOrRefType>::allocate(pyArray, storage);
memory->convertible = storage->storage.bytes;
}
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct eigen_from_py_impl {
typedef typename EigenType::Scalar Scalar;
/// \brief Determine if pyObj can be converted into a MatType object
static void *convertible(PyObject *pyObj);
/// \brief Allocate memory and copy pyObj in the new storage
static void construct(PyObject *pyObj,
bp::converter::rvalue_from_python_stage1_data *memory);
static void registration();
};
template <typename MatType>
struct eigen_from_py_impl<MatType, Eigen::MatrixBase<MatType> > {
typedef typename MatType::Scalar Scalar;
/// \brief Determine if pyObj can be converted into a MatType object
static void *convertible(PyObject *pyObj);
/// \brief Allocate memory and copy pyObj in the new storage
static void construct(PyObject *pyObj,
bp::converter::rvalue_from_python_stage1_data *memory);
static void registration();
};
template <typename EigenType,
typename Scalar =
typename boost::remove_reference<EigenType>::type::Scalar>
struct EigenFromPy : eigen_from_py_impl<EigenType> {};
template <typename MatType>
void *eigen_from_py_impl<MatType, Eigen::MatrixBase<MatType> >::convertible(
PyObject *pyObj) {
if (!call_PyArray_Check(reinterpret_cast<PyObject *>(pyObj))) return 0;
PyArrayObject *pyArray = reinterpret_cast<PyArrayObject *>(pyObj);
if (!np_type_is_convertible_into_scalar<Scalar>(
EIGENPY_GET_PY_ARRAY_TYPE(pyArray)))
return 0;
if (MatType::IsVectorAtCompileTime) {
const Eigen::DenseIndex size_at_compile_time =
MatType::IsRowMajor ? MatType::ColsAtCompileTime
: MatType::RowsAtCompileTime;
switch (PyArray_NDIM(pyArray)) {
case 0:
return 0;
case 1: {
if (size_at_compile_time != Eigen::Dynamic) {
        // check that the sizes at compile time match
if (PyArray_DIMS(pyArray)[0] == size_at_compile_time)
return pyArray;
else
return 0;
} else // This is a dynamic MatType
return pyArray;
}
case 2: {
// Special care of scalar matrix of dimension 1x1.
if (PyArray_DIMS(pyArray)[0] == 1 && PyArray_DIMS(pyArray)[1] == 1) {
if (size_at_compile_time != Eigen::Dynamic) {
if (size_at_compile_time == 1)
return pyArray;
else
return 0;
} else // This is a dynamic MatType
return pyArray;
}
if (PyArray_DIMS(pyArray)[0] > 1 && PyArray_DIMS(pyArray)[1] > 1) {
return 0;
}
if (((PyArray_DIMS(pyArray)[0] == 1) &&
(MatType::ColsAtCompileTime == 1)) ||
((PyArray_DIMS(pyArray)[1] == 1) &&
(MatType::RowsAtCompileTime == 1))) {
return 0;
}
if (size_at_compile_time !=
          Eigen::Dynamic) {  // This is a fixed-size vector
const Eigen::DenseIndex pyArray_size =
PyArray_DIMS(pyArray)[0] > PyArray_DIMS(pyArray)[1]
? PyArray_DIMS(pyArray)[0]
: PyArray_DIMS(pyArray)[1];
if (size_at_compile_time != pyArray_size) return 0;
}
break;
}
default:
return 0;
}
} else // this is a matrix
{
if (PyArray_NDIM(pyArray) ==
1) // We can always convert a vector into a matrix
{
return pyArray;
}
if (PyArray_NDIM(pyArray) != 2) {
return 0;
}
if (PyArray_NDIM(pyArray) == 2) {
const int R = (int)PyArray_DIMS(pyArray)[0];
const int C = (int)PyArray_DIMS(pyArray)[1];
if ((MatType::RowsAtCompileTime != R) &&
(MatType::RowsAtCompileTime != Eigen::Dynamic))
return 0;
if ((MatType::ColsAtCompileTime != C) &&
(MatType::ColsAtCompileTime != Eigen::Dynamic))
return 0;
}
}
#ifdef NPY_1_8_API_VERSION
if (!(PyArray_FLAGS(pyArray)))
#else
if (!(PyArray_FLAGS(pyArray) & NPY_ALIGNED))
#endif
{
return 0;
}
return pyArray;
}
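// Informal summary of the shape rules implemented above (the dtype and
// alignment checks still apply; illustration only):
//
//   Eigen::Vector3d  <- (3,) or (3, 1); (1, 3) and (1, 1) are rejected
//   Eigen::VectorXd  <- (n,), (n, 1) or (1, 1); (1, n) with n > 1 is
//                       rejected (it converts to RowVectorXd instead)
//   Eigen::MatrixXd  <- (n,) or any (r, c)
//   Eigen::Matrix2d  <- (2, 2) only among 2-D shapes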
template <typename MatType>
void eigen_from_py_impl<MatType, Eigen::MatrixBase<MatType> >::construct(
PyObject *pyObj, bp::converter::rvalue_from_python_stage1_data *memory) {
eigen_from_py_construct<MatType>(pyObj, memory);
}
template <typename MatType>
void eigen_from_py_impl<MatType, Eigen::MatrixBase<MatType> >::registration() {
bp::converter::registry::push_back(
reinterpret_cast<void *(*)(_object *)>(&eigen_from_py_impl::convertible),
&eigen_from_py_impl::construct, bp::type_id<MatType>()
#ifndef BOOST_PYTHON_NO_PY_SIGNATURES
,
&eigenpy::expected_pytype_for_arg<MatType>::get_pytype
#endif
);
}
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct eigen_from_py_converter_impl;
template <typename EigenType>
struct EigenFromPyConverter : eigen_from_py_converter_impl<EigenType> {};
template <typename MatType>
struct eigen_from_py_converter_impl<MatType, Eigen::MatrixBase<MatType> > {
static void registration() {
EigenFromPy<MatType>::registration();
// Add conversion to Eigen::MatrixBase<MatType>
typedef Eigen::MatrixBase<MatType> MatrixBase;
EigenFromPy<MatrixBase>::registration();
// Add conversion to Eigen::EigenBase<MatType>
typedef Eigen::EigenBase<MatType> EigenBase;
EigenFromPy<EigenBase, typename MatType::Scalar>::registration();
// Add conversion to Eigen::PlainObjectBase<MatType>
typedef Eigen::PlainObjectBase<MatType> PlainObjectBase;
EigenFromPy<PlainObjectBase>::registration();
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
// Add conversion to Eigen::Ref<MatType>
typedef Eigen::Ref<MatType> RefType;
EigenFromPy<RefType>::registration();
    // Add conversion to const Eigen::Ref<const MatType>
typedef const Eigen::Ref<const MatType> ConstRefType;
EigenFromPy<ConstRefType>::registration();
#endif
}
};
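// Consequence of the registrations above (informal sketch; `my_func` is a
// hypothetical user function): once EigenFromPyConverter<Eigen::MatrixXd>
// has been registered, a numpy array can be passed to any of
//
//   void my_func(const Eigen::MatrixXd &);
//   void my_func(const Eigen::Ref<const Eigen::MatrixXd> &);
//   void my_func(const Eigen::MatrixBase<Eigen::MatrixXd> &);
//
// when exposed through bp::def("my_func", &my_func).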
template <typename MatType>
struct EigenFromPy<Eigen::MatrixBase<MatType> > : EigenFromPy<MatType> {
typedef EigenFromPy<MatType> EigenFromPyDerived;
typedef Eigen::MatrixBase<MatType> Base;
static void registration() {
bp::converter::registry::push_back(
reinterpret_cast<void *(*)(_object *)>(&EigenFromPy::convertible),
&EigenFromPy::construct, bp::type_id<Base>()
#ifndef BOOST_PYTHON_NO_PY_SIGNATURES
,
&eigenpy::expected_pytype_for_arg<MatType>::get_pytype
#endif
);
}
};
template <typename MatType>
struct EigenFromPy<Eigen::EigenBase<MatType>, typename MatType::Scalar>
: EigenFromPy<MatType> {
typedef EigenFromPy<MatType> EigenFromPyDerived;
typedef Eigen::EigenBase<MatType> Base;
static void registration() {
bp::converter::registry::push_back(
reinterpret_cast<void *(*)(_object *)>(&EigenFromPy::convertible),
&EigenFromPy::construct, bp::type_id<Base>()
#ifndef BOOST_PYTHON_NO_PY_SIGNATURES
,
&eigenpy::expected_pytype_for_arg<MatType>::get_pytype
#endif
);
}
};
template <typename MatType>
struct EigenFromPy<Eigen::PlainObjectBase<MatType> > : EigenFromPy<MatType> {
typedef EigenFromPy<MatType> EigenFromPyDerived;
typedef Eigen::PlainObjectBase<MatType> Base;
static void registration() {
bp::converter::registry::push_back(
reinterpret_cast<void *(*)(_object *)>(&EigenFromPy::convertible),
&EigenFromPy::construct, bp::type_id<Base>()
#ifndef BOOST_PYTHON_NO_PY_SIGNATURES
,
&eigenpy::expected_pytype_for_arg<MatType>::get_pytype
#endif
);
}
};
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
template <typename MatType, int Options, typename Stride>
struct EigenFromPy<Eigen::Ref<MatType, Options, Stride> > {
typedef Eigen::Ref<MatType, Options, Stride> RefType;
typedef typename MatType::Scalar Scalar;
/// \brief Determine if pyObj can be converted into a MatType object
static void *convertible(PyObject *pyObj) {
if (!call_PyArray_Check(pyObj)) return 0;
PyArrayObject *pyArray = reinterpret_cast<PyArrayObject *>(pyObj);
if (!PyArray_ISWRITEABLE(pyArray)) return 0;
return EigenFromPy<MatType>::convertible(pyObj);
}
static void registration() {
bp::converter::registry::push_back(
reinterpret_cast<void *(*)(_object *)>(&EigenFromPy::convertible),
&eigen_from_py_construct<RefType>, bp::type_id<RefType>()
#ifndef BOOST_PYTHON_NO_PY_SIGNATURES
,
&eigenpy::expected_pytype_for_arg<MatType>::get_pytype
#endif
);
}
};
template <typename MatType, int Options, typename Stride>
struct EigenFromPy<const Eigen::Ref<const MatType, Options, Stride> > {
typedef const Eigen::Ref<const MatType, Options, Stride> ConstRefType;
typedef typename MatType::Scalar Scalar;
/// \brief Determine if pyObj can be converted into a MatType object
static void *convertible(PyObject *pyObj) {
return EigenFromPy<MatType>::convertible(pyObj);
}
static void registration() {
bp::converter::registry::push_back(
reinterpret_cast<void *(*)(_object *)>(&EigenFromPy::convertible),
&eigen_from_py_construct<ConstRefType>, bp::type_id<ConstRefType>()
#ifndef BOOST_PYTHON_NO_PY_SIGNATURES
,
&eigenpy::expected_pytype_for_arg<MatType>::get_pytype
#endif
);
}
};
#endif
} // namespace eigenpy
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
#include "eigenpy/tensor/eigen-from-python.hpp"
#endif
#include "eigenpy/sparse/eigen-from-python.hpp"
#endif // __eigenpy_eigen_from_python_hpp__
//
// Copyright (c) 2014-2024 CNRS INRIA
//
#ifndef __eigenpy_eigen_to_python_hpp__
#define __eigenpy_eigen_to_python_hpp__
#include <boost/type_traits.hpp>
#include "eigenpy/fwd.hpp"
#include "eigenpy/eigen-allocator.hpp"
#include "eigenpy/numpy-allocator.hpp"
#include "eigenpy/scipy-allocator.hpp"
#include "eigenpy/numpy-type.hpp"
#include "eigenpy/scipy-type.hpp"
#include "eigenpy/registration.hpp"
namespace eigenpy {
EIGENPY_DOCUMENTATION_START_IGNORE
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct eigen_to_py_impl;
template <typename MatType>
struct eigen_to_py_impl_matrix;
template <typename MatType>
struct eigen_to_py_impl<MatType, Eigen::MatrixBase<MatType> >
: eigen_to_py_impl_matrix<MatType> {};
template <typename MatType>
struct eigen_to_py_impl<MatType&, Eigen::MatrixBase<MatType> >
: eigen_to_py_impl_matrix<MatType&> {};
template <typename MatType>
struct eigen_to_py_impl<const MatType, const Eigen::MatrixBase<MatType> >
: eigen_to_py_impl_matrix<const MatType> {};
template <typename MatType>
struct eigen_to_py_impl<const MatType&, const Eigen::MatrixBase<MatType> >
: eigen_to_py_impl_matrix<const MatType&> {};
template <typename MatType>
struct eigen_to_py_impl_matrix {
static PyObject* convert(
typename boost::add_reference<
typename boost::add_const<MatType>::type>::type mat) {
typedef typename boost::remove_const<
typename boost::remove_reference<MatType>::type>::type MatrixDerived;
assert((mat.rows() < INT_MAX) && (mat.cols() < INT_MAX) &&
"Matrix range larger than int ... should never happen.");
const npy_intp R = (npy_intp)mat.rows(), C = (npy_intp)mat.cols();
PyArrayObject* pyArray;
// Allocate Python memory
if ((((!(C == 1) != !(R == 1)) && !MatrixDerived::IsVectorAtCompileTime) ||
MatrixDerived::IsVectorAtCompileTime)) // Handle array with a single
// dimension
{
npy_intp shape[1] = {C == 1 ? R : C};
pyArray = NumpyAllocator<MatType>::allocate(
const_cast<MatrixDerived&>(mat.derived()), 1, shape);
} else {
npy_intp shape[2] = {R, C};
pyArray = NumpyAllocator<MatType>::allocate(
const_cast<MatrixDerived&>(mat.derived()), 2, shape);
}
// Create an instance (either np.array or np.matrix)
return NumpyType::make(pyArray).ptr();
}
static PyTypeObject const* get_pytype() { return getPyArrayType(); }
};
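// Shape of the returned numpy array (informal): compile-time vectors and
// matrices with a single row or column become 1-D arrays, everything else
// becomes a 2-D array.
//
//   Eigen::Vector3d        -> ndarray of shape (3,)
//   Eigen::MatrixXd(1, 5)  -> ndarray of shape (5,)
//   Eigen::MatrixXd(2, 3)  -> ndarray of shape (2, 3)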
template <typename MatType>
struct eigen_to_py_impl_sparse_matrix;
template <typename MatType>
struct eigen_to_py_impl<MatType, Eigen::SparseMatrixBase<MatType> >
: eigen_to_py_impl_sparse_matrix<MatType> {};
template <typename MatType>
struct eigen_to_py_impl<MatType&, Eigen::SparseMatrixBase<MatType> >
: eigen_to_py_impl_sparse_matrix<MatType&> {};
template <typename MatType>
struct eigen_to_py_impl<const MatType, const Eigen::SparseMatrixBase<MatType> >
: eigen_to_py_impl_sparse_matrix<const MatType> {};
template <typename MatType>
struct eigen_to_py_impl<const MatType&, const Eigen::SparseMatrixBase<MatType> >
: eigen_to_py_impl_sparse_matrix<const MatType&> {};
template <typename MatType>
struct eigen_to_py_impl_sparse_matrix {
enum { IsRowMajor = MatType::IsRowMajor };
static PyObject* convert(
typename boost::add_reference<
typename boost::add_const<MatType>::type>::type mat) {
typedef typename boost::remove_const<
typename boost::remove_reference<MatType>::type>::type MatrixDerived;
// Allocate and perform the copy
PyObject* pyArray =
ScipyAllocator<MatType>::allocate(const_cast<MatrixDerived&>(mat));
return pyArray;
}
static PyTypeObject const* get_pytype() {
return IsRowMajor ? ScipyType::getScipyCSRMatrixType()
: ScipyType::getScipyCSCMatrixType();
}
};
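// Informal note: the storage order of the Eigen sparse matrix selects the
// SciPy class of the returned object.
//
//   Eigen::SparseMatrix<double>                   -> scipy.sparse.csc_matrix
//   Eigen::SparseMatrix<double, Eigen::RowMajor>  -> scipy.sparse.csr_matrix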
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct eigen_to_py_impl_tensor;
template <typename TensorType>
struct eigen_to_py_impl<TensorType, Eigen::TensorBase<TensorType> >
: eigen_to_py_impl_tensor<TensorType> {};
template <typename TensorType>
struct eigen_to_py_impl<const TensorType, const Eigen::TensorBase<TensorType> >
: eigen_to_py_impl_tensor<const TensorType> {};
template <typename TensorType>
struct eigen_to_py_impl_tensor {
static PyObject* convert(
typename boost::add_reference<
typename boost::add_const<TensorType>::type>::type tensor) {
// typedef typename boost::remove_const<
// typename boost::remove_reference<Tensor>::type>::type
// TensorDerived;
static const int NumIndices = TensorType::NumIndices;
npy_intp shape[NumIndices];
for (int k = 0; k < NumIndices; ++k) shape[k] = tensor.dimension(k);
PyArrayObject* pyArray = NumpyAllocator<TensorType>::allocate(
const_cast<TensorType&>(tensor), NumIndices, shape);
// Create an instance (either np.array or np.matrix)
return NumpyType::make(pyArray).ptr();
}
static PyTypeObject const* get_pytype() { return getPyArrayType(); }
};
#endif
EIGENPY_DOCUMENTATION_END_IGNORE
template <typename EigenType,
typename Scalar =
typename boost::remove_reference<EigenType>::type::Scalar>
struct EigenToPy : eigen_to_py_impl<EigenType> {};
template <typename MatType>
struct EigenToPyConverter {
static void registration() {
bp::to_python_converter<MatType, EigenToPy<MatType>, true>();
}
};
} // namespace eigenpy
namespace boost {
namespace python {
template <typename MatrixRef, class MakeHolder>
struct to_python_indirect_eigen {
template <class U>
inline PyObject* operator()(U const& mat) const {
return eigenpy::EigenToPy<MatrixRef>::convert(const_cast<U&>(mat));
}
#ifndef BOOST_PYTHON_NO_PY_SIGNATURES
inline PyTypeObject const* get_pytype() const {
return converter::registered_pytype<MatrixRef>::get_pytype();
}
#endif
};
template <typename Scalar, int RowsAtCompileTime, int ColsAtCompileTime,
int Options, int MaxRowsAtCompileTime, int MaxColsAtCompileTime,
class MakeHolder>
struct to_python_indirect<
Eigen::Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, Options,
MaxRowsAtCompileTime, MaxColsAtCompileTime>&,
MakeHolder>
: to_python_indirect_eigen<
Eigen::Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, Options,
MaxRowsAtCompileTime, MaxColsAtCompileTime>&,
MakeHolder> {};
template <typename Scalar, int RowsAtCompileTime, int ColsAtCompileTime,
int Options, int MaxRowsAtCompileTime, int MaxColsAtCompileTime,
class MakeHolder>
struct to_python_indirect<
const Eigen::Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, Options,
MaxRowsAtCompileTime, MaxColsAtCompileTime>&,
MakeHolder>
: to_python_indirect_eigen<
const Eigen::Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime,
Options, MaxRowsAtCompileTime,
MaxColsAtCompileTime>&,
MakeHolder> {};
} // namespace python
} // namespace boost
#endif // __eigenpy_eigen_to_python_hpp__
//
// Copyright (c) 2020-2023 INRIA
//
#ifndef __eigenpy_eigen_typedef_hpp__
#define __eigenpy_eigen_typedef_hpp__
#include "eigenpy/fwd.hpp"
#define EIGENPY_MAKE_TYPEDEFS(Type, Options, TypeSuffix, Size, SizeSuffix) \
/** \ingroup matrixtypedefs */ \
typedef Eigen::Matrix<Type, Size, Size, Options> \
Matrix##SizeSuffix##TypeSuffix; \
/** \ingroup matrixtypedefs */ \
typedef Eigen::Matrix<Type, Size, 1> Vector##SizeSuffix##TypeSuffix; \
/** \ingroup matrixtypedefs */ \
typedef Eigen::Matrix<Type, 1, Size> RowVector##SizeSuffix##TypeSuffix;
#define EIGENPY_MAKE_FIXED_TYPEDEFS(Type, Options, TypeSuffix, Size) \
/** \ingroup matrixtypedefs */ \
typedef Eigen::Matrix<Type, Size, Eigen::Dynamic, Options> \
Matrix##Size##X##TypeSuffix; \
/** \ingroup matrixtypedefs */ \
typedef Eigen::Matrix<Type, Eigen::Dynamic, Size, Options> \
Matrix##X##Size##TypeSuffix;
#define EIGENPY_MAKE_TYPEDEFS_ALL_SIZES(Type, Options, TypeSuffix) \
EIGENPY_MAKE_TYPEDEFS(Type, Options, TypeSuffix, 2, 2) \
EIGENPY_MAKE_TYPEDEFS(Type, Options, TypeSuffix, 3, 3) \
EIGENPY_MAKE_TYPEDEFS(Type, Options, TypeSuffix, 4, 4) \
EIGENPY_MAKE_TYPEDEFS(Type, Options, TypeSuffix, Eigen::Dynamic, X) \
EIGENPY_MAKE_FIXED_TYPEDEFS(Type, Options, TypeSuffix, 2) \
EIGENPY_MAKE_FIXED_TYPEDEFS(Type, Options, TypeSuffix, 3) \
EIGENPY_MAKE_FIXED_TYPEDEFS(Type, Options, TypeSuffix, 4) \
EIGENPY_MAKE_TYPEDEFS(Type, Options, TypeSuffix, 1, 1) \
  typedef Eigen::SparseMatrix<Type, Options> SparseMatrixX##TypeSuffix
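// Expansion example (illustration only):
// EIGENPY_MAKE_TYPEDEFS_ALL_SIZES(double, 0, d) defines, among others,
//
//   typedef Eigen::Matrix<double, 3, 3, 0> Matrix3d;
//   typedef Eigen::Matrix<double, 3, 1> Vector3d;
//   typedef Eigen::Matrix<double, 1, 3> RowVector3d;
//   typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, 0> MatrixXd;
//   typedef Eigen::SparseMatrix<double, 0> SparseMatrixXd;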
#endif // ifndef __eigenpy_eigen_typedef_hpp__
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_eigen_eigen_base_hpp__
#define __eigenpy_eigen_eigen_base_hpp__
#include "eigenpy/eigenpy.hpp"
namespace eigenpy {
template <typename Derived>
struct EigenBaseVisitor
: public boost::python::def_visitor<EigenBaseVisitor<Derived> > {
template <class PyClass>
void visit(PyClass &cl) const {
cl.def("cols", &Derived::cols, bp::arg("self"),
"Returns the number of columns.")
.def("rows", &Derived::rows, bp::arg("self"),
"Returns the number of rows.")
.def("size", &Derived::rows, bp::arg("self"),
"Returns the number of coefficients, which is rows()*cols().");
}
};
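// Usage sketch (hypothetical binding code, not part of this header): the
// visitor is meant to be combined with a Boost.Python class wrapping an
// Eigen expression type, e.g.
//
//   bp::class_<SomeEigenObject>("SomeEigenObject", bp::no_init)
//       .def(eigenpy::EigenBaseVisitor<SomeEigenObject>());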
} // namespace eigenpy
#endif // ifndef __eigenpy_eigen_eigen_base_hpp__
/*
* Copyright 2014-2019, CNRS
 * Copyright 2018-2024, INRIA
*/
#ifndef __eigenpy_eigenpy_hpp__
#define __eigenpy_eigenpy_hpp__
#include "eigenpy/fwd.hpp"
#include "eigenpy/deprecated.hh"
#include "eigenpy/eigenpy_export.h"
#include "eigenpy/eigen-typedef.hpp"
#include "eigenpy/expose.hpp"
#if EIGEN_VERSION_AT_LEAST(3,2,0)
#include "eigenpy/ref.hpp"
/// Custom CallPolicies
#include "eigenpy/std-unique-ptr.hpp"
#define ENABLE_SPECIFIC_MATRIX_TYPE(TYPE)   \
  ::eigenpy::enableEigenPySpecific<TYPE>(); \
  ::eigenpy::enableEigenPySpecific< eigenpy::Ref<TYPE> >();
#else  // if EIGEN_VERSION_AT_LEAST(3,2,0)
#define ENABLE_SPECIFIC_MATRIX_TYPE(TYPE) \
  ::eigenpy::enableEigenPySpecific<TYPE>();
#endif  // if EIGEN_VERSION_AT_LEAST(3,2,0)
namespace eigenpy {
/* Enable Eigen-Numpy serialization for a set of standard MatrixBase
 * instances. */
void EIGENPY_DLLAPI enableEigenPy();
bool EIGENPY_DLLAPI withTensorSupport();
/* Enable the Eigen--Numpy serialization for the templated MatType class.*/
template <typename MatType>
void enableEigenPySpecific();
/* Enable the Eigen--Numpy serialization for the templated MatrixBase class.
 * The second template argument is used for inheritance of Eigen classes. If
 * using a native Eigen::MatrixBase, simply repeat the same arg twice. */
template <typename MatType, typename EigenEquivalentType>
EIGENPY_DEPRECATED void enableEigenPySpecific();
template <typename Scalar, int Options>
EIGEN_DONT_INLINE void exposeType() {
EIGENPY_MAKE_TYPEDEFS_ALL_SIZES(Scalar, Options, s);
EIGENPY_UNUSED_TYPE(Vector1s);
EIGENPY_UNUSED_TYPE(RowVector1s);
ENABLE_SPECIFIC_MATRIX_TYPE(Matrix1s);
ENABLE_SPECIFIC_MATRIX_TYPE(Vector2s);
ENABLE_SPECIFIC_MATRIX_TYPE(RowVector2s);
ENABLE_SPECIFIC_MATRIX_TYPE(Matrix2s);
ENABLE_SPECIFIC_MATRIX_TYPE(Matrix2Xs);
ENABLE_SPECIFIC_MATRIX_TYPE(MatrixX2s);
#include "eigenpy/details.hpp"
ENABLE_SPECIFIC_MATRIX_TYPE(Vector3s);
ENABLE_SPECIFIC_MATRIX_TYPE(RowVector3s);
ENABLE_SPECIFIC_MATRIX_TYPE(Matrix3s);
ENABLE_SPECIFIC_MATRIX_TYPE(Matrix3Xs);
ENABLE_SPECIFIC_MATRIX_TYPE(MatrixX3s);
ENABLE_SPECIFIC_MATRIX_TYPE(Vector4s);
ENABLE_SPECIFIC_MATRIX_TYPE(RowVector4s);
ENABLE_SPECIFIC_MATRIX_TYPE(Matrix4s);
ENABLE_SPECIFIC_MATRIX_TYPE(Matrix4Xs);
ENABLE_SPECIFIC_MATRIX_TYPE(MatrixX4s);
ENABLE_SPECIFIC_MATRIX_TYPE(VectorXs);
ENABLE_SPECIFIC_MATRIX_TYPE(RowVectorXs);
ENABLE_SPECIFIC_MATRIX_TYPE(MatrixXs);
enableEigenPySpecific<SparseMatrixXs>();
}
template <typename Scalar>
EIGEN_DONT_INLINE void exposeType() {
exposeType<Scalar, 0>();
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
enableEigenPySpecific<Eigen::Tensor<Scalar, 1> >();
enableEigenPySpecific<Eigen::Tensor<Scalar, 2> >();
enableEigenPySpecific<Eigen::Tensor<Scalar, 3> >();
#endif
}
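// Usage sketch (illustrative, assuming the scalar type is already known to
// eigenpy): a Boost.Python module can expose the standard matrix, vector
// and tensor bindings for one scalar type with a single call, typically
// right after enableEigenPy().
//
//   BOOST_PYTHON_MODULE(my_module) {
//     eigenpy::enableEigenPy();
//     eigenpy::exposeType<double>();
//   }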
} // namespace eigenpy
#include "eigenpy/details.hpp"
#endif // ifndef __eigenpy_eigenpy_hpp__
/*
 * Copyright 2018-2019, INRIA
 */
#ifndef __eigenpy_exception_hpp__
#define __eigenpy_exception_hpp__
#include <exception>
#include <string>
#include "eigenpy/fwd.hpp"
namespace eigenpy {
/*
 * EigenPy exception. It can be caught from Python (equivalent
 * eigenpy.exception class).
 */
class Exception : public std::exception {
public:
Exception() : message() {}
Exception(const std::string &msg) : message(msg) {}
const char *what() const throw() { return this->getMessage().c_str(); }
~Exception() throw() {}
virtual const std::string &getMessage() const { return message; }
std::string copyMessage() const { return getMessage(); }
/* Call this static function to "enable" the translation of this C++ exception
* in Python. */
static void registerException();
private:
static void translateException(Exception const &e);
static PyObject *pyType;
protected:
std::string message;
};
} // namespace eigenpy
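// Usage sketch (informal): the exception type is registered once at module
// initialization and then surfaces as a Python exception when thrown from
// bound C++ code.
//
//   eigenpy::Exception::registerException();
//   // later, inside a bound function:
//   throw eigenpy::Exception("size mismatch");  // propagated to Python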
#endif  // ifndef __eigenpy_exception_hpp__