Compare revisions (stack-of-tasks/eigenpy)

Changes are shown as if the source revision was being merged into the target revision.

Showing with 1184 additions and 318 deletions
/*
* Copyright 2020-2022 INRIA
* Copyright 2020-2023 INRIA
*/
#ifndef __eigenpy_numpy_allocator_hpp__
#define __eigenpy_numpy_allocator_hpp__
#include "eigenpy/eigen-allocator.hpp"
#include "eigenpy/fwd.hpp"
#include "eigenpy/eigen-allocator.hpp"
#include "eigenpy/numpy-type.hpp"
#include "eigenpy/register.hpp"
namespace eigenpy {
template <typename EigenType, typename BaseType>
struct numpy_allocator_impl;
template <typename EigenType>
struct numpy_allocator_impl_matrix;
template <typename MatType>
struct numpy_allocator_impl<
MatType, Eigen::MatrixBase<typename remove_const_reference<MatType>::type>>
: numpy_allocator_impl_matrix<MatType> {};
template <typename MatType>
struct numpy_allocator_impl<
const MatType,
const Eigen::MatrixBase<typename remove_const_reference<MatType>::type>>
: numpy_allocator_impl_matrix<const MatType> {};
// template <typename MatType>
// struct numpy_allocator_impl<MatType &, Eigen::MatrixBase<MatType> > :
// numpy_allocator_impl_matrix<MatType &>
//{};
template <typename MatType>
struct NumpyAllocator {
struct numpy_allocator_impl<const MatType &, const Eigen::MatrixBase<MatType>>
: numpy_allocator_impl_matrix<const MatType &> {};
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct NumpyAllocator : numpy_allocator_impl<EigenType, BaseType> {};
template <typename MatType>
struct numpy_allocator_impl_matrix {
template <typename SimilarMatrixType>
static PyArrayObject *allocate(
const Eigen::MatrixBase<SimilarMatrixType> &mat, npy_intp nd,
......@@ -31,8 +61,40 @@ struct NumpyAllocator {
}
};
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct numpy_allocator_impl_tensor;
template <typename TensorType>
struct numpy_allocator_impl<TensorType, Eigen::TensorBase<TensorType>>
: numpy_allocator_impl_tensor<TensorType> {};
template <typename TensorType>
struct numpy_allocator_impl<const TensorType,
const Eigen::TensorBase<TensorType>>
: numpy_allocator_impl_tensor<const TensorType> {};
template <typename TensorType>
struct numpy_allocator_impl_tensor {
template <typename TensorDerived>
static PyArrayObject *allocate(const TensorDerived &tensor, npy_intp nd,
npy_intp *shape) {
const int code = Register::getTypeCode<typename TensorDerived::Scalar>();
PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_SimpleNew(
static_cast<int>(nd), shape, code);
// Copy data
EigenAllocator<TensorDerived>::copy(
static_cast<const TensorDerived &>(tensor), pyArray);
return pyArray;
}
};
#endif
template <typename MatType>
struct NumpyAllocator<MatType &> {
struct numpy_allocator_impl_matrix<MatType &> {
template <typename SimilarMatrixType>
static PyArrayObject *allocate(Eigen::PlainObjectBase<SimilarMatrixType> &mat,
npy_intp nd, npy_intp *shape) {
......@@ -58,7 +120,7 @@ struct NumpyAllocator<MatType &> {
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
template <typename MatType, int Options, typename Stride>
struct NumpyAllocator<Eigen::Ref<MatType, Options, Stride> > {
struct numpy_allocator_impl_matrix<Eigen::Ref<MatType, Options, Stride>> {
typedef Eigen::Ref<MatType, Options, Stride> RefType;
static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape) {
......@@ -76,7 +138,12 @@ struct NumpyAllocator<Eigen::Ref<MatType, Options, Stride> > {
outer_stride = reverse_strides ? mat.innerStride()
: mat.outerStride();
#if NPY_ABI_VERSION < 0x02000000
const int elsize = call_PyArray_DescrFromType(Scalar_type_code)->elsize;
#else
const int elsize =
PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
#endif
npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};
PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
......@@ -93,7 +160,7 @@ struct NumpyAllocator<Eigen::Ref<MatType, Options, Stride> > {
#endif
template <typename MatType>
struct NumpyAllocator<const MatType &> {
struct numpy_allocator_impl_matrix<const MatType &> {
template <typename SimilarMatrixType>
static PyArrayObject *allocate(
const Eigen::PlainObjectBase<SimilarMatrixType> &mat, npy_intp nd,
......@@ -122,7 +189,8 @@ struct NumpyAllocator<const MatType &> {
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
template <typename MatType, int Options, typename Stride>
struct NumpyAllocator<const Eigen::Ref<const MatType, Options, Stride> > {
struct numpy_allocator_impl_matrix<
const Eigen::Ref<const MatType, Options, Stride>> {
typedef const Eigen::Ref<const MatType, Options, Stride> RefType;
static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape) {
......@@ -141,7 +209,12 @@ struct NumpyAllocator<const Eigen::Ref<const MatType, Options, Stride> > {
outer_stride = reverse_strides ? mat.innerStride()
: mat.outerStride();
#if NPY_ABI_VERSION < 0x02000000
const int elsize = call_PyArray_DescrFromType(Scalar_type_code)->elsize;
#else
const int elsize =
PyDataType_ELSIZE(call_PyArray_DescrFromType(Scalar_type_code));
#endif
npy_intp strides[2] = {elsize * inner_stride, elsize * outer_stride};
PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
......@@ -156,6 +229,70 @@ struct NumpyAllocator<const Eigen::Ref<const MatType, Options, Stride> > {
}
};
#endif
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType>
struct numpy_allocator_impl_tensor<Eigen::TensorRef<TensorType>> {
typedef Eigen::TensorRef<TensorType> RefType;
static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
npy_intp *shape) {
typedef typename RefType::Scalar Scalar;
static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
enum {
NPY_ARRAY_MEMORY_CONTIGUOUS =
IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
};
if (NumpyType::sharedMemory()) {
const int Scalar_type_code = Register::getTypeCode<Scalar>();
// static const Index NumIndices = TensorType::NumIndices;
// const int elsize =
// call_PyArray_DescrFromType(Scalar_type_code)->elsize; npy_intp
// strides[NumIndices];
PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code, NULL,
const_cast<Scalar *>(tensor.data()),
NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
return pyArray;
} else {
return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
}
}
};
template <typename TensorType>
struct numpy_allocator_impl_tensor<const Eigen::TensorRef<const TensorType>> {
typedef const Eigen::TensorRef<const TensorType> RefType;
static PyArrayObject *allocate(RefType &tensor, npy_intp nd,
npy_intp *shape) {
typedef typename RefType::Scalar Scalar;
static const bool IsRowMajor = TensorType::Options & Eigen::RowMajorBit;
enum {
NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
};
if (NumpyType::sharedMemory()) {
const int Scalar_type_code = Register::getTypeCode<Scalar>();
PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code, NULL,
const_cast<Scalar *>(tensor.data()),
NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
return pyArray;
} else {
return NumpyAllocator<TensorType>::allocate(tensor, nd, shape);
}
}
};
#endif
} // namespace eigenpy
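A minimal usage sketch of the allocator above (the helper name is illustrative; it assumes NumPy has been imported through eigenpy and that the GIL is held): the dense matrix is copied into a freshly allocated NumPy array.

#include <Eigen/Core>

#include "eigenpy/numpy-allocator.hpp"

// Hypothetical helper: allocate a 2-D NumPy array holding a copy of `mat`.
inline PyArrayObject* matrix_to_numpy(const Eigen::MatrixXd& mat) {
  npy_intp shape[2] = {static_cast<npy_intp>(mat.rows()),
                       static_cast<npy_intp>(mat.cols())};
  return eigenpy::NumpyAllocator<Eigen::MatrixXd>::allocate(mat, 2, shape);
}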
......
/*
* Copyright 2014-2019, CNRS
* Copyright 2018-2020, INRIA
* Copyright 2018-2023, INRIA
*/
#ifndef __eigenpy_numpy_map_hpp__
......@@ -11,42 +11,40 @@
#include "eigenpy/stride.hpp"
namespace eigenpy {
template <typename MatType, typename InputScalar, int AlignmentValue,
typename Stride, bool IsVector = MatType::IsVectorAtCompileTime>
struct NumpyMapTraits {};
struct numpy_map_impl_matrix;
/* Wrap a numpy::array with an Eigen::Map. No memory copy. */
template <typename MatType, typename InputScalar,
int AlignmentValue = EIGENPY_NO_ALIGNMENT_VALUE,
typename Stride = typename StrideType<MatType>::type>
struct NumpyMap {
typedef NumpyMapTraits<MatType, InputScalar, AlignmentValue, Stride> Impl;
typedef typename Impl::EigenMap EigenMap;
template <typename EigenType, typename InputScalar, int AlignmentValue,
typename Stride,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct numpy_map_impl;
static EigenMap map(PyArrayObject* pyArray, bool swap_dimensions = false);
};
} // namespace eigenpy
template <typename MatType, typename InputScalar, int AlignmentValue,
typename Stride>
struct numpy_map_impl<MatType, InputScalar, AlignmentValue, Stride,
Eigen::MatrixBase<MatType>>
: numpy_map_impl_matrix<MatType, InputScalar, AlignmentValue, Stride> {};
/* --- DETAILS
* ------------------------------------------------------------------ */
/* --- DETAILS
* ------------------------------------------------------------------ */
/* --- DETAILS
* ------------------------------------------------------------------ */
template <typename MatType, typename InputScalar, int AlignmentValue,
typename Stride>
struct numpy_map_impl<const MatType, InputScalar, AlignmentValue, Stride,
const Eigen::MatrixBase<MatType>>
: numpy_map_impl_matrix<const MatType, InputScalar, AlignmentValue,
Stride> {};
namespace eigenpy {
template <typename MatType, typename InputScalar, int AlignmentValue,
typename Stride>
struct NumpyMapTraits<MatType, InputScalar, AlignmentValue, Stride, false> {
struct numpy_map_impl_matrix<MatType, InputScalar, AlignmentValue, Stride,
false> {
typedef Eigen::Matrix<InputScalar, MatType::RowsAtCompileTime,
MatType::ColsAtCompileTime, MatType::Options>
EquivalentInputMatrixType;
typedef Eigen::Map<EquivalentInputMatrixType, AlignmentValue, Stride>
EigenMap;
static EigenMap mapImpl(PyArrayObject* pyArray,
bool swap_dimensions = false) {
static EigenMap map(PyArrayObject* pyArray, bool swap_dimensions = false) {
enum {
OuterStrideAtCompileTime = Stride::OuterStrideAtCompileTime,
InnerStrideAtCompileTime = Stride::InnerStrideAtCompileTime,
......@@ -135,15 +133,15 @@ struct NumpyMapTraits<MatType, InputScalar, AlignmentValue, Stride, false> {
template <typename MatType, typename InputScalar, int AlignmentValue,
typename Stride>
struct NumpyMapTraits<MatType, InputScalar, AlignmentValue, Stride, true> {
struct numpy_map_impl_matrix<MatType, InputScalar, AlignmentValue, Stride,
true> {
typedef Eigen::Matrix<InputScalar, MatType::RowsAtCompileTime,
MatType::ColsAtCompileTime, MatType::Options>
EquivalentInputMatrixType;
typedef Eigen::Map<EquivalentInputMatrixType, AlignmentValue, Stride>
EigenMap;
static EigenMap mapImpl(PyArrayObject* pyArray,
bool swap_dimensions = false) {
static EigenMap map(PyArrayObject* pyArray, bool swap_dimensions = false) {
EIGENPY_UNUSED_VARIABLE(swap_dimensions);
assert(PyArray_NDIM(pyArray) <= 2);
......@@ -157,12 +155,10 @@ struct NumpyMapTraits<MatType, InputScalar, AlignmentValue, Stride, true> {
else
rowMajor = (PyArray_DIMS(pyArray)[0] > PyArray_DIMS(pyArray)[1]) ? 0 : 1;
assert((PyArray_DIMS(pyArray)[rowMajor] < INT_MAX) &&
(PyArray_STRIDE(pyArray, rowMajor)));
assert(PyArray_DIMS(pyArray)[rowMajor] < INT_MAX);
const int R = (int)PyArray_DIMS(pyArray)[rowMajor];
const long int itemsize = PyArray_ITEMSIZE(pyArray);
const int stride = (int)PyArray_STRIDE(pyArray, rowMajor) / (int)itemsize;
;
if ((MatType::MaxSizeAtCompileTime != R) &&
(MatType::MaxSizeAtCompileTime != Eigen::Dynamic)) {
......@@ -172,17 +168,64 @@ struct NumpyMapTraits<MatType, InputScalar, AlignmentValue, Stride, true> {
InputScalar* pyData = reinterpret_cast<InputScalar*>(PyArray_DATA(pyArray));
assert(Stride(stride).inner() == stride &&
"Stride should be a dynamic stride");
return EigenMap(pyData, R, Stride(stride));
}
};
template <typename MatType, typename InputScalar, int AlignmentValue,
#ifdef EIGENPY_WITH_TENSOR_SUPPORT
template <typename TensorType, typename InputScalar, int AlignmentValue,
typename Stride>
struct numpy_map_impl_tensor;
template <typename TensorType, typename InputScalar, int AlignmentValue,
typename Stride>
struct numpy_map_impl<TensorType, InputScalar, AlignmentValue, Stride,
Eigen::TensorBase<TensorType>>
: numpy_map_impl_tensor<TensorType, InputScalar, AlignmentValue, Stride> {};
template <typename TensorType, typename InputScalar, int AlignmentValue,
typename Stride>
typename NumpyMap<MatType, InputScalar, AlignmentValue, Stride>::EigenMap
NumpyMap<MatType, InputScalar, AlignmentValue, Stride>::map(
PyArrayObject* pyArray, bool swap_dimensions) {
return Impl::mapImpl(pyArray, swap_dimensions);
}
struct numpy_map_impl<const TensorType, InputScalar, AlignmentValue, Stride,
const Eigen::TensorBase<TensorType>>
: numpy_map_impl_tensor<const TensorType, InputScalar, AlignmentValue,
Stride> {};
template <typename TensorType, typename InputScalar, int AlignmentValue,
typename Stride>
struct numpy_map_impl_tensor {
typedef TensorType Tensor;
typedef typename Eigen::internal::traits<TensorType>::Index Index;
static const int Options = Eigen::internal::traits<TensorType>::Options;
static const int NumIndices = TensorType::NumIndices;
typedef Eigen::Tensor<InputScalar, NumIndices, Options, Index>
EquivalentInputTensorType;
typedef typename EquivalentInputTensorType::Dimensions Dimensions;
typedef Eigen::TensorMap<EquivalentInputTensorType, Options> EigenMap;
static EigenMap map(PyArrayObject* pyArray, bool swap_dimensions = false) {
EIGENPY_UNUSED_VARIABLE(swap_dimensions);
assert(PyArray_NDIM(pyArray) == NumIndices || NumIndices == Eigen::Dynamic);
Eigen::DSizes<Index, NumIndices> dimensions;
for (int k = 0; k < PyArray_NDIM(pyArray); ++k)
dimensions[k] = PyArray_DIMS(pyArray)[k];
InputScalar* pyData = reinterpret_cast<InputScalar*>(PyArray_DATA(pyArray));
return EigenMap(pyData, dimensions);
}
};
#endif
/* Wrap a numpy::array with an Eigen::Map. No memory copy. */
template <typename EigenType, typename InputScalar,
int AlignmentValue = EIGENPY_NO_ALIGNMENT_VALUE,
typename Stride = typename StrideType<EigenType>::type>
struct NumpyMap
: numpy_map_impl<EigenType, InputScalar, AlignmentValue, Stride> {};
} // namespace eigenpy
......
/*
* Copyright 2018-2020 INRIA
* Copyright 2018-2023 INRIA
*/
#ifndef __eigenpy_numpy_type_hpp__
......@@ -14,21 +14,57 @@
#include "eigenpy/scalar-conversion.hpp"
namespace eigenpy {
namespace bp = boost::python;
template <typename Scalar>
bool np_type_is_convertible_into_scalar(const int np_type) {
if (static_cast<NPY_TYPES>(NumpyEquivalentType<Scalar>::type_code) >=
NPY_USERDEF)
const auto scalar_np_code =
static_cast<NPY_TYPES>(NumpyEquivalentType<Scalar>::type_code);
if (scalar_np_code >= NPY_USERDEF)
return np_type == Register::getTypeCode<Scalar>();
if (NumpyEquivalentType<Scalar>::type_code == np_type) return true;
if (scalar_np_code == np_type) return true;
// Manage type promotion
switch (np_type) {
case NPY_BOOL:
return FromTypeToType<bool, Scalar>::value;
case NPY_INT8:
return FromTypeToType<int8_t, Scalar>::value;
case NPY_INT16:
return FromTypeToType<int16_t, Scalar>::value;
case NPY_INT32:
return FromTypeToType<int32_t, Scalar>::value;
case NPY_INT64:
return FromTypeToType<int64_t, Scalar>::value;
case NPY_UINT8:
return FromTypeToType<uint8_t, Scalar>::value;
case NPY_UINT16:
return FromTypeToType<uint16_t, Scalar>::value;
case NPY_UINT32:
return FromTypeToType<uint32_t, Scalar>::value;
case NPY_UINT64:
return FromTypeToType<uint64_t, Scalar>::value;
#if defined _WIN32 || defined __CYGWIN__
// Manage NPY_INT on Windows (NPY_INT32 is NPY_LONG).
// See https://github.com/stack-of-tasks/eigenpy/pull/455
case NPY_INT:
return FromTypeToType<int, Scalar>::value;
case NPY_LONG:
return FromTypeToType<long, Scalar>::value;
return FromTypeToType<int32_t, Scalar>::value;
case NPY_UINT:
return FromTypeToType<uint32_t, Scalar>::value;
#endif // WIN32
#if defined __APPLE__
// Manage NPY_LONGLONG on Mac (NPY_INT64 is NPY_LONG).
// long long and long are both the same type
// but NPY_LONGLONG and NPY_LONG are different dtypes.
// See https://github.com/stack-of-tasks/eigenpy/pull/455
case NPY_LONGLONG:
return FromTypeToType<int64_t, Scalar>::value;
case NPY_ULONGLONG:
return FromTypeToType<uint64_t, Scalar>::value;
#endif // MAC
case NPY_FLOAT:
return FromTypeToType<float, Scalar>::value;
case NPY_CFLOAT:
......@@ -46,54 +82,28 @@ bool np_type_is_convertible_into_scalar(const int np_type) {
}
}
enum NP_TYPE { MATRIX_TYPE, ARRAY_TYPE };
struct EIGENPY_DLLAPI NumpyType {
static NumpyType& getInstance();
operator bp::object() { return getInstance().CurrentNumpyType; }
static bp::object make(PyArrayObject* pyArray, bool copy = false);
static bp::object make(PyObject* pyObj, bool copy = false);
static void setNumpyType(bp::object& obj);
static void sharedMemory(const bool value);
static bool sharedMemory();
static void switchToNumpyArray();
static void switchToNumpyMatrix();
static NP_TYPE& getType();
static bp::object getNumpyType();
static const PyTypeObject* getNumpyMatrixType();
static const PyTypeObject* getNumpyArrayType();
static bool isMatrix();
static bool isArray();
protected:
NumpyType();
bp::object CurrentNumpyType;
bp::object pyModule;
// Numpy types
bp::object NumpyMatrixObject;
PyTypeObject* NumpyMatrixType;
// bp::object NumpyAsMatrixObject; PyTypeObject * NumpyAsMatrixType;
bp::object NumpyArrayObject;
PyTypeObject* NumpyArrayType;
NP_TYPE np_type;
bool shared_memory;
};
} // namespace eigenpy
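A small sketch of the shared-memory switch declared above (assuming eigenpy is initialized in the running interpreter): when disabled, conversions to Python copy the Eigen data instead of exposing a view on it.

#include <cassert>

#include "eigenpy/numpy-type.hpp"

// Hypothetical helper: force copies instead of shared views on conversion.
inline void disable_shared_memory() {
  eigenpy::NumpyType::sharedMemory(false);
  assert(!eigenpy::NumpyType::sharedMemory());
}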
......
/*
* Copyright 2020-2022 INRIA
* Copyright 2020-2024 INRIA
*/
#ifndef __eigenpy_numpy_hpp__
#define __eigenpy_numpy_hpp__
#include "eigenpy/fwd.hpp"
#include "eigenpy/config.hpp"
#ifndef PY_ARRAY_UNIQUE_SYMBOL
#define PY_ARRAY_UNIQUE_SYMBOL EIGENPY_ARRAY_API
#endif
// For compatibility with Numpy 2.x. See:
// https://numpy.org/devdocs/reference/c-api/array.html#c.NPY_API_SYMBOL_ATTRIBUTE
#define NPY_API_SYMBOL_ATTRIBUTE EIGENPY_DLLAPI
// When building with MSVC, Python headers use some pragma operator to link
// against the Python DLL.
// Unfortunately, it can link against the wrong build type of the library
// leading to some linking issue.
// Boost::Python provides a helper specifically dedicated to selecting the right
// Python library depending on build type, so let's make use of it.
// Numpy headers drag Python with them. As a result, it
// is necessary to include this helper before including Numpy.
// See: https://github.com/stack-of-tasks/eigenpy/pull/514
#include <boost/python/detail/wrap_python.hpp>
#include <numpy/numpyconfig.h>
#ifdef NPY_1_8_API_VERSION
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#endif
// Allow compiling against NumPy 1.x and 2.x. See:
// https://github.com/numpy/numpy/blob/afea8fd66f6bdbde855f5aff0b4e73eb0213c646/doc/source/reference/c-api/array.rst#L1224
#if NPY_ABI_VERSION < 0x02000000
#define PyArray_DescrProto PyArray_Descr
#endif
#include <numpy/ndarrayobject.h>
#include <numpy/ufuncobject.h>
#if NPY_ABI_VERSION < 0x02000000
static inline PyArray_ArrFuncs* PyDataType_GetArrFuncs(PyArray_Descr* descr) {
return descr->f;
}
#endif
/* PEP 674 disallows using macros as l-values
see : https://peps.python.org/pep-0674/
*/
#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE)
static inline void _Py_SET_TYPE(PyObject* o, PyTypeObject* type) {
Py_TYPE(o) = type;
}
#define Py_SET_TYPE(o, type) _Py_SET_TYPE((PyObject*)(o), type)
#endif
#if defined _WIN32 || defined __CYGWIN__
#define EIGENPY_GET_PY_ARRAY_TYPE(array) \
call_PyArray_MinScalarType(array)->type_num
......@@ -26,68 +63,140 @@
#define EIGENPY_GET_PY_ARRAY_TYPE(array) PyArray_MinScalarType(array)->type_num
#endif
#include <complex>
namespace eigenpy {
void EIGENPY_DLLAPI import_numpy();
int EIGENPY_DLLAPI PyArray_TypeNum(PyTypeObject* type);
// By default, the Scalar is considered as a Python object
template <typename Scalar>
template <typename Scalar, typename Enable = void>
struct NumpyEquivalentType {
enum { type_code = NPY_USERDEF };
};
template <>
struct NumpyEquivalentType<float> {
enum { type_code = NPY_FLOAT };
struct NumpyEquivalentType<bool> {
enum { type_code = NPY_BOOL };
};
template <>
struct NumpyEquivalentType<std::complex<float> > {
enum { type_code = NPY_CFLOAT };
struct NumpyEquivalentType<char> {
enum { type_code = NPY_INT8 };
};
template <>
struct NumpyEquivalentType<double> {
enum { type_code = NPY_DOUBLE };
struct NumpyEquivalentType<unsigned char> {
enum { type_code = NPY_UINT8 };
};
template <>
struct NumpyEquivalentType<std::complex<double> > {
enum { type_code = NPY_CDOUBLE };
struct NumpyEquivalentType<int8_t> {
enum { type_code = NPY_INT8 };
};
template <>
struct NumpyEquivalentType<long double> {
enum { type_code = NPY_LONGDOUBLE };
struct NumpyEquivalentType<int16_t> {
enum { type_code = NPY_INT16 };
};
template <>
struct NumpyEquivalentType<std::complex<long double> > {
enum { type_code = NPY_CLONGDOUBLE };
struct NumpyEquivalentType<uint16_t> {
enum { type_code = NPY_UINT16 };
};
template <>
struct NumpyEquivalentType<bool> {
enum { type_code = NPY_BOOL };
struct NumpyEquivalentType<int32_t> {
enum { type_code = NPY_INT32 };
};
template <>
struct NumpyEquivalentType<uint32_t> {
enum { type_code = NPY_UINT32 };
};
// On Windows, long is a 32-bit type but it's a different type than int
// See https://github.com/stack-of-tasks/eigenpy/pull/455
#if defined _WIN32 || defined __CYGWIN__
template <>
struct NumpyEquivalentType<long> {
enum { type_code = NPY_INT32 };
};
template <>
struct NumpyEquivalentType<unsigned long> {
enum { type_code = NPY_UINT32 };
};
#endif // WIN32
template <>
struct NumpyEquivalentType<int> {
enum { type_code = NPY_INT };
struct NumpyEquivalentType<int64_t> {
enum { type_code = NPY_INT64 };
};
template <>
struct NumpyEquivalentType<unsigned int> {
enum { type_code = NPY_UINT };
struct NumpyEquivalentType<uint64_t> {
enum { type_code = NPY_UINT64 };
};
// On Mac, long is a 64-bit type but it's a different type than int64_t
// See https://github.com/stack-of-tasks/eigenpy/pull/455
#if defined __APPLE__
template <>
struct NumpyEquivalentType<long> {
enum { type_code = NPY_LONG };
enum { type_code = NPY_INT64 };
};
//#if defined _WIN32 || defined __CYGWIN__
template <>
struct NumpyEquivalentType<long long> {
struct NumpyEquivalentType<unsigned long> {
enum { type_code = NPY_UINT64 };
};
#endif // MAC
// On Linux, long long is a 64-bit type but it's a different type than int64_t
// See https://github.com/stack-of-tasks/eigenpy/pull/455
#if defined __linux__
#include <type_traits>
template <typename Scalar>
struct NumpyEquivalentType<
Scalar,
typename std::enable_if<!std::is_same<int64_t, long long>::value &&
std::is_same<Scalar, long long>::value>::type> {
enum { type_code = NPY_LONGLONG };
};
//#else
// template <> struct NumpyEquivalentType<long long> { enum { type_code =
// NPY_LONGLONG };};
//#endif
template <typename Scalar>
struct NumpyEquivalentType<
Scalar, typename std::enable_if<
!std::is_same<uint64_t, unsigned long long>::value &&
std::is_same<Scalar, unsigned long long>::value>::type> {
enum { type_code = NPY_ULONGLONG };
};
#endif // Linux
template <>
struct NumpyEquivalentType<unsigned long> {
enum { type_code = NPY_ULONG };
struct NumpyEquivalentType<float> {
enum { type_code = NPY_FLOAT };
};
template <>
struct NumpyEquivalentType<double> {
enum { type_code = NPY_DOUBLE };
};
template <>
struct NumpyEquivalentType<long double> {
enum { type_code = NPY_LONGDOUBLE };
};
template <>
struct NumpyEquivalentType<std::complex<float>> {
enum { type_code = NPY_CFLOAT };
};
template <>
struct NumpyEquivalentType<std::complex<double>> {
enum { type_code = NPY_CDOUBLE };
};
template <>
struct NumpyEquivalentType<std::complex<long double>> {
enum { type_code = NPY_CLONGDOUBLE };
};
template <typename Scalar>
......@@ -122,7 +231,7 @@ EIGENPY_DLLAPI PyArray_Descr* call_PyArray_DescrFromType(int typenum);
EIGENPY_DLLAPI void call_PyArray_InitArrFuncs(PyArray_ArrFuncs* funcs);
EIGENPY_DLLAPI int call_PyArray_RegisterDataType(PyArray_Descr* dtype);
EIGENPY_DLLAPI int call_PyArray_RegisterDataType(PyArray_DescrProto* dtype);
EIGENPY_DLLAPI int call_PyArray_RegisterCanCast(PyArray_Descr* descr,
int totype,
......@@ -170,7 +279,7 @@ inline void call_PyArray_InitArrFuncs(PyArray_ArrFuncs* funcs) {
PyArray_InitArrFuncs(funcs);
}
inline int call_PyArray_RegisterDataType(PyArray_Descr* dtype) {
inline int call_PyArray_RegisterDataType(PyArray_DescrProto* dtype) {
return PyArray_RegisterDataType(dtype);
}
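A compile-time sketch of the scalar-to-NumPy mapping above (the custom scalar is hypothetical): native scalars resolve to fixed NumPy type codes, while anything else falls back to NPY_USERDEF until it is registered at runtime.

#include <cstdint>

#include "eigenpy/numpy.hpp"

static_assert(eigenpy::NumpyEquivalentType<double>::type_code == NPY_DOUBLE,
              "double maps to NPY_DOUBLE");
static_assert(eigenpy::NumpyEquivalentType<std::int32_t>::type_code == NPY_INT32,
              "int32_t maps to NPY_INT32");

struct MyCustomScalar {};  // hypothetical user-defined scalar
static_assert(
    eigenpy::NumpyEquivalentType<MyCustomScalar>::type_code == NPY_USERDEF,
    "unregistered scalars default to NPY_USERDEF");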
......
///
/// Copyright (c) 2023 CNRS INRIA
///
/// Definitions for exposing boost::optional<T> types.
/// Also works with std::optional.
#ifndef __eigenpy_optional_hpp__
#define __eigenpy_optional_hpp__
#include "eigenpy/fwd.hpp"
#include "eigenpy/eigen-from-python.hpp"
#include "eigenpy/registration.hpp"
#include <boost/optional.hpp>
#ifdef EIGENPY_WITH_CXX17_SUPPORT
#include <optional>
#endif
#ifndef EIGENPY_DEFAULT_OPTIONAL
#define EIGENPY_DEFAULT_OPTIONAL boost::optional
#endif
namespace boost {
namespace python {
namespace converter {
template <typename T>
struct expected_pytype_for_arg<boost::optional<T>>
: expected_pytype_for_arg<T> {};
#ifdef EIGENPY_WITH_CXX17_SUPPORT
template <typename T>
struct expected_pytype_for_arg<std::optional<T>> : expected_pytype_for_arg<T> {
};
#endif
} // namespace converter
} // namespace python
} // namespace boost
namespace eigenpy {
namespace detail {
/// Helper struct to decide which type is the "none" type for a specific
/// optional<T> implementation.
template <template <typename> class OptionalTpl>
struct nullopt_helper {};
template <>
struct nullopt_helper<boost::optional> {
typedef boost::none_t type;
static type value() { return boost::none; }
};
#ifdef EIGENPY_WITH_CXX17_SUPPORT
template <>
struct nullopt_helper<std::optional> {
typedef std::nullopt_t type;
static type value() { return std::nullopt; }
};
#endif
template <typename NoneType>
struct NoneToPython {
static PyObject *convert(const NoneType &) { Py_RETURN_NONE; }
static void registration() {
if (!check_registration<NoneType>()) {
bp::to_python_converter<NoneType, NoneToPython, false>();
}
}
};
template <typename T,
template <typename> class OptionalTpl = EIGENPY_DEFAULT_OPTIONAL>
struct OptionalToPython {
static PyObject *convert(const OptionalTpl<T> &obj) {
if (obj)
return bp::incref(bp::object(*obj).ptr());
else {
return bp::incref(bp::object().ptr()); // None
}
}
static PyTypeObject const *get_pytype() {
return bp::converter::registered_pytype<T>::get_pytype();
}
static void registration() {
if (!check_registration<OptionalTpl<T>>()) {
bp::to_python_converter<OptionalTpl<T>, OptionalToPython, true>();
}
}
};
template <typename T,
template <typename> class OptionalTpl = EIGENPY_DEFAULT_OPTIONAL>
struct OptionalFromPython {
static void *convertible(PyObject *obj_ptr);
static void construct(PyObject *obj_ptr,
bp::converter::rvalue_from_python_stage1_data *memory);
static void registration();
};
template <typename T, template <typename> class OptionalTpl>
void *OptionalFromPython<T, OptionalTpl>::convertible(PyObject *obj_ptr) {
if (obj_ptr == Py_None) {
return obj_ptr;
}
bp::extract<T> bp_obj(obj_ptr);
if (!bp_obj.check())
return 0;
else
return obj_ptr;
}
template <typename T, template <typename> class OptionalTpl>
void OptionalFromPython<T, OptionalTpl>::construct(
PyObject *obj_ptr, bp::converter::rvalue_from_python_stage1_data *memory) {
// create storage
using rvalue_storage_t =
bp::converter::rvalue_from_python_storage<OptionalTpl<T>>;
void *storage =
reinterpret_cast<rvalue_storage_t *>(reinterpret_cast<void *>(memory))
->storage.bytes;
if (obj_ptr == Py_None) {
new (storage) OptionalTpl<T>(nullopt_helper<OptionalTpl>::value());
} else {
const T value = bp::extract<T>(obj_ptr);
new (storage) OptionalTpl<T>(value);
}
memory->convertible = storage;
}
template <typename T, template <typename> class OptionalTpl>
void OptionalFromPython<T, OptionalTpl>::registration() {
bp::converter::registry::push_back(
&convertible, &construct, bp::type_id<OptionalTpl<T>>(),
bp::converter::expected_pytype_for_arg<OptionalTpl<T>>::get_pytype);
}
} // namespace detail
/// Register converters for the type `optional<T>` to Python.
/// By default \tparam optional is `EIGENPY_DEFAULT_OPTIONAL`.
template <typename T,
template <typename> class OptionalTpl = EIGENPY_DEFAULT_OPTIONAL>
struct OptionalConverter {
static void registration() {
detail::OptionalToPython<T, OptionalTpl>::registration();
detail::OptionalFromPython<T, OptionalTpl>::registration();
}
};
} // namespace eigenpy
#endif // __eigenpy_optional_hpp__
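A minimal registration sketch for the converters above (the module and function are illustrative): once OptionalConverter<T>::registration() has run, an empty optional is returned to Python as None, and None converts back into an empty optional.

#include <cmath>

#include <boost/optional.hpp>
#include <boost/python.hpp>

#include "eigenpy/optional.hpp"

// Hypothetical function exposed to Python; an empty result becomes None.
boost::optional<double> positive_sqrt(double x) {
  if (x < 0.) return boost::none;
  return std::sqrt(x);
}

BOOST_PYTHON_MODULE(example_optional) {
  eigenpy::OptionalConverter<double>::registration();
  boost::python::def("positive_sqrt", &positive_sqrt);
}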
//
// Copyright (c) 2019-2020 CNRS INRIA
//
#ifndef __eigenpy_utils_pickle_vector_hpp__
#define __eigenpy_utils_pickle_vector_hpp__
#include <boost/python.hpp>
#include <boost/python/stl_iterator.hpp>
#include <boost/python/tuple.hpp>
namespace eigenpy {
///
/// \brief Create a pickle interface for the std::vector
///
/// \tparam VecType Vector Type to pickle
///
template <typename VecType>
struct PickleVector : boost::python::pickle_suite {
static boost::python::tuple getinitargs(const VecType&) {
return boost::python::make_tuple();
}
static boost::python::tuple getstate(boost::python::object op) {
return boost::python::make_tuple(
boost::python::list(boost::python::extract<const VecType&>(op)()));
}
static void setstate(boost::python::object op, boost::python::tuple tup) {
if (boost::python::len(tup) > 0) {
VecType& o = boost::python::extract<VecType&>(op)();
boost::python::stl_input_iterator<typename VecType::value_type> begin(
tup[0]),
end;
while (begin != end) {
o.push_back(*begin);
++begin;
}
}
}
static bool getstate_manages_dict() { return true; }
};
} // namespace eigenpy
#endif // ifndef __eigenpy_utils_pickle_vector_hpp__
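A minimal usage sketch (the class and module names are illustrative): the pickle suite is attached to a std::vector binding through def_pickle so that instances survive pickling and copy.deepcopy.

#include <vector>

#include <boost/python.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>

#include "eigenpy/pickle-vector.hpp"

BOOST_PYTHON_MODULE(example_pickle) {
  namespace bp = boost::python;
  typedef std::vector<double> StdVecDouble;
  bp::class_<StdVecDouble>("StdVec_double")
      .def(bp::vector_indexing_suite<StdVecDouble>())
      .def_pickle(eigenpy::PickleVector<StdVecDouble>());
}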
/*
* Copyright 2014-2022 CNRS INRIA
* Copyright 2014-2023 CNRS INRIA
*/
#ifndef __eigenpy_quaternion_hpp__
#define __eigenpy_quaternion_hpp__
#include <Eigen/Core>
#include <Eigen/Geometry>
#include "eigenpy/eigenpy.hpp"
#include "eigenpy/exception.hpp"
#include "eigenpy/eigen-from-python.hpp"
namespace boost {
namespace python {
......@@ -18,12 +16,12 @@ namespace converter {
/// \brief Template specialization of rvalue_from_python_data
template <typename Quaternion>
struct rvalue_from_python_data<Eigen::QuaternionBase<Quaternion> const&>
: rvalue_from_python_data_eigen<Quaternion const&> {
: ::eigenpy::rvalue_from_python_data<Quaternion const&> {
EIGENPY_RVALUE_FROM_PYTHON_DATA_INIT(Quaternion const&)
};
template <class Quaternion>
struct implicit<Quaternion, Eigen::QuaternionBase<Quaternion> > {
struct implicit<Quaternion, Eigen::QuaternionBase<Quaternion>> {
typedef Quaternion Source;
typedef Eigen::QuaternionBase<Quaternion> Target;
......@@ -67,13 +65,11 @@ class ExceptionIndex : public Exception {
}
};
namespace bp = boost::python;
template <typename QuaternionDerived>
class QuaternionVisitor;
template <typename Scalar, int Options>
struct call<Eigen::Quaternion<Scalar, Options> > {
struct call<Eigen::Quaternion<Scalar, Options>> {
typedef Eigen::Quaternion<Scalar, Options> Quaternion;
static inline void expose() { QuaternionVisitor<Quaternion>::expose(); }
......@@ -86,7 +82,7 @@ struct call<Eigen::Quaternion<Scalar, Options> > {
template <typename Quaternion>
class QuaternionVisitor
: public bp::def_visitor<QuaternionVisitor<Quaternion> > {
: public bp::def_visitor<QuaternionVisitor<Quaternion>> {
typedef Eigen::QuaternionBase<Quaternion> QuaternionBase;
typedef typename QuaternionBase::Scalar Scalar;
......@@ -220,7 +216,6 @@ class QuaternionVisitor
.def("__ne__", &QuaternionVisitor::__ne__)
.def("__abs__", &Quaternion::norm)
.def("__len__", &QuaternionVisitor::__len__)
.staticmethod("__len__")
.def("__setitem__", &QuaternionVisitor::__setitem__)
.def("__getitem__", &QuaternionVisitor::__getitem__)
.def("assign", &assign<Quaternion>, bp::args("self", "quat"),
......@@ -294,8 +289,8 @@ class QuaternionVisitor
return q;
}
static Quaternion* FromTwoVectors(const Eigen::Ref<Vector3> u,
const Eigen::Ref<Vector3> v) {
static Quaternion* FromTwoVectors(const Eigen::Ref<const Vector3> u,
const Eigen::Ref<const Vector3> v) {
Quaternion* q(new Quaternion);
q->setFromTwoVectors(u, v);
return q;
......@@ -308,12 +303,12 @@ class QuaternionVisitor
static Quaternion* DefaultConstructor() { return new Quaternion; }
static Quaternion* FromOneVector(const Eigen::Ref<Vector4> v) {
static Quaternion* FromOneVector(const Eigen::Ref<const Vector4> v) {
Quaternion* q(new Quaternion(v[3], v[0], v[1], v[2]));
return q;
}
static Quaternion* FromRotationMatrix(const Eigen::Ref<Matrix3> R) {
static Quaternion* FromRotationMatrix(const Eigen::Ref<const Matrix3> R) {
Quaternion* q(new Quaternion(R));
return q;
}
......@@ -353,16 +348,24 @@ class QuaternionVisitor
public:
static void expose() {
bp::class_<Quaternion>("Quaternion",
"Quaternion representing rotation.\n\n"
"Supported operations "
"('q is a Quaternion, 'v' is a Vector3): "
"'q*q' (rotation composition), "
"'q*=q', "
"'q*v' (rotating 'v' by 'q'), "
"'q==q', 'q!=q', 'q[0..3]'.",
bp::no_init)
.def(QuaternionVisitor<Quaternion>());
#if PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 6
typedef EIGENPY_SHARED_PTR_HOLDER_TYPE(Quaternion) HolderType;
#else
typedef ::boost::python::detail::not_specified HolderType;
#endif
bp::class_<Quaternion, HolderType>(
"Quaternion",
"Quaternion representing rotation.\n\n"
"Supported operations "
"('q is a Quaternion, 'v' is a Vector3): "
"'q*q' (rotation composition), "
"'q*=q', "
"'q*v' (rotating 'v' by 'q'), "
"'q==q', 'q!=q', 'q[0..3]'.",
bp::no_init)
.def(QuaternionVisitor<Quaternion>())
.def(IdVisitor<Quaternion>());
// Cast to Eigen::QuaternionBase and vice-versa
bp::implicitly_convertible<Quaternion, QuaternionBase>();
......
......@@ -20,6 +20,25 @@ namespace eigenpy {
struct EIGENPY_DLLAPI Register {
static PyArray_Descr *getPyArrayDescr(PyTypeObject *py_type_ptr);
static PyArray_Descr *getPyArrayDescrFromTypeNum(const int type_num);
template <typename Scalar>
static PyArray_Descr *getPyArrayDescrFromScalarType() {
if (!isNumpyNativeType<Scalar>()) {
const std::type_info &info = typeid(Scalar);
if (instance().type_to_py_type_bindings.find(&info) !=
instance().type_to_py_type_bindings.end()) {
PyTypeObject *py_type = instance().type_to_py_type_bindings[&info];
return instance().py_array_descr_bindings[py_type];
} else
return nullptr;
} else {
PyArray_Descr *new_descr =
call_PyArray_DescrFromType(NumpyEquivalentType<Scalar>::type_code);
return new_descr;
}
}
template <typename Scalar>
static bool isRegistered() {
return isRegistered(Register::getPyType<Scalar>());
......@@ -31,7 +50,6 @@ struct EIGENPY_DLLAPI Register {
template <typename Scalar>
static PyTypeObject *getPyType() {
namespace bp = boost::python;
if (!isNumpyNativeType<Scalar>()) {
const PyTypeObject *const_py_type_ptr =
bp::converter::registered_pytype<Scalar>::get_pytype();
......@@ -53,7 +71,6 @@ struct EIGENPY_DLLAPI Register {
template <typename Scalar>
static PyArray_Descr *getPyArrayDescr() {
namespace bp = boost::python;
if (!isNumpyNativeType<Scalar>()) {
return getPyArrayDescr(getPyType<Scalar>());
} else {
......@@ -89,7 +106,7 @@ struct EIGENPY_DLLAPI Register {
static Register &instance();
private:
Register(){};
Register() {};
struct Compare_PyTypeObject {
bool operator()(const PyTypeObject *a, const PyTypeObject *b) const {
......
......@@ -3,12 +3,14 @@
* Copyright 2018-2019, INRIA
*/
#include "eigenpy/fwd.hpp"
#ifndef __eigenpy_registration_hpp__
#define __eigenpy_registration_hpp__
#include "eigenpy/fwd.hpp"
#include "eigenpy/registration_class.hpp"
namespace eigenpy {
///
/// \brief Check at runtime the registration of the type T inside the boost
/// python registry.
......@@ -19,8 +21,6 @@ namespace eigenpy {
///
template <typename T>
inline bool check_registration() {
namespace bp = boost::python;
const bp::type_info info = bp::type_id<T>();
const bp::converter::registration* reg = bp::converter::registry::query(info);
if (reg == NULL)
......@@ -34,25 +34,44 @@ inline bool check_registration() {
///
/// \brief Symlink to the current scope the already registered class T.
///
/// \returns true if the type T is effectively symlinked.
/// \returns true if the type T is effectively symlinked.
///
/// \tparam T The type to symlink.
///
template <typename T>
inline bool register_symbolic_link_to_registered_type() {
namespace bp = boost::python;
if (eigenpy::check_registration<T>()) {
const bp::type_info info = bp::type_id<T>();
const bp::converter::registration* reg =
bp::converter::registry::query(info);
bp::handle<> class_obj(reg->get_class_object());
bp::incref(class_obj.get());
bp::scope().attr(reg->get_class_object()->tp_name) = bp::object(class_obj);
return true;
}
return false;
}
/// Same as \see register_symbolic_link_to_registered_type() but applies \p
/// visitor on \tparam T if it already exists.
template <typename T, typename Visitor>
inline bool register_symbolic_link_to_registered_type(const Visitor& visitor) {
if (eigenpy::check_registration<T>()) {
const bp::type_info info = bp::type_id<T>();
const bp::converter::registration* reg =
bp::converter::registry::query(info);
bp::handle<> class_obj(reg->get_class_object());
bp::incref(class_obj.get());
bp::object object(class_obj);
bp::scope().attr(reg->get_class_object()->tp_name) = object;
registration_class<T> cl(object);
cl.def(visitor);
return true;
}
return false;
}
} // namespace eigenpy
#endif // ifndef __eigenpy_registration_hpp__
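A minimal usage sketch of the two overloads above (MyType and SizeVisitor are hypothetical): if the class is already registered by another module, it is re-exported into the current scope and extended with extra methods; otherwise it is exposed from scratch.

#include <boost/python.hpp>

#include "eigenpy/registration.hpp"

struct MyType {
  int size() const { return 3; }
};

// Hypothetical visitor adding a __len__ method to the binding.
struct SizeVisitor : boost::python::def_visitor<SizeVisitor> {
  template <class PyClass>
  void visit(PyClass& cl) const {
    cl.def("__len__", &MyType::size);
  }
};

BOOST_PYTHON_MODULE(example_registration) {
  namespace bp = boost::python;
  if (!eigenpy::register_symbolic_link_to_registered_type<MyType>(
          SizeVisitor())) {
    bp::class_<MyType>("MyType").def(SizeVisitor());
  }
}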
/*
* Copyright 2023, INRIA
*/
#ifndef __eigenpy_registration_class_hpp__
#define __eigenpy_registration_class_hpp__
#include <boost/python/class.hpp>
#include "eigenpy/fwd.hpp"
namespace eigenpy {
/*! Copy of the \see boost::python::class_
* This class allows adding methods to an existing class without registering it
* again.
**/
template <class W>
class registration_class {
public:
using self = registration_class;
/// \p object holds the namespace of the class that will be modified
registration_class(bp::object object) : m_object(object) {}
/// \see boost::python::class_::def(bp::def_visitor<Derived> const& visitor)
template <class Visitor>
self& def(Visitor const& visitor) {
visitor.visit(*this);
return *this;
}
template <class DerivedVisitor>
self& def(bp::def_visitor<DerivedVisitor> const& visitor) {
static_cast<DerivedVisitor const&>(visitor).visit(*this);
return *this;
}
/// \see boost::python::class_::def(char const* name, F f)
template <class F>
self& def(char const* name, F f) {
def_impl(bp::detail::unwrap_wrapper((W*)0), name, f,
bp::detail::def_helper<char const*>(0), &f);
return *this;
}
/// \see boost::python::class_::def(char const* name, A1 a1, A2 const& a2)
template <class A1, class A2>
self& def(char const* name, A1 a1, A2 const& a2) {
def_maybe_overloads(name, a1, a2, &a2);
return *this;
}
/// \see boost::python::class_::def(char const* name, Fn fn, A1 const& a1, A2
/// const& a2)
template <class Fn, class A1, class A2>
self& def(char const* name, Fn fn, A1 const& a1, A2 const& a2) {
def_impl(bp::detail::unwrap_wrapper((W*)0), name, fn,
bp::detail::def_helper<A1, A2>(a1, a2), &fn);
return *this;
}
/// \see boost::python::class_::def(char const* name, Fn fn, A1 const& a1, A2
/// const& a2, A3 const& a3)
template <class Fn, class A1, class A2, class A3>
self& def(char const* name, Fn fn, A1 const& a1, A2 const& a2, A3 const& a3) {
def_impl(bp::detail::unwrap_wrapper((W*)0), name, fn,
bp::detail::def_helper<A1, A2, A3>(a1, a2, a3), &fn);
return *this;
}
private:
/// \see boost::python::class_::def_impl(T*, char const* name, Fn fn, Helper
/// const& helper, ...)
template <class T, class Fn, class Helper>
inline void def_impl(T*, char const* name, Fn fn, Helper const& helper, ...) {
bp::objects::add_to_namespace(
m_object, name,
make_function(fn, helper.policies(), helper.keywords(),
bp::detail::get_signature(fn, (T*)0)),
helper.doc());
def_default(name, fn, helper,
boost::mpl::bool_<Helper::has_default_implementation>());
}
/// \see boost::python::class_::def_default(char const* name, Fn, Helper
/// const& helper, boost::mpl::bool_<true>)
template <class Fn, class Helper>
inline void def_default(char const* name, Fn, Helper const& helper,
boost::mpl::bool_<true>) {
bp::detail::error::virtual_function_default<
W, Fn>::must_be_derived_class_member(helper.default_implementation());
bp::objects::add_to_namespace(
m_object, name,
make_function(helper.default_implementation(), helper.policies(),
helper.keywords()));
}
/// \see boost::python::class_::def_default(char const*, Fn, Helper const&,
/// boost::mpl::bool_<false>)
template <class Fn, class Helper>
inline void def_default(char const*, Fn, Helper const&,
boost::mpl::bool_<false>) {}
/// \see boost::python::class_::def_maybe_overloads(char const* name, SigT
/// sig,OverloadsT const& overloads,bp::detail::overloads_base const*)
template <class OverloadsT, class SigT>
void def_maybe_overloads(char const* name, SigT sig,
OverloadsT const& overloads,
bp::detail::overloads_base const*)
{
bp::detail::define_with_defaults(name, overloads, *this,
bp::detail::get_signature(sig));
}
/// \see boost::python::class_::def_maybe_overloads(char const* name, Fn fn,
/// A1 const& a1, ...)
template <class Fn, class A1>
void def_maybe_overloads(char const* name, Fn fn, A1 const& a1, ...) {
def_impl(bp::detail::unwrap_wrapper((W*)0), name, fn,
bp::detail::def_helper<A1>(a1), &fn);
}
private:
bp::object m_object;
};
} // namespace eigenpy
#endif // ifndef __eigenpy_registration_class_hpp__
//
// Copyright (c) 2014-2020 CNRS INRIA
// Copyright (c) 2014-2024 CNRS INRIA
//
#ifndef __eigenpy_scalar_conversion_hpp__
#define __eigenpy_scalar_conversion_hpp__
#include "eigenpy/config.hpp"
#include <boost/numeric/conversion/conversion_traits.hpp>
#include <complex>
namespace eigenpy {
template <typename SCALAR1, typename SCALAR2>
struct FromTypeToType : public boost::false_type {};
template <typename SCALAR>
struct FromTypeToType<SCALAR, SCALAR> : public boost::true_type {};
template <typename Source, typename Target>
struct FromTypeToType
: public boost::mpl::if_c<std::is_same<Source, Target>::value,
std::true_type,
typename boost::numeric::conversion_traits<
Source, Target>::subranged>::type {};
/// FromTypeToType specialization to manage std::complex
template <typename ScalarSource, typename ScalarTarget>
struct FromTypeToType<std::complex<ScalarSource>, std::complex<ScalarTarget>>
: public boost::mpl::if_c<
std::is_same<ScalarSource, ScalarTarget>::value, std::true_type,
typename boost::numeric::conversion_traits<
ScalarSource, ScalarTarget>::subranged>::type {};
template <>
struct FromTypeToType<int, long> : public boost::true_type {};
template <>
struct FromTypeToType<int, float> : public boost::true_type {};
template <>
struct FromTypeToType<int, std::complex<float> > : public boost::true_type {};
template <>
struct FromTypeToType<int, double> : public boost::true_type {};
template <>
struct FromTypeToType<int, std::complex<double> > : public boost::true_type {};
template <>
struct FromTypeToType<int, long double> : public boost::true_type {};
template <>
struct FromTypeToType<int, std::complex<long double> >
: public boost::true_type {};
template <>
struct FromTypeToType<long, float> : public boost::true_type {};
template <>
struct FromTypeToType<long, std::complex<float> > : public boost::true_type {};
template <>
struct FromTypeToType<long, double> : public boost::true_type {};
template <>
struct FromTypeToType<long, std::complex<double> > : public boost::true_type {};
template <>
struct FromTypeToType<long, long double> : public boost::true_type {};
template <>
struct FromTypeToType<long, std::complex<long double> >
: public boost::true_type {};
template <>
struct FromTypeToType<float, std::complex<float> > : public boost::true_type {};
template <>
struct FromTypeToType<float, double> : public boost::true_type {};
template <>
struct FromTypeToType<float, std::complex<double> > : public boost::true_type {
};
template <>
struct FromTypeToType<float, long double> : public boost::true_type {};
template <>
struct FromTypeToType<float, std::complex<long double> >
: public boost::true_type {};
template <>
struct FromTypeToType<double, std::complex<double> > : public boost::true_type {
};
template <>
struct FromTypeToType<double, long double> : public boost::true_type {};
template <>
struct FromTypeToType<double, std::complex<long double> >
: public boost::true_type {};
} // namespace eigenpy
#endif // __eigenpy_scalar_conversion_hpp__
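A compile-time sketch of what the trait reports under the definitions above: value-preserving promotions are accepted while narrowing conversions are rejected.

#include "eigenpy/scalar-conversion.hpp"

static_assert(eigenpy::FromTypeToType<int, double>::value,
              "int promotes losslessly to double");
static_assert(eigenpy::FromTypeToType<float, double>::value,
              "float promotes losslessly to double");
static_assert(!eigenpy::FromTypeToType<double, float>::value,
              "narrowing double to float is rejected");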
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_scipy_allocator_hpp__
#define __eigenpy_scipy_allocator_hpp__
#include "eigenpy/fwd.hpp"
#include "eigenpy/eigen-allocator.hpp"
#include "eigenpy/scipy-type.hpp"
#include "eigenpy/register.hpp"
namespace eigenpy {
template <typename EigenType, typename BaseType>
struct scipy_allocator_impl;
template <typename EigenType>
struct scipy_allocator_impl_sparse_matrix;
template <typename MatType>
struct scipy_allocator_impl<
MatType,
Eigen::SparseMatrixBase<typename remove_const_reference<MatType>::type>>
: scipy_allocator_impl_sparse_matrix<MatType> {};
template <typename MatType>
struct scipy_allocator_impl<const MatType,
const Eigen::SparseMatrixBase<
typename remove_const_reference<MatType>::type>>
: scipy_allocator_impl_sparse_matrix<const MatType> {};
// template <typename MatType>
// struct scipy_allocator_impl<MatType &, Eigen::MatrixBase<MatType> > :
// scipy_allocator_impl_sparse_matrix<MatType &>
//{};
template <typename MatType>
struct scipy_allocator_impl<const MatType &,
const Eigen::SparseMatrixBase<MatType>>
: scipy_allocator_impl_sparse_matrix<const MatType &> {};
template <typename EigenType,
typename BaseType = typename get_eigen_base_type<EigenType>::type>
struct ScipyAllocator : scipy_allocator_impl<EigenType, BaseType> {};
template <typename MatType>
struct scipy_allocator_impl_sparse_matrix {
template <typename SimilarMatrixType>
static PyObject *allocate(
const Eigen::SparseCompressedBase<SimilarMatrixType> &mat_,
bool copy = false) {
EIGENPY_UNUSED_VARIABLE(copy);
typedef typename SimilarMatrixType::Scalar Scalar;
typedef typename SimilarMatrixType::StorageIndex StorageIndex;
enum { IsRowMajor = SimilarMatrixType::IsRowMajor };
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> DataVector;
typedef const Eigen::Map<const DataVector> MapDataVector;
typedef Eigen::Matrix<StorageIndex, Eigen::Dynamic, 1> StorageIndexVector;
typedef Eigen::Matrix<int32_t, Eigen::Dynamic, 1> ScipyStorageIndexVector;
typedef const Eigen::Map<const StorageIndexVector> MapStorageIndexVector;
SimilarMatrixType &mat = mat_.const_cast_derived();
bp::object scipy_sparse_matrix_type =
ScipyType::get_pytype_object<SimilarMatrixType>();
MapDataVector data(mat.valuePtr(), mat.nonZeros());
MapStorageIndexVector outer_indices(
mat.outerIndexPtr(), (IsRowMajor ? mat.rows() : mat.cols()) + 1);
MapStorageIndexVector inner_indices(mat.innerIndexPtr(), mat.nonZeros());
bp::object scipy_sparse_matrix;
if (mat.rows() == 0 &&
mat.cols() == 0) // handle the specific case of empty matrix
{
// PyArray_Descr* npy_type =
// Register::getPyArrayDescrFromScalarType<Scalar>(); bp::dict args;
// args["dtype"] =
// bp::object(bp::handle<>(bp::borrowed(npy_type->typeobj)));
// args["shape"] = bp::object(bp::handle<>(bp::borrowed(Py_None)));
// scipy_sparse_matrix =
// scipy_sparse_matrix_type(*bp::make_tuple(0,0),**args);
scipy_sparse_matrix = scipy_sparse_matrix_type(
Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>(0, 0));
} else if (mat.nonZeros() == 0) {
scipy_sparse_matrix =
scipy_sparse_matrix_type(bp::make_tuple(mat.rows(), mat.cols()));
} else {
scipy_sparse_matrix = scipy_sparse_matrix_type(bp::make_tuple(
DataVector(data),
ScipyStorageIndexVector(inner_indices.template cast<int32_t>()),
ScipyStorageIndexVector(
outer_indices.template cast<int32_t>()))); //,
// bp::make_tuple(mat.rows(),
// mat.cols())));
}
Py_INCREF(scipy_sparse_matrix.ptr());
return scipy_sparse_matrix.ptr();
}
};
// template <typename MatType>
// struct scipy_allocator_impl_sparse_matrix<MatType &> {
// template <typename SimilarMatrixType>
// static PyArrayObject *allocate(Eigen::PlainObjectBase<SimilarMatrixType>
// &mat,
// npy_intp nd, npy_intp *shape) {
// typedef typename SimilarMatrixType::Scalar Scalar;
// enum {
// NPY_ARRAY_MEMORY_CONTIGUOUS =
// SimilarMatrixType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
// };
//
// if (NumpyType::sharedMemory()) {
// const int Scalar_type_code = Register::getTypeCode<Scalar>();
// PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
// getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
// mat.data(), NPY_ARRAY_MEMORY_CONTIGUOUS | NPY_ARRAY_ALIGNED);
//
// return pyArray;
// } else {
// return NumpyAllocator<MatType>::allocate(mat, nd, shape);
// }
// }
// };
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
// template <typename MatType, int Options, typename Stride>
// struct scipy_allocator_impl_sparse_matrix<Eigen::Ref<MatType, Options,
// Stride> > {
// typedef Eigen::Ref<MatType, Options, Stride> RefType;
//
// static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape)
// {
// typedef typename RefType::Scalar Scalar;
// enum {
// NPY_ARRAY_MEMORY_CONTIGUOUS =
// RefType::IsRowMajor ? NPY_ARRAY_CARRAY : NPY_ARRAY_FARRAY
// };
//
// if (NumpyType::sharedMemory()) {
// const int Scalar_type_code = Register::getTypeCode<Scalar>();
// const bool reverse_strides = MatType::IsRowMajor || (mat.rows() == 1);
// Eigen::DenseIndex inner_stride = reverse_strides ? mat.outerStride()
// : mat.innerStride(),
// outer_stride = reverse_strides ? mat.innerStride()
// : mat.outerStride();
//
// const int elsize =
// call_PyArray_DescrFromType(Scalar_type_code)->elsize; npy_intp
// strides[2] = {elsize * inner_stride, elsize * outer_stride};
//
// PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
// getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
// strides, mat.data(), NPY_ARRAY_MEMORY_CONTIGUOUS |
// NPY_ARRAY_ALIGNED);
//
// return pyArray;
// } else {
// return NumpyAllocator<MatType>::allocate(mat, nd, shape);
// }
// }
// };
#endif
// template <typename MatType>
// struct scipy_allocator_impl_sparse_matrix<const MatType &> {
// template <typename SimilarMatrixType>
// static PyArrayObject *allocate(
// const Eigen::PlainObjectBase<SimilarMatrixType> &mat, npy_intp nd,
// npy_intp *shape) {
// typedef typename SimilarMatrixType::Scalar Scalar;
// enum {
// NPY_ARRAY_MEMORY_CONTIGUOUS_RO = SimilarMatrixType::IsRowMajor
// ? NPY_ARRAY_CARRAY_RO
// : NPY_ARRAY_FARRAY_RO
// };
//
// if (NumpyType::sharedMemory()) {
// const int Scalar_type_code = Register::getTypeCode<Scalar>();
// PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
// getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
// const_cast<Scalar *>(mat.data()),
// NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
//
// return pyArray;
// } else {
// return NumpyAllocator<MatType>::allocate(mat, nd, shape);
// }
// }
// };
#if EIGEN_VERSION_AT_LEAST(3, 2, 0)
// template <typename MatType, int Options, typename Stride>
// struct scipy_allocator_impl_sparse_matrix<
// const Eigen::Ref<const MatType, Options, Stride> > {
// typedef const Eigen::Ref<const MatType, Options, Stride> RefType;
//
// static PyArrayObject *allocate(RefType &mat, npy_intp nd, npy_intp *shape)
// {
// typedef typename RefType::Scalar Scalar;
// enum {
// NPY_ARRAY_MEMORY_CONTIGUOUS_RO =
// RefType::IsRowMajor ? NPY_ARRAY_CARRAY_RO : NPY_ARRAY_FARRAY_RO
// };
//
// if (NumpyType::sharedMemory()) {
// const int Scalar_type_code = Register::getTypeCode<Scalar>();
//
// const bool reverse_strides = MatType::IsRowMajor || (mat.rows() == 1);
// Eigen::DenseIndex inner_stride = reverse_strides ? mat.outerStride()
// : mat.innerStride(),
// outer_stride = reverse_strides ? mat.innerStride()
// : mat.outerStride();
//
// const int elsize =
// call_PyArray_DescrFromType(Scalar_type_code)->elsize; npy_intp
// strides[2] = {elsize * inner_stride, elsize * outer_stride};
//
// PyArrayObject *pyArray = (PyArrayObject *)call_PyArray_New(
// getPyArrayType(), static_cast<int>(nd), shape, Scalar_type_code,
// strides, const_cast<Scalar *>(mat.data()),
// NPY_ARRAY_MEMORY_CONTIGUOUS_RO | NPY_ARRAY_ALIGNED);
//
// return pyArray;
// } else {
// return NumpyAllocator<MatType>::allocate(mat, nd, shape);
// }
// }
// };
#endif
} // namespace eigenpy
#endif // ifndef __eigenpy_scipy_allocator_hpp__
/*
* Copyright 2024 INRIA
*/
#ifndef __eigenpy_scipy_type_hpp__
#define __eigenpy_scipy_type_hpp__
#include "eigenpy/fwd.hpp"
#include "eigenpy/register.hpp"
#include "eigenpy/scalar-conversion.hpp"
#include "eigenpy/numpy-type.hpp"
namespace eigenpy {
struct EIGENPY_DLLAPI ScipyType {
static ScipyType& getInstance();
static void sharedMemory(const bool value);
static bool sharedMemory();
static bp::object getScipyType();
static const PyTypeObject* getScipyCSRMatrixType();
static const PyTypeObject* getScipyCSCMatrixType();
template <typename SparseMatrix>
static bp::object get_pytype_object(
const Eigen::SparseMatrixBase<SparseMatrix>* ptr = nullptr) {
EIGENPY_UNUSED_VARIABLE(ptr);
return SparseMatrix::IsRowMajor ? getInstance().csr_matrix_obj
: getInstance().csc_matrix_obj;
}
template <typename SparseMatrix>
static PyTypeObject const* get_pytype(
const Eigen::SparseMatrixBase<SparseMatrix>* ptr = nullptr) {
EIGENPY_UNUSED_VARIABLE(ptr);
return SparseMatrix::IsRowMajor ? getInstance().csr_matrix_type
: getInstance().csc_matrix_type;
}
static int get_numpy_type_num(const bp::object& obj) {
const PyTypeObject* type = Py_TYPE(obj.ptr());
EIGENPY_USED_VARIABLE_ONLY_IN_DEBUG_MODE(type);
assert(type == getInstance().csr_matrix_type ||
type == getInstance().csc_matrix_type);
bp::object dtype = obj.attr("dtype");
const PyArray_Descr* npy_type =
reinterpret_cast<PyArray_Descr*>(dtype.ptr());
return npy_type->type_num;
}
protected:
ScipyType();
bp::object sparse_module;
// SciPy types
bp::object csr_matrix_obj, csc_matrix_obj;
PyTypeObject *csr_matrix_type, *csc_matrix_type;
bool shared_memory;
};
} // namespace eigenpy
#endif // ifndef __eigenpy_scipy_type_hpp__
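A minimal usage sketch (assuming the interpreter and eigenpy are initialized): get_pytype_object selects scipy.sparse.csr_matrix for row-major Eigen sparse matrices and scipy.sparse.csc_matrix for column-major ones.

#include <Eigen/Sparse>

#include "eigenpy/scipy-type.hpp"

// Hypothetical helper: Eigen::SparseMatrix<double> is column-major by default,
// so this returns the scipy.sparse.csc_matrix class object.
inline boost::python::object scipy_class_for_default_sparse() {
  return eigenpy::ScipyType::get_pytype_object<Eigen::SparseMatrix<double>>();
}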
/*
* Copyright 2017, Justin Carpentier, LAAS-CNRS
*
* This file is part of eigenpy.
* eigenpy is free software: you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
* eigenpy is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details. You should
* have received a copy of the GNU Lesser General Public License along
* with eigenpy. If not, see <http://www.gnu.org/licenses/>.
* Copyright 2017 CNRS
* Copyright 2024 Inria
*/
#ifndef __eigenpy_bfgs_preconditioners_hpp__
......@@ -24,11 +13,9 @@
namespace eigenpy {
namespace bp = boost::python;
template <typename Preconditioner>
struct BFGSPreconditionerBaseVisitor
: public bp::def_visitor<BFGSPreconditionerBaseVisitor<Preconditioner> > {
: public bp::def_visitor<BFGSPreconditionerBaseVisitor<Preconditioner>> {
typedef Eigen::VectorXd VectorType;
template <class PyClass>
void visit(PyClass& cl) const {
......@@ -51,6 +38,7 @@ struct BFGSPreconditionerBaseVisitor
static void expose(const std::string& name) {
bp::class_<Preconditioner>(name, bp::no_init)
.def(IdVisitor<Preconditioner>())
.def(BFGSPreconditionerBaseVisitor<Preconditioner>());
}
};
......@@ -58,7 +46,7 @@ struct BFGSPreconditionerBaseVisitor
template <typename Preconditioner>
struct LimitedBFGSPreconditionerBaseVisitor
: public bp::def_visitor<
LimitedBFGSPreconditionerBaseVisitor<Preconditioner> > {
LimitedBFGSPreconditionerBaseVisitor<Preconditioner>> {
template <class PyClass>
void visit(PyClass& cl) const {
cl.def(PreconditionerBaseVisitor<Preconditioner>())
......@@ -70,6 +58,7 @@ struct LimitedBFGSPreconditionerBaseVisitor
static void expose(const std::string& name) {
bp::class_<Preconditioner>(name.c_str(), bp::no_init)
.def(IdVisitor<Preconditioner>())
.def(LimitedBFGSPreconditionerBaseVisitor<Preconditioner>());
}
};
......
/*
* Copyright 2017, Justin Carpentier, LAAS-CNRS
*
* This file is part of eigenpy.
* eigenpy is free software: you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
* eigenpy is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details. You should
* have received a copy of the GNU Lesser General Public License along
* with eigenpy. If not, see <http://www.gnu.org/licenses/>.
* Copyright 2017 CNRS
* Copyright 2024 Inria
*/
#ifndef __eigenpy_basic_preconditioners_hpp__
......@@ -23,18 +12,16 @@
namespace eigenpy {
namespace bp = boost::python;
template <typename Preconditioner>
struct PreconditionerBaseVisitor
: public bp::def_visitor<PreconditionerBaseVisitor<Preconditioner> > {
: public bp::def_visitor<PreconditionerBaseVisitor<Preconditioner>> {
typedef Eigen::MatrixXd MatrixType;
typedef Eigen::VectorXd VectorType;
template <class PyClass>
void visit(PyClass& cl) const {
cl.def(bp::init<>("Default constructor"))
.def(bp::init<MatrixType>(bp::arg("A"),
.def(bp::init<MatrixType>(bp::args("self", "A"),
"Initialize the preconditioner with matrix A "
"for further Az=b solving."))
#if EIGEN_VERSION_AT_LEAST(3, 3, 0)
......@@ -64,7 +51,7 @@ struct PreconditionerBaseVisitor
template <typename Scalar>
struct DiagonalPreconditionerVisitor
: PreconditionerBaseVisitor<Eigen::DiagonalPreconditioner<Scalar> > {
: PreconditionerBaseVisitor<Eigen::DiagonalPreconditioner<Scalar>> {
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> MatrixType;
typedef Eigen::DiagonalPreconditioner<Scalar> Preconditioner;
......@@ -83,7 +70,8 @@ struct DiagonalPreconditionerVisitor
"A preconditioner based on the digonal entrie.\n"
"This class allows to approximately solve for A.x = b problems "
"assuming A is a diagonal matrix.",
bp::no_init);
bp::no_init)
.def(IdVisitor<Preconditioner>());
}
};
......@@ -91,7 +79,7 @@ struct DiagonalPreconditionerVisitor
template <typename Scalar>
struct LeastSquareDiagonalPreconditionerVisitor
: PreconditionerBaseVisitor<
Eigen::LeastSquareDiagonalPreconditioner<Scalar> > {
Eigen::LeastSquareDiagonalPreconditioner<Scalar>> {
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> MatrixType;
typedef Eigen::LeastSquareDiagonalPreconditioner<Scalar> Preconditioner;
......@@ -105,7 +93,8 @@ struct LeastSquareDiagonalPreconditionerVisitor
"his class allows to approximately solve for A' A x = A' b problems "
"assuming A' A is a diagonal matrix.",
bp::no_init)
.def(DiagonalPreconditionerVisitor<Scalar>());
.def(DiagonalPreconditionerVisitor<Scalar>())
.def(IdVisitor<Preconditioner>());
}
};
#endif
......@@ -119,7 +108,8 @@ struct IdentityPreconditionerVisitor
static void expose() {
bp::class_<Preconditioner>("IdentityPreconditioner", bp::no_init)
.def(PreconditionerBaseVisitor<Preconditioner>());
.def(PreconditionerBaseVisitor<Preconditioner>())
.def(IdVisitor<Preconditioner>());
}
};
......
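// Editor's note: a hedged sketch of how the visitors above get attached to a
// Boost.Python class, mirroring what the expose() helpers do internally. The
// module name is arbitrary and the include path of the header above is
// assumed; enableEigenPy() is eigenpy's entry point that registers the dense
// matrix converters the preconditioner API relies on.
#include <Eigen/IterativeLinearSolvers>
#include <boost/python.hpp>

#include "eigenpy/eigenpy.hpp"
#include "eigenpy/solvers/BasicPreconditioners.hpp"  // assumed path

BOOST_PYTHON_MODULE(example_preconditioners) {
  eigenpy::enableEigenPy();
  typedef Eigen::DiagonalPreconditioner<double> Preconditioner;
  // Same registration pattern as DiagonalPreconditionerVisitor::expose():
  // a non-default-constructible Python class decorated by the base visitor.
  boost::python::class_<Preconditioner>("DiagonalPreconditioner",
                                        boost::python::no_init)
      .def(eigenpy::PreconditionerBaseVisitor<Preconditioner>());
}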
/*
* Copyright 2017, Justin Carpentier, LAAS-CNRS
*
* This file is part of eigenpy.
* eigenpy is free software: you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
* eigenpy is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details. You should
* have received a copy of the GNU Lesser General Public License along
* with eigenpy. If not, see <http://www.gnu.org/licenses/>.
* Copyright 2017 CNRS
*/
#ifndef __eigenpy_conjugate_gradient_hpp__
......@@ -24,12 +12,10 @@
namespace eigenpy {
namespace bp = boost::python;
template <typename ConjugateGradient>
struct ConjugateGradientVisitor
: public boost::python::def_visitor<
ConjugateGradientVisitor<ConjugateGradient> > {
ConjugateGradientVisitor<ConjugateGradient>> {
typedef typename ConjugateGradient::MatrixType MatrixType;
template <class PyClass>
......@@ -45,7 +31,8 @@ struct ConjugateGradientVisitor
static void expose(const std::string& name = "ConjugateGradient") {
bp::class_<ConjugateGradient, boost::noncopyable>(name.c_str(), bp::no_init)
.def(ConjugateGradientVisitor<ConjugateGradient>());
.def(ConjugateGradientVisitor<ConjugateGradient>())
.def(IdVisitor<ConjugateGradient>());
}
};
......
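// Editor's note: a hedged sketch showing ConjugateGradientVisitor in use.
// The dense MatrixXd instantiation and the include path are assumptions; any
// MatrixType accepted by Eigen::ConjugateGradient follows the same pattern,
// and expose() already defaults its argument to "ConjugateGradient".
#include <Eigen/IterativeLinearSolvers>
#include <boost/python.hpp>

#include "eigenpy/eigenpy.hpp"
#include "eigenpy/solvers/ConjugateGradient.hpp"  // assumed path

BOOST_PYTHON_MODULE(example_conjugate_gradient) {
  eigenpy::enableEigenPy();
  typedef Eigen::ConjugateGradient<Eigen::MatrixXd,
                                   Eigen::Lower | Eigen::Upper>
      CG;
  // Registers a non-copyable Python class and attaches the visitor above.
  eigenpy::ConjugateGradientVisitor<CG>::expose("ConjugateGradient");
}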
/*
* Copyright 2017, Justin Carpentier, LAAS-CNRS
*
* This file is part of eigenpy.
* eigenpy is free software: you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
* eigenpy is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details. You should
* have received a copy of the GNU Lesser General Public License along
* with eigenpy. If not, see <http://www.gnu.org/licenses/>.
* Copyright 2017 CNRS
*/
#ifndef __eigenpy_iterative_solver_base_hpp__
......@@ -22,11 +10,9 @@
namespace eigenpy {
namespace bp = boost::python;
template <typename IterativeSolver>
struct IterativeSolverVisitor : public boost::python::def_visitor<
IterativeSolverVisitor<IterativeSolver> > {
IterativeSolverVisitor<IterativeSolver>> {
typedef typename IterativeSolver::MatrixType MatrixType;
typedef typename IterativeSolver::Preconditioner Preconditioner;
typedef Eigen::VectorXd VectorType;
......
/*
* Copyright 2017-2018, Justin Carpentier, LAAS-CNRS
*
* This file is part of eigenpy.
* eigenpy is free software: you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
* eigenpy is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details. You should
* have received a copy of the GNU Lesser General Public License along
* with eigenpy. If not, see <http://www.gnu.org/licenses/>.
* Copyright 2017-2018 CNRS
*/
#ifndef __eigenpy_least_square_conjugate_gradient_hpp__
......@@ -24,12 +12,10 @@
namespace eigenpy {
namespace bp = boost::python;
template <typename LeastSquaresConjugateGradient>
struct LeastSquaresConjugateGradientVisitor
: public boost::python::def_visitor<LeastSquaresConjugateGradientVisitor<
LeastSquaresConjugateGradient> > {
: public boost::python::def_visitor<
LeastSquaresConjugateGradientVisitor<LeastSquaresConjugateGradient>> {
typedef Eigen::MatrixXd MatrixType;
template <class PyClass>
......@@ -48,7 +34,8 @@ struct LeastSquaresConjugateGradientVisitor
"LeastSquaresConjugateGradient", bp::no_init)
.def(IterativeSolverVisitor<LeastSquaresConjugateGradient>())
.def(LeastSquaresConjugateGradientVisitor<
LeastSquaresConjugateGradient>());
LeastSquaresConjugateGradient>())
.def(IdVisitor<LeastSquaresConjugateGradient>());
}
};
......
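// Editor's note: the least-squares solver follows the same recipe as the
// ConjugateGradient sketch above. Its expose() hard-codes the Python class
// name (see the bp::class_ call above), so the no-argument call below is an
// assumption about its exact signature; the include path is assumed as well.
#include <Eigen/IterativeLinearSolvers>
#include <boost/python.hpp>

#include "eigenpy/eigenpy.hpp"
#include "eigenpy/solvers/LeastSquaresConjugateGradient.hpp"  // assumed path

BOOST_PYTHON_MODULE(example_lscg) {
  eigenpy::enableEigenPy();
  typedef Eigen::LeastSquaresConjugateGradient<Eigen::MatrixXd> LSCG;
  eigenpy::LeastSquaresConjugateGradientVisitor<LSCG>::expose();
}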
/*
* Copyright 2017, Justin Carpentier, LAAS-CNRS
*
* This file is part of eigenpy.
* eigenpy is free software: you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
* eigenpy is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details. You should
* have received a copy of the GNU Lesser General Public License along
* with eigenpy. If not, see <http://www.gnu.org/licenses/>.
* Copyright 2017 CNRS
*/
#ifndef __eigenpy_sparse_solver_base_hpp__
......@@ -21,11 +9,9 @@
namespace eigenpy {
namespace bp = boost::python;
template <typename SparseSolver>
struct SparseSolverVisitor
: public bp::def_visitor<SparseSolverVisitor<SparseSolver> > {
: public bp::def_visitor<SparseSolverVisitor<SparseSolver>> {
typedef Eigen::VectorXd VectorType;
template <class PyClass>
......
/*
* Copyright 2017, Justin Carpentier, LAAS-CNRS
*
* This file is part of eigenpy.
* eigenpy is free software: you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
* eigenpy is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details. You should
* have received a copy of the GNU Lesser General Public License along
* with eigenpy. If not, see <http://www.gnu.org/licenses/>.
* Copyright 2017 CNRS
*/
#ifndef __eigenpy_preconditioners_hpp__
......