# Find FLANN, a Fast Library for Approximate Nearest Neighbors
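#
# FLANN_FOUND - True if FLANN headers of a sufficient version were found.
# FLANN_INCLUDE_DIRS - Parent directory of the flann/ header directory.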
include(FindPackageHandleStandardArgs)
find_path(FLANN_INCLUDE_DIR flann.hpp PATH_SUFFIXES flann)
if (FLANN_INCLUDE_DIR)
file(READ "${FLANN_INCLUDE_DIR}/config.h" FLANN_CONFIG)
string(REGEX REPLACE ".*FLANN_VERSION_ \"([0-9.]+)\".*" "\\1" FLANN_VERSION ${FLANN_CONFIG})
if(NOT FLANN_VERSION VERSION_LESS flann_FIND_VERSION)
string(REGEX REPLACE "/flann$" "" FLANN_INCLUDE_DIRS ${FLANN_INCLUDE_DIR})
endif()
endif()
find_package_handle_standard_args(flann DEFAULT_MSG FLANN_INCLUDE_DIRS)
# this module was taken from http://trac.evemu.org/browser/trunk/cmake/FindTinyXML.cmake
# - Find TinyXML
# Find the native TinyXML includes and library
#
# TINYXML_FOUND - True if TinyXML found.
# TINYXML_INCLUDE_DIR - where to find tinyxml.h, etc.
# TINYXML_LIBRARIES - List of libraries when using TinyXML.
#
INCLUDE( "FindPackageHandleStandardArgs" )
FIND_PATH( TINYXML_INCLUDE_DIRS "tinyxml.h"
PATH_SUFFIXES "tinyxml" )
FIND_LIBRARY( TINYXML_LIBRARY_DIRS
NAMES "tinyxml"
PATH_SUFFIXES "tinyxml" )
# handle the QUIETLY and REQUIRED arguments and set TINYXML_FOUND to TRUE if
# all listed variables are TRUE
FIND_PACKAGE_HANDLE_STANDARD_ARGS( "TinyXML" DEFAULT_MSG TINYXML_INCLUDE_DIRS TINYXML_LIBRARY_DIRS )
Dependencies:
============
- Boost (thread, date_time, unit_test_framework, filesystem)
- libccd (available at http://libccd.danfis.cz/)
- octomap (optional dependency, available at http://octomap.github.com)
Boost and libccd are mandatory dependencies. If octomap is not found,
collision detection with octrees will not be possible.
For installation, CMake will also be needed (http://cmake.org).
Install:
=======
* Linux / Mac OS:
The CMakeLists.txt can be used to generate Makefiles. For example, one may run:
mkdir build
cd build
cmake ..
make -jN # N is the maximum number of parallel compile jobs
Once the compilation is finished,
make install
will install the project. To specify the installation prefix,
pass the parameter -DCMAKE_INSTALL_PREFIX=/my/prefix/ to the "cmake .." command above.
* Visual Studio:
The CMakeLists.txt can be used to generate a Visual Studio project, using the cmake build tool.
Software License Agreement (BSD License)
Copyright (c) 2008-2014, Willow Garage, Inc.
Copyright (c) 2014-2015, Open Source Robotics Foundation
Copyright (c) 2014-2023, CNRS
Copyright (c) 2018-2025, INRIA
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Open Source Robotics Foundation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
FCL -- The Flexible Collision Library
===
# Coal — An extension of the Flexible Collision Library
This library includes various techniques for efficient collision detection and proximity computation.
<p align="center">
<a href="https://gepgitlab.laas.fr/humanoid-path-planner/hpp-fcl/commits/master/"><img src="https://gepgitlab.laas.fr/humanoid-path-planner/hpp-fcl/badges/master/pipeline.svg" alt="Pipeline status"/></a>
<a href="https://gepettoweb.laas.fr/hpp/hpp-fcl/doxygen-html/index.html"><img src="https://img.shields.io/badge/docs-online-brightgreen" alt="Documentation"/></a>
<a href="http://projects.laas.fr/gepetto/doc/humanoid-path-planner/hpp-fcl/master/coverage/"><img src="https://gepgitlab.laas.fr/humanoid-path-planner/hpp-fcl/badges/master/coverage.svg?job=doc-coverage" alt="Coverage report"/></a>
<a href="https://anaconda.org/conda-forge/hpp-fcl"><img src="https://img.shields.io/conda/dn/conda-forge/hpp-fcl.svg" alt="Conda Downloads"/></a>
<a href="https://anaconda.org/conda-forge/hpp-fcl"><img src="https://img.shields.io/conda/vn/conda-forge/hpp-fcl.svg" alt="Conda Version"/></a>
<a href="https://badge.fury.io/py/hpp-fcl"><img src="https://badge.fury.io/py/hpp-fcl.svg" alt="PyPI version"></a>
<a href="https://github.com/psf/black"><img alt="black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
<a href="https://github.com/astral-sh/ruff"><img alt="ruff" src="https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json"></a>
</p>
Supported shapes:
- sphere
- box
- cone
- cylinder
- mesh
[FCL](https://github.com/flexible-collision-library/fcl) was forked in 2015, creating a new project called HPP-FCL.
Since then, a large part of the code has been rewritten or removed (unused and untested code), and new features have been introduced (see below).
Due to these major changes, it was decided in 2024 to rename the HPP-FCL project to **Coal**.
Collision checking with octrees is also supported; octrees are represented using the [octomap library](http://octomap.github.com), as sketched below.
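As a hedged illustration of the octree API (the `makeOctree` helper and its signature are assumed to match the former HPP-FCL bindings; double-check against the installed version):

```python
import numpy as np
import coal

# Build an octree from an N x 3 point cloud with a 1 cm resolution.
# The resulting octree is a collision geometry like any other.
points = np.random.rand(1000, 3)
octree = coal.makeOctree(points, 0.01)  # assumed helper, mirroring HPP-FCL

box = coal.Box(1.0, 1.0, 1.0)
req, res = coal.CollisionRequest(), coal.CollisionResult()
coal.collide(octree, coal.Transform3s(), box, coal.Transform3s(), req, res)
print("Octree-box collision:", res.isCollision())
```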
If you use **Coal** in your projects and research papers, we would appreciate it if you would [cite it](https://raw.githubusercontent.com/coal-library/coal/devel/CITATION.bib).
## New features
Compared to the original [FCL](https://github.com/flexible-collision-library/fcl) library, the main new features are:
- dedicated and efficient implementations of the GJK and the EPA algorithms (we do not rely on [libccd](https://github.com/danfis/libccd))
- the support of safety margins for collision detection
- an accelerated version of collision detection *à la Nesterov*, which leads to increased performance (up to a factor of 2). More details are available in this [paper](https://hal.archives-ouvertes.fr/hal-03662157/)
- the computation of a lower bound of the distance between two objects when collision checking is performed and no collision is found (see the sketch below)
- the implementation of Python bindings for easy code prototyping
- the support of new geometries such as height fields, capsules, ellipsoids, etc.
- enhanced reliability, with fixes for a myriad of bugs
- efficient computation of **contact points** and **contact patches** between objects
- full support of object serialization via Boost.Serialization
Note: the broad phase was reintroduced by [Justin Carpentier](https://github.com/jcarpent) in 2022, based on the FCL version 0.7.0.
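To illustrate the safety margin and the distance lower bound, here is a hedged Python sketch: the `security_margin` field appears in the examples further below, while the `distance_lower_bound` field of `CollisionResult` is an assumption carried over from HPP-FCL.

```python
import numpy as np
import coal

# Two unit spheres whose centers are 3 apart: the separation distance is 1.
s1, s2 = coal.Sphere(1.0), coal.Sphere(1.0)
T1 = coal.Transform3s()
T2 = coal.Transform3s()
T2.setTranslation(np.array([3.0, 0.0, 0.0]))

req = coal.CollisionRequest()
req.security_margin = 0.1  # report a collision if the shapes get closer than 0.1
res = coal.CollisionResult()
coal.collide(s1, T1, s2, T2, req, res)

print(res.isCollision())  # False: the separation (1.0) exceeds the margin
# When no collision is found, a lower bound on the distance between the two
# objects is computed as a by-product of the collision test:
print(res.distance_lower_bound)  # assumed field name, as in HPP-FCL
```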
This project is now used in several robotics frameworks, such as [Pinocchio](https://github.com/stack-of-tasks/pinocchio), an open-source library which implements efficient and versatile rigid-body dynamics algorithms, and the [Humanoid Path Planner](https://humanoid-path-planner.github.io/hpp-doc), an open-source library for motion and manipulation planning. **Coal** has also recently been used to develop [Simple](https://github.com/Simple-Robotics/Simple), a new (differentiable) and efficient simulator for robotics and beyond.
## A high-performance library
Unlike the original FCL library, Coal implements the well-established [GJK algorithm](https://en.wikipedia.org/wiki/Gilbert%E2%80%93Johnson%E2%80%93Keerthi_distance_algorithm) and [its variants](https://hal.archives-ouvertes.fr/hal-03662157/) for collision detection and distance computation. These implementations lead to state-of-the-art performance, as shown in the figures below.
On the one hand, we have benchmarked Coal against major state-of-the-art software alternatives:
1. the [Bullet simulator](https://github.com/bulletphysics/bullet3),
2. the original [FCL library](https://github.com/flexible-collision-library/fcl) (used in the [Drake framework](https://drake.mit.edu)),
3. the [libccd library](https://github.com/danfis/libccd) (used in [MuJoCo](http://mujoco.org/)).
The results are depicted in the following figure, which notably shows that the accelerated variants of GJK outperform the alternatives by a large margin (from 5x up to 15x faster).
Note that the y-axis is in log scale.
<p align="center">
<img src="./doc/images/coal-vs-the-rest-of-the-world.png" width="600" alt="Coal vs the rest of the world" align="center"/>
</p>
On the other hand, why do we care about dedicated collision detection solvers like GJK for the narrow phase? Why can't we simply formulate the collision detection problem as a quadratic program and call an off-the-shelf optimization solver like [ProxQP](https://github.com/Simple-Robotics/proxsuite)? Here is why:
<p align="center">
<img src="./doc/images/coal-performances.jpg" width="600" alt="Coal vs generic QP solvers" align="center"/>
</p>
One can observe that GJK-based approaches largely outperform solutions based on classic optimization solvers (e.g., a QP solver like [ProxQP](https://github.com/Simple-Robotics/proxsuite)), notably for large geometries composed of tens or hundreds of vertices.
## Open-source projects relying on Coal
- [Pinocchio](https://github.com/stack-of-tasks/pinocchio) A fast and flexible implementation of Rigid Body Dynamics algorithms and their analytical derivatives.
- [IfcOpenShell](https://github.com/IfcOpenShell/IfcOpenShell) Open source IFC library and geometry engine.
- [Crocoddyl](https://github.com/loco-3d/crocoddyl) Software for model predictive control on complex robotics platforms.
- [TSID](https://github.com/stack-of-tasks/tsid/) Software that implements Task Space Inverse Dynamics based on QP.
- [HPP](https://humanoid-path-planner.github.io/hpp-doc/) An SDK that implements motion planners for humanoids and other robots.
- [Jiminy](https://github.com/duburcqa/jiminy) A simulator based on Pinocchio.
- [ocs2](https://github.com/leggedrobotics/ocs2) A toolbox for Optimal Control for Switched Systems (OCS2).
## Installation
### Conda
Coal can be installed from the [conda-forge channel](https://anaconda.org/conda-forge/coal):
```bash
conda install coal -c conda-forge
```
## Build
You can find build instructions [here](./development/build.md).
## C++ example
Both the C++ library and the Python bindings can be installed with `conda install coal -c conda-forge`.
The `.so` library, include files and Python bindings will then be installed under `$CONDA_PREFIX/lib`, `$CONDA_PREFIX/include` and `$CONDA_PREFIX/lib/python3.XX/site-packages`.
Here is an example of using Coal in C++:
```cpp
#include "coal/math/transform.h"
#include "coal/mesh_loader/loader.h"
#include "coal/BVH/BVH_model.h"
#include "coal/collision.h"
#include "coal/collision_data.h"
#include <iostream>
#include <memory>
// Function to load a convex mesh from a `.obj`, `.stl` or `.dae` file.
//
// This function imports the object inside the file as a BVHModel, i.e. a point cloud
// which is hierarchically transformed into a tree of bounding volumes.
// The leaves of this tree are the individual points of the point cloud
// stored in the `.obj` file.
// This BVH can then be used for collision detection.
//
// For better computational efficiency, we sometimes prefer to work with
// the convex hull of the point cloud. This ensures that the underlying object
// is convex and thus very fast collision detection algorithms such as
// GJK or EPA can be called with this object.
// Consequently, after creating the BVH structure from the point cloud, this function
// also computes its convex hull.
std::shared_ptr<coal::ConvexBase> loadConvexMesh(const std::string& file_name) {
coal::NODE_TYPE bv_type = coal::BV_AABB;
coal::MeshLoader loader(bv_type);
coal::BVHModelPtr_t bvh = loader.load(file_name);
bvh->buildConvexHull(true, "Qt");
return bvh->convex;
}
int main() {
// Create the coal shapes.
// Coal supports many primitive shapes: boxes, spheres, capsules, cylinders, ellipsoids, cones, planes,
// halfspace and convex meshes (i.e. convex hulls of clouds of points).
// It also supports BVHs (bounding volumes hierarchies), height-fields and octrees.
std::shared_ptr<coal::Ellipsoid> shape1 = std::make_shared<coal::Ellipsoid>(0.7, 1.0, 0.8);
std::shared_ptr<coal::ConvexBase> shape2 = loadConvexMesh("../path/to/mesh/file.obj");
// Define the shapes' placement in 3D space
coal::Transform3s T1;
T1.setQuatRotation(coal::Quaternion3f::UnitRandom());
T1.setTranslation(coal::Vec3s::Random());
coal::Transform3s T2 = coal::Transform3s::Identity();
T2.setQuatRotation(coal::Quaternion3f::UnitRandom());
T2.setTranslation(coal::Vec3s::Random());
// Define collision requests and results.
//
// The collision request allows setting parameters for the collision pair.
// For example, we can set a positive or negative security margin.
// If the distance between the shapes is less than the security margin, the shapes
// will be considered in collision.
// Setting a positive security margin can be useful in motion planning,
// e.g., to prevent shapes from getting too close to one another.
// In physics simulation, allowing a negative security margin may be useful to stabilize the simulation.
coal::CollisionRequest col_req;
col_req.security_margin = 1e-1;
// A collision result stores the result of the collision test (signed distance between the shapes,
// witness points location, normal etc.)
coal::CollisionResult col_res;
// Collision call
coal::collide(shape1.get(), T1, shape2.get(), T2, col_req, col_res);
// We can access the collision result once it has been populated
std::cout << "Collision? " << col_res.isCollision() << "\n";
if (col_res.isCollision()) {
coal::Contact contact = col_res.getContact(0);
// The penetration depth does **not** take into account the security margin.
// Consequently, the penetration depth is the true signed distance which separates the shapes.
// To get the distance that takes the security margin into account, simply add the two together.
std::cout << "Penetration depth: " << contact.penetration_depth << "\n";
std::cout << "Distance between the shapes including the security margin: " << contact.penetration_depth + col_req.security_margin << "\n";
std::cout << "Witness point on shape1: " << contact.nearest_points[0].transpose() << "\n";
std::cout << "Witness point on shape2: " << contact.nearest_points[1].transpose() << "\n";
std::cout << "Normal: " << contact.normal.transpose() << "\n";
}
// Before calling another collision test, it is important to clear the previous results stored in the collision result.
col_res.clear();
return 0;
}
```
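To compile the example above against a conda-installed Coal, something along these lines should work (a sketch, assuming the snippet is saved as `example.cpp`; the `libcoal` library name and the Eigen include path are assumptions to verify on your system):

```bash
g++ -std=c++11 example.cpp -o example \
    -I$CONDA_PREFIX/include -I$CONDA_PREFIX/include/eigen3 \
    -L$CONDA_PREFIX/lib -Wl,-rpath,$CONDA_PREFIX/lib -lcoal
```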
## Python example
Here is the C++ example from above translated into Python, using the Python bindings of Coal:
```python
import numpy as np
import coal
# Optional:
# The Pinocchio library is a rigid-body algorithms library which provides a handy SE3 module.
# It can be installed with `conda install pinocchio -c conda-forge`.
# Installing pinocchio also installs coal.
import pinocchio as pin
def loadConvexMesh(file_name: str):
loader = coal.MeshLoader()
bvh: coal.BVHModelBase = loader.load(file_name)
bvh.buildConvexHull(True, "Qt")
return bvh.convex
if __name__ == "__main__":
# Create coal shapes
shape1 = coal.Ellipsoid(0.7, 1.0, 0.8)
shape2 = loadConvexMesh("../path/to/mesh/file.obj")
# Define the shapes' placement in 3D space
T1 = coal.Transform3s()
T1.setTranslation(pin.SE3.Random().translation)
T1.setRotation(pin.SE3.Random().rotation)
T2 = coal.Transform3s()
# Using np arrays also works
T1.setTranslation(np.random.rand(3))
T2.setRotation(pin.SE3.Random().rotation)
# Define collision requests and results
col_req = coal.CollisionRequest()
col_res = coal.CollisionResult()
# Collision call
coal.collide(shape1, T1, shape2, T2, col_req, col_res)
# Accessing the collision result once it has been populated
print("Is collision? ", {col_res.isCollision()})
if col_res.isCollision():
contact: coal.Contact = col_res.getContact(0)
print("Penetration depth: ", contact.penetration_depth)
print("Distance between the shapes including the security margin: ", contact.penetration_depth + col_req.security_margin)
print("Witness point shape1: ", contact.getNearestPoint1())
print("Witness point shape2: ", contact.getNearestPoint2())
print("Normal: ", contact.normal)
# Before running another collision call, it is important to clear the old one
col_res.clear()
```
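Beyond collision checking, Coal also performs distance computation between shapes. Here is a hedged sketch; the `DistanceRequest`, `DistanceResult` and `min_distance` names mirror the HPP-FCL bindings and should be verified against the installed version:

```python
import numpy as np
import coal

# A unit box and a sphere of radius 0.5 whose center lies 3 away along x:
# the separating distance is 3 - 0.5 (box half-side) - 0.5 (radius) = 2.
box = coal.Box(1.0, 1.0, 1.0)
sphere = coal.Sphere(0.5)
T1 = coal.Transform3s()
T2 = coal.Transform3s()
T2.setTranslation(np.array([3.0, 0.0, 0.0]))

dist_req = coal.DistanceRequest()
dist_res = coal.DistanceResult()
coal.distance(box, T1, sphere, T2, dist_req, dist_res)
print("Distance: ", dist_res.min_distance)  # expected: 2.0
```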
## Acknowledgments
The development of **Coal** is actively supported by the [Gepetto team](http://projects.laas.fr/gepetto/) [@LAAS-CNRS](http://www.laas.fr), the [Willow team](https://www.di.ens.fr/willow/) [@INRIA](http://www.inria.fr) and, to some extent, [Eureka Robotics](https://eurekarobotics.com/).
Subproject commit 6b0564f45af29a90160aafdfd67eb7a07ace48ed
{
"hooks": [
"share/hpp-fcl/hook/ament_prefix_path.dsv",
"share/hpp-fcl/hook/python_path.dsv"
]
}
# Build and install from source with Pixi
To build Coal from source, the easiest way is to use [Pixi](https://pixi.sh/latest/#installation).
[Pixi](https://pixi.sh/latest/) is a cross-platform package management tool for developers that
installs all required dependencies in the `.pixi` directory.
It is used by our CI agents, so you are guaranteed to get the right dependencies.
Run the following command to install dependencies, configure, build and test the project:
```bash
pixi run test
```
The project will be built in the `build` directory.
You can run `pixi shell` and build the project with `cmake` and `ninja` manually.
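For reference, a manual configure-and-build inside the Pixi environment might look like this (a sketch; the generator and CMake options are up to you):

```bash
pixi shell                  # enter the environment with all dependencies installed
cmake -B build -S . -G Ninja -DCMAKE_BUILD_TYPE=Release
ninja -C build
```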
# Release with Pixi
To create a release with Pixi run the following commands on the **devel** branch:
```bash
COAL_VERSION=X.Y.Z pixi run release_new_version
git push origin
git push origin vX.Y.Z
git push origin devel:master
```
Where `X.Y.Z` is the new version.
Be careful to follow the [Semantic Versioning](https://semver.org/spec/v2.0.0.html) rules.
You will find the following assets:
- `./build_new_version/coal-X.Y.Z.tar.gz`
- `./build_new_version/coal-X.Y.Z.tar.gz.sig`
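The `.sig` file lets users check the integrity of the tarball. Assuming it is a detached GPG signature (an assumption to verify against the project's release process):

```bash
gpg --verify ./build_new_version/coal-X.Y.Z.tar.gz.sig ./build_new_version/coal-X.Y.Z.tar.gz
```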
Then, create a new release on [GitHub](https://github.com/coal-library/coal/releases/new) with:
* Tag: vX.Y.Z
* Title: Coal X.Y.Z
* Body:
```
## What's Changed
CHANGELOG CONTENT
**Full Changelog**: https://github.com/coal-library/coal/compare/vXX.YY.ZZ...vX.Y.Z
```
Where `XX.YY.ZZ` is the last release version.
Then upload `coal-X.Y.Z.tar.gz` and `coal-X.Y.Z.tar.gz.sig` and publish the release.
:: Setup ccache
set CMAKE_CXX_COMPILER_LAUNCHER=ccache
:: Create compile_commands.json for language server
set CMAKE_EXPORT_COMPILE_COMMANDS=1
:: Activate color output with Ninja
set CMAKE_COLOR_DIAGNOSTICS=1
:: Set default build value only if not previously set
if not defined COAL_BUILD_TYPE (set COAL_BUILD_TYPE=Release)
if not defined COAL_PYTHON_STUBS (set COAL_PYTHON_STUBS=ON)
if not defined COAL_HAS_QHULL (set COAL_HAS_QHULL=OFF)
#! /bin/bash
# Activation script
# Remove flags setup from cxx-compiler
unset CFLAGS
unset CPPFLAGS
unset CXXFLAGS
unset DEBUG_CFLAGS
unset DEBUG_CPPFLAGS
unset DEBUG_CXXFLAGS
unset LDFLAGS
if [[ $host_alias == *"apple"* ]];
then
# On macOS, setting the rpath and -L is important in order to use the conda libc++ instead of the system one.
# Since conda-forge uses install_name_tool to package some libs, -headerpad_max_install_names is mandatory.
export LDFLAGS="-Wl,-headerpad_max_install_names -Wl,-rpath,$CONDA_PREFIX/lib -L$CONDA_PREFIX/lib"
elif [[ $host_alias == *"linux"* ]];
then
# On GNU/Linux, these flags may not be mandatory with g++, but
# they allow using clang++ as the compiler.
export LDFLAGS="-Wl,-rpath,$CONDA_PREFIX/lib -Wl,-rpath-link,$CONDA_PREFIX/lib -L$CONDA_PREFIX/lib"
fi
# Setup ccache
export CMAKE_CXX_COMPILER_LAUNCHER=ccache
# Create compile_commands.json for language server
export CMAKE_EXPORT_COMPILE_COMMANDS=1
# Activate color output with Ninja
export CMAKE_COLOR_DIAGNOSTICS=1
# Set default build value only if not previously set
export COAL_BUILD_TYPE=${COAL_BUILD_TYPE:=Release}
export COAL_PYTHON_STUBS=${COAL_PYTHON_STUBS:=ON}
export COAL_HAS_QHULL=${COAL_HAS_QHULL:=OFF}
#! /bin/bash
# Clang activation script
export CC="clang"
export CXX="clang++"
:: Setup clang-cl compiler
set CC=clang-cl
set CXX=clang-cl
SET(DOXYGEN_XML_OUTPUT "doxygen-xml" PARENT_SCOPE)
SET(DOXYGEN_FILE_PATTERNS "*.h *.hh *.hxx" PARENT_SCOPE)
SET(DOXYGEN_GENERATE_XML "YES" PARENT_SCOPE)
SET(DOXYGEN_EXPAND_ONLY_PREDEF "NO" PARENT_SCOPE)
SET(DOXYGEN_ENABLE_PREPROCESSING "YES" PARENT_SCOPE)
SET(DOXYGEN_MACRO_EXPANSION "YES" PARENT_SCOPE)
SET(DOXYGEN_EXCLUDE "${PROJECT_SOURCE_DIR}/include/hpp/")
# We must not document octree if Octomap is not set up.
# Otherwise this creates a build issue when building the bindings, because doxygen-autodoc
# will include octree.h, which in turn includes octomap.h.
IF(NOT COAL_HAS_OCTOMAP)
SET(DOXYGEN_EXCLUDE "${DOXYGEN_EXCLUDE} ${PROJECT_SOURCE_DIR}/include/coal/octree.h")
SET(DOXYGEN_EXCLUDE "${DOXYGEN_EXCLUDE} ${PROJECT_SOURCE_DIR}/include/coal/serialization/octree.h")
SET(DOXYGEN_EXCLUDE "${DOXYGEN_EXCLUDE} ${PROJECT_SOURCE_DIR}/include/coal/internal/traversal_node_octree.h")
ENDIF()
SET(DOXYGEN_EXCLUDE ${DOXYGEN_EXCLUDE} PARENT_SCOPE)
SET(DOXYGEN_PREDEFINED "IS_DOXYGEN" PARENT_SCOPE)
USE_MATHJAX = YES
[doc/distance_computation.png: image added, 36.9 KiB]
import matplotlib.pyplot as plt
import numpy as np
interactive = False
m = 1.0
b = 1.2
mb = m + b
X = np.array([-mb / 2, 0, m, mb, 2 * mb])
# X = np.linspace(-1, 4., 21)
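# dlb(d) below is the distance lower bound reported by a collision test, as a
# function of the true distance d (m = security margin, b = break distance):
# no value in collision (d < 0), d - m up to the break distance, then a bound
# growing with slope 1/2.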
def dlb(d):
if d < 0:
return None
if d > mb:
u = d - mb
return mb - m + u / 2
return d - m
plt.figure(figsize=(9, 3.5))
# plt.plot(X, X-m, ":k")
# plt.plot([m+b, X[-1]], [b, b], ":k")
plt.fill_between(
[m + b, X[-1]],
[b, b],
[b, X[-1] - m],
alpha=0.2,
hatch="|",
facecolor="g",
label="Distance lower band area",
)
plt.plot(X, [dlb(x) for x in X], "-g", label="distance lower bound")
# plt.plot([X[0], m, m, X[-1]], [0, 0, b, b], ":k")
plt.axvspan(X[0], m, alpha=0.5, hatch="\\", facecolor="r", label="Collision area")
ax = plt.gca()
ax.set_xlabel("Object distance")
ax.set_xticks([0, m, mb])
ax.set_xticklabels(["0", "security margin", "security margin\n+ break distance"])
ax.set_yticks([0, b])
ax.set_yticklabels(["0", "break distance"])
ax.grid(which="major", ls="solid")
ax.grid(which="minor", ls="dashed")
plt.axvline(0, ls="solid")
# plt.axvline(m, ls="dashed", label="margin")
# plt.axvline(mb, ls="dashed")
plt.axhline(0.0, ls="solid")
plt.title("Collision and distance lower band")
plt.legend(loc="lower right")
if interactive:
plt.show()
else:
import os.path as path
dir_path = path.dirname(path.realpath(__file__))
plt.savefig(
path.join(dir_path, "distance_computation.png"),
bbox_inches="tight",
orientation="landscape",
)
#!/usr/bin/env python3
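"""Generate the tetrahedron case of GJK (GJK::projectTetrahedraOrigin).

This script searches over orderings of the sign checks listed in `checks`,
minimizing the worst-case weighted number of tests, and prints the
corresponding C++ code. The case tables below refer to
doc/GJK_tetrahedra_boolean_table.ods.
"""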
import pdb
import sys
# ABC = AB^AC
# (ABC^AJ).a = (j.c - j.b) a.a + (j.a - j.c) b.a + (j.b - j.a) c.a, for j = b or c
segment_fmt = "{j}a_aa"
plane_fmt = ""
edge_fmt = "{j}a * {b}a_{c}a + {j}{b} * {c}a_aa - {j}{c} * {b}a_aa"
# These checks must be negative and not positive, as in the cheat sheet.
# They are the same as in the cheat sheet, except that we consider (...).dot(A)
# instead of (...).dot(-A)
plane_tests = ["C.dot (a_cross_b)", "D.dot(a_cross_c)", "-D.dot(a_cross_b)"]
checks = (
plane_tests
+ [edge_fmt.format(**{"j": j, "b": "b", "c": "c"}) for j in ["b", "c"]]
+ [edge_fmt.format(**{"j": j, "b": "c", "c": "d"}) for j in ["c", "d"]]
+ [edge_fmt.format(**{"j": j, "b": "d", "c": "b"}) for j in ["d", "b"]]
+ [segment_fmt.format(**{"j": j}) for j in ["b", "c", "d"]]
)
checks_hr = (
["ABC.AO >= 0", "ACD.AO >= 0", "ADB.AO >= 0"]
+ ["(ABC ^ {}).AO >= 0".format(n) for n in ["AB", "AC"]]
+ ["(ACD ^ {}).AO >= 0".format(n) for n in ["AC", "AD"]]
+ ["(ADB ^ {}).AO >= 0".format(n) for n in ["AD", "AB"]]
+ ["AB.AO >= 0", "AC.AO >= 0", "AD.AO >= 0"]
)
# weights of the checks.
weights = (
[
2,
]
* 3
+ [
3,
]
* 6
+ [
1,
]
* 3
)
# Segment tests first, because they have lower weight.
# tests = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ]
tests = [
9,
10,
11,
0,
1,
2,
3,
4,
5,
6,
7,
8,
]
assert len(tests) == len(checks)
assert sorted(tests) == list(range(len(tests)))
regions = [
"ABC",
"ACD",
"ADB",
"AB",
"AC",
"AD",
"A",
"Inside",
]
cases = list(range(len(regions)))
# The following 3 lists refer to table doc/GJK_tetrahedra_boolean_table.ods
# A check ID is (+/- (index+1)) where a minus sign encodes a NOT operation
# and index refers to an index in list checks.
# definitions is a list of list of check IDs to be ANDed.
# For instance, a0.a3.!a4 -> [ 1, 4, -5]
definitions = [
[1, 4, -5],
[2, 6, -7],
[3, 8, -9],
[-4, 9, 10],
[-6, 5, 11],
[-8, 7, 12],
[-10, -11, -12],
[-1, -2, -3],
]
# conditions is a list of (list of (list of check IDs to be ANDed) to be ORed).
conditions = [
[],
[],
[],
[],
[],
[],
[],
[], # [ [10, 11, 12], ], # I don't think this is always true...
]
# rejections is a list of (list of (list of check IDs to be ANDed) to be ORed).
rejections = [
[
[2, 6, 7],
[3, -8, -9],
],
[
[3, 8, 9],
[1, -4, -5],
],
[
[1, 4, 5],
[2, -6, -7],
],
[
[-1, -3],
],
[
[-2, -1],
],
[
[-3, -2],
],
[
[4, -5],
[6, -7],
[8, -9],
],
[],
]
implications = [
[
[
4,
5,
10,
],
[11],
],
[
[
6,
7,
11,
],
[12],
],
[
[
8,
9,
12,
],
[10],
],
[
[
-4,
-5,
11,
],
[10],
],
[
[
-6,
-7,
12,
],
[11],
],
[
[
-8,
-9,
10,
],
[12],
],
[[1, 4, 5, 6], [-7]],
[[2, 6, 7, 8], [-9]],
[[3, 8, 9, 4], [-5]],
[
[
-4,
5,
10,
],
[-11],
],
[
[
4,
-5,
-10,
],
[11],
],
[
[
-6,
7,
11,
],
[-12],
],
[
[
6,
-7,
-11,
],
[12],
],
[
[
-8,
9,
12,
],
[-10],
],
[
[
8,
-9,
-12,
],
[10],
],
[[10, 3, 9, -12, 4, -5], [1]],
[[10, -3, 1, -4], [9]],
[[10, -3, -1, 2, -6, 11], [5]],
[[-10, 11, 2, -12, -5, -1], [6]],
[[-10, 11, -2, 1, 5], [-6]],
[[-10, -11, 12, 1, -7, -2, 4], [-5]],
[[-10, -11, 12, -3, 2, 7], [-8]],
[[-10, -11, 12, -3, -2], [-1]],
]
def set_test_values(current_tests, test_values, itest, value):
def satisfies(values, indices):
for k in indices:
if k > 0 and values[k - 1] is not True:
return False
if k < 0 and values[-k - 1] is not False:
return False
return True
remaining_tests = list(current_tests)
next_test_values = list(test_values)
remaining_tests.remove(itest)
next_test_values[itest] = value
rerun = True
while rerun:
rerun = False
for impl in implications:
if satisfies(next_test_values, impl[0]):
for id in impl[1]:
k = (id - 1) if id > 0 else (-id - 1)
if k in remaining_tests:
next_test_values[k] = id > 0
remaining_tests.remove(k)
rerun = True
else:
if next_test_values[k] != (id > 0):
raise ValueError("Absurd case")
return remaining_tests, next_test_values
def set_tests_values(current_tests, test_values, itests, values):
for itest, value in zip(itests, values):
current_tests, test_values = set_test_values(
current_tests, test_values, itest, value
)
return current_tests, test_values
def apply_test_values(cases, test_values):
def canSatisfy(values, indices):
for k in indices:
if k > 0 and values[k - 1] is False:
return False
if k < 0 and values[-k - 1] is True:
return False
return True
def satisfies(values, indices):
for k in indices:
if k > 0 and values[k - 1] is not True:
return False
if k < 0 and values[-k - 1] is not False:
return False
return True
# Check all cases.
left_cases = []
for case in cases:
defi = definitions[case]
conds = conditions[case]
rejs = rejections[case]
if satisfies(test_values, defi):
# A definition is True, stop recursion
return [case]
if not canSatisfy(test_values, defi):
continue
for cond in conds:
if satisfies(test_values, cond):
# A condition is True, stop recursion
return [case]
append = True
for rej in rejs:
if satisfies(test_values, rej):
# A rejection is True, discard this case
append = False
break
if append:
left_cases.append(case)
return left_cases
def max_number_of_tests(
current_tests,
cases,
test_values=[
None,
]
* len(tests),
prevBestScore=float("inf"),
prevScore=0,
):
for test in current_tests:
assert test_values[test] is None, "Test " + str(test) + " already performed"
left_cases = apply_test_values(cases, test_values)
if len(left_cases) == 1:
return prevScore, {
"case": left_cases[0],
}
elif len(left_cases) == 0:
return prevScore, {
"case": None,
"comments": [
"applied " + str(test_values),
"to " + ", ".join([regions[c] for c in cases]),
],
}
assert len(current_tests) > 0, "No more test but " + str(left_cases) + " remains"
currentBestScore = prevBestScore
bestScore = float("inf")
bestOrder = [None, None]
for i, test in enumerate(current_tests):
assert bestScore >= currentBestScore
currentScore = prevScore + len(left_cases) * weights[test]
# currentScore = prevScore + weights[test]
if currentScore > currentBestScore: # Cannot do better -> stop
continue
try:
remaining_tests, next_test_values = set_test_values(
current_tests, test_values, test, True
)
except ValueError:
remaining_tests = None
if remaining_tests is not None:
# Do not put this in try catch as I do not want other ValueError to be
# understood as an infeasible branch.
score_if_t, order_if_t = max_number_of_tests(
remaining_tests,
left_cases,
next_test_values,
currentBestScore,
currentScore,
)
if score_if_t >= currentBestScore: # True didn't do better -> stop
continue
else:
score_if_t, order_if_t = prevScore, None
try:
remaining_tests, next_test_values = set_test_values(
current_tests, test_values, test, False
)
except ValueError:
remaining_tests = None
if remaining_tests is not None:
# Do not put this in try catch as I do not want other ValueError to be
# understood as an infeasible branch.
score_if_f, order_if_f = max_number_of_tests(
remaining_tests,
left_cases,
next_test_values,
currentBestScore,
currentScore,
)
else:
score_if_f, order_if_f = prevScore, None
currentScore = max(score_if_t, score_if_f)
if currentScore < bestScore:
if currentScore < currentBestScore:
bestScore = currentScore
bestOrder = {"test": test, "true": order_if_t, "false": order_if_f}
# pdb.set_trace()
currentBestScore = currentScore
if len(tests) == len(current_tests):
print("New best score: {}".format(currentBestScore))
return bestScore, bestOrder
def printComments(order, indent, file):
if "comments" in order:
for comment in order["comments"]:
print(indent + "// " + comment, file=file)
def printOrder(order, indent="", start=True, file=sys.stdout, curTests=[]):
if start:
print(
"bool GJK::projectTetrahedraOrigin(const Simplex& current, Simplex& next)",
file=file,
)
print("{", file=file)
print(
indent + "// The code of this function was generated using doc/gjk.py",
file=file,
)
print(indent + "const vertex_id_t a = 3, b = 2, c = 1, d = 0;", file=file)
for v in "abcd":
print(
indent
+ "const Vec3s& {} (current.vertex[{}]->w);".format(v.upper(), v),
file=file,
)
print(indent + "const CoalScalar aa = A.squaredNorm();".format(), file=file)
for v in "dcb":
for m in "abcd":
if m <= v:
print(
indent
+ "const CoalScalar {0}{1} = {2}.dot({3});".format(
v, m, v.upper(), m.upper()
),
file=file,
)
else:
print(
indent + "const CoalScalar& {0}{1} = {1}{0};".format(v, m),
file=file,
)
print(indent + "const CoalScalar {0}a_aa = {0}a - aa;".format(v), file=file)
for l0, l1 in zip("bcd", "cdb"):
print(
indent + "const CoalScalar {0}a_{1}a = {0}a - {1}a;".format(l0, l1),
file=file,
)
for v in "bc":
print(
indent + "const Vec3s a_cross_{0} = A.cross({1});".format(v, v.upper()),
file=file,
)
print("", file=file)
print("#define REGION_INSIDE() " + indent + "\\", file=file)
print(indent + " ray.setZero(); \\", file=file)
print(indent + " next.vertex[0] = current.vertex[d]; \\", file=file)
print(indent + " next.vertex[1] = current.vertex[c]; \\", file=file)
print(indent + " next.vertex[2] = current.vertex[b]; \\", file=file)
print(indent + " next.vertex[3] = current.vertex[a]; \\", file=file)
print(indent + " next.rank=4; \\", file=file)
print(indent + " return true;", file=file)
print("", file=file)
if "case" in order:
case = order["case"]
if case is None:
print(
indent + "// There are no case corresponding to this set of tests.",
file=file,
)
printComments(order, indent, file)
print(indent + "assert(false);", file=file)
return
region = regions[case]
print(indent + "// Region " + region, file=file)
printComments(order, indent, file)
toFree = ["b", "c", "d"]
if region == "Inside":
print(indent + "REGION_INSIDE()", file=file)
toFree = []
elif region == "A":
print(indent + "originToPoint (current, a, A, next, ray);", file=file)
elif len(region) == 2:
B = region[1]
print(
indent
+ "originToSegment "
"(current, a, {b}, A, {B}, {B}-A, -{b}a_aa, next, ray);".format(
**{"b": B.lower(), "B": B}
),
file=file,
)
toFree.remove(B.lower())
elif len(region) == 3:
B = region[1]
C = region[2]
test = plane_tests[["ABC", "ACD", "ADB"].index(region)]
if test.startswith("-"):
test = test[1:]
else:
test = "-" + test
print(
indent
+ "originToTriangle "
"(current, a, {b}, {c}, ({B}-A).cross({C}-A), {t}, next, ray);".format(
**{"b": B.lower(), "c": C.lower(), "B": B, "C": C, "t": test}
),
file=file,
)
toFree.remove(B.lower())
toFree.remove(C.lower())
else:
assert False, "Unknown region " + region
for pt in toFree:
print(
indent + "free_v[nfree++] = current.vertex[{}];".format(pt), file=file
)
else:
assert "test" in order and "true" in order and "false" in order
check = checks[order["test"]]
check_hr = checks_hr[order["test"]]
printComments(order, indent, file)
nextTests_t = curTests + [
"a" + str(order["test"] + 1),
]
nextTests_f = curTests + [
"!a" + str(order["test"] + 1),
]
if order["true"] is None:
if order["false"] is None:
print(
indent
+ """assert(false && "Case {} should never happen.");""".format(
check_hr
),
file=file,
)
else:
print(
indent
+ "assert(!({} <= 0)); // Not {} / {}".format(
check, check_hr, ".".join(nextTests_f)
),
file=file,
)
printOrder(
order["false"],
indent=indent,
start=False,
file=file,
curTests=nextTests_f,
)
elif order["false"] is None:
print(
indent
+ "assert({} <= 0); // {} / {}".format(
check, check_hr, ".".join(nextTests_t)
),
file=file,
)
printOrder(
order["true"],
indent=indent,
start=False,
file=file,
curTests=nextTests_t,
)
else:
print(
indent
+ "if ({} <= 0) {{ // if {} / {}".format(
check, check_hr, ".".join(nextTests_t)
),
file=file,
)
printOrder(
order["true"],
indent=indent + " ",
start=False,
file=file,
curTests=nextTests_t,
)
print(
indent
+ "}} else {{ // not {} / {}".format(check_hr, ".".join(nextTests_f)),
file=file,
)
printOrder(
order["false"],
indent=indent + " ",
start=False,
file=file,
curTests=nextTests_f,
)
print(indent + "}} // end of {}".format(check_hr), file=file)
if start:
print("", file=file)
print("#undef REGION_INSIDE", file=file)
print(indent + "return false;", file=file)
print("}", file=file)
def unit_tests():
# a4, a5, a10, a11, a12
cases = list(range(len(regions)))
pdb.set_trace()
left_cases = apply_test_values(
cases,
test_values=[
None,
None,
None,
True,
True,
None,
None,
None,
None,
True,
True,
True,
],
)
assert len(left_cases) > 1
# unit_tests()
score, order = max_number_of_tests(tests, cases)
print(score)
printOrder(order, indent=" ")
# TODO add weights such that:
# - it is preferred to have all the use of one check in one branch.
# idea: weight by the number of remaining tests.
[doc/images/coal-performances.jpg: image added, 285 KiB]