Gepetto / quadruped-reactive-walking / Commits

Commit 6da1b3d2, authored 3 years ago by odri

Switch from boost to mutex

parent cd34adc2
No related branches, tags, or merge requests found
Pipeline #16519 failed (3 years ago), stage: test

Showing 3 changed files with 77 additions and 88 deletions:
- include/qrw/MpcWrapper.hpp (+12 −12)
- src/MpcWrapper.cpp (+59 −76)
- src/control_solo12.cpp (+6 −0)
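The commit replaces the boost::interprocess shared-memory exchange between the main control loop and the MPC side with plain globals guarded by a std::mutex. As a rough orientation only, the guarded handoff looks like the standalone sketch below. It is not the project code: MatrixN is approximated by Eigen::MatrixXd, and the write_in()/read_in() signatures are simplified compared to the functions added in the diff.

#include <iostream>
#include <mutex>

#include <Eigen/Dense>

using MatrixN = Eigen::MatrixXd;  // stand-in for the qrw MatrixN typedef

// Shared state and the single mutex that protects it.
static int shared_k = 0;
static MatrixN shared_xref;
static std::mutex mutexIn;

// Writer side (main loop): copy the latest reference data in under the lock.
void write_in(int k, MatrixN const& xref) {
  const std::lock_guard<std::mutex> lockIn(mutexIn);  // released automatically at end of scope
  shared_k = k;
  shared_xref = xref;
}

// Reader side (MPC thread): copy the data out under the same lock.
void read_in(int& k, MatrixN& xref) {
  const std::lock_guard<std::mutex> lockIn(mutexIn);
  k = shared_k;
  xref = shared_xref;
}

int main() {
  write_in(7, MatrixN::Zero(12, 3));
  int k;
  MatrixN xref;
  read_in(k, xref);
  std::cout << "k = " << k << ", xref: " << xref.rows() << "x" << xref.cols() << std::endl;
  return 0;
}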
include/qrw/MpcWrapper.hpp (+12 −12)
@@ -9,8 +9,8 @@
#ifndef MPCWRAPPER_H_INCLUDED
#define MPCWRAPPER_H_INCLUDED

#include <string>
#include <boost/interprocess/managed_shared_memory.hpp>
// #include <string>
// #include <boost/interprocess/managed_shared_memory.hpp>
// #include <boost/interprocess/shared_memory_object.hpp>
// #include <boost/interprocess/mapped_region.hpp>
#include "pinocchio/math/rpy.hpp"

@@ -18,10 +18,16 @@
#include <Eigen/Dense>
#include <chrono>
#include <thread>
#include <mutex>
#include "qrw/Types.h"
#include "qrw/MPC.hpp"

namespace bi = boost::interprocess;

// Functions acting on shared memory
void stop_thread();
void write_in(int k, MatrixN const& xref, MatrixN const& fsteps);
void read_in();
void check_memory();

class MpcWrapper {
 public:

@@ -66,21 +72,15 @@ class MpcWrapper {
  void run_MPC_asynchronous(int k, MatrixN const& xref, MatrixN const& fsteps);
  void create_MPC_asynchronous();

  // int check_memory();
  // void write_in(int k, MatrixN const& xref, MatrixN const& fsteps);
  // void read_in();

 private:
  Params* params_;
  MPC mpc_;

  // shared_memory_object shm (boost::interprocess::create_only, "SharedMemory", boost::interprocess::read_write);
  // mapped_region region;
  bi::managed_shared_memory segment;
  int* shared_k;
  MatrixN* shared_xref;
  MatrixN* shared_fsteps;

  int test = 0;
  Eigen::Matrix<double, 24, 2> last_available_result;
  Matrix14 gait_past;
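For reference, the boost::interprocess API that the removed and commented-out code relied on works roughly as in the standalone sketch below. Only the construct()/find() calls mirror the old shared_k handling; the segment and object names ("DemoSharedMemory") and the single-process usage are illustrative assumptions.

#include <iostream>
#include <utility>

#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/shared_memory_object.hpp>

namespace bi = boost::interprocess;

int main() {
  // Clean up any stale segment, then create a named shared-memory segment.
  bi::shared_memory_object::remove("DemoSharedMemory");
  bi::managed_shared_memory segment(bi::create_only, "DemoSharedMemory", 65536);

  // Construct a named int inside the segment, as the old shared_k initialization did.
  segment.construct<int>("shared_k")(42);

  // Any process that opens the segment can look the object up by name.
  bi::managed_shared_memory other(bi::open_only, "DemoSharedMemory");
  std::pair<int*, bi::managed_shared_memory::size_type> res = other.find<int>("shared_k");
  if (res.first) std::cout << "shared_k = " << *res.first << std::endl;

  // Destroy the named object and remove the segment.
  segment.destroy<int>("shared_k");
  bi::shared_memory_object::remove("DemoSharedMemory");
  return 0;
}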
src/MpcWrapper.cpp (+59 −76)
#include "qrw/MpcWrapper.hpp"

// bi::managed_shared_memory MpcWrapper::segment = bi::managed_shared_memory(bi::open_or_create, "SharedMemory", 5000);

// Shared global variables
bool shared_running = true;
int shared_k;
MatrixN shared_xref;
MatrixN shared_fsteps;

MpcWrapper::MpcWrapper()
    : test(0),
      last_available_result(Eigen::Matrix<double, 24, 2>::Zero()),
      gait_past(Matrix14::Zero()),
      gait_next(Matrix14::Zero()),
      segment(bi::open_or_create, "SharedMemory", 1000) {}

// Mutexes to protect the global variables
std::mutex mutexIn;   // From main loop to MPC
std::mutex mutexOut;  // From MPC to main loop

int check_memory()
void stop_thread()
{
  /*
  //Open already created shared memory object.
  shared_memory_object shm_parallel(open_only, "SharedMemory", read_only);
  //Map the whole shared memory in this process
  mapped_region region_parallel(shm_parallel, read_only);
  shared_running = false;
}

  //Check that memory was initialized to 1
  while (true)
void write_in(int k, MatrixN const& xref, MatrixN const& fsteps)
{
  std::cout << "Writing memory" << std::endl;
  const std::lock_guard<std::mutex> lockIn(mutexIn);
  shared_k = k;
  shared_xref = xref;
  shared_fsteps = fsteps;
}

void read_in()
{
  std::cout << "Reading memory" << std::endl;
  const std::lock_guard<std::mutex> lockIn(mutexIn);
  std::cout << "Shared k" << std::endl << shared_k << std::endl;
  // std::cout << "Shared xref" << std::endl << shared_xref << std::endl;
  // std::cout << "Shared fsteps" << std::endl << shared_fsteps << std::endl;
}

void check_memory()
{
  std::cout << "Checking memory" << std::endl;
  while (shared_running)
  {
    char *mem = static_cast<char*>(region_parallel.get_address());
    for(std::size_t i = 0; i < region_parallel.get_size(); ++i)
    {
      if(*mem++ != 1) {printf("ERROR DETECTED"); return 1;}  //Error checking memory
    }
    printf("NO ERROR DETECTED\n");
    std::this_thread::sleep_for(std::chrono::milliseconds(500));
    read_in();
    std::this_thread::sleep_for(std::chrono::milliseconds(1000));
  }
  return 0;
  */
  printf("PASS\n");

  bi::managed_shared_memory segment_parallel(bi::open_only, "SharedMemory");
  std::pair<int*, std::size_t> test0 = segment_parallel.find<int>("shared_k");
  //std::pair<MatrixN*, std::size_t> test1 = segment_parallel.find<MatrixN>("shared_xref");
  //std::pair<MatrixN*, std::size_t> test2 = segment_parallel.find<MatrixN>("shared_fsteps");
  //while (true)
  //{
  //  std::this_thread::sleep_for(std::chrono::milliseconds(500));
  //  std::cout << "shared_k: " << std::endl << *(test0.first) << std::endl;
  //std::cout << "shared_xref: " << std::endl << *(test1.first) << std::endl;
  //std::cout << "shared_fsteps: " << std::endl << *(test2.first) << std::endl;
  /*int* test0 = segment.find_or_construct<int>("shared_k");
  MatrixN* test1 = segment.find_or_construct<MatrixN>("shared_xref");
  MatrixN* test2 = segment.find_or_construct<MatrixN>("shared_fsteps");
  std::cout << "shared_k: " << std::endl << test0 << std::endl;
  std::cout << "shared_xref: " << std::endl << test1 << std::endl;
  std::cout << "shared_fsteps: " << std::endl << test2 << std::endl;*/
  //}

  return 0;
}

MpcWrapper::MpcWrapper()
    : test(0),
      last_available_result(Eigen::Matrix<double, 24, 2>::Zero()),
      gait_past(Matrix14::Zero()),
      gait_next(Matrix14::Zero()) {}

void MpcWrapper::initialize(Params& params)
{
  params_ = &params;

@@ -62,36 +58,10 @@ void MpcWrapper::initialize(Params& params) {
  last_available_result(2, 0) = params.h_ref;
  last_available_result.col(0).tail(12) = (Vector3(0.0, 0.0, 8.0)).replicate<4, 1>();

  // Initialization of the shared memory
  shared_k = segment.construct<int>("shared_k") (42);
  /*shared_xref = segment.construct<MatrixN>("shared_xref") (12, params.gait.rows() + 1);
  shared_fsteps = segment.construct<MatrixN>("shared_fsteps") (params.gait.rows(), 12);*/

  // Initialization of shared memory
  // Remove shared memory on construction and destruction
  /*struct shm_remove
  {
    shm_remove() { shared_memory_object::remove("SharedMemory"); }
    ~shm_remove(){ shared_memory_object::remove("SharedMemory"); }
  } remover;*/

  // Create a shared memory object.
  /*bi::shared_memory_object shm (bi::create_only, "SharedMemory", bi::read_write);
  // Set size
  shm.truncate(500);
  // Map the whole shared memory in this process
  bi::mapped_region region (shm, bi::read_write);
  // Write all the memory to 1
  std::memset(region.get_address(), 1, region.get_size());*/

  std::thread checking_thread(check_memory);  // spawn new thread that calls check_memory()
  // checking_thread.join();

  //std::pair<int*, std::size_t> test0 = segment.find<int>("shared_k");
  // std::cout << "shared_k: " << std::endl << *(test0.first) << std::endl;

  // Initialize the shared memory
  shared_k = 42;
  shared_xref = MatrixN::Zero(12, params.gait.rows() + 1);
  shared_fsteps = MatrixN::Zero(params.gait.rows(), 12);
}

@@ -105,8 +75,20 @@ void MpcWrapper::solve(int k, MatrixN const& xref, MatrixN const& fsteps,
  std::cout << sizeof(MatrixN::Zero(12, params_->gait.rows() + 1)) << std::endl;
  std::cout << sizeof(MatrixN::Zero(params_->gait.rows(), 12)) << std::endl;
  std::cout << params_->gait << std::endl;*/

  write_in(k, xref, fsteps);
  // std::thread checking_thread(check_memory); // spawn new thread that calls check_memory()
  // checking_thread.join();

  double t = 0;
  while (t < 15.0)
  {
    std::cout << "Waiting" << std::endl;
    write_in(k, xref, fsteps);
    k++;
    t += 0.5;
    std::this_thread::sleep_for(std::chrono::milliseconds(500));
  }

  // Run in parallel process
  run_MPC_asynchronous(k, xref, fsteps);

@@ -188,3 +170,4 @@ Eigen::Matrix<double, 24, 2> MpcWrapper::get_latest_result()
  // if (there is new result) {last_available_result = retrieve data mpc i.e. dataOut.block(0, 0, 24, 2) }
  return last_available_result;
}
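The commit keeps fixed-period polling: check_memory() sleeps 1000 ms between read_in() calls, and the waiting loop in solve() sleeps 500 ms between write_in() calls. A common companion to this kind of mutex-guarded handoff, not used here, is std::condition_variable, which lets the reader block until the writer actually publishes new data. A minimal sketch with illustrative names:

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex mutexIn;
std::condition_variable cvIn;
int shared_k = 0;
bool data_ready = false;

// Producer: publish a new value and wake the consumer.
void write_in(int k) {
  {
    const std::lock_guard<std::mutex> lock(mutexIn);
    shared_k = k;
    data_ready = true;
  }
  cvIn.notify_one();
}

// Consumer: block until data_ready is set, then consume the value.
void read_in() {
  std::unique_lock<std::mutex> lock(mutexIn);
  cvIn.wait(lock, [] { return data_ready; });
  std::cout << "Got shared_k = " << shared_k << std::endl;
  data_ready = false;
}

int main() {
  std::thread consumer(read_in);
  write_in(42);
  consumer.join();
  return 0;
}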
src/control_solo12.cpp (+6 −0)
@@ -68,6 +68,7 @@ int main()
  // Initialization of variables
  Controller controller;  // Main controller
  controller.initialize(params);
  std::thread checking_thread(check_memory);  // spawn new thread that calls check_memory()
  int k_loop = 0;

  // Initialize the communication, session, joints, wait for motors to be ready

@@ -122,6 +123,11 @@ int main()
      break;
    }

  // Close parallel thread
  stop_thread();
  checking_thread.join();
  std::cout << "Parallel thread closed" << std::endl;

  // DAMPING TO GET ON THE GROUND PROGRESSIVELY *********************
  double t = 0.0;
  double t_max = 2.5;
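The shutdown path added here relies on check_memory() observing shared_running == false after stop_thread() is called from main(). As shown in the diff, shared_running is a plain bool written and read from different threads without holding a lock; std::atomic<bool> is the usual way to make such a stop flag well-defined. Below is a minimal standalone sketch of the same spawn/stop/join lifecycle; the names mirror the diff but it is not the project code.

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

std::atomic<bool> shared_running{true};

// Worker loop, in the spirit of check_memory(): poll until asked to stop.
void check_memory() {
  while (shared_running.load()) {
    std::cout << "Checking memory" << std::endl;
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }
}

void stop_thread() { shared_running.store(false); }

int main() {
  std::thread checking_thread(check_memory);  // spawn the parallel thread
  std::this_thread::sleep_for(std::chrono::milliseconds(350));
  stop_thread();                              // request shutdown
  checking_thread.join();                     // wait for the worker to exit
  std::cout << "Parallel thread closed" << std::endl;
  return 0;
}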