diff --git a/include/qrw/Params.hpp b/include/qrw/Params.hpp
index 92e1bd420e649dfcdbfcc95dd21a1379dda326bb..8372777ed07fbd1a75f4787da1cf36ece87f631f 100644
--- a/include/qrw/Params.hpp
+++ b/include/qrw/Params.hpp
@@ -51,11 +51,12 @@ public:
     double T_gait;
     double T_mpc;
     int N_SIMULATION;
-    bool type_MPC;
+    int type_MPC;
     bool use_flat_plane;
     bool predefined_vel;
     bool kf_enabled;
     bool enable_pyb_GUI;
+    std::vector<double> test_list;
 
 };
 
diff --git a/python/gepadd.cpp b/python/gepadd.cpp
index 58de3e21f7b80986ef072b33a6c9c054a552cd56..f09b33f4bbc3f4cc94d0d1cc4293a8125cc78e1f 100644
--- a/python/gepadd.cpp
+++ b/python/gepadd.cpp
@@ -264,7 +264,8 @@ struct ParamsPythonVisitor : public bp::def_visitor<ParamsPythonVisitor<Params>>
             .def_readwrite("use_flat_plane", &Params::use_flat_plane)
             .def_readwrite("predefined_vel", &Params::predefined_vel)
             .def_readwrite("kf_enabled", &Params::kf_enabled)
-            .def_readwrite("enable_pyb_GUI", &Params::enable_pyb_GUI);
+            .def_readwrite("enable_pyb_GUI", &Params::enable_pyb_GUI)
+            .def_readwrite("test_list", &Params::test_list);
 
     }
 
diff --git a/scripts/Controller.py b/scripts/Controller.py
index 3f03fc4789f1b4c4b8131503439de33cbe822789..84e73d64c66e3c37716b83688bed5297c2a25394 100644
--- a/scripts/Controller.py
+++ b/scripts/Controller.py
@@ -64,7 +64,7 @@ class Controller:
             T_gait (float): duration of one gait period in seconds
             T_mpc (float): duration of mpc prediction horizon
             N_SIMULATION (int): number of iterations of inverse dynamics during the simulation
-            type_mpc (bool): True to have PA's MPC, False to have Thomas's MPC
+            type_mpc (int): 0 for the OSQP MPC, 1, 2 or 3 for the Crocoddyl MPCs (linear, non-linear, non-linear with footstep optimization)
             use_flat_plane (bool): to use either a flat ground or a rough ground
             predefined_vel (bool): to use either a predefined velocity profile or a gamepad
             enable_pyb_GUI (bool): to display PyBullet GUI or not
@@ -228,9 +228,6 @@ class Controller:
                                                                 self.h_v[0:6, 0:1].copy(),
                                                                 self.v_ref[0:6, 0])
 
-        # Update pos, vel and acc references for feet
-        self.footTrajectoryGenerator.update(self.k, o_targetFootstep)
-
         # Run state planner (outputs the reference trajectory of the base)
         self.statePlanner.computeReferenceStates(self.q[0:7, 0:1], self.h_v[0:6, 0:1].copy(),
                                                  self.v_ref[0:6, 0:1], 0.0)
@@ -254,8 +251,15 @@ class Controller:
 
         t_mpc = time.time()
 
+        # If the MPC optimizes footstep positions then we use them
+        if self.k > 100 and self.type_MPC == 3:
+            o_targetFootstep[:2, :] = self.footstepPlanner.getRz()[:2, :2] @ self.x_f_mpc[24:, 0].reshape((2, 4), order='F') + self.q[0:2, 0:1]
+
+        # Update pos, vel and acc references for feet
+        self.footTrajectoryGenerator.update(self.k, o_targetFootstep)
+
         # Target state for the whole body control
-        self.x_f_wbc = (self.x_f_mpc[:, 0]).copy()
+        self.x_f_wbc = (self.x_f_mpc[:24, 0]).copy()
         if not self.gait.getIsStatic():
             self.x_f_wbc[0] = self.myController.dt * xref[6, 1]
             self.x_f_wbc[1] = self.myController.dt * xref[7, 1]
diff --git a/scripts/Logger.py b/scripts/Logger.py
index 1672a1b12a52864e27fd926fb454e3d143edda0c..86048232ac6dd71aec4503e7c9c49b9f45c71fe4 100644
--- a/scripts/Logger.py
+++ b/scripts/Logger.py
@@ -17,7 +17,7 @@ class Logger:
         dt_mpc (float): time step of the MPC
         k_mpc (int): number of tsid iterations for one iteration of the mpc
         T_mpc (float): duration of mpc prediction horizon
-        type_MPC (bool): which MPC you want to use (PA's or Thomas')
+        type_MPC (int): 0 for the OSQP MPC, 1, 2 or 3 for the Crocoddyl MPCs (linear, non-linear, non-linear with footstep optimization)
     """
 
     def __init__(self, k_max_loop, dt, dt_mpc, k_mpc, T_mpc, type_MPC):
@@ -614,7 +614,7 @@ class Logger:
     def log_predicted_trajectories(self, k, mpc_wrapper):
         """ Store information about the predicted evolution of the optimization vector components
         """
-        if self.type_MPC:
+        if self.type_MPC == 0:
             self.pred_trajectories[:, :, int(k/self.k_mpc)] = mpc_wrapper.mpc.x_robot
             self.pred_forces[:, :, int(k/self.k_mpc)] = mpc_wrapper.mpc.x[mpc_wrapper.mpc.xref.shape[0]*(mpc_wrapper.mpc.xref.shape[1]-1):].reshape((mpc_wrapper.mpc.xref.shape[0],
                                                                                                                                                      mpc_wrapper.mpc.xref.shape[1]-1),
@@ -750,7 +750,7 @@ class Logger:
         self.log_torques(k, tsid_controller)
 
         # Store information about the cost function
-        """if self.type_MPC and not enable_multiprocessing:
+        """if (self.type_MPC == 0) and not enable_multiprocessing:
             self.log_cost_function(k, mpc_wrapper)"""
 
         # Store information about the predicted evolution of the optimization vector components
@@ -780,7 +780,7 @@ class Logger:
 
         # Plot information about the state of the robot
         # Cost not comparable between the two solvers
-        if self.type_MPC and not enable_multiprocessing:
+        if (self.type_MPC == 0) and not enable_multiprocessing:
             self.plot_cost_function()
 
         # Plot information about the predicted evolution of the optimization vector components
diff --git a/scripts/LoggerControl.py b/scripts/LoggerControl.py
index d8be4b19352a10fcfce9d1b2d6ba285f396e8c90..af1a27f8c6fad81687e6419b5fefe19ec8a3cc0a 100644
--- a/scripts/LoggerControl.py
+++ b/scripts/LoggerControl.py
@@ -73,7 +73,10 @@ class LoggerControl():
         # Model Predictive Control
         # output vector of the MPC (next state + reference contact force)
         if statePlanner is not None:
-            self.mpc_x_f = np.zeros([logSize, 24, statePlanner.getNSteps()])
+            if loop.type_MPC == 3:
+                self.mpc_x_f = np.zeros([logSize, 32, statePlanner.getNSteps()])
+            else:
+                self.mpc_x_f = np.zeros([logSize, 24, statePlanner.getNSteps()])
 
         # Whole body control
         self.wbc_x_f = np.zeros([logSize, 24])  # input vector of the WBC (next state + reference contact force)
@@ -226,8 +229,8 @@ class LoggerControl():
             plt.plot(t_range, self.wbc_feet_pos[:, i % 3, np.int(i/3)], color='b', linewidth=3, marker='')
             plt.plot(t_range, self.wbc_feet_err[:, i % 3, np.int(i/3)] + self.wbc_feet_pos[0, i % 3, np.int(i/3)], color='g', linewidth=3, marker='')
             plt.plot(t_range, self.wbc_feet_pos_target[:, i % 3, np.int(i/3)], color='r', linewidth=3, marker='')
-            """plt.plot(t_range, self.wbc_feet_pos_invkin[:, i % 3, np.int(i/3)],
-                     color='darkviolet', linewidth=3, linestyle="--", marker='')"""
+            plt.plot(t_range, self.wbc_feet_pos_invkin[:, i % 3, np.int(i/3)],
+                     color='darkviolet', linewidth=3, linestyle="--", marker='')
             if (i % 3) == 2:
                 mini = np.min(self.wbc_feet_pos[:, i % 3, np.int(i/3)])
                 maxi = np.max(self.wbc_feet_pos[:, i % 3, np.int(i/3)])
diff --git a/scripts/MPC_Wrapper.py b/scripts/MPC_Wrapper.py
index f0fd9174af21b7a17ddda538291b9b61231a8e8d..c427534f61dd8b67480792fad468e30707911ac8 100644
--- a/scripts/MPC_Wrapper.py
+++ b/scripts/MPC_Wrapper.py
@@ -5,6 +5,7 @@ import libquadruped_reactive_walking as MPC
 from multiprocessing import Process, Value, Array
 from utils_mpc import quaternionToRPY
 import crocoddyl_class.MPC_crocoddyl as MPC_crocoddyl
+import crocoddyl_class.MPC_crocoddyl_planner as MPC_crocoddyl_planner
 
 
 class Dummy:
@@ -23,7 +24,7 @@ class MPC_Wrapper:
     a parallel process
 
     Args:
-        mpc_type (bool): True to have PA's MPC, False to have Thomas's MPC
+        mpc_type (int): 0 for the OSQP MPC, 1, 2 or 3 for the Crocoddyl MPCs (linear, non-linear, non-linear with footstep optimization)
         dt (float): Time step of the MPC
         n_steps (int): Number of time steps in one gait cycle
         k_mpc (int): Number of inv dyn time step for one iteration of the MPC
@@ -52,23 +53,35 @@ class MPC_Wrapper:
             self.newData = Value('b', False)
             self.newResult = Value('b', False)
             self.dataIn = Array('d', [0.0] * (1 + (np.int(self.n_steps)+1) * 12 + 12*self.N_gait))
-            self.dataOut = Array('d', [0] * 24 * (np.int(self.n_steps)))
+            if self.mpc_type == 3:  # Need more space to store optimized footsteps
+                self.dataOut = Array('d', [0] * 32 * (np.int(self.n_steps)))
+            else:
+                self.dataOut = Array('d', [0] * 24 * (np.int(self.n_steps)))
             self.fsteps_future = np.zeros((self.N_gait, 12))
             self.running = Value('b', True)
         else:
             # Create the new version of the MPC solver object
-            if mpc_type:
+            if mpc_type == 0:  # OSQP MPC
                 self.mpc = MPC.MPC(dt, n_steps, T_gait, self.N_gait)
-            else:
-                self.mpc = MPC_crocoddyl.MPC_crocoddyl(
-                    dt=dt, T_mpc=T_gait, mu=0.9, inner=False, linearModel=True, n_period=int((dt * n_steps)/T_gait))
+            elif mpc_type == 1:  # Crocoddyl MPC Linear
+                self.mpc = MPC_crocoddyl.MPC_crocoddyl(dt=dt, T_mpc=T_gait, mu=0.9, inner=False,
+                                                       linearModel=True, N_gait=self.N_gait)
+            elif mpc_type == 2:  # Crocoddyl MPC Non-Linear
+                self.mpc = MPC_crocoddyl.MPC_crocoddyl(dt=dt, T_mpc=T_gait, mu=0.9, inner=False,
+                                                       linearModel=False, N_gait=self.N_gait)
+            else:  # Crocoddyl MPC Non-Linear with footstep optimization
+                self.mpc = MPC_crocoddyl_planner.MPC_crocoddyl_planner(dt=dt, T_mpc=T_gait, mu=0.9,
+                                                                       inner=False, N_gait=self.N_gait)
 
         # Setup initial result for the first iteration of the main control loop
         x_init = np.zeros(12)
         x_init[0:3] = q_init[0:3, 0]
         x_init[3:6] = quaternionToRPY(q_init[3:7, 0]).ravel()
-        self.last_available_result = np.zeros((24, (np.int(self.n_steps))))
-        self.last_available_result[:, 0] = np.hstack((x_init, np.array([0.0, 0.0, 8.0] * 4)))
+        if self.mpc_type == 3:  # Need more space to store optimized footsteps
+            self.last_available_result = np.zeros((32, (np.int(self.n_steps))))
+        else:
+            self.last_available_result = np.zeros((24, (np.int(self.n_steps))))
+        self.last_available_result[:24, 0] = np.hstack((x_init, np.array([0.0, 0.0, 8.0] * 4)))
 
     def solve(self, k, xref, fsteps, gait):
         """Call either the asynchronous MPC or the synchronous MPC depending on the value of multiprocessing during
@@ -96,7 +109,7 @@ class MPC_Wrapper:
             mass = 2.5  # TODO: grab from URDF?
             nb_ctc = np.sum(gait[pt-1, :])
             F = 9.81 * mass / nb_ctc
-            self.last_available_result[12:, self.n_steps-1] = np.zeros(12)
+            self.last_available_result[12:24, self.n_steps-1] = np.zeros(12)
             for i in range(4):
                 if (gait[pt-1, i] == 1):
                     self.last_available_result[12+3*i+2, self.n_steps-1] = F
@@ -137,11 +150,11 @@ class MPC_Wrapper:
         # Run the MPC to get the reference forces and the next predicted state
         # Result is stored in mpc.f_applied, mpc.q_next, mpc.v_next
 
-        if self.mpc_type:
+        if self.mpc_type == 0:
             # OSQP MPC
             self.mpc.run(np.int(k), xref.copy(), fsteps.copy())
         else:
-            # Crocoddyl MPC (TODO: Adapt)
+            # Crocoddyl MPC
             self.mpc.solve(k, xref.copy(), fsteps.copy())
 
         # Output of the MPC
@@ -198,20 +211,24 @@ class MPC_Wrapper:
                 # Create the MPC object of the parallel process during the first iteration
                 if k == 0:
                     # loop_mpc = MPC.MPC(self.dt, self.n_steps, self.T_gait)
-                    if self.mpc_type:
+                    if self.mpc_type == 0:
                         loop_mpc = MPC.MPC(self.dt, self.n_steps, self.T_gait, self.N_gait)
-                    else:
-                        loop_mpc = MPC_crocoddyl.MPC_crocoddyl(self.dt, self.T_gait, 1, True)
-                        dummy_fstep_planner = Dummy()
+                    elif self.mpc_type == 1:  # Crocoddyl MPC Linear
+                        loop_mpc = MPC_crocoddyl.MPC_crocoddyl(dt=self.dt, T_mpc=self.T_gait, mu=0.9, inner=False,
+                                                               linearModel=True, N_gait=self.N_gait)
+                    elif self.mpc_type == 2:  # Crocoddyl MPC Non-Linear
+                        loop_mpc = MPC_crocoddyl.MPC_crocoddyl(dt=self.dt, T_mpc=self.T_gait, mu=0.9, inner=False,
+                                                               linearModel=False, N_gait=self.N_gait)
+                    else:  # Crocoddyl MPC Non-Linear with footstep optimization
+                        loop_mpc = MPC_crocoddyl_planner.MPC_crocoddyl_planner(dt=self.dt, T_mpc=self.T_gait, mu=0.9,
+                                                                               inner=False, N_gait=self.N_gait)
 
                 # Run the asynchronous MPC with the data that as been retrieved
-                if self.mpc_type:
+                if self.mpc_type == 0:
                     fsteps[np.isnan(fsteps)] = 0.0
                     loop_mpc.run(np.int(k), xref, fsteps)
                 else:
-                    dummy_fstep_planner.xref = xref
-                    dummy_fstep_planner.fsteps = fsteps
-                    loop_mpc.solve(k, dummy_fstep_planner)
+                    loop_mpc.solve(k, xref.copy(), fsteps.copy())
 
                 # Store the result (predicted state + desired forces) in the shared memory
                 # print(len(self.dataOut))
@@ -261,7 +278,10 @@ class MPC_Wrapper:
         """Return the result of the asynchronous MPC (desired contact forces) that is stored in the shared memory
         """
 
-        return np.array(self.dataOut[:]).reshape((24, -1), order='F')
+        if self.mpc_type == 3:  # Need more space to store optimized footsteps
+            return np.array(self.dataOut[:]).reshape((32, -1), order='F')
+        else:
+            return np.array(self.dataOut[:]).reshape((24, -1), order='F')
 
     def roll_asynchronous(self, fsteps):
         """Move one step further in the gait cycle. Since the output of the asynchronous MPC is retrieved by
diff --git a/scripts/QP_WBC.py b/scripts/QP_WBC.py
index 4dc1f738a3e62066d2a3af04a03e955b409ea421..95e6b2cc9f0e7526d7e40cf95c43816cb58db0b0 100644
--- a/scripts/QP_WBC.py
+++ b/scripts/QP_WBC.py
@@ -90,7 +90,7 @@ class wbc_controller():
         q_tmp[6, 0] = 1.0
         self.M = pin.crba(self.robot.model, self.robot.data, q_tmp)
 
-        self.M[:6, :6] = self.M[:6, :6] * (np.eye(6) == 1)  # (self.M[:6, :6] > 1e-3)
+        # self.M[:6, :6] = self.M[:6, :6] * (np.eye(6) == 1)  # (self.M[:6, :6] > 1e-3)
 
         # Compute Jacobian of contact points
         pin.computeJointJacobians(self.robot.model, self.robot.data, q)
diff --git a/scripts/crocoddyl_class/MPC_crocoddyl.py b/scripts/crocoddyl_class/MPC_crocoddyl.py
index ecbae0cee7beb641fb390252cff3c17a119acf02..87d9b604ff84337355e53b268dd61892342590a5 100644
--- a/scripts/crocoddyl_class/MPC_crocoddyl.py
+++ b/scripts/crocoddyl_class/MPC_crocoddyl.py
@@ -17,7 +17,7 @@ class MPC_crocoddyl:
         linearModel(bool) : Approximation in the cross product by using desired state
     """
 
-    def __init__(self, dt=0.02, T_mpc=0.32,  mu=1, inner=True, linearModel=True, n_period=1):
+    def __init__(self, dt=0.02, T_mpc=0.32,  mu=1, inner=True, linearModel=True, N_gait=20):
 
         # Time step of the solver
         self.dt = dt
@@ -74,7 +74,7 @@ class MPC_crocoddyl:
         self.max_fz = 25
 
         # Gait matrix
-        self.gait = np.zeros((20, 4))
+        self.gait = np.zeros((N_gait, 4))
         self.index = 0
 
         # Weight on the shoulder term :
@@ -82,7 +82,7 @@ class MPC_crocoddyl:
         self.shoulder_hlim = 0.27
 
         # Position of the feet
-        self.fsteps = np.full((20, 12), np.nan)
+        self.fsteps = np.full((N_gait, 12), np.nan)
 
         # List of the actionModel
         self.ListAction = []
@@ -90,7 +90,7 @@ class MPC_crocoddyl:
         # Initialisation of the List model using ActionQuadrupedModel()
         # The same model cannot be used [model]*(T_mpc/dt) because the dynamic
         # model changes for each nodes.
-        for i in range(int(self.T_mpc/self.dt)*n_period):
+        for i in range(int(self.T_mpc/self.dt)):
             if linearModel:
                 model = quadruped_walkgen.ActionModelQuadruped()
             else:
@@ -174,7 +174,6 @@ class MPC_crocoddyl:
             self.ListAction[j].updateModel(np.reshape(self.fsteps[j, :], (3, 4), order='F'),
                                            xref[:, j+1], self.gait[j, :])
 
-
         # Update model of the terminal model
         self.terminalModel.updateModel(np.reshape(
             self.fsteps[self.index-1, :], (3, 4), order='F'), xref[:, -1], self.gait[self.index-1, :])
@@ -190,7 +189,6 @@ class MPC_crocoddyl:
             fsteps : feet predicted positions
         """
 
-
         # Update the dynamic depending on the predicted feet position
         self.updateProblem(fsteps, xref)
 
@@ -210,7 +208,6 @@ class MPC_crocoddyl:
         """print("1")
         from IPython import embed
         embed()"""
-
         self.ddp.solve(self.x_init,  self.u_init, self.max_iteration)
 
         """print("3")
diff --git a/scripts/crocoddyl_class/MPC_crocoddyl_planner.py b/scripts/crocoddyl_class/MPC_crocoddyl_planner.py
index 42e1cc0d06051662de9ec171582c4ee74dc24764..e01996310712f6a711a4d5dff6b99ed39bc7f793 100644
--- a/scripts/crocoddyl_class/MPC_crocoddyl_planner.py
+++ b/scripts/crocoddyl_class/MPC_crocoddyl_planner.py
@@ -1,18 +1,14 @@
 # coding: utf8
-import sys
-import os 
-from sys import argv
-sys.path.insert(0, os.getcwd()) # adds current directory to python path
+
 import crocoddyl
 import numpy as np
-import quadruped_walkgen
-import utils
+import quadruped_walkgen
 import pinocchio as pin
 
 
 class MPC_crocoddyl_planner():
-    """Wrapper class for the MPC problem to call the ddp solver and 
-    retrieve the results. 
+    """Wrapper class for the MPC problem to call the ddp solver and
+    retrieve the results.
 
     Args:
         dt (float): time step of the MPC
@@ -21,7 +17,7 @@ class MPC_crocoddyl_planner():
         inner(bool): Inside or outside approximation of the friction cone
     """
 
-    def __init__(self, dt = 0.02 , T_mpc = 0.32 ,  mu = 1, inner = True  , warm_start = False , min_fz = 0.0 , n_periods = 1):    
+    def __init__(self, dt=0.02, T_mpc=0.32, mu=1, inner=True, warm_start=False, min_fz=0.0, N_gait=20):
 
         # Time step of the solver
         self.dt = dt
@@ -29,17 +25,14 @@ class MPC_crocoddyl_planner():
         # Period of the MPC
         self.T_mpc = T_mpc
 
-        # Number of period : not used yet
-        self.n_periods = n_periods
-
-        # Mass of the robot 
-        self.mass = 2.50000279 
+        # Mass of the robot
+        self.mass = 2.50000279
 
-        # Inertia matrix of the robot in body frame 
+        # Inertia matrix of the robot in body frame
         # self.gI = np.diag([0.00578574, 0.01938108, 0.02476124])
         self.gI = np.array([[3.09249e-2, -8.00101e-7, 1.865287e-5],
-                        [-8.00101e-7, 5.106100e-2, 1.245813e-4],
-                        [1.865287e-5, 1.245813e-4, 6.939757e-2]])  
+                            [-8.00101e-7, 5.106100e-2, 1.245813e-4],
+                            [1.865287e-5, 1.245813e-4, 6.939757e-2]])
 
         # Friction coefficient
         if inner :
@@ -83,12 +76,12 @@ class MPC_crocoddyl_planner():
         self.min_fz = min_fz
 
         # Gait matrix
-        self.gait = np.zeros((20, 5))
-        self.gait_old = np.zeros((20, 5))
+        self.gait = np.zeros((20, 4))
+        self.gait_old = np.zeros((1, 4))
         self.index = 0
 
         # Position of the feet in local frame
-        self.fsteps = np.full((20, 13), 0.0)        
+        self.fsteps = np.full((20, 12), 0.0)
 
         # List of the actionModel
         self.ListAction = [] 
@@ -134,13 +127,13 @@ class MPC_crocoddyl_planner():
         self.ddp = None
 
         # Xs results without the actionStepModel
-        self.Xs = np.zeros((20,int(T_mpc/dt)*n_periods))
+        self.Xs = np.zeros((20, int(T_mpc/dt)))
         # self.Us = np.zeros((12,int(T_mpc/dt)))
 
         # Initial foot location (local frame, X,Y plan)
-        self.p0 = [ 0.1946,0.15005, 0.1946,-0.15005, -0.1946,   0.15005 ,-0.1946,  -0.15005]
+        self.p0 = [0.1946, 0.14695, 0.1946, -0.14695, -0.1946, 0.14695, -0.1946, -0.14695]
 
-    def solve(self, k, xref , l_feet ,  oMl = pin.SE3.Identity()):
+    def solve(self, k, xref , l_feet , oMl = pin.SE3.Identity()):
         """ Solve the MPC problem 
 
         Args:
@@ -153,14 +146,14 @@ class MPC_crocoddyl_planner():
         self.updateProblem( k , xref , l_feet , oMl)
 
         # Solve problem
-        self.ddp.solve(self.x_init,self.u_init, self.max_iteration)        
-        
+        self.ddp.solve(self.x_init, self.u_init, self.max_iteration) 
+
         # Get the results
         self.get_fsteps()
 
         return 0
 
-    def updateProblem(self,k,xref , l_feet , oMl = pin.SE3.Identity()):
+    def updateProblem(self, k, xref, l_feet, oMl = pin.SE3.Identity()):
         """Update the dynamic of the model list according to the predicted position of the feet, 
         and the desired state. 
 
@@ -168,44 +161,33 @@ class MPC_crocoddyl_planner():
         """
 
         self.oMl = oMl
-        # position of foot predicted by previous gait cycle in world frame
-        for i in range(4):
-            self.l_fsteps[:,i] = self.oMl.inverse() * self.o_fsteps[:,i] 
-    
-        if k > 0:            
-            # Move one step further in the gait 
-            # Add and remove step model in the list of model
-            self.roll() 
-            
-            # Update initial state of the problem
-            
-            if np.sum(self.gait[0,1:]) == 4 : 
-                # 4 contact --> need previous control cycle to know which foot was on the ground
-                # On swing phase before --> initialised below shoulder
-                p0 = np.repeat(np.array([1,1,1,1])-self.gait_old[0,1:],2)*self.p0  
-                # On the ground before -->  initialised with the current feet position
-                p0 +=  np.repeat(self.gait_old[0,1:],2)*l_feet[0:2,:].reshape(8, order = 'F')
-            else : 
-                # On swing phase before --> initialised below shoulder
-                p0 = np.repeat(np.array([1,1,1,1])-self.gait[0,1:],2)*self.p0  
-                # On the ground before -->  initialised with the current feet position
-                p0 +=  np.repeat(self.gait[0,1:],2)*l_feet[0:2,:].reshape(8, order = 'F')
-       
-        else : 
 
-            # Create gait matrix
-            self.create_walking_trot()
-            self.gait_old = self.gait 
- 
-            # First step : create the list of model
-            self.create_List_model()
-            # According to the current footstepplanner, the walk start on the next phase
-            self.roll()
-            # Update initial state of the problem with the shoulder position 
-            p0 = self.p0
+        # Save previous gait state before updating the gait
+        self.gait_old[0, :] = self.gait[0, :].copy()
+
+        # Reconstruct the gait based on the computed footsteps
+        a = 0
+        while np.any(l_feet[a, :]):
+            self.gait[a, :] = (l_feet[a, ::3] != 0.0).astype(int)
+            a += 1
+        self.gait[a:, :] = 0.0
+
+        # On swing phase before --> initialised below shoulder
+        p0 = (1.0 - np.repeat(self.gait[0,:], 2)) * self.p0
+        # On the ground before -->  initialised with the current feet position
+        p0 += np.repeat(self.gait[0,:], 2) * l_feet[0, [0, 1, 3, 4, 6, 7, 9, 10]]  # (x, y) of each foot
         
+        if k == 0:
+            # Create the list of models
+            self.create_List_model()
+
+            # By default we suppose we were in the same state previously
+            self.gait_old[0, :] = self.gait[0, :].copy()
+        else:
+            # Update list of models
+            self.roll_models()
+
         j = 0
-        k_cum = 0
         L = []
         # Iterate over all phases of the gait
         # The first column of xref correspond to the current state 
@@ -214,43 +196,45 @@ class MPC_crocoddyl_planner():
         self.u_init = []
         gap = 0
 
-        while (self.gait[j, 0] != 0):
-            
-            for i in range(k_cum, k_cum+np.int(self.gait[j, 0])):
-
-                if self.ListAction[i].__class__.__name__ == "ActionModelQuadrupedStep" :
-
-                    self.x_init.append(np.zeros(20))
-                    self.u_init.append(np.zeros(4))
-                    if i == 0 : 
-                        self.ListAction[i].updateModel(np.reshape(self.l_fsteps, (3, 4), order='F') , xref[:, i+gap]  , self.gait[0, 1:] - self.gait_old[0, 1:])
-                        
-                    else : 
-                        self.ListAction[i].updateModel(np.reshape(self.l_fsteps, (3, 4), order='F') , xref[:, i+gap]  , self.gait[j, 1:] - self.gait[j-1, 1:])
-
-                    self.ListAction[i+1].updateModel(np.reshape(self.l_fsteps, (3, 4), order='F') , xref[:, i+gap]  , self.gait[j, 1:])
-
-                    self.x_init.append(np.zeros(20))
-                    self.u_init.append(np.zeros(12))
-                    k_cum +=  1
-                    gap -= 1
-                    # self.ListAction[i+1].shoulderWeights = 2*np.array(4*[0.25,0.3])
-                    
-                else : 
-                    self.ListAction[i].updateModel(np.reshape(self.l_fsteps, (3, 4), order='F') , xref[:, i+gap]  , self.gait[j, 1:])                    
-                    self.x_init.append(np.zeros(20))
-                    self.u_init.append(np.zeros(12))
-
-            k_cum += np.int(self.gait[j, 0])
+        next_step_flag = True
+        next_step_index = 0  # Get index of incoming step for updatePositionWeights
+
+        while np.any(self.gait[j, :]):
+
+            if self.ListAction[j+gap].__class__.__name__ == "ActionModelQuadrupedStep" :
+                
+                if next_step_flag:
+                    next_step_index = j+gap
+                    next_step_flag = False
+
+                self.x_init.append(np.zeros(20))
+                self.u_init.append(np.zeros(4))
+
+                if j == 0: 
+                    self.ListAction[j+gap].updateModel(np.reshape(l_feet[j, :], (3, 4), order='F') , xref[:, j]  , self.gait[0, :] - self.gait_old[0, :])
+                else: 
+                    self.ListAction[j+gap].updateModel(np.reshape(l_feet[j, :], (3, 4), order='F') , xref[:, j]  , self.gait[j, :] - self.gait[j-1, :])
+
+                self.ListAction[j+gap+1].updateModel(np.reshape(l_feet[j, :], (3, 4), order='F') , xref[:, j]  , self.gait[j, :])
+                self.x_init.append(np.zeros(20))
+                self.u_init.append(np.zeros(12))
+
+                gap += 1
+                # self.ListAction[j+1].shoulderWeights = 2*np.array(4*[0.25,0.3])
+                
+            else: 
+                self.ListAction[j+gap].updateModel(np.reshape(l_feet[j, :], (3, 4), order='F') , xref[:, j]  , self.gait[j, :])                    
+                self.x_init.append(np.zeros(20))
+                self.u_init.append(np.zeros(12))
+
             j += 1 
 
         if k > self.start_stop_optim : 
             # Update the lastPositionweight
-            self.updatePositionWeights()
-        
+            self.updatePositionWeights(next_step_index)
         
-        # # Update model of the terminal model
-        self.terminalModel.updateModel(np.reshape(self.fsteps[j-1, 1:], (3, 4), order='F') , xref[:,-1] , self.gait[j-1, 1:])
+        # Update the terminal model  # TODO: check that the correct row of l_feet is used
+        self.terminalModel.updateModel(np.reshape(l_feet[j-1, :], (3, 4), order='F') , xref[:,-1] , self.gait[j-1, :])
         self.x_init.append(np.zeros(20))
 
         # Shooting problem
@@ -267,10 +251,22 @@ class MPC_crocoddyl_planner():
         """Return the desired contact forces that have been computed by the last iteration of the MPC
         Args:
         """
-        if self.ListAction[0].__class__.__name__ == "ActionModelQuadrupedStep" :
-            return np.repeat(self.gait[0,1:] , 3)*np.reshape(np.asarray(self.ddp.us[1])  , (12,))
-        else :
-            return np.repeat(self.gait[0,1:] , 3)*np.reshape(np.asarray(self.ddp.us[0])  , (12,))
+
+        cpt = 0
+        N = int(self.T_mpc / self.dt)
+        result = np.zeros((32, N))
+        for i in range(len(self.ListAction)):
+            if self.ListAction[i].__class__.__name__ != "ActionModelQuadrupedStep" :
+                if cpt >= N:
+                    raise ValueError("Too many action models for the current MPC prediction horizon")
+                result[:12, cpt] = self.ddp.xs[i][:12]
+                result[12:24, cpt] = self.ddp.us[i] # * np.repeat(self.gait[cpt, :] , 3)
+                result[24:, cpt] = self.ddp.xs[i][12:]
+                cpt += 1
+                if i > 0 and self.ListAction[i-1].__class__.__name__ == "ActionModelQuadrupedStep" :
+                    print(self.ddp.xs[i][12:])
+
+        return result
 
     def update_model_augmented(self , model):
         '''Set intern parameters for augmented model type
@@ -317,20 +313,17 @@ class MPC_crocoddyl_planner():
 
         # Iterate over all phases of the gait
         # The first column of xref correspond to the current state 
-        while (self.gait[j, 0] != 0):
-            for i in range(k_cum, k_cum+np.int(self.gait[j, 0])):
+        while np.any(self.gait[j, :]):
 
-                model = quadruped_walkgen.ActionModelQuadrupedAugmented()
+            model = quadruped_walkgen.ActionModelQuadrupedAugmented()
 
-                # Update intern parameters
-                self.update_model_augmented(model)
+            # Update intern parameters
+            self.update_model_augmented(model)
 
-                # Add model to the list of model
-                self.ListAction.append(model)
+            # Add model to the list of model
+            self.ListAction.append(model)
 
-            
-            
-            if np.sum(self.gait[j+1, 1:]) == 4 : # No optimisation on the first line
+            if np.any(self.gait[j+1, :]) and not np.array_equal(self.gait[j, :], self.gait[j+1, :]):  # Add a step model when the contact status changes
 
                 model = quadruped_walkgen.ActionModelQuadrupedStep()
                 # Update intern parameters
@@ -339,7 +332,6 @@ class MPC_crocoddyl_planner():
                 # Add model to the list of model
                 self.ListAction.append(model)
             
-            k_cum += np.int(self.gait[j, 0])
             j += 1
 
         # Model parameters of terminal node  
@@ -372,59 +364,35 @@ class MPC_crocoddyl_planner():
         # Starting status of the gait
         # 4-stance phase, 2-stance phase, 4-stance phase, 2-stance phase
         self.gait = np.zeros((self.fsteps.shape[0], 5))
-        for i in range(self.n_periods):
-            self.gait[(4*i):(4*(i+1)), 0] = np.array([1, N-1, 1, N-1])
-            self.fsteps[(4*i):(4*(i+1)), 0] = self.gait[(4*i):(4*(i+1)), 0]
-
-            # Set stance and swing phases
-            # Coefficient (i, j) is equal to 0.0 if the j-th feet is in swing phase during the i-th phase
-            # Coefficient (i, j) is equal to 1.0 if the j-th feet is in stance phase during the i-th phase
-            self.gait[4*i+0, 1:] = np.ones((4,))
-            self.gait[4*i+1, [1, 4]] = np.ones((2,))
-            self.gait[4*i+2, 1:] = np.ones((4,))
-            self.gait[4*i+3, [2, 3]] = np.ones((2,))
 
-        return 0
+        self.gait[0:4, 0] = np.array([1, N-1, 1, N-1])
+        self.fsteps[0:4, 0] = self.gait[0:4, 0]
 
-    def roll(self):
-        """Move one step further in the gait cycle
+        # Set stance and swing phases
+        # Coefficient (i, j) is equal to 0.0 if the j-th foot is in swing phase during the i-th phase
+        # Coefficient (i, j) is equal to 1.0 if the j-th foot is in stance phase during the i-th phase
+        self.gait[0, 1:] = np.ones((4,))
+        self.gait[1, [1, 4]] = np.ones((2,))
+        self.gait[2, 1:] = np.ones((4,))
+        self.gait[3, [2, 3]] = np.ones((2,))
 
-        Decrease by 1 the number of remaining step for the current phase of the gait and increase
-        by 1 the number of remaining step for the last phase of the gait (periodic motion)
+        return 0
 
+    def roll_models(self):
+        """
+        Move one step further in the gait cycle.
+        Add and remove the corresponding models in ListAction.
         """
-        self.gait_old = self.gait 
-
-        # Index of the first empty line
-        index = next((idx for idx, val in np.ndenumerate(self.gait[:, 0]) if val==0.0), 0.0)[0]
-
-        # Create a new phase if needed or increase the last one by 1 step
-        if np.array_equal(self.gait[0, 1:], self.gait[index-1, 1:]):
-            self.gait[index-1, 0] += 1.0
-        else:
-            self.gait[index, 1:] = self.gait[0, 1:]
-            self.gait[index, 0] = 1.0
 
         # Remove first model
+        flag = False
         if self.ListAction[0].__class__.__name__ == "ActionModelQuadrupedStep" :
             self.ListAction.pop(0)
+            flag = True
         model = self.ListAction.pop(0)
 
-        # Decrease the current phase by 1 step and delete it if it has ended
-
-        if self.gait[0, 0] > 1.0:
-            self.gait[0, 0] -= 1.0
-        else:
-            self.gait = np.roll(self.gait, -1, axis=0)
-            self.gait[-1, :] = np.zeros((5, ))
-
-      
-        # Get new Index of the first empty line
-        index = next((idx for idx, val in np.ndenumerate(self.gait[:, 0]) if val==0.0), 0.0)[0]
-
         # Add last model & step model if needed
-        if np.sum(self.gait[index - 1, 1:]) == 4 and self.gait[index - 1, 0 ] != 0: 
+        if flag: # not np.array_equal(self.gait_old[0, :], self.gait[0, :]):
             modelStep = quadruped_walkgen.ActionModelQuadrupedStep()
             self.update_model_step(modelStep)
 
@@ -471,16 +439,11 @@ class MPC_crocoddyl_planner():
         j = 0
         k_cum = 0
 
-        self.fsteps[:,0] = self.gait[:,0]
-
         # Iterate over all phases of the gait
-        while (self.gait[j, 0] != 0):
-            for i in range(k_cum, k_cum+np.int(self.gait[j, 0])):
-                self.fsteps[j ,1: ] = np.repeat(self.gait[j,1:] , 3)*np.concatenate([self.Xs[12:14 , k_cum ],[0.],self.Xs[14:16 , k_cum ],[0.],
-                                                                                    self.Xs[16:18 , k_cum ],[0.],self.Xs[18:20 , k_cum ],[0.]])           
-
-            k_cum += np.int(self.gait[j, 0])
-            j += 1       
+        while np.any(self.gait[j, :]):
+            self.fsteps[j , :] = np.repeat(self.gait[j,:] , 3)*np.concatenate([self.Xs[12:14 , j],[0.],self.Xs[14:16 , j],[0.],
+                                                                                self.Xs[16:18 , j],[0.],self.Xs[18:20 , j],[0.]])           
+            j += 1
 
         ####################################################
         # Compute the current position of feet in contact
@@ -489,20 +452,20 @@ class MPC_crocoddyl_planner():
         #####################################################
 
         for i in range(4):
-            index = next((idx for idx, val in np.ndenumerate(self.fsteps[:, 3*i+1]) if ((not (val==0)) and (not np.isnan(val)))), [-1])[0]
-            pos_tmp = np.reshape(np.array(self.oMl * (np.array([self.fsteps[index, (1+i*3):(4+i*3)]]).transpose())) , (3,1) )
+            index = next((idx for idx, val in np.ndenumerate(self.fsteps[:, 3*i]) if ((not (val==0)) and (not np.isnan(val)))), [-1])[0]
+            pos_tmp = np.reshape(np.array(self.oMl * (np.array([self.fsteps[index, (i*3):(3+i*3)]]).transpose())) , (3,1) )
             self.o_fsteps[:2, i] = pos_tmp[0:2, 0]
 
         return self.fsteps
 
-    def updatePositionWeights(self) : 
+    def updatePositionWeights(self, next_step_index) : 
 
         """Update the parameters in the ListAction to keep the next foot position at the same position computed by the 
          previous control cycle and avoid re-optimization at the end of the flying phase
         """
 
-        if self.gait[0,0] == self.index_stop : 
-             self.ListAction[int(self.gait[0,0])+ 1].lastPositionWeights =  np.repeat((np.array([1,1,1,1]) - self.gait[0,1:]) , 2 )*  self.lastPositionWeights
+        if next_step_index == self.index_stop : 
+            self.ListAction[next_step_index + 1].lastPositionWeights =  np.repeat((np.array([1,1,1,1]) - self.gait[0, :]) , 2 )*  self.lastPositionWeights
        
         return 0
 
diff --git a/scripts/test_mpc.py b/scripts/test_mpc.py
index 18324d664a4ef345d44ff09aa0cc71f72c38704a..b53d3092dc8ca8f27f797e96e46e925213197565 100644
--- a/scripts/test_mpc.py
+++ b/scripts/test_mpc.py
@@ -29,7 +29,7 @@ class TestMPC(unittest.TestCase):
     def setUp(self):
 
         self.mass = 2.50000279
-        type_MPC = True
+        type_MPC = 0
         dt_wbc = 0.002
         dt_mpc = 0.02
         k_mpc = int(dt_mpc / dt_wbc)
diff --git a/src/Params.cpp b/src/Params.cpp
index a39c95c28d108db1ecba7ddf4e4ee5c13159b1de..5bbf347ffcff0e4dd2777d43c0cdf6c8b8f68976 100644
--- a/src/Params.cpp
+++ b/src/Params.cpp
@@ -15,7 +15,7 @@ Params::Params()
     , T_gait(0.0)
     , T_mpc(0.0)
     , N_SIMULATION(0)
-    , type_MPC(false)
+    , type_MPC(0)
     , use_flat_plane(false)
     , predefined_vel(false)
     , kf_enabled(false)
@@ -72,7 +72,7 @@ void Params::initialize(const std::string& file_path)
     N_SIMULATION = robot_node["N_SIMULATION"].as<int>();
 
     assert_yaml_parsing(robot_node, "robot", "type_MPC");
-    type_MPC = robot_node["type_MPC"].as<bool>();
+    type_MPC = robot_node["type_MPC"].as<int>();
 
     assert_yaml_parsing(robot_node, "robot", "use_flat_plane");
     use_flat_plane = robot_node["use_flat_plane"].as<bool>();
@@ -86,4 +86,7 @@ void Params::initialize(const std::string& file_path)
     assert_yaml_parsing(robot_node, "robot", "enable_pyb_GUI");
     enable_pyb_GUI = robot_node["enable_pyb_GUI"].as<bool>();
 
+    assert_yaml_parsing(robot_node, "robot", "test_list");
+    test_list = robot_node["test_list"].as<std::vector<double> >();
+
 }
diff --git a/src/config_solo12.yaml b/src/config_solo12.yaml
index bfad36d255f931f5905528e48501ec87e4f07170..83b940bbcf3fefdc3828abe3010944790c6ca7d7 100644
--- a/src/config_solo12.yaml
+++ b/src/config_solo12.yaml
@@ -11,9 +11,10 @@ robot:
     dt_mpc: 0.02  # Time step of the model predictive control
     T_gait: 0.32  # Duration of one gait period
     T_mpc: 0.32  # Duration of the prediction horizon
-    N_SIMULATION: 3000  # Number of simulated wbc time steps
-    type_MPC: true  # Which MPC solver you want to use: True to have PA's MPC, to False to have Thomas's MPC
+    N_SIMULATION: 10000  # Number of simulated wbc time steps
+    type_MPC: 3  # Which MPC solver you want to use: 0 for the OSQP MPC, 1, 2 or 3 for the Crocoddyl MPCs (linear, non-linear, non-linear with footstep optimization)
     use_flat_plane: true  # If True the ground is flat, otherwise it has bumps
     predefined_vel: true  # If we are using a predefined reference velocity (True) or a joystick (False)
     kf_enabled: false  # Use complementary filter (False) or kalman filter (True) for the estimator 
-    enable_pyb_GUI: true  # Enable/disable PyBullet GUI
+    enable_pyb_GUI: true # Enable/disable PyBullet GUI
+    test_list: [1.0, 2.0, 3.0, 1.0]