diff --git a/include/quadruped-reactive-walking/MPC.hpp b/include/quadruped-reactive-walking/MPC.hpp
index bdb3d1dec36d3ba0cc0a4207b36cf0fcf4bc0430..d52fb4abda749b0c6e2c0ee57e7c4f4382b07d0b 100644
--- a/include/quadruped-reactive-walking/MPC.hpp
+++ b/include/quadruped-reactive-walking/MPC.hpp
@@ -36,7 +36,8 @@ class MPC {
   Eigen::Matrix<double, 12, 12> B = Eigen::Matrix<double, 12, 12>::Zero();
   Eigen::Matrix<double, 12, 1> x0 = Eigen::Matrix<double, 12, 1>::Zero();
   double x_next[12] = {};
-  Eigen::MatrixXd x_f_applied = Eigen::MatrixXd::Zero(1, 24);
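+  // Predicted states (rows 0-11) and contact forces (rows 12-23) for each time step of the horizon; resized to (24, n_steps) in the constructor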
+  Eigen::MatrixXd x_f_applied;
 
   // Matrix ML
   const static int size_nz_ML = 5000;
diff --git a/scripts/Controller.py b/scripts/Controller.py
index f0403e34e512695cfeaa36a9412c9633266d1c22..0a98dc67eb2c1d26a8c7b8077036226dd0d10849 100644
--- a/scripts/Controller.py
+++ b/scripts/Controller.py
@@ -234,7 +234,8 @@ class Controller:
         t_mpc = time.time()
 
         # Target state for the whole body control
-        self.x_f_wbc = self.x_f_mpc.copy()
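+        # The MPC result now covers the whole prediction horizon; keep only its first column (first time step) as the target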
+        self.x_f_wbc = (self.x_f_mpc[:, 0]).copy()
         if not self.planner.is_static:
             self.x_f_wbc[0] = self.q_estim[0, 0]
             self.x_f_wbc[1] = self.q_estim[1, 0]
diff --git a/scripts/MPC_Wrapper.py b/scripts/MPC_Wrapper.py
index 071432cc12f9a850e7492f55a97f35d48d45fd06..9a6724a071348bd9deb138237ba468a02efabaa2 100644
--- a/scripts/MPC_Wrapper.py
+++ b/scripts/MPC_Wrapper.py
@@ -51,7 +51,7 @@ class MPC_Wrapper:
             self.newData = Value('b', False)
             self.newResult = Value('b', False)
             self.dataIn = Array('d', [0.0] * (1 + (np.int(self.n_steps)+1) * 12 + 13*20))
-            self.dataOut = Array('d', [0] * 24)
+            self.dataOut = Array('d', [0] * 24 * (np.int(self.n_steps)))
             self.fsteps_future = np.zeros((20, 13))
             self.running = Value('b', True)
         else:
@@ -66,7 +66,8 @@ class MPC_Wrapper:
         x_init = np.zeros(12)
         x_init[0:3] = q_init[0:3, 0]
         x_init[3:6] = quaternionToRPY(q_init[3:7, 0]).ravel()
-        self.last_available_result = np.hstack((x_init, np.array([0.0, 0.0, 8.0] * 4)))
+        self.last_available_result = np.zeros((24, (np.int(self.n_steps))))
+        self.last_available_result[:, 0] = np.hstack((x_init, np.array([0.0, 0.0, 8.0] * 4)))
 
     def solve(self, k, fstep_planner):
         """Call either the asynchronous MPC or the synchronous MPC depending on the value of multiprocessing during
@@ -87,10 +88,10 @@ class MPC_Wrapper:
             mass = 2.5
             nb_ctc = np.sum(fstep_planner.gait[0, 1:])
             F = 9.81 * mass / nb_ctc
-            self.last_available_result[12:] = np.zeros(12)
+            self.last_available_result[12:, 0] = np.zeros(12)
             for i in range(4):
                 if (fstep_planner.gait[0, 1+i] == 1):
-                    self.last_available_result[12+3*i+2] = F
+                    self.last_available_result[12+3*i+2, 0] = F
 
         return 0
 
@@ -206,7 +207,10 @@ class MPC_Wrapper:
                     loop_mpc.solve(k, dummy_fstep_planner)
 
                 # Store the result (predicted state + desired forces) in the shared memory
-                self.dataOut[:] = loop_mpc.get_latest_result().tolist()
+                # The latest result is a (24, n_steps) matrix; flatten it column by column so that
+                # each consecutive block of 24 values in the shared array corresponds to one time step
+
+                self.dataOut[:] = loop_mpc.get_latest_result().ravel(order='F')
 
                 # Set shared variable to true to signal that a new result is available
                 newResult.value = True
@@ -250,7 +254,8 @@
         """Return the result of the asynchronous MPC (desired contact forces) that is stored in the shared memory
         """
 
-        return np.array(self.dataOut[:])
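+        # Rebuild the (24, n_steps) matrix from the flat shared array, matching the column-major flattening used when it was written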
+        return np.array(self.dataOut[:]).reshape((24, -1), order='F')
 
     def roll_asynchronous(self, fsteps):
         """Move one step further in the gait cycle. Since the output of the asynchronous MPC is retrieved by
diff --git a/src/MPC.cpp b/src/MPC.cpp
index 5738381ec59096abeaca4301ea58efba1003f9bc..ca459a1b42aee468473fbf0a197f630dc1ab36cb 100644
--- a/src/MPC.cpp
+++ b/src/MPC.cpp
@@ -9,6 +9,7 @@ MPC::MPC(double dt_in, int n_steps_in, double T_gait_in) {
   x = Eigen::Matrix<double, Eigen::Dynamic, 1>::Zero(12 * n_steps * 2, 1);
   S_gait = Eigen::Matrix<int, Eigen::Dynamic, 1>::Zero(12 * n_steps, 1);
   warmxf = Eigen::Matrix<double, Eigen::Dynamic, 1>::Zero(12 * n_steps * 2, 1);
+  x_f_applied = Eigen::MatrixXd::Zero(24, n_steps);
 
   // Predefined variables
   mass = 2.50000279f;
@@ -560,10 +561,16 @@ Extract relevant information from the output of the QP solver
 */
 int MPC::retrieve_result() {
   // Retrieve the "contact forces" part of the solution of the QP problem
+  for (int i = 0; i < (n_steps); i++) {
+    for (int k = 0; k < 12; k++) {
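+      // The QP solves for deviations from the reference, so xref is added back to obtain the predicted state
+      // The 12*n_steps contact force variables follow the 12*n_steps state variables in the solution vector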
+      x_f_applied(k, i) = (workspce->solution->x)[k + 12*i] + xref(k, 1+i);
+      x_f_applied(k + 12, i) = (workspce->solution->x)[12 * (n_steps+i) + k];
+    }
+  }
   for (int k = 0; k < 12; k++) {
     x_next[k] = (workspce->solution->x)[k];
-    x_f_applied(0, k) = (workspce->solution->x)[k] + xref(k, 1);
-    x_f_applied(0, k + 12) = (workspce->solution->x)[12 * n_steps + k];
   }
 
   /*std::cout << "SOLUTION States" << std::endl;