From eecb4df49a0c5816bf5f197214f1671992eb70f6 Mon Sep 17 00:00:00 2001
From: Ale <alessandroassirell98@gmail.com>
Date: Fri, 29 Jul 2022 16:42:01 +0200
Subject: [PATCH] Reduce the whole-body MPC to an FR-leg-only model

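Reduce the whole-body MPC to a single-leg model: the base and the
FL/HL/HR legs are frozen in ProblemDataFull, so the OCP only optimizes
the three FR joints and works on a 6-dimensional state. The controller
pads the 3 actuated feedforward torques back into the 12-joint command
and applies the interpolated desired position/velocity on joints 3:6
only.

The state regularization and terminal velocity weights and the initial
reduced configuration are adapted to the 6D state. dt_mpc goes from
15 ms to 10 ms, the horizon from 15 to 90 nodes, N_SIMULATION from 5000
to 10000 WBC steps, and Kd_main from 0.1 to 0.2. clamp_result and
security_check are disabled and the position-error threshold loosened
while the reduced model is under test.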
---
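Editorial notes (not part of the diff): the two sketches below
illustrate the mechanisms this patch relies on; the standalone names
(robot, frozen_ids, reduced_tau_ff, ...) are hypothetical.

Freezing joints in Pinocchio, a minimal sketch assuming the solo12
loader from example_robot_data:

    import example_robot_data as erd
    import pinocchio as pin

    robot = erd.load("solo12")
    frozen_names = ["root_joint",
                    "FL_HAA", "FL_HFE", "FL_KFE",
                    "HL_HAA", "HL_HFE", "HL_KFE",
                    "HR_HAA", "HR_HFE", "HR_KFE"]
    frozen_ids = [robot.model.getJointId(n) for n in frozen_names]

    # The listed joints are welded at their value in robot.q0; only the
    # three FR joints remain actuated, so nq == nv == 3.
    reduced = pin.buildReducedModel(robot.model, frozen_ids, robot.q0)
    print(reduced.nq, reduced.nv)  # 3 3

Riccati feedback on the reduced state, padded back into the 12-joint
command, mirroring the expressions in Controller.py (hypothetical
helper; us0 and K0 are the first feedforward control and gain returned
by the OCP):

    import numpy as np
    import pinocchio as pin

    def reduced_tau_ff(model, us0, K0, x_m, x_plan, nq):
        # Configuration error on the manifold, velocity error linearly.
        dx = np.concatenate([
            pin.difference(model, x_m[:nq], x_plan[:nq]),
            x_m[nq:] - x_plan[nq:],
        ])
        tau_fr = us0 + K0 @ dx  # 3 actuated FR torques
        # solo12 joint order is FL | FR | HL | HR, three joints per leg.
        return np.array([0.0] * 3 + list(tau_fr) + [0.0] * 6)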
 config/walk_parameters.yaml                   |  6 +++---
 .../quadruped_reactive_walking/Controller.py  | 17 +++++++++--------
 .../WB_MPC/ProblemData.py                     | 16 ++++++----------
 .../WB_MPC/Target.py                          |  2 +-
 .../main_solo12_control.py                    |  4 ++--
 .../tools/LoggerControl.py                    | 16 +++++++++++++++-
 6 files changed, 36 insertions(+), 25 deletions(-)

diff --git a/config/walk_parameters.yaml b/config/walk_parameters.yaml
index a500cddb..c1ca5bb1 100644
--- a/config/walk_parameters.yaml
+++ b/config/walk_parameters.yaml
@@ -11,7 +11,7 @@ robot:
     envID: 0  # Identifier of the environment to choose in which one the simulation will happen
     use_flat_plane: true  # If True the ground is flat, otherwise it has bumps
     predefined_vel: true  # If we are using a predefined reference velocity (True) or a joystick (False)
-    N_SIMULATION: 5000  # Number of simulated wbc time steps
+    N_SIMULATION: 10000  # Number of simulated wbc time steps
     enable_corba_viewer: false  # Enable/disable Corba Viewer
     enable_multiprocessing: false  # Enable/disable running the MPC in another process in parallel of the main loop
     perfect_estimator: true  # Enable/disable perfect estimator by using data directly from PyBullet
@@ -22,12 +22,12 @@ robot:
     # q_init: [0.0, 0.764, -1.407, 0.0, 0.76407, -1.4, 0.0, 0.76407, -1.407, 0.0, 0.764, -1.407]  # h_com = 0.218
     q_init: [0.0, 0.7, -1.4, 0.0, 0.7, -1.4, 0.0, -0.7, 1.4, 0.0, -0.7, 1.4]  # Initial articular positions
     dt_wbc: 0.001  # Time step of the whole body control
-    dt_mpc: 0.015  # Time step of the model predictive control
+    dt_mpc: 0.01  # Time step of the model predictive control
     type_MPC: 3  # Which MPC solver you want to use: 0 for OSQP MPC, 1, 2, 3 for Crocoddyl MPCs
 #     Kp_main: [0.0, 0.0, 0.0]  # Proportional gains for the PD+
     Kp_main: [3, 3, 3]  # Proportional gains for the PD+
 #     Kd_main: [0., 0., 0.]  # Derivative gains for the PD+
-    Kd_main: [0.1, 0.1, 0.1]  # Derivative gains for the PD+
+    Kd_main: [0.2, 0.2, 0.2]  # Derivative gains for the PD+
 #     Kff_main: 0.0  # Feedforward torques multiplier for the PD+
     Kff_main: 1.0  # Feedforward torques multiplier for the PD+
 
diff --git a/python/quadruped_reactive_walking/Controller.py b/python/quadruped_reactive_walking/Controller.py
index 13ba4aab..a9c3570f 100644
--- a/python/quadruped_reactive_walking/Controller.py
+++ b/python/quadruped_reactive_walking/Controller.py
@@ -145,18 +145,19 @@ class Controller:
                                             np.array(self.mpc_result.xs)[1, :self.pd.nq], \
                                             np.array(self.mpc_result.xs)[1, self.pd.nq:], self.pd.r1)
             
-            self.q = self.q_interpolated[self.cnt_wbc]
-            self.v = self.v_interpolated[self.cnt_wbc]
+            self.q[3:6] = self.q_interpolated[self.cnt_wbc]
+            self.v[3:6] = self.v_interpolated[self.cnt_wbc]
 
             # self.result.P = np.array(self.params.Kp_main.tolist() * 4)
             # self.result.D = np.array(self.params.Kd_main.tolist() * 4)
             self.result.FF = self.params.Kff_main * np.ones(12)
             self.result.q_des = self.q
             self.result.v_des = self.v
-            self.result.tau_ff = self.mpc_result.us[0] + np.dot(self.mpc_result.K[0], 
+            actuated_tau_ff = self.mpc_result.us[0] + np.dot(self.mpc_result.K[0], 
                                                          np.concatenate([pin.difference(self.pd.model, m["x_m"][: self.pd.nq],
                                                                                         self.mpc_result.xs[0][: self.pd.nq]), 
                                                                        m["x_m"][self.pd.nq:] - self.mpc_result.xs[0][self.pd.nq:]]))
+            self.result.tau_ff = np.array([0] * 3 + list(actuated_tau_ff) + [0] * 6)
 
             self.xs_init = self.mpc_result.xs[1:] + [self.mpc_result.xs[-1]]
             self.us_init = self.mpc_result.us[1:] + [self.mpc_result.us[-1]]
@@ -164,8 +165,8 @@
         t_send = time.time()
         self.t_send = t_send - t_mpc
 
-        self.clamp_result(device)
-        self.security_check(m)
+        # self.clamp_result(device)
+        # self.security_check(m)
 
         if self.error:
             self.set_null_control()
@@ -286,14 +287,14 @@
         # if self.pd.useFixedBase == 0:
         #     x_m = np.concatenate([bp_m, qj_m, bv_m, vj_m])
         # else:
-        x_m = np.concatenate([qj_m, vj_m])
+        x_m = np.concatenate([qj_m[3:6], vj_m[3:6]])
 
         return {"qj_m": qj_m, "vj_m": vj_m, "x_m": x_m}
 
     def interpolate_traj(self, device, q_des, v_des, ratio):
         measures = self.read_state(device)
-        qj_des_i = np.linspace(measures["qj_m"], q_des, ratio)
-        vj_des_i = np.linspace(measures["vj_m"], v_des, ratio)
+        qj_des_i = np.linspace(measures["qj_m"][3:6], q_des, ratio)
+        vj_des_i = np.linspace(measures["vj_m"][3:6], v_des, ratio)
         self.interpolated = True
 
         return qj_des_i, vj_des_i
diff --git a/python/quadruped_reactive_walking/WB_MPC/ProblemData.py b/python/quadruped_reactive_walking/WB_MPC/ProblemData.py
index a4e769a4..366ad5ae 100644
--- a/python/quadruped_reactive_walking/WB_MPC/ProblemData.py
+++ b/python/quadruped_reactive_walking/WB_MPC/ProblemData.py
@@ -8,7 +8,7 @@ class problemDataAbstract:
         self.dt_sim = param.dt_wbc
         self.r1 = int(self.dt / self.dt_sim)
         self.init_steps = 0
-        self.target_steps =  15
+        self.target_steps = 90
         self.T = self.init_steps + self.target_steps -1
 
         self.robot = erd.load("solo12")
@@ -102,27 +102,23 @@ class ProblemData(problemDataAbstract):
 
 class ProblemDataFull(problemDataAbstract):
     def __init__(self, param):
-        frozen_names = ["root_joint"]
+        frozen_names = ["root_joint", "FL_HAA", "FL_HFE", "FL_KFE",
+                        "HL_HAA", "HL_HFE", "HL_KFE",
+                        "HR_HAA", "HR_HFE", "HR_KFE"]
         super().__init__(param, frozen_names)
         
         self.useFixedBase = 1
 
         # Cost function weights
         self.mu = 0.7
         self.foot_tracking_w = 1e3
         #self.friction_cone_w = 1e3 * 0
         self.control_bound_w = 1e3
         self.control_reg_w = 1e0
-        self.state_reg_w = np.array([1e2] * 3 \
-                            + [1e-3] * 3\
-                            + [1e2] * 6
-                            + [1e1] * 3 \
-                            + [1e0] * 3\
-                            + [1e1] * 6
-                            )
-        self.terminal_velocity_w = np.array([0] * 12 + [1e3] * 12 )
+        self.state_reg_w = np.array([1e-5] * 3 + [3e-1] * 3)
+        self.terminal_velocity_w = np.array([0] * 3 + [1e3] * 3)
 
-        self.q0_reduced = self.q0[7 :]
+        self.q0_reduced = self.q0[10:13]
         self.v0_reduced = np.zeros(self.nq)
         self.x0_reduced = np.concatenate([self.q0_reduced, self.v0_reduced])
 
diff --git a/python/quadruped_reactive_walking/WB_MPC/Target.py b/python/quadruped_reactive_walking/WB_MPC/Target.py
index dcda5e0e..984711db 100644
--- a/python/quadruped_reactive_walking/WB_MPC/Target.py
+++ b/python/quadruped_reactive_walking/WB_MPC/Target.py
@@ -23,7 +23,7 @@ class Target:
         self.FR_foot0 = pd.rdata.oMf[pd.rfFootId].translation.copy()
         self.A = np.array([0, 0.03, 0.03])
         self.offset = np.array([0.05, -0.02, 0.06])
-        self.freq = np.array([0, 0.5*0, 0.5 * 0])
+        self.freq = np.array([0, 0.5 * 0, 0.5 * 0])
         self.phase = np.array([0, np.pi / 2, 0])
 
     def patternToId(self, gait):
diff --git a/python/quadruped_reactive_walking/main_solo12_control.py b/python/quadruped_reactive_walking/main_solo12_control.py
index e5535345..4773623a 100644
--- a/python/quadruped_reactive_walking/main_solo12_control.py
+++ b/python/quadruped_reactive_walking/main_solo12_control.py
@@ -85,7 +85,7 @@ def check_position_error(device, controller):
         device (robot wrapper): a wrapper to communicate with the robot
         controller (array): the controller storing the desired position
     """
-    if np.max(np.abs(controller.result.q_des - device.joints.positions)) > 0.15:
+    if np.max(np.abs(controller.result.q_des - device.joints.positions)) > 15:
         print("DIFFERENCE: ", controller.result.q_des - device.joints.positions)
         print("q_des: ", controller.result.q_des)
         print("q_mes: ", device.joints.positions)
@@ -147,7 +147,7 @@ def control_loop():
         qc = QualisysClient(ip="140.93.16.160", body_id=0)
 
     if params.LOGGING or params.PLOTTING:
-        loggerControl = LoggerControl(pd, params, log_size=params.N_SIMULATION-1)
+        loggerControl = LoggerControl(pd, params, log_size=params.N_SIMULATION)
 
     if params.SIMULATION:
         device.Init(
diff --git a/python/quadruped_reactive_walking/tools/LoggerControl.py b/python/quadruped_reactive_walking/tools/LoggerControl.py
index 83665497..7a9ba978 100644
--- a/python/quadruped_reactive_walking/tools/LoggerControl.py
+++ b/python/quadruped_reactive_walking/tools/LoggerControl.py
@@ -222,7 +222,9 @@ class LoggerControl:
     def plot_target(self, save=False, fileName='/tmp'):
         import matplotlib.pyplot as plt
 
-        x_mes = np.concatenate([self.q_mes, self.v_mes], axis=1)
+        x_mes = np.concatenate([self.q_mes[:, 3:6], self.v_mes[:, 3:6]], axis=1)
+
+        horizon = self.ocp_xs.shape[0]
 
         x_mpc = [self.ocp_xs[0][0, :]]
         [x_mpc.append(x[1, :]) for x in self.ocp_xs[:-1]]
@@ -261,6 +263,18 @@
         if save:
             plt.savefig(fileName + "/target")
 
+        """ legend = ['x', 'y', 'z']
+        plt.figure(figsize=(12, 18), dpi=90)
+        for p in range(3):
+            plt.subplot(3, 1, p + 1)
+            plt.title('Free foot on ' + legend[p])
+            for i in range(horizon - 1):
+                t = np.linspace(i * self.pd.dt, (self.pd.T + i) * self.pd.dt, self.pd.T + 1)
+                y = all_ocp_feet_p_log[self.pd.rfFootId][i + 1][:, p]
+                for j in range(len(y) - 1):
+                    plt.plot(t[j:j + 2], y[j:j + 2], color='royalblue', linewidth=3, marker='o', alpha=max(1 - j / len(y), 0))
+            plt.plot(self.target[:, p]) """
+
     def plot_riccati_gains(self, n, save=False, fileName='/tmp'):
         import matplotlib.pyplot as plt
 
-- 
GitLab