Commit 681b4a1f authored by rachelmoan

Removing magic functions from path tracker. Instead, pass in dynamics model.

parent 6571a68f
@@ -2,25 +2,29 @@
 import numpy as np
 import matplotlib.pyplot as plt
-from scipy.integrate import odeint
 from guided_mrmp.controllers.utils import compute_path_from_wp, get_ref_trajectory
 from guided_mrmp.controllers.mpc import MPC
 from guided_mrmp.utils import Roomba
-T = 1 # Prediction Horizon [s]
-DT = 0.2 # discretization step [s]
 # Classes
 class PathTracker:
-    def __init__(self, initial_position, target_v, T, DT, waypoints):
+    def __init__(self, initial_position, dynamics, target_v, T, DT, waypoints):
         """
-        waypoints (list [[xpoints],[ypoints]]):
+        Initializes the PathTracker object.
+
+        Parameters:
+        - initial_position: The initial position of the robot [x, y, heading].
+        - dynamics: The dynamics model of the robot.
+        - target_v: The target velocity of the robot.
+        - T: The time horizon for the model predictive control (MPC).
+        - DT: The time step for the MPC.
+        - waypoints: A list of waypoints defining the desired path.
+
+        Returns:
+        None
         """
         # State of the robot [x,y, heading]
         self.state = initial_position
+        self.dynamics = dynamics
         self.T = T
         self.DT = DT
         self.target_v = target_v
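With this change the constructor no longer hard-codes the robot model; it just stores a dynamics object that run() calls later. Judging from the rest of the diff, the tracker only relies on a single next_state(state, u, dt) method. A minimal sketch of that assumed interface follows (the DynamicsModel name and type hints are illustrative, not taken from the repository):

from typing import Protocol

import numpy as np

class DynamicsModel(Protocol):
    """Assumed interface for the `dynamics` argument of PathTracker."""

    def next_state(self, state: np.ndarray, u: list, dt: float) -> np.ndarray:
        """Propagate the state [x, y, heading] forward by dt under control u = [v, omega]."""
        ...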
@@ -68,8 +72,8 @@ class PathTracker:
     def ego_to_global(self, mpc_out):
         """
-        transforms optimized trajectory XY points from ego(car) reference
-        into global(map) frame
+        transforms optimized trajectory XY points from ego (robot) reference
+        into global (map) frame
         Args:
             mpc_out ():
@@ -143,7 +147,7 @@ class PathTracker:
         #     self.state, [self.control[0], self.control[1]], DT
         # )
-        return self.control
+        return x_mpc, self.control
@@ -162,8 +166,8 @@ class PathTracker:
         if (np.sqrt((self.state[0] - self.path[0, -1]) ** 2 + (self.state[1] - self.path[1, -1]) ** 2) < 0.1):
             print("Success! Goal Reached")
             return np.asarray([self.x_history, self.y_history, self.h_history])
-        controls = self.get_next_control()
-        next_state = self.predict_next_state_roomba(self.state, [self.control[0], self.control[1]], self.DT)
+        x_mpc, controls = self.get_next_control(self.state)
+        next_state = self.dynamics.next_state(self.state, [self.control[0], self.control[1]], self.DT)
         self.state = next_state
@@ -175,19 +179,6 @@ class PathTracker:
         self.y_history.append(self.state[1])
         self.h_history.append(self.state[2])
-    def predict_next_state_roomba(self, state, u, dt):
-        dxdt = u[0] * np.cos(state[2])
-        dydt = u[0] * np.sin(state[2])
-        dthetadt = u[1]
-        # Update state using Euler integration
-        new_x = state[0] + dxdt * dt
-        new_y = state[1] + dydt * dt
-        new_theta = state[2] + dthetadt * dt
-        # Return the predicted next state
-        return np.array([new_x, new_y, new_theta])
     def plot_sim(self):
         self.sim_time = self.sim_time + self.DT
         # self.x_history.append(self.state[0])
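The forward-Euler unicycle update deleted here is presumably what the Roomba model from guided_mrmp.utils now provides through next_state. A sketch of what such a method could look like, reusing the exact math from the removed predict_next_state_roomba (the real Roomba class may be implemented differently):

import numpy as np

class Roomba:
    """Sketch only: differential-drive (unicycle) kinematics matching the removed helper."""

    def next_state(self, state, u, dt):
        # state = [x, y, heading]; u = [linear velocity v, angular velocity omega]
        dxdt = u[0] * np.cos(state[2])
        dydt = u[0] * np.sin(state[2])
        dthetadt = u[1]
        # Forward-Euler integration over one step of length dt
        return np.array([state[0] + dxdt * dt,
                         state[1] + dydt * dt,
                         state[2] + dthetadt * dt])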
@@ -309,10 +300,11 @@ if __name__ == "__main__":
     # Example usage
     initial_pos = np.array([0.0, 0.5, 0.0, 0.0])
+    dynamics = Roomba()
     target_vocity = 3.0 # m/s
     T = 1 # Prediction Horizon [s]
     DT = 0.2 # discretization step [s]
     wp = [[0, 3, 4, 6, 10, 12, 13, 13, 6, 1, 0],
           [0, 0, 2, 4, 3, 3, -1, -2, -6, -2, -2]]
-    sim = PathTracker(initial_position=initial_pos, target_v=target_vocity, T=T, DT=DT, waypoints=wp)
+    sim = PathTracker(initial_position=initial_pos, dynamics=dynamics, target_v=target_vocity, T=T, DT=DT, waypoints=wp)
     x,y,h = sim.run(show_plots=False)
\ No newline at end of file
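Since run() returns the x, y, and heading histories as an array, a quick sanity check is to plot the tracked path against the waypoints. A small usage sketch that assumes the __main__ block above has already executed, so sim, wp, x, y, and h are in scope (matplotlib is imported at the top of the module):

import matplotlib.pyplot as plt

# x, y, h are the histories returned by sim.run() in the __main__ block above
plt.plot(wp[0], wp[1], "b--", label="waypoints")
plt.plot(x, y, "r-", label="tracked path")
plt.axis("equal")
plt.legend()
plt.show()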