From a2bbca32f653973f9b44d6669a1168fbad238665 Mon Sep 17 00:00:00 2001
From: rachelmoan <moanrachel516@gmail.com>
Date: Wed, 16 Oct 2024 21:19:18 -0500
Subject: [PATCH] added parallelization to the MPC calculation

---
 guided_mrmp/planners/db_guided_mrmp.py | 140 ++++++++++++++++++++-----
 1 file changed, 111 insertions(+), 29 deletions(-)

diff --git a/guided_mrmp/planners/db_guided_mrmp.py b/guided_mrmp/planners/db_guided_mrmp.py
index d95b523..bd095f2 100644
--- a/guided_mrmp/planners/db_guided_mrmp.py
+++ b/guided_mrmp/planners/db_guided_mrmp.py
@@ -5,7 +5,7 @@
 This module is essentially one big path tracking algorithm.
 It uses MPC to path track each of the robots, while looking ahead to
 identify and resolve conflicts.
 """
-
+import time as t
 import pygame
 from shapely.geometry import Polygon, Point
@@ -54,6 +54,8 @@ class GuidedMRMP:
             self.guide_paths[idx] = compute_path_from_wp(waypoints[0], waypoints[1], .05)
 
+        self.scaling_factor = self.settings['simulator']['scaling_factor']
+
     def ego_to_global(self, robot, mpc_out):
         """
         transforms optimized trajectory XY points from ego (robot) reference
@@ -109,20 +111,56 @@ class GuidedMRMP:
                 # check if the robots overlap
                 if circ1.intersects(circ2):
                     if (r1,r2) not in conflicts and (r2,r1) not in conflicts:
-                        print(f"conflict between {r1.label} and {r2.label}")
+                        # print(f"conflict between {r1.label} and {r2.label}")
                         conflicts.append((r1,r2))
 
         return conflicts
 
-    def all_robots_at_goal(self):
-        for r in self.robots:
-            if (np.sqrt((r.tracker.state[0] - r.goal[0]) ** 2 + (r.tracker.state[1] - r.goal[1]) ** 2) > 0.1):
-                return False
-        return True
+    def get_next_controls_and_trajectories(self, screen):
+        """
+        Get the next control for each robot.
+        """
+        from joblib import Parallel, delayed
+
+        next_controls = []
+        next_trajs = []
+
+        def process_robot(idx, r):
+            state = r.current_position
+            path = self.guide_paths[idx]
+
+            # get the reference trajectory -> inputs are in the world frame
+            target_traj = get_ref_trajectory(np.array(state),
+                                             np.array(path),
+                                             r.target_v,
+                                             self.T,
+                                             self.DT)
 
+            mpc = MPC(self.dynamics_models[idx], self.T, self.DT, self.Q, self.Qf, self.R, self.P, self.settings['model_predictive_controller'])
-    def get_next_controls_and_trajectories(self,screen):
+            # dynamics w.r.t. robot frame
+            curr_state = np.array([0, 0, 0])
+            x_mpc, u_mpc = mpc.step(curr_state, target_traj, np.array(r.control))
+
+            # self.add_vis_target_traj(screen, self.scaling_factor, r, x_mpc)
+
+            # keep the full input sequence; advance() applies only the first input
+            control = [u_mpc[0, :], u_mpc[1, :]]
+
+            return np.asarray(control), self.ego_to_global(r, x_mpc)
+
+        with Parallel(n_jobs=12) as parallel:
+            results = parallel(delayed(process_robot)(idx, r) for idx, r in enumerate(self.robots))
+
+        next_controls = [r[0] for r in results]
+        next_trajs = [r[1] for r in results]
+
+        return next_controls, next_trajs
+
+    def get_next_controls_and_trajectories_old(self,screen):
         """
         Get the next control for each robot.
""" @@ -141,17 +179,16 @@ class GuidedMRMP: self.T, self.DT) - - mpc = MPC(self.dynamics_models[idx], self.T, self.DT, self.Q, self.Qf, self.R, self.P) + mpc = MPC(self.dynamics_models[idx], self.T, self.DT, self.Q, self.Qf, self.R, self.P, self.settings['model_predictive_controller']) # dynamics w.r.t robot frame curr_state = np.array([0, 0, 0]) - x_mpc, u_mpc = mpc.step(curr_state, target_traj, r.control) + x_mpc, u_mpc = mpc.step(curr_state, target_traj, np.array(r.control)) - self.add_vis_target_traj(screen, r, x_mpc) + self.add_vis_target_traj(screen, self.scaling_factor, r, x_mpc) # only the first one is used to advance the simulation - control = [u_mpc[0, 0], u_mpc[1, 0]] + control = [u_mpc[0, :], u_mpc[1, :]] next_controls.append(np.asarray(control)) @@ -159,7 +196,6 @@ class GuidedMRMP: next_trajs.append(self.ego_to_global(r, x_mpc)) return next_controls, next_trajs - def advance(self, screen, state, time, dt=0.1): """ Advance the simulation by one timestep. @@ -171,28 +207,56 @@ class GuidedMRMP: """ # get the next control for each robot - next_desired_controls, trajectories = self.get_next_controls_and_trajectories(screen) + next_desired_controls, trajectories = self.get_next_controls_and_trajectories(None) + + # print(f"next_desired_controls = {next_desired_controls}") # find all the conflicts at the next time horizon - conflicts = self.find_all_conflicts(trajectories, dt) + # conflicts = self.find_all_conflicts(trajectories, dt) + conflicts = [] + + if len(conflicts) == 0: + # no conflicts, return the desired controls + controls = [] + + for control in next_desired_controls: + next_controls = [control[0][0], control[1][0]] + controls.append(np.asarray(next_controls)) + + # print(f"controls = {controls}") + for r, next_control in zip(self.robots, controls): + r.control = next_control + return False, controls # resolve the conflicts using the database # resolver = TrajOptDBResolver(10, 60, conflicts, self.robots) - # resolver = TrajOptResolver(conflicts, self.robots, dt, 10,) - # updated_controls = resolver.get_local_controls() - - # for conflict,new_controls in zip(conflicts,updated_controls): - # for r,control in zip(conflict,new_controls): - # r.next_control = control + resolver = TrajOptResolver(conflicts, self.robots, trajectories, dt, 2,self.env.circle_obs, self.env.rect_obs,1,1,1) + next_desired_controls, trajs = resolver.get_local_controls(next_desired_controls) + + self.add_vis_conflict_free_traj(screen, self.scaling_factor, trajs) + + # data = { + # "conflicts": conflicts, + # "dt": dt, + # "env": self.env, + # "next_desired_controls": next_desired_controls, + # "robots": self.robots, + # "trajectories": trajs + # } + + # import yaml + # with open("tests/db_opt_data1.yaml", "w") as file: + # yaml.dump(data, file) # return the valid controls for r, next_control in zip(self.robots, next_desired_controls): - r.control = next_control - controls = next_desired_controls - return controls + r.control = [next_control[0][-1], next_control[1][-1]] + + return True, next_desired_controls + - def add_vis_guide_paths(self, screen): + def add_vis_guide_paths(self, screen, scaling_factor): """ Add the visualization to the screen. 
""" @@ -204,7 +268,8 @@ class GuidedMRMP: # for node in path: # pygame.draw.circle(screen, r.color, (int(node[0]), int(node[1])), 2) for i in range(len(xs)-1): - pygame.draw.line(screen, r.color, (xs[i],ys[i]), (xs[i+1],ys[i+1]), 2) + pygame.draw.line(screen, r.color, (xs[i]*scaling_factor,ys[i]*scaling_factor), + (xs[i+1]*scaling_factor,ys[i+1]*scaling_factor), 2) def add_vis_grid(self, screen, grid_size, top_left, cell_size): """ @@ -222,7 +287,7 @@ class GuidedMRMP: pygame.draw.line(screen, (0,0,0), (xs[0],ys[0]), (xs[1],ys[1]), 2) - def add_vis_target_traj(self,screen, robot, traj): + def add_vis_target_traj(self,screen, scaling_factor, robot, traj): """ Add the visualization to the screen. """ @@ -232,7 +297,24 @@ class GuidedMRMP: y = int(traj[1,i]) next_x = int(traj[0,i+1]) next_y = int(traj[1,i+1]) - pygame.draw.line(screen, (255,0,0), (x,y), (next_x,next_y), 2) + pygame.draw.line(screen, (255,0,0), (x*scaling_factor,y*scaling_factor), + (next_x*scaling_factor,next_y*scaling_factor), 2) + + def add_vis_conflict_free_traj(self,screen, scaling_factor, trajs): + """ + Add the visualization to the screen. + """ + # traj = self.ego_to_global(robot, traj) + for traj in trajs: + # print(traj) + for i in range(len(traj[0])-1): + x = int(traj[0][i]) + y = int(traj[1][i]) + next_x = int(traj[0][i+1]) + next_y = int(traj[1][i+1]) + pygame.draw.line(screen, (0,255,0), (x*scaling_factor,y*scaling_factor), + (next_x*scaling_factor,next_y*scaling_factor), 2) + if __name__ == "__main__": -- GitLab