diff --git a/guided_mrmp/controllers/mpc.py b/guided_mrmp/controllers/mpc.py
index 6e31c6e9d4dbaab03294c4e2633fb5696875e76a..dc08415154d4e7dafabbd36723f9d7e99de71375 100644
--- a/guided_mrmp/controllers/mpc.py
+++ b/guided_mrmp/controllers/mpc.py
@@ -1,5 +1,5 @@
 import numpy as np
-from guided_mrmp.utils import Roomba
+# from guided_mrmp.utils import Roomba
 from guided_mrmp.optimizer import Optimizer
 
 np.seterr(divide="ignore", invalid="ignore")
@@ -20,18 +20,18 @@ class MPC:
             input_cost ():
             input_rate_cost ():
         """
-        self.nx = 3 # number of state vars
-        self.nu = 2 # number of input/control vars
+        self.nx = model.state_dimension() # number of state vars
+        self.nu = model.control_dimension() # number of input/control vars
 
         if len(state_cost) != self.nx:
-            raise ValueError(f"State Error cost matrix shuld be of size {self.nx}")
+            raise ValueError(f"State Error cost matrix should be of size {self.nx}")
         if len(final_state_cost) != self.nx:
-            raise ValueError(f"End State Error cost matrix shuld be of size {self.nx}")
+            raise ValueError(f"End State Error cost matrix should be of size {self.nx}")
         if len(input_cost) != self.nu:
-            raise ValueError(f"Control Effort cost matrix shuld be of size {self.nu}")
+            raise ValueError(f"Control Effort cost matrix should be of size {self.nu}")
         if len(input_rate_cost) != self.nu:
             raise ValueError(
-                f"Control Effort Difference cost matrix shuld be of size {self.nu}"
+                f"Control Effort Difference cost matrix should be of size {self.nu}"
             )
 
         self.robot_model = model
diff --git a/guided_mrmp/main.py b/guided_mrmp/main.py
index 8af5ba31095264b8404619b73b462405a8f66c4b..f924c3a56d0c2ffe317a8ddd4d43437c14d4e22e 100644
--- a/guided_mrmp/main.py
+++ b/guided_mrmp/main.py
@@ -3,13 +3,21 @@
 import os
 import random
 import numpy as np
 import pygame
-from guided_mrmp.utils import Env, Roomba, Robot
+from guided_mrmp.utils import Env, Roomba, Robot, create_random_starts_and_goals
 from guided_mrmp.planners import RRT
 from guided_mrmp.planners import RRTStar
 from guided_mrmp.planners.multirobot.db_guided_mrmp import GuidedMRMP
 from guided_mrmp.simulator import Simulator
 
+class_names_dist = {
+    'Roomba': Roomba,
+    # 'RRT': RRT(),
+    # 'RRTStar': RRTStar(),
+    }
+
+# robot = class_names_dist[config["robot_name"]](**config["robot_params"])
+
 def load_settings(file_path):
     with open(file_path, 'r') as file:
         settings = yaml.safe_load(file)
@@ -81,40 +89,43 @@ def initialize_robots(starts, goals, dynamics_models, env):
 
 if __name__ == "__main__":
 
-    # Load files using the provided arguments
+    # Load the settings
     settings = load_settings("settings.yaml")
 
+    set_python_seed(42)
+
+    # Load and create the environment
     circle_obstacles = settings['environment']['circle_obstacles']
     rectangle_obstacles = settings['environment']['rectangle_obstacles']
-    robot_starts = settings['robot_starts']
-    robot_goals = settings['robot_goals']
+    x_range = settings['environment']['x_range']
+    y_range = settings['environment']['y_range']
+    env = Env(x_range, y_range, circle_obstacles, rectangle_obstacles)
 
-    dynamics_models = settings['dynamics_models']
+    # Load the dynamics models
+    dynamics_models_st = settings['dynamics_models']
 
-    set_python_seed(42)
-
-    # Create the environment
-    # circle_obstacles = [(100,100,20), (200,200,30)]
-    # rectangle_obstacles = [(400,400,50,50)]
-    circle_obstacles = []
-    rectangle_obstacles = []
-    env = Env([0,640], [0,480], circle_obstacles, rectangle_obstacles)
+    dynamics_models = []
+    for model in dynamics_models_st:
+        dynamics_models.append(class_names_dist[model]())
 
-    # Create the robots
-    # starts, goals = create_random_starts_and_goals(env, 4)
-    starts = [[10,10,0],[600,300,0]]
-    goals = [[600,300,0],[10,10,0]]
+    # Load and create the robots
+    robot_starts = settings['robot_starts']
+    robot_goals = settings['robot_goals']
+    if robot_starts is None:
+        robot_starts, robot_goals = create_random_starts_and_goals(env, 4)
 
-    robots = initialize_robots(starts, goals, dynamics_models, env)
-    dynamics_model1 = Roomba()
-    dynamics_models = [dynamics_model1, dynamics_model1]
+    robots = initialize_robots(robot_starts, robot_goals, dynamics_models, env)
 
     # Create the Guided MRMP policy
-    policy = GuidedMRMP(env, robots, dynamics_models)
+    T = settings['prediction_horizon']
+    DT = settings['discretization_step']
+    policy = GuidedMRMP(env, robots, dynamics_models, T, DT, settings)
 
     # Create the simulator
     pygame.init()
-    screen = pygame.display.set_mode((640, 480))
-    sim = Simulator(robots, dynamics_models, circle_obstacles, rectangle_obstacles, policy)
+    screen = pygame.display.set_mode((x_range[1], y_range[1]))
+    sim = Simulator(robots, dynamics_models, env, policy, settings)
+
+    # Run the simulation
     sim.run(screen)
diff --git a/guided_mrmp/planners/multirobot/db_guided_mrmp.py b/guided_mrmp/planners/multirobot/db_guided_mrmp.py
index 50cba25b59250de9505f6edade0b031df7d7187b..580964a6a330b0bdaf2e71551e606d16ea2c6d5f 100644
--- a/guided_mrmp/planners/multirobot/db_guided_mrmp.py
+++ b/guided_mrmp/planners/multirobot/db_guided_mrmp.py
@@ -19,11 +19,8 @@ from guided_mrmp.controllers.utils import get_ref_trajectory, compute_path_from_
 from guided_mrmp.controllers.mpc import MPC
 
-T = 5 # Prediction Horizon [s]
-DT = .5 # discretization step [s]
-
 class GuidedMRMP:
-    def __init__(self, env, robots, dynamics_models):
+    def __init__(self, env, robots, dynamics_models, T, DT, settings):
         """
         inputs:
             - robots (list): list of Robot class objects
@@ -33,9 +30,18 @@ class GuidedMRMP:
         self.dynamics_models = dynamics_models
         self.env = env
         self.guide_paths = [[]]*len(robots)
+        self.settings = settings
+        self.T = T
+        self.DT = DT
 
         self.K = int(T / DT)
 
+        # for MPC
+        self.Q = self.settings['model_predictive_controller']['Q'] # state error cost
+        self.Qf = self.settings['model_predictive_controller']['Qf'] # state final error cost
+        self.R = self.settings['model_predictive_controller']['R'] # input cost
+        self.P = self.settings['model_predictive_controller']['P'] # input rate of change cost
+
         for idx,r in enumerate(self.robots):
             xs = []
@@ -135,20 +141,16 @@ class GuidedMRMP:
             # Get Reference_traj -> inputs are in worldframe
             target_traj = get_ref_trajectory(np.array(state),
                                              np.array(path),
-                                             3.0,
-                                             T,
-                                             DT)
+                                             r.target_v,
+                                             self.T,
+                                             self.DT)
 
-            # For a circular robot (easy dynamics)
-            Q = [20, 20, 20] # state error cost
-            Qf = [30, 30, 30] # state final error cost
-            R = [10, 10] # input cost
-            P = [10, 10] # input rate of change cost
-            mpc = MPC(self.dynamics_models[idx], T, DT, Q, Qf, R, P)
-
-            # dynamycs w.r.t robot frame
+
+            mpc = MPC(self.dynamics_models[idx], self.T, self.DT, self.Q, self.Qf, self.R, self.P)
+
+            # dynamics w.r.t robot frame
             curr_state = np.array([0, 0, 0])
             x_mpc, u_mpc = mpc.step(
                 curr_state,
diff --git a/guided_mrmp/simulator.py b/guided_mrmp/simulator.py
index 3f4d1ea5b7955c4b0013695d78e62480ca51ae41..6387f6256cade9a7e3b93881fa686d7deb668005 100644
--- a/guided_mrmp/simulator.py
+++ b/guided_mrmp/simulator.py
@@ -10,7 +10,7 @@ from guided_mrmp.planners import RRTStar
 from guided_mrmp.planners.multirobot.db_guided_mrmp import GuidedMRMP
 
 class Simulator:
-    def __init__(self, robots, dynamics_models, circle_obstacles, rectangle_obstacles, policy):
+    def __init__(self, robots, dynamics_models, env, policy, settings):
         """
         robots: list of Robot objects
         dynamics_models: list of DynamicsModel objects
@@ -21,8 +21,8 @@ class Simulator:
         time: the current time
         """
         self.robots = robots
-        self.circ_obstacles = circle_obstacles
-        self.rect_obstacles = rectangle_obstacles
+        self.circ_obstacles = env.circle_obs
+        self.rect_obstacles = env.rect_obs
         self.policy = policy
 
         self.state = [robot.current_position for robot in robots]
diff --git a/guided_mrmp/utils/robot.py b/guided_mrmp/utils/robot.py
index 33ada249eaf58f532ab6a61ae4527c9e60c3688c..c02e846d0f93870d7edd410726caa9c9419d2a65 100644
--- a/guided_mrmp/utils/robot.py
+++ b/guided_mrmp/utils/robot.py
@@ -13,6 +13,7 @@ class Robot:
 
         self.next_step = None
         self.next_control = None
+        self.target_v = target_v
 
         self.x_history = [start[0]]
         self.y_history = [start[1]]
diff --git a/settings.yaml b/settings.yaml
index eaeb0bac9603c1f192ec86015d835f0d5c737f57..0e7eb16421cd44f55d63dcc9be91bafd758f02d3 100644
--- a/settings.yaml
+++ b/settings.yaml
@@ -1,6 +1,13 @@
+single_agent_planner: "RRTStar"
+
+prediction_horizon: 5 # seconds
+discretization_step: .5 # seconds
+
 model_predictive_controller:
-  dt: 0.1
-  N: 10
+  Q: [20, 20, 20] # state error cost for a differential drive robot
+  Qf: [30, 30, 30] # state final error cost for a differential drive robot
+  R: [10, 10] # input cost for a differential drive robot
+  P: [10, 10] # input rate of change cost for a differential drive robot
 
 multi_robot_traj_opt:
   rob_dist_weight: 10
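
Note (not part of the patch): a minimal sketch of how the new settings-driven wiring composes end to end, mirroring main.py above. It assumes settings.yaml also carries the dynamics_models list of class names that main.py reads (that key sits outside the settings.yaml hunk shown) and that Roomba takes no constructor arguments.

    # Sketch under the assumptions above: registry + config -> model -> MPC.
    import yaml

    from guided_mrmp.controllers.mpc import MPC
    from guided_mrmp.utils import Roomba

    class_names_dist = {'Roomba': Roomba}  # maps config names to constructors

    with open("settings.yaml") as f:
        settings = yaml.safe_load(f)

    T = settings['prediction_horizon']    # seconds
    DT = settings['discretization_step']  # seconds
    mpc_cfg = settings['model_predictive_controller']

    # One dynamics model instance per configured class name.
    dynamics_models = [class_names_dist[name]() for name in settings['dynamics_models']]

    # nx/nu now come from the model, so MPC's length checks verify that the
    # configured Q/Qf (length nx) and R/P (length nu) fit the chosen model.
    mpc = MPC(dynamics_models[0], T, DT,
              mpc_cfg['Q'], mpc_cfg['Qf'], mpc_cfg['R'], mpc_cfg['P'])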
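
The design payoff is fail-fast validation: because MPC.__init__ now sizes nx and nu from model.state_dimension() and model.control_dimension(), a Q, Qf, R, or P entry of the wrong length in settings.yaml raises the "should be of size" ValueError at construction time instead of surfacing later as a mis-shaped optimization problem.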