Commit 13ea4763 authored by rachelmoan

Removing magic constants

parent b34189c9
 import numpy as np
-from guided_mrmp.utils import Roomba
+# from guided_mrmp.utils import Roomba
 from guided_mrmp.optimizer import Optimizer

 np.seterr(divide="ignore", invalid="ignore")

@@ -20,18 +20,18 @@ class MPC:
         input_cost ():
         input_rate_cost ():
         """
-        self.nx = 3  # number of state vars
-        self.nu = 2  # number of input/control vars
+        self.nx = model.state_dimension()    # number of state vars
+        self.nu = model.control_dimension()  # number of input/control vars

         if len(state_cost) != self.nx:
-            raise ValueError(f"State Error cost matrix shuld be of size {self.nx}")
+            raise ValueError(f"State Error cost matrix should be of size {self.nx}")
         if len(final_state_cost) != self.nx:
-            raise ValueError(f"End State Error cost matrix shuld be of size {self.nx}")
+            raise ValueError(f"End State Error cost matrix should be of size {self.nx}")
         if len(input_cost) != self.nu:
-            raise ValueError(f"Control Effort cost matrix shuld be of size {self.nu}")
+            raise ValueError(f"Control Effort cost matrix should be of size {self.nu}")
         if len(input_rate_cost) != self.nu:
             raise ValueError(
-                f"Control Effort Difference cost matrix shuld be of size {self.nu}"
+                f"Control Effort Difference cost matrix should be of size {self.nu}"
             )

         self.robot_model = model
...
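With the dimensions now read from the model, the size checks adapt to whatever dynamics the controller is given. A minimal usage sketch, assuming Roomba reports a 3-dimensional state and a 2-dimensional control (consistent with the old hard-coded values):

from guided_mrmp.utils import Roomba
from guided_mrmp.controllers.mpc import MPC

model = Roomba()
Q  = [20, 20, 20]  # state error cost; length must equal model.state_dimension()
Qf = [30, 30, 30]  # final state error cost
R  = [10, 10]      # input cost; length must equal model.control_dimension()
P  = [10, 10]      # input rate-of-change cost

mpc = MPC(model, 5, 0.5, Q, Qf, R, P)  # T = 5 s horizon, DT = 0.5 s step

# A wrong-sized cost list now fails fast instead of silently mis-indexing:
# MPC(model, 5, 0.5, [20, 20], Qf, R, P)
# -> ValueError: State Error cost matrix should be of size 3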
@@ -3,13 +3,21 @@ import os
 import random
 import numpy as np
 import pygame

-from guided_mrmp.utils import Env, Roomba, Robot
+from guided_mrmp.utils import Env, Roomba, Robot, create_random_starts_and_goals
 from guided_mrmp.planners import RRT
 from guided_mrmp.planners import RRTStar
 from guided_mrmp.planners.multirobot.db_guided_mrmp import GuidedMRMP
 from guided_mrmp.simulator import Simulator

+class_names_dist = {
+    'Roomba': Roomba,
+    # 'RRT': RRT(),
+    # 'RRTStar': RRTStar(),
+}
+
+# robot = class_names_dist[config["robot_name"]](**config["robot_params"])

 def load_settings(file_path):
     with open(file_path, 'r') as file:
         settings = yaml.safe_load(file)
@@ -81,40 +89,43 @@ def initialize_robots(starts, goals, dynamics_models, env):
 if __name__ == "__main__":
-    # Load files using the provided arguments
+    # Load the settings
     settings = load_settings("settings.yaml")
+    set_python_seed(42)
+
+    # Load and create the environment
+    circle_obstacles = settings['environment']['circle_obstacles']
+    rectangle_obstacles = settings['environment']['rectangle_obstacles']
-    robot_starts = settings['robot_starts']
-    robot_goals = settings['robot_goals']
+    x_range = settings['environment']['x_range']
+    y_range = settings['environment']['y_range']
+    env = Env(x_range, y_range, circle_obstacles, rectangle_obstacles)
-    dynamics_models = settings['dynamics_models']
+
+    # Load the dynamics models
+    dynamics_models_st = settings['dynamics_models']
-    set_python_seed(42)
-
-    # Create the environment
-    # circle_obstacles = [(100,100,20), (200,200,30)]
-    # rectangle_obstacles = [(400,400,50,50)]
-    circle_obstacles = []
-    rectangle_obstacles = []
-    env = Env([0,640], [0,480], circle_obstacles, rectangle_obstacles)
+    dynamics_models = []
+    for model in dynamics_models_st:
+        dynamics_models.append(class_names_dist[model]())

-    # Create the robots
-    # starts, goals = create_random_starts_and_goals(env, 4)
-    starts = [[10,10,0],[600,300,0]]
-    goals = [[600,300,0],[10,10,0]]
+    # Load and create the robots
+    robot_starts = settings['robot_starts']
+    robot_goals = settings['robot_goals']
+    if robot_starts is None:
+        starts, goals = create_random_starts_and_goals(env, 4)
-    robots = initialize_robots(starts, goals, dynamics_models, env)
-    dynamics_model1 = Roomba()
-    dynamics_models = [dynamics_model1, dynamics_model1]
+    robots = initialize_robots(robot_starts, robot_goals, dynamics_models, env)

     # Create the Guided MRMP policy
-    policy = GuidedMRMP(env, robots, dynamics_models)
+    T = settings['prediction_horizon']
+    DT = settings['discretization_step']
+    policy = GuidedMRMP(env, robots, dynamics_models, T, DT, settings)

     # Create the simulator
     pygame.init()
-    screen = pygame.display.set_mode((640, 480))
-    sim = Simulator(robots, dynamics_models, circle_obstacles, rectangle_obstacles, policy)
+    screen = pygame.display.set_mode((x_range[1], y_range[1]))
+    sim = Simulator(robots, dynamics_models, env, policy, settings)

     # Run the simulation
     sim.run(screen)
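The class_names_dist table is what lets settings.yaml name dynamics models as plain strings. A small self-contained sketch of the lookup, with hypothetical settings values inlined:

import yaml
from guided_mrmp.utils import Roomba

class_names_dist = {'Roomba': Roomba}

settings = yaml.safe_load("""
dynamics_models:
  - Roomba
  - Roomba
""")

# one instance per named model, exactly as in the __main__ block above
dynamics_models = [class_names_dist[name]() for name in settings['dynamics_models']]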
@@ -19,11 +19,8 @@ from guided_mrmp.controllers.utils import get_ref_trajectory, compute_path_from_
 from guided_mrmp.controllers.mpc import MPC

-T = 5   # Prediction Horizon [s]
-DT = .5 # discretization step [s]

 class GuidedMRMP:
-    def __init__(self, env, robots, dynamics_models):
+    def __init__(self, env, robots, dynamics_models, T, DT, settings):
         """
         inputs:
             - robots (list): list of Robot class objects
@@ -33,9 +30,18 @@ class GuidedMRMP:
         self.dynamics_models = dynamics_models
         self.env = env
         self.guide_paths = [[]]*len(robots)
+        self.settings = settings
+        self.T = T
+        self.DT = DT
+        self.K = int(T / DT)
+
+        # for MPC
+        self.Q = self.settings['model_predictive_controller']['Q']    # state error cost
+        self.Qf = self.settings['model_predictive_controller']['Qf']  # state final error cost
+        self.R = self.settings['model_predictive_controller']['R']    # input cost
+        self.P = self.settings['model_predictive_controller']['P']    # input rate of change cost

         for idx,r in enumerate(self.robots):
             xs = []
@@ -135,20 +141,16 @@ class GuidedMRMP:
         # Get Reference_traj -> inputs are in worldframe
         target_traj = get_ref_trajectory(np.array(state),
                                          np.array(path),
-                                         3.0,
-                                         T,
-                                         DT)
+                                         r.target_v,
+                                         self.T,
+                                         self.DT)

-        # For a circular robot (easy dynamics)
-        Q = [20, 20, 20]   # state error cost
-        Qf = [30, 30, 30]  # state final error cost
-        R = [10, 10]       # input cost
-        P = [10, 10]       # input rate of change cost
-        mpc = MPC(self.dynamics_models[idx], T, DT, Q, Qf, R, P)
-        # dynamycs w.r.t robot frame
+        mpc = MPC(self.dynamics_models[idx], self.T, self.DT, self.Q, self.Qf, self.R, self.P)
+        # dynamics w.r.t robot frame

         curr_state = np.array([0, 0, 0])
         x_mpc, u_mpc = mpc.step(
             curr_state,
...
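With T and DT injected rather than fixed at module level, the number of discrete planning steps follows directly from the settings. With the shipped values (prediction_horizon of 5 s, discretization_step of 0.5 s) the horizon is 10 steps:

T = 5     # prediction_horizon [s], from settings.yaml
DT = 0.5  # discretization_step [s], from settings.yaml
K = int(T / DT)  # number of MPC steps, as computed in GuidedMRMP.__init__
assert K == 10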
@@ -10,7 +10,7 @@ from guided_mrmp.planners import RRTStar
 from guided_mrmp.planners.multirobot.db_guided_mrmp import GuidedMRMP

 class Simulator:
-    def __init__(self, robots, dynamics_models, circle_obstacles, rectangle_obstacles, policy):
+    def __init__(self, robots, dynamics_models, env, policy, settings):
         """
         robots: list of Robot objects
         dynamics_models: list of DynamicsModel objects
@@ -21,8 +21,8 @@ class Simulator:
         time: the current time
         """
         self.robots = robots
-        self.circ_obstacles = circle_obstacles
-        self.rect_obstacles = rectangle_obstacles
+        self.circ_obstacles = env.circle_obs
+        self.rect_obstacles = env.rect_obs
         self.policy = policy
         self.state = [robot.current_position for robot in robots]
...
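The Simulator now receives the whole Env rather than raw obstacle lists, so obstacle data lives in one place. A sketch, assuming Env exposes the circle_obs and rect_obs attributes read above; the tuple layouts are assumptions based on the commented examples in the old main script:

env = Env([0, 640], [0, 480],
          [(100, 100, 20), (200, 200, 30)],  # circle obstacles, assumed (x, y, r)
          [(400, 400, 50, 50)])              # rectangle obstacles, assumed (x, y, w, h)
sim = Simulator(robots, dynamics_models, env, policy, settings)
# internally: self.circ_obstacles = env.circle_obs; self.rect_obstacles = env.rect_obs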
@@ -13,6 +13,7 @@ class Robot:
         self.next_step = None
         self.next_control = None
+        self.target_v = target_v

         self.x_history = [start[0]]
         self.y_history = [start[1]]
...
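Storing target_v on each Robot is what lets get_ref_trajectory above use r.target_v in place of the hard-coded 3.0. A hypothetical construction; the real Robot constructor takes more parameters than shown here:

r = Robot(start=[10, 10, 0], target_v=3.0)  # hypothetical keyword arguments
# the planner then reads the per-robot speed:
# get_ref_trajectory(np.array(state), np.array(path), r.target_v, self.T, self.DT)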
single_agent_planner: "RRTStar"

prediction_horizon: 5    # seconds
discretization_step: .5  # seconds

model_predictive_controller:
  dt: 0.1
  N: 10
  Q: [20, 20, 20]  # state error cost for a differential drive robot
  Qf: [30, 30, 30] # state final error cost for a differential drive robot
  R: [10, 10]      # input cost for a differential drive robot
  P: [10, 10]      # input rate of change cost for a differential drive robot

multi_robot_traj_opt:
  rob_dist_weight: 10
...
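These keys are exactly what GuidedMRMP.__init__ reads its MPC weights from. A minimal round trip, assuming the block above is saved as settings.yaml:

import yaml

with open("settings.yaml", "r") as f:
    settings = yaml.safe_load(f)

mpc_cfg = settings['model_predictive_controller']
Q, Qf = mpc_cfg['Q'], mpc_cfg['Qf']  # [20, 20, 20], [30, 30, 30]
R, P = mpc_cfg['R'], mpc_cfg['P']    # [10, 10], [10, 10]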