Skip to content
Snippets Groups Projects
Commit 29c55cab authored by rachelmoan's avatar rachelmoan
Browse files

Rename parallel MPC function.

parent e9780817
No related branches found
No related tags found
No related merge requests found
......@@ -93,7 +93,7 @@ def initialize_robots(starts, goals, dynamics_models, radii, target_v, env):
if __name__ == "__main__":
# Load the settings
settings = load_settings("settings.yaml")
settings = load_settings("settings_files/settings_mpc.yaml")
set_python_seed(1123)
# set_python_seed(11235813)
......
......@@ -7,8 +7,6 @@ class Optimizer:
if initial_guesses:
for param, value in initial_guesses.items():
print(f"param = {param}")
print(f"value = {value}")
opti.set_initial(self.problem[param], value)
# Set numerical backend, with options if provided
......
......@@ -18,6 +18,29 @@ from guided_mrmp.controllers.utils import get_ref_trajectory, compute_path_from_
from guided_mrmp.controllers.mpc import MPC
from guided_mrmp.utils.library import Library
def initialize_libraries(library_fnames=["guided_mrmp/database/2x3_library","guided_mrmp/database/3x3_library","guided_mrmp/database/5x2_library"]):
    """
    Load the 2x3, 3x3, and 5x2 motion libraries from disk.

    Inputs:
        library_fnames - folders containing the library files, in the
                         order [2x3, 3x3, 5x2]

    Returns:
        A list of the three loaded Library objects, in the same order
        as library_fnames.
    """
    # NOTE: the default list is never mutated here, so the shared
    # mutable-default pitfall does not apply; treat it as read-only.
    print("Loading libraries. This usually takes about 10 seconds...")
    libraries = []
    for fname in library_fnames:
        lib = Library(fname)
        lib.read_library_from_file()
        libraries.append(lib)
    return libraries
class GuidedMRMP:
def __init__(self, env, robots, dynamics_models, T, DT, settings):
......@@ -56,6 +79,8 @@ class GuidedMRMP:
self.scaling_factor = self.settings['simulator']['scaling_factor']
self.libs = initialize_libraries()
def ego_to_global(self, robot, mpc_out):
"""
transforms optimized trajectory XY points from ego (robot) reference
......@@ -118,7 +143,7 @@ class GuidedMRMP:
return conflicts
def get_next_controls_and_trajectories(self,screen):
def get_next_controls_and_trajectories_parallel(self,screen):
"""
Get the next control for each robot.
......@@ -169,7 +194,7 @@ class GuidedMRMP:
return next_controls, next_trajs
def get_next_controls_and_trajectories_old(self,screen):
def get_next_controls_and_trajectories(self,screen):
"""
Get the next control for each robot.
"""
......@@ -216,12 +241,10 @@ class GuidedMRMP:
"""
# get the next control for each robot
next_desired_controls, trajectories = self.get_next_controls_and_trajectories(None)
# print(f"next_desired_controls = {next_desired_controls}")
next_desired_controls, trajectories = self.get_next_controls_and_trajectories(screen)
# find all the conflicts at the next time horizon
# conflicts = self.find_all_conflicts(trajectories, dt)
conflicts = self.find_all_conflicts(trajectories, dt)
conflicts = []
if len(conflicts) == 0:
......@@ -232,14 +255,14 @@ class GuidedMRMP:
next_controls = [control[0][0], control[1][0]]
controls.append(np.asarray(next_controls))
# print(f"controls = {controls}")
for r, next_control in zip(self.robots, controls):
r.control = next_control
return False, controls
# resolve the conflicts using the database
# resolver = TrajOptDBResolver(10, 60, conflicts, self.robots)
resolver = TrajOptResolver(conflicts, self.robots, trajectories, dt, 2,self.env.circle_obs, self.env.rect_obs,1,1,1)
rad = self.robots[0].radius
resolver = TrajOptDBResolver(rad*2, 5, conflicts, self.robots, trajectories, dt, rad,self.env,1,1,1,self.libs[0],self.libs[1],self.libs[2])
next_desired_controls, trajs = resolver.get_local_controls(next_desired_controls)
self.add_vis_conflict_free_traj(screen, self.scaling_factor, trajs)
......
single_agent_planner: "RRTStar"
prediction_horizon: 10 # seconds
discretization_step: 1 # seconds
prediction_horizon: 3 # seconds
discretization_step: .5 # seconds
model_predictive_controller:
Q: [20, 20, 0] # state error cost for a differential drive robot
......@@ -10,11 +10,11 @@ model_predictive_controller:
P: [10, 10] # input rate of change cost for a differential drive robot
print_level: 0
print_time: 0
acceptable_tol: .1
acceptable_iter: 1000
acceptable_tol: 100000
acceptable_iter: 1
multi_robot_traj_opt:
rob_dist_weight: 20
rob_dist_weight: 10
obs_dist_weight: 10
time_weight: 5
......@@ -28,31 +28,17 @@ environment:
x_range: [0, 100]
y_range: [0, 100]
robot_starts: []
# - [80, 80, 0]
# - [20, 20, 0]
robot_starts:
- [60, 20, 0]
- [65, 80, 0]
robot_goals: []
# - [20, 20, 0]
# - [80, 80, 0]
robot_goals:
- [60, 80, 0]
- [65, 20, 0]
robot_radii:
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 1
- 5
- 5
target_v: 3.0
......
single_agent_planner: "RRTStar"
prediction_horizon: 3 # seconds
discretization_step: .5 # seconds
model_predictive_controller:
Q: [20, 20, 0] # state error cost for a differential drive robot
Qf: [30, 30, 0] # state final error cost for a differential drive robot
R: [10, 10] # input cost for a differential drive robot
P: [10, 10] # input rate of change cost for a differential drive robot
print_level: 0
print_time: 0
acceptable_tol: 999999999
acceptable_iter: 1
multi_robot_traj_opt:
rob_dist_weight: 10
obs_dist_weight: 10
time_weight: 5
simulator:
dt: .3
scaling_factor: 6.0
environment:
circle_obstacles: []
rectangle_obstacles: []
x_range: [0, 100]
y_range: [0, 100]
robot_starts: []
robot_goals: []
robot_radii:
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
- 5
target_v: 3.0
dynamics_models:
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
- Roomba
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment