Commit 6e02bdcc authored by rachelmoan
Removing some more magic constants

parent 13ea4763
@@ -57,7 +57,7 @@ def plan_decoupled_path(env, start, goal, solver="RRT*",
     return list(reversed(path))
 
 
-def initialize_robots(starts, goals, dynamics_models, env):
+def initialize_robots(starts, goals, dynamics_models, radii, target_v, env):
     """
     NOTE This function (and the plan_decoupled_paths function could just exist as
     helper functions elsewhere to make this class easier to understand)
@@ -69,7 +69,7 @@ def initialize_robots(starts, goals, dynamics_models, env):
 
     colors = [list(np.random.choice(range(256), size=3)) for i in range(len(starts))]
 
-    for i, (start, goal, dynamics_model, color) in enumerate(zip(starts, goals, dynamics_models, colors)):
+    for i, (start, goal, dynamics_model, radius, color) in enumerate(zip(starts, goals, dynamics_models, radii, colors)):
         print(f"planning path for robot {i} from {start} to {goal} ")
         rrtpath = plan_decoupled_path(env, (start[0],start[1]), (goal[0],goal[1]))
         xs = []
@@ -82,7 +82,7 @@ def initialize_robots(starts, goals, dynamics_models, env):
         print(f"waypoints = {waypoints}")
 
-        r = Robot(i,color,RADIUS,start,goal,dynamics_model,3.0,1,.2,rrtpath,waypoints)
+        r = Robot(i,color,radius,start,goal,dynamics_model,target_v,rrtpath,waypoints)
         robots.append(r)
 
     return robots
@@ -111,10 +111,12 @@ if __name__ == "__main__":
     # Load and create the robots
     robot_starts = settings['robot_starts']
     robot_goals = settings['robot_goals']
+    robot_radii = settings['robot_radii']
+    target_v = settings['target_v']
 
     if robot_starts is None:
         starts, goals = create_random_starts_and_goals(env, 4)
-    robots = initialize_robots(robot_starts, robot_goals, dynamics_models, env)
+    robots = initialize_robots(robot_starts, robot_goals, dynamics_models, robot_radii, target_v, env)
 
     # Create the Guided MRMP policy
     T = settings['prediction_horizon']
...
@@ -130,14 +130,9 @@ class GuidedMRMP:
 
         next_controls = []
         for idx, r in enumerate(self.robots):
-            print(f"index = {idx}")
             state = r.current_position
             path = self.guide_paths[idx]
-
-            print(f"state = {state}")
-            print(f"path = {path}")
-
 
             # Get Reference_traj -> inputs are in worldframe
             target_traj = get_ref_trajectory(np.array(state),
                                              np.array(path),
@@ -158,7 +153,6 @@ class GuidedMRMP:
                 r.control
             )
 
-            print(f"optimized traj = {x_mpc}")
            self.add_vis_target_traj(screen, r, x_mpc)
 
            # only the first one is used to advance the simulation
...
@@ -36,7 +36,7 @@ class Simulator:
         Advance the simulation by dt seconds
         """
         controls = self.policy.advance(screen,self.state, self.time, dt)
-        print(controls)
+        # print(controls)
         for i in range(self.num_robots):
             new_state = self.dynamics_models[i].next_state(self.state[i], controls[i], dt)
             self.robots[i].current_position = new_state
@@ -76,13 +76,13 @@ class Simulator:
 
             self.policy.add_vis_guide_paths(screen)
             self.draw_robots(screen)
-            self.advance(screen,.3)
+            self.advance(screen,.5)
 
         if not pause:
             self.draw_environment(screen)
             self.policy.add_vis_guide_paths(screen)
             self.draw_robots(screen)
-            self.advance(screen,.3)
+            self.advance(screen,.5)
 
         pygame.display.flip()
 import numpy as np
 
 class Robot:
-    def __init__(self, label, color, radius, start, goal, dynamics_model, target_v, T, DT, rrtpath, waypoints):
+    def __init__(self, label, color, radius, start, goal, dynamics_model, target_v, rrtpath, waypoints):
         self.label = label
         self.color = color
         self.radius = radius
...
 single_agent_planner: "RRTStar"
 
-prediction_horizon: 5 # seconds
-discretization_step: .5 # seconds
+prediction_horizon: 1 # seconds
+discretization_step: .2 # seconds
 
 model_predictive_controller:
   Q: [20, 20, 20] # state error cost for a differential drive robot
@@ -15,7 +15,7 @@ multi_robot_traj_opt:
   time_weight: 5
 
 simulator:
-  dt: 0.3
+  dt: 0.6
 
 environment:
   circle_obstacles: []
@@ -31,6 +31,12 @@ robot_goals:
   - [600, 300, 0]
   - [10, 10, 0]
 
+robot_radii:
+  - 10
+  - 10
+
+target_v: 3.0
+
 dynamics_models:
   - Roomba
   - Roomba
...
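
For context, the new config keys are read from the settings file at startup (see the `if __name__ == "__main__":` hunk above) and threaded through `initialize_robots`, replacing the old hard-coded `RADIUS` and `3.0` target velocity. Below is a minimal sketch of that loading step, assuming the config above is a YAML file parsed with PyYAML; the filename and the `yaml.safe_load` call are assumptions, while the key names come from this commit:

    import yaml

    # Load the experiment settings (hypothetical filename).
    with open("settings.yaml") as f:
        settings = yaml.safe_load(f)

    # Per-robot radii and a shared target velocity now come from config
    # rather than from magic constants scattered through the code.
    robot_radii = settings['robot_radii']  # e.g. [10, 10], one entry per robot
    target_v = settings['target_v']        # e.g. 3.0

    print(robot_radii, target_v)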