Commit 31402be8 authored by rachelmoan

run code from main.py and settings.yaml

parent d69eeef9
@@ -49,7 +49,7 @@ def plan_decoupled_path(env, start, goal, solver="RRT*",
     return list(reversed(path))
 
-def initialize_robots(starts, goals, env):
+def initialize_robots(starts, goals, dynamics_models, env):
     """
     NOTE This function (and the plan_decoupled_paths function could just exist as
     helper functions elsewhere to make this class easier to understand)
@@ -61,7 +61,7 @@ def initialize_robots(starts, goals, env):
     colors = [list(np.random.choice(range(256), size=3)) for i in range(len(starts))]
-    for i, (start, goal, color) in enumerate(zip(starts, goals, colors)):
+    for i, (start, goal, dynamics_model, color) in enumerate(zip(starts, goals, dynamics_models, colors)):
         print(f"planning path for robot {i} from {start} to {goal} ")
         rrtpath = plan_decoupled_path(env, (start[0],start[1]), (goal[0],goal[1]))
         xs = []
@@ -74,7 +74,7 @@ def initialize_robots(starts, goals, env):
         print(f"waypoints = {waypoints}")
-        r = Robot(i,color,RADIUS,start,goal,3.0,1,.2,rrtpath,waypoints)
+        r = Robot(i,color,RADIUS,start,goal,dynamics_model,3.0,1,.2,rrtpath,waypoints)
         r.tracker.x_history.append(r.tracker.state[0])
         r.tracker.y_history.append(r.tracker.state[1])
         r.tracker.h_history.append(r.tracker.state[2])
@@ -89,8 +89,8 @@ if __name__ == "__main__":
     circle_obstacles = settings['environment']['circle_obstacles']
     rectangle_obstacles = settings['environment']['rectangle_obstacles']
-    robot_starts = settings['robots']['starts']
-    robot_goals = settings['robots']['goals']
+    robot_starts = settings['robot_starts']
+    robot_goals = settings['robot_goals']
     dynamics_models = settings['dynamics_models']
@@ -108,7 +108,7 @@ if __name__ == "__main__":
     starts = [[10,10,0],[600,300,0]]
     goals = [[600,300,0],[10,10,0]]
-    robots = initialize_robots(starts, goals, env)
+    robots = initialize_robots(starts, goals, dynamics_models, env)
     dynamics_model1 = Roomba()
     dynamics_models = [dynamics_model1, dynamics_model1]
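The settings keys read above suggest a flat layout in settings.yaml. Below is a minimal sketch, not part of this commit, of how main.py might load that file and turn the names listed under dynamics_models (e.g. "Roomba") into model instances; the import path for Roomba and the name-to-class table are assumptions for illustration.

import yaml

from guided_mrmp.utils import Roomba  # assumed import path for the Roomba dynamics model

with open("settings.yaml") as f:
    settings = yaml.safe_load(f)

robot_starts = settings['robot_starts']
robot_goals = settings['robot_goals']
circle_obstacles = settings['environment']['circle_obstacles']
rectangle_obstacles = settings['environment']['rectangle_obstacles']

# Map each model name from the YAML to an instance, one per robot.
# The name-to-class table is a hypothetical helper, not code from this commit.
model_classes = {"Roomba": Roomba}
dynamics_models = [model_classes[name]() for name in settings['dynamics_models']]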
@@ -41,6 +41,7 @@ class GuidedMRMP:
         for idx, r1 in enumerate(self.robots):
             control = r1.next_control
+            print(f"next control = {control}")
             next_state = self.dynamics_models[idx].next_state(r1.current_position, control, dt)
             circ1 = Point(next_state[0],next_state[1])
             circ1 = circ1.buffer(r1.radius)
@@ -79,19 +80,19 @@ class GuidedMRMP:
         # get the next control for each robot
         for r in self.robots:
-            r.next_control = r.tracker.get_next_control(r.current_position)
+            r.next_control = r.tracker.get_next_control(r.current_position)[1]
         # find all the conflicts at the next timestep
         conflicts = self.find_all_conflicts(dt)
         # resolve the conflicts using the database
         # resolver = TrajOptDBResolver(10, 60, conflicts, self.robots)
-        resolver = TrajOptResolver(conflicts, self.robots, dt, 10,)
-        updated_controls = resolver.get_local_controls()
-        for conflict,new_controls in zip(conflicts,updated_controls):
-            for r,control in zip(conflict,new_controls):
-                r.next_control = control
+        # resolver = TrajOptResolver(conflicts, self.robots, dt, 10,)
+        # updated_controls = resolver.get_local_controls()
+        # for conflict,new_controls in zip(conflicts,updated_controls):
+        #     for r,control in zip(conflict,new_controls):
+        #         r.next_control = control
         # update the state of each robot
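The hunks above predict each robot's next state with its dynamics model and buffer that position into a disc before checking for conflicts. A rough sketch of the pairwise test this implies is shown below; the function name and signature are hypothetical and are not the repository's actual find_all_conflicts.

from shapely.geometry import Point

def robots_conflict(r1, r2, model1, model2, dt):
    """Sketch: do two robots' predicted footprints overlap at the next timestep?"""
    # Predict each robot's next state from its current position and chosen control.
    s1 = model1.next_state(r1.current_position, r1.next_control, dt)
    s2 = model2.next_state(r2.current_position, r2.next_control, dt)
    # Buffer each predicted position into a disc of the robot's radius.
    disc1 = Point(s1[0], s1[1]).buffer(r1.radius)
    disc2 = Point(s2[0], s2[1]).buffer(r2.radius)
    # The robots are in conflict if the discs overlap.
    return disc1.intersects(disc2)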
@@ -9,73 +9,6 @@ from guided_mrmp.planners import RRT
 from guided_mrmp.planners import RRTStar
 from guided_mrmp.planners.multirobot.db_guided_mrmp import GuidedMRMP
-def set_python_seed(seed):
-    # print(f"***Setting Python Seed {seed}***")
-    os.environ['PYTHONHASHSEED'] = str(seed)
-    np.random.seed(seed)
-    random.seed(seed)
-def plan_decoupled_path(env, start, goal, solver="RRT*",
-                        step_length=20, goal_sample_rate=.5, num_samples=500000, r=80):
-    """
-    Plan decoupled path from a given start to a given goal, using a single-agent solver.
-    inputs:
-        - start (tuple): (x,y) location of start
-        - goal (tuple): (x,y) location of goal
-        - solver (string): Name of single-agent solver to be used
-        - step_length (float):
-        - goal_sample_rate (float):
-        - num_samples (int):
-        - r (float):
-    output:
-        - path (list): list of nodes in path
-    """
-    if solver == "RRT":
-        rrt = RRT(env, start, goal, step_length, goal_sample_rate, num_samples)
-        path = rrt.run()
-    elif solver == "RRT*":
-        rrtstar = RRTStar(env, start, goal, step_length, goal_sample_rate, num_samples,r)
-        path = rrtstar.run()
-    else:
-        print(f"Solver {solver} is not yet implemented. Choose something else.")
-        return None
-    return list(reversed(path))
-def initialize_robots(starts, goals, env):
-    """
-    NOTE This function (and the plan_decoupled_paths function could just exist as
-    helper functions elsewhere to make this class easier to understand)
-    """
-    print("initializing robots")
-    robots = []
-    RADIUS = 10
-    colors = [list(np.random.choice(range(256), size=3)) for i in range(len(starts))]
-    for i, (start, goal, color) in enumerate(zip(starts, goals, colors)):
-        print(f"planning path for robot {i} from {start} to {goal} ")
-        rrtpath = plan_decoupled_path(env, (start[0],start[1]), (goal[0],goal[1]))
-        xs = []
-        ys = []
-        for node in rrtpath:
-            xs.append(node[0])
-            ys.append(node[1])
-        waypoints = [xs,ys]
-        print(f"waypoints = {waypoints}")
-        r = Robot(i,color,RADIUS,start,goal,3.0,1,.2,rrtpath,waypoints)
-        r.tracker.x_history.append(r.tracker.state[0])
-        r.tracker.y_history.append(r.tracker.state[1])
-        r.tracker.h_history.append(r.tracker.state[2])
-        robots.append(r)
-    return robots
 class Simulator:
     def __init__(self, robots, dynamics_models, circle_obstacles, rectangle_obstacles, policy):
         """
@@ -139,45 +72,16 @@ class Simulator:
                     pause = not pause
                 if event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT and pause:
                     self.draw_environment(screen)
-                    policy.add_vis_guide_paths(screen)
+                    self.policy.add_vis_guide_paths(screen)
                     self.draw_robots(screen)
                     self.advance(screen,.3)
            if not pause:
                self.draw_environment(screen)
-               policy.add_vis_guide_paths(screen)
+               self.policy.add_vis_guide_paths(screen)
                self.draw_robots(screen)
                self.advance(screen,.3)
            pygame.display.flip()
-if __name__ == "__main__":
-    set_python_seed(42)
-    # Create the environment
-    # circle_obstacles = [(100,100,20), (200,200,30)]
-    # rectangle_obstacles = [(400,400,50,50)]
-    circle_obstacles = []
-    rectangle_obstacles = []
-    env = Env([0,640], [0,480], circle_obstacles, rectangle_obstacles)
-    # Create the robots
-    # starts, goals = create_random_starts_and_goals(env, 4)
-    starts = [[10,10,0],[600,300,0]]
-    goals = [[600,300,0],[10,10,0]]
-    robots = initialize_robots(starts, goals, env)
-    dynamics_model1 = Roomba()
-    dynamics_models = [dynamics_model1, dynamics_model1]
-    # Create the Guided MRMP policy
-    policy = GuidedMRMP(env, robots, dynamics_models)
-    # Create the simulator
-    pygame.init()
-    screen = pygame.display.set_mode((640, 480))
-    sim = Simulator(robots, dynamics_models, circle_obstacles, rectangle_obstacles, policy)
-    sim.run(screen)
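Per the commit message, this entry-point block leaves this file. A hedged sketch of what the equivalent driver in main.py presumably looks like after the change, with starts, goals, obstacles and dynamics models now coming from settings.yaml; the exact wiring in main.py is not shown in this diff, so treat the names below as an assumption that follows the hunks above.

# Hypothetical driver in main.py after this commit; names follow the hunks above.
import pygame

env = Env([0, 640], [0, 480], circle_obstacles, rectangle_obstacles)
robots = initialize_robots(robot_starts, robot_goals, dynamics_models, env)

# Guided MRMP policy and simulator, wired together as in the deleted block above.
policy = GuidedMRMP(env, robots, dynamics_models)

pygame.init()
screen = pygame.display.set_mode((640, 480))
sim = Simulator(robots, dynamics_models, circle_obstacles, rectangle_obstacles, policy)
sim.run(screen)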
 class Robot:
-    def __init__(self, label, color, radius, start, goal, target_v, T, DT, rrtpath,waypoints):
+    def __init__(self, label, color, radius, start, goal, dynamics_model, target_v, T, DT, rrtpath,waypoints):
         self.label = label
         self.color = color
         self.radius = radius
@@ -13,7 +13,7 @@ class Robot:
         if rrtpath is not None:
             from guided_mrmp.controllers.path_tracker import PathTracker
-            self.tracker = PathTracker(start, target_v, T, DT, waypoints)
+            self.tracker = PathTracker(start, dynamics_model,target_v, T, DT, waypoints)
         self.x_history = [start[0]]
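For reference, a short usage sketch of the updated Robot constructor, mirroring the call in initialize_robots above; the concrete values, the Roomba import path, and the stand-in path/waypoints are made up for illustration.

from guided_mrmp.utils import Roomba  # assumed import path for the Roomba dynamics model

# Stand-ins for what plan_decoupled_path would normally return.
rrtpath = [(10, 10), (300, 150), (600, 300)]
waypoints = [[10, 300, 600], [10, 150, 300]]

# Argument order per the new signature: label, color, radius, start, goal,
# dynamics_model, target_v, T, DT, rrtpath, waypoints.
r = Robot(0, [255, 0, 0], 10, [10, 10, 0], [600, 300, 0],
          Roomba(), 3.0, 1, .2, rrtpath, waypoints)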
@@ -24,7 +24,7 @@ robot_goals:
   - [600, 300, 0]
   - [10, 10, 0]
-dynamics_model:
+dynamics_models:
   - Roomba
   - Roomba