import yaml
import os
import random
import numpy as np
import pygame
from guided_mrmp.utils import Env, Roomba, Robot, create_random_starts_and_goals
from guided_mrmp.planners import RRT
from guided_mrmp.planners import RRTStar
from guided_mrmp.planners.multirobot.db_guided_mrmp import GuidedMRMP
from guided_mrmp.simulator import Simulator

# Map dynamics-model names (as they appear in settings.yaml) to their classes.
class_names_dict = {
    'Roomba': Roomba,
    # 'RRT': RRT(),
    # 'RRTStar': RRTStar(),
}
# robot = class_names_dict[config["robot_name"]](**config["robot_params"])

def load_settings(file_path):
    """Load the YAML settings file and return its contents as a dict."""
    with open(file_path, 'r') as file:
        settings = yaml.safe_load(file)
    return settings
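
# Illustrative sketch of the settings.yaml layout this script reads (keys taken
# from the __main__ block below; the values shown here are placeholders, not the
# project's actual configuration):
#
#   environment:
#     x_range: [0, 640]
#     y_range: [0, 480]
#     circle_obstacles: []        # obstacle parameters as expected by Env
#     rectangle_obstacles: []
#   dynamics_models: ['Roomba', 'Roomba']
#   robot_starts: [[50, 50], [100, 100]]    # or null to sample random starts/goals
#   robot_goals: [[400, 300], [350, 250]]
#   prediction_horizon: 10
#   discretization_step: 0.1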

def set_python_seed(seed):
    print(f"***Setting Python Seed {seed}***")
    # Note: PYTHONHASHSEED only affects hash randomization if it is set before
    # the interpreter starts; setting it here still lets subprocesses inherit it.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    random.seed(seed)

def plan_decoupled_path(env, start, goal, solver="RRT*",
                        step_length=20, goal_sample_rate=.5, num_samples=500000, r=80):
    """
    Plan a decoupled path from a given start to a given goal, using a single-agent solver.

    inputs:
        - env (Env): environment to plan in
        - start (tuple): (x,y) location of start
        - goal (tuple): (x,y) location of goal
        - solver (string): name of the single-agent solver to use ("RRT" or "RRT*")
        - step_length (float): distance the tree is extended on each iteration
        - goal_sample_rate (float): probability of sampling the goal when growing the tree
        - num_samples (int): maximum number of samples to draw
        - r (float): rewiring radius (RRT* only)
    output:
        - path (list): list of nodes in the path, ordered from start to goal
    """
    if solver == "RRT":
        rrt = RRT(env, start, goal, step_length, goal_sample_rate, num_samples)
        path = rrt.run()
    elif solver == "RRT*":
        rrtstar = RRTStar(env, start, goal, step_length, goal_sample_rate, num_samples, r)
        path = rrtstar.run()
    else:
        print(f"Solver {solver} is not yet implemented. Choose something else.")
        return None
    # The solvers return the path goal-first; reverse it so it runs start -> goal.
    return list(reversed(path))
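
# Illustrative usage (assumes `env` is an Env instance and both points lie
# within its bounds; the coordinates are placeholders):
#   path = plan_decoupled_path(env, (50, 50), (400, 300), solver="RRT*")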

def initialize_robots(starts, goals, dynamics_models, env):
    """
    NOTE: This function (and plan_decoupled_path) could just exist as a helper
    function elsewhere to make this script easier to understand.
    """
    print("initializing robots")
    robots = []
    RADIUS = 10
    colors = [list(np.random.choice(range(256), size=3)) for _ in range(len(starts))]
    for i, (start, goal, dynamics_model, color) in enumerate(zip(starts, goals, dynamics_models, colors)):
        print(f"planning path for robot {i} from {start} to {goal}")
        rrtpath = plan_decoupled_path(env, (start[0], start[1]), (goal[0], goal[1]))
        # Split the planned path into separate x and y waypoint lists.
        xs = []
        ys = []
        for node in rrtpath:
            xs.append(node[0])
            ys.append(node[1])
        waypoints = [xs, ys]
        print(f"waypoints = {waypoints}")
        robot = Robot(i, color, RADIUS, start, goal, dynamics_model, 3.0, 1, .2, rrtpath, waypoints)
        robots.append(robot)
    return robots

if __name__ == "__main__":
    # Load the settings
    settings = load_settings("settings.yaml")
    set_python_seed(42)

    # Load and create the environment
    circle_obstacles = settings['environment']['circle_obstacles']
    rectangle_obstacles = settings['environment']['rectangle_obstacles']
    x_range = settings['environment']['x_range']
    y_range = settings['environment']['y_range']
    env = Env(x_range, y_range, circle_obstacles, rectangle_obstacles)

    # Load the dynamics models
    dynamics_models_st = settings['dynamics_models']
    dynamics_models = []
    for model in dynamics_models_st:
        dynamics_models.append(class_names_dict[model]())

    # Load and create the robots
    robot_starts = settings['robot_starts']
    robot_goals = settings['robot_goals']
    if robot_starts is None:
        # Fall back to randomly sampled starts and goals when none are provided.
        robot_starts, robot_goals = create_random_starts_and_goals(env, 4)
    robots = initialize_robots(robot_starts, robot_goals, dynamics_models, env)

    # Create the Guided MRMP policy
    T = settings['prediction_horizon']
    DT = settings['discretization_step']
    policy = GuidedMRMP(env, robots, dynamics_models, T, DT, settings)

    # Create the simulator
    pygame.init()
    screen = pygame.display.set_mode((x_range[1], y_range[1]))
    sim = Simulator(robots, dynamics_models, env, policy, settings)

    # Run the simulation
    sim.run(screen)