import os
import random

import numpy as np
import pygame
import yaml

from guided_mrmp.utils import Env, Roomba, Robot
# Merged the two separate `from guided_mrmp.planners import ...` lines.
from guided_mrmp.planners import RRT, RRTStar
from guided_mrmp.planners.multirobot.db_guided_mrmp import GuidedMRMP
from guided_mrmp.simulator import Simulator


def load_settings(file_path):
    """Load a YAML settings file and return its contents as a dict.

    inputs:
        - file_path (str): path to the YAML settings file
    output:
        - settings (dict): parsed contents of the file
    """
    with open(file_path, 'r') as file:
        settings = yaml.safe_load(file)
    return settings


def set_python_seed(seed):
    """Seed the random-number sources used by this script for reproducibility.

    inputs:
        - seed (int): seed value applied to numpy and random
    """
    print(f"***Setting Python Seed {seed}***")
    # NOTE(review): setting PYTHONHASHSEED at runtime only affects child
    # processes, not the already-running interpreter — confirm this is intended.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    random.seed(seed)


def plan_decoupled_path(env, start, goal, solver="RRT*", step_length=20,
                        goal_sample_rate=.5, num_samples=500000, r=80):
    """
    Plan decoupled path from a given start to a given goal, using a
    single-agent solver.

    inputs:
        - env (Env): environment the single-agent solver plans in
        - start (tuple): (x,y) location of start
        - goal (tuple): (x,y) location of goal
        - solver (string): Name of single-agent solver to be used
        - step_length (float):
        - goal_sample_rate (float):
        - num_samples (int):
        - r (float):
    output:
        - path (list): list of nodes in path, ordered start -> goal,
          or None if the requested solver is not implemented
    """
    if solver == "RRT":
        rrt = RRT(env, start, goal, step_length, goal_sample_rate, num_samples)
        path = rrt.run()
    elif solver == "RRT*":
        rrtstar = RRTStar(env, start, goal, step_length,
                          goal_sample_rate, num_samples, r)
        path = rrtstar.run()
    else:
        # FIX: this message was a string literal broken across two physical
        # lines (a syntax error); it is now a single one-line string.
        print(f"Solver {solver} is not yet implemented. Choose something else.")
        return None
    # The solvers return the path goal-first; reverse it to start-first.
    return list(reversed(path))


def initialize_robots(starts, goals, dynamics_models, env):
    """Create one Robot per (start, goal, dynamics_model) triple.

    A decoupled single-agent path is planned for each robot and stored on
    the Robot along with its waypoints.

    NOTE This function (and the plan_decoupled_path function) could just
    exist as helper functions elsewhere to make this class easier to
    understand.

    inputs:
        - starts (list): [x, y, theta] start states, one per robot
        - goals (list): [x, y, theta] goal states, one per robot
        - dynamics_models (list): one dynamics model per robot
        - env (Env): shared planning environment
    output:
        - robots (list): initialized Robot instances
    """
    print("initializing robots")
    robots = []
    RADIUS = 10
    # One random RGB color per robot.
    colors = [list(np.random.choice(range(256), size=3)) for _ in range(len(starts))]

    for i, (start, goal, dynamics_model, color) in enumerate(
            zip(starts, goals, dynamics_models, colors)):
        print(f"planning path for robot {i} from {start} to {goal} ")
        rrtpath = plan_decoupled_path(env, (start[0], start[1]),
                                      (goal[0], goal[1]))
        # Split the path into parallel x and y lists for the tracker.
        xs = []
        ys = []
        for node in rrtpath:
            xs.append(node[0])
            ys.append(node[1])
        waypoints = [xs, ys]

        print(f"waypoints = {waypoints}")

        r = Robot(i, color, RADIUS, start, goal, dynamics_model,
                  3.0, 1, .2, rrtpath, waypoints)
        robots.append(r)

    return robots


if __name__ == "__main__":

    # Load files using the provided arguments
    settings = load_settings("settings.yaml")
    circle_obstacles = settings['environment']['circle_obstacles']
    rectangle_obstacles = settings['environment']['rectangle_obstacles']
    robot_starts = settings['robot_starts']
    robot_goals = settings['robot_goals']
    dynamics_models = settings['dynamics_models']

    set_python_seed(42)

    # Create the environment
    # circle_obstacles = [(100,100,20), (200,200,30)]
    # rectangle_obstacles = [(400,400,50,50)]
    # NOTE(review): the obstacles loaded from settings above are immediately
    # discarded here — presumably a temporary experiment override; confirm.
    circle_obstacles = []
    rectangle_obstacles = []
    env = Env([0, 640], [0, 480], circle_obstacles, rectangle_obstacles)

    # Create the robots
    # starts, goals = create_random_starts_and_goals(env, 4)
    # NOTE(review): robot_starts / robot_goals from settings are ignored in
    # favor of these hard-coded states — confirm this is intentional.
    starts = [[10, 10, 0], [600, 300, 0]]
    goals = [[600, 300, 0], [10, 10, 0]]

    robots = initialize_robots(starts, goals, dynamics_models, env)

    # NOTE(review): initialize_robots received the settings-defined
    # dynamics_models, which are only replaced with Roomba models AFTER the
    # robots were built — the policy/simulator below see the Roomba list.
    dynamics_model1 = Roomba()
    dynamics_models = [dynamics_model1, dynamics_model1]

    # Create the Guided MRMP policy
    policy = GuidedMRMP(env, robots, dynamics_models)

    # Create the simulator
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    sim = Simulator(robots, dynamics_models, circle_obstacles,
                    rectangle_obstacles, policy)
    sim.run(screen)