diff --git a/guided_mrmp/main.py b/guided_mrmp/main.py
index 99dab536bd3b0da2591f8a35bb6831d473a49111..dbad3d57115427143203144c873a2bb74062365d 100644
--- a/guided_mrmp/main.py
+++ b/guided_mrmp/main.py
@@ -1,24 +1,123 @@
 import yaml
-import argparse
+import os
+import random
+import numpy as np
+import pygame
+from guided_mrmp.utils import Env, Roomba, Robot
+from guided_mrmp.planners import RRT
+from guided_mrmp.planners import RRTStar
+
+from guided_mrmp.planners.multirobot.db_guided_mrmp import GuidedMRMP
+from guided_mrmp.simulator import Simulator
 
 def load_settings(file_path):
     with open(file_path, 'r') as file:
         settings = yaml.safe_load(file)
     return settings
 
-def load_environment(file_path):
-    with open(file_path, 'r') as file:
-        environment = yaml.safe_load(file)
-    return environment
+def set_python_seed(seed):
+    print(f"***Setting Python Seed {seed}***")
+    os.environ['PYTHONHASHSEED'] = str(seed)
+    np.random.seed(seed)
+    random.seed(seed)
+
+def plan_decoupled_path(env, start, goal, solver="RRT*", 
+                        step_length=20, goal_sample_rate=.5, num_samples=500000, r=80):
+    """
+    Plan decoupled path from a given start to a given goal, using a single-agent solver.
+
+    inputs:
+        - start (tuple): (x,y) location of start 
+        - goal (tuple): (x,y) location of goal 
+        - solver (string): Name of single-agent solver to be used
+        - step_length (float): maximum distance the tree extends per growth step
+        - goal_sample_rate (float): probability of sampling the goal directly
+        - num_samples (int): maximum number of samples to draw
+        - r (float): rewiring search radius (used by RRT* only)
+    output:
+        - path (list): list of nodes in path 
+    """
+    if solver == "RRT":
+        rrt = RRT(env, start, goal, step_length, goal_sample_rate, num_samples)
+        path = rrt.run()
+    elif solver == "RRT*":
+        rrtstar = RRTStar(env, start, goal, step_length, goal_sample_rate, num_samples,r)
+        path = rrtstar.run()
+    else:
+        print(f"Solver {solver} is not yet implemented. Choose something else.")
+        return None
+
+    return list(reversed(path))
+
+def initialize_robots(starts, goals, env):
+    """
+    NOTE This function (and the plan_decoupled_path function) could just exist as 
+    helper functions elsewhere to make this class easier to understand)
+    """
+    print("initializing robots")
+
+    robots = []
+    RADIUS = 10
+
+    colors = [list(np.random.choice(range(256), size=3)) for i in range(len(starts))]
+
+    for i, (start, goal, color) in enumerate(zip(starts, goals, colors)):
+        print(f"planning path for robot {i} from {start} to {goal} ")
+        rrtpath = plan_decoupled_path(env, (start[0],start[1]), (goal[0],goal[1]))
+        xs = []
+        ys = []
+        for node in rrtpath:
+            xs.append(node[0])
+            ys.append(node[1])
+
+        waypoints = [xs,ys]
+
+        print(f"waypoints = {waypoints}")
+        
+        r = Robot(i,color,RADIUS,start,goal,3.0,1,.2,rrtpath,waypoints)
+        r.tracker.x_history.append(r.tracker.state[0])
+        r.tracker.y_history.append(r.tracker.state[1])
+        r.tracker.h_history.append(r.tracker.state[2])
+        robots.append(r)
+
+    return robots
+
+
+if __name__ == "__main__":
+    # Load files using the provided arguments
+    settings = load_settings("settings.yaml")
+
+    circle_obstacles = settings['environment']['circle_obstacles']
+    rectangle_obstacles = settings['environment']['rectangle_obstacles']
+    robot_starts = settings['robot_starts']
+    robot_goals = settings['robot_goals']
+
+    dynamics_models = settings['dynamics_model']
+
+    set_python_seed(42)
+
+    # Create the environment
+    # circle_obstacles = [(100,100,20), (200,200,30)]
+    # rectangle_obstacles = [(400,400,50,50)]
+    circle_obstacles = []
+    rectangle_obstacles = []
+    env = Env([0,640], [0,480], circle_obstacles, rectangle_obstacles)
+
+    # Create the robots
+    # starts, goals = create_random_starts_and_goals(env, 4)
+    starts = [[10,10,0],[600,300,0]]
+    goals = [[600,300,0],[10,10,0]]
+
+    robots = initialize_robots(starts, goals, env)
+    dynamics_model1 = Roomba()
+    dynamics_models = [dynamics_model1, dynamics_model1]
 
-# Set up argument parser
-parser = argparse.ArgumentParser(description='Load settings and environment files.')
-parser.add_argument('--settings', required=True, help='Path to the settings file')
-parser.add_argument('--environment', required=True, help='Path to the environment file')
+    # Create the Guided MRMP policy
+    policy = GuidedMRMP(env, robots, dynamics_models)
 
-# Parse arguments
-args = parser.parse_args()
+    # Create the simulator
+    pygame.init()
+    screen = pygame.display.set_mode((640, 480)) 
+    sim = Simulator(robots, dynamics_models, circle_obstacles, rectangle_obstacles, policy)
+    sim.run(screen)
 
-# Load files using the provided arguments
-settings = load_settings(args.settings)
-environment = load_environment(args.environment)
\ No newline at end of file
diff --git a/settings.yaml b/settings.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..10dda0d6f29e8c12d7e4a7ddc35f245f56505623
--- /dev/null
+++ b/settings.yaml
@@ -0,0 +1,32 @@
+model_predictive_controller:
+  dt: 0.1
+  N: 10
+
+multi_robot_traj_opt:
+  rob_dist_weight: 10
+  obs_dist_weight: 10
+  time_weight: 5
+
+simulator:
+  dt: 0.3
+
+environment:
+  circle_obstacles: []
+  rectangle_obstacles: []
+  x_range: [0, 640]
+  y_range: [0, 480]
+
+robot_starts:
+  - [10, 10, 0]
+  - [600, 300, 0]
+
+robot_goals:
+  - [600, 300, 0]
+  - [10, 10, 0]
+
+dynamics_model: 
+  - Roomba
+  - Roomba
+
+
+