diff --git a/guided_mrmp/main.py b/guided_mrmp/main.py
index f924c3a56d0c2ffe317a8ddd4d43437c14d4e22e..544c1713f2401e9a6445a1f0fe25814d5e961e00 100644
--- a/guided_mrmp/main.py
+++ b/guided_mrmp/main.py
@@ -57,7 +57,7 @@ def plan_decoupled_path(env, start, goal, solver="RRT*",
 
     return list(reversed(path))
 
-def initialize_robots(starts, goals, dynamics_models, env):
+def initialize_robots(starts, goals, dynamics_models, radii, target_v, env):
     """
     NOTE: This function (and the plan_decoupled_path function) could just exist as
     helper functions elsewhere to make this class easier to understand.
@@ -69,7 +69,7 @@ def initialize_robots(starts, goals, dynamics_models, env):
 
     colors = [list(np.random.choice(range(256), size=3)) for i in range(len(starts))]
 
-    for i, (start, goal, dynamics_model, color) in enumerate(zip(starts, goals, dynamics_models, colors)):
+    for i, (start, goal, dynamics_model, radius, color) in enumerate(zip(starts, goals, dynamics_models, radii, colors)):
         print(f"planning path for robot {i} from {start} to {goal} ")
         rrtpath = plan_decoupled_path(env, (start[0],start[1]), (goal[0],goal[1]))
         xs = []
@@ -82,7 +82,7 @@ def initialize_robots(starts, goals, dynamics_models, env):
 
         print(f"waypoints = {waypoints}")
         
-        r = Robot(i,color,RADIUS,start,goal,dynamics_model,3.0,1,.2,rrtpath,waypoints)
+        r = Robot(i, color, radius, start, goal, dynamics_model, target_v, rrtpath, waypoints)
         robots.append(r)
 
     return robots
@@ -111,10 +111,12 @@ if __name__ == "__main__":
     # Load and create the robots
     robot_starts = settings['robot_starts']
     robot_goals = settings['robot_goals']
+    robot_radii = settings['robot_radii']
+    target_v = settings['target_v']
     if robot_starts is None:
-        starts, goals = create_random_starts_and_goals(env, 4)
+        robot_starts, robot_goals = create_random_starts_and_goals(env, 4)
 
-    robots = initialize_robots(robot_starts, robot_goals, dynamics_models, env)
+    robots = initialize_robots(robot_starts, robot_goals, dynamics_models, robot_radii, target_v, env)
 
     # Create the Guided MRMP policy
     T = settings['prediction_horizon']
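Since initialize_robots now zips five parallel lists, a robot_radii list shorter than
robot_starts would silently drop robots (zip truncates to the shortest input). A
hypothetical guard, not part of this diff, would make a settings mismatch fail loudly:

    # Hypothetical sanity check before calling initialize_robots(); zip()
    # truncates silently, so a short robot_radii list would drop robots.
    assert len(robot_radii) == len(robot_starts), \
        "settings.yaml must list one radius per robot start"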
diff --git a/guided_mrmp/planners/multirobot/db_guided_mrmp.py b/guided_mrmp/planners/multirobot/db_guided_mrmp.py
index 580964a6a330b0bdaf2e71551e606d16ea2c6d5f..0d75ab423a8b28e551bbeba40e10c4c5f3ceed90 100644
--- a/guided_mrmp/planners/multirobot/db_guided_mrmp.py
+++ b/guided_mrmp/planners/multirobot/db_guided_mrmp.py
@@ -130,14 +130,9 @@ class GuidedMRMP:
         next_controls = []
         for idx, r in enumerate(self.robots):
 
-            print(f"index = {idx}")
-
             state = r.current_position
             path = self.guide_paths[idx]
 
-            print(f"state = {state}")
-            print(f"path = {path}")
-
             # Get Reference_traj -> inputs are in worldframe
             target_traj = get_ref_trajectory(np.array(state), 
                                              np.array(path), 
@@ -158,7 +153,6 @@ class GuidedMRMP:
                 r.control
             )
 
-            print(f"optimized traj = {x_mpc}")
             self.add_vis_target_traj(screen, r, x_mpc)
             
             # only the first one is used to advance the simulation
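An alternative to deleting these diagnostics outright (not what this diff does) is to
route them through the standard-library logging module, so they can be re-enabled
without touching the code again. A minimal sketch:

    import logging

    logger = logging.getLogger(__name__)

    # Inside GuidedMRMP.advance(), in place of the deleted prints; visible
    # again after calling logging.basicConfig(level=logging.DEBUG) at startup.
    logger.debug("robot %d state=%s path=%s", idx, state, path)
    logger.debug("optimized traj = %s", x_mpc)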
diff --git a/guided_mrmp/simulator.py b/guided_mrmp/simulator.py
index 6387f6256cade9a7e3b93881fa686d7deb668005..6a92cbdaa48ca756f0942ca235fc7880a5e59ba5 100644
--- a/guided_mrmp/simulator.py
+++ b/guided_mrmp/simulator.py
@@ -36,7 +36,7 @@ class Simulator:
         Advance the simulation by dt seconds
         """
         controls = self.policy.advance(screen,self.state, self.time, dt)
-        print(controls)
+        # print(controls)
         for i in range(self.num_robots):
             new_state = self.dynamics_models[i].next_state(self.state[i], controls[i], dt)
             self.robots[i].current_position = new_state
@@ -76,13 +76,13 @@ class Simulator:
                     self.policy.add_vis_guide_paths(screen)
                     self.draw_robots(screen)
                     
-                    self.advance(screen,.3)
+                    self.advance(screen,.5)
             if not pause:
                 self.draw_environment(screen)
                 self.policy.add_vis_guide_paths(screen)
                 self.draw_robots(screen)
                 
-                self.advance(screen,.3)
+                self.advance(screen,.5)
 
             pygame.display.flip()
                 
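Note that the .5 passed to advance() here is hard-coded and distinct from the
simulator.dt: 0.6 value in settings.yaml; wiring the run loop to read that setting
would keep the two from drifting apart. The dt scales every state update through
dynamics_models[i].next_state(). A hypothetical forward-Euler unicycle step, assuming
that is roughly what the Roomba model does, shows why doubling dt doubles the
per-step displacement:

    import numpy as np

    # Illustrative unicycle ("Roomba"-style) integration step; an assumption,
    # not the project's actual dynamics model.
    def next_state(state, control, dt):
        x, y, theta = state   # pose in the world frame
        v, omega = control    # linear and angular velocity commands
        return np.array([x + v * np.cos(theta) * dt,
                         y + v * np.sin(theta) * dt,
                         theta + omega * dt])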
diff --git a/guided_mrmp/utils/robot.py b/guided_mrmp/utils/robot.py
index c02e846d0f93870d7edd410726caa9c9419d2a65..fd208c71d3bcfa939ba9083c6ee2196095c03da1 100644
--- a/guided_mrmp/utils/robot.py
+++ b/guided_mrmp/utils/robot.py
@@ -1,7 +1,7 @@
 import numpy as np
 
 class Robot:
-    def __init__(self, label, color, radius, start, goal, dynamics_model, target_v, T, DT, rrtpath,waypoints):
+    def __init__(self, label, color, radius, start, goal, dynamics_model, target_v, rrtpath, waypoints):
         self.label = label
         self.color = color
         self.radius = radius
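With T and DT dropped from the constructor, the horizon now lives entirely in
settings.yaml (prediction_horizon / discretization_step) rather than on each robot.
A sketch of a call under the new nine-argument signature, with illustrative values
and an assumed roomba_model instance:

    # Keyword form of the new signature; roomba_model, rrtpath, and
    # waypoints are assumed to exist already.
    r = Robot(label=0, color=[200, 50, 50], radius=10,
              start=[10, 10, 0], goal=[600, 300, 0],
              dynamics_model=roomba_model, target_v=3.0,
              rrtpath=rrtpath, waypoints=waypoints)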
diff --git a/settings.yaml b/settings.yaml
index 0e7eb16421cd44f55d63dcc9be91bafd758f02d3..07d7bb598a9d4374071b55f9778234923f54fcea 100644
--- a/settings.yaml
+++ b/settings.yaml
@@ -1,7 +1,7 @@
 single_agent_planner: "RRTStar"
 
-prediction_horizon: 5   # seconds
-discretization_step: .5 # seconds 
+prediction_horizon: 1   # seconds
+discretization_step: .2 # seconds 
 
 model_predictive_controller:
   Q: [20, 20, 20]         # state error cost for a differential drive robot
@@ -15,7 +15,7 @@ multi_robot_traj_opt:
   time_weight: 5
 
 simulator:
-  dt: 0.3
+  dt: 0.6
 
 environment:
   circle_obstacles: []
@@ -31,6 +31,12 @@ robot_goals:
   - [600, 300, 0]
   - [10, 10, 0]
 
+robot_radii:
+  - 10
+  - 10
+
+target_v: 3.0
+
 dynamics_models: 
   - Roomba
   - Roomba
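
With the tightened horizon, each MPC solve now plans prediction_horizon /
discretization_step = 1 / .2 = 5 control steps ahead, down from the previous
5 / .5 = 10, over a much shorter lookahead window. A minimal sketch of reading the
new keys, assuming settings.yaml sits in the working directory:

    import yaml

    with open("settings.yaml") as f:
        settings = yaml.safe_load(f)

    radii = settings['robot_radii']       # [10, 10]
    target_v = settings['target_v']       # 3.0
    horizon_steps = round(settings['prediction_horizon']
                          / settings['discretization_step'])  # 1 / .2 = 5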