diff --git a/guided_mrmp/planners/db_guided_mrmp.py b/guided_mrmp/planners/db_guided_mrmp.py
index f6e51a2b815dd2758de8b3077a111c48f16e6ddf..c9e7189dde137c1f14a74fce311665b44ebc2e5f 100644
--- a/guided_mrmp/planners/db_guided_mrmp.py
+++ b/guided_mrmp/planners/db_guided_mrmp.py
@@ -18,32 +18,8 @@ from guided_mrmp.controllers.utils import get_ref_trajectory, compute_path_from_
 
 from guided_mrmp.controllers.mpc import MPC
 
-from guided_mrmp.utils.library import Library
-
-def initialize_libraries(library_fnames=["guided_mrmp/database/2x3_library","guided_mrmp/database/3x3_library","guided_mrmp/database/5x2_library"]):
-    """
-    Load the 2x3, 3x3, and 2x5 libraries. Store them in self.lib-x- 
-    Inputs: 
-        library_fnames - the folder containing the library files
-    """
-    # Create each of the libraries
-    print(f"Loading libraries. This usually takes about 10 seconds...")
-    lib_2x3 = Library(library_fnames[0])
-    lib_2x3.read_library_from_file()
-    
-    lib_3x3 = Library(library_fnames[1])
-    lib_3x3.read_library_from_file()
-
-    
-    lib_2x5 = Library(library_fnames[2])
-    lib_2x5.read_library_from_file()
-
-    return [lib_2x3, lib_3x3, lib_2x5]
-
-
-
 class GuidedMRMP:
-    def __init__(self, env, robots, dynamics_models, T, DT, settings):
+    def __init__(self, env, robots, dynamics_models, T, DT, libs, settings):
         """
         inputs:
             - robots (list): list of Robot class objects
@@ -79,7 +55,7 @@ class GuidedMRMP:
 
         self.scaling_factor = self.settings['simulator']['scaling_factor']
 
-        self.libs = initialize_libraries()
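+        # the trajectory libraries (2x3, 3x3, 2x5) are now loaded once by the caller and
+        # injected here, instead of being re-read from disk in every constructor call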
+        self.libs = libs
 
     def ego_to_global(self, robot, mpc_out):
         """
@@ -219,6 +195,8 @@ class GuidedMRMP:
             curr_state = np.array([0, 0, 0])
             x_mpc, u_mpc = mpc.step(curr_state, target_traj, np.array(r.control))
 
+            
+
             self.add_vis_target_traj(screen, self.scaling_factor, r, x_mpc)
             
             # only the first one is used to advance the simulation
@@ -228,6 +206,12 @@ class GuidedMRMP:
 
             # print(f"control = {u_mpc}")
             next_trajs.append(self.ego_to_global(r, x_mpc))
+
+        # use matplotlib to plot the trajectories
+        # for r, traj in zip(self.robots, next_trajs):
+        #     plt.plot(traj[0], traj[1], label=r.label)
+        # plt.legend()
+        # plt.show()
         return next_controls, next_trajs
 
     def advance(self, screen, state, time, dt=0.1):
@@ -241,11 +225,14 @@ class GuidedMRMP:
         """
 
         # get the next control for each robot
-        next_desired_controls, trajectories = self.get_next_controls_and_trajectories(screen)
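+        # the 'parallelize' flag under model_predictive_controller selects the parallel
+        # variant of the per-robot control/trajectory computation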
+        if self.settings['model_predictive_controller']['parallelize']:
+            next_desired_controls, trajectories = self.get_next_controls_and_trajectories_parallel(screen)
+        else:
+            next_desired_controls, trajectories = self.get_next_controls_and_trajectories(screen)
 
         # find all the conflicts at the next time horizon
-        conflicts = self.find_all_conflicts(trajectories, dt)
-        conflicts = []
+        # conflicts = self.find_all_conflicts(trajectories, dt)
+        conflicts = []
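+        # NOTE: conflict detection is bypassed for now; the empty list always takes the
+        # conflict-free branch below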
 
         if len(conflicts) == 0:
             # no conflicts, return the desired controls
@@ -262,7 +249,7 @@ class GuidedMRMP:
         # resolve the conflicts using the database
         # resolver = TrajOptDBResolver(10, 60, conflicts, self.robots)
         rad = self.robots[0].radius
-        resolver = TrajOptDBResolver(rad*2, 5, conflicts, self.robots, trajectories, dt, rad,self.env,1,1,1,self.libs[0],self.libs[1],self.libs[2]) 
+        resolver = TrajOptDBResolver(rad*2, 5, conflicts, self.robots, trajectories, dt, rad, self.env, 10, 10, 5, self.libs[0], self.libs[1], self.libs[2])
         next_desired_controls, trajs = resolver.get_local_controls(next_desired_controls)
 
         self.add_vis_conflict_free_traj(screen, self.scaling_factor, trajs)
@@ -283,7 +270,7 @@ class GuidedMRMP:
 
         # return the valid controls
         for r, next_control in zip(self.robots, next_desired_controls):
-            r.control = [next_control[0][-1], next_control[1][-1]]
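+            # store the control returned for this robot as-is (previously only the last
+            # sample of each control sequence was applied)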
+            r.control = next_control
         
         return True, next_desired_controls
 
@@ -323,14 +310,14 @@ class GuidedMRMP:
         """
         Add the visualization to the screen.
         """
         traj = self.ego_to_global(robot, traj)
         for i in range(len(traj[0])-1):
-            x = int(traj[0,i])
-            y = int(traj[1,i])
-            next_x = int(traj[0,i+1])
-            next_y = int(traj[1,i+1])
-            pygame.draw.line(screen, (255,0,0), (x*scaling_factor,y*scaling_factor), 
-                             (next_x*scaling_factor,next_y*scaling_factor), 2)
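+            # scale to screen coordinates and pass the float endpoints to pygame directly,
+            # rather than truncating to ints before scaling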
+            x = traj[0,i]*scaling_factor
+            y = traj[1,i]*scaling_factor
+            next_x = traj[0,i+1]*scaling_factor
+            next_y = traj[1,i+1]*scaling_factor
+            pygame.draw.line(screen, (255,0,0), (x,y), (next_x,next_y), 3)
 
     def add_vis_conflict_free_traj(self,screen, scaling_factor, trajs):
         """
diff --git a/guided_mrmp/simulator.py b/guided_mrmp/simulator.py
index cf3728e94679d2d2d8ee008ef10d7b138587d2ea..0c731334875e1e40da3fc9a61c33cba91b83f410 100644
--- a/guided_mrmp/simulator.py
+++ b/guided_mrmp/simulator.py
@@ -37,24 +37,24 @@ class Simulator:
         """
         conflict_detected, controls = self.policy.advance(screen,self.state, self.time, dt)
 
-        if not conflict_detected:
-            for i in range(self.num_robots):
-                new_state = self.dynamics_models[i].next_state(self.state[i], controls[i], dt)
-                self.robots[i].current_position = new_state
-                self.state[i] = new_state
-            self.time += dt
+        # if not conflict_detected:
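+        # NOTE: the state is now advanced every step with the returned controls,
+        # regardless of whether a conflict was detected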
+        for i in range(self.num_robots):
+            new_state = self.dynamics_models[i].next_state(self.state[i], controls[i], dt)
+            self.robots[i].current_position = new_state
+            self.state[i] = new_state
+        self.time += dt
 
-        if conflict_detected:
-            print("Executing full trajectory")
-            for i in range(self.num_robots):
-                controls_list = controls[i]
-                for j in range(len(controls_list)):
-                    next_control = [controls_list[0][j], controls_list[1][j]]
-                    print(f"next control = {next_control}")
-                    new_state = self.dynamics_models[i].next_state(self.state[i], next_control, dt)
-                    self.robots[i].current_position = new_state
-                    self.state[i] = new_state
-            self.time += dt
+        # if conflict_detected:
+        #     print("Executing full trajectory")
+        #     for i in range(self.num_robots):
+        #         controls_list = controls[i]
+        #         for j in range(len(controls_list)):
+        #             next_control = [controls_list[0][j], controls_list[1][j]]
+        #             print(f"next control = {next_control}")
+        #             new_state = self.dynamics_models[i].next_state(self.state[i], next_control, dt)
+        #             self.robots[i].current_position = new_state
+        #             self.state[i] = new_state
+        #     self.time += dt
 
     def draw_environment(self, screen):
         screen.fill((255,255,255))