From 31402be85d46557c2594417c54f039b850778b83 Mon Sep 17 00:00:00 2001
From: rachelmoan <moanrachel516@gmail.com>
Date: Sun, 25 Aug 2024 16:51:09 -0500
Subject: [PATCH] Run code from main.py, configured via settings.yaml

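Move the runnable entry point out of simulator.py and into main.py, driven by
settings.yaml: robots are now constructed with an explicit dynamics model,
robot starts/goals and dynamics models are read from the top-level
robot_starts, robot_goals, and dynamics_models keys, and the TrajOptResolver
conflict-resolution step is commented out for now.

A minimal settings.yaml sketch matching the keys main.py now reads; the values
shown are taken from the old simulator.py __main__ and are illustrative only,
and the per-entry field layouts are assumptions, not confirmed by this patch:

    environment:
      circle_obstacles: []      # assumed [x, y, radius], e.g. [100, 100, 20]
      rectangle_obstacles: []   # assumed [x, y, w, h], e.g. [400, 400, 50, 50]
    robot_starts:               # assumed [x, y, heading]
      - [10, 10, 0]
      - [600, 300, 0]
    robot_goals:
      - [600, 300, 0]
      - [10, 10, 0]
    dynamics_models:
      - Roomba
      - Roomba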
---
 guided_mrmp/environments/environment1.yaml    |   0
 guided_mrmp/main.py                           |  12 +--
 .../planners/multirobot/db_guided_mrmp.py     |  13 +--
 guided_mrmp/simulator.py                      | 100 +-----------------
 guided_mrmp/utils/robot.py                    |   4 +-
 settings.yaml                                 |   2 +-
 6 files changed, 18 insertions(+), 113 deletions(-)
 delete mode 100644 guided_mrmp/environments/environment1.yaml

diff --git a/guided_mrmp/environments/environment1.yaml b/guided_mrmp/environments/environment1.yaml
deleted file mode 100644
index e69de29..0000000
diff --git a/guided_mrmp/main.py b/guided_mrmp/main.py
index dbad3d5..b6a453f 100644
--- a/guided_mrmp/main.py
+++ b/guided_mrmp/main.py
@@ -49,7 +49,7 @@ def plan_decoupled_path(env, start, goal, solver="RRT*",
 
     return list(reversed(path))
 
-def initialize_robots(starts, goals, env):
+def initialize_robots(starts, goals, dynamics_models, env):
     """
     NOTE This function (and the plan_decoupled_paths function could just exist as 
     helper functions elsewhere to make this class easier to understand)
@@ -61,7 +61,7 @@ def initialize_robots(starts, goals, env):
 
     colors = [list(np.random.choice(range(256), size=3)) for i in range(len(starts))]
 
-    for i, (start, goal, color) in enumerate(zip(starts, goals, colors)):
+    for i, (start, goal, dynamics_model, color) in enumerate(zip(starts, goals, dynamics_models, colors)):
         print(f"planning path for robot {i} from {start} to {goal} ")
         rrtpath = plan_decoupled_path(env, (start[0],start[1]), (goal[0],goal[1]))
         xs = []
@@ -74,7 +74,7 @@ def initialize_robots(starts, goals, env):
 
         print(f"waypoints = {waypoints}")
         
-        r = Robot(i,color,RADIUS,start,goal,3.0,1,.2,rrtpath,waypoints)
+        r = Robot(i,color,RADIUS,start,goal,dynamics_model,3.0,1,.2,rrtpath,waypoints)
         r.tracker.x_history.append(r.tracker.state[0])
         r.tracker.y_history.append(r.tracker.state[1])
         r.tracker.h_history.append(r.tracker.state[2])
@@ -89,8 +89,8 @@ if __name__ == "__main__":
 
     circle_obstacles = settings['environment']['circle_obstacles']
     rectangle_obstacles = settings['environment']['rectangle_obstacles']
-    robot_starts = settings['robots']['starts']
-    robot_goals = settings['robots']['goals']
+    robot_starts = settings['robot_starts']
+    robot_goals = settings['robot_goals']
 
     dynamics_models = settings['dynamics_models']
 
@@ -108,7 +108,7 @@ if __name__ == "__main__":
     starts = [[10,10,0],[600,300,0]]
     goals = [[600,300,0],[10,10,0]]
 
-    robots = initialize_robots(starts, goals, env)
+    robots = initialize_robots(starts, goals, dynamics_models, env)
     dynamics_model1 = Roomba()
     dynamics_models = [dynamics_model1, dynamics_model1]
 
diff --git a/guided_mrmp/planners/multirobot/db_guided_mrmp.py b/guided_mrmp/planners/multirobot/db_guided_mrmp.py
index 1849142..5447325 100644
--- a/guided_mrmp/planners/multirobot/db_guided_mrmp.py
+++ b/guided_mrmp/planners/multirobot/db_guided_mrmp.py
@@ -41,6 +41,7 @@ class GuidedMRMP:
 
         for idx, r1 in enumerate(self.robots):
             control = r1.next_control
+            print(f"next control = {control}")
             next_state = self.dynamics_models[idx].next_state(r1.current_position, control, dt)
             circ1 = Point(next_state[0],next_state[1])
             circ1 = circ1.buffer(r1.radius)
@@ -79,19 +80,19 @@ class GuidedMRMP:
 
         # get the next control for each robot
         for r in self.robots:
-            r.next_control = r.tracker.get_next_control(r.current_position)
+            r.next_control = r.tracker.get_next_control(r.current_position)[1]
 
         # find all the conflicts at the next timestep
         conflicts = self.find_all_conflicts(dt)
 
         # resolve the conflicts using the database
         # resolver = TrajOptDBResolver(10, 60, conflicts, self.robots)
-        resolver = TrajOptResolver(conflicts, self.robots, dt, 10,)
-        updated_controls = resolver.get_local_controls()
+        # resolver = TrajOptResolver(conflicts, self.robots, dt, 10,)
+        # updated_controls = resolver.get_local_controls()
 
-        for conflict,new_controls in zip(conflicts,updated_controls):
-            for r,control in zip(conflict,new_controls):
-                r.next_control = control
+        # for conflict,new_controls in zip(conflicts,updated_controls):
+        #     for r,control in zip(conflict,new_controls):
+        #         r.next_control = control
             
         # update the state of each robot
 
diff --git a/guided_mrmp/simulator.py b/guided_mrmp/simulator.py
index 60dce5a..7e40367 100644
--- a/guided_mrmp/simulator.py
+++ b/guided_mrmp/simulator.py
@@ -9,73 +9,6 @@ from guided_mrmp.planners import RRT
 from guided_mrmp.planners import RRTStar
 from guided_mrmp.planners.multirobot.db_guided_mrmp import GuidedMRMP
 
-def set_python_seed(seed):
-    # print(f"***Setting Python Seed {seed}***")
-    os.environ['PYTHONHASHSEED'] = str(seed)
-    np.random.seed(seed)
-    random.seed(seed)
-
-def plan_decoupled_path(env, start, goal, solver="RRT*", 
-                        step_length=20, goal_sample_rate=.5, num_samples=500000, r=80):
-    """
-    Plan decoupled path from a given start to a given goal, using a single-agent solver.
-
-    inputs:
-        - start (tuple): (x,y) location of start 
-        - goal (tuple): (x,y) location of goal 
-        - solver (string): Name of single-agent solver to be used
-        - step_length (float): 
-        - goal_sample_rate (float):
-        - num_samples (int):
-        - r (float):
-    output:
-        - path (list): list of nodes in path 
-    """
-    if solver == "RRT":
-        rrt = RRT(env, start, goal, step_length, goal_sample_rate, num_samples)
-        path = rrt.run()
-    elif solver == "RRT*":
-        rrtstar = RRTStar(env, start, goal, step_length, goal_sample_rate, num_samples,r)
-        path = rrtstar.run()
-    else:
-        print(f"Solver {solver} is not yet implemented. Choose something else.")
-        return None
-
-    return list(reversed(path))
-
-def initialize_robots(starts, goals, env):
-    """
-    NOTE This function (and the plan_decoupled_paths function could just exist as 
-    helper functions elsewhere to make this class easier to understand)
-    """
-    print("initializing robots")
-
-    robots = []
-    RADIUS = 10
-
-    colors = [list(np.random.choice(range(256), size=3)) for i in range(len(starts))]
-
-    for i, (start, goal, color) in enumerate(zip(starts, goals, colors)):
-        print(f"planning path for robot {i} from {start} to {goal} ")
-        rrtpath = plan_decoupled_path(env, (start[0],start[1]), (goal[0],goal[1]))
-        xs = []
-        ys = []
-        for node in rrtpath:
-            xs.append(node[0])
-            ys.append(node[1])
-
-        waypoints = [xs,ys]
-
-        print(f"waypoints = {waypoints}")
-        
-        r = Robot(i,color,RADIUS,start,goal,3.0,1,.2,rrtpath,waypoints)
-        r.tracker.x_history.append(r.tracker.state[0])
-        r.tracker.y_history.append(r.tracker.state[1])
-        r.tracker.h_history.append(r.tracker.state[2])
-        robots.append(r)
-
-    return robots
-
 class Simulator:
     def __init__(self, robots, dynamics_models, circle_obstacles, rectangle_obstacles, policy):
         """
@@ -139,45 +72,16 @@ class Simulator:
                     pause = not pause
                 if event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT and pause:
                     self.draw_environment(screen)
-                    policy.add_vis_guide_paths(screen)
+                    self.policy.add_vis_guide_paths(screen)
                     self.draw_robots(screen)
                     
                     self.advance(screen,.3)
             if not pause:
                 self.draw_environment(screen)
-                policy.add_vis_guide_paths(screen)
+                self.policy.add_vis_guide_paths(screen)
                 self.draw_robots(screen)
                 
                 self.advance(screen,.3)
 
             pygame.display.flip()
                 
-if __name__ == "__main__":
-
-    set_python_seed(42)
-
-    # Create the environment
-    # circle_obstacles = [(100,100,20), (200,200,30)]
-    # rectangle_obstacles = [(400,400,50,50)]
-    circle_obstacles = []
-    rectangle_obstacles = []
-    env = Env([0,640], [0,480], circle_obstacles, rectangle_obstacles)
-
-    # Create the robots
-    # starts, goals = create_random_starts_and_goals(env, 4)
-    starts = [[10,10,0],[600,300,0]]
-    goals = [[600,300,0],[10,10,0]]
-    
-    robots = initialize_robots(starts, goals, env)
-    dynamics_model1 = Roomba()
-    dynamics_models = [dynamics_model1, dynamics_model1]
-
-    # Create the Guided MRMP policy
-    policy = GuidedMRMP(env, robots, dynamics_models)
-
-    # Create the simulator
-    pygame.init()
-    screen = pygame.display.set_mode((640, 480)) 
-    sim = Simulator(robots, dynamics_models, circle_obstacles, rectangle_obstacles, policy)
-    sim.run(screen)
-
diff --git a/guided_mrmp/utils/robot.py b/guided_mrmp/utils/robot.py
index 2f73fe3..46bcfab 100644
--- a/guided_mrmp/utils/robot.py
+++ b/guided_mrmp/utils/robot.py
@@ -1,5 +1,5 @@
 class Robot:
-    def __init__(self, label, color, radius, start, goal, target_v, T, DT, rrtpath,waypoints):
+    def __init__(self, label, color, radius, start, goal, dynamics_model, target_v, T, DT, rrtpath,waypoints):
         self.label = label
         self.color = color
         self.radius = radius
@@ -13,7 +13,7 @@ class Robot:
 
         if rrtpath is not None:
             from guided_mrmp.controllers.path_tracker import PathTracker
-            self.tracker = PathTracker(start, target_v, T, DT, waypoints)
+            self.tracker = PathTracker(start, dynamics_model,target_v, T, DT, waypoints)
         
 
         self.x_history = [start[0]]
diff --git a/settings.yaml b/settings.yaml
index 10dda0d..eaeb0ba 100644
--- a/settings.yaml
+++ b/settings.yaml
@@ -24,7 +24,7 @@ robot_goals:
   - [600, 300, 0]
   - [10, 10, 0]
 
-dynamics_model: 
+dynamics_models: 
   - Roomba
   - Roomba
 
-- 
GitLab