Skip to content
Snippets Groups Projects
Commit 8c6b17f2 authored by rachelmoan's avatar rachelmoan
Browse files

Fixing visualization, removing magic constants

parent f90864a6
No related branches found
No related tags found
No related merge requests found
...@@ -18,6 +18,8 @@ class MultiPathTracker: ...@@ -18,6 +18,8 @@ class MultiPathTracker:
""" """
# State of the robot [x,y, heading] # State of the robot [x,y, heading]
self.env = env self.env = env
self.x_range = env.boundary[0]
self.y_range = env.boundary[1]
self.states = initial_positions self.states = initial_positions
self.num_robots = len(initial_positions) self.num_robots = len(initial_positions)
self.dynamics = dynamics self.dynamics = dynamics
......
This diff is collapsed.
...@@ -7,7 +7,6 @@ from guided_mrmp.utils import Env, Roomba, Robot, create_random_starts_and_goals ...@@ -7,7 +7,6 @@ from guided_mrmp.utils import Env, Roomba, Robot, create_random_starts_and_goals
from guided_mrmp.planners import RRT from guided_mrmp.planners import RRT
from guided_mrmp.planners import RRTStar from guided_mrmp.planners import RRTStar
from guided_mrmp.planners.db_guided_mrmp import GuidedMRMP
from guided_mrmp.simulator import Simulator from guided_mrmp.simulator import Simulator
from guided_mrmp.utils.helpers import initialize_libraries from guided_mrmp.utils.helpers import initialize_libraries
from guided_mrmp.controllers import MultiPathTracker, MultiPathTrackerDB from guided_mrmp.controllers import MultiPathTracker, MultiPathTrackerDB
...@@ -146,7 +145,7 @@ if __name__ == "__main__": ...@@ -146,7 +145,7 @@ if __name__ == "__main__":
) )
# Create the simulator # Create the simulator
show_vis = False show_vis = settings['simulator']['show_plots']
sim = Simulator(robots, dynamics_models, env, policy, settings) sim = Simulator(robots, dynamics_models, env, policy, settings)
# Run the simulation # Run the simulation
......
...@@ -19,7 +19,8 @@ class Simulator: ...@@ -19,7 +19,8 @@ class Simulator:
self.env = env self.env = env
self.circ_obstacles = env.circle_obs self.circ_obstacles = env.circle_obs
self.rect_obstacles = env.rect_obs self.rect_obstacles = env.rect_obs
self.policy = policy self.policy = policy
self.settings = settings
self.state = [robot.current_position for robot in robots] self.state = [robot.current_position for robot in robots]
...@@ -27,8 +28,6 @@ class Simulator: ...@@ -27,8 +28,6 @@ class Simulator:
self.dynamics_models = dynamics_models self.dynamics_models = dynamics_models
self.time = 0 self.time = 0
self.scaling_factor = settings['simulator']['scaling_factor']
# Helper variables to keep track of the sim # Helper variables to keep track of the sim
self.sim_time = 0 self.sim_time = 0
self.x_history = [ [] for _ in range(self.num_robots) ] self.x_history = [ [] for _ in range(self.num_robots) ]
...@@ -49,7 +48,7 @@ class Simulator: ...@@ -49,7 +48,7 @@ class Simulator:
""" """
# Get the controls from the policy # Get the controls from the policy
x_mpc, controls = self.policy.advance(screen, self.state) x_mpc, controls = self.policy.advance(self.state, show_plots=self.settings['simulator']['show_collision_resolution'])
# # Update the state of each robot # # Update the state of each robot
# for i in range(self.num_robots): # for i in range(self.num_robots):
...@@ -76,9 +75,10 @@ class Simulator: ...@@ -76,9 +75,10 @@ class Simulator:
self.x_history[i].append(self.state[i, 0]) self.x_history[i].append(self.state[i, 0])
self.y_history[i].append(self.state[i, 1]) self.y_history[i].append(self.state[i, 1])
self.h_history[i].append(self.state[i, 2]) self.h_history[i].append(self.state[i, 2])
self.optimized_trajectories_hist[i].append([])
# if show_plots: self.plot_sim() # if show_plots: self.plot_sim()
self.plot_current_world_state() # self.plot_current_world_state()
while 1: while 1:
# check if all robots have reached their goal # check if all robots have reached their goal
...@@ -87,7 +87,7 @@ class Simulator: ...@@ -87,7 +87,7 @@ class Simulator:
return np.asarray([self.x_history, self.y_history, self.h_history]) return np.asarray([self.x_history, self.y_history, self.h_history])
# plot the current state of the robots # plot the current state of the robots
self.plot_current_world_state() if show_plots: self.plot_current_world_state()
# get the next control for all robots # get the next control for all robots
x_mpc, controls = self.advance(self.state, self.policy.DT) x_mpc, controls = self.advance(self.state, self.policy.DT)
...@@ -103,12 +103,7 @@ class Simulator: ...@@ -103,12 +103,7 @@ class Simulator:
self.x_history[i].append(self.state[i, 0]) self.x_history[i].append(self.state[i, 0])
self.y_history[i].append(self.state[i, 1]) self.y_history[i].append(self.state[i, 1])
self.h_history[i].append(self.state[i, 2]) self.h_history[i].append(self.state[i, 2])
self.optimized_trajectories_hist[i].append(self.policy.trajs[i])
# use the optimizer output to preview the predicted state trajectory
# self.optimized_trajectory = self.ego_to_global(x_mpc.value)
# if show_plots: self.optimized_trajectory = self.ego_to_global_roomba(x_mpc)
# if show_plots: self.plot_sim()
def plot_current_world_state(self): def plot_current_world_state(self):
...@@ -133,7 +128,6 @@ class Simulator: ...@@ -133,7 +128,6 @@ class Simulator:
circle1 = plt.Circle((x, y), self.policy.radius, color=colors[i], fill=False) circle1 = plt.Circle((x, y), self.policy.radius, color=colors[i], fill=False)
plt.gca().add_artist(circle1) plt.gca().add_artist(circle1)
# plot the ref path of each robot # plot the ref path of each robot
for i in range(self.num_robots): for i in range(self.num_robots):
plt.plot(self.policy.paths[i][0, :], self.policy.paths[i][1, :], '--', color=colors[i]) plt.plot(self.policy.paths[i][0, :], self.policy.paths[i][1, :], '--', color=colors[i])
...@@ -143,16 +137,18 @@ class Simulator: ...@@ -143,16 +137,18 @@ class Simulator:
plt.xlim(x_range[0], x_range[1]) plt.xlim(x_range[0], x_range[1])
plt.ylim(y_range[0], y_range[1]) plt.ylim(y_range[0], y_range[1])
self.plot_all_traj(self.policy.radius)
# force equal aspect ratio # force equal aspect ratio
plt.gca().set_aspect('equal', adjustable='box') plt.gca().set_aspect('equal', adjustable='box')
plt.tight_layout() plt.tight_layout()
plt.show() # plt.show()
# plt.draw() plt.draw()
# plt.pause(0.1) plt.pause(0.3)
# plt.clf() plt.clf()
def plot_roomba(self, x, y, yaw, color, fill, radius): def plot_roomba(self, x, y, yaw, color, fill, radius):
""" """
...@@ -173,3 +169,31 @@ class Simulator: ...@@ -173,3 +169,31 @@ class Simulator:
dy = 1 * np.sin(yaw) dy = 1 * np.sin(yaw)
ax.arrow(x, y, dx, dy, head_width=0.1, head_length=0.1, fc='r', ec='r') ax.arrow(x, y, dx, dy, head_width=0.1, head_length=0.1, fc='r', ec='r')
def plot_all_traj(self, radius):
    """
    Draw the current world state for every robot on the active pyplot axes.

    For each robot this renders, in order: its latest pose (roomba glyph),
    its goal (the final waypoint of its reference path, as a marker plus an
    outline circle of the robot's footprint), and — when one was recorded
    for the current step — its most recent optimized trajectory, converted
    from the robot's ego frame into world coordinates.

    Args:
        radius: footprint radius used for both the pose glyph and the
            goal outline circle.
    """
    # Local imports keep matplotlib out of headless code paths.
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm

    # One distinct color per robot, sampled evenly from the rainbow map.
    palette = cm.rainbow(np.linspace(0, 1, self.num_robots))

    # Pass 1: latest pose of each robot, pulled from the history buffers.
    for idx, color in enumerate(palette):
        x_now = self.x_history[idx][-1]
        y_now = self.y_history[idx][-1]
        h_now = self.h_history[idx][-1]
        self.plot_roomba(x_now, y_now, h_now, color, False, radius)

    # Pass 2: each robot's goal, marked with a dot and an unfilled circle.
    for idx, color in enumerate(palette):
        goal_x, goal_y, _goal_h = self.policy.paths[idx][:, -1]
        plt.plot(goal_x, goal_y, 'o', color=color)
        outline = plt.Circle((goal_x, goal_y), radius, color=color, fill=False)
        plt.gca().add_artist(outline)

    # Pass 3: most recent optimizer output, if any was stored this step.
    for idx, color in enumerate(palette):
        ego_traj = self.optimized_trajectories_hist[idx][-1]
        if len(ego_traj) == 0:
            # No trajectory recorded for this robot at this timestep.
            continue
        pose_now = [self.x_history[idx][-1],
                    self.y_history[idx][-1],
                    self.h_history[idx][-1]]
        world_traj = self.policy.ego_to_global_roomba(pose_now, ego_traj)
        plt.plot(world_traj[0, :], world_traj[1, :], '+-', color=color)
...@@ -25,7 +25,8 @@ multi_robot_traj_opt: ...@@ -25,7 +25,8 @@ multi_robot_traj_opt:
simulator: simulator:
dt: .1 dt: .1
scaling_factor: 10.0 show_plots: 1
show_collision_resolution: 1
environment: environment:
circle_obstacles: [] circle_obstacles: []
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment