Commit 73ba6d0e authored by rachelmoan's avatar rachelmoan

Update visualization in main

parent c62452a0
@@ -5,7 +5,7 @@ from guided_mrmp.controllers.optimizer import Optimizer
 np.seterr(divide="ignore", invalid="ignore")
 class MultiMPC:
-    def __init__(self, num_robots, model, T, DT, state_cost, final_state_cost, input_cost, input_rate_cost, settings, circle_obs):
+    def __init__(self, num_robots, model, T, DT, settings, circle_obs):
         """
         Initializes the MPC controller.
         """
@@ -19,6 +19,11 @@ class MultiMPC:
         self.circle_obs = circle_obs
+        state_cost = settings['model_predictive_controller']['Q']        # state error cost
+        final_state_cost = settings['model_predictive_controller']['Qf'] # state final error cost
+        input_cost = settings['model_predictive_controller']['R']        # input cost
+        input_rate_cost = settings['model_predictive_controller']['P']   # input rate of change cost
         # how far we can look into the future divided by our dt
         # is the number of control intervals
         self.control_horizon = int(T / DT)
@@ -33,10 +38,14 @@ class MultiMPC:
         self.R = np.diag(input_cost)
         self.P = np.diag(input_rate_cost)
-        self.acceptable_tol = settings['acceptable_tol']
-        self.acceptable_iter = settings['acceptable_iter']
-        self.print_level = settings['print_level']
-        self.print_time = settings['print_time']
+        # Optimization settings
+        self.robot_robot_collision_weight = settings['model_predictive_controller']['robot_robot_collision_weight']
+        self.obstacle_collision_weight = settings['model_predictive_controller']['obstacle_collision_weight']
+        self.acceptable_tol = settings['model_predictive_controller']['acceptable_tol']
+        self.acceptable_iter = settings['model_predictive_controller']['acceptable_iter']
+        self.print_level = settings['model_predictive_controller']['print_level']
+        self.print_time = settings['model_predictive_controller']['print_time']
     def apply_quadratic_barrier(self, d_max, d, c):
         """
@@ -131,7 +140,7 @@ class MultiMPC:
                     d = ca.sqrt(d)
                     obstacle_cost += self.apply_quadratic_barrier(6*self.robot_radius, d-self.robot_radius*2, 1)
-        opti.minimize(cost + .5*dist_to_other_robots + .5*obstacle_cost)
+        opti.minimize(cost + self.robot_robot_collision_weight*dist_to_other_robots + self.obstacle_collision_weight*obstacle_cost)
         # Constraints
         for i in range(self.num_robots):
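Note: the constructor now pulls every cost term and solver option out of the settings dictionary instead of taking them as arguments. A minimal sketch of the structure those lookups imply — key names are taken from the reads above, the values are placeholder assumptions:

    settings = {
        'model_predictive_controller': {
            'Q':  [20, 20, 20],   # state error cost, one weight per state [x, y, heading]
            'Qf': [30, 30, 30],   # final state error cost
            'R':  [10, 10],       # input cost, one weight per input
            'P':  [10, 10],       # input rate of change cost
            'robot_robot_collision_weight': 0.5,   # replaces the hard-coded .5 weights in the minimize() call above
            'obstacle_collision_weight': 0.5,
            'acceptable_tol': 1e-6,   # solver options passed through to the optimizer
            'acceptable_iter': 15,
            'print_level': 0,
            'print_time': 0,
        },
        'environment': {
            'circle_obstacles': [],   # assumed list of (x, y, radius) entries
        },
    }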
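The objective also leans on apply_quadratic_barrier, whose body is outside this diff. Given the call apply_quadratic_barrier(d_max, d, c) and the CasADi expressions around it, a plausible reading is a one-sided penalty that switches on as the clearance d drops below d_max — a sketch under that assumption, not the committed implementation:

    import casadi as ca

    def apply_quadratic_barrier(d_max, d, c):
        # Zero when d >= d_max; grows quadratically (scaled by c) as d
        # falls below d_max. Sketch only -- the real body is not in this diff.
        return c * ca.fmax(0, d_max - d) ** 2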
...
@@ -18,7 +18,6 @@ class MultiPathTracker:
         """
         # State of the robot [x,y, heading]
         self.env = env
-
         self.states = initial_positions
         self.num_robots = len(initial_positions)
         self.dynamics = dynamics
@@ -37,25 +36,14 @@ class MultiPathTracker:
         self.K = int(T / DT)
-        # For a car model
-        # Q = [20, 20, 10, 20]  # state error cost
-        # Qf = [30, 30, 30, 30] # state final error cost
-        # R = [10, 10]          # input cost
-        # P = [10, 10]          # input rate of change cost
-        # self.mpc = MPC(VehicleModel(), T, DT, Q, Qf, R, P)
         # libraries for the discrete solver
         self.lib_2x3 = lib_2x3
         self.lib_3x3 = lib_3x3
         self.lib_2x5 = lib_2x5
+        self.settings = settings
-        # For a circular robot (easy dynamics)
-        Q = settings['model_predictive_controller']['Q']   # state error cost
-        Qf = settings['model_predictive_controller']['Qf'] # state final error cost
-        R = settings['model_predictive_controller']['R']   # input cost
-        P = settings['model_predictive_controller']['P']   # input rate of change cost
-        self.mpc = MultiMPC(self.num_robots, dynamics, T, DT, Q, Qf, R, P, settings['model_predictive_controller'], settings['environment']['circle_obstacles'])
+        self.mpc = MultiMPC(self.num_robots, dynamics, T, DT, settings, settings['environment']['circle_obstacles'])
         self.circle_obs = settings['environment']['circle_obstacles']
@@ -80,7 +68,6 @@ class MultiPathTracker:
         self.optimized_trajectories_hist = [ [] for _ in range(self.num_robots) ]
         self.optimized_trajectory = None
-
     def trajectories_overlap(self, traj1, traj2, threshold):
         """
         Checks if two trajectories overlap. We only care about xy positions.
@@ -99,7 +86,6 @@ class MultiPathTracker:
                     return True
         return False
-
     def ego_to_global_roomba(self, state, mpc_out):
         """
         Transforms optimized trajectory XY points from ego (robot) reference
@@ -134,7 +120,7 @@ class MultiPathTracker:
         return trajectory
-    def advance(self, state, show_plots=False):
+    def advance(self, screen, state, show_plots=False):
         # optimization loop
         # start=time.time()
@@ -142,8 +128,16 @@ class MultiPathTracker:
         # 1. Get the reference trajectory for each robot
         targets = []
         for i in range(self.num_robots):
-            targets.append(get_ref_trajectory(np.array(state[i]), np.array(self.paths[i]), self.target_v, self.T, self.DT, len(self.x_history[i])+1))
+            ref, visited_guide_points = get_ref_trajectory(np.array(state[i]),
+                                                           np.array(self.paths[i]),
+                                                           self.target_v,
+                                                           self.T,
+                                                           self.DT,
+                                                           [])
+            self.visited_points_on_guide_paths[i] = visited_guide_points
+            targets.append(ref)
         # dynamics w.r.t robot frame
         # curr_state = np.array([0, 0, self.state[2], 0])
         curr_states = np.zeros((self.num_robots, 3))
@@ -162,7 +156,6 @@ class MultiPathTracker:
         return x_mpc, self.control
-
     def done(self):
         for i in range(self.num_robots):
             # print(f"state = {self.states[i]}")
@@ -183,6 +176,11 @@ class MultiPathTracker:
         # x_history, y_history, and h_history
         colors = cm.rainbow(np.linspace(0, 1, self.num_robots))
+        # plot the obstacles
+        for obs in self.circle_obs:
+            circle1 = plt.Circle((obs[0], obs[1]), obs[2], color='k', fill=True)
+            plt.gca().add_artist(circle1)
         for i in range(self.num_robots):
             plot_roomba(self.x_history[i][-1], self.y_history[i][-1], self.h_history[i][-1], colors[i], False, self.radius)
@@ -195,7 +193,7 @@ class MultiPathTracker:
         # plot the ref path of each robot
         for i in range(self.num_robots):
-            plt.plot(self.paths[i][0, :], self.paths[i][1, :], '--', color=colors[i])
+            plt.plot(self.paths[i][0, :], self.paths[i][1, :], 'o-', color=colors[i], alpha=0.5)
         # set the size of the plot to be 10x10
@@ -250,10 +248,6 @@ class MultiPathTracker:
             self.x_history[i].append(self.states[i, 0])
             self.y_history[i].append(self.states[i, 1])
             self.h_history[i].append(self.states[i, 2])
-        if self.update_ref_paths:
-            self.update_reference_paths()
-            self.update_ref_paths = False
         # use the optimizer output to preview the predicted state trajectory
         # self.optimized_trajectory = self.ego_to_global(x_mpc.value)
...
@@ -105,7 +105,7 @@ class MultiPathTrackerDB(MultiPathTracker):
         print(f"all conflicts = {all_conflicts}")
         # visualize the grid
-        self.draw_grid()
+        self.draw_grid(self.states)
         grid_solver = DiscreteResolver(disc_conflict, disc_robots, starts, goals, disc_all_conflicts, grid, self.lib_2x3, self.lib_3x3, self.lib_2x5)
         subproblem = grid_solver.find_subproblem()
@@ -234,7 +234,7 @@ class MultiPathTrackerDB(MultiPathTracker):
         print(f"self.grid_size = {self.grid_size}")
         print(f"top_left_grid = {self.top_left_grid}")
-        self.draw_grid()
+        self.draw_grid(self.states)
         # Check if, for every robot, the cell value of the start and the cell value
         # of the goal are the same. If they are, then we can't find a discrete solution that
@@ -316,7 +316,7 @@ class MultiPathTrackerDB(MultiPathTracker):
         y = self.top_left_grid[1] - (cell_y + 0.5) * self.cell_size
         return x, y
-    def plot_trajs(self, traj1, traj2, radius):
+    def plot_trajs(self, state, traj1, traj2, radius):
         """
         Plot the trajectories of two robots.
         """
@@ -328,7 +328,7 @@ class MultiPathTrackerDB(MultiPathTracker):
         colors = cm.rainbow(np.linspace(0, 1, self.num_robots))
         for i in range(self.num_robots):
-            plot_roomba(self.x_history[i][-1], self.y_history[i][-1], self.h_history[i][-1], colors[i], False, self.radius)
+            plot_roomba(state[i][0], state[i][1], state[i][2], colors[i], False, self.radius)
         # plot the goal of each robot with solid circle
         for i in range(self.num_robots):
@@ -358,7 +358,7 @@ class MultiPathTrackerDB(MultiPathTracker):
         plt.show()
-    def draw_grid(self):
+    def draw_grid(self, state):
         starts, goals = self.get_temp_starts_and_goals()
         # draw the whole environment with the local grid drawn on top
@@ -370,7 +370,7 @@ class MultiPathTrackerDB(MultiPathTracker):
         colors = cm.rainbow(np.linspace(0, 1, self.num_robots))
         for i in range(self.num_robots):
-            plot_roomba(self.x_history[i][-1], self.y_history[i][-1], self.h_history[i][-1], colors[i], False, self.radius)
+            plot_roomba(self.states[i][0], self.states[i][1], self.states[i][2], colors[i], False, self.radius)
         # plot the goal of each robot with solid circle
         for i in range(self.num_robots):
@@ -413,7 +413,7 @@ class MultiPathTrackerDB(MultiPathTracker):
         plt.show()
-    def draw_grid_solution(self, state):
+    def draw_grid_solution(self, state, state_of_robots):
         # draw the whole environment with the local grid drawn on top
         import matplotlib.pyplot as plt
@@ -424,7 +424,7 @@ class MultiPathTrackerDB(MultiPathTracker):
         colors = cm.rainbow(np.linspace(0, 1, self.num_robots))
         for i in range(self.num_robots):
-            plot_roomba(self.x_history[i][-1], self.y_history[i][-1], self.h_history[i][-1], colors[i], False, self.radius)
+            plot_roomba(state_of_robots[i][0], state_of_robots[i][1], state_of_robots[i][2], colors[i], False, self.radius)
         # plot the goal of each robot with solid circle
         for i in range(self.num_robots):
@@ -497,6 +497,7 @@ class MultiPathTrackerDB(MultiPathTracker):
     def advance(self, state, show_plots=False):
         # optimization loop
         # start=time.time()
+        self.states = state
         self.update_ref_paths = False
@@ -528,7 +529,7 @@ class MultiPathTrackerDB(MultiPathTracker):
                 if self.trajectories_overlap(traj1, traj2, self.radius):
                     # plot the trajectories
-                    self.plot_trajs(traj1, traj2, self.radius)
+                    self.plot_trajs(state, traj1, traj2, self.radius)
                     print(f"Collision detected between robot {i} and robot {j}")
@@ -571,7 +572,7 @@ class MultiPathTrackerDB(MultiPathTracker):
             initial_guess = self.get_initial_guess(grid_solution, self.num_robots, 20, 1)
             initial_guess_state = initial_guess['X']
-            self.draw_grid_solution(initial_guess_state)
+            self.draw_grid_solution(initial_guess_state, self.states)
             print(f"initial_guess_state shape = {initial_guess_state.shape}")
             print(f"initial_guess_state = {initial_guess_state}")
...
@@ -99,14 +99,11 @@ def get_ref_trajectory(state, path, target_v, T, DT, path_visited_points=[]):
     """
     K = int(T / DT)
-    print(f"path_visited_points = {path_visited_points}")
     xref = np.zeros((3, K))  # Reference trajectory for [x, y, theta]
     # find the nearest path point to the current state
     ind = get_nn_idx(state, path, path_visited_points)
-    print(f"closest point = {path[:, ind]}")
     path_visited_points.append([path[0, ind], path[1, ind]])
     # calculate the cumulative distance along the path
@@ -133,6 +130,4 @@ def get_ref_trajectory(state, path, target_v, T, DT, path_visited_points=[]):
     xref[2, :] = (xref[2, :] + np.pi) % (2.0 * np.pi) - np.pi
     xref[2, :] = fix_angle_reference(xref[2, :], xref[2, 0])
-    print(f"returning path_visited_points = {path_visited_points}")
     return xref, path_visited_points
\ No newline at end of file
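Note: the default argument path_visited_points=[] is mutable, so points appended here persist across calls that rely on the default. The updated caller in MultiPathTracker.advance sidesteps this by passing a fresh [] on every call; the usual idiom avoids the pitfall at the definition instead — a sketch:

    def get_ref_trajectory(state, path, target_v, T, DT, path_visited_points=None):
        # create a fresh list per call instead of sharing one mutable default
        if path_visited_points is None:
            path_visited_points = []
        ...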
...
@@ -2,7 +2,6 @@ import yaml
 import os
 import random
 import numpy as np
-import pygame
 import time
 from guided_mrmp.utils import Env, Roomba, Robot, create_random_starts_and_goals
 from guided_mrmp.planners import RRT
@@ -33,7 +32,7 @@ def set_python_seed(seed):
     random.seed(seed)
 def plan_decoupled_path(env, start, goal, solver="RRT*",
-                        step_length=20, goal_sample_rate=.5, num_samples=500000, r=80):
+                        step_length=.5, goal_sample_rate=.05, num_samples=500, r=2):
     """
     Plan decoupled path from a given start to a given goal, using a single-agent solver.
@@ -82,9 +81,7 @@ def initialize_robots(starts, goals, dynamics_models, radii, target_v, env):
         waypoints = [xs,ys]
-        # print(f"waypoints = {waypoints}")
-        start_heading = np.arctan2(goal[1] - start[1], goal[0] - start[0])
+        start_heading = np.arctan2(ys[1] - start[1], xs[1] - start[0])
         start = [start[0], start[1], start_heading]
         r = Robot(i,color,radius,start,goal,dynamics_model,target_v,rrtpath,waypoints, tree)
@@ -92,9 +89,8 @@ def initialize_robots(starts, goals, dynamics_models, radii, target_v, env):
     return robots
-
 if __name__ == "__main__":
     # get the name of the settings file from the command line
     import sys
     if len(sys.argv) < 2:
@@ -136,7 +132,7 @@ if __name__ == "__main__":
     # Create the Guided MRMP policy
    T = settings['prediction_horizon']
    DT = settings['discretization_step']
    policy = MultiPathTrackerDB(env=env,
                                initial_positions=robot_starts,
                                dynamics=dynamics_models[0],  # NOTE: Using the same dynamics model for all robots for now
                                target_v=target_v,
@@ -150,19 +146,12 @@ if __name__ == "__main__":
                                )
     # Create the simulator
-    scaling_factor = settings['simulator']['scaling_factor']
-    show_vis = True
-    if show_vis:
-        pygame.init()
-        screen = pygame.display.set_mode((x_range[1]*scaling_factor, y_range[1]*scaling_factor))
-    else: screen = None
+    show_vis = False
     sim = Simulator(robots, dynamics_models, env, policy, settings)
     # Run the simulation
     start = time.time()
-    sim.run(screen, show_vis)
+    sim.run(show_vis)
     end = time.time()
     print(f"Simulation took {end-start} seconds")
...
@@ -12,16 +12,18 @@ from shapely.geometry import Polygon, Point
 from guided_mrmp.utils import Env
 from guided_mrmp.utils.helpers import *
-from guided_mrmp.conflict_resolvers import TrajOptResolver,TrajOptDBResolver
 from guided_mrmp.controllers.utils import get_ref_trajectory, compute_path_from_wp
-from guided_mrmp.planners import RRT
-from guided_mrmp.planners import RRTStar
 from guided_mrmp.controllers.mpc import MPC
+from guided_mrmp.controllers.multi_mpc import MultiMPC
+# from guided_mrmp.controllers.multi_path_tracking import MultiPathTracker
 def plan_decoupled_path(env, start, goal, solver="RRT*",
-                        step_length=20, goal_sample_rate=.5, num_samples=500000, r=80):
+                        step_length=20, goal_sample_rate=.5, num_samples=500000, r=10):
+    from guided_mrmp.planners import RRT
+    from guided_mrmp.planners import RRTStar
     """
     Plan decoupled path from a given start to a given goal, using a single-agent solver.
@@ -38,15 +40,15 @@ def plan_decoupled_path(env, start, goal, solver="RRT*",
     """
     if solver == "RRT":
         rrt = RRT(env, start, goal, step_length, goal_sample_rate, num_samples)
-        path = rrt.run()
+        path,tree = rrt.run()
     elif solver == "RRT*":
         rrtstar = RRTStar(env, start, goal, step_length, goal_sample_rate, num_samples,r)
-        path = rrtstar.run()
+        path,tree = rrtstar.run()
     else:
         print(f"Solver {solver} is not yet implemented. Choose something else.")
         return None
-    return list(reversed(path))
+    return list(reversed(path)), tree
 class GuidedMRMP:
     def __init__(self, env, robots, dynamics_models, T, DT, libs,settings):
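Since plan_decoupled_path now returns the sampled tree alongside the path, every call site has to unpack two values, e.g. (as the commented-out caller further down in advance does):

    rrtpath, tree = plan_decoupled_path(env, (start[0], start[1]), (goal[0], goal[1]))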
@@ -207,7 +209,7 @@ class GuidedMRMP:
         return next_controls, next_trajs
     def get_next_controls_and_trajectories(self,screen):
         """
         Get the next control for each robot.
@@ -233,8 +235,7 @@ class GuidedMRMP:
             curr_state = np.array([0, 0, 0])
             x_mpc, u_mpc = mpc.step(curr_state, target_traj, np.array(r.control))
-            self.add_vis_target_traj(screen, self.scaling_factor, r, x_mpc)
             # only the first one is used to advance the simulation
@@ -242,7 +243,7 @@ class GuidedMRMP:
             next_controls.append(np.asarray(control))
-            # print(f"control = {u_mpc}")
+            print(f"x_mpc = {x_mpc}")
             next_trajs.append(self.ego_to_global(r, x_mpc))
         # use matplotlib to plot the trajectories
@@ -252,6 +253,52 @@ class GuidedMRMP:
         # plt.show()
         return next_controls, next_trajs
+    def get_next_controls_and_trajectories_multi_robot(self,screen):
+        """
+        Get the next control for each robot, but use MPC that considers all robots at once.
+        """
+        multiMPC = MultiMPC(len(self.robots), self.dynamics_models[0], self.T, self.DT, self.Q, self.Qf, self.R, self.P, self.settings['model_predictive_controller'])
+        # 1. Get the reference trajectory for each robot
+        targets = []
+        state = []
+        for r in self.robots:
+            state.append(r.current_position)
+        for i in range(len(self.robots)):
+            print(f"state i = {state[i]}")
+            # print(f"path i = {self.paths[i]}")
+            targets.append(get_ref_trajectory(np.array(state[i]), np.array(self.guide_paths[i]), self.robots[i].target_v, self.T, self.DT))
+        # dynamics w.r.t robot frame
+        # curr_state = np.array([0, 0, self.state[2], 0])
+        curr_states = np.zeros((len(self.robots), 3))
+        controls = []
+        for r in self.robots:
+            controls.append(r.control)
+        x_mpc, u_mpc = multiMPC.step(
+            curr_states,
+            targets,
+            controls
+        )
+        # only the first one is used to advance the simulation
+        # self.control[:] = [u_mpc[0, 0], u_mpc[1, 0]]
+        next_controls = []
+        for i in range(len(self.robots)):
+            next_controls.append([u_mpc[i*2, 0], u_mpc[i*2+1, 0]])
+        next_trajs = []
+        for i in range(len(self.robots)):
+            print([x_mpc[i*3,:], x_mpc[i*3+1,:], x_mpc[i*3+2,:]])
+            next_trajs.append(self.ego_to_global(self.robots[i], np.array([x_mpc[i*3,:], x_mpc[i*3+1,:], x_mpc[i*3+2,:]])))
+        return next_controls, next_trajs
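The i*2 / i*3 indexing above assumes the joint MPC stacks all robots' variables into single matrices: three state rows [x, y, heading] and two input rows per robot, in robot order. Under that assumption, the unpacking generalizes to slices — a sketch:

    import numpy as np

    def split_joint_mpc_output(x_mpc, u_mpc, num_robots):
        # Assumed layout: 3 state rows and 2 input rows per robot,
        # stacked in robot order (matches the i*3 / i*2 indexing above).
        states = [np.array(x_mpc[i*3:(i+1)*3, :]) for i in range(num_robots)]  # [x, y, heading] over the horizon
        inputs = [np.array(u_mpc[i*2:(i+1)*2, :]) for i in range(num_robots)]  # [v, omega] over the horizon
        return states, inputs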
     def advance(self, screen, state, time, dt=0.1):
         """
         Advance the simulation by one timestep.
@@ -263,122 +310,147 @@ class GuidedMRMP:
         """
         # update the guide paths of each robot
-        for idx, r in enumerate(self.robots):
-            start = r.current_position
-            goal = r.goal
-            xs = []
-            ys = []
-            rrtpath = plan_decoupled_path(self.env, (start[0],start[1]), (goal[0],goal[1]))
-            for node in rrtpath:
-                xs.append(node[0])
-                ys.append(node[1])
-            waypoints = [xs,ys]
-            self.guide_paths[idx] = compute_path_from_wp(waypoints[0], waypoints[1], .05)
+        # for idx, r in enumerate(self.robots):
+        #     start = r.current_position
+        #     goal = r.goal
+        #     xs = []
+        #     ys = []
+        #     rrtpath, tree = plan_decoupled_path(self.env, (start[0],start[1]), (goal[0],goal[1]))
+        #     for node in rrtpath:
+        #         xs.append(node[0])
+        #         ys.append(node[1])
+        #     waypoints = [xs,ys]
+        #     self.guide_paths[idx] = compute_path_from_wp(waypoints[0], waypoints[1], .05)
         # get the next control for each robot
-        if self.settings['model_predictive_controller']['parallelize']:
+        # if self.settings['model_predictive_controller']['parallelize']:
+        if False:
             next_desired_controls, trajectories = self.get_next_controls_and_trajectories_parallel(screen)
         else:
-            next_desired_controls, trajectories = self.get_next_controls_and_trajectories(screen)
+            next_desired_controls, trajectories = self.get_next_controls_and_trajectories_multi_robot(screen)
         # find all the conflicts at the next time horizon
         conflicts = self.find_all_conflicts(trajectories, dt)
-        if len(conflicts) == 0:
+        # conflicts=[]
+        print(f"conflicts = {conflicts}")
+        if True:
             # no conflicts, return the desired controls
             controls = []
             for control in next_desired_controls:
-                next_controls = [control[0][0], control[1][0]]
-                controls.append(np.asarray(next_controls))
+                # print(control)
+                # next_controls = [control[0][0], control[1][0]]
+                controls.append(np.asarray(control))
             for r, next_control in zip(self.robots, controls):
                 r.control = next_control
             return False, controls
         # resolve the conflicts using the database
-        # resolver = TrajOptDBResolver(10, 60, conflicts, self.robots)
         rad = self.robots[0].radius
-        # resolver = TrajOptDBResolver(rad*2, 5, conflicts, self.robots, trajectories, dt, rad,self.env,10,10,5,self.libs[0],self.libs[1],self.libs[2])
-        # num_robots, robot_radius, starts, goals, circle_obstacles, rectangle_obstacles,
-        #  rob_dist_weight, obs_dist_weight, control_weight, time_weight, goal_weight)
-        # the starts are the current positions of all robots
-        starts = []
-        for r in self.robots:
-            starts.append(r.current_position)
-        # the goals are the last point on the desired trajectory of all robots
-        goals = []
-        for r in trajectories:
-            goals.append(r[:,-1])
+        # resolver = TrajOptResolver(len(starts), rad, starts, goals, self.env.circle_obs, self.env.rect_obs,
+        #                            self.rob_dist_weight, self.obstacle_weight, self.control_weight,
+        #                            self.time_weight, self.goal_weight, conflicts, self.robots)
+        # resolver = TrajOptDBResolver(cell_size = rad*2.5,
+        #                              grid_size = 5,
+        #                              all_conflicts = conflicts,
+        #                              all_robots=self.robots,
+        #                              trajs=trajectories,
+        #                              robot_radius=rad,
+        #                              env=self.env,
+        #                              rob_dist_weight=self.rob_dist_weight,
+        #                              obs_dist_weight=self.obstacle_weight,
+        #                              control_weight=self.control_weight,
+        #                              time_weight=self.time_weight,
+        #                              goal_weight=self.goal_weight,
+        #                              lib_2x3=self.libs[0],
+        #                              lib_3x3=self.libs[1],
+        #                              lib_2x5=self.libs[2])
+        starts = [r.current_position for r in self.robots]
+        print(f"starts = {starts}")
+        print(f"trajs = {trajectories}")
+        resolver = MultiPathTracker(starts, self.dynamics_models[0], 3.0, self.T, self.DT, trajectories, self.settings)
+        # get the current positions
+        # starts = [r.current_position for r in self.robots]
+        # goals = [r.goal for r in self.robots]
+        # dump all the data needed for trajoptDBresolver to a yaml file for debugging
+        # data = {
+        #     "conflicts": conflicts,
+        #     "dt": dt,
+        #     "env": self.env,
+        #     "next_desired_controls": next_desired_controls,
+        #     "robots": self.robots,
+        #     "trajectories": trajectories,
+        #     "starts": starts,
+        #     "goals": goals,
+        #     "rad": rad,
+        #     "rob_dist_weight": self.rob_dist_weight,
+        #     "obstacle_weight": self.obstacle_weight,
+        #     "control_weight": self.control_weight,
+        #     "time_weight": self.time_weight,
+        #     "goal_weight": self.goal_weight
+        # }
+        # import yaml
+        # with open("guided_mrmp/tests/db_opt_data1.yaml", "w") as file:
+        #     yaml.dump(data, file)
+        # get the local controls
+        # next_desired_controls, trajs = resolver.get_local_controls(next_desired_controls)
+        resolver.run()
+        vs = resolver.v_history
+        omegas = resolver.a_history
+        next_desired_controls = []
+        for v, omega in zip(vs, omegas):
+            next_desired_controls.append([v[1], omega[1]])
+        # # get the goals that were used by the resolver
+        # starts, goals = resolver.get_temp_starts_and_goals()
+        # # get the continuous space values of these goals
+        # continuous_subgoals = []
+        # for g in goals:
+        #     continuous_subgoals.append(resolver.get_grid_cell_location(g[0], g[1]))
+        # reroute the robots' guide paths to include the new subgoals in the guidepaths
+        # update the guide paths of each robot
+        # for idx, r in enumerate(self.robots):
+        #     subgoal = continuous_subgoals[idx]
+        #     start = r.current_position
+        #     goal = r.goal
+        #     xs = []
+        #     ys = []
+        #     # rrtpath, tree = plan_decoupled_path(self.env, (subgoal[0],subgoal[1]), (goal[0],goal[1]))
+        #     rrtstar = RRTStar(self.env, (subgoal[0],subgoal[1]), (goal[0],goal[1]), step_len=20, goal_sample_rate=.5, iter_max=500000, r=80, sampled_vertices=r.tree)
+        #     rrtpath,tree = rrtstar.run()
+        #     rrtpath = list(reversed(rrtpath))
+        #     print(f"rrtpath = {rrtpath}")
+        #     for node in rrtpath:
+        #         print(f"node = {node}")
+        #         xs.append(node[0])
+        #         ys.append(node[1])
+        #     xs.insert(0,start[0])
+        #     ys.insert(0,start[1])
+        #     waypoints = [xs,ys]
+        #     self.guide_paths[idx] = compute_path_from_wp(waypoints[0], waypoints[1], .05)
+        # self.add_vis_conflict_free_traj(screen, self.scaling_factor, trajs)
-        def check_goal_overlap(goal1, goal2, rad):
-            """
-            Check if the goals overlap
-            """
-            # get the vector between the two goals
-            v = np.array(goal2) - np.array(goal1)
-            # check if the distance between the two goals is less than 2*rad
-            if np.linalg.norm(v) < 2*rad:
-                return True
-            return False
-        def fix_goal_overlap(goal1, goal2):
-            """
-            Fix the goal overlap
-            """
-            # get the vector between the two goals
-            v = np.array(goal2) - np.array(goal1)
-            # move goal2 in the positive direction of the vector
-            goal2 = goal2 + .5*v
-            # move goal1 in the negative direction of the vector
-            goal1 = goal1 - .5*v
-            return goal1, goal2
-        if check_goal_overlap(goals[0], goals[1], rad):
-            print("fixing goal overlap")
-            goals[0], goals[1] = fix_goal_overlap(goals[0], goals[1])
-        resolver = TrajOptResolver(len(starts), rad, starts, goals, self.env.circle_obs, self.env.rect_obs,
-                                   self.rob_dist_weight, self.obstacle_weight, self.control_weight,
-                                   self.time_weight, self.goal_weight, conflicts, self.robots)
-        next_desired_controls, trajs = resolver.get_local_controls(next_desired_controls)
-        self.add_vis_conflict_free_traj(screen, self.scaling_factor, trajs)
-        # dump all the data needed for trajoptresolver to a yaml file for debugging
-        data = {
-            "conflicts": conflicts,
-            "dt": dt,
-            "env": self.env,
-            "next_desired_controls": next_desired_controls,
-            "robots": self.robots,
-            "trajectories": trajectories,
-            "starts": starts,
-            "goals": goals,
-            "rad": rad,
-            "rob_dist_weight": self.rob_dist_weight,
-            "obstacle_weight": self.obstacle_weight,
-            "control_weight": self.control_weight,
-            "time_weight": self.time_weight,
-            "goal_weight": self.goal_weight
-        }
-        import yaml
-        with open("guided_mrmp/tests/db_opt_data2.yaml", "w") as file:
-            yaml.dump(data, file)
         # return the valid controls
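A note on the removed goal-overlap helpers: with v = goal2 - goal1, shifting goal2 by +0.5*v and goal1 by -0.5*v doubles the separation, so a single application only clears the < 2*rad overlap test when |v| >= rad. A quick numeric check of that arithmetic:

    import numpy as np

    goal1, goal2, rad = np.array([0.0, 0.0]), np.array([1.0, 0.0]), 0.8
    v = goal2 - goal1                          # |v| = 1.0 < 2*rad = 1.6, so the overlap check fires
    goal1, goal2 = goal1 - 0.5*v, goal2 + 0.5*v
    print(np.linalg.norm(goal2 - goal1))       # 2.0, i.e. 2*|v|, which is >= 2*rad since |v| >= rad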
@@ -394,14 +466,23 @@ class GuidedMRMP:
         """
         for r in self.robots:
-            path = self.guide_paths[r.label]
-            xs = path[0]
-            ys = path[1]
-            # for node in path:
-            #     pygame.draw.circle(screen, r.color, (int(node[0]), int(node[1])), 2)
-            for i in range(len(xs)-1):
-                pygame.draw.line(screen, r.color, (xs[i]*scaling_factor,ys[i]*scaling_factor),
-                                 (xs[i+1]*scaling_factor,ys[i+1]*scaling_factor), 2)
+            # path = self.guide_paths[r.label]
+            # xs = path[0]
+            # ys = path[1]
+            # # for node in path:
+            # #     pygame.draw.circle(screen, r.color, (int(node[0]), int(node[1])), 2)
+            # for i in range(len(xs)-1):
+            #     pygame.draw.line(screen, r.color, (xs[i]*scaling_factor,ys[i]*scaling_factor),
+            #                      (xs[i+1]*scaling_factor,ys[i+1]*scaling_factor), 2)
+            tree = r.tree
+            for node in tree:
+                # draw a line from the node to its parent
+                print(f"node = {node}")
+                if node.parent is not None:
+                    print(f"parent = {node.parent}")
+                    pygame.draw.line(screen, r.color, (node.current[0]*scaling_factor,node.current[1]*scaling_factor),
+                                     (node.parent[0]*scaling_factor,node.parent[1]*scaling_factor), 2)
     def add_vis_grid(self, screen, grid_size, top_left, cell_size):
         """
@@ -446,16 +527,3 @@ class GuidedMRMP:
                 next_y = traj[1][i+1]
                 pygame.draw.line(screen, (0,255,0), (x*scaling_factor,y*scaling_factor),
                                  (next_x*scaling_factor,next_y*scaling_factor), 2)
-if __name__ == "__main__":
-    # create the environment
-    env = Env()
-    # starts, goals = create_random_starts_and_goals(env, 4)
-    starts = [[0,0.1,0],[10,5.1,0]]
-    goals = [[10,3,0],[5,1,0]]
-    alg = GuidedMRMP(Env)
-    alg.run(starts, goals)
\ No newline at end of file
...
-import pygame
+import matplotlib.pyplot as plt
+import matplotlib.cm as cm
 import numpy as np
 # Simulator class:
@@ -15,6 +16,7 @@ class Simulator:
         time: the current time
         """
         self.robots = robots
+        self.env = env
         self.circ_obstacles = env.circle_obs
         self.rect_obstacles = env.rect_obs
         self.policy = policy
@@ -25,12 +27,19 @@ class Simulator:
         self.dynamics_models = dynamics_models
         self.time = 0
-        # scaling factor used to adjust the size of the pygame window
         self.scaling_factor = settings['simulator']['scaling_factor']
+        # Helper variables to keep track of the sim
+        self.sim_time = 0
+        self.x_history = [ [] for _ in range(self.num_robots) ]
+        self.y_history = [ [] for _ in range(self.num_robots) ]
+        self.h_history = [ [] for _ in range(self.num_robots) ]
+        self.optimized_trajectories_hist = [ [] for _ in range(self.num_robots) ]
+        self.optimized_trajectory = None
     def all_robots_at_goal(self):
-        for r in self.robots:
-            if (np.sqrt((r.current_position[0] - r.goal[0]) ** 2 + (r.current_position[1] - r.goal[1]) ** 2) > 10):
+        for i in range(self.num_robots):
+            if (np.sqrt((self.state[i][0] - self.robots[i].goal[0]) ** 2 + (self.state[i][1] - self.robots[i].goal[1]) ** 2) > .5):
                 return False
         return True
@@ -40,62 +49,127 @@ class Simulator:
         """
         # Get the controls from the policy
-        x_mpc, controls = self.policy.advance(self.state)
-        # Update the state of each robot
-        for i in range(self.num_robots):
-            new_state = self.dynamics_models[i].next_state(self.state[i], controls[i], dt)
-            self.robots[i].current_position = new_state
-            self.state[i] = new_state
+        x_mpc, controls = self.policy.advance(screen, self.state)
+        # # Update the state of each robot
+        # for i in range(self.num_robots):
+        #     new_state = self.dynamics_models[i].next_state(self.state[i], controls[i], dt)
+        #     self.robots[i].current_position = new_state
+        #     self.state[i] = new_state
         # Update the time
         self.time += dt
+        return x_mpc, controls
+    def run(self, show_plots=False):
+        """
+        Run the path tracker algorithm.
+        Parameters:
+        - show_plots (bool): Flag indicating whether to show plots during the simulation. Default is False.
+        Returns:
+        - numpy.ndarray: Array containing the history of x, y, and h coordinates.
+        """
+        # Add the initial state to the histories
+        self.state = np.array(self.state)
+        for i in range(self.num_robots):
+            self.x_history[i].append(self.state[i, 0])
+            self.y_history[i].append(self.state[i, 1])
+            self.h_history[i].append(self.state[i, 2])
+        # if show_plots: self.plot_sim()
+        self.plot_current_world_state()
+        while 1:
+            # check if all robots have reached their goal
+            if self.all_robots_at_goal():
+                print("Success! Goal Reached")
+                return np.asarray([self.x_history, self.y_history, self.h_history])
+            # plot the current state of the robots
+            self.plot_current_world_state()
+            # get the next control for all robots
+            x_mpc, controls = self.advance(self.state, self.policy.DT)
+            next_states = []
+            for i in range(self.num_robots):
+                next_states.append(self.policy.dynamics.next_state(self.state[i], controls[i], self.policy.DT))
+            self.state = next_states
+            self.state = np.array(self.state)
+            for i in range(self.num_robots):
+                self.x_history[i].append(self.state[i, 0])
+                self.y_history[i].append(self.state[i, 1])
+                self.h_history[i].append(self.state[i, 2])
+            # use the optimizer output to preview the predicted state trajectory
+            # self.optimized_trajectory = self.ego_to_global(x_mpc.value)
+            # if show_plots: self.optimized_trajectory = self.ego_to_global_roomba(x_mpc)
+            # if show_plots: self.plot_sim()
+    def plot_current_world_state(self):
+        """
+        Plot the current state of the world.
+        """
+        colors = cm.rainbow(np.linspace(0, 1, self.num_robots))
-    def draw_environment(self, screen):
-        """ Draw the environment on the screen """
-        screen.fill((255,255,255))
-        for obs in self.circ_obstacles:
-            pygame.draw.circle(screen, (0,0,0), (obs[0]*self.scaling_factor,obs[1]*self.scaling_factor), obs[2]*self.scaling_factor)
-        for obs in self.rect_obstacles:
-            pygame.draw.rect(screen, (0,0,0), obs)
-    def draw_robots(self, screen):
-        """ Draw the robots on the screen """
-        for robot in self.robots:
-            x,y,yaw = robot.current_position
-            pygame.draw.circle(screen, robot.color, (x*self.scaling_factor,y*self.scaling_factor), robot.radius*self.scaling_factor)
-            # Plot direction marker
-            dx = robot.radius * np.cos(robot.current_position[2])
-            dy = robot.radius * np.sin(robot.current_position[2])
-            pygame.draw.line(screen, (0,0,0), (x*self.scaling_factor,y*self.scaling_factor), (x*self.scaling_factor+dx*self.scaling_factor,y*self.scaling_factor+dy*self.scaling_factor), width=2)
-    def run(self, screen, show_vis=False):
-        clock = pygame.time.Clock()
-        pause = False
-        while not self.all_robots_at_goal():
-            clock.tick(30)
-            if show_vis:
-                for event in pygame.event.get():
-                    if event.type == pygame.QUIT:
-                        return
-                    if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
-                        return
-                    if event.type == pygame.KEYDOWN and event.key == pygame.K_p:
-                        pause = not pause
-                    if event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT and pause:
-                        self.draw_environment(screen)
-                        self.policy.add_vis_guide_paths(screen, self.scaling_factor)
-                        self.draw_robots(screen)
-                        self.advance(screen,self.policy.DT)
-            if not pause:
-                if show_vis:
-                    self.draw_environment(screen)
-                    # self.policy.add_vis_guide_paths(screen, self.scaling_factor)
-                    self.draw_robots(screen)
-                self.advance(screen,self.policy.DT)
-            if show_vis:
-                pygame.display.flip()
+        # plot the obstacles
+        for obs in self.circ_obstacles:
+            circle1 = plt.Circle((obs[0], obs[1]), obs[2], color='k', fill=True)
+            plt.gca().add_artist(circle1)
+        # Plot the current state of each robot using the most recent values from
+        # x_history, y_history, and h_history
+        for i in range(self.num_robots):
+            self.plot_roomba(self.x_history[i][-1], self.y_history[i][-1], self.h_history[i][-1], colors[i], False, self.policy.radius)
+            x, y, theta = self.policy.paths[i][:, -1]
+            plt.plot(x, y, 'o', color=colors[i])
+            circle1 = plt.Circle((x, y), self.policy.radius, color=colors[i], fill=False)
+            plt.gca().add_artist(circle1)
+        # plot the ref path of each robot
+        for i in range(self.num_robots):
+            plt.plot(self.policy.paths[i][0, :], self.policy.paths[i][1, :], '--', color=colors[i])
+        x_range = self.env.boundary[0]
+        y_range = self.env.boundary[1]
+        plt.xlim(x_range[0], x_range[1])
+        plt.ylim(y_range[0], y_range[1])
+        # force equal aspect ratio
+        plt.gca().set_aspect('equal', adjustable='box')
+        plt.tight_layout()
+        plt.show()
+        # plt.draw()
+        # plt.pause(0.1)
+        # plt.clf()
+    def plot_roomba(self, x, y, yaw, color, fill, radius):
+        """
+        Args:
+            x ():
+            y ():
+            yaw ():
+        """
+        ax = plt.gca()
+        if fill: alpha = .3
+        else: alpha = 1
+        circle = plt.Circle((x, y), radius, color=color, fill=fill, alpha=alpha)
+        ax.add_patch(circle)
+        # Plot direction marker
+        dx = 1 * np.cos(yaw)
+        dy = 1 * np.sin(yaw)
+        ax.arrow(x, y, dx, dy, head_width=0.1, head_length=0.1, fc='r', ec='r')