diff --git a/guided_mrmp/controllers/mpc.py b/guided_mrmp/controllers/mpc.py
index ba3f77b6685bd4f13ebfbfdb0a44c6f6790968ca..f4c9c729ebd7c5a054f4e153a556d7c9091b6c3f 100644
--- a/guided_mrmp/controllers/mpc.py
+++ b/guided_mrmp/controllers/mpc.py
@@ -1,14 +1,10 @@
 import numpy as np
-# from guided_mrmp.utils import Roomba
-from guided_mrmp.optimizers.optimizer import Optimizer, iLQR
+import casadi as ca
 
 np.seterr(divide="ignore", invalid="ignore")
 
-import cvxpy as opt
-
-
 class MPC:
-    def __init__(self, model, T, DT, state_cost, final_state_cost, input_cost, input_rate_cost):
+    def __init__(self, model, T, DT, state_cost, final_state_cost, input_cost, input_rate_cost, settings):
         """
 
         Args:
@@ -40,65 +36,99 @@ class MPC:
         self.R = np.diag(input_cost)
         self.P = np.diag(input_rate_cost)
 
-        # Instantiate the optimizer
-        self.optimizer = iLQR(self.nx, self.nu, self.control_horizon)
+        self.acceptable_tol = settings['acceptable_tol']
+        self.acceptable_iter = settings['acceptable_iter']
+        self.print_level = settings['print_level']
+        self.print_time = settings['print_time']
 
-    def cost_function(self, x, u, target):
-        """
-        Calculate the cost function for the optimizer.
-
-        Parameters:
-        - target: (numpy.ndarray) The target values for tracking.
-        - u: (numpy.ndarray) The control inputs.
-        - x: (numpy.ndarray) The state variables.
-
-        Returns:
-        - cost: (float) The calculated cost.
-        """
-        cost = 0
+    def setup_mpc_problem(self, initial_state, target, prev_cmd, A, B, C):
 
-        
+        opti = ca.Opti()
 
-        # Tracking error cost
-        for k in range(self.control_horizon):
-            cost += opt.quad_form(x[:, k + 1] - target[:, k], self.Q)
+        # Decision variables
+        X = opti.variable(self.nx, self.control_horizon + 1)
+        U = opti.variable(self.nu, self.control_horizon)
 
-        # Final point tracking cost
-        cost += opt.quad_form(x[:, -1] - target[:, -1], self.Qf)
+        # Parameters
+        initial_state = ca.DM(initial_state)
+        target = ca.DM(target)
+        prev_cmd = ca.DM(prev_cmd)
+        A = ca.DM(A)
+        B = ca.DM(B)
+        C = ca.DM(C)
 
-        # Actuation magnitude cost
+        # Cost function
+        cost = 0
         for k in range(self.control_horizon):
-            cost += opt.quad_form(u[:, k], self.R)
-
-        # Actuation rate of change cost
-        for k in range(1, self.control_horizon):
-            cost += opt.quad_form(u[:, k] - u[:, k - 1], self.P)
+            cost += ca.mtimes([(X[:, k+1] - target[:, k]).T, self.Q, X[:, k+1] - target[:, k]])
+            cost += ca.mtimes([U[:, k].T, self.R, U[:, k]])
+            if k > 0:
+                cost += ca.mtimes([(U[:, k] - U[:, k-1]).T, self.P, U[:, k] - U[:, k-1]])
 
-        return cost
+        cost += ca.mtimes([(X[:, -1] - target[:, -1]).T, self.Qf, X[:, -1] - target[:, -1]])
 
-    def constraints(self, initial_state, A, B,C, x, u, robot_model, dt, prev_cmd):
+        opti.minimize(cost)
 
-        constr = []
-
-        # Kinematics Constraints
+        # Constraints
         for k in range(self.control_horizon):
-            constr += [x[:, k + 1] == A @ x[:, k] + B @ u[:, k] + C]
-
-        # initial state
-        constr += [x[:, 0] == initial_state]
+            opti.subject_to(X[:, k+1] == ca.mtimes(A, X[:, k]) + ca.mtimes(B, U[:, k]) + C)
 
-        # actuation bounds
-        constr += [opt.abs(u[:, 0]) <= robot_model.max_acc]
-        constr += [opt.abs(u[:, 1]) <= robot_model.max_steer]
+        opti.subject_to(X[:, 0] == initial_state)
 
-        # Actuation rate of change bounds
-        constr += [opt.abs(u[0, 0] - prev_cmd[0]) / dt <= robot_model.max_d_acc]
-        constr += [opt.abs(u[1, 0] - prev_cmd[1]) / dt <= robot_model.max_d_steer]
-        for k in range(1, self.control_horizon):
-            constr += [opt.abs(u[0, k] - u[0, k - 1]) / dt <= robot_model.max_d_acc]
-            constr += [opt.abs(u[1, k] - u[1, k - 1]) / dt <= robot_model.max_d_steer]
+        opti.subject_to(opti.bounded(-self.robot_model.max_acc, U[0, :], self.robot_model.max_acc))
+        opti.subject_to(opti.bounded(-self.robot_model.max_steer, U[1, :], self.robot_model.max_steer))
 
-        return constr
+        opti.subject_to(ca.fabs(U[0, 0] - prev_cmd[0]) / self.dt <= self.robot_model.max_d_acc)
+        opti.subject_to(ca.fabs(U[1, 0] - prev_cmd[1]) / self.dt <= self.robot_model.max_d_steer)
 
-    def step(self, initial_state, target, prev_cmd, initial_guess=None):
+        for k in range(1, self.control_horizon):
+            opti.subject_to(ca.fabs(U[0, k] - U[0, k-1]) / self.dt <= self.robot_model.max_d_acc)
+            opti.subject_to(ca.fabs(U[1, k] - U[1, k-1]) / self.dt <= self.robot_model.max_d_steer)
+
+        return {
+            'opti': opti,
+            'X': X,
+            'U': U,
+            'initial_state': initial_state,
+            'target': target,
+            'prev_cmd': prev_cmd
+        }
+
+    def solve_optimization_problem(self, problem, initial_guesses=None, solver_options=None):
+        opti = problem['opti']
+
+        if initial_guesses:
+            for param, value in initial_guesses.items():
+                # print(f"param = {param}")
+                # print(f"value = {value}")
+                opti.set_initial(problem[param], value)
+
+        # Set numerical backend, with options if provided
+        if solver_options:
+            opti.solver('ipopt', solver_options)
+        else:
+            opti.solver('ipopt')
+
+        try:
+            sol = opti.solve()  # actual solve
+            status = 'succeeded'
+        except:
+            sol = None
+            status = 'failed'
+
+        results = {
+            'status': status,
+            'solution': sol,
+        }
+
+        if sol:
+            for var_name, var in problem.items():
+                if var_name != 'opti':
+                    results[var_name] = sol.value(var)
+
+        return results
+
+    def step(self, initial_state, target, prev_cmd, initial_guesses=None):
         """
         Sets up and solves the optimization problem.
 
@@ -113,21 +143,23 @@
             x_opt: Optimal state trajectory
             u_opt: Optimal control trajectory
         """
-        A,B,C = self.robot_model.linearize(initial_state, prev_cmd, self.dt)
-
-        # set up variables for the optimization problem
-        x = opt.Variable((self.nx, self.control_horizon + 1))
-        u = opt.Variable((self.nu, self.control_horizon))
-
-        # Define the cost function
-        cost = self.cost_function(x, u, target)
-
-        # Define the constraints
-        constraints = self.constraints(initial_state, A, B, C, x, u, self.robot_model, self.dt, prev_cmd)
+        A, B, C = self.robot_model.linearize(initial_state, prev_cmd, self.dt)
+
+        solver_options = {'ipopt.print_level': self.print_level,
+                          'print_time': self.print_time,
+                          'ipopt.acceptable_tol': self.acceptable_tol,
+                          'ipopt.acceptable_iter': self.acceptable_iter}
+
+        problem = self.setup_mpc_problem(initial_state, target, prev_cmd, A, B, C)
 
-        # Use the Optimizer class to solve the optimization problem
-        x_opt, u_opt = self.optimizer.solve(x,u,cost, constraints, initial_guess)
+        result = self.solve_optimization_problem(problem, initial_guesses, solver_options)
 
-        return x_opt, u_opt
-    
+        if result['status'] == 'succeeded':
+            x_opt = result['X']
+            u_opt = result['U']
+        else:
+            print("Optimization failed")
+            x_opt = None
+            u_opt = None
+        return x_opt, u_opt
\ No newline at end of file
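
The reworked `step()` above now returns `(None, None)` when IPOPT fails and accepts an `initial_guesses` dict whose keys must name decision variables returned by `setup_mpc_problem` (in practice `'X'` and `'U'`). The following is a hypothetical usage sketch, not part of the patch: the `Roomba` model, the state/input dimensions, and the reference-trajectory shape are assumptions for illustration.

```python
# Usage sketch for the CasADi-based MPC. Roomba, the dimensions (3 states,
# 2 inputs), and the target layout are assumptions, not taken from this diff.
import numpy as np
from guided_mrmp.utils import Roomba              # model hinted at by the removed import
from guided_mrmp.controllers.mpc import MPC

settings = {'print_level': 0, 'print_time': 0,
            'acceptable_tol': 0.1, 'acceptable_iter': 1000}

T, DT = 10, 1                                     # horizon / step, mirroring settings.yaml
mpc = MPC(Roomba(), T, DT,
          state_cost=[20, 20, 0], final_state_cost=[30, 30, 0],
          input_cost=[10, 10], input_rate_cost=[10, 10],
          settings=settings)

N = mpc.control_horizon                           # assumed to be derived from T and DT
x0 = np.array([0.0, 0.0, 0.0])                    # current pose (x, y, theta)
prev_cmd = np.array([0.0, 0.0])                   # last applied input
target = np.zeros((3, N))                         # reference trajectory, one column per step

# Optional warm start: keys must match variables in the dict from setup_mpc_problem.
guesses = {'X': np.zeros((3, N + 1)), 'U': np.zeros((2, N))}

x_opt, u_opt = mpc.step(x0, target, prev_cmd, initial_guesses=guesses)
if u_opt is not None:
    next_cmd = u_opt[:, 0]                        # receding horizon: apply only the first input
```
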
diff --git a/settings.yaml b/settings.yaml
index 42242de9d23314d2e62b6611bb1f2fb6169c42c8..92bf090675924a74a75af5629ff8b467c1926a36 100644
--- a/settings.yaml
+++ b/settings.yaml
@@ -1,47 +1,59 @@
 single_agent_planner: "RRTStar"
 
-prediction_horizon: 8 # seconds
+prediction_horizon: 10 # seconds
 discretization_step: 1 # seconds
 
 model_predictive_controller:
-  Q: [20, 20, 20] # state error cost for a differntial drive robot
-  Qf: [30, 30, 30] # state final error cost for a differntial drive robot
-  R: [10, 10] # input cost for a differntial drive robot
+  Q: [20, 20, 0] # state error cost for a differential drive robot
+  Qf: [30, 30, 0] # state final error cost for a differential drive robot
+  R: [10, 10] # input cost for a differential drive robot
   P: [10, 10] # input rate of change cost for a differential drive robot
+  print_level: 0
+  print_time: 0
+  acceptable_tol: .1
+  acceptable_iter: 1000
 
 multi_robot_traj_opt:
-  rob_dist_weight: 10
+  rob_dist_weight: 20
   obs_dist_weight: 10
   time_weight: 5
 
 simulator:
-  dt: 0.6
+  dt: .3
+  scaling_factor: 6.0
 
 environment:
   circle_obstacles: []
   rectangle_obstacles: []
-  x_range: [0, 640]
-  y_range: [0, 480]
+  x_range: [0, 100]
+  y_range: [0, 100]
 
-robot_starts:
-  - [10, 10, 0]
-  - [600, 300, 0]
+robot_starts: []
+  # - [80, 80, 0]
+  # - [20, 20, 0]
 
-robot_goals:
-  - [600, 300, 0]
-  - [10, 10, 0]
+robot_goals: []
+  # - [20, 20, 0]
+  # - [80, 80, 0]
 
 robot_radii:
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+
 
 target_v: 3.0
@@ -56,5 +68,10 @@
 dynamics_models:
   - Roomba
   - Roomba
   - Roomba
+  - Roomba
+  - Roomba
+  - Roomba
+  - Roomba
+  - Roomba
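
The four new `model_predictive_controller` keys exist so the IPOPT options can be driven from the config file. A minimal sketch of that wiring follows, assuming the application loads `settings.yaml` with PyYAML (the loading code itself is not part of this diff):

```python
# Sketch only: assumes settings.yaml is read with PyYAML and the relevant keys
# are forwarded to MPC as its new `settings` argument.
import yaml

with open("settings.yaml") as f:
    cfg = yaml.safe_load(f)

mpc_cfg = cfg["model_predictive_controller"]
solver_settings = {key: mpc_cfg[key]
                   for key in ("print_level", "print_time",
                               "acceptable_tol", "acceptable_iter")}

# solver_settings now carries exactly the keys MPC.__init__ reads; step()
# passes them on to IPOPT via solver_options.
```
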