mpc.py

    import numpy as np
    import casadi as ca

    from guided_mrmp.optimizer import Optimizer
    
    
    np.seterr(divide="ignore", invalid="ignore")
    
    class MPC:
    
        def __init__(self, model, T, DT, state_cost, final_state_cost, input_cost, input_rate_cost, settings):
    
            """
    
            Args:
                vehicle ():
                T ():
                DT ():
                state_cost ():
                final_state_cost ():
                input_cost ():
                input_rate_cost ():
            """
    
            self.nx = model.state_dimension()  # number of state vars 
            self.nu = model.control_dimension()  # number of input/control vars
    
    
            self.robot_model = model
            self.dt = DT
    
            # The prediction horizon T divided by the time step dt
            # gives the number of control intervals.
            self.control_horizon = int(T / DT) 
    
            # Weight for the error in state
            self.Q = np.diag(state_cost)
    
            # Weight for the error in final state
            self.Qf = np.diag(final_state_cost)
    
            # Weight for error in control
            self.R = np.diag(input_cost)
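            # Weight for the change in control between consecutive steps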
            self.P = np.diag(input_rate_cost)
    
            self.acceptable_tol = settings['acceptable_tol']
            self.acceptable_iter = settings['acceptable_iter']
            self.print_level = settings['print_level']
            self.print_time = settings['print_time']
    
        def setup_mpc_problem(self, initial_state, target, prev_cmd, A, B, C):
            """
            Builds the CasADi Opti problem: decision variables, quadratic tracking
            cost, linearized dynamics constraints, and actuation/rate limits.
            """
            opti = ca.Opti()

            # Decision variables
            X = opti.variable(self.nx, self.control_horizon + 1)
            U = opti.variable(self.nu, self.control_horizon)
    
            # Parameters
            initial_state = ca.DM(initial_state)
            target = ca.DM(target)
            prev_cmd = ca.DM(prev_cmd)
            A = ca.DM(A)
            B = ca.DM(B)
            C = ca.DM(C)
    
            # Quadratic cost: stage state-tracking error, control effort, and
            # control-rate smoothing, plus a terminal state-error term.
            cost = 0
            for k in range(self.control_horizon):
                cost += ca.mtimes([(X[:, k+1] - target[:, k]).T, self.Q, X[:, k+1] - target[:, k]])
                cost += ca.mtimes([U[:, k].T, self.R, U[:, k]])
                if k > 0:
                    cost += ca.mtimes([(U[:, k] - U[:, k-1]).T, self.P, U[:, k] - U[:, k-1]])

            cost += ca.mtimes([(X[:, -1] - target[:, -1]).T, self.Qf, X[:, -1] - target[:, -1]])

            opti.minimize(cost)
    
            # Linearized dynamics constraints
            for k in range(self.control_horizon):
    
                opti.subject_to(X[:, k+1] == ca.mtimes(A, X[:, k]) + ca.mtimes(B, U[:, k]) + C)
    
            # Initial state constraint
            opti.subject_to(X[:, 0] == initial_state)
    
            # Input magnitude limits (acceleration and steering)
            opti.subject_to(opti.bounded(-self.robot_model.max_acc, U[0, :], self.robot_model.max_acc))
            opti.subject_to(opti.bounded(-self.robot_model.max_steer, U[1, :], self.robot_model.max_steer))
    
            # Rate limits relative to the previously applied command
            opti.subject_to(ca.fabs(U[0, 0] - prev_cmd[0]) / self.dt <= self.robot_model.max_d_acc)
            opti.subject_to(ca.fabs(U[1, 0] - prev_cmd[1]) / self.dt <= self.robot_model.max_d_steer)
    
            # Rate limits between consecutive planned commands
            for k in range(1, self.control_horizon):
                opti.subject_to(ca.fabs(U[0, k] - U[0, k-1]) / self.dt <= self.robot_model.max_d_acc)
                opti.subject_to(ca.fabs(U[1, k] - U[1, k-1]) / self.dt <= self.robot_model.max_d_steer)
    
            return {
                'opti': opti,
                'X': X,
                'U': U,
                'initial_state': initial_state,
                'target': target,
                'prev_cmd': prev_cmd
            }
    
        def solve_optimization_problem(self, problem, initial_guesses=None, solver_options=None):
            """Delegates the assembled problem to the external Optimizer wrapper."""
            opt = Optimizer(problem)
            results = opt.solve_optimization_problem(initial_guesses, solver_options)
    
            return results
    
        def step(self, initial_state, target, prev_cmd, initial_guesses=None):
    
            """
            Sets up and solves the optimization problem.

            The model is linearized about the current state and previous command,
            so the A, B, C matrices do not need to be passed in.

            Args:
                initial_state: Current estimate of [x, y, heading]
                target: State-space reference trajectory, in the same frame as the current state
                prev_cmd: Previously applied command [v, delta]
                initial_guesses: Optional initial guess for the optimizer

            Returns:
                x_opt: Optimal state trajectory, or None if the solve fails
                u_opt: Optimal control trajectory, or None if the solve fails
            """
    
            A, B, C = self.robot_model.linearize(initial_state, prev_cmd, self.dt)
    
            solver_options = {'ipopt.print_level': self.print_level,
                              'print_time': self.print_time,
                              # 'ipopt.tol': 1e-3,
                              'ipopt.acceptable_tol': self.acceptable_tol,
                              'ipopt.acceptable_iter': self.acceptable_iter}
    
            problem = self.setup_mpc_problem(initial_state, target, prev_cmd, A, B, C)
    
            result = self.solve_optimization_problem(problem, initial_guesses, solver_options)
    
            if result['status'] == 'succeeded':
                x_opt = result['X']
                u_opt = result['U']
            else:
                print("Optimization failed")
                x_opt = None
                u_opt = None

            return x_opt, u_opt
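
    # ----------------------------------------------------------------------
    # Example usage (illustrative sketch only; not part of the controller).
    # `model`, `target_trajectory`, and `plant_step` are hypothetical
    # placeholders with the interface assumed by this file; the weights and
    # settings below are arbitrary.
    # ----------------------------------------------------------------------
    # mpc = MPC(model, T=1.0, DT=0.1,
    #           state_cost=[20, 20, 0.5],          # weights on [x, y, heading]
    #           final_state_cost=[30, 30, 0.5],
    #           input_cost=[10, 10],
    #           input_rate_cost=[10, 10],
    #           settings={'acceptable_tol': 1e-3, 'acceptable_iter': 10,
    #                     'print_level': 0, 'print_time': 0})
    #
    # state, prev_cmd = [0.0, 0.0, 0.0], [0.0, 0.0]
    # for _ in range(50):                          # receding-horizon loop
    #     x_opt, u_opt = mpc.step(state, target_trajectory, prev_cmd)
    #     if u_opt is None:
    #         break
    #     prev_cmd = u_opt[:, 0]                   # apply only the first command
    #     state = plant_step(state, prev_cmd)      # hypothetical plant/sim update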