# mpc.py
import numpy as np
import casadi as ca
from guided_mrmp.controllers.optimizer import Optimizer

class MPC:
    """Linear MPC tracking controller solved with CasADi/IPOPT.

    Builds a quadratic tracking cost over a finite horizon subject to
    linearized dynamics X[k+1] = A X[k] + B U[k] + C, box constraints on
    the inputs, and rate-of-change limits on the inputs.
    """

    def __init__(self, model, T, DT, settings):
        """
        Args:
            model: Robot model providing state_dimension(), control_dimension(),
                linearize(), and actuation limits (max_acc, max_steer,
                max_d_acc, max_d_steer).
            T (float): Prediction horizon length, in seconds.
            DT (float): Discretization time step, in seconds.
            settings (dict): Configuration; the 'model_predictive_controller'
                section supplies the cost weights Q, Qf, R, P and the IPOPT
                options.
        """
        self.nx = model.state_dimension()    # number of state vars
        self.nu = model.control_dimension()  # number of input/control vars

        self.robot_model = model
        self.dt = DT

        # Hoist the settings section once instead of repeating the lookup.
        mpc_cfg = settings['model_predictive_controller']

        # how far we can look into the future divided by our dt
        # is the number of control intervals
        self.control_horizon = int(T / DT)

        self.Q = np.diag(mpc_cfg['Q'])    # weight for the error in state
        self.Qf = np.diag(mpc_cfg['Qf'])  # weight for the error in final state
        self.R = np.diag(mpc_cfg['R'])    # weight for control effort
        self.P = np.diag(mpc_cfg['P'])    # weight for control rate of change

        # IPOPT solver options (forwarded in step()).
        self.acceptable_tol = mpc_cfg['acceptable_tol']
        self.acceptable_iter = mpc_cfg['acceptable_iter']
        self.print_level = mpc_cfg['print_level']
        self.print_time = mpc_cfg['print_time']

    def setup_mpc_problem(self, initial_state, target, prev_cmd, A, B, C):
        """Construct the CasADi Opti problem for one MPC step.

        Args:
            initial_state: Current state vector (length nx).
            target: Reference trajectory; column k is the reference for
                X[:, k+1], and the last column is the terminal reference.
            prev_cmd: Previously applied control (length nu); limits the
                first interval's rate of change.
            A, B, C: Linearized discrete dynamics, X[k+1] = A X[k] + B U[k] + C.

        Returns:
            dict with the Opti instance, decision variables X and U, and the
            numeric parameters (initial_state, target, prev_cmd).
        """
        opti = ca.Opti()

        # Decision variables: states over the horizon (+1 for the initial
        # state) and one control per interval.
        X = opti.variable(self.nx, self.control_horizon + 1)
        U = opti.variable(self.nu, self.control_horizon)

        # Fixed numeric data for this solve.
        initial_state = ca.DM(initial_state)
        target = ca.DM(target)
        prev_cmd = ca.DM(prev_cmd)
        A = ca.DM(A)
        B = ca.DM(B)
        C = ca.DM(C)

        # Quadratic tracking cost.
        cost = 0
        for k in range(self.control_horizon):
            # Deviation of the (k+1)-th state from its reference.
            state_err = X[:, k + 1] - target[:, k]
            cost += ca.mtimes([state_err.T, self.Q, state_err])

            # Control effort.
            cost += ca.mtimes([U[:, k].T, self.R, U[:, k]])

            if k > 0:
                # Penalize large changes in control between intervals.
                du = U[:, k] - U[:, k - 1]
                cost += ca.mtimes([du.T, self.P, du])

        # Terminal cost on the final state.
        final_err = X[:, -1] - target[:, -1]
        cost += ca.mtimes([final_err.T, self.Qf, final_err])

        opti.minimize(cost)

        # Linearized dynamics constraints.
        for k in range(self.control_horizon):
            opti.subject_to(X[:, k + 1] == ca.mtimes(A, X[:, k]) + ca.mtimes(B, U[:, k]) + C)

        # Initial condition.
        opti.subject_to(X[:, 0] == initial_state)

        # Input magnitude limits.
        opti.subject_to(opti.bounded(-self.robot_model.max_acc, U[0, :], self.robot_model.max_acc))
        opti.subject_to(opti.bounded(-self.robot_model.max_steer, U[1, :], self.robot_model.max_steer))

        # Input rate limits, expressed as smooth two-sided bounds
        # (-b*dt <= du <= b*dt) instead of ca.fabs(du)/dt <= b: the two are
        # mathematically equivalent for dt > 0, but fabs is
        # non-differentiable at 0, which IPOPT's gradient-based method
        # handles poorly.
        max_dacc = self.robot_model.max_d_acc * self.dt
        max_dsteer = self.robot_model.max_d_steer * self.dt

        # First interval: rate relative to the previously applied command.
        opti.subject_to(opti.bounded(-max_dacc, U[0, 0] - prev_cmd[0], max_dacc))
        opti.subject_to(opti.bounded(-max_dsteer, U[1, 0] - prev_cmd[1], max_dsteer))

        for k in range(1, self.control_horizon):
            opti.subject_to(opti.bounded(-max_dacc, U[0, k] - U[0, k - 1], max_dacc))
            opti.subject_to(opti.bounded(-max_dsteer, U[1, k] - U[1, k - 1], max_dsteer))

        return {
            'opti': opti,
            'X': X,
            'U': U,
            'initial_state': initial_state,
            'target': target,
            'prev_cmd': prev_cmd
        }

    def solve_optimization_problem(self, problem, initial_guesses=None, solver_options=None):
        """Solve a constructed problem via the project's Optimizer wrapper."""
        opt = Optimizer(problem)
        return opt.solve_optimization_problem(initial_guesses, solver_options)

    def step(self, initial_state, target, prev_cmd, initial_guesses=None):
        """
        Sets up and solves the optimization problem for one control step.

        The linearized dynamics A, B, C are computed internally by
        linearizing the robot model about the current state and the
        previous command.

        Args:
            initial_state: Current estimate of [x, y, heading]
            target: State space reference, in the same frame as the provided
                current state
            prev_cmd: Previous [v, delta]
            initial_guesses: Optional initial guess for the optimizer

        Returns:
            x_opt: Optimal state trajectory, or None on solver failure
            u_opt: Optimal control trajectory, or None on solver failure
        """
        A, B, C = self.robot_model.linearize(initial_state, prev_cmd, self.dt)

        solver_options = {'ipopt.print_level': self.print_level,
                          'print_time': self.print_time,
                          'ipopt.acceptable_tol': self.acceptable_tol,
                          'ipopt.acceptable_iter': self.acceptable_iter}

        problem = self.setup_mpc_problem(initial_state, target, prev_cmd, A, B, C)

        result = self.solve_optimization_problem(problem, initial_guesses, solver_options)

        if result['status'] == 'succeeded':
            x_opt = result['X']
            u_opt = result['U']
        else:
            print("Optimization failed")
            x_opt = None
            u_opt = None

        return x_opt, u_opt