Skip to content
Snippets Groups Projects
Commit 540bd5a6 authored by rachelmoan's avatar rachelmoan
Browse files

Add comments/ delete spaces

parent 202c2569
No related branches found
No related tags found
No related merge requests found
......@@ -2,8 +2,6 @@ import numpy as np
import casadi as ca
from guided_mrmp.optimizer import Optimizer
np.seterr(divide="ignore", invalid="ignore")
class MPC:
def __init__(self, model, T, DT, state_cost, final_state_cost, input_cost, input_rate_cost, settings):
"""
......@@ -61,11 +59,17 @@ class MPC:
# --- MPC objective ----------------------------------------------------
# Quadratic cost accumulated over the control horizon: state tracking
# (weight Q), control effort (R), control-rate smoothing (P), plus a
# terminal tracking term (Qf).
# NOTE(review): this is a fragment of a larger method — X (state
# trajectory), U (control trajectory), target (reference trajectory) and
# opti (ca.Opti instance) are defined earlier in the enclosing method,
# which is not visible here.
cost = 0
for k in range(self.control_horizon):
    # Tracking error: predicted state X[:, k+1] vs. reference target[:, k].
    # X is offset by one — presumably X[:, 0] holds the current/initial
    # state, so the first penalized state is the first prediction; confirm
    # against the constraint setup in the full method.
    cost += ca.mtimes([(X[:, k+1] - target[:, k]).T, self.Q, X[:, k+1] - target[:, k]])
    # Control effort penalty.
    cost += ca.mtimes([U[:, k].T, self.R, U[:, k]])
    if k > 0:
        # Penalize large changes in control between consecutive steps
        # (smooths the input; k == 0 has no predecessor to compare with).
        cost += ca.mtimes([(U[:, k] - U[:, k-1]).T, self.P, U[:, k] - U[:, k-1]])
# Terminal cost on the final predicted state against the final reference.
cost += ca.mtimes([(X[:, -1] - target[:, -1]).T, self.Qf, X[:, -1] - target[:, -1]])
opti.minimize(cost)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment