rmoan2 / db-guided-mrmp

Commit 4072dcb7, authored 5 months ago by rachelmoan
Use casadi's acceptable_iter and acceptable_tol parameters when doing MPC
parent 749fe7df
Changes: 2 files changed, 139 additions and 90 deletions
guided_mrmp/controllers/mpc.py  +98 −66
settings.yaml  +41 −24
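For context: IPOPT's acceptable_tol and acceptable_iter options let the solver terminate early once it has produced a given number of consecutive iterates at a looser, "acceptable" tolerance, and CasADi's Opti interface forwards them as 'ipopt.'-prefixed entries in the options dict passed to opti.solver(). A minimal, self-contained sketch of that mechanism follows; the toy objective and the option values are illustrative only, not taken from this commit.

import casadi as ca

# Toy problem used only to show how the "acceptable" termination options are
# passed through CasADi's Opti interface to IPOPT.
opti = ca.Opti()
x = opti.variable()
opti.minimize((x - 3) ** 2)

solver_options = {
    'ipopt.print_level': 0,        # silence IPOPT's per-iteration output
    'print_time': 0,               # suppress CasADi's timing summary
    'ipopt.acceptable_tol': 1e-1,  # looser tolerance that counts as "acceptable"
    'ipopt.acceptable_iter': 10,   # stop after this many consecutive acceptable iterates
}
opti.solver('ipopt', solver_options)

sol = opti.solve()
print(sol.value(x))  # approximately 3.0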
guided_mrmp/controllers/mpc.py  +98 −66
import numpy as np
# from guided_mrmp.utils import Roomba
from guided_mrmp.optimizers.optimizer import Optimizer, iLQR
import casadi as ca

np.seterr(divide="ignore", invalid="ignore")
import cvxpy as opt

class MPC:
-    def __init__(self, model, T, DT, state_cost, final_state_cost, input_cost, input_rate_cost):
+    def __init__(self, model, T, DT, state_cost, final_state_cost, input_cost, input_rate_cost, settings):
        """
        Args:
        ...
        ...
@@ -40,65 +36,99 @@ class MPC:
        self.R = np.diag(input_cost)
        self.P = np.diag(input_rate_cost)

-        # Instantiate the optimizer
-        self.optimizer = iLQR(self.nx, self.nu, self.control_horizon)
+        self.acceptable_tol = settings['acceptable_tol']
+        self.acceptable_iter = settings['acceptable_iter']
+        self.print_level = settings['print_level']
+        self.print_time = settings['print_time']

-    def cost_function(self, x, u, target):
-        """
-        Calculate the cost function for the optimizer.
-        Parameters:
-        - target: (numpy.ndarray) The target values for tracking.
-        - u: (numpy.ndarray) The control inputs.
-        - x: (numpy.ndarray) The state variables.
-        Returns:
-        - cost: (float) The calculated cost.
-        """
-        cost = 0
-
-        # Tracking error cost
-        for k in range(self.control_horizon):
-            cost += opt.quad_form(x[:, k + 1] - target[:, k], self.Q)
-
-        # Final point tracking cost
-        cost += opt.quad_form(x[:, -1] - target[:, -1], self.Qf)
-
-        # Actuation magnitude cost
-        for k in range(self.control_horizon):
-            cost += opt.quad_form(u[:, k], self.R)
-
-        # Actuation rate of change cost
-        for k in range(1, self.control_horizon):
-            cost += opt.quad_form(u[:, k] - u[:, k - 1], self.P)
-
-        return cost
-
-    def constraints(self, initial_state, A, B, C, x, u, robot_model, dt, prev_cmd):
-        constr = []
-
-        # Kinematics Constraints
-        for k in range(self.control_horizon):
-            constr += [x[:, k + 1] == A @ x[:, k] + B @ u[:, k] + C]
-
-        # initial state
-        constr += [x[:, 0] == initial_state]
-
-        # actuation bounds
-        constr += [opt.abs(u[:, 0]) <= robot_model.max_acc]
-        constr += [opt.abs(u[:, 1]) <= robot_model.max_steer]
-
-        # Actuation rate of change bounds
-        constr += [opt.abs(u[0, 0] - prev_cmd[0]) / dt <= robot_model.max_d_acc]
-        constr += [opt.abs(u[1, 0] - prev_cmd[1]) / dt <= robot_model.max_d_steer]
-        for k in range(1, self.control_horizon):
-            constr += [opt.abs(u[0, k] - u[0, k - 1]) / dt <= robot_model.max_d_acc]
-            constr += [opt.abs(u[1, k] - u[1, k - 1]) / dt <= robot_model.max_d_steer]
-
-        return constr
-
-    def step(self, initial_state, target, prev_cmd, initial_guess=None):
+    def setup_mpc_problem(self, initial_state, target, prev_cmd, A, B, C):
+        opti = ca.Opti()
+
+        # Decision variables
+        X = opti.variable(self.nx, self.control_horizon + 1)
+        U = opti.variable(self.nu, self.control_horizon)
+
+        # Parameters
+        initial_state = ca.DM(initial_state)
+        target = ca.DM(target)
+        prev_cmd = ca.DM(prev_cmd)
+        A = ca.DM(A)
+        B = ca.DM(B)
+        C = ca.DM(C)
+
+        # Cost function
+        cost = 0
+        for k in range(self.control_horizon):
+            cost += ca.mtimes([(X[:, k + 1] - target[:, k]).T, self.Q, X[:, k + 1] - target[:, k]])
+            cost += ca.mtimes([U[:, k].T, self.R, U[:, k]])
+            if k > 0:
+                cost += ca.mtimes([(U[:, k] - U[:, k - 1]).T, self.P, U[:, k] - U[:, k - 1]])
+        cost += ca.mtimes([(X[:, -1] - target[:, -1]).T, self.Qf, X[:, -1] - target[:, -1]])
+
+        opti.minimize(cost)
+
+        # Constraints
+        for k in range(self.control_horizon):
+            opti.subject_to(X[:, k + 1] == ca.mtimes(A, X[:, k]) + ca.mtimes(B, U[:, k]) + C)
+
+        opti.subject_to(X[:, 0] == initial_state)
+
+        opti.subject_to(opti.bounded(-self.robot_model.max_acc, U[0, :], self.robot_model.max_acc))
+        opti.subject_to(opti.bounded(-self.robot_model.max_steer, U[1, :], self.robot_model.max_steer))
+
+        opti.subject_to(ca.fabs(U[0, 0] - prev_cmd[0]) / self.dt <= self.robot_model.max_d_acc)
+        opti.subject_to(ca.fabs(U[1, 0] - prev_cmd[1]) / self.dt <= self.robot_model.max_d_steer)
+
+        for k in range(1, self.control_horizon):
+            opti.subject_to(ca.fabs(U[0, k] - U[0, k - 1]) / self.dt <= self.robot_model.max_d_acc)
+            opti.subject_to(ca.fabs(U[1, k] - U[1, k - 1]) / self.dt <= self.robot_model.max_d_steer)
+
+        return {
+            'opti': opti,
+            'X': X,
+            'U': U,
+            'initial_state': initial_state,
+            'target': target,
+            'prev_cmd': prev_cmd
+        }
+
+    def solve_optimization_problem(self, problem, initial_guesses=None, solver_options=None):
+        opti = problem['opti']
+
+        if initial_guesses:
+            for param, value in initial_guesses.items():
+                # print(f"param = {param}")
+                # print(f"value = {value}")
+                opti.set_initial(problem[param], value)
+
+        # Set numerical backend, with options if provided
+        if solver_options:
+            opti.solver('ipopt', solver_options)
+        else:
+            opti.solver('ipopt')
+
+        try:
+            sol = opti.solve()  # actual solve
+            status = 'succeeded'
+        except:
+            sol = None
+            status = 'failed'
+
+        results = {
+            'status': status,
+            'solution': sol,
+        }
+
+        if sol:
+            for var_name, var in problem.items():
+                if var_name != 'opti':
+                    results[var_name] = sol.value(var)
+
+        return results
+
+    def step(self, initial_state, target, prev_cmd, initial_guesses=None):
        """
        Sets up and solves the optimization problem.
        ...
        ...
@@ -113,21 +143,23 @@ class MPC:
            x_opt: Optimal state trajectory
            u_opt: Optimal control trajectory
        """
-        A, B, C = self.robot_model.linearize(initial_state, prev_cmd, self.dt)
-
-        # set up variables for the optimization problem
-        x = opt.Variable((self.nx, self.control_horizon + 1))
-        u = opt.Variable((self.nu, self.control_horizon))
-
-        # Define the cost function
-        cost = self.cost_function(x, u, target)
-
-        # Define the constraints
-        constraints = self.constraints(initial_state, A, B, C, x, u, self.robot_model, self.dt, prev_cmd)
-
-        # Use the Optimizer class to solve the optimization problem
-        x_opt, u_opt = self.optimizer.solve(x, u, cost, constraints, initial_guess)
-
-        return x_opt, u_opt
+        A, B, C = self.robot_model.linearize(initial_state, prev_cmd, self.dt)
+
+        solver_options = {'ipopt.print_level': self.print_level,
+                          'print_time': self.print_time,
+                          'ipopt.acceptable_tol': self.acceptable_tol,
+                          'ipopt.acceptable_iter': self.acceptable_iter}
+
+        problem = self.setup_mpc_problem(initial_state, target, prev_cmd, A, B, C)
+
+        result = self.solve_optimization_problem(problem, initial_guesses, solver_options)
+
+        if result['status'] == 'succeeded':
+            x_opt = result['X']
+            u_opt = result['U']
+        else:
+            print("Optimization failed")
+            x_opt = None
+            u_opt = None
+
+        return x_opt, u_opt
\ No newline at end of file
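One detail worth calling out in the new solve_optimization_problem(): CasADi raises an exception when IPOPT fails to return a solution, so the solve is wrapped in try/except and variable values are read back with sol.value(). Below is a self-contained sketch of that pattern on a toy problem; as an assumption, the bare except from the diff is narrowed to RuntimeError here, and the dict layout only mirrors the problem dict returned by setup_mpc_problem.

import casadi as ca

def solve_problem(problem, solver_options=None):
    """Solve an Opti problem dict and report status plus extracted values."""
    opti = problem['opti']
    opti.solver('ipopt', solver_options or {})
    try:
        sol = opti.solve()
        status = 'succeeded'
    except RuntimeError:  # raised by CasADi when IPOPT reports failure
        sol = None
        status = 'failed'
    results = {'status': status, 'solution': sol}
    if sol:
        for name, var in problem.items():
            if name != 'opti':
                results[name] = sol.value(var)
    return results

# Toy usage: minimize (x - 2)^2 subject to x >= 0.
opti = ca.Opti()
x = opti.variable()
opti.minimize((x - 2) ** 2)
opti.subject_to(x >= 0)
out = solve_problem({'opti': opti, 'x': x},
                    {'ipopt.print_level': 0, 'print_time': 0})
print(out['status'], out.get('x'))  # succeeded, ~2.0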
settings.yaml  +41 −24
single_agent_planner: "RRTStar"

-prediction_horizon: 8 # seconds
+prediction_horizon: 10 # seconds
discretization_step: 1 # seconds

model_predictive_controller:
-  Q: [20, 20, 20]   # state error cost for a differntial drive robot
-  Qf: [30, 30, 30]  # state final error cost for a differntial drive robot
-  R: [10, 10]       # input cost for a differntial drive robot
+  Q: [20, 20, 0]    # state error cost for a differential drive robot
+  Qf: [30, 30, 0]   # state final error cost for a differential drive robot
+  R: [10, 10]       # input cost for a differential drive robot
   P: [10, 10]       # input rate of change cost for a differential drive robot
+  print_level: 0
+  print_time: 0
+  acceptable_tol: .1
+  acceptable_iter: 1000

multi_robot_traj_opt:
-  rob_dist_weight: 10
+  rob_dist_weight: 20
   obs_dist_weight: 10
   time_weight: 5

simulator:
-  dt: 0.6
+  dt: .3
   scaling_factor: 6.0

environment:
   circle_obstacles: []
   rectangle_obstacles: []
-  x_range: [0, 640]
-  y_range: [0, 480]
+  x_range: [0, 100]
+  y_range: [0, 100]

-robot_starts:
-  - [10, 10, 0]
-  - [600, 300, 0]
+robot_starts: []
+# - [80, 80, 0]
+# - [20, 20, 0]

-robot_goals:
-  - [600, 300, 0]
-  - [10, 10, 0]
+robot_goals: []
+# - [20, 20, 0]
+# - [80, 80, 0]

robot_radii:
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
-  - 10
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1
+  - 1

target_v: 3.0
...
...
@@ -56,5 +68,10 @@ dynamics_models:
   - Roomba
   - Roomba
   - Roomba
+  - Roomba
+  - Roomba
+  - Roomba
+  - Roomba
+  - Roomba
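How settings.yaml actually gets loaded is not part of this diff, but the new MPC.__init__ expects a settings dict containing the four keys added above. A hedged sketch of one way to pull them out with PyYAML follows; the file path and key names match the file shown here, while the surrounding wiring is assumed rather than taken from the repo.

import yaml

# Assumed wiring: read settings.yaml and collect the solver-related keys the
# new MPC.__init__ reads (print_level, print_time, acceptable_tol, acceptable_iter).
with open('settings.yaml') as f:
    settings = yaml.safe_load(f)

mpc_cfg = settings['model_predictive_controller']
solver_settings = {key: mpc_cfg[key]
                   for key in ('print_level', 'print_time',
                               'acceptable_tol', 'acceptable_iter')}
print(solver_settings)  # e.g. {'print_level': 0, 'print_time': 0, 'acceptable_tol': 0.1, 'acceptable_iter': 1000}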