rmoan2 / db-guided-mrmp

Commit 91a2b022, authored 7 months ago by rachelmoan
Parent: 97eb02b0

    setting up code to run with main and a setting file

No related branches, tags, or merge requests found.

Showing 2 changed files with 145 additions and 14 deletions:
- guided_mrmp/main.py (+113, -14)
- settings.yaml (+32, -0)
guided_mrmp/main.py  (+113, −14)  @ 91a2b022

 import yaml
+import argparse
+import os
+import random
+import numpy as np
+import pygame
+
+from guided_mrmp.utils import Env, Roomba, Robot
+from guided_mrmp.planners import RRT
+from guided_mrmp.planners import RRTStar
+from guided_mrmp.planners.multirobot.db_guided_mrmp import GuidedMRMP
+from guided_mrmp.simulator import Simulator

 def load_settings(file_path):
     with open(file_path, 'r') as file:
         settings = yaml.safe_load(file)
     return settings

-def load_environment(file_path):
-    with open(file_path, 'r') as file:
-        environment = yaml.safe_load(file)
-    return environment
+def set_python_seed(seed):
+    print(f"***Setting Python Seed {seed}***")
+    os.environ['PYTHONHASHSEED'] = str(seed)
+    np.random.seed(seed)
+    random.seed(seed)
+
+def plan_decoupled_path(env, start, goal, solver="RRT*", step_length=20, goal_sample_rate=.5, num_samples=500000, r=80):
+    """
+    Plan decoupled path from a given start to a given goal, using a single-agent solver.
+
+    inputs:
+        - start (tuple): (x,y) location of start
+        - goal (tuple): (x,y) location of goal
+        - solver (string): Name of single-agent solver to be used
+        - step_length (float):
+        - goal_sample_rate (float):
+        - num_samples (int):
+        - r (float):
+    output:
+        - path (list): list of nodes in path
+    """
+    if solver == "RRT":
+        rrt = RRT(env, start, goal, step_length, goal_sample_rate, num_samples)
+        path = rrt.run()
+    elif solver == "RRT*":
+        rrtstar = RRTStar(env, start, goal, step_length, goal_sample_rate, num_samples, r)
+        path = rrtstar.run()
+    else:
+        print(f"Solver {solver} is not yet implemented. Choose something else.")
+        return None
+
+    return list(reversed(path))
+
+def initialize_robots(starts, goals, env):
+    """
+    NOTE This function (and the plan_decoupled_paths function could just exist as
+    helper functions elsewhere to make this class easier to understand)
+    """
+    print("initializing robots")
+    robots = []
+    RADIUS = 10
+    colors = [list(np.random.choice(range(256), size=3)) for i in range(len(starts))]
+
+    for i, (start, goal, color) in enumerate(zip(starts, goals, colors)):
+        print(f"planning path for robot {i} from {start} to {goal}")
+        rrtpath = plan_decoupled_path(env, (start[0], start[1]), (goal[0], goal[1]))
+        xs = []
+        ys = []
+        for node in rrtpath:
+            xs.append(node[0])
+            ys.append(node[1])
+
+        waypoints = [xs, ys]
+        print(f"waypoints = {waypoints}")
+
+        r = Robot(i, color, RADIUS, start, goal, 3.0, 1, .2, rrtpath, waypoints)
+        r.tracker.x_history.append(r.tracker.state[0])
+        r.tracker.y_history.append(r.tracker.state[1])
+        r.tracker.h_history.append(r.tracker.state[2])
+        robots.append(r)
+
+    return robots

 if __name__ == "__main__":
+    # Load files using the provided arguments
+    settings = load_settings("settings.yaml")
+    circle_obstacles = settings['environment']['circle_obstacles']
+    rectangle_obstacles = settings['environment']['rectangle_obstacles']
+    robot_starts = settings['robots']['starts']
+    robot_goals = settings['robots']['goals']
+    dynamics_models = settings['dynamics_models']
+
+    set_python_seed(42)
+
+    # Create the environment
+    # circle_obstacles = [(100,100,20), (200,200,30)]
+    # rectangle_obstacles = [(400,400,50,50)]
+    circle_obstacles = []
+    rectangle_obstacles = []
+    env = Env([0, 640], [0, 480], circle_obstacles, rectangle_obstacles)
+
+    # Create the robots
+    # starts, goals = create_random_starts_and_goals(env, 4)
+    starts = [[10, 10, 0], [600, 300, 0]]
+    goals = [[600, 300, 0], [10, 10, 0]]
+    robots = initialize_robots(starts, goals, env)
+
+    dynamics_model1 = Roomba()
+    dynamics_models = [dynamics_model1, dynamics_model1]
+
-    # Set up argument parser
-    parser = argparse.ArgumentParser(description='Load settings and environment files.')
-    parser.add_argument('--settings', required=True, help='Path to the settings file')
-    parser.add_argument('--environment', required=True, help='Path to the environment file')
-
-    # Parse arguments
-    args = parser.parse_args()
-
-    # Load files using the provided arguments
-    settings = load_settings(args.settings)
-    environment = load_environment(args.environment)
+    # Create the Guided MRMP policy
+    policy = GuidedMRMP(env, robots, dynamics_models)
+
+    # Create the simulator
+    pygame.init()
+    screen = pygame.display.set_mode((640, 480))
+    sim = Simulator(robots, dynamics_models, circle_obstacles, rectangle_obstacles, policy)
+    sim.run(screen)
\ No newline at end of file
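
For quick orientation, here is a minimal usage sketch (not part of the commit) of the decoupled planning helper defined above. The Env constructor and the plan_decoupled_path signature are taken from the diff; the import path and the start/goal coordinates are illustrative assumptions. With the repository importable and settings.yaml in the working directory, the new entry point itself can presumably be launched with python guided_mrmp/main.py.

    # Sketch only: mirrors how initialize_robots calls the helper for each robot.
    # Importing plan_decoupled_path from guided_mrmp.main is an assumption.
    from guided_mrmp.utils import Env
    from guided_mrmp.main import plan_decoupled_path

    env = Env([0, 640], [0, 480], [], [])          # empty workspace, as in __main__
    path = plan_decoupled_path(env, (10, 10), (600, 300), solver="RRT*")
    if path is not None:
        xs = [node[0] for node in path]
        ys = [node[1] for node in path]
        waypoints = [xs, ys]                       # the [xs, ys] layout handed to Robot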
settings.yaml  (new file, 0 → 100644)  (+32, −0)  @ 91a2b022

model_predictive_controller:
  dt: 0.1
  N: 10

multi_robot_traj_opt:
  rob_dist_weight: 10
  obs_dist_weight: 10
  time_weight: 5

simulator:
  dt: 0.3

environment:
  circle_obstacles: []
  rectangle_obstacles: []
  x_range: [0, 640]
  y_range: [0, 480]

robot_starts:
  - [10, 10, 0]
  - [600, 300, 0]

robot_goals:
  - [600, 300, 0]
  - [10, 10, 0]

dynamics_model:
  - Roomba
  - Roomba
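
As a sanity check, here is a minimal sketch (not part of the commit) of reading this file with yaml.safe_load, the same loader load_settings uses. Note that the new __main__ block reads settings['robots']['starts'], settings['robots']['goals'], and settings['dynamics_models'], whereas this file defines top-level robot_starts, robot_goals, and dynamics_model; the lookups below follow the file as committed.

    # Sketch only: inspect the committed settings.yaml with the loader main.py uses.
    import yaml

    with open("settings.yaml", "r") as f:
        settings = yaml.safe_load(f)

    print(settings["model_predictive_controller"]["dt"])   # 0.1
    print(settings["environment"]["x_range"])               # [0, 640]
    print(settings["robot_starts"])                          # [[10, 10, 0], [600, 300, 0]]
    print(settings["dynamics_model"])                        # ['Roomba', 'Roomba']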