Project: llvm / distiller

Commit aba6e49b
Authored 6 years ago by Neta Zmora

Jupyter notebooks: Add details to the Performance notebook

Showing various details about the performance of ResNet50

Parent: 09dc2f25

1 changed file: jupyter/performance.ipynb (+85 additions, -2 deletions)
@@ -38,7 +38,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "model = models.create_model(pretrained=False, dataset='imagenet', arch='resnet18', parallel=False)"
+    "model = models.create_model(pretrained=False, dataset='imagenet', arch='resnet50', parallel=False)"
    ]
   },
   {
@@ -56,6 +56,49 @@
    "print(\"Total MACs: \" + \"{:,}\".format(total_macs))"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Let's take a look at how our compute is distributed:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(\"MAC distribution:\")\n",
+    "counts = df['MACs'].value_counts()\n",
+    "print(counts)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Let's look at which convolution kernel sizes we're using, and how many instances:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(\"Convolution kernel size distribution:\")\n",
+    "counts = df['Attrs'].value_counts()\n",
+    "print(counts)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Let's look at how the MACs are distributed between the layers and the convolution kernel sizes"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -64,11 +107,30 @@
    },
    "outputs": [],
    "source": [
+    "def get_layer_color(layer_type, attrs):\n",
+    "    if layer_type == \"Conv2d\":\n",
+    "        if attrs == 'k=(1, 1)':\n",
+    "            return 'tomato'\n",
+    "        elif attrs == 'k=(3, 3)':\n",
+    "            return 'limegreen'\n",
+    "        else:\n",
+    "            return 'steelblue'\n",
+    "    return 'indigo'\n",
+    "\n",
     "df_compute = df['MACs']\n",
-    "ax = df_compute.plot.bar(figsize=[15,10], title=\"MACs\");\n",
+    "ax = df_compute.plot.bar(figsize=[15,10], title=\"MACs\", \n",
+    "                         color=[get_layer_color(layer_type, attrs) for layer_type,attrs in zip(df['Type'], df['Attrs'])])\n",
     "\n",
     "ax.set_xticklabels(df.Name, rotation=90);"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### How do the Weights and Feature-maps footprints distribute across the layers:"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -81,6 +143,27 @@
    "ax.set_xticklabels(df.Name, rotation=90);"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### How the Arithmetic Intensity distributes across the layers:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "df_performance = df\n",
+    "df_performance['raw traffic'] = df_footprint['FM volume'] + df_footprint['Weights volume']\n",
+    "df_performance['arithmetic intensity'] = df['MACs'] / df_performance['raw traffic']\n",
+    "df_performance2 = df_performance['arithmetic intensity']\n",
+    "ax = df_performance2.plot.bar(figsize=[15,10], title=\"Arithmetic Intensity\");\n",
+    "ax.set_xticklabels(df.Name, rotation=90);"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
%% Cell type:code id: tags:

``` python
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable

# Relative import of code from distiller, w/o installing the package
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)

import pandas as pd
import distiller
import models
from apputils import *
```

%% Cell type:markdown id: tags:

## Performance overview

%% Cell type:code id: tags:

``` python
model = models.create_model(pretrained=False, dataset='imagenet', arch='resnet50', parallel=False)
```

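As a quick sanity check (an editorial addition, not part of this commit), the size of the model just created can be read straight from PyTorch; for ResNet-50 the count should come out at roughly 25.6M parameters:

``` python
# Editorial sketch: count the parameters of the model created above.
num_params = sum(p.numel() for p in model.parameters())
print("Total parameters: {:,}".format(num_params))  # expect ~25.6 million for ResNet-50
```
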
%% Cell type:code id: tags:

``` python
dummy_input = Variable(torch.randn(1, 3, 224, 224), requires_grad=False)
df = distiller.model_performance_summary(model, dummy_input, batch_size=1)
display(df)
total_macs = df['MACs'].sum()
print("Total MACs: " + "{:,}".format(total_macs))
```

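To make the "Total MACs" figure concrete, here is a minimal sketch (an editorial addition; `conv2d_macs` is a hypothetical helper, not a distiller API) of the standard MAC estimate for a single Conv2d layer: output pixels times kernel volume times output channels. It only approximates whatever `model_performance_summary` reports internally:

``` python
# Hypothetical helper (illustration only): MAC estimate for one Conv2d layer.
# MACs ~= out_H * out_W * (k_H * k_W * C_in / groups) * C_out
def conv2d_macs(c_in, c_out, k, out_hw, groups=1):
    out_h, out_w = out_hw
    return out_h * out_w * (k * k * c_in // groups) * c_out

# Example: ResNet-50's first convolution (7x7, stride 2) maps a 3x224x224 input
# to a 64x112x112 output, i.e. about 118M MACs per image.
print("{:,}".format(conv2d_macs(c_in=3, c_out=64, k=7, out_hw=(112, 112))))
```
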
%% Cell type:markdown id: tags:

### Let's take a look at how our compute is distributed:

%% Cell type:code id: tags:

``` python
print("MAC distribution:")
counts = df['MACs'].value_counts()
print(counts)
```

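Because ResNet-50 repeats the same bottleneck shapes, many layers share identical MAC counts, which is what `value_counts` groups together. As a complementary view (an editorial addition that reuses the `df` and `total_macs` defined above), the heaviest individual layers can be listed directly:

``` python
# Editorial sketch: the five layers with the largest MAC counts and their
# share of the total compute.
top = df.sort_values('MACs', ascending=False).head(5)
for name, macs in zip(top['Name'], top['MACs']):
    print("{:30s} {:>15,}  ({:.1%} of total)".format(name, int(macs), macs / total_macs))
```
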
%% Cell type:markdown id: tags:

### Let's look at which convolution kernel sizes we're using, and how many instances:

%% Cell type:code id: tags:

``` python
print("Convolution kernel size distribution:")
counts = df['Attrs'].value_counts()
print(counts)
```

%% Cell type:markdown id: tags:

### Let's look at how the MACs are distributed between the layers and the convolution kernel sizes

%% Cell type:code id: tags:

``` python
def get_layer_color(layer_type, attrs):
    if layer_type == "Conv2d":
        if attrs == 'k=(1, 1)':
            return 'tomato'
        elif attrs == 'k=(3, 3)':
            return 'limegreen'
        else:
            return 'steelblue'
    return 'indigo'

df_compute = df['MACs']
ax = df_compute.plot.bar(figsize=[15,10], title="MACs",
                         color=[get_layer_color(layer_type, attrs) for layer_type, attrs in zip(df['Type'], df['Attrs'])])
ax.set_xticklabels(df.Name, rotation=90);
```

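To complement the per-layer bar chart with numbers, the same split can be aggregated with a groupby (an editorial addition; it relies only on the 'Type', 'Attrs' and 'MACs' columns already used above):

``` python
# Editorial sketch: total MACs aggregated by layer type and kernel-size attribute.
macs_by_kernel = df.groupby(['Type', 'Attrs'])['MACs'].sum().sort_values(ascending=False)
print(macs_by_kernel)
print("Share of 3x3 convolutions: {:.1%}".format(
    df.loc[df['Attrs'] == 'k=(3, 3)', 'MACs'].sum() / total_macs))
```
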
%% Cell type:markdown id: tags:

### How do the Weights and Feature-maps footprints distribute across the layers:

%% Cell type:code id: tags:

``` python
df['FM volume'] = df['IFM volume'] + df['OFM volume']
df_footprint = df[['FM volume', 'Weights volume']]
ax = df_footprint.plot.bar(figsize=[15,10], title="Footprint");
ax.set_xticklabels(df.Name, rotation=90);
```

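The "volumes" plotted here are element counts: a layer's weights volume is the number of weight elements, and its feature-map volume is the number of input plus output activations. A tiny worked example with made-up shapes (an editorial addition, batch size 1):

``` python
# Editorial illustration: footprint arithmetic for a hypothetical 3x3 Conv2d
# with 256 input and 256 output channels on a 14x14 feature map.
weights_volume = 256 * 256 * 3 * 3        # C_out * C_in * k_H * k_W
ifm_volume = 256 * 14 * 14                # input feature-map elements
ofm_volume = 256 * 14 * 14                # output feature-map elements
fm_volume = ifm_volume + ofm_volume
print("Weights: {:,}   Feature maps: {:,}".format(weights_volume, fm_volume))
```
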
%% Cell type:markdown id: tags:

### How the Arithmetic Intensity distributes across the layers:

%% Cell type:code id: tags:

``` python
df_performance = df
df_performance['raw traffic'] = df_footprint['FM volume'] + df_footprint['Weights volume']
df_performance['arithmetic intensity'] = df['MACs'] / df_performance['raw traffic']
df_performance2 = df_performance['arithmetic intensity']
ax = df_performance2.plot.bar(figsize=[15,10], title="Arithmetic Intensity");
ax.set_xticklabels(df.Name, rotation=90);
```

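Arithmetic intensity here is MACs per element of raw traffic (weights plus input and output feature maps), so tall bars mark layers that are compute-bound and short bars mark layers that are likely memory-bound. A small worked example of the same ratio for a hypothetical layer (an editorial addition, mirroring the footprint sketch above):

``` python
# Editorial illustration: arithmetic intensity of a hypothetical 3x3 Conv2d
# with 256 input/output channels on a 14x14 feature map (batch size 1).
macs = 14 * 14 * 3 * 3 * 256 * 256            # output pixels * kernel volume * C_out
weights_volume = 256 * 256 * 3 * 3
fm_volume = 256 * 14 * 14 + 256 * 14 * 14     # input + output feature maps
raw_traffic = weights_volume + fm_volume
print("Arithmetic intensity: {:.1f} MACs/element".format(macs / raw_traffic))
```
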
%% Cell type:markdown id: tags:

## ResNet20 channel pruning using SSL

Let's see how many MACs we saved by using SSL to prune filters from ResNet20:

%% Cell type:code id: tags:

``` python
resnet20_dense = models.create_model(pretrained=False, dataset='cifar10', arch='resnet20_cifar', parallel=True)
resnet20_sparse = models.create_model(pretrained=False, dataset='cifar10', arch='resnet20_cifar', parallel=True)
checkpoint_file = "../examples/ssl/checkpoints/checkpoint_trained_channel_regularized_resnet20_finetuned.pth.tar"
load_checkpoint(resnet20_sparse, checkpoint_file);
```

%% Cell type:code id: tags:

``` python
dummy_input = Variable(torch.randn(1, 3, 32, 32), requires_grad=False)
df_dense = distiller.model_performance_summary(resnet20_dense, dummy_input, batch_size=1)
df_sparse = distiller.model_performance_summary(resnet20_sparse, dummy_input, batch_size=1)
dense_macs = df_dense['MACs'].sum()
sparse_macs = df_sparse['MACs'].sum()
print("Dense MACs: " + "{:,}".format(int(dense_macs)))
print("Sparse MACs: " + "{:,}".format(int(sparse_macs)))
print("Saved MACs: %.2f%%" % ((1 - sparse_macs/dense_macs)*100))
```

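For a per-layer view of where the savings come from, the two summaries can be joined on the layer name. This is an editorial addition, not part of the commit, and assumes only the 'Name' and 'MACs' columns produced by `model_performance_summary`:

``` python
# Editorial sketch: per-layer MAC comparison between the dense and the
# channel-pruned (sparse) ResNet20 models computed above.
comparison = df_dense[['Name', 'MACs']].merge(
    df_sparse[['Name', 'MACs']], on='Name', suffixes=('_dense', '_sparse'))
comparison['saved %'] = (1 - comparison['MACs_sparse'] / comparison['MACs_dense']) * 100
print(comparison.sort_values('saved %', ascending=False))
```
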