llvm / hpvm-release
Commit b78d0a09, authored 5 years ago by Hashim Sharif
Adapting VGG16_cifar10 keras script to use new Benchmark class
Parent: f28b8bd7
1 changed file: llvm/projects/keras/src/vgg16_cifar10.py (+43 additions, −78 deletions)
@@ -14,102 +14,84 @@ from keras import backend as K
 from keras import regularizers
 import os
 import sys
+from Benchmark import Benchmark
 from frontend.approxhpvm_translator import translate_to_approxhpvm
 from frontend.weight_utils import dumpCalibrationData


-class cifar10vgg:
-    def __init__(self, train=True):
-        self.num_classes = 10
+class VGG16_CIFAR10(Benchmark):
+
+    def buildModel(self):
+        # Build the network of vgg for 10 classes with massive dropout and weight decay as described in the paper.
         self.weight_decay = 0.0005
         self.x_shape = [3, 32, 32]

-        self.model = self.build_model()
-        if train:
-            self.model = self.train(self.model)
-        else:
-            self.model.load_weights('cifar10vgg.h5')
-
-    def build_model(self):
-        # Build the network of vgg for 10 classes with massive dropout and weight decay as described in the paper.
-
         model = Sequential()
         weight_decay = self.weight_decay

         model.add(Conv2D(64, (3, 3), padding='same',
                          input_shape=self.x_shape, kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        #model.add(BatchNormalization())
         model.add(Dropout(0.3))

         model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        # model.add(BatchNormalization())

         model.add(MaxPooling2D(pool_size=(2, 2)))

         model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        #model.add(BatchNormalization())
         model.add(Dropout(0.4))

         model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        #model.add(BatchNormalization())

         model.add(MaxPooling2D(pool_size=(2, 2)))

         model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        #model.add(BatchNormalization())
         model.add(Dropout(0.4))

         model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        # model.add(BatchNormalization())
         model.add(Dropout(0.4))

         model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        # model.add(BatchNormalization())

         model.add(MaxPooling2D(pool_size=(2, 2)))

         model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        # model.add(BatchNormalization())
         model.add(Dropout(0.4))

         model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        # model.add(BatchNormalization())
         model.add(Dropout(0.4))

         model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        # model.add(BatchNormalization())

         model.add(MaxPooling2D(pool_size=(2, 2)))

         model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        # model.add(BatchNormalization())
         model.add(Dropout(0.4))

         model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        # model.add(BatchNormalization())
         model.add(Dropout(0.4))

         model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        # model.add(BatchNormalization())

         model.add(MaxPooling2D(pool_size=(2, 2)))
         model.add(Dropout(0.5))

         model.add(Flatten())
         model.add(Dense(512, kernel_regularizer=regularizers.l2(weight_decay)))
         model.add(Activation('relu'))
-        # model.add(BatchNormalization())

         model.add(Dropout(0.5))
         model.add(Dense(self.num_classes))
         model.add(Activation('softmax'))
@@ -127,30 +109,28 @@ class cifar10vgg:
         X_test = (X_test - mean) / (std + 1e-7)
         return X_train, X_test

-    def normalize_production(self, x):
-        #this function is used to normalize instances in production according to saved training set statistics
-        # Input: X - a training set
-        # Output X - a normalized training set according to normalization constants.
-        #these values produced during first training and are general for the standard cifar10 training set normalization
-        mean = 120.707
-        std = 64.15
-        return (x - mean) / (std + 1e-7)
+    def data_preprocess(self):

-    def predict(self, x, normalize=True, batch_size=50):
-        if normalize:
-            x = self.normalize_production(x)
-        return self.model.predict(x, batch_size)
+        (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
+        #X_train = X_train / 255.0
+        #X_test = X_test / 255.0
+
+        mean = np.mean(X_train, axis=(0, 1, 2, 3))
+        std = np.std(X_train, axis=(0, 1, 2, 3))
+        X_train = (X_train - mean) / (std + 1e-7)
+        X_test = (X_test - mean) / (std + 1e-7)
+
+        return X_train, Y_train, X_test, Y_test

-    def train(self, model):
+    def trainModel(self, model):

         #training parameters
         batch_size = 128
         #maxepoches = 250
         maxepoches = 30
         learning_rate = 0.01
         lr_decay = 1e-6
@@ -159,7 +139,7 @@ class cifar10vgg:
         (x_train, y_train), (x_test, y_test) = cifar10.load_data()
         x_train = x_train.astype('float32')
         x_test = x_test.astype('float32')
-        x_train, x_test = self.normalize(x_train, x_test)
+        x_train, y_train, x_test, y_test = self.data_preprocess()

         y_train = keras.utils.to_categorical(y_train, self.num_classes)
         y_test = keras.utils.to_categorical(y_test, self.num_classes)
@@ -184,7 +164,6 @@ class cifar10vgg:
         datagen.fit(x_train)

         #optimization details
         sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)
         model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
@@ -193,49 +172,35 @@ class cifar10vgg:
         # training process in a for loop with learning rate drop every 25 epoches.
         historytemp = model.fit_generator(datagen.flow(x_train, y_train,
                                           batch_size=batch_size),
                                           steps_per_epoch=x_train.shape[0] // batch_size,
                                           epochs=maxepoches,
                                           validation_data=(x_test, y_test), callbacks=[reduce_lr], verbose=2)
-        model.save_weights('cifar10vgg.h5')

         return model

-if __name__ == '__main__':
-
-    K.set_image_data_format('channels_first')
-    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
-
-    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
-    test_labels = y_test
-    train_labels = y_train
-    x_train = x_train.astype('float32')
-    x_test = x_test.astype('float32')
-
-    y_train = keras.utils.to_categorical(y_train, 10)
-    y_test = keras.utils.to_categorical(y_test, 10)
-
-    model = cifar10vgg()
-
-    predicted_x = model.predict(x_test)
-    norm_test = model.normalize_production(x_test)
-
-    # Normalizing train data before dumping
-    #x_train, x_test = model.normalize(x_train, x_test)
-    x_train = model.normalize_production(x_train)
-
-    # dumpCalibrationData("vgg16_cifar_calib.bin", x_train, "vgg16_train_labels.bin", train_labels)
-
-    translate_to_approxhpvm(model.model, "data/vgg16_cifar10/", norm_test, test_labels, 10)
-
-    residuals = np.argmax(predicted_x, 1) != np.argmax(y_test, 1)
-
-    loss = sum(residuals) / len(residuals)
-    print("the validation 0/1 loss is: ", loss)
+if __name__ == "__main__":
+
+    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+    # Changing to NCHW format
+    K.set_image_data_format('channels_first')
+
+    ### Parameters specific to each benchmark
+    reload_dir = "/home/hsharif3/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/model_params/vgg16_cifar10/"
+    keras_model_file = "vgg16_cifar10.h5"
+    hpvm_dir = "data/vgg16_cifar10/"
+    num_classes = 10
+
+    vgg16_cifar10 = VGG16_CIFAR10("vgg16_cifar10", reload_dir, keras_model_file, hpvm_dir, num_classes)
+    vgg16_cifar10.run(sys.argv)