Commit 6708d6d3 authored by shingjan

proceed to onnx runtime evaluation

parent a258f264
Showing changes with 1284 additions and 19 deletions
-class CodeGen:
+class GraphCodeGen:
    def __init__(self, graph):
        self._headers = ""
        self._nodes = ""
@@ -100,7 +100,7 @@ class CodeGen:
        f.write(source)
        f.close()
-    def compileModel(self, model, weights_dir, test_data):
+    def codegen(self, model, weights_dir, test_data):
        self.emitHeaders()
        self.emitRoot()
        self.emitMainFunc(test_data)
...
@@ -178,7 +178,8 @@ class GraphBuilder(object):
        return dtype
    ################################################
-    # Graph Building functions
+    # Top level Graph Building functions
+    # return the compile-ready graph
    ################################################
    def build_graph(self):
@@ -225,5 +226,6 @@ class GraphBuilder(object):
        #print("input: " + str(node.input))
        #print("output: " + str(node.output))
        #print(self._nodes)
+        return self.dfg
\ No newline at end of file
@@ -2,10 +2,10 @@ import sys
import numpy as np
import os
-from operators import *
-from ir import *
+from graph_builder import *
+from graph_ir import *

-class GraphCodegen(object):
+class GraphCodeGen(object):
    def __init__(self, DFG):
        self.program_str = ""
        self.dfg = DFG
@@ -121,4 +121,5 @@ class GraphCodegen(object):
        self.emit_graph()
        self.emit_batch_loop_end()
        self.emit_footer(test_data)
+        # Write the program to source/disk
        self.emit_source(weights_dir)
@@ -3,28 +3,34 @@ import sys
import numpy as np
import onnx
import glob
-from onnxruntime.backend.backend import OnnxRuntimeBackend as backend
from onnx import numpy_helper, version_converter
+from onnxruntime.backend.backend import OnnxRuntimeBackend as backend

# onnx2hpvm modules
from graph_builder import GraphBuilder
-from graph_codegen import GraphCodegen
+from graph_codegen import GraphCodeGen
+# from approx_codegen import GraphCodeGen

-def main():
-    model = onnx.load('../models/keras/alexnet.onnx')
-    test_data_dir = '../models/mnist/test_data_set_0'
-    #model = onnx.load('../models/mnist/mnist.onnx')
-    # print('The model before conversion:\n{}'.format(model))
+def convert_version(model):
+    print('The model before conversion:\n{}'.format(model))
    # A full list of supported adapters can be found here:
    # https://github.com/onnx/onnx/blob/master/onnx/version_converter.py#L21
    # Apply the version conversion on the original model
-    # converted_model = version_converter.convert_version(model, 12)
+    converted_model = version_converter.convert_version(model, 12)
+    print('The model after conversion:\n{}'.format(converted_model))
+    return converted_model

+def main():
+    model = onnx.load('../models/keras/alexnet.onnx')
+    weights_dir = './test_src'
+    test_data_dir = '../models/mnist/test_data_set_0'
+    # converted_model = convert_version(model)
+    # print('The model after conversion:\n{}'.format(converted_model))
    graph = model.graph
    try:
        opset = model.opset_import[0].version if model.opset_import else 1
@@ -32,9 +38,8 @@ def main():
        opset = 1 # default opset version set to 1 if not specified
    print("opset version: ", opset)
    gBuilder = GraphBuilder(model, None, "float32", opset)
-    gBuilder.build_graph()
-    gCodegen = GraphCodegen(gBuilder.dfg)
-    gCodegen.codegen(weights_dir, test_data, test_labels)
+    gCodegen = GraphCodeGen(gBuilder.build_graph())
+    gCodegen.codegen(weights_dir, test_data)#, test_labels)

if __name__ == "__main__":
...
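The OnnxRuntimeBackend import added above is not exercised anywhere in main() yet. Below is a minimal sketch of the "onnx runtime evaluation" step this commit points at, using only the onnx/onnxruntime backend APIs already imported; the test_data_set_0 protobuf layout follows the ONNX model zoo convention, and all paths and variable names are illustrative assumptions rather than part of this commit:

``` python
# Sketch only: run the loaded ONNX model through the onnxruntime backend.
import glob
import os
import numpy as np
import onnx
from onnx import numpy_helper
from onnxruntime.backend.backend import OnnxRuntimeBackend as backend

model = onnx.load('../models/keras/alexnet.onnx')
test_data_dir = '../models/mnist/test_data_set_0'

# Load the serialized input tensors (input_*.pb) shipped with the test data set.
inputs = []
for input_file in sorted(glob.glob(os.path.join(test_data_dir, 'input_*.pb'))):
    tensor = onnx.TensorProto()
    with open(input_file, 'rb') as f:
        tensor.ParseFromString(f.read())
    inputs.append(numpy_helper.to_array(tensor))

# Prepare a backend representation and execute it on the inputs.
rep = backend.prepare(model, device='CPU')
outputs = rep.run(inputs)
for out in outputs:
    print(np.asarray(out).shape)
```

Outputs produced this way could then serve as reference values when evaluating the generated code.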
%% Cell type:code id: tags:
``` python
import numpy as np
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten, Activation
from keras.layers.convolutional import Conv2D
from keras.optimizers import Adam
from keras.layers.pooling import MaxPooling2D
from keras.utils.np_utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
from keras import regularizers
from keras.callbacks import LearningRateScheduler
import sys
import struct
import keras
import numpy as np
import os
```
%% Output
Using TensorFlow backend.
%% Cell type:code id: tags:
``` python
def buildModel2():
    activation_type = "tanh"
    weight_decay = 1e-4
    model = Sequential()
    model.add(Conv2D(64, kernel_size=(11, 11), activation=activation_type,
                     input_shape=(3, 32, 32), padding = 'same',
                     kernel_regularizer=regularizers.l2(weight_decay) ))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2) ))
    model.add(Dropout(0.2))
    model.add(Conv2D(192, kernel_size=(5, 5), activation=activation_type, padding = 'same',
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2) ))
    model.add(Dropout(0.3))
    model.add(Conv2D(384, kernel_size=(3, 3), activation=activation_type, padding = 'same',
                     kernel_regularizer=regularizers.l2(weight_decay) ))
    model.add(Conv2D(256, kernel_size=(3, 3), activation=activation_type, padding = 'same',
                     kernel_regularizer=regularizers.l2(weight_decay) ))
    model.add(Conv2D(256, kernel_size=(3, 3), activation=activation_type, padding = 'same',
                     kernel_regularizer=regularizers.l2(weight_decay) ))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2) ))
    model.add(Dropout(0.4))
    model.add(Flatten())
    #model.add(Flatten())
    #model.add(Dense(256))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    return model
```
%% Cell type:code id: tags:
``` python
def trainModel(model):
    (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
    test_labels = Y_test
    train_labels = Y_train
    #X_train = X_train.astype('float32')
    #X_test = X_test.astype('float32')
    X_train = X_train / 255.0
    X_test = X_test / 255.0
    mean = np.mean(X_train, axis=(0,1,2,3))
    std = np.std(X_train, axis=(0,1,2,3))
    X_train = (X_train - mean) / (std + 1e-7)
    X_test = (X_test - mean) / (std + 1e-7)
    dir_prefix = "/home/hsharif3/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/model_params/alexnet_cifar10/"
    #opt_rms = keras.optimizers.rmsprop(lr=0.001,decay=1e-6)
    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.0001, decay=1e-6),
                  #optimizer = opt_rms,
                  metrics=['accuracy'])
    #print to_categorical(Y_train, 10)
    print(to_categorical(Y_train))
    datagen = ImageDataGenerator(
        rotation_range=15,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        )
    datagen.fit(X_train)
    model.fit(X_train, to_categorical(Y_train, 10),
              batch_size=128,
              shuffle=True,
              epochs=1,
              #epochs=100,
              validation_data=(X_test, to_categorical(Y_test, 10)),
              callbacks=[LearningRateScheduler(lr_schedule)])
    # Evaluate the model
    scores = model.evaluate(X_test, to_categorical(Y_test, 10))
    print('Loss: %.3f' % scores[0])
    print('Accuracy: %.3f' % scores[1])
    print("*** TRAINED MODEL ****\n")
```
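trainModel() passes LearningRateScheduler(lr_schedule) to model.fit(), but lr_schedule is not defined anywhere in this notebook. Below is a hypothetical placeholder so the cell can run; the step decay is an assumption, not taken from this commit:

``` python
# Hypothetical lr_schedule for the LearningRateScheduler callback used in
# trainModel(); the notebook references it but never defines it.
def lr_schedule(epoch, lr=1e-4):
    # Start from the Adam learning rate used in model.compile() and halve it
    # every 30 epochs (illustrative choice, not from this commit).
    return 1e-4 * (0.5 ** (epoch // 30))
```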
%% Cell type:code id: tags:
``` python
K.set_image_data_format('channels_first')
model = buildModel2()
trainModel(model)
```
%% Output
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-913f2a0d099c> in <module>
1 K.set_image_data_format('channels_first')
2
----> 3 model = buildModel2()
4 trainModel(model)
<ipython-input-4-9d348f64e01f> in buildModel2()
7 model.add(Conv2D(64, kernel_size=(11, 11), activation=activation_type,
8 input_shape=(3, 32, 32), padding = 'same',
----> 9 kernel_regularizer=regularizers.l2(weight_decay) ))
10 model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2) ))
11 model.add(Dropout(0.2))
~/opt/anaconda3/lib/python3.7/site-packages/keras/engine/sequential.py in add(self, layer)
164 # and create the node connecting the current layer
165 # to the input layer we just created.
--> 166 layer(x)
167 set_inputs = True
168 else:
~/opt/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py in symbolic_fn_wrapper(*args, **kwargs)
73 if _SYMBOLIC_SCOPE.value:
74 with get_graph().as_default():
---> 75 return func(*args, **kwargs)
76 else:
77 return func(*args, **kwargs)
~/opt/anaconda3/lib/python3.7/site-packages/keras/engine/base_layer.py in __call__(self, inputs, **kwargs)
487 # Actually call the layer,
488 # collecting output(s), mask(s), and shape(s).
--> 489 output = self.call(inputs, **kwargs)
490 output_mask = self.compute_mask(inputs, previous_mask)
491
~/opt/anaconda3/lib/python3.7/site-packages/keras/layers/convolutional.py in call(self, inputs)
169 padding=self.padding,
170 data_format=self.data_format,
--> 171 dilation_rate=self.dilation_rate)
172 if self.rank == 3:
173 outputs = K.conv3d(
~/opt/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py in conv2d(x, kernel, strides, padding, data_format, dilation_rate)
3699 data_format = normalize_data_format(data_format)
3700
-> 3701 x, tf_data_format = _preprocess_conv2d_input(x, data_format)
3702
3703 padding = _preprocess_padding(padding)
~/opt/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py in _preprocess_conv2d_input(x, data_format, force_transpose)
3572 tf_data_format = 'NHWC'
3573 if data_format == 'channels_first':
-> 3574 if not _has_nchw_support() or force_transpose:
3575 x = tf.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
3576 else:
~/opt/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py in _has_nchw_support()
520 """
521 explicitly_on_cpu = _is_current_explicit_device('cpu')
--> 522 gpus_available = len(_get_available_gpus()) > 0
523 return (not explicitly_on_cpu and gpus_available)
524
~/opt/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py in _get_available_gpus()
504 _LOCAL_DEVICES = [x.name for x in devices]
505 else:
--> 506 _LOCAL_DEVICES = tf.config.experimental_list_devices()
507 return [x for x in _LOCAL_DEVICES if 'device:gpu' in x.lower()]
508
AttributeError: module 'tensorflow_core._api.v2.config' has no attribute 'experimental_list_devices'
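The AttributeError above is a known incompatibility: standalone Keras (up to 2.3.0) still calls tf.config.experimental_list_devices(), which newer TensorFlow 2.x releases no longer provide. Assuming that Keras/TensorFlow combination, a commonly used workaround is to patch the backend's GPU probe before building the model; pinning matching keras/tensorflow versions is the alternative:

``` python
# Workaround sketch: re-route Keras' GPU discovery through the TF 2.x device API
# instead of the removed tf.config.experimental_list_devices().
import tensorflow as tf
import keras.backend.tensorflow_backend as tfback

def _get_available_gpus():
    # Same contract as the original helper, built on tf.config.list_logical_devices().
    if tfback._LOCAL_DEVICES is None:
        tfback._LOCAL_DEVICES = [d.name for d in tf.config.list_logical_devices()]
    return [d for d in tfback._LOCAL_DEVICES if 'device:gpu' in d.lower()]

tfback._get_available_gpus = _get_available_gpus
```

Run the patch in a cell before K.set_image_data_format('channels_first') and buildModel2().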
%% Cell type:code id: tags:
``` python
```
import os
import sys
import numpy as np
import onnx
import glob
from onnx import numpy_helper, version_converter
from onnxruntime.backend.backend import OnnxRuntimeBackend as backend