From a258f264bba35981ad317b82dc26a70108b702cf Mon Sep 17 00:00:00 2001
From: shingjan <yjshi03@gmail.com>
Date: Sat, 28 Mar 2020 03:06:55 -0500
Subject: [PATCH] better naming conventions

---
 hpvm/projects/onnx/frontend/codegen.py       |  58 ----
 hpvm/projects/onnx/frontend/graph_builder.py | 248 +++++++++----------
 hpvm/projects/onnx/frontend/graph_codegen.py | 124 +++++++++
 .../frontend/{operators.py => graph_ir.py}   |  44 +++-
 hpvm/projects/onnx/frontend/ir.py            | 140 ----------
 hpvm/projects/onnx/frontend/main.py          |  39 ++-
 .../onnx/frontend/onnx_frontend/util.py      |  15 --
 hpvm/projects/onnx/frontend/util.py          |  62 -----
 8 files changed, 323 insertions(+), 407 deletions(-)
 delete mode 100644 hpvm/projects/onnx/frontend/codegen.py
 create mode 100644 hpvm/projects/onnx/frontend/graph_codegen.py
 rename hpvm/projects/onnx/frontend/{operators.py => graph_ir.py} (83%)
 delete mode 100644 hpvm/projects/onnx/frontend/ir.py
 delete mode 100644 hpvm/projects/onnx/frontend/util.py

diff --git a/hpvm/projects/onnx/frontend/codegen.py b/hpvm/projects/onnx/frontend/codegen.py
deleted file mode 100644
index f4b53209b9..0000000000
--- a/hpvm/projects/onnx/frontend/codegen.py
+++ /dev/null
@@ -1,58 +0,0 @@
-class CodeGen:
-    def __init__
-        self.dfg = dfg
-        self.output_map = {}
-        self.counter = 0
-        self.weight_str = ""
-        self.program_str = ""
-        self.input_str = ""
-        self.filter_names = {}
-        self.headers = ""
-
-    def emitHeaders(self):
-        headers = "\n#include <stdio.h> \n"
-        headers += "#include <stdlib.h> \n"
-        headers += "#include <unistd.h> \n"
-        headers += "#include <fcntl.h> \n"
-        headers += "#include <sys/types.h> \n"
-        headers += "#include <sys/stat.h> \n"
-        headers += "#include <string.h> \n"
-
-        headers += "#include \"../../tensor_runtime/include/tensor_runtime.h\" \n"
-        headers += "#include \"../include/utils.h\" \n\n"
-        self._headers = headers
-
-    def emitMainFunc(self, test_data):
-        main_func_str = "int main(){ \n\n"
-        main_func_str += self.weight_str
-        main_func_str += self.input_str
-        main_func_str += "\n__visc__init(); \n"
-        main_func_str += "RootIn* args = static_cast<RootIn*>(malloc(sizeof(RootIn))); \n\n"
-        for f_name in self.filter_names:
-            main_func_str += "args->" + f_name + " = " + f_name + "; \n"
-            main_func_str += "args->" + f_name + "_bytes = 0; \n"
-        main_func_str += "\nvoid* dfg = __visc__launch(0, root, (void*) args); \n\n"
-        main_func_str += "__visc__wait(dfg); \n\n"
-        main_func_str += "void *result = static_cast<RootIn*>(args)->input; \n"
-        main_func_str += "hpvm_request_tensor(result, 0); \n\n"
-        main_func_str += "__visc__cleanup(); \n "
-        main_func_str += "computeAccuracy3(labels, result); \n"
-        main_func_str += "return 0; \n\n"
-        main_func_str += "} \n"
-        self._main_func += main_func_str
-
-
-    def emitSource(self, dir_prefix):
-        source = self._headers + self._nodes + self._root
-        source += self._root_struct + self._main_func
-        print (source)
-        f = open(dir_prefix + "/onnx_src.cc", "w+")
-        f.write(source)
-        f.close()
-
-    def compileModel(self, model, weights_dir, test_data):
-        self.emitHeaders()
-        self.emitRoot()
-        self.emitMainFunc(test_data)
-        # dump generated program string to source file
-        self.emitSource(weights_dir)
\ No newline at end of file
diff --git a/hpvm/projects/onnx/frontend/graph_builder.py b/hpvm/projects/onnx/frontend/graph_builder.py
index 5ee60b034d..680a4d1c8c 100644
--- a/hpvm/projects/onnx/frontend/graph_builder.py
+++ b/hpvm/projects/onnx/frontend/graph_builder.py
@@ -1,21 +1,117 @@
-import sys
-import numpy as np
-import os
+class DFG(object):
+
+    root_set = False;
+
+    def __init__(self):
+        self.node_map = {}
+        self.root_node = None
+        self.last_node = None
+        self.singleInputLayers = {"DepthwiseConv2D",
+                                  "Conv2D",
+                                  "Dense",
+                                  "MaxPooling2D",
+                                  "Activation",
+                                  "BatchNormalization",
+                                  "Flatten"}
+        self.multiInputLayers = {"Add"}
+
+
+    def hasSingleInput(self, layer):
+        layer_name = layer.__class__.__name__
+        return layer_name in self.singleInputLayers
+
+
+    def hasMultipleInputs(self, layer):
+        layer_name = layer.__class__.__name__
+        return layer_name in self.multiInputLayers
+
+    def add_dfg_edge(self, inbound_node_name, dfg_node):
+        inbound_node_name = inbound_node_name.split(":")[0]
+        inbound_node_name = inbound_node_name.split("/")[0]
+        if inbound_node_name in self.node_map:
+            inbound_node = self.node_map[inbound_node_name]
+            print (inbound_node_name, " found!")
+            inbound_node.add_output(dfg_node)
+            dfg_node.add_input(inbound_node)
+        else:
+            print ("--inbound node NOT FOUND!")
+
+    def add_to_graph(self, layer):
+        dfg_node = DFGNode(layer)
+        if not self.root_set:
+            self.root_node = dfg_node
+            self.root_set = True # DFG root node is now set
+
+        if self.hasMultipleInputs(layer):
+            for j in range(len(layer.input)):
+                print(type(layer.input[j]))
+                print(layer.input[j].op.name)
+                self.add_dfg_edge(layer.input[j].op.name, dfg_node)
+        else:
+            print (layer.input.name)
+            self.add_dfg_edge(layer.input.name, dfg_node)
+        # Adding DFG node to name mapping
+        self.node_map[layer.name] = dfg_node
+
+
+    # Check if all predecessor nodes have been visited thus far - reverse postorder traversal
+    def predVisited(self, cur_node, visited_nodes):
+        for input_node in cur_node.inputs:
+            if input_node.layer_name not in visited_nodes:
+                return False;
+        # All predecessors are visited
+        return True
+
+    def traverseNode(self, cur_node, visited_nodes):
+        # Skip visited nodes
+        if cur_node.layer_name in visited_nodes:
+            return

-from operators import *
-from ir import *
+        if self.predVisited(cur_node, visited_nodes):
+            print(cur_node.layer_type)
+            print(cur_node.layer_name)
+            visited_nodes[cur_node.layer_name] = True
+
+            # Invoking traversal on outbound nodes
+            for output_node in cur_node.outputs:
+                self.traverseNode(output_node, visited_nodes)
+
+            # NOTE: Assuming that no outbound edges implies the last node in the graph
+            if len(cur_node.outputs) == 0:
+                self.last_node = cur_node
+
+
+    #Build and Print the DFG in reverse postorder
+    def buildDFG(self):
+        print ("\n\n ****** Traversing and Printing DFG ******* \n\n")
+        visited_nodes = {}
+        # Starting traversal at the DFG root node
+        self.traverseNode(self.root_node, visited_nodes)
+
+    ## This should be the place where partial evaluation happens
+    def emitNode(self, layer):
+        if layer.op_type == "Conv":
+            return Conv2DNode()
+        elif layer.op_type == "Tanh":
+            pass
+        elif layer.op_type == "MaxPool":
+            pass
+        elif layer.op_type == "Flatten":
+            pass
+        elif layer.op_type == "MatMul":
+            pass
+        elif layer.op_type == "Add":
+            pass
+        elif layer.op_type == "SoftMax":
+            pass
+        elif layer.op_type == "Identity":
+            pass
+        else:
+            raise ValueError("Unsupported operator type!")

-class Node(object):
-    def __init__(self, name, shape, dtype):
-        self._name = name
-        self._shape = shape if shape else {}
-        self._dtype = dtype
-    def __str__(self):
-        return "Node: " + self._name + " with shape: " + str(self._shape) + " and data type " + str(self._dtype)
-    __repr__ = __str__
-
 class GraphBuilder(object):
     def __init__(self, model, shape, dtype, opset):
+        self._check_model(model)
         self._nodes = {}
         self._params = {}
         self._renames = {}
@@ -31,6 +127,21 @@ class GraphBuilder(object):
     ################################################
     # Aux functions for graph building
     ################################################
+
+    def _check_model(self, onnx_model):
+        try:
+            from onnx import checker, onnx_cpp2py_export
+            if hasattr(checker, 'check_model'):
+                # try use onnx's own model checker before converting any model
+                try:
+                    checker.check_model(onnx_model)
+                    print("onnx model is checked valid!")
+                except onnx_cpp2py_export.checker.ValidationError as e:
+                    import warnings
+                    warnings.warn(str(e))
+        except ImportError as e:
+            raise ImportError("Unable to import onnx.checker which is required {}".format(e))
+
     def _build_shape(self):
         shape = {}
         for input in self._graph.input:
@@ -67,11 +178,8 @@ class GraphBuilder(object):
         return dtype

     ################################################
-    # Emit functions for code generation
+    # Graph Building functions
     ################################################
-    def dump_weights(self):
-        for init_tensor in self._graph.initializer:
-            print(init_tensor)

     def build_graph(self):
         # parse init tensors
@@ -117,105 +225,5 @@ class GraphBuilder(object):
             #print("input: " + str(node.input))
             #print("output: " + str(node.output))
         #print(self._nodes)
-
-    def traverse_graph(self, cur_node, visited):
-        if cur_node in visited:
-            return
-
-        if dfg.predVisited(cur_node, visited):
-            visited_nodes[cur_node.layer_name] = True
-            self.program_str += cur_node.codegen()
-            for output_node in cur_node.outputs:
-                self.codegenNode(dfg, output_node, visited)
-
-    def emit_graph(self):
-        self.build_graph()
-        visited_nodes = {}
-        self.traverse_graph(self.dfg.root, visited)
-
-    def emit_header(self):
-        headers = "\n#include <stdio.h> \n"
-        headers += "#include <stdlib.h> \n"
-        headers += "#include <unistd.h> \n"
-        headers += "#include <fcntl.h> \n"
-        headers += "#include <sys/types.h> \n"
-        headers += "#include <sys/stat.h> \n"
-        headers += "#include <string.h> \n"
-        headers += "#include \"../../tensor_runtime/include/tensor_runtime.h\" \n"
-        headers += "#include \"../include/utils.h\" \n\n"
-
-        main_func = "int main(){ \n\n"
-        initialization = "llvm_hpvm_initTensorRt(0); \n\n"
-        self.program_str += headers
-        self.program_str += main_func
-        self.program_str += initialization
-
-    def emit_footer(self, test_data):
-        if test_data is not None and self.dfg.last_node is not None:
-            last_node = self.dfg.last_node
-            output_var = self.output_map[last_node.layer_name]
-
-        destructors = "\nllvm_hpvm_cleanupTensorRt(); \n"
-        end_main = "\nreturn 0; \n\n}\n"
-        self.program_str += destructors
-        self.program_str += end_main
-
-    def emit_batch_loop(self, x_test):
-        N = x_test.shape[0]
-        C = x_test.shape[1]
-        H = x_test.shape[2]
-        W = x_test.shape[3]
-
-        loop_str = ""
-        loop_str += "\nstartMemTracking(); \n\n"
-
-        loop_str += "int test_input_size = " + str(N) + "; \n"
-        loop_str += "int batch_size = " + str(N) + "; \n"
-        loop_str += "int batch_count = test_input_size / batch_size; \n"
-        loop_str += "float final_accuracy = 0.0; \n\n"
-
-        loop_str += "for(int i = 0; i < batch_count; i++){ \n\n"
-        loop_str += "int start = i * batch_size; \n"
-        loop_str += "int end = (i + 1) * batch_size; \n"
-
-        loop_str += "\nvoid* input = readInputBatch(input_path.c_str(),0,start,end,"
-        loop_str += str(C) + "," + str(H) + "," + str(W) + "); \n\n"
-
-        self.program_str += loop_str
-
-    def emit_batch_loop_end(self):
-        end_loop_str = ""
-        end_loop_str += "\nuint32_t* labels = readLabelsBatch3(labels_path.c_str(),start,end); \n"
-        last_node = self.dfg.last_node
-        output_var = self.output_map[last_node.layer_name]
-        accuracy_call = "\nfloat accuracy = computeAccuracy3(labels, " + output_var + "); \n"
-        end_loop_str += accuracy_call
-        #end_loop_str += "float accuracy = computeAccuracy2(labels, batch_size, var_60); "
-        end_loop_str += "final_accuracy += accuracy; \n"
-        end_loop_str += "freeBatchMemory(); \n "
-        end_loop_str += "\n}\n\n"
-
-        end_loop_str += "final_accuracy = final_accuracy / batch_count; \n"
-        end_loop_str += "dumpFinalAccuracy(final_accuracy); \n\n"
-
-        self.program_str += end_loop_str
-
-    def emit_program(self, src_dir):
-        f = open(src_dir + "/src.cc", "w+")
-        f.write(self.program_str)
-        f.close()
-
-    '''
-    Compile is a top level function to compile an onnx model into C/C++
-    program with HPVM intrinsics
-    '''
-    def codegen(self, weights_dir, test_data, test_labels):
-        if os.path.exists(weights_dir):
-            raise ValueError("Weight dir existed. Compilation interrupted!")
-        os.mkdir(weights_dir)
-        self.emit_header()
-        self.emit_weights(weights_dir)
-        self.emit_batch_loop(test_data)
-        self.emit_graph()
-        self.emit_batch_loop_end()
-        self.emit_footer(test_data)
-        self.emit_program(weights_dir)
+
+
\ No newline at end of file
diff --git a/hpvm/projects/onnx/frontend/graph_codegen.py b/hpvm/projects/onnx/frontend/graph_codegen.py
new file mode 100644
index 0000000000..ac78e56b0c
--- /dev/null
+++ b/hpvm/projects/onnx/frontend/graph_codegen.py
@@ -0,0 +1,124 @@
+import sys
+import numpy as np
+import os
+
+from operators import *
+from ir import *
+
+class GraphCodegen(object):
+    def __init__(self, DFG):
+        self.program_str = ""
+        self.dfg = DFG
+
+    ################################################
+    # Emit functions for code generation
+    ################################################
+
+    def dump_weights(self):
+        for init_tensor in self._graph.initializer:
+            #print(init_tensor)
+            pass
+
+    def traverse_graph(self, cur_node, visited):
+        if cur_node.layer_name in visited:
+            return
+
+        if self.dfg.predVisited(cur_node, visited):
+            visited[cur_node.layer_name] = True
+            self.program_str += cur_node.codegen()
+            for output_node in cur_node.outputs:
+                self.traverse_graph(output_node, visited)
+
+    def emit_graph(self):
+        # the DFG is already built by GraphBuilder.build_graph()
+        visited = {}
+        self.traverse_graph(self.dfg.root_node, visited)
+
+    def emit_header(self):
+        headers = "\n#include <stdio.h> \n"
+        headers += "#include <stdlib.h> \n"
+        headers += "#include <unistd.h> \n"
+        headers += "#include <fcntl.h> \n"
+        headers += "#include <sys/types.h> \n"
+        headers += "#include <sys/stat.h> \n"
+        headers += "#include <string.h> \n"
+        headers += "#include \"../../tensor_runtime/include/tensor_runtime.h\" \n"
+        headers += "#include \"../include/utils.h\" \n\n"
+
+        main_func = "int main(){ \n\n"
+        initialization = "llvm_hpvm_initTensorRt(0); \n\n"
+        self.program_str += headers
+        self.program_str += main_func
+        self.program_str += initialization
+
+    def emit_footer(self, test_data):
+        if test_data is not None and self.dfg.last_node is not None:
+            last_node = self.dfg.last_node
+            output_var = self.output_map[last_node.layer_name]
+
+        destructors = "\nllvm_hpvm_cleanupTensorRt(); \n"
+        end_main = "\nreturn 0; \n\n}\n"
+        self.program_str += destructors
+        self.program_str += end_main
+
+    def emit_batch_loop(self, x_test):
+        N = x_test.shape[0]
+        C = x_test.shape[1]
+        H = x_test.shape[2]
+        W = x_test.shape[3]
+
+        loop_str = ""
+        loop_str += "\nstartMemTracking(); \n\n"
+
+        loop_str += "int test_input_size = " + str(N) + "; \n"
+        loop_str += "int batch_size = " + str(N) + "; \n"
+        loop_str += "int batch_count = test_input_size / batch_size; \n"
+        loop_str += "float final_accuracy = 0.0; \n\n"
+
+        loop_str += "for(int i = 0; i < batch_count; i++){ \n\n"
+        loop_str += "int start = i * batch_size; \n"
+        loop_str += "int end = (i + 1) * batch_size; \n"
+
+        loop_str += "\nvoid* input = readInputBatch(input_path.c_str(),0,start,end,"
+        loop_str += str(C) + "," + str(H) + "," + str(W) + "); \n\n"
+
+        self.program_str += loop_str
+
+    def emit_batch_loop_end(self):
+        end_loop_str = ""
+        end_loop_str += "\nuint32_t* labels = readLabelsBatch3(labels_path.c_str(),start,end); \n"
+        last_node = self.dfg.last_node
+        output_var = self.output_map[last_node.layer_name]
+        accuracy_call = "\nfloat accuracy = computeAccuracy3(labels, " + output_var + "); \n"
+        end_loop_str += accuracy_call
+        #end_loop_str += "float accuracy = computeAccuracy2(labels, batch_size, var_60); "
+        end_loop_str += "final_accuracy += accuracy; \n"
+        end_loop_str += "freeBatchMemory(); \n "
+        end_loop_str += "\n}\n\n"
+
+        end_loop_str += "final_accuracy = final_accuracy / batch_count; \n"
+        end_loop_str += "dumpFinalAccuracy(final_accuracy); \n\n"
+
+        self.program_str += end_loop_str
+
+    def emit_source(self, src_dir):
+        f = open(src_dir + "/src.cc", "w+")
+        f.write(self.program_str)
+        f.close()
+
+    ################################################
+    # Compile is a top level function to compile an onnx model into C/C++
+    # program with HPVM intrinsics
+    ################################################
+
+    def codegen(self, weights_dir, test_data, test_labels):
+        if os.path.exists(weights_dir):
+            raise ValueError("Weight dir already exists. Compilation interrupted!")
+        os.mkdir(weights_dir)
+        self.emit_header()
+        self.emit_weights(weights_dir)
+        self.emit_batch_loop(test_data)
+        self.emit_graph()
+        self.emit_batch_loop_end()
+        self.emit_footer(test_data)
+        self.emit_source(weights_dir)
diff --git a/hpvm/projects/onnx/frontend/operators.py b/hpvm/projects/onnx/frontend/graph_ir.py
similarity index 83%
rename from hpvm/projects/onnx/frontend/operators.py
rename to hpvm/projects/onnx/frontend/graph_ir.py
index 3a52237307..c9da1e9ef7 100644
--- a/hpvm/projects/onnx/frontend/operators.py
+++ b/hpvm/projects/onnx/frontend/graph_ir.py
@@ -1,4 +1,46 @@
-from ir import DFGNode, ActivationNode, LogicalOpNode
+################################################
+# Top Level DFGNode interface
+################################################
+class DFGNode(object):
+    def add_output(self, output_node):
+        self.outputs.append(output_node)
+    def add_input(self, input_node):
+        self.inputs.append(input_node)
+    def __init__(self, layer):
+        self.inputs = []
+        self.outputs = []
+        self.name = layer.name
+        self.op_type = layer.op_type
+
+'''
+Element-wise operators that serve as activation functions,
+e.g. HardSigmoid, LeakyRelu, PRelu, Pow, Reciprocal,
+Relu, Selu, Sigmoid, Softplus, Sqrt, ThresholdedRelu,
+Abs, Ceil, Elu, Floor, Neg
+'''
+class ActivationNode(DFGNode):
+    pass
+'''
+Element-wise operators that are not activation functions;
+in other words, they are logical comparison operators,
+e.g. And, Equal, Greater, GreaterOrEqual, Less, LessOrEqual,
+Or, Xor
+'''
+class LogicalOpNode(DFGNode):
+    pass
+
+class Node(object):
+    def __init__(self, name, shape, dtype):
+        self._name = name
+        self._shape = shape if shape else {}
+        self._dtype = dtype
+    def __str__(self):
+        return "Node: " + self._name + " with shape: " + str(self._shape) + " and data type " + str(self._dtype)
+    __repr__ = __str__
+
+################################################
+# Actual Implementation of Operators
+################################################

 class AddNode(DFGNode):
     def __init__(self, layer):
diff --git a/hpvm/projects/onnx/frontend/ir.py b/hpvm/projects/onnx/frontend/ir.py
deleted file mode 100644
index 6ebf1f17c6..0000000000
--- a/hpvm/projects/onnx/frontend/ir.py
+++ /dev/null
@@ -1,140 +0,0 @@
-class DFG(object):
-
-    root_set = False;
-
-    def __init__(self):
-        self.node_map = {}
-        self.root_node = None
-        self.last_node = None
-        self.singleInputLayers = {"DepthwiseConv2D",
-                                  "Conv2D",
-                                  "Dense",
-                                  "MaxPooling2D",
-                                  "Activation",
-                                  "BatchNormalization",
-                                  "Flatten"}
-        self.mutliInputLayers = {"Add"}
-
-
-    def hasSingleInput(self, layer):
-        layer_name = layer.__class__.__name__
-        return layer_name in self.singleInputLayers
-
-
-    def hasMultipleInputs(self, layer):
-        layer_name = layer.__class__.__name__
-        return layer_name in self.multiInputLayers
-
-    def add_dfg_edge(self, inbound_node_name, dfg_node):
-        inbound_node_name = inbound_node_name.split(":")[0]
-        inbound_node_name = inbound_node_name.split("/")[0]
-        if inbound_node_name in self.node_map:
-            inbound_node = self.node_map[inbound_node_name]
-            print (inbound_node_name, " found!")
-            inbound_node.add_output(dfg_node)
-            dfg_node.add_input(inbound_node)
-        else:
-            print ("--inbound node NOT FOUND!")
-
-    def add_to_graph(self, layer):
-        dfg_node = DFGNode(layer)
-        if not self.root_set:
-            self.root_node = dfg_node
-            self.root_set = True # DFG root node is now set
-
-        if self.hasMultipleInputs(layer):
-            for j in range(len(layer.input)):
-                print(type(layer.input[j]))
-                print(layer.input[j].op.name)
-                self.add_dfg_edge(layer.input[j].op.name, dfg_node)
-        else:
-            print (layer.input.name)
-            self.add_dfg_edge(layer.input.name, dfg_node)
-        # Adding DFG node to name mapping
-        self.node_map[layer.name] = dfg_node
-
-
-    # Check if all predecessor nodes have been visited thus far - reverse postorder traversal
-    def predVisited(self, cur_node, visited_nodes):
-        for input_node in cur_node.inputs:
-            if input_node.layer_name not in visited_nodes:
-                return False;
-        # All predecessors are visited
-        return True
-
-    def traverseNode(self, cur_node, visited_nodes):
-        # Skip visited nodes
-        if cur_node.layer_name in visited_nodes:
-            return
-
-        if self.predVisited(cur_node, visited_nodes):
-            print(cur_node.layer_type)
-            print(cur_node.layer_name)
-            visited_nodes[cur_node.layer_name] = True
-
-            # Invoking traversal on outbound nodes
-            for output_node in cur_node.outputs:
-                self.traverseNode(output_node, visited_nodes)
-
-            # NOTE: Assuming that no outbound edges implies the last node in the graph
-            if len(cur_node.outputs) == 0:
-                self.last_node = cur_node
-
-
-    #Build and Print the DFG in reverse postorder
-    def buildDFG(self):
-        print ("\n\n ****** Traversing and Printing DFG ******* \n\n")
-        visited_nodes = {}
-        # Starting traversal at the DFG root node
-        self.traverseNode(self.root_node, visited_nodes)
-
-    ## This should be the place where partial evaluation happens
-    def emitNode(self, layer):
-        if layer.op_type == "Conv":
-            pass
-        elif layer.op_type == "Tanh":
-            pass
-        elif layer.op_type == "MaxPool":
-            pass
-        elif layer.op_type == "Flatten":
-            pass
-        elif layer.op_type == "MatMul":
-            pass
-        elif layer.op_type == "Add":
-            pass
-        elif layer.op_type == "SoftMax":
-            pass
-        elif layer.op_type == "Identity":
-            pass
-        else:
-            raise ValueError("Unsupported operator type!")
-
-
-class DFGNode(object):
-    def add_output(self, output_node):
-        self.outputs.append(output_node)
-    def add_input(self, input_node):
-        self.inputs.append(input_node)
-    def __init__(self, layer):
-        self.inputs = []
-        self.outputs = []
-        self.name = layer.name
-        self.op_type = layer.op_type
-
-'''
-Element wise operatos that is for activation function
-e.g. HardSigmoid, LeakyRelu, PRelu, Pow, Reciprocal,
-Relu, Selu, Sigmoid, Softplus, Sqrt, ThresholdedRelu,
-Abs, Ceil, Elu, Floor, Neg
-'''
-class ActivationNode(DFGNode):
-    pass
-'''
-ELement wise operators that is not for activation function.
-In other words, they are logical comparison operators
-e.g. And, Equal, Greater, GreaterOrEqual, Less, LessOrEqual,
-Or, Xor
-'''
-class LogicalOpNode(DFGNode):
-    pass
-
\ No newline at end of file
diff --git a/hpvm/projects/onnx/frontend/main.py b/hpvm/projects/onnx/frontend/main.py
index bd8d34f2e1..97253a3ea5 100644
--- a/hpvm/projects/onnx/frontend/main.py
+++ b/hpvm/projects/onnx/frontend/main.py
@@ -8,17 +8,34 @@ from onnxruntime.backend.backend import OnnxRuntimeBackend as backend
 from onnx import numpy_helper, version_converter

 # onnx2hpvm modules
-from onnx_frontend.util import convert_to_hpvm
+from graph_builder import GraphBuilder
+from graph_codegen import GraphCodegen

-#model = onnx.load('../models/mnist/mnist.onnx')
-model = onnx.load('../models/keras/alexnet.onnx')
-test_data_dir = '../models/mnist/test_data_set_0'
-# print('The model before conversion:\n{}'.format(model))
-# A full list of supported adapters can be found here:
-# https://github.com/onnx/onnx/blob/master/onnx/version_converter.py#L21
-# Apply the version conversion on the original model
-# converted_model = version_converter.convert_version(model, 12)
+def main():
+    model = onnx.load('../models/keras/alexnet.onnx')
+    test_data_dir = '../models/mnist/test_data_set_0'
+    #model = onnx.load('../models/mnist/mnist.onnx')

-# print('The model after conversion:\n{}'.format(converted_model))
-convert_to_hpvm(model)
\ No newline at end of file
+    # print('The model before conversion:\n{}'.format(model))
+
+    # A full list of supported adapters can be found here:
+    # https://github.com/onnx/onnx/blob/master/onnx/version_converter.py#L21
+    # Apply the version conversion on the original model
+    # converted_model = version_converter.convert_version(model, 12)
+
+    # print('The model after conversion:\n{}'.format(converted_model))
+    graph = model.graph
+    try:
+        opset = model.opset_import[0].version if model.opset_import else 1
+    except AttributeError:
+        opset = 1 # default opset version set to 1 if not specified
+    print("opset version: ", opset)
+    gBuilder = GraphBuilder(model, None, "float32", opset)
+    gBuilder.build_graph()
+    gCodegen = GraphCodegen(gBuilder.dfg)
+    gCodegen.codegen(weights_dir, test_data, test_labels)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/hpvm/projects/onnx/frontend/onnx_frontend/util.py b/hpvm/projects/onnx/frontend/onnx_frontend/util.py
index 78091e7c32..dd5565219d 100644
--- a/hpvm/projects/onnx/frontend/onnx_frontend/util.py
+++ b/hpvm/projects/onnx/frontend/onnx_frontend/util.py
@@ -3,21 +3,6 @@ import numpy as np
 import os
 from graph_builder import GraphBuilder

-def check_model(onnx_model):
-    try:
-        from onnx import checker, onnx_cpp2py_export
-        if hasattr(checker, 'check_model'):
-            # try use onnx's own model checker before converting any model
-            try:
-                checker.check_model(onnx_model)
-                print("onnx model is checked valid.")
-            except onnx_cpp2py_export.checker.ValidationError as e:
-                import warnings
-                # the checker is a bit violent about errors, so simply print warnings here
-                warnings.warn(str(e))
-    except ImportError as e:
-        raise ImportError("Unable to import onnx.checker which is required {}".format(e))
-
 def convert_to_hpvm(model,
                     shape=None,
                     dtype="float32",
diff --git a/hpvm/projects/onnx/frontend/util.py b/hpvm/projects/onnx/frontend/util.py
deleted file mode 100644
index 2900f2faf3..0000000000
--- a/hpvm/projects/onnx/frontend/util.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import sys
-import numpy as np
-import os
-from .graph_builder import GraphBuilder
-
-def check_model(onnx_model):
-    try:
-        from onnx import checker, onnx_cpp2py_export
-        if hasattr(checker, 'check_model'):
-            # try use onnx's own model checker before converting any model
-            try:
-                checker.check_model(onnx_model)
-                print("onnx model is checked valid.")
-            except onnx_cpp2py_export.checker.ValidationError as e:
-                import warnings
-                # the checker is a bit violent about errors, so simply print warnings here
-                warnings.warn(str(e))
-    except ImportError as e:
-        raise ImportError("Unable to import onnx.checker which is required {}".format(e))
-
-def convert_to_hpvm(model,
-                    shape=None,
-                    dtype="float32",
-                    opset=None):
-    """Converting an onnx model to equivalent HPVM IR
-
-    ONNX graphs are represented as Python Protobuf objects.
-    The companion parameters will be handled automatically.
-    However, the input names from onnx graph is vague, mixing inputs and
-    network weights/bias such as "1", "2"...
-    For convenience, we rename the `real` input names to "input_0",
-    "input_1"... And renaming parameters to "param_0", "param_1"...
-
-    Parameters
-    ----------
-    model : protobuf object
-        ONNX ModelProto after ONNX v1.1.0
-
-    shape : dict of str to tuple, optional
-        The input shape to the graph
-
-    dtype : str or dict of str to str
-        The input types to the graph
-
-    opset : int, optional
-        Override to autodetected opset.
-        This can be helpful for some testing.
-
-    Returns
-    -------
-
-    """
-    check_model(model)
-    graph = model.graph
-    if opset is None:
-        try:
-            opset = model.opset_import[0].version if model.opset_import else 1
-        except AttributeError:
-            opset = 1 # default opset version set to 1 if not specified
-    print("opset version: ", opset)
-    gb = GraphBuilder(model, shape, dtype, opset)
-    gb.build_cfg()
\ No newline at end of file
-- 
GitLab
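
Note on driving the refactored frontend: the new main() above calls
gCodegen.codegen(weights_dir, test_data, test_labels) without defining those
three names, so a caller has to supply them. Below is a minimal driver sketch
that only illustrates how GraphBuilder and GraphCodegen from this patch are
meant to fit together; it mirrors the new main(), including the gBuilder.dfg
attribute that main() assumes GraphBuilder exposes. The output directory, the
dummy NumPy batch, and the compile_onnx_model helper name are placeholders
invented for this example, not part of the patch.

    import onnx
    import numpy as np

    from graph_builder import GraphBuilder
    from graph_codegen import GraphCodegen

    def compile_onnx_model(onnx_path, weights_dir):
        model = onnx.load(onnx_path)
        opset = model.opset_import[0].version if model.opset_import else 1
        # Build the dataflow graph (DFG) from the ONNX model.
        builder = GraphBuilder(model, None, "float32", opset)
        builder.build_graph()
        # Placeholder NCHW test batch and labels; a real driver would load a dataset.
        test_data = np.zeros((1, 1, 28, 28), dtype=np.float32)
        test_labels = np.zeros((1,), dtype=np.uint32)
        # Emit the HPVM tensor-runtime C++ source into weights_dir/src.cc.
        codegen = GraphCodegen(builder.dfg)
        codegen.codegen(weights_dir, test_data, test_labels)

    if __name__ == "__main__":
        compile_onnx_model("../models/keras/alexnet.onnx", "./alexnet_hpvm_src")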