Skip to content
Snippets Groups Projects
Commit f3ce696b authored by shingjan's avatar shingjan
Browse files

changes on hpvm codegen for root node

parent fc2c9afb
No related branches found
No related tags found
No related merge requests found
......@@ -153,7 +153,7 @@ class GraphBuilder(object):
for tensor in self.tensors.values():
if isinstance(tensor, WeightTensor):
print("Dump weight: {0}".format(weight_tensor.name))
tensor.dump_weight(self.weight_dir + "/" + tensor.get_mapped_name() + "_path.bin")
#tensor.dump_weight(self.weight_dir + "/" + tensor.get_mapped_name() + "_path.bin")
return DFG(self.graph, self.tensors)
class DFG(object):
......
......@@ -138,7 +138,7 @@ class GraphCodeGen(object):
return inst_str
################################################
# Emit functions for code generation
# CodeGen functions
################################################
def emit_weights(self):
......@@ -215,7 +215,7 @@ class GraphCodeGen(object):
self.program_str += end_main
def emit_batch_loop(self, x_test=None):
# FIXME: Dimensions from test data
# FIXME: Dimensions from test data not available in ONNX
N = 1#x_test.shape[0]
C = 1#x_test.shape[1]
H = 1#x_test.shape[2]
......
......@@ -11,7 +11,10 @@ class DFGNode(object):
self.inst_str = ""
def codegen(self, tensors):
    """Emit backend code for this DFG node.

    Base implementation emits nothing: it returns the node's accumulated
    instruction string (initialized to "" on the instance), so subclasses
    override this to append real instructions.

    The dead ``pass`` statement that preceded the ``return`` (leftover from
    an earlier stub body) was removed; behavior is unchanged.
    """
    return self.inst_str
def hpvm_codegen(self, tensors):
    """Emit HPVM-dialect code for this DFG node.

    Base implementation returns the accumulated instruction string
    unchanged (empty by default); node subclasses are presumably expected
    to override this — TODO confirm which subclasses implement it.
    """
    return self.inst_str
'''
......@@ -20,8 +23,6 @@ e.g. HardSigmoid, LeakyRelu, PRelu, Pow, Reciprocal,
Relu, Selu, Sigmoid, Softplus, Sqrt, ThresholdedRelu,
Abs, Ceil, Elu, Floor, Neg
'''
class ActivationNode(DFGNode):
    """Placeholder base for ONNX activation ops (e.g. Relu, Sigmoid, Elu,
    per the module docstring above); no code generation implemented yet."""
    pass
......@@ -32,8 +33,6 @@ In other words, they are logical comparison operators
e.g. And, Equal, Greater, GreaterOrEqual, Less, LessOrEqual,
Or, Xor
'''
class LogicalOpNode(DFGNode):
    """Placeholder base for ONNX logical/comparison ops (e.g. And, Equal,
    Greater, per the docstring above); no code generation implemented yet."""
    pass
......@@ -222,45 +221,22 @@ class PadNode(DFGNode):
def __init__(self, layer):
    # Delegates all state initialization to the DFGNode base class.
    DFGNode.__init__(self, layer)
def codegen(self, tensors):
    # Pad emits no instructions of its own; return the accumulated
    # instruction string (initialized empty in DFGNode) unchanged.
    return self.inst_str
class IdentityNode(DFGNode):
    """ONNX Identity op: a pass-through layer.

    The explicit ``__init__`` that only delegated to ``DFGNode.__init__``
    was removed (useless parent delegation); the inherited constructor is
    identical.
    """

    def codegen(self, tensors):
        # Nothing to generate for an identity layer; return the
        # accumulated instruction string unchanged.
        return self.inst_str
class FlattenNode(DFGNode):
    """ONNX Flatten op.

    Code generation is not implemented yet: ``codegen`` returns the
    accumulated instruction string unchanged. The redundant ``__init__``
    that only delegated to ``DFGNode.__init__`` was removed; the inherited
    constructor is identical.
    """

    def codegen(self, tensors):
        return self.inst_str
class ZeroPadding2DNode(DFGNode):
    """Keras-style ZeroPadding2D layer node.

    Code generation is not implemented yet: ``codegen`` returns the
    accumulated instruction string unchanged. The redundant ``__init__``
    that only delegated to ``DFGNode.__init__`` was removed; the inherited
    constructor is identical.
    """

    def codegen(self, tensors):
        return self.inst_str
class DepthwiseConv2DNode(DFGNode):
    """Depthwise 2-D convolution layer node.

    Code generation is not implemented yet: ``codegen`` returns the
    accumulated instruction string unchanged. The redundant ``__init__``
    that only delegated to ``DFGNode.__init__`` was removed; the inherited
    constructor is identical.
    """

    def codegen(self, tensors):
        return self.inst_str
class DenseNode(DFGNode):
    """Fully-connected (dense) layer node.

    Code generation is not implemented yet: ``codegen`` returns the
    accumulated instruction string unchanged. The redundant ``__init__``
    that only delegated to ``DFGNode.__init__`` was removed; the inherited
    constructor is identical.
    """

    def codegen(self, tensors):
        return self.inst_str
from tensor import WeightTensor
skip_layer = ["Identity", "Flatten", "Pad"]
class HpvmCodeGen:
def __init__(self, DFG, weights_dir, test_data=None, test_labels=None):
self.program_str = ""
......@@ -8,8 +12,25 @@ class HpvmCodeGen:
self.weights_dir = weights_dir
self.test_data = test_data
self.test_labels = test_labels
self.filter_names = {} # essentially tensors
self.filter_names = dict() # essentially tensors
for tensor in self.tensors.values():
if isinstance(tensor, WeightTensor):
self.filter_names[tensor.get_mapped_name()] = 1
print(self.filter_names)
################################################
# Aux functions
################################################
def get_last_var(self):
    """Return the name of the most recently issued codegen variable
    (``var_<counter>``) without advancing the counter."""
    return "var_{}".format(self.var_cnt)
def get_new_var(self):
    """Advance the variable counter and return the fresh variable name
    (``var_<counter>``)."""
    self.var_cnt += 1
    return "var_{}".format(self.var_cnt)
################################################
# CodeGen functions
################################################
def emit_header(self):
headers = "\n#include <stdio.h> \n"
headers += "#include <stdlib.h> \n"
......@@ -22,61 +43,70 @@ class HpvmCodeGen:
headers += "#include <tensorUtils.h> \n\n"
self.program_str += headers
def emit_root(self):
def emitRootNodeHeader():
root_signature = "void root("
index = 0
for f_name in self.filter_names:
if index > 0:
root_signature += "\t "
self.filter_names[f_name] = index
root_signature += "void* " + f_name + ", "
root_signature += "size_t " + f_name + "_bytes"
if index < len(self.filter_names) - 1:
root_signature += ", \n"
def emit_root_node_header(self):
root_signature = "void root("
index = 0
for f_name in self.filter_names:
if index > 0:
root_signature += "\t "
self.filter_names[f_name] = index
root_signature += "void* " + f_name + ", "
root_signature += "size_t " + f_name + "_bytes"
if index < len(self.filter_names) - 1:
root_signature += ", \n"
index += 1
root_signature += "){ \n\n"
root_signature += "\n __visc__hint(visc::CPU_TARGET); \n"
root_signature += " __visc__attributes(" + \
str(len(self.filter_names)) + ", "
index = 0
for f_name in self.filter_names:
root_signature += f_name
if index < len(self.filter_names) - 1:
root_signature += ", "
index += 1
root_signature += "){ \n\n"
root_signature += "\n __visc__hint(visc::CPU_TARGET); \n"
root_signature += " __visc__attributes(" + \
str(len(self.filter_names)) + ", "
index = 0
for f_name in self.filter_names:
root_signature += f_name
if index < len(self.filter_names) - 1:
root_signature += ", "
index += 1
root_signature += ", 0); \n\n"
return root_signature
root_signature += ", 0); \n\n"
self.program_str += root_signature
def emitRootNodeFooter(self):
    """Build and return the closing text of the generated root function.

    Emits two ``__visc__bindOut`` calls binding the last DFG node's output
    variable to the root node's two output slots (presumably the tensor
    pointer and its byte count — TODO confirm slot semantics against the
    VISC API), followed by the closing brace. Returns the string rather
    than appending it to ``self.program_str``.

    NOTE(review): camelCase name is inconsistent with the snake_case
    sibling emitters; relies on ``self.dfg.last_node`` / ``self.output_map``
    which are not set up in the code visible here — verify they exist.
    """
    last_node = self.dfg.last_node
    output_var = self.output_map[last_node.layer_name]
    # Binding output of last DFG node to the Root Node output
    root_footer_str = "\n __visc__bindOut(" + \
        output_var + ", 0, 0, 0); \n"
    root_footer_str += " __visc__bindOut(" + \
        output_var + ", 1, 1, 0); \n"
    root_footer_str += "\n}\n\n"
    return root_footer_str
def emit_root_structure(self):
    """Append the ``ret_t`` struct and the packed ``RootIn`` typedef to
    the generated program.

    ``RootIn`` carries one (pointer, byte-count) field pair per entry in
    ``self.filter_names`` plus a ``ret_t`` return slot.
    """
    pieces = [
        "struct ret_t {\n",
        " void* tensor; \n",
        " size_t bytes; \n",
        "}; \n\n",
        "typedef struct __attribute__((__packed__)) {\n",
    ]
    for f_name in self.filter_names:
        pieces.append(" void* " + f_name + "; \n")
        pieces.append(" size_t " + f_name + "_bytes; \n")
    pieces.append("\n struct ret_t r; \n")
    pieces.append("}\nRootIn;\n\n")
    self.program_str += "".join(pieces)
def emitRootStructure(self):
    """Build and return the ``ret_t`` struct plus packed ``RootIn`` typedef.

    Same text as ``emit_root_structure`` above, but returns the string
    instead of appending to ``self.program_str``.

    NOTE(review): camelCase variant appears to be the older duplicate of
    ``emit_root_structure`` (both sides of a diff are rendered here) —
    confirm which one callers use.
    """
    root_struct = ""
    root_struct += "struct ret_t {\n"
    root_struct += " void* tensor; \n"
    root_struct += " size_t bytes; \n"
    root_struct += "}; \n\n"
    root_struct += "typedef struct __attribute__((__packed__)) {\n"
    for f_name in self.filter_names:
        root_struct += " void* " + f_name + "; \n"
        root_struct += " size_t " + f_name + "_bytes; \n"
    root_struct += "\n struct ret_t r; \n"
    root_struct += "}\nRootIn;\n\n"
    return root_struct
def emit_hpvm_graph(self):
    """Walk the DFG nodes in order, assign a mapped variable name to each
    node's single output tensor, and append each node's HPVM code to the
    program string.

    Raises:
        ValueError: if an ONNX node declares more than one output.
    """
    for node in self.nodes:
        # check if all inputs of this node is mapped
        cur_node = node.onnx_node
        for i in cur_node.input:
            # NOTE(review): return value is discarded — this appears to
            # only assert that each input tensor already has a mapped
            # name; confirm get_mapped_name raises on unmapped tensors.
            self.tensors[i].get_mapped_name()
        # set var name for output node
        if len(cur_node.output) > 1:
            raise ValueError("Output number for a single layer larger than 1!")
        if cur_node.op_type in skip_layer:
            # Skipped layers (Identity/Flatten/Pad, per skip_layer) reuse
            # the previous variable instead of allocating a new one.
            mapped_output_name = self.get_last_var()
        else:
            mapped_output_name = self.get_new_var()
        self.tensors[cur_node.output[0]].set_mapped_name(mapped_output_name)
        self.program_str += node.hpvm_codegen(self.tensors)
self.program_str += emitRootNodeHeader()
self.program_str += emitRootStructure()
# self.codegen(self.dfg)
self.program_str += emitRootNodeFooter()
def emit_root_node_footer(self):
    """Append the root function's closing text to the program.

    Binds the mapped variable of the graph's final output tensor to the
    root node's two output slots via ``__visc__bindOut``, then closes the
    function body.
    """
    out_var = self.tensors[self.graph.output[0].name].get_mapped_name()
    # Binding output of last DFG node to the Root Node output
    footer = (
        "\n __visc__bindOut(" + out_var + ", 0, 0, 0); \n"
        " __visc__bindOut(" + out_var + ", 1, 1, 0); \n"
        "\n}\n\n"
    )
    self.program_str += footer
def emit_main(self, test_data):
main_func_str = "int main(){ \n\n"
......@@ -99,13 +129,16 @@ class HpvmCodeGen:
def emit_source(self, dir_prefix):
    """Print the generated program and write it to a .cc file under
    ``dir_prefix``.

    NOTE(review): two ``open()`` calls appear here (both sides of a diff
    are rendered); as written the first handle is rebound without being
    closed, so ``approxhpvm_src.cc`` is created/truncated and leaked
    while only ``hpvm_src.cc`` receives the program text. Confirm the
    first line should be deleted; a ``with open(...)`` block would also
    avoid the leak.
    """
    print(self.program_str)
    f = open(dir_prefix + "/approxhpvm_src.cc", "w+")
    f = open(dir_prefix + "/hpvm_src.cc", "w+")
    f.write(self.program_str)
    f.close()
def compile(self):
    """Drive full HPVM code generation, in order: headers, root-node
    signature, root structs, the per-node graph body, the root footer,
    ``main()``, and finally dump the accumulated program to a source
    file in the weights directory.
    """
    self.emit_header()
    # self.emitRoot()
    self.emit_root_node_header()
    self.emit_root_structure()
    self.emit_hpvm_graph()
    self.emit_root_node_footer()
    self.emit_main(self.test_data)
    # dump generated program string to source file
    self.emit_source(self.weights_dir)
......@@ -7,6 +7,7 @@ import glob
onnx_file_dir = "../models/keras/lenet.onnx"
src_emit_dir = "./test_src"
opset_version_default = 11
def check_version(model, new_version):
try:
......@@ -35,15 +36,17 @@ def compile(model):
# TODO: make this in constant
# make a cmd option, default value -> constant
weights_dir = src_emit_dir
opset_version_default = 11
# test_data_dir = '../models/mnist/test_data_set_0'
# converted_model = convert_version(model)
# model = check_version(model, 11)
from graph_builder import GraphBuilder
from graph_codegen import GraphCodeGen
from hpvm_codegen import HpvmCodeGen
gBuilder = GraphBuilder(model, None, "float32", weights_dir)
gCodegen = GraphCodeGen(gBuilder.build_graph(), weights_dir)
gCodegen.compile()
#gCodegen = GraphCodeGen(gBuilder.build_graph(), weights_dir)
hCodegen = HpvmCodeGen(gBuilder.build_graph(), weights_dir)
#gCodegen.compile()
hCodegen.compile()
def main():
# TODO: Put it in args
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment