Commit 3c63fb81 authored by ys26

Update llvm/projects/onnx/frontend/operators.py

parent 939f6803
@@ -32,7 +32,39 @@ class DepthwiseConv2DNode(DFGNode):
print("\t", self.strides) print("\t", self.strides)
print("\tPadding = ", self.padding) print("\tPadding = ", self.padding)
def codegen(self): def codegen(self):
        cur_node = self  # this node carries the layer attributes used below
        input_var_name = self.getSingleInputName(cur_node)
        weights = cur_node.weights
        strides = cur_node.strides

        padding = 0
        if cur_node.padding.strip() == "valid":
            padding = 0
        else:
            padding = cur_node.padding
            padding = int((weights.shape[0] - 1) / 2)

        prev_padding = self.getPrevLayerPadding(cur_node)
        if prev_padding is not None:
            # FIXME: currently only supporting symmetric padding
            padding = prev_padding[0][0]

        # out_var_name1 is presumably the output variable name assigned by the
        # surrounding code generator
        inst_str = "void* " + out_var_name1 + " = "
        inst_str += "tensorConvolution(" + input_var_name + ", "
        inst_str += cur_node.layer_name + "_w, "
        inst_str += str(padding) + ", "
        inst_str += str(padding) + ", "
        inst_str += str(strides[0]) + ", "
        inst_str += str(strides[1]) + ", "
        inst_str += "1, "
        # depthwise convolution: the group count equals the input channel count
        # (a regular Conv2D would pass 1 here instead)
        C = weights.shape[2]
        inst_str += str(C) + "); \n"

        if strides[0] > 1 and cur_node.padding.strip() == "same":
            print("!ERROR: Same Padding not supported for Conv with Stride > 1")
            print("Use: ZeroPadding2D(padding=(" + str(padding) + "," + str(padding) + "))\n")
            sys.exit(0)  # requires "import sys" at the top of the module

        return inst_str

class DenseNode(DFGNode):
    def __init__(self, layer):
        DFGNode.__init__(self, layer)
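
For reference, a minimal sketch (not part of the commit) of the C call string that DepthwiseConv2DNode.codegen builds. The tensor names input and t2, the layer name depthwise_conv2d_1, the 3x3 kernel with 32 input channels, and stride 1 with "same" padding are all assumed for illustration:

# Illustrative only: mirrors the string construction above with assumed values.
kernel_size, in_channels = 3, 32        # hypothetical 3x3 depthwise kernel, 32 channels
strides = (1, 1)
padding = (kernel_size - 1) // 2        # symmetric "same" padding
call = ("void* t2 = tensorConvolution(input, depthwise_conv2d_1_w, "
        + str(padding) + ", " + str(padding) + ", "
        + str(strides[0]) + ", " + str(strides[1]) + ", "
        + "1, " + str(in_channels) + "); \n")
print(call)  # void* t2 = tensorConvolution(input, depthwise_conv2d_1_w, 1, 1, 1, 1, 1, 32);
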
@@ -40,7 +72,14 @@ class DenseNode(DFGNode):
print("\t", self.weights.shape) print("\t", self.weights.shape)
self.use_bias = layer.use_bias self.use_bias = layer.use_bias
def codegen(self): def codegen(self):
        cur_node = self
        input_var_name = self.getSingleInputName(cur_node)
        weights = cur_node.weights

        inst_str = "void* " + out_var_name1 + " = "
        inst_str += "tensorGemmGPU(" + input_var_name + ", "
        inst_str += cur_node.layer_name + "_w"
        inst_str += "); \n"

        return inst_str

class MaxPool2DNode(DFGNode):
    def __init__(self, layer):
        DFGNode.__init__(self, layer)
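
Similarly, a minimal sketch (not part of the commit) of the GEMM call string that DenseNode.codegen produces; the tensor names t2 and t3 and the layer name dense_1 are assumed for illustration:

# Illustrative only: mirrors the string construction above with assumed names.
input_var_name, out_var_name, layer_name = "t2", "t3", "dense_1"
call = ("void* " + out_var_name + " = tensorGemmGPU("
        + input_var_name + ", " + layer_name + "_w); \n")
print(call)  # void* t3 = tensorGemmGPU(t2, dense_1_w);
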
@@ -76,4 +115,12 @@ class BatchNormalizationNode(DFGNode):
        self.moving_mean = layer.moving_mean
        self.moving_variance = layer.moving_variance

    def codegen(self):
        cur_node = self
        input_var_name = self.getSingleInputName(cur_node)

        inst_str = "void* " + out_var_name1 + " = "
        inst_str += "tensorBatchNorm(" + input_var_name + ", "
        inst_str += cur_node.layer_name + "_gamma, "
        inst_str += cur_node.layer_name + "_beta, "
        inst_str += cur_node.layer_name + "_mean, "
        inst_str += cur_node.layer_name + "_variance, "
        inst_str += str(cur_node.epsilon)
        inst_str += "); \n"

        return inst_str
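
And a minimal sketch (not part of the commit) of the batch-normalization call string emitted above; the tensor names t3 and t4, the layer name batch_normalization_1, and the epsilon value 0.001 are assumed for illustration:

# Illustrative only: mirrors the string construction above with assumed values.
input_var_name, out_var_name = "t3", "t4"
layer_name, epsilon = "batch_normalization_1", 0.001    # epsilon value is an assumption
call = ("void* " + out_var_name + " = tensorBatchNorm(" + input_var_name + ", "
        + layer_name + "_gamma, " + layer_name + "_beta, "
        + layer_name + "_mean, " + layer_name + "_variance, "
        + str(epsilon) + "); \n")
print(call)
# void* t4 = tensorBatchNorm(t3, batch_normalization_1_gamma, batch_normalization_1_beta,
#     batch_normalization_1_mean, batch_normalization_1_variance, 0.001);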