Commit adcddde7 authored by Yifan Zhao

Simplifies codegen node return value

parent f3dea9d3
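In short: `hpvm_codegen` no longer formats a complete C statement per node. It now returns a pair of the HPVM intrinsic's name and the list of scalar (non-tensor) arguments, and the Jinja template assembles the call itself. A minimal sketch of how such a pair becomes the emitted statement (the `assemble_call` helper is hypothetical, not part of this commit; it only mirrors what the rewritten template does):

```python
# Hypothetical helper mirroring what the rewritten template does with the
# (func_name, extra_args) pair returned by hpvm_codegen().
def assemble_call(func_name, n_inputs, extra_args):
    tensor_args = [f"t{i}" for i in range(n_inputs)]    # t0, t1, ...
    scalar_args = [str(a) for a in extra_args]          # e.g. padding, strides
    return f"void *r = {func_name}({', '.join(tensor_args + scalar_args)});"

# For a Conv2D node with padding=1 and strides=(1, 1):
print(assemble_call("__visc__tensor_convolution", 2, [1, 1, 1, 1]))
# void *r = __visc__tensor_convolution(t0, t1, 1, 1, 1, 1);
```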
@@ -5,7 +5,6 @@
 class DFGNode(object):
     def __init__(self, onnx_node):
-        # self.onnx_node = onnx_node
         self.name = onnx_node.name
         self.op_type = onnx_node.op_type
         self.input = onnx_node.input
@@ -14,28 +13,28 @@ class DFGNode(object):
     def codegen(self, tensors):
         return "\n***Not Implemented***\n"

-    def hpvm_codegen(self, tensors):
-        return "\n***Not Implemented***\n"
+    def hpvm_codegen(self):
+        return "", []

     def __repr__(self):
         return f"{self.__class__.__name__}({self.input}) -> {self.output}"

-'''
+"""
 Element wise operators that is for activation function
 e.g. HardSigmoid, LeakyRelu, PRelu, Pow, Reciprocal,
 Relu, Selu, Sigmoid, Softplus, Sqrt, ThresholdedRelu,
 Abs, Ceil, Elu, Floor, Neg
-'''
+"""
 class ActivationNode(DFGNode):
     pass

-'''
+"""
 ELement wise operators that is not for activation function.
 In other words, they are logical comparison operators
 e.g. And, Equal, Greater, GreaterOrEqual, Less, LessOrEqual,
 Or, Xor
-'''
+"""
 class LogicalOpNode(DFGNode):
     pass
@@ -55,8 +54,8 @@ class AddNode(DFGNode):
         inst_str += "tensorAdd(" + left_input + ", " + right_input + "); \n"
         return inst_str

-    def hpvm_codegen(self, tensors):
-        return "  void *r = __visc__tensor_add(t1, t2); \n"
+    def hpvm_codegen(self):
+        return "__visc__tensor_add", []

 class BiasAddNode(DFGNode):
@@ -76,8 +75,8 @@ class BiasAddNode(DFGNode):
         inst_str += "tensorAdd(" + left_input + ", " + right_input + "); \n"
         return inst_str

-    def hpvm_codegen(self, tensors):
-        return "  void *r = __visc__tensor_add(t1, t2); \n"
+    def hpvm_codegen(self):
+        return "__visc__tensor_add", []

 class MatMulNode(DFGNode):
@@ -91,8 +90,8 @@ class MatMulNode(DFGNode):
             ", " + right_input + "); \n"
         return inst_str

-    def hpvm_codegen(self, tensors):
-        return "  void *r = __visc__tensor_mul(t1, t2); \n"
+    def hpvm_codegen(self):
+        return "__visc__tensor_mul", []

 class SoftMaxNode(DFGNode):
@@ -105,8 +104,8 @@ class SoftMaxNode(DFGNode):
         inst_str += "tensorSoftmax(" + mapped_input_name + "); \n"
         return inst_str

-    def hpvm_codegen(self, tensors):
-        return "  void* r = __visc__tensor_softmax(t1); \n"
+    def hpvm_codegen(self):
+        return "__visc__tensor_softmax", []

 class Conv2DNode(DFGNode):
@@ -143,15 +142,8 @@ class Conv2DNode(DFGNode):
         inst_str += "1, 1); \n"
         return inst_str

-    def hpvm_codegen(self, tensors):
-        inst_str = "  void *r = __visc__tensor_convolution(t1, t2, "
-        inst_str += str(self.padding) + ", "
-        inst_str += str(self.padding) + ", "
-        inst_str += str(self.strides[0]) + ", "
-        inst_str += str(self.strides[1])
-        inst_str += "); \n"
-        return inst_str
+    def hpvm_codegen(self):
+        return "__visc__tensor_convolution", [self.padding, self.padding, self.strides[0], self.strides[1]]

 class MaxPool2DNode(DFGNode):
@@ -182,12 +174,8 @@ class MaxPool2DNode(DFGNode):
         inst_str += "); \n"
         return inst_str

-    def hpvm_codegen(self, tensors):
-        inst_str = "  void* r = __visc__tensor_pool_max(t1, "
-        inst_str += str(self.pool_size[0]) + ", " + str(self.pool_size[1]) + ", "
-        inst_str += str(self.padding) + ", " + str(self.padding) + ", "
-        inst_str += str(self.strides[0]) + ", " + str(self.strides[1]) + "); \n"
+    def hpvm_codegen(self):
+        return "__visc__tensor_pool_max", [*self.pool_size, self.padding, self.padding, *self.strides]

 class AveragePool2DNode(DFGNode):
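Both pooling nodes (max above, average below) flatten their tuple fields with list splats, which preserves exactly the argument order the old string concatenation produced. A quick check with illustrative values for `pool_size`, `padding`, and `strides`:

```python
# Illustrative values; the order matches the old concatenation:
# pool_h, pool_w, pad, pad, stride_h, stride_w.
pool_size, padding, strides = (3, 3), 1, (2, 2)
args = [*pool_size, padding, padding, *strides]
assert args == [3, 3, 1, 1, 2, 2]
```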
@@ -217,12 +205,8 @@ class AveragePool2DNode(DFGNode):
         inst_str += "); \n"
         return inst_str

-    def hpvm_codegen(self, tensors):
-        inst_str = "  void* r = __visc__tensor_pool_avg(t1, "
-        inst_str += str(self.pool_size[0]) + ", " + str(self.pool_size[1]) + ", "
-        inst_str += str(self.padding) + ", " + str(self.padding) + ", "
-        inst_str += str(self.strides[0]) + ", " + str(self.strides[1]) + "); \n"
+    def hpvm_codegen(self):
+        return "__visc__tensor_pool_avg", [*self.pool_size, self.padding, self.padding, *self.strides]

 class ReluNode(DFGNode):
@@ -234,8 +218,8 @@ class ReluNode(DFGNode):
         inst_str += "tensorRelu(" + mapped_input_name + "); \n"
         return inst_str

-    def hpvm_codegen(self, tensors):
-        return "  void* r = __visc__tensor_relu(t1); \n"
+    def hpvm_codegen(self):
+        return "__visc__tensor_relu", []

 class TanhNode(DFGNode):
@@ -246,8 +230,8 @@ class TanhNode(DFGNode):
         inst_str += "tensorTanh(" + mapped_input_name + "); \n"
         return inst_str

-    def hpvm_codegen(self, tensors):
-        return "  void* r = __visc__tensor_tanh(t1); \n"
+    def hpvm_codegen(self):
+        return "__visc__tensor_tanh", []

 class BatchNormalizationNode(DFGNode):
@@ -272,26 +256,18 @@ class BatchNormalizationNode(DFGNode):
         inst_str += "); \n"
         return inst_str

-    def hpvm_codegen(self, tensors):
-        inst_str = "  void *r = __visc__tensor_batchnorm(t1, t2, t3, t4, t5, "
-        inst_str += str(self.epsilon) + "); \n"
-        return inst_str
+    def hpvm_codegen(self):
+        return "__visc__tensor_batchnorm", [self.epsilon]

 class PadNode(DFGNode):
     def codegen(self, tensors):
         return ""

-    def hpvm_codegen(self, tensors):
-        return ""

 class IdentityNode(DFGNode):
     def codegen(self, tensors):
         return ""

-    def hpvm_codegen(self, tensors):
-        return ""

 class FlattenNode(DFGNode):
     def __init__(self, name: str, op_type: str, input, output):
         self.name = name
@@ -311,9 +287,6 @@ class FlattenNode(DFGNode):
     def codegen(self, tensors):
         return ""

-    def hpvm_codegen(self, tensors):
-        return ""

 class ZeroPadding2DNode(DFGNode):
     pass
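With these changes every node class above answers the same two questions: which HPVM intrinsic to call, and which scalar arguments follow the tensor inputs. A usage sketch (assuming the classes from this file are importable; the stand-in ONNX node is fabricated purely for illustration):

```python
from types import SimpleNamespace

# Stand-in for an onnx NodeProto; only the fields DFGNode.__init__ reads.
fake_relu = SimpleNamespace(name="relu_1", op_type="Relu",
                            input=["conv_out"], output=["relu_out"])
node = ReluNode(fake_relu)               # ReluNode as defined in this diff
assert node.hpvm_codegen() == ("__visc__tensor_relu", [])
```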
@@ -74,8 +74,8 @@ class HpvmCodeGen:
     def emit_hpvm_node_structures(self) -> List[dict]:
         node_envs = []
         for node in self.dfg.traverse_order:
-            generated_code = node.hpvm_codegen(self.tensors)
-            if generated_code == "":
+            func_name, extra_args = node.hpvm_codegen()
+            if func_name == "":  # No code generation
                 # Node must have single input, we equate the output to
                 # the input and skip code generation.
                 assert len(node.input) == 1 and len(node.output) == 1
@@ -88,7 +88,8 @@ class HpvmCodeGen:
                     "name": varname,
                     "input_size": len(node.input),
                     "edges": self._emit_hpvm_node_edges(node.input),
-                    "code": generated_code,
+                    "call_name": func_name,
+                    "call_args": extra_args
                 }
             )
         return node_envs
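Downstream, each template environment entry now carries `call_name` and `call_args` instead of a preformatted `code` string. One entry might look like this (values illustrative, edge contents elided):

```python
# Shape of one node_envs entry after this commit.
example_env = {
    "name": "conv2d_1",
    "input_size": 2,
    "edges": [...],                      # from _emit_hpvm_node_edges
    "call_name": "__visc__tensor_convolution",
    "call_args": [1, 1, 1, 1],
}
```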
@@ -10,14 +10,16 @@
 {% for node in nodes %}
 void {{node.name}}_node(
-{%- for n in range(1, node.input_size + 1) -%}
+{%- for n in range(node.input_size) -%}
 void *t{{n}}, size_t bytes_t{{n}}{{", " if not loop.last}}
 {%- endfor %}) {
   __visc__hint(visc::CUDNN_TARGET);
-  __visc__attributes({{node.input_size}}, {% for n in range(1, node.input_size + 1) -%}
+  __visc__attributes({{node.input_size}}, {% for n in range(node.input_size) -%}
 t{{n}}{{", " if not loop.last}}
 {%- endfor %}, 0);
-  {{node.code}}
+  void *r = {{node.call_name}}({% for n in range(node.input_size) -%}
+t{{n}}{{", " if not loop.last}}
+{%- endfor %}{{", " if node.call_args}}{{node.call_args|join(", ")}});
   __visc__return(2, r, (size_t) 0);
 }
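Note the parameter loops also switched from `range(1, node.input_size + 1)` to `range(node.input_size)`, so tensor parameters are now named `t0 .. tN-1`; the old hardcoded strings like `__visc__tensor_add(t1, t2)` would no longer line up, which is why the call is assembled in the template instead. A small runnable check of the new call-site line (requires jinja2; the template string below is a trimmed copy of just that line):

```python
from jinja2 import Template

call_line = Template(
    "void *r = {{node.call_name}}("
    "{% for n in range(node.input_size) %}t{{n}}{{', ' if not loop.last}}{% endfor %}"
    "{{', ' if node.call_args}}{{node.call_args|join(', ')}});"
)
node = {"call_name": "__visc__tensor_convolution",
        "input_size": 2, "call_args": [1, 1, 1, 1]}
print(call_line.render(node=node))
# void *r = __visc__tensor_convolution(t0, t1, 1, 1, 1, 1);
```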