diff --git a/hpvm/projects/onnx/frontend/graph_ir.py b/hpvm/projects/onnx/frontend/graph_ir.py
index dd4b82880227df1b77a80c9f53f659b6939e4036..5c0a2eef3cfd70e78efd95da5a36dc8cde65d68e 100644
--- a/hpvm/projects/onnx/frontend/graph_ir.py
+++ b/hpvm/projects/onnx/frontend/graph_ir.py
@@ -55,7 +55,7 @@ class AddNode(DFGNode):
         return inst_str
 
     def hpvm_codegen(self):
-        return "__visc__tensor_add", []
+        return "__hpvm__tensor_add", []
 
 
 class BiasAddNode(DFGNode):
@@ -76,7 +76,7 @@ class BiasAddNode(DFGNode):
         return inst_str
 
     def hpvm_codegen(self):
-        return "__visc__tensor_add", []
+        return "__hpvm__tensor_add", []
 
 
 class MatMulNode(DFGNode):
@@ -91,7 +91,7 @@ class MatMulNode(DFGNode):
         return inst_str
 
     def hpvm_codegen(self):
-        return "__visc__tensor_mul", []
+        return "__hpvm__tensor_mul", []
 
 
 class SoftMaxNode(DFGNode):
@@ -105,7 +105,7 @@ class SoftMaxNode(DFGNode):
         return inst_str
 
     def hpvm_codegen(self):
-        return "__visc__tensor_softmax", []
+        return "__hpvm__tensor_softmax", []
 
 
 class Conv2DNode(DFGNode):
@@ -143,7 +143,7 @@ class Conv2DNode(DFGNode):
         return inst_str
 
     def hpvm_codegen(self):
-        return "__visc__tensor_convolution", [self.padding, self.padding, self.strides[0], self.strides[1]]
+        return "__hpvm__tensor_convolution", [self.padding, self.padding, self.strides[0], self.strides[1]]
 
 
 class MaxPool2DNode(DFGNode):
@@ -175,7 +175,7 @@ class MaxPool2DNode(DFGNode):
         return inst_str
 
     def hpvm_codegen(self):
-        return "__visc__tensor_pool_max", [*self.pool_size, self.padding, self.padding, *self.strides]
+        return "__hpvm__tensor_pool_max", [*self.pool_size, self.padding, self.padding, *self.strides]
 
 
 class AveragePool2DNode(DFGNode):
@@ -206,7 +206,7 @@ class AveragePool2DNode(DFGNode):
         return inst_str
 
     def hpvm_codegen(self):
-        return "__visc__tensor_pool_avg", [*self.pool_size, self.padding, self.padding, *self.strides]
+        return "__hpvm__tensor_pool_avg", [*self.pool_size, self.padding, self.padding, *self.strides]
 
 
 class ReluNode(DFGNode):
@@ -219,7 +219,7 @@ class ReluNode(DFGNode):
         return inst_str
 
     def hpvm_codegen(self):
-        return "__visc__tensor_relu", []
+        return "__hpvm__tensor_relu", []
 
 
 class TanhNode(DFGNode):
@@ -231,7 +231,7 @@ class TanhNode(DFGNode):
         return inst_str
 
     def hpvm_codegen(self):
-        return "__visc__tensor_tanh", []
+        return "__hpvm__tensor_tanh", []
 
 
 class BatchNormalizationNode(DFGNode):
@@ -257,7 +257,7 @@ class BatchNormalizationNode(DFGNode):
         return inst_str
 
     def hpvm_codegen(self):
-        return "__visc__tensor_batchnorm", [self.epsilon]
+        return "__hpvm__tensor_batchnorm", [self.epsilon]
 
 
 class PadNode(DFGNode):
diff --git a/hpvm/projects/onnx/frontend/hpvm_template.cpp b/hpvm/projects/onnx/frontend/hpvm_template.cpp
index efbc7a45645b9ad78782294a7db2402f11d231c8..4c4e3afa48ea163426afb2287ef1ee1c51acc66d 100644
--- a/hpvm/projects/onnx/frontend/hpvm_template.cpp
+++ b/hpvm/projects/onnx/frontend/hpvm_template.cpp
@@ -4,7 +4,7 @@
 #include <fcntl.h>
 #include <sys/stat.h>
 #include <cstring>
-#include <visc.h>
+#include <hpvm.h>
 #include <tensorTypes.h>
 #include <tensorUtils.h>
 
@@ -13,14 +13,14 @@
 void {{node.name}}_node(
 {%- for n in range(node.input_size) -%}
 void *t{{n}}, size_t bytes_t{{n}}{{", " if not loop.last}}
 {%- endfor %}) {
-  __visc__hint(visc::CUDNN_TARGET);
-  __visc__attributes({{node.input_size}}, {% for n in range(node.input_size) -%}
+  __hpvm__hint(hpvm::CUDNN_TARGET);
+  __hpvm__attributes({{node.input_size}}, {% for n in range(node.input_size) -%}
 t{{n}}{{", " if not loop.last}}
 {%- endfor %}, 0);
   void *r = {{node.call_name}}({% for n in range(node.input_size) -%}
 t{{n}}{{", " if not loop.last}}
 {%- endfor %}{{", " if node.call_args}}{{node.call_args|join(", ")}});
-  __visc__return(2, r, (size_t) 0);
+  __hpvm__return(2, r, (size_t) 0);
 }
 {% endfor -%}
@@ -28,26 +28,26 @@
 void root({%- for n in inputs -%}
 void *{{n}}, size_t {{n}}_bytes{{", " if not loop.last}}
 {%- endfor %}) {
-  __visc__hint(visc::CPU_TARGET);
-  __visc__attributes({{inputs|length}}, {% for n in inputs -%}
+  __hpvm__hint(hpvm::CPU_TARGET);
+  __hpvm__attributes({{inputs|length}}, {% for n in inputs -%}
 {{n}}{{", " if not loop.last}}
 {%- endfor %}, 0);
 
 {% for node in nodes %}
-  void* {{node.name}} = __visc__createNodeND(0, {{node.name}}_node);
+  void* {{node.name}} = __hpvm__createNodeND(0, {{node.name}}_node);
 {% for edge in node.edges %}
 {% if edge.is_bindin %}
-  __visc__bindIn({{node.name}}, {{edge.input_idx * 2}}, {{edge.edge_idx * 2}}, 0);
-  __visc__bindIn({{node.name}}, {{edge.input_idx * 2 + 1}}, {{edge.edge_idx * 2 + 1}}, 0);
+  __hpvm__bindIn({{node.name}}, {{edge.input_idx * 2}}, {{edge.edge_idx * 2}}, 0);
+  __hpvm__bindIn({{node.name}}, {{edge.input_idx * 2 + 1}}, {{edge.edge_idx * 2 + 1}}, 0);
 {% else %}
-  __visc__edge({{edge.input_node}}, {{node.name}}, 1, 0, {{edge.edge_idx * 2}}, 0);
-  __visc__edge({{edge.input_node}}, {{node.name}}, 1, 1, {{edge.edge_idx * 2 + 1}}, 0);
+  __hpvm__edge({{edge.input_node}}, {{node.name}}, 1, 0, {{edge.edge_idx * 2}}, 0);
+  __hpvm__edge({{edge.input_node}}, {{node.name}}, 1, 1, {{edge.edge_idx * 2 + 1}}, 0);
 {% endif %}
 {% endfor %}
 {% endfor %}
 
-  __visc__bindOut({{output}}, 0, 0, 0);
-  __visc__bindOut({{output}}, 1, 1, 0);
+  __hpvm__bindOut({{output}}, 0, 0, 0);
+  __hpvm__bindOut({{output}}, 1, 1, 0);
 }
 
 struct ret_t {
@@ -79,12 +79,12 @@ int main(){
   args->{{n}}_bytes = 0
 {% endfor %}
 
-  __visc__init();
-  void* dfg = __visc__launch(0, root, (void*) args);
-  __visc__wait(dfg);
+  __hpvm__init();
+  void* dfg = __hpvm__launch(0, root, (void*) args);
+  __hpvm__wait(dfg);
   void *result = static_cast<RootIn*>(args)->input;
   hpvm_request_tensor(result, 0);
 
-  __visc__cleanup();
+  __hpvm__cleanup();
   computeAccuracy3(labels, result);
   return 0;