Skip to content
Snippets Groups Projects
Commit 37464d36 authored by Yifan Zhao's avatar Yifan Zhao
Browse files

Reverting 5a1329a5 but fixing bugs

parent b58b8819
No related branches found
No related tags found
No related merge requests found
......@@ -222,6 +222,7 @@ class DFG(object):
mul_node = g.MatMulNode(onnx_node)
bias_node = g.BiasAddNode(onnx_node)
self._allocate_insert_var(mul_node, bias_node)
mul_node.input.pop()
return [mul_node, bias_node]
one_to_one_nodes = {
"MaxPool": g.MaxPool2DNode,
......
......@@ -34,7 +34,7 @@ class AddNode(DFGNode):
return "tensorAdd", []
def hpvm_codegen(self):
    """Return the HPVM intrinsic for element-wise add.

    Returns:
        tuple[str, list]: the ``__hpvm__tensor_add`` intrinsic name and an
        empty extra-argument list (tensor add takes no parameters).
    """
    # The duplicate legacy ``__visc__tensor_add`` return that followed was
    # unreachable dead code (diff residue) and has been removed.
    return "__hpvm__tensor_add", []
class BiasAddNode(DFGNode):
......@@ -49,7 +49,7 @@ class BiasAddNode(DFGNode):
return "tensorAdd", []
def hpvm_codegen(self):
    """Return the HPVM intrinsic for a bias add.

    Bias add lowers to the same element-wise add intrinsic as ``AddNode``.

    Returns:
        tuple[str, list]: the ``__hpvm__tensor_add`` intrinsic name and an
        empty extra-argument list.
    """
    # Unreachable legacy ``__visc__tensor_add`` duplicate return removed.
    return "__hpvm__tensor_add", []
class MatMulNode(DFGNode):
......@@ -57,7 +57,7 @@ class MatMulNode(DFGNode):
return "tensorGemmGPU", []
def hpvm_codegen(self):
    """Return the HPVM intrinsic for matrix multiplication.

    Returns:
        tuple[str, list]: the ``__hpvm__tensor_mul`` intrinsic name and an
        empty extra-argument list.
    """
    # Unreachable legacy ``__visc__tensor_mul`` duplicate return removed.
    return "__hpvm__tensor_mul", []
class SoftMaxNode(DFGNode):
......@@ -65,7 +65,7 @@ class SoftMaxNode(DFGNode):
return "tensorSoftmax", []
def hpvm_codegen(self):
    """Return the HPVM intrinsic for softmax.

    Returns:
        tuple[str, list]: the ``__hpvm__tensor_softmax`` intrinsic name and
        an empty extra-argument list.
    """
    # Unreachable legacy ``__visc__tensor_softmax`` duplicate return removed.
    return "__hpvm__tensor_softmax", []
class Conv2DNode(DFGNode):
......@@ -94,7 +94,7 @@ class Conv2DNode(DFGNode):
def hpvm_codegen(self):
    """Return the HPVM intrinsic and arguments for 2D convolution.

    Returns:
        tuple[str, list]: the ``__hpvm__tensor_convolution`` intrinsic name
        and its argument list ``[pad, pad, stride_h, stride_w]``.
        Padding is symmetric, so the single ``self.padding`` value is
        emitted twice.
    """
    # The stray ``__visc__tensor_convolution`` tuple element (diff residue)
    # made this a 3-tuple, inconsistent with every sibling node's
    # (intrinsic, args) pair; it has been removed.
    return (
        "__hpvm__tensor_convolution",
        [self.padding, self.padding, self.strides[0], self.strides[1]],
    )
......@@ -128,7 +128,7 @@ class MaxPool2DNode(DFGNode):
def hpvm_codegen(self):
    """Return the HPVM intrinsic and arguments for 2D max pooling.

    Returns:
        tuple[str, list]: the ``__hpvm__tensor_pool_max`` intrinsic name and
        its argument list ``[pool_h, pool_w, pad, pad, stride_h, stride_w]``.
        Padding is symmetric, so the single ``self.padding`` value is
        emitted twice.
    """
    # Stray ``__visc__tensor_pool_max`` tuple element (diff residue) removed
    # so the method returns the standard (intrinsic, args) pair.
    return (
        "__hpvm__tensor_pool_max",
        [*self.pool_size, self.padding, self.padding, *self.strides],
    )
......@@ -162,7 +162,7 @@ class AveragePool2DNode(DFGNode):
def hpvm_codegen(self):
    """Return the HPVM intrinsic and arguments for 2D average pooling.

    Returns:
        tuple[str, list]: the ``__hpvm__tensor_pool_avg`` intrinsic name and
        its argument list ``[pool_h, pool_w, pad, pad, stride_h, stride_w]``.
        Padding is symmetric, so the single ``self.padding`` value is
        emitted twice.
    """
    # Stray ``__visc__tensor_pool_avg`` tuple element (diff residue) removed
    # so the method returns the standard (intrinsic, args) pair.
    return (
        "__hpvm__tensor_pool_avg",
        [*self.pool_size, self.padding, self.padding, *self.strides],
    )
......@@ -172,7 +172,7 @@ class ReluNode(DFGNode):
return "tensorRelu", []
def hpvm_codegen(self):
    """Return the HPVM intrinsic for ReLU activation.

    Returns:
        tuple[str, list]: the ``__hpvm__tensor_relu`` intrinsic name and an
        empty extra-argument list.
    """
    # Unreachable legacy ``__visc__tensor_relu`` duplicate return removed.
    return "__hpvm__tensor_relu", []
class TanhNode(DFGNode):
......@@ -180,7 +180,7 @@ class TanhNode(DFGNode):
return "tensorTanh", []
def hpvm_codegen(self):
    """Return the HPVM intrinsic for tanh activation.

    Returns:
        tuple[str, list]: the ``__hpvm__tensor_tanh`` intrinsic name and an
        empty extra-argument list.
    """
    # Unreachable legacy ``__visc__tensor_tanh`` duplicate return removed.
    return "__hpvm__tensor_tanh", []
class BatchNormalizationNode(DFGNode):
......@@ -195,7 +195,7 @@ class BatchNormalizationNode(DFGNode):
return "tensorBatchNorm", [self.epsilon]
def hpvm_codegen(self):
    """Return the HPVM intrinsic and arguments for batch normalization.

    Returns:
        tuple[str, list]: the ``__hpvm__tensor_batchnorm`` intrinsic name
        and a single-element argument list carrying ``self.epsilon``
        (the numerical-stability constant).
    """
    # Unreachable legacy ``__visc__tensor_batchnorm`` duplicate return
    # removed.
    return "__hpvm__tensor_batchnorm", [self.epsilon]
class FlattenNode(DFGNode):
......
......@@ -4,7 +4,7 @@
#include <fcntl.h>
#include <sys/stat.h>
#include <cstring>
#include <hpvm.h>
#include <visc.h>
#include <tensorTypes.h>
#include <tensorUtils.h>
......@@ -13,14 +13,14 @@ void {{node.name}}_node(
{%- for n in range(node.input_size) -%}
void *t{{n}}, size_t bytes_t{{n}}{{", " if not loop.last}}
{%- endfor %}) {
__hpvm__hint(hpvm::CUDNN_TARGET);
__hpvm__attributes({{node.input_size}}, {% for n in range(node.input_size) -%}
__visc__hint(visc::CUDNN_TARGET);
__visc__attributes({{node.input_size}}, {% for n in range(node.input_size) -%}
t{{n}}{{", " if not loop.last}}
{%- endfor %}, 0);
void *r = {{node.call_name}}({% for n in range(node.input_size) -%}
t{{n}}{{", " if not loop.last}}
{%- endfor %}{{", " if node.call_args}}{{node.call_args|join(", ")}});
__hpvm__return(2, r, (size_t) 0);
__visc__return(2, r, (size_t) 0);
}
{% endfor -%}
......@@ -28,26 +28,26 @@ t{{n}}{{", " if not loop.last}}
// Jinja2-templated root function of the generated HPVM program: it receives
// every graph input as a (pointer, byte-size) pair, creates one HPVM child
// node per DFG node, and wires the bindIn/edge/bindOut connections between
// them. Each logical value occupies two slots (pointer and size), hence the
// ``* 2`` / ``* 2 + 1`` index arithmetic on the bindings below.
//
// NOTE(review): this span appears to contain diff/merge residue -- every
// call is present twice, once in the ``__hpvm__*`` spelling and once in the
// older ``__visc__*`` spelling, and the duplicated ``{% for %}`` opener on
// the *_attributes lines leaves the template unbalanced. Confirm which API
// generation the build targets and remove the other set.
void root({%- for n in inputs -%}
void *{{n}}, size_t {{n}}_bytes{{", " if not loop.last}}
{%- endfor %}) {
__hpvm__hint(hpvm::CPU_TARGET);
__hpvm__attributes({{inputs|length}}, {% for n in inputs -%}
__visc__hint(visc::CPU_TARGET);
__visc__attributes({{inputs|length}}, {% for n in inputs -%}
{{n}}{{", " if not loop.last}}
{%- endfor %}, 0);
{% for node in nodes %}
void* {{node.name}} = __hpvm__createNodeND(0, {{node.name}}_node);
void* {{node.name}} = __visc__createNodeND(0, {{node.name}}_node);
{% for edge in node.edges %}
{% if edge.is_bindin %}
// Bind a root input straight through to this node (pointer slot, then size
// slot).
__hpvm__bindIn({{node.name}}, {{edge.input_idx * 2}}, {{edge.edge_idx * 2}}, 0);
__hpvm__bindIn({{node.name}}, {{edge.input_idx * 2 + 1}}, {{edge.edge_idx * 2 + 1}}, 0);
__visc__bindIn({{node.name}}, {{edge.input_idx * 2}}, {{edge.edge_idx * 2}}, 0);
__visc__bindIn({{node.name}}, {{edge.input_idx * 2 + 1}}, {{edge.edge_idx * 2 + 1}}, 0);
{% else %}
// Connect a producer node's output to this node (pointer slot, then size
// slot).
__hpvm__edge({{edge.input_node}}, {{node.name}}, 1, 0, {{edge.edge_idx * 2}}, 0);
__hpvm__edge({{edge.input_node}}, {{node.name}}, 1, 1, {{edge.edge_idx * 2 + 1}}, 0);
__visc__edge({{edge.input_node}}, {{node.name}}, 1, 0, {{edge.edge_idx * 2}}, 0);
__visc__edge({{edge.input_node}}, {{node.name}}, 1, 1, {{edge.edge_idx * 2 + 1}}, 0);
{% endif %}
{% endfor %}
{% endfor %}
// Expose the final node's (pointer, size) pair as the graph's output.
__hpvm__bindOut({{output}}, 0, 0, 0);
__hpvm__bindOut({{output}}, 1, 1, 0);
__visc__bindOut({{output}}, 0, 0, 0);
__visc__bindOut({{output}}, 1, 1, 0);
}
struct ret_t {
......@@ -79,12 +79,12 @@ int main(){
args->{{n}}_bytes = 0;
{% endfor %}
__hpvm__init();
void* dfg = __hpvm__launch(0, root, (void*) args);
__hpvm__wait(dfg);
__visc__init();
void* dfg = __visc__launch(0, root, (void*) args);
__visc__wait(dfg);
void *result = static_cast<RootIn*>(args)->input;
hpvm_request_tensor(result, 0);
__hpvm__cleanup();
visc_request_tensor(result, 0);
__visc__cleanup();
computeAccuracy3(labels, result);
return 0;
......
from os import PathLike
from typing import List
import onnx
from onnx import numpy_helper
......@@ -22,8 +24,8 @@ class InputTensor(Tensor):
# get type of input tensor
tensor_type = input_proto.type.tensor_type
# check if it has a shape:
shape = tensor_type.shape if tensor_type.HasField("shape") else None
self.shape = [d.dim_value for d in shape.dim] if shape else None
shape = tensor_type.shape
self.shape: List[int] = [d.dim_value for d in shape.dim]
# Can be either input or weight tensor
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment