Skip to content
Snippets Groups Projects
Commit ecacc226 authored by Hashim Sharif's avatar Hashim Sharif
Browse files

Adding 1st Lenet Conv1 Layer in Lenet Benchmark

parent b6f6d14c
No related branches found
No related tags found
No related merge requests found
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
using namespace std; using namespace std;
void tensorConvNode(void *t1, size_t bytes1, void *t2, size_t bytes2) { void tensorConvNode1(void *t1, size_t bytes1, void *t2, size_t bytes2) {
__visc__hint(visc::CUDNN_TARGET); __visc__hint(visc::CUDNN_TARGET);
__visc__attributes(2, t1, t2, 0); __visc__attributes(2, t1, t2, 0);
...@@ -19,7 +19,7 @@ void tensorConvNode(void *t1, size_t bytes1, void *t2, size_t bytes2) { ...@@ -19,7 +19,7 @@ void tensorConvNode(void *t1, size_t bytes1, void *t2, size_t bytes2) {
} }
void tensorAddNode(void *t1, size_t bytest1, void *t2, size_t bytest2) { void tensorAddNode1(void *t1, size_t bytest1, void *t2, size_t bytest2) {
__visc__hint(visc::CUDNN_TARGET); __visc__hint(visc::CUDNN_TARGET);
__visc__attributes(2, t1, t2, 0); __visc__attributes(2, t1, t2, 0);
...@@ -27,6 +27,23 @@ void tensorAddNode(void *t1, size_t bytest1, void *t2, size_t bytest2) { ...@@ -27,6 +27,23 @@ void tensorAddNode(void *t1, size_t bytest1, void *t2, size_t bytest2) {
__visc__return(2, r, (size_t) 0); __visc__return(2, r, (size_t) 0);
} }
/// Leaf dataflow node: 2x2 max-pooling (stride 2, no padding) on tensor t1.
/// Targeted at the CUDNN backend via __visc__hint; returns the pooled tensor
/// plus a zero size, matching the (value, bytes) return convention of the
/// sibling nodes in this file.
void tensorPoolNode1(void *t1, size_t bytest1) {
  __visc__hint(visc::CUDNN_TARGET);
  // One tensor input, zero outputs declared via attributes.
  __visc__attributes(1, t1, 0);

  // Window 2x2, padding 0x0, stride 2x2 — argument meaning per the
  // __visc__tensor_pool_max prototype used elsewhere in this project.
  void *pooled = __visc__tensor_pool_max(t1, 2, 2, 0, 0, 2, 2);
  __visc__return(2, pooled, (size_t) 0);
}
/// Leaf dataflow node: elementwise tanh activation on tensor t1.
/// Targeted at the CUDNN backend via __visc__hint; returns the activated
/// tensor plus a zero size, matching the (value, bytes) return convention
/// of the sibling nodes in this file.
void tensorTanhNode1(void *t1, size_t bytest1) {
  __visc__hint(visc::CUDNN_TARGET);
  // One tensor input, zero outputs declared via attributes.
  __visc__attributes(1, t1, 0);

  void *activated = __visc__tensor_tanh(t1);
  __visc__return(2, activated, (size_t) 0);
}
void root(void *x, size_t x_bytes, void root(void *x, size_t x_bytes,
void *conv1_w, size_t conv1_w_bytes, void *conv1_w, size_t conv1_w_bytes,
void *conv1_b, size_t conv1_b_bytes, void *conv1_b, size_t conv1_b_bytes,
...@@ -36,8 +53,10 @@ void root(void *x, size_t x_bytes, ...@@ -36,8 +53,10 @@ void root(void *x, size_t x_bytes,
__visc__hint(visc::CPU_TARGET); __visc__hint(visc::CPU_TARGET);
__visc__attributes(5, x, conv1_w, conv1_b, conv2_w, conv2_b, 0); __visc__attributes(5, x, conv1_w, conv1_b, conv2_w, conv2_b, 0);
void *nodeConv1 = __visc__createNodeND(0, tensorConvNode); void *nodeConv1 = __visc__createNodeND(0, tensorConvNode1);
void *nodeAdd = __visc__createNodeND(0, tensorAddNode); void *nodeAdd1 = __visc__createNodeND(0, tensorAddNode1);
void *nodePool1 = __visc__createNodeND(0, tensorPoolNode1);
void *nodeTanh1 = __visc__createNodeND(0, tensorTanhNode1);
// node, src, dst, stream // node, src, dst, stream
__visc__bindIn(nodeConv1, 0, 0, 0); __visc__bindIn(nodeConv1, 0, 0, 0);
...@@ -46,14 +65,24 @@ void root(void *x, size_t x_bytes, ...@@ -46,14 +65,24 @@ void root(void *x, size_t x_bytes,
__visc__bindIn(nodeConv1, 3, 3, 0); __visc__bindIn(nodeConv1, 3, 3, 0);
// node, node, type, src, dst, stream // node, node, type, src, dst, stream
__visc__edge(nodeConv1, nodeAdd, 1, 0, 0, 0); __visc__edge(nodeConv1, nodeAdd1, 1, 0, 0, 0);
__visc__edge(nodeConv1, nodeAdd, 1, 1, 1, 0); __visc__edge(nodeConv1, nodeAdd1, 1, 1, 1, 0);
__visc__bindIn(nodeAdd, 4, 2, 0); // parent_index, dest_index, bind_type
__visc__bindIn(nodeAdd, 5, 3, 0); __visc__bindIn(nodeAdd1, 4, 2, 0);
__visc__bindIn(nodeAdd1, 5, 3, 0);
__visc__bindOut(nodeAdd, 0, 0, 0); // node, node, type, src, dst, stream
__visc__bindOut(nodeAdd, 1, 1, 0); __visc__edge(nodeAdd1, nodePool1, 1, 0, 0, 0);
__visc__edge(nodeAdd1, nodePool1, 1, 1, 1, 0);
// node, node, type, src, dst, stream
__visc__edge(nodePool1, nodeTanh1, 1, 0, 0, 0);
__visc__edge(nodePool1, nodeTanh1, 1, 1, 1, 0);
__visc__bindOut(nodeTanh1, 0, 0, 0);
__visc__bindOut(nodeTanh1, 1, 1, 0);
} }
...@@ -67,12 +96,12 @@ struct ret_t { ...@@ -67,12 +96,12 @@ struct ret_t {
typedef struct __attribute__((__packed__)) { typedef struct __attribute__((__packed__)) {
void *x; void *x;
size_t x_bytes; size_t x_bytes;
// 1st Layer parameters
void *conv1_w; void *conv1_w;
size_t conv1_w_bytes; size_t conv1_w_bytes;
void *conv1_b; void *conv1_b;
size_t conv1_b_bytes; size_t conv1_b_bytes;
// 2nd Layer parameters
void *conv2_w; void *conv2_w;
size_t conv2_w_bytes; size_t conv2_w_bytes;
void *conv2_b; void *conv2_b;
...@@ -95,7 +124,7 @@ int main() { ...@@ -95,7 +124,7 @@ int main() {
printf("Reading Input Data from = %s \n", input_data_path.c_str()); printf("Reading Input Data from = %s \n", input_data_path.c_str());
void* x = readTrainedWeights(input_data_path.c_str(), float_type, void* x = readTrainedWeights(input_data_path.c_str(), float_type,
test_batch_size, 1, 28, 28); test_batch_size, 1, 28, 28);
void* conv1_w = readTrainedWeights(conv1_w_path.c_str(), float_type, 32, 1, 5, 5); void* conv1_w = readTrainedWeights(conv1_w_path.c_str(), float_type, 32, 1, 5, 5);
void* conv1_b = readTrainedWeights(conv1_b_path.c_str(), float_type, 1, 32, 1, 1); void* conv1_b = readTrainedWeights(conv1_b_path.c_str(), float_type, 1, 32, 1, 1);
void* conv2_w = readTrainedWeights(conv2_w_path.c_str(), float_type, 64, 32, 5, 5); void* conv2_w = readTrainedWeights(conv2_w_path.c_str(), float_type, 64, 32, 5, 5);
...@@ -115,7 +144,6 @@ int main() { ...@@ -115,7 +144,6 @@ int main() {
args->conv2_b = conv2_b; args->conv2_b = conv2_b;
args->conv2_b_bytes = 0; args->conv2_b_bytes = 0;
void *dfg = __visc__launch(0, root, (void *)args); void *dfg = __visc__launch(0, root, (void *)args);
__visc__wait(dfg); __visc__wait(dfg);
......
...@@ -95,6 +95,7 @@ float __visc__cos(float); ...@@ -95,6 +95,7 @@ float __visc__cos(float);
void* __visc__tensor_add(void*, void*); void* __visc__tensor_add(void*, void*);
void* __visc__tensor_mul(void*, void*); void* __visc__tensor_mul(void*, void*);
void* __visc__tensor_convolution(void*, void*, int, int, int, int); void* __visc__tensor_convolution(void*, void*, int, int, int, int);
void* __visc__tensor_pool_max(void*, int, int, int, int, int, int);
void* __visc__tensor_relu(void*); void* __visc__tensor_relu(void*);
void* __visc__tensor_tanh(void*); void* __visc__tensor_tanh(void*);
void* __visc__tensor_softmax(void*); void* __visc__tensor_softmax(void*);
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment