Commit 4c7402b3 authored by Hashim Sharif

Cleaning up Wrapper Runtime prints

parent 16585eb6
@@ -145,7 +145,7 @@ extern "C"{
   switch (activation_id) {
   case -1:
   { // No activation
-    INFO("No activation Function\n");
+    //INFO("No activation Function\n");
     activation_out = add_out;
   }
   break;
@@ -259,6 +259,8 @@ extern "C"{
   // NOTE: out_min, out_max are only relevant for ClippedRelu
   float out_min, float out_max){
+
+  INFO ("*** Conv Layer \n");
   NodeConfiguration *NodeConf = RC->getNodeConfiguration(hpvm_node_id);
   if (NodeConf->isPROMISENodeConfiguration()) {
@@ -333,9 +335,9 @@ extern "C"{
     GPUConf->getApproxChoices();
-  printf("*** Convolution \n ApproxChoice = %d \n BatchNorm = %d \n CONV = %d \n", ApproxChoices[0].first,
-         GPUNodeConfiguration::TENSOR_OP::BATCHNORM,
-         GPUNodeConfiguration::TENSOR_OP::CONV);
+  //printf("*** Convolution \n ApproxChoice = %d \n BatchNorm = %d \n CONV = %d \n", ApproxChoices[0].first,
+  //       GPUNodeConfiguration::TENSOR_OP::BATCHNORM,
+  //       GPUNodeConfiguration::TENSOR_OP::CONV);
   // Check for convolution as first operation
   CUSTOM_ASSERT((ApproxChoices.size() >= 1) &&
@@ -363,7 +365,7 @@ extern "C"{
   switch (activation_id) {
   case -1:
   { // No activation
-    INFO("No activation Function\n");
+    //INFO("No activation Function\n");
     activation_out = add_out;
   }
   break;
@@ -411,13 +413,6 @@ extern "C"{
   // If we remove the asserts, we can have all cases handled by a single call
   CUSTOM_ASSERT((ApproxChoices.back().first == GPUNodeConfiguration::TENSOR_OP::POOL_MAX) &&
                 "Expected POOL_MAX in provided Conv layer configuration");
-  /*pool_out =
-    handleTensorPoolingApproximationTuples(ApproxChoices.back().second,
-                                           activation_out, pool_id,
-                                           pool_size, pool_size, 0, 0,
-                                           pool_size, pool_size);
-  */
-
   pool_out = handleTensorPoolingApproximationTuples(ApproxChoices.back().second,
                                                     activation_out, pool_id,
@@ -488,6 +483,8 @@ extern "C"{
   // NOTE: out_min and out_max are only relevant for ClippedRelu
   float out_min, float out_max){
+
+  INFO ("*** Dense Layer \n");
   NodeConfiguration *NodeConf = RC->getNodeConfiguration(hpvm_node_id);
   if (NodeConf->isPROMISENodeConfiguration()) {
@@ -573,7 +570,7 @@ extern "C"{
   { // No activation
     CUSTOM_ASSERT((ApproxChoices.size() == 2) &&
                   "Incorrect number of operations in provided FC layer configuration");
-    INFO("No activation Function\n");
+    //INFO("No activation Function\n");
     activation_out = add_out;
   }
   break;
@@ -625,8 +622,9 @@ extern "C"{
 void* wrapper_tensorRelu(const char* hpvm_node_id, void* input_ptr){
   // return tensorRelu(input_ptr);
+  INFO("*** Relu Operation \n");
   // Only mapped to GPU - get a GPU configuration
   GPUNodeConfiguration *GPUConf =
     (GPUNodeConfiguration *)RC->getNodeConfiguration(hpvm_node_id);
@@ -693,7 +691,8 @@ extern "C"{
 void* wrapper_tensorBatchNorm(const char* hpvm_node_id,
                               void* input_ptr, void* gamma_ptr, void* beta_ptr,
                               void* mean_ptr, void* variance_ptr, double epsilon){
+  INFO("*** BatchNorm Operation \n");
   // Only mapped to GPU - get a GPU configuration
   GPUNodeConfiguration *GPUConf =
@@ -704,11 +703,10 @@ extern "C"{
                        int> > > > &ApproxChoices =
     GPUConf->getApproxChoices();
-  printf("*** BatchNorm \n ApproxChoice = %d \n BatchNorm = %d \n CONV = %d \n", ApproxChoices[0].first,
-         GPUNodeConfiguration::TENSOR_OP::BATCHNORM,
-         GPUNodeConfiguration::TENSOR_OP::CONV);
-
+  // printf("*** BatchNorm \n ApproxChoice = %d \n BatchNorm = %d \n CONV = %d \n", ApproxChoices[0].first,
+  //        GPUNodeConfiguration::TENSOR_OP::BATCHNORM,
+  //        GPUNodeConfiguration::TENSOR_OP::CONV);
   // Approximation choices must be for a batchnorm operation
   CUSTOM_ASSERT(ApproxChoices.size() == 1 &&
@@ -723,8 +721,8 @@ extern "C"{
 void* wrapper_tensorAdd(const char* hpvm_node_id, void* input_ptr, void* bias_ptr){
   // return tensorAdd(input_ptr, bias_ptr);
   // Only mapped to GPU - get a GPU configuration
   GPUNodeConfiguration *GPUConf =
     (GPUNodeConfiguration *)RC->getNodeConfiguration(hpvm_node_id);
@@ -753,6 +751,8 @@ extern "C"{
                           int vertical_pad, int horizontal_pad,
                           int vertical_stride, int horizontal_stride){
+
+  INFO("*** TensorPooling Operation \n");
   // return tensorPooling(input_ptr, poolFunction, window_height, window_width,
   //                      vertical_pad, horizontal_pad, vertical_stride, horizontal_stride);
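
This commit silences the wrapper prints by commenting out each INFO/printf call individually. A common alternative, sketched below under assumptions (the RUNTIME_VERBOSE flag and this INFO definition are hypothetical and not taken from the HPVM runtime, which has its own INFO macro), is to gate such messages behind a compile-time verbosity flag so they can be re-enabled without editing every call site:

// Sketch only: gate runtime prints behind a build flag instead of
// commenting them out one by one. RUNTIME_VERBOSE and this INFO
// definition are assumptions for illustration.
#include <cstdio>

#ifdef RUNTIME_VERBOSE
#define INFO(...) std::printf(__VA_ARGS__)
#else
#define INFO(...) ((void)0)  // expands to a no-op in quiet builds
#endif

int main() {
  INFO("*** Conv Layer \n");  // prints only when built with -DRUNTIME_VERBOSE
  return 0;
}

Built with -DRUNTIME_VERBOSE the messages appear; in a default build the macro expands to a no-op, so the call sites stay in place with no runtime cost.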