Commit 582639e7 authored by Hashim Sharif

Handling visc.node.id and selecting NodeConfigs using IDs

parent d26d1329
@@ -416,9 +416,18 @@ std::vector<float> &RuntimeController::getQuantizationRanges(const char *data) {
 }
 
 NodeConfiguration *RuntimeController::getNodeConfiguration(const char *data) {
-  std::string s(data);
-  // All nodes are expected to have a configuration
-  return (*Configurations)[configurationIdx]->setup.at(s);
+  // if visc.node.id is not specified for this HPVM Node
+  if (currentTensorID == -1) {
+    std::string s(data);
+    // All nodes are expected to have a configuration
+    return (*Configurations)[configurationIdx]->setup.at(s);
+  }
+  else {
+    DEBUG("-- currentTensorID = %u \n", currentTensorID);
+    return (*Configurations)[configurationIdx]->idConfigMap.at(currentTensorID);
+  }
 }
 
 void RuntimeController::init(const char *Cstr, const char *Qstr) {
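To make the selection logic above concrete, here is a standalone sketch of the two lookup paths. The types and values are simplified assumptions for illustration; the real NodeConfiguration, setup, and idConfigMap live on the runtime controller's configuration objects.

#include <cstdio>
#include <map>
#include <string>

// Simplified stand-in for the runtime's NodeConfiguration.
struct NodeConfiguration { int approxLevel; };

// Stand-ins for the active configuration's two lookup tables.
static std::map<std::string, NodeConfiguration*> setup;     // keyed by node name
static std::map<unsigned, NodeConfiguration*> idConfigMap;  // keyed by visc.node.id
static int currentTensorID = -1;  // -1 means no visc.node.id was attached

NodeConfiguration* getNodeConfiguration(const char* data) {
  if (currentTensorID == -1)
    return setup.at(std::string(data));    // fall back to name-keyed lookup
  return idConfigMap.at(currentTensorID);  // visc.node.id selected an entry
}

int main() {
  NodeConfiguration byName{0}, byID{1};
  setup["conv1"] = &byName;
  idConfigMap[3] = &byID;

  printf("%d\n", getNodeConfiguration("conv1")->approxLevel); // prints 0
  currentTensorID = 3;
  printf("%d\n", getNodeConfiguration("conv1")->approxLevel); // prints 1
  return 0;
}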
@@ -810,7 +819,7 @@ void RuntimeController::readConfigurationFile(const char *str) {
       // FIXME: Do same for CPU and PROMISE configs
       InitialConfigurations.back().idConfigMap.insert(
           std::make_pair(firstTensorID, NodeConf));
-      printf ("*** firstTensorID = %d \n\n", firstTensorID);
+      DEBUG("*** firstTensorID = %d \n\n", firstTensorID);
 
       unsigned idx = 2;
       while (idx < tokens.size()) {
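The printf-to-DEBUG change above routes the message through the runtime's logging macro from ../include/debug.h. A minimal sketch of that common pattern follows; the guard name and definition here are assumptions, not the actual macro.

#include <cstdio>

// Hypothetical sketch: DEBUG prints to stderr in debug builds and compiles
// to nothing otherwise, so release builds pay no logging cost.
#ifdef RUNTIME_DEBUG  // assumed compile-time switch
  #define DEBUG(...) fprintf(stderr, __VA_ARGS__)
#else
  #define DEBUG(...)
#endif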
This diff is collapsed. The file's full contents are shown below.
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cublas_v2.h>
#include <cudnn.h>
#include <cublas_api.h>
#include <cuda_fp16.h>
#include <driver_types.h>
// Tensor runtime header files
#include "../include/tensor_runtime.h"
#include "../include/tensor_utils.h"
#include "../include/debug.h"
#include "../include/profiling.h"
#include "../include/fp16_conversion.h"
#include "../include/global_data.h"
#include "../include/error.h"
#include "../include/tensor.h"
#include "../include/op_overheads.h"
#include "../include/half_precision_api.h"
/*********** Generic Layer API **************/
void* ConvLayerWrapper(void* input,
                       void* filter,
                       void* bias,
                       int conv_pad_h, int conv_pad_w,
                       int conv_stride_h, int conv_stride_w,
                       int pool_id, int pool_size,
                       int activation_id,
                       // NOTE: out_min, out_max are only relevant for ClippedRelu
                       float out_min, float out_max) {

  void* conv_out = tensorConvolution(input, filter,
                                     conv_pad_h, conv_pad_w,
                                     conv_stride_h, conv_stride_w,
                                     1, 0);
  void* conv_add = tensorAdd(conv_out, bias);

  void* pool_out;
  // NOTE: Pooling is skipped when pool_size is not greater than 0
  if (pool_size > 0) {
    // FIXME: Currently only using MaxPooling
    pool_out = tensorPooling(conv_add, 0, pool_size, pool_size,
                             0, 0, pool_size, pool_size);
  }
  else {
    pool_out = conv_add;
  }

  void* activation_out;
  switch (activation_id) {
  case -1:
    activation_out = pool_out;
    INFO("NO Activation Function \n");
    break;
  case 0:
    activation_out = tensorTanh(pool_out);
    break;
  case 1:
    activation_out = tensorRelu(pool_out);
    break;
  case 2:
    activation_out = tensorRelu2(pool_out, out_min, out_max);
    break;
  default:
    ERROR("Activation id %d NOT supported \n", activation_id);
    break;
  }

  return activation_out;
}
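A hedged call sketch, assuming the input, filter, and bias tensors were already created through the tensor runtime:

// Hypothetical call: 3x3 convolution with padding 1 and stride 1, followed
// by a 2x2 max pool and ReLU; out_min/out_max are ignored for plain ReLU.
void* out = ConvLayerWrapper(input, filter, bias,
                             1, 1,        // conv_pad_h, conv_pad_w
                             1, 1,        // conv_stride_h, conv_stride_w
                             0, 2,        // pool_id (max), pool_size
                             1,           // activation_id: ReLU
                             0.0f, 0.0f); // unused for ReLU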
void* FCLayerWrapper(void* input,
                     void* weights,
                     void* bias,
                     int activation_id,
                     // NOTE: out_min and out_max are only relevant for ClippedRelu
                     float out_min, float out_max) {

  void* gemm_out = tensorGemmGPU(input, weights);
  void* gemmbias_out = tensorAdd(gemm_out, bias);

  void* activation_out;
  switch (activation_id) {
  case -1:
    activation_out = gemmbias_out;
    INFO("No Activation Function \n");
    break;
  case 0:
    activation_out = tensorTanh(gemmbias_out);
    break;
  case 1:
    activation_out = tensorRelu(gemmbias_out);
    break;
  case 2:
    activation_out = tensorRelu2(gemmbias_out, out_min, out_max);
    break;
  default:
    ERROR("Activation id %d NOT supported \n", activation_id);
    break;
  }

  return activation_out;
}
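A matching call sketch for the fully-connected wrapper, again assuming the tensors already exist:

// Hypothetical call: fully-connected layer followed by ClippedRelu
// (activation_id 2), the only case that reads out_min/out_max.
void* out = FCLayerWrapper(input, weights, bias,
                           2,            // activation_id: ClippedRelu
                           0.0f, 6.0f);  // clip range [0, 6]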