Commit 74b82169 authored by kotsifa2

Utility functions for wrapper API implementation - first cut. Compiles; still needs to be tested.

parent dd2263d2
@@ -10,5 +10,260 @@
// Utilities header for ApproxHPVM runtime API (wrapper runtime API)
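//
// Each handle*ApproximationTuples function below takes the vector of
// (approximation, parameter) tuples attached to a GPU node configuration
// for one tensor operation and dispatches to the matching tensor runtime
// call: FP32 selects the full-precision kernel, FP16 the half-precision one.
// The integer parameter of each tuple is read into `param` but is not used
// by the FP32/FP16 paths; additional approximation choices are left as
// TODOs. Only a single approximation tuple per operation is supported so
// far; the two-tuple and remaining cases are rejected with asserts.
//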
void* handleTensorAddApproximationTuples(
std::vector< std::pair<GPUNodeConfiguration::APPROX, int> > &approxTuples,
void* input, void* bias) {
if (approxTuples.size() == 1) {
enum GPUNodeConfiguration::APPROX approx = approxTuples[0].first;
int param = approxTuples[0].second;
switch (approx) {
case GPUNodeConfiguration::APPROX::FP32 :
return tensorAdd(input, bias);
case GPUNodeConfiguration::APPROX::FP16 :
return tensorHalfAdd(input, bias);
default :
assert(false && "Unknown approximation type");
// TODO: additional approximation methods to be implemented here
}
} else if (approxTuples.size() == 2) {
assert(false && "Currently unsupported case");
} else {
assert(false && "Unsupported case");
}
return NULL;
}
void* handleTensorMulApproximationTuples(
std::vector< std::pair<GPUNodeConfiguration::APPROX, int> > &approxTuples,
void* lhs, void* rhs) {
if (approxTuples.size() == 1) {
enum GPUNodeConfiguration::APPROX approx = approxTuples[0].first;
int param = approxTuples[0].second;
switch (approx) {
case GPUNodeConfiguration::APPROX::FP32 :
return tensorGemmGPU(lhs, rhs);
case GPUNodeConfiguration::APPROX::FP16 :
return tensorHalfGemmGPU(lhs, rhs);
default :
assert(false && "Unknown approximation type");
// TODO: additional approximation methods to be implemented here
}
} else if (approxTuples.size() == 2) {
assert(false && "Currently unsupported case");
} else {
assert(false && "Unsupported case");
}
return NULL;
}
void* handleTensorConvApproximationTuples(
std::vector< std::pair<GPUNodeConfiguration::APPROX, int> > &approxTuples,
void* input, void* filter,
int conv_pad_h, int conv_pad_w,
int conv_stride_h, int conv_stride_w) {
if (approxTuples.size() == 1) {
enum GPUNodeConfiguration::APPROX approx = approxTuples[0].first;
int param = approxTuples[0].second;
switch (approx) {
case GPUNodeConfiguration::APPROX::FP32 :
return tensorConvolution(input, filter,
conv_pad_h, conv_pad_w,
conv_stride_h, conv_stride_w,
1, 1);
case GPUNodeConfiguration::APPROX::FP16 :
return tensorHalfConvolution(input, filter,
conv_pad_h, conv_pad_w,
conv_stride_h, conv_stride_w,
1, 1);
default :
assert(false && "Unknown approximation type");
// TODO: additional approximation methods to be implemented here
}
} else if (approxTuples.size() == 2) {
assert(false && "Currently unsupported case");
} else {
assert(false && "Unsupported case");
}
return NULL;
}
void* handleTensorGroupConvApproximationTuples(
std::vector< std::pair<GPUNodeConfiguration::APPROX, int> > &approxTuples,
void* input, void* filter,
int vertical_pad, int horizontal_pad,
int vertical_stride, int horizontal_stride,
int conv_mode, int conv_groups) {
if (approxTuples.size() == 1) {
enum GPUNodeConfiguration::APPROX approx = approxTuples[0].first;
int param = approxTuples[0].second;
switch (approx) {
case GPUNodeConfiguration::APPROX::FP32 :
return tensorConvolution(input, filter,
vertical_pad, horizontal_pad,
vertical_stride, horizontal_stride,
conv_mode, conv_groups);
case GPUNodeConfiguration::APPROX::FP16 :
return tensorHalfConvolution(input, filter,
vertical_pad, horizontal_pad,
vertical_stride, horizontal_stride,
conv_mode, conv_groups);
default :
assert(false && "Unknown approximation type");
// TODO: additional approximation methods to be implemented here
}
} else if (approxTuples.size() == 2) {
assert(false && "Currently unsupported case");
} else {
assert(false && "Unsupported case");
}
return NULL;
}
void* handleTensorBatchNormApproximationTuples(
std::vector< std::pair<GPUNodeConfiguration::APPROX, int> > &approxTuples,
void* input_ptr, void* gamma_ptr, void* beta_ptr,
void* mean_ptr, void* variance_ptr, double epsilon) {
if (approxTuples.size() == 1) {
enum GPUNodeConfiguration::APPROX approx = approxTuples[0].first;
int param = approxTuples[0].second;
switch (approx) {
case GPUNodeConfiguration::APPROX::FP32 :
return tensorBatchNorm(input_ptr, gamma_ptr, beta_ptr,
mean_ptr, variance_ptr, epsilon);
case GPUNodeConfiguration::APPROX::FP16 :
return tensorHalfBatchNorm(input_ptr, gamma_ptr, beta_ptr,
mean_ptr, variance_ptr, epsilon);
default :
assert(false && "Unknown approximation type");
// TODO: additional approximation methods to be implemented here
}
} else if (approxTuples.size() == 2) {
assert(false && "Currently unsupported case");
} else {
assert(false && "Unsupported case");
}
return NULL;
}
void* handleTensorReluApproximationTuples(
std::vector< std::pair<GPUNodeConfiguration::APPROX, int> > &approxTuples,
void* input) {
if (approxTuples.size() == 1) {
enum GPUNodeConfiguration::APPROX approx = approxTuples[0].first;
int param = approxTuples[0].second;
switch (approx) {
case GPUNodeConfiguration::APPROX::FP32 :
return tensorRelu(input);
case GPUNodeConfiguration::APPROX::FP16 :
return tensorHalfRelu(input);
default :
assert(false && "Unknown approximation type");
// TODO: additional approximation methods to be implemented here
}
} else if (approxTuples.size() == 2) {
assert(false && "Currently unsupported case");
} else {
assert(false && "Unsupported case");
}
return NULL;
}
void* handleTensorClippedReluApproximationTuples(
std::vector< std::pair<GPUNodeConfiguration::APPROX, int> > &approxTuples,
void* input, float min, float max) {
if (approxTuples.size() == 1) {
enum GPUNodeConfiguration::APPROX approx = approxTuples[0].first;
int param = approxTuples[0].second;
switch (approx) {
case GPUNodeConfiguration::APPROX::FP32 :
return tensorRelu2(input, min, max);
case GPUNodeConfiguration::APPROX::FP16 :
return tensorHalfRelu2(input, min, max);
default :
assert(false && "Unknown approximation type");
// TODO: additional approximation methods to be implemented here
}
} else if (approxTuples.size() == 2) {
assert(false && "Currently unsupported case");
} else {
assert(false && "Unsupported case");
}
return NULL;
}
void* handleTensorTanhApproximationTuples(
std::vector< std::pair<GPUNodeConfiguration::APPROX, int> > &approxTuples,
void* input) {
if (approxTuples.size() == 1) {
enum GPUNodeConfiguration::APPROX approx = approxTuples[0].first;
int param = approxTuples[0].second;
switch (approx) {
case GPUNodeConfiguration::APPROX::FP32 :
return tensorTanh(input);
case GPUNodeConfiguration::APPROX::FP16 :
return tensorHalfTanh(input);
default :
assert(false && "Unknown approximation type");
// TODO: additional approximation methods to be implemented here
}
} else if (approxTuples.size() == 2) {
assert(false && "Currently unsupported case");
} else {
assert(false && "Unsupported case");
}
return NULL;
}
void* handleTensorPoolingApproximationTuples(
std::vector< std::pair<GPUNodeConfiguration::APPROX, int> > &approxTuples,
void* input_ptr, int poolFunction,
int window_height, int window_width,
int vertical_pad, int horizontal_pad,
int vertical_stride, int horizontal_stride) {
if (approxTuples.size() == 1) {
enum GPUNodeConfiguration::APPROX approx = approxTuples[0].first;
int param = approxTuples[0].second;
switch (approx) {
case GPUNodeConfiguration::APPROX::FP32 :
return tensorPooling(input_ptr,
poolFunction,
window_height, window_width,
vertical_pad, horizontal_pad,
vertical_stride, horizontal_stride);
case GPUNodeConfiguration::APPROX::FP16 :
return tensorHalfPooling(input_ptr,
poolFunction,
window_height, window_width,
vertical_pad, horizontal_pad,
vertical_stride, horizontal_stride);
default :
assert(false && "Unknown approximation type");
// TODO: additional approximation methods to be implemented here
}
} else if (approxTuples.size() == 2) {
assert(false && "Currently unsupported case");
} else {
assert(false && "Unsupported case");
}
return NULL;
}
void* handleTensorSoftmaxApproximationTuples(
std::vector< std::pair<GPUNodeConfiguration::APPROX, int> > &approxTuples,
void* input_ptr) {
// TODO: if approximation choices are added for the softmax operation,
// implement this like the other handle* functions
return tensorSoftmax(input_ptr);
}
#endif
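
A minimal usage sketch of the dispatch helpers above, assuming `input` is a tensor handle obtained from the ApproxHPVM tensor runtime; the tuple's integer parameter (0 here) is not used by the FP32/FP16 paths:

// Illustrative only: run a ReLU with a single FP16 approximation choice.
std::vector< std::pair<GPUNodeConfiguration::APPROX, int> > approxTuples;
approxTuples.push_back(std::make_pair(GPUNodeConfiguration::APPROX::FP16, 0));
void* result = handleTensorReluApproximationTuples(approxTuples, input);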