Commit d504c474 authored by Hashim Sharif

Removing some unused unit tests

parent 28cbe932
@@ -83,56 +83,9 @@ void testTensorSgemm(){
void testTensorGemmGPU(){

  printf("***** TensorGemmGPU ***** \n\n");

  void* lhs_ptr = create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 5, 4, 1, 1);
  struct Tensor* lhs = (struct Tensor*) lhs_ptr;
  fillTensorWithOnes(lhs);

  // every element of row r of the 5x4 LHS gets the value r + 1
  float* data_arr = (float*) lhs->host_data;
  for(int i = 0; i < lhs->num_elems; i++){
    data_arr[i] = (i / 4) + 1;
  }

  void* rhs = create4DTensor(CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, 4, 3);
  fillTensorWithOnes(rhs);

  void* output = tensorGemmGPU(lhs, rhs);
  printTensorValues(output);

  void* bias_ptr = create4DTensor(CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 3, 1, 1);
  struct Tensor* bias = (struct Tensor*) bias_ptr;
  fillTensorWithOnes(bias);

  // bias values 1, 2, 3 across the three output columns
  float* bias_arr = (float*) bias->host_data;
  for(int i = 0; i < bias->num_elems; i++){
    bias_arr[i] = i + 1;
  }

  void* output2 = tensorAdd(output, bias);
  printTensorValues(output2);
}
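
(For reference: a minimal CPU-only sketch of the product this test exercises, kept separate from the runtime API. Each row of the 5x4 LHS holds the value row + 1, the 4x3 RHS is all ones, and a bias of 1, 2, 3 is added per output column, so entry (i, j) should come out as 4*(i+1) + bias[j].)

// Standalone illustrative sketch; does not call the tensor runtime.
#include <cstdio>

int main(){
  const int M = 5, K = 4, N = 3;
  float lhs[M][K], rhs[K][N], bias[N] = {1, 2, 3}, out[M][N];

  for(int i = 0; i < M; i++)
    for(int k = 0; k < K; k++)
      lhs[i][k] = (float)(i + 1);      // matches data_arr[i] = (i / 4) + 1

  for(int k = 0; k < K; k++)
    for(int j = 0; j < N; j++)
      rhs[k][j] = 1.0f;                // matches fillTensorWithOnes(rhs)

  for(int i = 0; i < M; i++){
    for(int j = 0; j < N; j++){
      float sum = 0.0f;
      for(int k = 0; k < K; k++)
        sum += lhs[i][k] * rhs[k][j];
      out[i][j] = sum + bias[j];       // expected: 4*(i+1) + bias[j]
      printf("%5.1f ", out[i][j]);
    }
    printf("\n");
  }
  return 0;
}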
void testTensorGemmBias(){

  // NOTE: 2nd dim of bias and d2*d3*d4 for the input tensor MUST match
  printf("***** TensorGemmBias ***** \n\n");

  void* input = create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2, 1, 2, 2);
  fillTensorWithOnes(input);

  void* bias = create2DTensor(CUDNN_DATA_FLOAT, 1, 4);
  fillTensorWithOnes(bias);

  void* output = tensorGemmBias(input, bias);
  printTensorValues(output);
}
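
(The NOTE above is the constraint this test targets: the bias's second dimension must equal d2*d3*d4 of the 4D input, here 1*2*2 = 4 against the 1x4 bias. A standalone sketch of that check, independent of the tensor runtime:)

#include <cassert>
#include <cstddef>

int main(){
  size_t in_dims[4]   = {2, 1, 2, 2};  // N, C, H, W of the test input
  size_t bias_dims[2] = {1, 4};        // shape passed to create2DTensor

  // d2 * d3 * d4 of the input must equal the bias's 2nd dimension
  assert(in_dims[1] * in_dims[2] * in_dims[3] == bias_dims[1]);
  return 0;
}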
void testTensorConv2(){
void testTensorConcatAndSplit(){
  int conv_mode = 1;         // CROSS_CORRELATION mode
  int compute_precision = 0; // floating point precision
@@ -166,49 +119,6 @@ void testTensorConv2(){
void testTensorConv3(){

  int conv_mode = 1;         // CROSS_CORRELATION mode
  int compute_precision = 0; // floating point precision

  void* input = create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2, 96, 28, 28);
  fillTensorWithOnes(input);

  void** splits = tensorSplit(input, 2, 1);

  void* conv2W = readTrainedWeights("../alexnet/params/conv2.bin",
                                    CUDNN_DATA_FLOAT, 256, 48, 5, 5);
  void** conv2fils = tensorSplit(conv2W, 2, 0);

  void* conv2a_out = tensorConvolution(splits[0], conv2fils[0], 2, 2,
                                       1, 1, conv_mode, compute_precision);
  printTensorDims(conv2a_out);

  void* conv2b_out = tensorConvolution(splits[1], conv2fils[1], 2, 2,
                                       1, 1, conv_mode, compute_precision);
  printTensorDims(conv2b_out);

  void* conv2_outs[2];
  conv2_outs[0] = conv2a_out;
  conv2_outs[1] = conv2b_out;

  void* conv2_concat_out = tensorConcat(conv2_outs, 2, 1);
  printTensorDims(conv2_concat_out);
  //printTensorValues(conv2_concat_out);
  dumpWeightsToFile("tensors_out/conv2_test.out", conv2_concat_out);

  void* conv2bias = readTrainedWeights("../alexnet/params/conv2.bias.bin",
                                       CUDNN_DATA_FLOAT, 1, 256, 1, 1);
  void* conv2bias_out = tensorAdd(conv2_concat_out, conv2bias);
  printTensorDims(conv2bias_out);

  dumpWeightsToFile("tensors_out/conv2_bias_test.out", conv2bias_out);
}
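
(Shape arithmetic behind this test, assuming the 2, 2 arguments to tensorConvolution are padding and the 1, 1 arguments are strides: the 2x96x28x28 input is split into two 2x48x28x28 halves along channels, the 256x48x5x5 weights into two 128-filter groups, and each half convolves to 2x128x28x28 before the channel-wise concat restores 2x256x28x28. A standalone sketch of that arithmetic:)

#include <cstdio>

// standard output size for a padded, strided convolution
int conv_out(int in, int pad, int k, int stride){
  return (in + 2 * pad - k) / stride + 1;
}

int main(){
  int H = conv_out(28, 2, 5, 1);  // = 28
  int W = conv_out(28, 2, 5, 1);  // = 28
  printf("per-half output: 2 x 128 x %d x %d\n", H, W);
  printf("after concat:    2 x 256 x %d x %d\n", H, W);
  return 0;
}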
@@ -857,92 +767,6 @@ void testPerforation(){
/*void testPerforation(){

  printf("***** Testing Perforation ***** \n\n");

  Tensor* input = (Tensor*) create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1, 1, 6, 6);
  fillTensorWithVal(input, 3);
  //fillWithOnesAndTwos(input);

  Tensor* filter = (Tensor*) create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1, 1, 3, 3);
  fillTensorWithVal(filter, 2);

  float* host_ptr = (float*) ((struct Tensor*) input)->host_data;
  // zero out the first row of the 6x6 input (elements 0..5)
  host_ptr[0] = 0;
  host_ptr[1] = 0;
  host_ptr[2] = 0;
  host_ptr[3] = 0;
  host_ptr[4] = 0;
  host_ptr[5] = 0;
  //printTensorValues(input);

  void* res = tensorConvPerfCuda(input, filter, 0, 0, 1, 1, 1, 1, 2, 1, 1);
  printTensorValues(res);

  void* res2 = tensorConvApproxHalf2(input, filter, 0, 0, 1, 1, 1, 1, 2, 1, 1, 1);
  //void* res2 = tensorConvPerfCuda(input, filter, 0, 0, 1, 1, 1, 1, 2, 1, 0);
  convertToFP32((struct Tensor*) res2);
  printTensorValues(res2);

  void* res3 = tensorConvApprox(input, filter, 0, 0, 1, 1, 1, 1, 2, 1, 1, 1);
  printTensorValues(res3);

  //void* res4 = tensorConvApprox(input, filter, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0);
  //printTensorValues(res4);
}
*/
void testPerforation2(){

  printf("***** Testing Perforation ***** \n\n");

  Tensor* input = (Tensor*) create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1, 1, 8, 8);
  fillTensorWithVal(input, 3);
  //fillWithOnesAndTwos(input);

  Tensor* filter = (Tensor*) create4DTensor(CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1, 1, 3, 3);
  fillTensorWithVal(filter, 2);

  float* host_ptr = (float*) ((struct Tensor*) input)->host_data;

  // zero out row 2 of the 8x8 input (elements 16..23)
  host_ptr[16] = 0;
  host_ptr[17] = 0;
  host_ptr[18] = 0;
  host_ptr[19] = 0;
  host_ptr[20] = 0;
  host_ptr[21] = 0;
  host_ptr[22] = 0;
  host_ptr[23] = 0;

  // zero out row 7, the last row (elements 56..63)
  host_ptr[56] = 0;
  host_ptr[57] = 0;
  host_ptr[58] = 0;
  host_ptr[59] = 0;
  host_ptr[60] = 0;
  host_ptr[61] = 0;
  host_ptr[62] = 0;
  host_ptr[63] = 0;

  void* res2 = tensorConvApproxHalf2(input, filter, 0, 0, 1, 1, 1, 1, 4, 1, 1, 1);
  convertToFP32((struct Tensor*) res2);
  printTensorValues(res2);
}
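
(A plain-CPU reference for the FP32 baseline of this input, independent of how tensorConvApproxHalf2 perforates or samples: a 3x3 all-2 filter over the 8x8 all-3 input with rows 2 and 7 zeroed, no padding, stride 1, which yields a 6x6 output.)

#include <cstdio>

int main(){
  const int H = 8, W = 8, K = 3;
  float in[H][W], filt = 2.0f;   // filter is uniformly 2, so a scalar suffices

  for(int r = 0; r < H; r++)
    for(int c = 0; c < W; c++)
      in[r][c] = (r == 2 || r == 7) ? 0.0f : 3.0f;  // rows zeroed as in the test

  // valid convolution: output is (H-K+1) x (W-K+1) = 6x6
  for(int r = 0; r <= H - K; r++){
    for(int c = 0; c <= W - K; c++){
      float sum = 0.0f;
      for(int i = 0; i < K; i++)
        for(int j = 0; j < K; j++)
          sum += in[r + i][c + j] * filt;
      printf("%5.1f ", sum);
    }
    printf("\n");
  }
  return 0;
}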
@@ -1376,14 +1200,31 @@ void testNewTensorOps(){
class UnitTestResults{

private:

  unsigned int total_tests;
  unsigned int failed_tests;
  unsigned int passed_tests;

public:

  UnitTestResults(){
    total_tests = 0;
    failed_tests = 0;
    passed_tests = 0;
  }

};
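
(As committed, the class only zero-initializes its counters. One hypothetical way such a tracker could be fleshed out; the addResult and printSummary methods below are illustrative assumptions, not part of this change:)

#include <cstdio>

class UnitTestResultsSketch{
private:
  unsigned int total_tests;
  unsigned int failed_tests;
  unsigned int passed_tests;

public:
  UnitTestResultsSketch() : total_tests(0), failed_tests(0), passed_tests(0) {}

  // hypothetical: record one test outcome
  void addResult(bool passed){
    total_tests++;
    if(passed)
      passed_tests++;
    else
      failed_tests++;
  }

  // hypothetical: print a one-line summary
  void printSummary(){
    printf("%u/%u tests passed (%u failed)\n",
           passed_tests, total_tests, failed_tests);
  }
};

int main(){
  UnitTestResultsSketch results;
  results.addResult(true);
  results.addResult(false);
  results.printSummary();  // 1/2 tests passed (1 failed)
  return 0;
}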
int main(){

  llvm_hpvm_initTensorRt(0);

  startProfiling();

  // Function call per unit test
  testTensorHgemm();
  testTensorSgemm();
@@ -1405,44 +1246,20 @@ int main(){
  testPerforation();

  //testTensorError();
  //testQuantization();
  //testTensorGemm();
  //testTensorGemmGPU();
  //testTensorGemmBias();
  //testTensorConv2();
  //testTensorConv3();
  //testLRN();
  //testSampleFilter();

  //-- testPerforation();
  // testPerforation2();

  /********* SAMPLING TESTS ****
   *************/

  //testNewTensorOps();
  //testQuantization();

  // testTensorError();
  // testQuantization();
  // testTensorGemm();
  // testTensorGemmGPU();
  // testTensorGemmBias();
  // testTensorConv2();
  // testTensorConv3();
  // testLRN();
  // testSampleFilter();
  // testNewTensorOps();
  // testQuantization();
  // testPromiseError();

  stopProfiling();

  return 0;
}