Skip to content
Snippets Groups Projects
Commit 795c684c authored by Hashim Sharif's avatar Hashim Sharif
Browse files

moving more outdated files to ./legacy

parent d1f4ec73
No related branches found
No related tags found
No related merge requests found
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "../../tensor_runtime/include/tensor_runtime.h"
#include "../include/utils.h"
/* Reference LeNet-2 architecture: runs the full MNIST test set through the
 * trained network once, wrapped in the runtime's power/performance profiler,
 * then reports classification accuracy. Used as a profiling baseline. */
void testLenet2Arch(){

  printf("********* Lenet-2 Architecture ********** \n");

  // Full MNIST test set — 10k 1x28x28 grayscale images in one batch.
  int batch_size = 10000;
  void* images = readInputTensor("../model_params/lenet_params/datasets/t10k-images-idx3-ubyte",
				 CUDNN_DATA_FLOAT,
				 batch_size, 1, 28, 28);

  /* Load trained parameters. Filter tensors carry no batch dimension:
   * dims are (out_channels, in_channels, kH, kW), and in_channels must
   * match the producing layer. Channel counts follow the trained model,
   * not the LeNet variant from Andrew Ng's course. */
  void* w_conv1 = readTrainedWeights("../model_params/lenet2_params/conv1.bin",
				     float_type, 32, 1, 5, 5);
  void* b_conv1 = readTrainedWeights("../model_params/lenet2_params/conv1_bias.bin",
				     float_type, 1, 32, 1, 1);
  void* w_conv2 = readTrainedWeights("../model_params/lenet2_params/conv2.bin",
				     float_type, 64, 32, 5, 5);
  void* b_conv2 = readTrainedWeights("../model_params/lenet2_params/conv2_bias.bin",
				     float_type, 1, 64, 1, 1);
  void* w_fc1 = readTrainedWeights("../model_params/lenet2_params/fc1.bin",
				   float_type, 1, 1, 7*7*64, 1024);
  void* b_fc1 = readTrainedWeights("../model_params/lenet2_params/fc1_bias.bin",
				   float_type, 1, 1024, 1, 1);
  void* w_fc2 = readTrainedWeights("../model_params/lenet2_params/fc2.bin",
				   float_type, 1, 1, 1024, 10);
  void* b_fc2 = readTrainedWeights("../model_params/lenet2_params/fc2_bias.bin",
				   float_type, 1, 10, 1, 1);

  // Begin power/performance profiling around the inference pipeline.
  startProfiling();

  int conv_mode = 1;       // CROSS_CORRELATION
  int conv_precision = 0;  // float compute precision. FIXIT: use enum

  /* --- Conv block 1: 'SAME' convolution + bias + ReLU + 2x2 max-pool --- */
  void* c1 = tensorConvolution(images, w_conv1, 2, 2, 1, 1,
			       conv_mode, conv_precision);
  // tensorAdd operates in place; only the channel dimension must match.
  tensorAdd(c1, b_conv1);
  printTensorDims(c1);
  void* c1_relu = tensorRelu(c1);
  //dumpWeightsToFile("tensors_out/conv1.out", c1_relu);
  void* p1 = tensorPooling(c1_relu, 0, 2, 2, 0, 0, 2, 2);
  printTensorDims(p1);

  /* --- Conv block 2 --- */
  void* c2 = tensorConvolution(p1, w_conv2, 2, 2, 1, 1,
			       conv_mode, conv_precision);
  tensorAdd(c2, b_conv2); // in place
  printTensorDims(c2);
  void* c2_relu = tensorRelu(c2);
  //dumpWeightsToFile("tensors_out/conv2.out", c2_relu);
  void* p2 = tensorPooling(c2_relu, 0, 2, 2, 0, 0, 2, 2);
  printTensorDims(p2);
  //dumpWeightsToFile("tensors_out/maxpool2.out", p2);

  /* --- Fully-connected layers --- */
  void* fc1 = tensorGemmGPU(p2, w_fc1);
  printTensorDims(fc1);
  //dumpWeightsToFile("tensors_out/gemm1.out", fc1);
  void* fc1_biased = tensorAdd(fc1, b_fc1);
  printTensorDims(fc1_biased);
  void* fc1_relu = tensorRelu(fc1_biased);
  printTensorDims(fc1_relu);

  void* fc2 = tensorGemmGPU(fc1_relu, w_fc2);
  printTensorDims(fc2);
  void* fc2_biased = tensorAdd(fc2, b_fc2);
  printTensorDims(fc2_biased);

  /* --- Softmax over the 10 class logits --- */
  void* probs = tensorSoftmax(fc2_biased);
  printTensorDims(probs);

  // End profiling; results are dumped to profile.txt by the runtime.
  stopProfiling();

  // Labels are shared with the original lenet_params dataset directory.
  computeAccuracy("../model_params/lenet_params/datasets/t10k-labels-idx1-ubyte",
		  batch_size, probs);
  // THINK: accuracy comparison likely need not be part of the HPVM graph.
}
/* Entry point: bring up the tensor runtime on GPU 0, then run the
 * LeNet-2 profiling test once. */
int main(){
  llvm_hpvm_initTensorRt(0); // device id 0
  testLenet2Arch();
  return 0;
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment