From 8ac11bec97d52f1a23f4eedacd6537978e08a791 Mon Sep 17 00:00:00 2001
From: Hashim Sharif <hsharif3@tyler.cs.illinois.edu>
Date: Mon, 24 Feb 2020 22:34:00 -0600
Subject: [PATCH] Adding utils support for dumping class confidences and
 predicted labels

---
 .../dnn_sources/include/utils.h               | 84 ++++++++++++++++++-
 .../src/promise/alexnet_promise.cc            | 42 ++++++++--
 2 files changed, 116 insertions(+), 10 deletions(-)

diff --git a/llvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h b/llvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h
index a627f83e6b..9068445670 100644
--- a/llvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h
+++ b/llvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h
@@ -17,6 +17,7 @@ std::vector<float> run_accuracies;
 
 void printTensorInfo(void* tensor_ptr){
+
   struct Tensor* tensor = (struct Tensor*) tensor_ptr;
 
   if(tensor->gpu_data != NULL){
@@ -76,13 +77,21 @@ void fillWithOnesAndTwos(void* tensor_ptr){
   // initialization is specific to the floating point type
   if(tensor->data_type == CUDNN_DATA_FLOAT){
     float* data_arr = (float*) tensor->host_data;
-    for(unsigned int i = 0; i < tensor->num_elems/2; i++){
+
+    for(unsigned int i = 0; i < tensor->num_elems; i++){
+      if (i % 2 == 0)
+        data_arr[i] = 1.0;
+      else
+        data_arr[i] = 2.0;
+    }
+
+    /*for(unsigned int i = 0; i < tensor->num_elems/2; i++){
       data_arr[i] = 1.0;
     }
 
     for(unsigned int i = tensor->num_elems/2; i < tensor->num_elems; i++){
       data_arr[i] = 2.0;
-    }
+    }*/
   }
 }
@@ -852,4 +861,75 @@ void dumpOutput(void* output_ptr, const char* file_name){
 
 
+
+
+void copyClassConfsAndLabels(void* result_ptr,
+                             float* classConfs,
+                             int* predictedLabels,
+                             int start, int end){
+
+
+  struct Tensor* result = (struct Tensor*) result_ptr;
+
+  size_t num_classes = result->dims.dim_sizes[1];
+  float* data = (float*) result->host_data;
+
+
+  int it_count = end - start;
+  for(int i = 0; i < it_count; i++){
+
+    int chosen = 0;
+    for (int id = 1; id < num_classes; ++id){
+      if (data[i * num_classes + chosen] < data[i * num_classes + id]) chosen = id;
+    }
+
+    predictedLabels[start + i] = chosen;
+    classConfs[start + i] = data[i * num_classes + chosen];
+  }
+
+
+}
+
+
+void dumpClassConfsAndLabels(float* classConfs,
+                             int* predictedLabels,
+                             uint32_t* goldLabels,
+                             int test_input_size){
+
+  FILE* labels_fp = fopen("predicted_confs_labels.txt", "w+");
+
+  for (int i = 0; i < test_input_size; i++){
+
+    int label = predictedLabels[i];
+    int gold_label = (int) goldLabels[i];
+    printf ("gold_label = %u \n", goldLabels[i]);
+    float conf = classConfs[i];
+
+    std::ostringstream ss;
+    ss << std::fixed << conf;
+    std::string print_str = ss.str();
+    fwrite(print_str.c_str(), 1, print_str.length(), labels_fp);
+    fwrite(" ", 1, 1, labels_fp);
+
+
+    std::ostringstream label_ss;
+    label_ss << label;
+    std::string label_str = label_ss.str();
+    fwrite(label_str.c_str(), 1, label_str.length(), labels_fp);
+    fwrite(" ", 1, 1, labels_fp);
+
+
+    std::ostringstream gold_ss;
+    gold_ss << gold_label;
+    std::string gold_str = gold_ss.str();
+    fwrite(gold_str.c_str(), 1, gold_str.length(), labels_fp);
+    fwrite("\n", 1, 1, labels_fp);
+
+
+  }
+
+  fclose(labels_fp);
+}
+
+
 #endif
diff --git a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet_promise.cc b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet_promise.cc
index 2529e8eabf..a40add25e1 100644
--- a/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet_promise.cc
+++ b/llvm/projects/hpvm-tensor-rt/dnn_sources/src/promise/alexnet_promise.cc
@@ -41,10 +41,18 @@ int main(int argc, char* argv[]){
     offset = atoi(argv[5]);
   }
 
+  bool shouldDumpClassConf = false;
+  float* classConfs;
+  int* predictedLabels;
+  if(argc > 6){
+    shouldDumpClassConf = true;
+    classConfs = (float*) malloc(sizeof(float) * test_input_size);
+    predictedLabels = (int*) malloc(sizeof(int) * test_input_size);
+  }
+
 
   llvm_hpvm_initTensorRt(0);
 
-  int missed = 0;
   for (int i = 0 ; i < total_runs; i++){
@@ -58,11 +66,13 @@ int main(int argc, char* argv[]){
     int batch_count = test_input_size / batch_size;
     float final_accuracy = 0.0;
 
+    std::string dir_prefix = std::string("../model_params/alexnet_cifar10_test/");
+    std::string input_path = dir_prefix + std::string("input.bin");
+    std::string labels_path = dir_prefix + std::string("labels.bin");
+    std::string labels32_path = dir_prefix + std::string("labels32.bin");
+
     for(int i = 0; i < batch_count; i++){
 
-      std::string dir_prefix = std::string("../model_params/alexnet_cifar10_test/");
-      std::string input_path = dir_prefix + std::string("input.bin");
-      std::string labels_path = dir_prefix + std::string("labels.bin");
       std::string conv2d_1_w_path = dir_prefix + std::string("conv2d_1_w.bin");
       void* conv2d_1_w = readTrainedWeights(conv2d_1_w_path.c_str(), 0,64,3,11,11);
       std::string conv2d_1_b_path = dir_prefix + std::string("conv2d_1_b.bin");
@@ -107,21 +117,37 @@ int main(int argc, char* argv[]){
       float accuracy = computeAccuracy2(labels, batch_size, var_6);
       final_accuracy += accuracy;
 
-      freeBatchMemory();
-
+
+      if(shouldDumpClassConf){
+        int relative_start = start - offset;
+        int relative_end = end - offset;
+        copyClassConfsAndLabels(var_6, classConfs, predictedLabels, relative_start, relative_end);
+      }
+
+
+      freeBatchMemory();
     }
 
     final_accuracy = final_accuracy / batch_count;
     dumpFinalAccuracy(final_accuracy);
 
-    if (final_accuracy < bench_acc) missed += 1;
+
+
+    if(shouldDumpClassConf){
+      int labels_start = offset;
+      int labels_end = offset + test_input_size;
+      uint32_t* goldLabels = readLabelsBatch3(labels32_path.c_str(), labels_start, labels_end);
+      dumpClassConfsAndLabels(classConfs, predictedLabels, goldLabels, test_input_size);
+    }
+
   }
 
+  dumpExecutionAccuracies();
 
-  dumpExecutionAccuracies();
+  llvm_hpvm_cleanupTensorRt();
 
   return 0;
-- 
GitLab
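
Note on the new helpers: copyClassConfsAndLabels takes the arg-max over each row of the batch's class scores, while dumpClassConfsAndLabels writes one "<confidence> <predicted_label> <gold_label>" line per test input to predicted_confs_labels.txt. The snippet below is a minimal standalone sketch of that same logic for illustration only; it uses hypothetical score and label values and plain arrays in place of the tensor runtime's Tensor struct, and is not part of the patch.

    // Standalone sketch: arg-max over per-input class scores, then dump
    // "confidence predicted gold" lines, mirroring the utils.h additions.
    // All data values here are hypothetical.
    #include <cstdio>
    #include <cstdint>
    #include <vector>

    int main(){
      const int num_inputs = 4;   // assumed tiny test set
      const int num_classes = 3;  // assumed 3-class problem
      // Row-major class scores, one row per input (hypothetical values).
      std::vector<float> scores = {
        0.1f, 0.7f, 0.2f,
        0.5f, 0.3f, 0.2f,
        0.2f, 0.2f, 0.6f,
        0.9f, 0.05f, 0.05f
      };
      std::vector<uint32_t> goldLabels = {1, 0, 2, 1};  // hypothetical ground truth

      FILE* fp = fopen("predicted_confs_labels.txt", "w");
      for (int i = 0; i < num_inputs; i++){
        // Arg-max over this row, as copyClassConfsAndLabels does per batch element.
        int chosen = 0;
        for (int id = 1; id < num_classes; ++id)
          if (scores[i * num_classes + chosen] < scores[i * num_classes + id]) chosen = id;
        // One "confidence predicted gold" line per input, as dumpClassConfsAndLabels writes.
        fprintf(fp, "%f %d %d\n", scores[i * num_classes + chosen], chosen, (int) goldLabels[i]);
      }
      fclose(fp);
      return 0;
    }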