From 644bf40430c4f168181afd29857e8c25f77a552a Mon Sep 17 00:00:00 2001
From: Yifan Zhao <yifanz16@illinois.edu>
Date: Tue, 2 Feb 2021 03:03:36 -0600
Subject: [PATCH] Fixed some warnings in compilation

---
 .../dnn_sources/include/utils.h               | 68 ++++++-------------
 .../tensor_runtime/src/global_data.cc         |  2 +-
 .../tensor_runtime/src/hpvm-rt-controller.cpp |  7 +-
 .../tensor_runtime/src/tensor_cpu_runtime.cc  | 10 ---
 4 files changed, 25 insertions(+), 62 deletions(-)

diff --git a/hpvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h b/hpvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h
index 178454153b..43d4975749 100644
--- a/hpvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h
+++ b/hpvm/projects/hpvm-tensor-rt/dnn_sources/include/utils.h
@@ -46,10 +46,6 @@ void dumpWeightsToFile(const char *file_name, void *weights_ptr) {
     abort();
   }
 
-  // printf("size_in_bytes = %lu \n", weights->size_in_bytes);
-  size_t bytes_written =
-      fwrite(weights->host_data, 1, weights->size_in_bytes, fp);
-  // printf("bytes_written = %lu \n", bytes_written);
   fclose(fp);
 }
 
@@ -204,12 +200,12 @@ void compareValues(void *tensor_ptr, float *data, size_t num_elems) {
 void *readInputTensor(const char *file_name, int data_type, int dim1_size,
                       int dim2_size, int dim3_size, int dim4_size) {
 
-  int type_size = 4; // NOTE: Assuming floating point tensors
-  int num_elems = dim1_size * dim2_size * dim3_size * dim4_size;
-  int size_in_bytes = type_size * dim1_size * dim2_size * dim3_size * dim4_size;
+  size_t type_size = 4; // NOTE: Assuming floating point tensors
+  size_t num_elems = dim1_size * dim2_size * dim3_size * dim4_size;
+  size_t size_in_bytes = type_size * num_elems;
   uint8_t *file_data = (uint8_t *)malloc(sizeof(char) * num_elems);
   float *tensor_data = (float *)malloc(sizeof(float) * num_elems);
-  int file_header_size = 16;
+  size_t file_header_size = 16;
 
   FILE *file = fopen(file_name, "rb");
   if (file == NULL) {
@@ -218,8 +214,7 @@ void *readInputTensor(const char *file_name, int data_type, int dim1_size,
   }
 
   fseek(file, file_header_size, SEEK_CUR); // Skipping the file header
-  size_t bytes_read = fread(file_data, 1, sizeof(uint8_t) * num_elems, file);
-
+  fread(file_data, 1, sizeof(uint8_t) * num_elems, file);
   fclose(file);
 
   for (size_t i = 0; i < num_elems; ++i) {
@@ -256,11 +251,7 @@ struct Tensor *readTrainedWeightsCPU(const char *file_name, int data_type,
   }
 
   fseek(file, file_header_size, SEEK_CUR); // Skipping the file header
-  size_t bytes_read = fread(tensor_data, 1, size_in_bytes, file);
-
-  // printf("size in bytes = %lu, bytes read = %lu \n", size_in_bytes,
-  // bytes_read);
-
+  fread(tensor_data, 1, size_in_bytes, file);
   fclose(file);
 
   struct Tensor *weights = (struct Tensor *)create4DTensor(
@@ -294,11 +285,7 @@ struct Tensor *readTrainedWeights(const char *file_name, int data_type,
   }
 
   fseek(file, file_header_size, SEEK_CUR); // Skipping the file header
-  size_t bytes_read = fread(tensor_data, 1, size_in_bytes, file);
-
-  // printf("size in bytes = %lu, bytes read = %lu \n", size_in_bytes,
-  // bytes_read);
-
+  fread(tensor_data, 1, size_in_bytes, file);
   fclose(file);
 
   struct Tensor *weights = (struct Tensor *)create4DTensor(
@@ -332,8 +319,7 @@ struct Tensor *readInputBatch(const char *file_name, int data_type,
   }
 
   fseek(file, file_header_size, SEEK_SET); // Skipping the file header
-  size_t bytes_read = fread(tensor_data, 1, size_in_bytes, file);
-
+  fread(tensor_data, 1, size_in_bytes, file);
   fclose(file);
 
   struct Tensor *weights = (struct Tensor *)create4DTensor(
@@ -367,8 +353,7 @@ void *copyInputBatch(const char *file_name, int start, int end,
   }
 
   fseek(file, file_header_size, SEEK_SET); // Skipping the file header
-  size_t bytes_read = fread(tensor_data, 1, size_in_bytes, file);
-
+  fread(tensor_data, 1, size_in_bytes, file);
   fclose(file);
 
   initTensorData(inputTensor, tensor_data, size_in_bytes);
@@ -392,9 +377,7 @@ uint8_t *readLabels(const char *labels_file, int num_labels) {
     printf("Data file %s is not found. Aborting...\n", labels_file);
     abort();
   }
-
-  size_t bytes_read = fread(labels, 1, sizeof(uint8_t) * num_labels, file);
-
+  fread(labels, 1, sizeof(uint8_t) * num_labels, file);
   fclose(file);
 
   return labels;
@@ -408,9 +391,7 @@ uint32_t *readLabels3(const char *labels_file, int num_labels) {
     printf("Data file %s is not found. Aborting...\n", labels_file);
     abort();
   }
-
-  size_t bytes_read = fread(labels, 1, sizeof(uint32_t) * num_labels, file);
-
+  fread(labels, 1, sizeof(uint32_t) * num_labels, file);
   fclose(file);
 
   return labels;
@@ -429,9 +410,7 @@ uint8_t *readLabelsBatch(const char *labels_file, int start, int end) {
   }
 
   fseek(file, file_header_size, SEEK_SET); // Skipping the file header
-
-  size_t bytes_read = fread(labels, 1, sizeof(uint8_t) * num_labels, file);
-
+  fread(labels, 1, sizeof(uint8_t) * num_labels, file);
   fclose(file);
 
   // printf("--labels bytes_read = %lu \n", bytes_read);
@@ -451,9 +430,7 @@ uint32_t *readLabelsBatch3(const char *labels_file, int start, int end) {
   }
 
   fseek(file, file_header_size, SEEK_SET); // Skipping the file header
-
-  size_t bytes_read = fread(labels, 1, sizeof(uint32_t) * num_labels, file);
-
+  fread(labels, 1, sizeof(uint32_t) * num_labels, file);
   fclose(file);
 
   return labels;
@@ -470,7 +447,7 @@ void computeAccuracy(const char *labels_file, int num_labels,
   float *data = (float *)result->host_data;
   int num_errors = 0;
 
-  for (int i = 0; i < batch_dim; i++) {
+  for (size_t i = 0; i < batch_dim; i++) {
     int chosen = 0;
     for (int id = 1; id < 10; ++id) {
       if (data[i * channels + chosen] < data[i * channels + id])
@@ -513,7 +490,7 @@ float computeAccuracy2(uint8_t *labels, int batch_size, void *result_ptr,
   for (unsigned int i = 0; i < batch_dim; i++) {
 
     int chosen = 0;
-    for (int id = 1; id < num_classes; ++id) {
+    for (size_t id = 1; id < num_classes; ++id) {
       if (data[i * num_classes + chosen] < data[i * num_classes + id])
         chosen = id;
     }
@@ -551,10 +528,10 @@ float computeAccuracy3(uint32_t *labels, void *result_ptr) {
 
   printf("batch_dim = %lu, num_classes = %lu \n", batch_dim, num_classes);
 
-  for (int i = 0; i < batch_dim; i++) {
+  for (size_t i = 0; i < batch_dim; i++) {
 
-    int chosen = 0;
-    for (int id = 1; id < num_classes; ++id) {
+    uint32_t chosen = 0;
+    for (size_t id = 1; id < num_classes; ++id) {
       if (data[i * num_classes + chosen] < data[i * num_classes + id])
         chosen = id;
     }
@@ -605,15 +582,14 @@ float computeTop5Accuracy(uint8_t *labels, int num_labels, void *result_ptr,
   for (int i = 0; i < num_labels; i++) {
 
     std::vector<ClassProb> elem_probs;
-    for (int id = 0; id < num_classes; ++id) {
+    for (size_t id = 0; id < num_classes; ++id) {
       ClassProb cProb;
       cProb.prob = data[i * channels + id];
       cProb.index = id;
       elem_probs.push_back(cProb);
     }
 
-  std:
-    sort(elem_probs.begin(), elem_probs.end(), descendFloatComp);
+    std::sort(elem_probs.begin(), elem_probs.end(), descendFloatComp);
     // Check if any of top-5 predictions matches
     bool matched = false;
     for (int j = 0; j < 5; j++) {
@@ -692,7 +668,7 @@ void dumpExecutionAccuracies() {
 
   FILE *fp = fopen("run_accuracies.txt", "w+");
   if (fp != NULL) {
-    for (int i = 0; i < run_accuracies.size(); i++) {
+    for (size_t i = 0; i < run_accuracies.size(); i++) {
       float accuracy = run_accuracies[i];
       std::ostringstream ss;
       ss << std::fixed << accuracy;
@@ -822,7 +798,7 @@ void copyClassConfsAndLabels(void *result_ptr, float *classConfs,
   for (int i = 0; i < it_count; i++) {
 
     int chosen = 0;
-    for (int id = 1; id < num_classes; ++id) {
+    for (size_t id = 1; id < num_classes; ++id) {
       if (data[i * num_classes + chosen] < data[i * num_classes + id])
         chosen = id;
     }
diff --git a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/global_data.cc b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/global_data.cc
index b812a51d7e..aeb12e9f6e 100644
--- a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/global_data.cc
+++ b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/global_data.cc
@@ -47,4 +47,4 @@ std::string profile_data = "";
 PerfParamSet *perfParamSet;
 SampParamSet *sampParamSet;
 
-unsigned int currentTensorID = -1;
+unsigned int currentTensorID = ~0U;
diff --git a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp
index c7237c0076..66e8e3d1ba 100644
--- a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp
+++ b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/hpvm-rt-controller.cpp
@@ -417,12 +417,12 @@ double RuntimeController::getCurrentConfigurationAccuracyLoss() {
 NodeConfiguration *RuntimeController::getNodeConfiguration(const char *data) {
 
   // if visc.node.id Not specified for this HPVM Node
-  if (currentTensorID == -1) {
+  if (currentTensorID == ~0U) {
     std::string s(data);
     // All nodes are expected to have a configuration
     return (*Configurations)[configurationIdx]->setup.at(s);
   } else {
-    DEBUG("-- currentTensorID = \%u \n", currentTensorID);
+    DEBUG("-- currentTensorID = %u \n", currentTensorID);
     return (*Configurations)[configurationIdx]->idConfigMap.at(currentTensorID);
   }
 }
@@ -664,7 +664,6 @@ void RuntimeController::readConfigurationFile(const char *str) {
     abort();
   }
 
-  bool readingConfiguration = false;
   bool readingFirstLine = false;
 
   // Read baseline_time from first line of configuration file
@@ -697,13 +696,11 @@ void RuntimeController::readConfigurationFile(const char *str) {
 
     if (tokens[0] == "+++++") { // Found new configuration start token
       // Mark the start of a new configuration
-      readingConfiguration = true;
       readingFirstLine = true;
       continue;
     }
 
     if (tokens[0] == "-----") { // Found configuration end token
-      readingConfiguration = false;
       // Mark the end of current configuration
       continue;
     }
diff --git a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/tensor_cpu_runtime.cc b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/tensor_cpu_runtime.cc
index 7a1acd2ba0..939f6e0619 100644
--- a/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/tensor_cpu_runtime.cc
+++ b/hpvm/projects/hpvm-tensor-rt/tensor_runtime/src/tensor_cpu_runtime.cc
@@ -371,7 +371,6 @@ void *tensorIrregularFilterSamplingConvolutionCPU(
   float *reduced_kernels = (float *)malloc(reduced_filer_size);
 
   float fac = (((float)skip_every) / ((float)skip_every - 1));
-  int reduced_filter_dim = reduced_num_filter_elem / channels;
 
   // Create Reduced filter
   omp_set_num_threads(4);
@@ -750,7 +749,6 @@ void *tensorConvApproxCPU(void *input_ptr, void *filter_ptr, int vertical_pad,
   }
   if (skip_every > 1) {
     printf("INPUT FILTERING\n");
-    Tensor *input = (Tensor *)input_ptr;
     Tensor *filter = (Tensor *)filter_ptr;
 
     const int kernel_height = filter->dims.dim_sizes[2];
@@ -1026,7 +1024,6 @@ void *tensorGemmCPU(void *lhs_ptr, void *rhs_ptr) {
 
   int m = lhs->dims.dim_sizes[0];
   int n = rhs->dims.dim_sizes[rhs->dims.num_dims - 1]; // output neurons
-  int rhs_k = rhs->dims.dim_sizes[rhs->dims.num_dims - 2];
 
   Tensor *output = (Tensor *)create4DTensorCPU(0, 0, m, n, 1, 1);
 
@@ -1098,17 +1095,10 @@ void *tensorBatchNormCPU(void *input_ptr, void *gamma_ptr, void *beta_ptr,
   Tensor *input = (Tensor *)input_ptr;
   Tensor *gamma = (Tensor *)gamma_ptr;
   Tensor *beta = (Tensor *)beta_ptr;
-  Tensor *mean = (Tensor *)mean_ptr;
-  Tensor *variance = (Tensor *)variance_ptr;
 
   float *__restrict__ host_image = (float *)input->host_data;
   float *__restrict__ host_beta = (float *)beta->host_data;
   float *__restrict__ host_gamma = (float *)gamma->host_data;
-  float *__restrict__ host_mean = (float *)mean->host_data;
-  float *__restrict__ host_variance = (float *)variance->host_data;
-
-  float alpha_val = 1.0f, beta_val = 0.0f;
-  size_t num_elems = input->num_elems;
 
   int batch_size = input->dims.dim_sizes[0];
   int channels = input->dims.dim_sizes[1];
-- 
GitLab