diff --git a/hpvm/include/nvdla/tensorUtils.h b/hpvm/include/nvdla/tensorUtils.h
index 47441bae9ddade48849b2d17b0e97c183d41f1c3..ecfd1092877128b17cc40a23800a36fa60771158 100644
--- a/hpvm/include/nvdla/tensorUtils.h
+++ b/hpvm/include/nvdla/tensorUtils.h
@@ -15,9 +15,9 @@ std::string model_params_path = "../../test/dnn_benchmarks/model_params/";
 
 
 
-__attribute__((noinline)) struct Tensor *readTrainedWeights(const char *file_name, int data_type,
-							    long int dim1_size, long int dim2_size,
-							    long int dim3_size, long int dim4_size) {
+__attribute__((noinline)) void *readTrainedWeights(const char *file_name, int data_type,
+						   long int dim1_size, long int dim2_size,
+						   long int dim3_size, long int dim4_size) {
   
   int type_size = 4; // NOTE: Assuming floating point tensors
   long int num_elems = dim1_size * dim2_size * dim3_size * dim4_size;
@@ -45,9 +45,9 @@ __attribute__((noinline)) struct Tensor *readTrainedWeights(const char *file_nam
 }
 
 
-__attribute__((noinline)) struct Tensor *readInputBatch(const char *file_name, long data_type,
-							long start, long end,
-							long dim2_size, long dim3_size, long dim4_size) {
+__attribute__((noinline)) void * readInputBatch(const char *file_name, long data_type,
+						long start, long end,
+						long dim2_size, long dim3_size, long dim4_size) {
   long int dim1_size = end - start;
   // FIXIT: Don't assume floating point types
   long int type_size = 4; // NOTE: Assuming floating point tensors
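
// --- Hypothetical call-site sketch (not part of the patch above) ---
// The hunks change readTrainedWeights/readInputBatch to return void * instead
// of struct Tensor *. This sketch shows how a caller might adapt, under the
// assumption (not shown in the diff) that the returned buffer is still backed
// by a struct Tensor, so an explicit cast recovers the old view. The include
// path, file name, data_type value, and dimensions below are illustrative only.
#include "nvdla/tensorUtils.h"   // assumed include path for the readers above

static void loadExampleWeights() {
  // Read a 64x3x3x3 weight blob; data_type 0 is a placeholder value.
  void *w = readTrainedWeights("conv1.bin", /*data_type=*/0, 64, 3, 3, 3);

  // Callers that still need Tensor fields would cast back explicitly
  // (assumption: the allocation behind the pointer is still a struct Tensor).
  struct Tensor *w_tensor = (struct Tensor *)w;
  (void)w_tensor;   // silence unused-variable warnings in this sketch
}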