diff --git a/hpvm/include/nvdla/tensor.h b/hpvm/include/nvdla/tensor.h
index 3a566ffdfc85b41403b9833c4caf742a17c42cdc..33b9e0aa21b624b3e66971712d2d414bb1e547ac 100644
--- a/hpvm/include/nvdla/tensor.h
+++ b/hpvm/include/nvdla/tensor.h
@@ -23,5 +23,8 @@ struct Tensor {
   struct Dimension dims;
 };
 
+// NOTE: Currently only NCHW is supported due to limited cuDNN support
+enum Tensor_format_t { nchw, nhwc };
+
 
 #endif
diff --git a/hpvm/include/nvdla/tensorUtils.h b/hpvm/include/nvdla/tensorUtils.h
index ea36e1d2fb08ed1a856556d1d7e45207f5f02de9..ab197ccf8396f10dd913f8249cf9c5307391f382 100644
--- a/hpvm/include/nvdla/tensorUtils.h
+++ b/hpvm/include/nvdla/tensorUtils.h
@@ -48,4 +48,24 @@ float computeAccuracy3(uint32_t *labels, void *result_ptr) {
 }
 
 
+// Empty stub definitions of the tensor_runtime.h API, needed so that
+// NVDLA-based compilation links (these functions are never actually called).
+
+void *create4DTensor(int data_type, int data_format, size_t dim1_size,
+                     size_t dim2_size, size_t dim3_size, size_t dim4_size) {
+  // Stub: no tensor is allocated on the NVDLA path.
+  return nullptr;
+}
+
+void startMemTracking() {}
+
+void freeBatchMemory() {}
+
+void hpvm_request_tensor(void *tensor, int destination) {}
+
+void llvm_hpvm_initializeRuntimeController(const char *) {}
+
+void llvm_hpvm_invokeRtControl(void *result, const char *str, int start,
+                               int end) {}
+
 #endif