From 5644fdfc7c060a069753a15969d9316f5a90cf54 Mon Sep 17 00:00:00 2001
From: Hashim Sharif <hsharif3@miranda.cs.illinois.edu>
Date: Thu, 24 Jun 2021 21:47:17 -0500
Subject: [PATCH] Handle readInputBatch correctly -- its argument ordering differs

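The HPVM tensor-read calls do not share one operand layout:
readTrainedWeights passes the four tensor dimensions directly, while
readInputBatch passes a (start, end) range in place of a batch count and
shifts the dimension operands. The pass previously read operands 2-5 for
every input binding tensor, which misparsed readInputBatch calls, so the
dimension extraction now branches on the callee name.

A sketch of the operand layout the pass now assumes; the indices are taken
from the extraction code below, and the signatures are paraphrased rather
than quoted from the HPVM runtime headers:

    // readTrainedWeights(file, data_type, N, C, H, W)
    //   N = operand 2, C = operand 3, H = operand 4, W = operand 5
    // readInputBatch(file, data_type, start, end, C, H, W)
    //   C = operand 4, H = operand 5, W = operand 6; there is no single
    //   batch-count operand, so the batch size is assumed to be 1 for now.
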
---
 .../Transforms/HPVM2NVDLA/HPVM2NVDLAPass.cpp  | 110 +++++++++++++-----
 1 file changed, 81 insertions(+), 29 deletions(-)

diff --git a/hpvm/lib/Transforms/HPVM2NVDLA/HPVM2NVDLAPass.cpp b/hpvm/lib/Transforms/HPVM2NVDLA/HPVM2NVDLAPass.cpp
index 87f706a77e..fced0b3ccd 100644
--- a/hpvm/lib/Transforms/HPVM2NVDLA/HPVM2NVDLAPass.cpp
+++ b/hpvm/lib/Transforms/HPVM2NVDLA/HPVM2NVDLAPass.cpp
@@ -508,9 +510,16 @@ User *CGT_NVDLA::getBindingTensor(DFLeafNode* N, unsigned index) {
                                 }
                         }
 		}
+		DEBUG(errs() << *ArgObj << " : " << *ArgObj->getType() << "\n");
 		auto *ArgObjPtrType = dyn_cast<PointerType>(ArgObj->getType());
+		// FIXME: LLVM 9 may pass the launch-argument struct as an i8*
+		// instead of a typed struct pointer, so the cast below can yield null.
 		auto *ArgObjType = dyn_cast<StructType>(ArgObjPtrType->getElementType());
-		assert(ArgObjType && "Arguments to launch is a structure.");
+		// NOTE: This assertion is disabled for now because the root struct
+		// may arrive as i8*. It cannot be removed for good: the struct's
+		// DataLayout is needed later, so re-enable it once the i8* case
+		// is handled.
+		//assert(ArgObjType && "Argument to launch must be a structure.");
 		DEBUG(errs() << "ARG OBJ: ");
 		DEBUG(ArgObj->print(errs()));
 		DEBUG(errs() << "\n");
@@ -608,27 +617,61 @@ void CGT_NVDLA::getaddOpSucceedsNode(DFNode *N, SmallVector<DFLeafNode *, 4> &Ad
 }
 
 ITensor *CGT_NVDLA::getNVDLAInputTensor(DFLeafNode* N, const User *InputBindingTensor) {
-	if(InputBindingTensor) {
-		auto *BatchesConst = dyn_cast<ConstantInt>(InputBindingTensor->getOperand(2));
-		auto *ChannelsConst = dyn_cast<ConstantInt>(InputBindingTensor->getOperand(3));
-		auto *HeightConst = dyn_cast<ConstantInt>(InputBindingTensor->getOperand(4));
-		auto *WidthConst = dyn_cast<ConstantInt>(InputBindingTensor->getOperand(5));
-		assert(HeightConst && WidthConst && ChannelsConst && BatchesConst 
+	if(InputBindingTensor) {
+		ConstantInt *BatchesConst;
+		ConstantInt *ChannelsConst;
+		ConstantInt *HeightConst;
+		ConstantInt *WidthConst;
+
+		const auto *ReadCall = dyn_cast<CallInst>(InputBindingTensor);
+		assert(ReadCall && ReadCall->getCalledFunction() &&
+				"Input binding tensor must be a direct call.");
+		StringRef FunctionName = ReadCall->getCalledFunction()->getName();
+		DEBUG(errs() << "INPUT BINDING CALL: " << *InputBindingTensor << "\n");
+
+		if(FunctionName.contains("readInputBatch")) {
+			// readInputBatch carries its dimensions at operands 4-6 and has
+			// no single batch-count operand, so assume a batch size of 1.
+			BatchesConst = ConstantInt::get(Type::getInt32Ty(M.getContext()), 1);
+			ChannelsConst = dyn_cast<ConstantInt>(InputBindingTensor->getOperand(4));
+			HeightConst = dyn_cast<ConstantInt>(InputBindingTensor->getOperand(5));
+			WidthConst = dyn_cast<ConstantInt>(InputBindingTensor->getOperand(6));
+		} else {
+			// readTrainedWeights carries (N, C, H, W) at operands 2-5.
+			BatchesConst = dyn_cast<ConstantInt>(InputBindingTensor->getOperand(2));
+			ChannelsConst = dyn_cast<ConstantInt>(InputBindingTensor->getOperand(3));
+			HeightConst = dyn_cast<ConstantInt>(InputBindingTensor->getOperand(4));
+			WidthConst = dyn_cast<ConstantInt>(InputBindingTensor->getOperand(5));
+		}
+
+		assert(HeightConst && WidthConst && ChannelsConst && BatchesConst
 				&& "Number of input dimensions must be constants.");
 		
-		// Input dimensions
-		int InputW = WidthConst->getZExtValue();
-	        int InputH = HeightConst->getZExtValue();
-		int InputC = ChannelsConst->getZExtValue();
-		int InputN = BatchesConst->getZExtValue();
-	
-		// Create a new input tensor
-		Dims4 dims(InputN, InputC, InputH, InputW);
-		return Network->addInput("", dims);
-	}
-	return getIntermediateInputTensor(N);
+		// Input dimensions
+		int InputW = WidthConst->getZExtValue();
+		int InputH = HeightConst->getZExtValue();
+		int InputC = ChannelsConst->getZExtValue();
+		int InputN = BatchesConst->getZExtValue();
+
+		// Create a new input tensor
+		Dims4 dims(InputN, InputC, InputH, InputW);
+		return Network->addInput("", dims);
+	}
+	return getIntermediateInputTensor(N);
 }
 
 unsigned CGT_NVDLA::getInputIndex(DFLeafNode* N, const IntrinsicInst *II) {
 	DEBUG(errs() << "GET INPUT INDEX\n");
         auto *F = N->getFuncPointer();
@@ -707,18 +753,22 @@ void CGT_NVDLA::generateConvolutionLayer(DFLeafNode* N, const IntrinsicInst *II)
 	int kernelN = KernelNConst->getZExtValue();
 	DEBUG(errs() << "\nKERNEL H: " << kernelH << "\n");
         DEBUG(errs() << "KERNEL W: " << kernelW << "\n");
-	 DEBUG(errs() << "KERNEL C: " << kernelC << "\n");
+	DEBUG(errs() << "KERNEL C: " << kernelC << "\n");
         DEBUG(errs() << "KERNEL N: " << kernelN << "\n");
 	
-	 int numOutputs;
+	int numOutputs;
         if(!InputTensor) {
                 DEBUG(errs() << "INPUT FROM EDGE\n");
                 numOutputs = (InputNVDLATensor->getDimensions()).n * kernelN;
                              //    (InputNVDLATensor->getDimensions()).c;
         } else {
                 DEBUG(errs() << "INPUT FROM WEIGHT TENSOR\n");
-                auto *BatchesConst = dyn_cast<ConstantInt>(InputTensor->getOperand(2));
-                auto *ChannelsConst = dyn_cast<ConstantInt>(InputTensor->getOperand(3));
+                // FIXME: Assume batch_size = 1 for now; readInputBatch does
+                // not carry a batch-count operand (see getNVDLAInputTensor).
+                auto *BatchesConst = ConstantInt::get(Type::getInt32Ty(M.getContext()), 1);
+                // Operand 4 is the channel count for readInputBatch calls;
+                // ChannelsConst is currently unused in the numOutputs computation.
+                auto *ChannelsConst = dyn_cast<ConstantInt>(InputTensor->getOperand(4));
                 numOutputs = BatchesConst->getZExtValue() * kernelN;
                                // ChannelsConst->getZExtValue();
                 DEBUG(errs() << "NUM OUTPUTS: " << numOutputs << "\n");
@@ -816,14 +866,16 @@ void CGT_NVDLA::generateConvolutionLayer(DFLeafNode* N, const IntrinsicInst *II)
 	Dims2 kernelSize = Dims2(kernelH, kernelW);
 
 	auto *Layer = Network->addConvolution(InputNVDLATensor, numOutputs, 0,
-									kernelSize, tlPadding, brPadding, stride, dilation,
-									kernelWeights, biasWeights, biasMode, numGroups);
+					      kernelSize, tlPadding, brPadding, stride, dilation,
+					      kernelWeights, biasWeights, biasMode, numGroups);
 	if(AddOpNodes.size()) {
 		auto *Node = AddOpNodes[0];
 		mapOutputTensor(Node, Layer->getOutput(0));
 	} else {
 		mapOutputTensor(N, Layer->getOutput(0));
 	}
 	Layer->setName((std::string("conv") + getLayerName(std::string("conv"))).c_str());
 	DEBUG(errs() << Layer->getName() << "\n");
 }
-- 
GitLab