From 6a49e2b111fc1d4c929375efc966eeb6460c187b Mon Sep 17 00:00:00 2001
From: Yifan Zhao <yifanz16@illinois.edu>
Date: Sat, 3 Apr 2021 13:52:09 -0500
Subject: [PATCH] Silence unconditional pass output
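
Wrap the unconditional errs() prints in the HPVM transform passes
(HPVMUtils, BuildDFG, DFG2LLVM_CPU, DFG2LLVM_CUDNN, DFG2LLVM_WrapperAPI,
FuseHPVMTensorNodes, GenHPVM, and InPlaceDFGAnalysis) in DEBUG() so they
no longer clutter normal compilation, and comment out the viewDFGraph()
call in BuildDFG and the printInPlaceDFGParameter() call in
DFG2LLVM_CUDNN.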

---
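The demoted prints remain reachable through LLVM's standard debug
facility. In a build with assertions enabled, they can be restored when
the passes are run through opt, for example (pass flags and input file
elided; <DEBUG_TYPE> stands for whatever DEBUG_TYPE string each pass
file already defines, which is not visible in this diff):

    opt ... -debug                     # emit all DEBUG() output
    opt ... -debug-only=<DEBUG_TYPE>   # emit output of one pass only
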
 hpvm/include/SupportHPVM/HPVMUtils.h          |   4 +-
 hpvm/lib/Transforms/BuildDFG/BuildDFG.cpp     |   2 +-
 .../Transforms/DFG2LLVM_CPU/DFG2LLVM_CPU.cpp  |   4 +-
 .../DFG2LLVM_CUDNN/DFG2LLVM_CUDNN.cpp         |  18 +--
 .../DFG2LLVM_WrapperAPI.cpp                   | 120 +++++++++---------
 .../FuseHPVMTensorNodes.cpp                   |  62 ++++-----
 hpvm/lib/Transforms/GenHPVM/GenHPVM.cpp       |   2 +-
 .../InPlaceDFG/InPlaceDFGAnalysis.cpp         |   2 +-
 8 files changed, 107 insertions(+), 107 deletions(-)

diff --git a/hpvm/include/SupportHPVM/HPVMUtils.h b/hpvm/include/SupportHPVM/HPVMUtils.h
index 781306956d..ee18a81273 100644
--- a/hpvm/include/SupportHPVM/HPVMUtils.h
+++ b/hpvm/include/SupportHPVM/HPVMUtils.h
@@ -448,7 +448,7 @@ hpvm::Target getUpdatedTag(hpvm::Target Tag, hpvm::Target T) {
 
 // This functions add the hint as metadata in hpvm code
 void addHint(Function *F, hpvm::Target T) {
-  errs() << "ADD HINT *************************\n";
+  DEBUG(errs() << "ADD HINT *************************\n");
   // Get Module
   Module *M = F->getParent();
   DEBUG(errs() << "Set preferred target for " << F->getName() << ": ");
@@ -474,7 +474,7 @@ void addHint(Function *F, hpvm::Target T) {
       break;
   case hpvm::TENSOR_TARGET:
       DEBUG(errs() << "PROMISE Target\n");
-      errs() << "PROMISE\n";
+      DEBUG(errs() << "PROMISE\n");
       HintNode = M->getOrInsertNamedMetadata("hpvm_hint_promise");
       break;
   default:
diff --git a/hpvm/lib/Transforms/BuildDFG/BuildDFG.cpp b/hpvm/lib/Transforms/BuildDFG/BuildDFG.cpp
index e7293a0640..b3b46de482 100644
--- a/hpvm/lib/Transforms/BuildDFG/BuildDFG.cpp
+++ b/hpvm/lib/Transforms/BuildDFG/BuildDFG.cpp
@@ -59,7 +59,7 @@ bool BuildDFG::runOnModule(Module &M) {
         BuildGraph(Root, F);
 
         Root->getChildGraph()->sortChildren();
-        viewDFGraph(Root->getChildGraph());
+        // viewDFGraph(Root->getChildGraph());
       }
     }
   }
diff --git a/hpvm/lib/Transforms/DFG2LLVM_CPU/DFG2LLVM_CPU.cpp b/hpvm/lib/Transforms/DFG2LLVM_CPU/DFG2LLVM_CPU.cpp
index d5904bd83c..10667ddeec 100644
--- a/hpvm/lib/Transforms/DFG2LLVM_CPU/DFG2LLVM_CPU.cpp
+++ b/hpvm/lib/Transforms/DFG2LLVM_CPU/DFG2LLVM_CPU.cpp
@@ -1412,7 +1412,7 @@ void CGT_CPU::codeGen(DFLeafNode *N) {
       break;
     }
     case hpvm::CUDNN_TARGET: {
-      errs() << "CUDNN hint found. Store CUDNN function as CPU funtion.\n";
+      DEBUG(errs() << "CUDNN hint found. Store CUDNN function as CPU function.\n");
       // Make sure there is a generated CPU function for cudnn
       assert(N->getGenFuncForTarget(hpvm::CUDNN_TARGET) && "");
       assert(N->hasCPUGenFuncForTarget(hpvm::CUDNN_TARGET) && "");
@@ -1431,7 +1431,7 @@ void CGT_CPU::codeGen(DFLeafNode *N) {
     }
      case hpvm::TENSOR_TARGET: 
      {
-       errs() << "Promise hint found. Store PROMISE function as CPU funtion.\n";
+       DEBUG(errs() << "Promise hint found. Store PROMISE function as CPU function.\n");
        // Make sure there is a generated x86 function for promise
        assert(N->getGenFuncForTarget(hpvm::TENSOR_TARGET) && "");
        assert(N->hasCPUGenFuncForTarget(hpvm::TENSOR_TARGET) && "");
diff --git a/hpvm/lib/Transforms/DFG2LLVM_CUDNN/DFG2LLVM_CUDNN.cpp b/hpvm/lib/Transforms/DFG2LLVM_CUDNN/DFG2LLVM_CUDNN.cpp
index 0559e8136d..110f8918ef 100644
--- a/hpvm/lib/Transforms/DFG2LLVM_CUDNN/DFG2LLVM_CUDNN.cpp
+++ b/hpvm/lib/Transforms/DFG2LLVM_CUDNN/DFG2LLVM_CUDNN.cpp
@@ -171,8 +171,8 @@ void CGT_CUDNN::initRuntimeAPI() {
 }
 
 void CGT_CUDNN::codeGen(DFInternalNode *N) {
-  errs() << "Inside node: " << N->getFuncPointer()->getName() << "\n";
-  errs() << "Skipping internal node\n";
+  DEBUG(errs() << "Inside node: " << N->getFuncPointer()->getName() << "\n");
+  DEBUG(errs() << "Skipping internal node\n");
 }
 
 void CGT_CUDNN::codeGen(DFLeafNode *N) {
@@ -191,13 +191,13 @@ void CGT_CUDNN::codeGen(DFLeafNode *N) {
 
   // Generate code only if it has the right hint
   if (!checkPreferredTarget(N, hpvm::CUDNN_TARGET)) {
-    errs() << "Skipping node: " << N->getFuncPointer()->getName() << "\n";
+    DEBUG(errs() << "Skipping node: " << N->getFuncPointer()->getName() << "\n");
     return;
   }
 
   // Get the function associated with the dataflow node
   Function *F = N->getFuncPointer();
-  errs() << "function name = " << F->getName() << "\n";
+  DEBUG(errs() << "function name = " << F->getName() << "\n");
 
   /* Removing HPVM in/out/inout function attributes */
   for (Function::arg_iterator ai = F->arg_begin(), ae = F->arg_end(); ai != ae;
@@ -224,7 +224,7 @@ void CGT_CUDNN::codeGen(DFLeafNode *N) {
   std::string FName(F->getName().data());
   F_cudnn = CloneFunction(F, VMap);
   F_cudnn->setName(FName + "_cudnn");
-  errs() << "Cloned function name2 = " << F_cudnn->getName() << "\n";
+  DEBUG(errs() << "Cloned function name2 = " << F_cudnn->getName() << "\n");
   F_cudnn->removeFromParent();
   M.getFunctionList().push_back(F_cudnn);
 
@@ -496,7 +496,7 @@ void CGT_CUDNN::codeGen(DFLeafNode *N) {
         } else if (II->getIntrinsicID() == Intrinsic::hpvm_tensor_tanh) {
           // Create cudnn runtime function call
           FunctionCallee tensorTanh;
-          errs() << "tensorTanh Call = \n\n";
+          DEBUG(errs() << "tensorTanh Call = \n\n");
           DECLARE(tensorTanh);
           // errs()<<"tensorTanh Call = "<<*tensorTanh<<"\l";
           CallInst::Create(tensorTanh, Args, "", II);
@@ -569,7 +569,7 @@ void CGT_CUDNN::codeGen(DFLeafNode *N) {
                                                       re = IItoRemove.rend();
        ri != re; ++ri) {
     DEBUG(errs() << "Erasing: " << **ri << "\n");
-    errs() << "Erasing: " << **ri << "\n";
+    DEBUG(errs() << "Erasing: " << **ri << "\n");
     (*ri)->eraseFromParent();
   }
 
@@ -577,7 +577,7 @@ void CGT_CUDNN::codeGen(DFLeafNode *N) {
 }
 
 bool DFG2LLVM_CUDNN::runOnModule(Module &M) {
-  errs() << "\nDFG2LLVM_CUDNN PASS\n";
+  DEBUG(errs() << "\nDFG2LLVM_CUDNN PASS\n");
 
   // Get the BuildDFG Analysis Results:
   // - Dataflow graph
@@ -587,7 +587,7 @@ bool DFG2LLVM_CUDNN::runOnModule(Module &M) {
   InPlaceDFGAnalysis::InPlaceDFGParameter IPP =
       (getAnalysis<InPlaceDFGAnalysisWrapper>()).getIPP();
   // Print results
-  printInPlaceDFGParameter(IPP);
+  // printInPlaceDFGParameter(IPP);
 
   std::vector<DFInternalNode *> Roots = DFG.getRoots();
 
diff --git a/hpvm/lib/Transforms/DFG2LLVM_WrapperAPI/DFG2LLVM_WrapperAPI.cpp b/hpvm/lib/Transforms/DFG2LLVM_WrapperAPI/DFG2LLVM_WrapperAPI.cpp
index ec5a84cffb..c0dbd3899b 100644
--- a/hpvm/lib/Transforms/DFG2LLVM_WrapperAPI/DFG2LLVM_WrapperAPI.cpp
+++ b/hpvm/lib/Transforms/DFG2LLVM_WrapperAPI/DFG2LLVM_WrapperAPI.cpp
@@ -265,7 +265,7 @@ public:
 
 void InitialState::transition(CodeGenStateMachine *Mch, IntrinsicInst *II) {
   if (II) { // Not end of instruction stream
-    errs() << "INITIAL STATE\n";
+    DEBUG(errs() << "INITIAL STATE\n");
     switch (II->getIntrinsicID()) {
     case Intrinsic::hpvm_tensor_convolution: {
       Mch->addIntrinsicInst(II);
@@ -273,7 +273,7 @@ void InitialState::transition(CodeGenStateMachine *Mch, IntrinsicInst *II) {
       Mch->addArgument(II->getOperand(1)); // conv kernel
 
       Mch->setCurrent(new ConvolutionLayer_1());
-      errs() << "TO CONVOLUTION LAYER 1\n";
+      DEBUG(errs() << "TO CONVOLUTION LAYER 1\n");
     } break;
     case Intrinsic::hpvm_tensor_mul: {
       Mch->addIntrinsicInst(II);
@@ -281,7 +281,7 @@ void InitialState::transition(CodeGenStateMachine *Mch, IntrinsicInst *II) {
       Mch->addArgument(II->getOperand(1)); // 2nd gemm input
 
       Mch->setCurrent(new FullyConnectedLayer_1());
-      errs() << "TO FULLY CONNECTED LAYER 1\n";
+      DEBUG(errs() << "TO FULLY CONNECTED LAYER 1\n");
     } break;
 
     case Intrinsic::hpvm_node_id: {
@@ -304,36 +304,36 @@ void InitialState::transition(CodeGenStateMachine *Mch, IntrinsicInst *II) {
 
       Mch->addIntrinsicToRemove(II);
       Mch->setCurrent(new InitialState());
-      errs() << "TO INIT STATE\n";
+      DEBUG(errs() << "TO INIT STATE\n");
     } break;
 
     default: // Other HPVM intrinsic
     {
       Mch->addIntrinsicInst(II);
       Mch->setCurrent(new SingleTensorOperation());
-      errs() << "TO SINGLE OP\n";
+      DEBUG(errs() << "TO SINGLE OP\n");
     } break;
     }
     delete this;
   } // else {} // No HPVM intrinsic received. Remain at initial
-  errs() << "TO NO CHANGE\n";
+  DEBUG(errs() << "TO NO CHANGE\n");
 }
 
 void SingleTensorOperation::transition(CodeGenStateMachine *Mch,
                                        IntrinsicInst *II) {
   if (II) { // Not end of instruction stream
-    errs() << "SINGLE TENSOR OP\n";
+    DEBUG(errs() << "SINGLE TENSOR OP\n");
     Mch->setCurrent(new NoPattern());
-    errs() << "TO NO PATTERN\n";
+    DEBUG(errs() << "TO NO PATTERN\n");
     delete this;
   }
-  errs() << "NO CHANGE\n";
+  DEBUG(errs() << "NO CHANGE\n");
 }
 
 void FullyConnectedLayer_1::transition(CodeGenStateMachine *Mch,
                                        IntrinsicInst *II) {
   if (II) { // Not end of instruction stream
-    errs() << "FULLY CONNECTED LAYER 1\n";
+    DEBUG(errs() << "FULLY CONNECTED LAYER 1\n");
     switch (II->getIntrinsicID()) {
     case Intrinsic::hpvm_tensor_add: {
       IntrinsicInst *MulII = Mch->getIntrinsicInstAt(0);
@@ -344,16 +344,16 @@ void FullyConnectedLayer_1::transition(CodeGenStateMachine *Mch,
       Mch->addArgument(II->getOperand(1)); // bias
 
       Mch->setCurrent(new FullyConnectedLayer_2());
-      errs() << "TO FULLY CONNECTED LAYER 2\n";
+      DEBUG(errs() << "TO FULLY CONNECTED LAYER 2\n");
     } break;
     default:
       Mch->setCurrent(new NoPattern());
-      errs() << "TO NO PATERN\n";
+      DEBUG(errs() << "TO NO PATTERN\n");
       break;
     }
   } else {
     Mch->setCurrent(new NoPattern());
-    errs() << "TO NO PATERN\n";
+    DEBUG(errs() << "TO NO PATTERN\n");
   }
   delete this;
 }
@@ -361,7 +361,7 @@ void FullyConnectedLayer_1::transition(CodeGenStateMachine *Mch,
 void FullyConnectedLayer_2::transition(CodeGenStateMachine *Mch,
                                        IntrinsicInst *II) {
   if (II) { // Not end of instruction stream
-    errs() << "FULLY CONNECTED LAYER 2\n";
+    DEBUG(errs() << "FULLY CONNECTED LAYER 2\n");
     switch (II->getIntrinsicID()) {
     case Intrinsic::hpvm_tensor_tanh: {
       // Type of activation : TanH
@@ -371,7 +371,7 @@ void FullyConnectedLayer_2::transition(CodeGenStateMachine *Mch,
       Mch->addIntrinsicInst(II);
 
       Mch->setCurrent(new FullyConnectedLayer_3());
-      errs() << "TO FULLY CONNECTED LAYER 3\n";
+      DEBUG(errs() << "TO FULLY CONNECTED LAYER 3\n");
     } break;
     case Intrinsic::hpvm_tensor_relu: {
       // Type of activation : ReLU
@@ -381,7 +381,7 @@ void FullyConnectedLayer_2::transition(CodeGenStateMachine *Mch,
       Mch->addIntrinsicInst(II);
 
       Mch->setCurrent(new FullyConnectedLayer_3());
-      errs() << "TO FULLY CONNECTED LAYER 3\n";
+      DEBUG(errs() << "TO FULLY CONNECTED LAYER 3\n");
     } break;
     case Intrinsic::hpvm_tensor_clipped_relu: {
       // Type of activation : Clipped ReLU
@@ -391,11 +391,11 @@ void FullyConnectedLayer_2::transition(CodeGenStateMachine *Mch,
       Mch->addIntrinsicInst(II);
 
       Mch->setCurrent(new FullyConnectedLayer_3());
-      errs() << "TO FULLY CONNECTED LAYER 3\n";
+      DEBUG(errs() << "TO FULLY CONNECTED LAYER 3\n");
     } break;
     default: // No activation, but HPVM intrinsic
       Mch->setCurrent(new NoPattern());
-      errs() << "TO NO PATTERN\n";
+      DEBUG(errs() << "TO NO PATTERN\n");
       break;
     }
   } else { // End of instruction stream
@@ -404,7 +404,7 @@ void FullyConnectedLayer_2::transition(CodeGenStateMachine *Mch,
         ConstantInt::get(Type::getInt32Ty(Mch->getModule()->getContext()), -1));
 
     Mch->setCurrent(new FullyConnectedLayer());
-    errs() << "TO FULLY CONNECTED LAYER\n";
+    DEBUG(errs() << "TO FULLY CONNECTED LAYER\n");
   }
   delete this;
 }
@@ -412,12 +412,12 @@ void FullyConnectedLayer_2::transition(CodeGenStateMachine *Mch,
 void FullyConnectedLayer_3::transition(CodeGenStateMachine *Mch,
                                        IntrinsicInst *II) {
   if (!II) { // End of instruction stream
-    errs() << "FULLY CONNECTED LAYER 3\n";
+    DEBUG(errs() << "FULLY CONNECTED LAYER 3\n");
     Mch->setCurrent(new FullyConnectedLayer());
-    errs() << "TO FULLY CONNECTED LAYER\n";
+    DEBUG(errs() << "TO FULLY CONNECTED LAYER\n");
   } else {
     Mch->setCurrent(new NoPattern());
-    errs() << "TO NO PATTERN\n";
+    DEBUG(errs() << "TO NO PATTERN\n");
   }
   delete this;
 }
@@ -425,18 +425,18 @@ void FullyConnectedLayer_3::transition(CodeGenStateMachine *Mch,
 void FullyConnectedLayer::transition(CodeGenStateMachine *Mch,
                                      IntrinsicInst *II) {
   if (II) { // Not end of instruction stream
-    errs() << "FULLY CONNECTED LAYER\n";
+    DEBUG(errs() << "FULLY CONNECTED LAYER\n");
     Mch->setCurrent(new NoPattern());
-    errs() << "TO NO PATTERN\n";
+    DEBUG(errs() << "TO NO PATTERN\n");
     delete this;
   }
-  errs() << "TO NO CHANGE\n";
+  DEBUG(errs() << "TO NO CHANGE\n");
 }
 
 void ConvolutionLayer_1::transition(CodeGenStateMachine *Mch,
                                     IntrinsicInst *II) {
   if (II) { // Not end of instruction stream
-    errs() << "CONVOLUTION LAYER 1\n";
+    DEBUG(errs() << "CONVOLUTION LAYER 1\n");
     switch (II->getIntrinsicID()) {
     case Intrinsic::hpvm_tensor_add: {
       IntrinsicInst *ConvII = Mch->getIntrinsicInstAt(0);
@@ -452,11 +452,11 @@ void ConvolutionLayer_1::transition(CodeGenStateMachine *Mch,
       Mch->addArgument(ConvII->getOperand(5)); // 4th numeric arg of conv
 
       Mch->setCurrent(new ConvolutionLayer_2());
-      errs() << "TO CONVOLUTION LAYER 2\n";
+      DEBUG(errs() << "TO CONVOLUTION LAYER 2\n");
     } break;
     default:
       Mch->setCurrent(new NoPattern());
-      errs() << "TO NO PATTERN\n";
+      DEBUG(errs() << "TO NO PATTERN\n");
       break;
     }
   } else {
@@ -497,7 +497,7 @@ void ConvolutionLayer_1::transition(CodeGenStateMachine *Mch,
         ConstantInt::get(Type::getInt32Ty(Mch->getModule()->getContext()), -1));
 
     Mch->setCurrent(new ConvolutionLayer());
-    errs() << "TO CONVOLUTION LAYER\n";
+    DEBUG(errs() << "TO CONVOLUTION LAYER\n");
   }
   delete this;
 }
@@ -505,7 +505,7 @@ void ConvolutionLayer_1::transition(CodeGenStateMachine *Mch,
 void ConvolutionLayer_2::transition(CodeGenStateMachine *Mch,
                                     IntrinsicInst *II) {
   if (II) { // Not end of instruction stream
-    errs() << "CONVOLUTION LAYER 2\n";
+    DEBUG(errs() << "CONVOLUTION LAYER 2\n");
     switch (II->getIntrinsicID()) {
     case Intrinsic::hpvm_tensor_tanh: {
       // Type of activation : TanH
@@ -515,7 +515,7 @@ void ConvolutionLayer_2::transition(CodeGenStateMachine *Mch,
       Mch->addIntrinsicInst(II);
 
       Mch->setCurrent(new ConvolutionLayer_3());
-      errs() << "TO CONVOLUTION LAYER 3\n";
+      DEBUG(errs() << "TO CONVOLUTION LAYER 3\n");
     } break;
     case Intrinsic::hpvm_tensor_relu: {
       // Type of activation : ReLU
@@ -525,7 +525,7 @@ void ConvolutionLayer_2::transition(CodeGenStateMachine *Mch,
       Mch->addIntrinsicInst(II);
 
       Mch->setCurrent(new ConvolutionLayer_3());
-      errs() << "TO CONVOLUTION LAYER 3\n";
+      DEBUG(errs() << "TO CONVOLUTION LAYER 3\n");
     } break;
     case Intrinsic::hpvm_tensor_clipped_relu: {
       // Type of activation : Clipped ReLU
@@ -535,7 +535,7 @@ void ConvolutionLayer_2::transition(CodeGenStateMachine *Mch,
       Mch->addIntrinsicInst(II);
 
       Mch->setCurrent(new ConvolutionLayer_3());
-      errs() << "TO CONVOLUTION LAYER 3\n";
+      DEBUG(errs() << "TO CONVOLUTION LAYER 3\n");
     } break;
     case Intrinsic::hpvm_tensor_pool_max: {
       // pool max
@@ -552,7 +552,7 @@ void ConvolutionLayer_2::transition(CodeGenStateMachine *Mch,
       Mch->addIntrinsicInst(II);
 
       Mch->setCurrent(new ConvolutionLayer_4());
-      errs() << "TO CONVOLUTION LAYER 4\n";
+      DEBUG(errs() << "TO CONVOLUTION LAYER 4\n");
     } break;
     case Intrinsic::hpvm_tensor_pool_min: {
       // pool min FIXME: 2: supported?
@@ -569,7 +569,7 @@ void ConvolutionLayer_2::transition(CodeGenStateMachine *Mch,
       Mch->addIntrinsicInst(II);
 
       Mch->setCurrent(new ConvolutionLayer_4());
-      errs() << "TO CONVOLUTION LAYER 4\n";
+      DEBUG(errs() << "TO CONVOLUTION LAYER 4\n");
     } break;
     case Intrinsic::hpvm_tensor_pool_mean: {
       // pool mean
@@ -586,11 +586,11 @@ void ConvolutionLayer_2::transition(CodeGenStateMachine *Mch,
       Mch->addIntrinsicInst(II);
 
       Mch->setCurrent(new ConvolutionLayer_4());
-      errs() << "TO CONVOLUTION LAYER 4\n";
+      DEBUG(errs() << "TO CONVOLUTION LAYER 4\n");
     } break;
     default: // No activation, No pooling, but HPVM intrinsic
       Mch->setCurrent(new NoPattern());
-      errs() << "TO NO PATTERN\n";
+      DEBUG(errs() << "TO NO PATTERN\n");
       break;
     }
   } else { // End of instruction stream
@@ -607,7 +607,7 @@ void ConvolutionLayer_2::transition(CodeGenStateMachine *Mch,
         ConstantInt::get(Type::getInt32Ty(Mch->getModule()->getContext()), -1));
 
     Mch->setCurrent(new ConvolutionLayer());
-    errs() << "TO CONVOLUTION LAYER\n";
+    DEBUG(errs() << "TO CONVOLUTION LAYER\n");
   }
   delete this;
 }
@@ -615,7 +615,7 @@ void ConvolutionLayer_2::transition(CodeGenStateMachine *Mch,
 void ConvolutionLayer_3::transition(CodeGenStateMachine *Mch,
                                     IntrinsicInst *II) {
   if (II) { // Not end of instruction stream
-    errs() << "CONVOLUTION LAYER 3\n";
+    DEBUG(errs() << "CONVOLUTION LAYER 3\n");
     switch (II->getIntrinsicID()) {
     case Intrinsic::hpvm_tensor_pool_max: {
       // pool max
@@ -644,7 +644,7 @@ void ConvolutionLayer_3::transition(CodeGenStateMachine *Mch,
       }
 
       Mch->setCurrent(new ConvolutionLayer_4());
-      errs() << "TO CONVOLUTION LAYER 4\n";
+      DEBUG(errs() << "TO CONVOLUTION LAYER 4\n");
     } break;
     case Intrinsic::hpvm_tensor_pool_min: {
       // pool min FIXME: 2: supported?
@@ -674,7 +674,7 @@ void ConvolutionLayer_3::transition(CodeGenStateMachine *Mch,
       }
 
       Mch->setCurrent(new ConvolutionLayer_4());
-      errs() << "TO CONVOLUTION LAYER 4\n";
+      DEBUG(errs() << "TO CONVOLUTION LAYER 4\n");
     } break;
     case Intrinsic::hpvm_tensor_pool_mean: {
       // pool max
@@ -703,11 +703,11 @@ void ConvolutionLayer_3::transition(CodeGenStateMachine *Mch,
       }
 
       Mch->setCurrent(new ConvolutionLayer_4());
-      errs() << "TO CONVOLUTION LAYER 4\n";
+      DEBUG(errs() << "TO CONVOLUTION LAYER 4\n");
     } break;
     default: // No pooling, but HPVM intrinsic
       Mch->setCurrent(new NoPattern());
-      errs() << "TO NO PATTERN\n";
+      DEBUG(errs() << "TO NO PATTERN\n");
       break;
     }
   } else { // End of instruction stream
@@ -736,7 +736,7 @@ void ConvolutionLayer_3::transition(CodeGenStateMachine *Mch,
     }
 
     Mch->setCurrent(new ConvolutionLayer());
-    errs() << "TO CONVOLUTION LAYER\n";
+    DEBUG(errs() << "TO CONVOLUTION LAYER\n");
   }
   delete this;
 }
@@ -744,24 +744,24 @@ void ConvolutionLayer_3::transition(CodeGenStateMachine *Mch,
 void ConvolutionLayer_4::transition(CodeGenStateMachine *Mch,
                                     IntrinsicInst *II) {
   if (!II) { // End of instruction stream
-    errs() << "CONVOLUTION LAYER 4\n";
+    DEBUG(errs() << "CONVOLUTION LAYER 4\n");
     Mch->setCurrent(new ConvolutionLayer());
-    errs() << "TO CONVOLUTION LAYER\n";
+    DEBUG(errs() << "TO CONVOLUTION LAYER\n");
   } else {
     Mch->setCurrent(new NoPattern());
-    errs() << "TO NO PATTERN\n";
+    DEBUG(errs() << "TO NO PATTERN\n");
   }
   delete this;
 }
 
 void ConvolutionLayer::transition(CodeGenStateMachine *Mch, IntrinsicInst *II) {
   if (II) { // Not end of instruction stream
-    errs() << "CONVOLUTION LAYER\n";
+    DEBUG(errs() << "CONVOLUTION LAYER\n");
     Mch->setCurrent(new NoPattern());
-    errs() << "TO NO PATTERN\n";
+    DEBUG(errs() << "TO NO PATTERN\n");
     delete this;
   }
-  errs() << "NO CHANGE\n";
+  DEBUG(errs() << "NO CHANGE\n");
 }
 
 void NoPattern::transition(CodeGenStateMachine *Mch, IntrinsicInst *II) {}
@@ -779,8 +779,8 @@ void CodeGenStateMachine::codeGen(
     DFNode *N, Function *F, const StringRef &strRef,
     InPlaceDFGAnalysis::InPlaceDFGParameter &IPP) {
 
-  errs() << "TRANSITIONTED TO: " << std::to_string(current->getStateID())
-         << "\n";
+  DEBUG(errs() << "TRANSITIONED TO: " << std::to_string(current->getStateID())
+               << "\n");
   assert(
       ((current->getStateID() == AbstractState::ID::FULLY_CONNECTED_LAYER) ||
        (current->getStateID() == AbstractState::ID::CONVOLUTION_LAYER) ||
@@ -894,7 +894,7 @@ void CodeGenStateMachine::codeGen(
            "Unexpected arguments found in coge gen state machine.\n");
     IntrinsicInst *TensorII = IIs[0];
 
-    errs() << "TensorII: " << *TensorII << "\n";
+    DEBUG(errs() << "TensorII: " << *TensorII << "\n");
 
     switch (TensorII->getIntrinsicID()) {
     case Intrinsic::
@@ -1330,8 +1330,8 @@ void CGT_WrapperAPI::initRuntimeAPI() {
 }
 
 void CGT_WrapperAPI::codeGen(DFInternalNode *N) {
-  errs() << "Inside node: " << N->getFuncPointer()->getName() << "\n";
-  errs() << "Skipping internal node\n";
+  DEBUG(errs() << "Inside node: " << N->getFuncPointer()->getName() << "\n");
+  DEBUG(errs() << "Skipping internal node\n");
 }
 
 void CGT_WrapperAPI::codeGen(DFLeafNode *N) {
@@ -1350,11 +1350,11 @@ void CGT_WrapperAPI::codeGen(DFLeafNode *N) {
 
   // Increment the node ID, for current node.
   ++nodeID;
-  errs() << "Node ID string: " << StringRef(std::to_string(nodeID)) << "\n";
+  DEBUG(errs() << "Node ID string: " << StringRef(std::to_string(nodeID)) << "\n");
 
   // Get the function associated with the dataflow node
   Function *F = N->getFuncPointer();
-  errs() << "Node Function: " << *F << "\n";
+  DEBUG(errs() << "Node Function: " << *F << "\n");
   // Look up if we have visited this function before. If we have, then just
   // get the cloned function pointer from DFNode. Otherwise, create the cloned
   // function and add it to the DFNode GenFunc.
@@ -1418,10 +1418,10 @@ void CGT_WrapperAPI::codeGen(DFLeafNode *N) {
   for (inst_iterator i = inst_begin(F_wrapper_api), e = inst_end(F_wrapper_api);
        i != e; ++i) {
     Instruction *I = &(*i);
-    errs() << "PRINT INST: " << *I << "\n";
+    DEBUG(errs() << "PRINT INST: " << *I << "\n");
     CGM.transition(dyn_cast<IntrinsicInst>(I));
   }
-  errs() << "CLONED FUNCTION: " << *F_wrapper_api << "\n";
+  DEBUG(errs() << "CLONED FUNCTION: " << *F_wrapper_api << "\n");
   // errs() << "Node ID string: "<< StringRef(std::to_string(nodeID)) << "\n";
   // CGM.codeGen(N, F_wrapper_api, N->getFuncPointer()->getName(), *IPP);
   CGM.codeGen(N, F_wrapper_api, StringRef(std::to_string(nodeID)), *IPP);
@@ -1431,7 +1431,7 @@ void CGT_WrapperAPI::codeGen(DFLeafNode *N) {
 
 bool DFG2LLVM_WrapperAPI::runOnModule(Module &M) {
 
-  errs() << "\nDFG2LLVM_WrapperAPI PASS\n";
+  DEBUG(errs() << "\nDFG2LLVM_WrapperAPI PASS\n");
   // Get the BuildDFG Analysis Results:
   // - Dataflow graph
   BuildDFG &DFG = getAnalysis<BuildDFG>();
diff --git a/hpvm/lib/Transforms/FuseHPVMTensorNodes/FuseHPVMTensorNodes.cpp b/hpvm/lib/Transforms/FuseHPVMTensorNodes/FuseHPVMTensorNodes.cpp
index 5117cc23d3..616b8a9b57 100644
--- a/hpvm/lib/Transforms/FuseHPVMTensorNodes/FuseHPVMTensorNodes.cpp
+++ b/hpvm/lib/Transforms/FuseHPVMTensorNodes/FuseHPVMTensorNodes.cpp
@@ -101,7 +101,7 @@ static IntrinsicInst *isValidHPVMTensorNode(DFNode *N) {
     if (dyn_cast<IntrinsicInst>(&*I)) {
       II = dyn_cast<IntrinsicInst>(&*I);
       if ((II->getCalledFunction()->getName()).startswith("llvm.hpvm.tensor")) {
-        errs() << "** Tensor Intrinsic = " << *II << "\n";
+        DEBUG(errs() << "** Tensor Intrinsic = " << *II << "\n");
       }
     }
   }
@@ -134,7 +134,7 @@ static DFNode *findNextNodeInSequence(DFNode *SrcN) {
     if (!DstN)
       DstN = N;
     if (DstN != N) {
-      errs() << "Found different destination nodes: no node sequence.\n";
+      DEBUG(errs() << "Found different destination nodes: no node sequence.\n");
       return NULL;
     }
   }
@@ -767,7 +767,7 @@ void FuseHPVMTensorNodes::FuseHPVMTensorNodeSequence(
   }
 
   if (IIs.size() < 2) {
-    errs() << "Warning: Attempted to fuse fewer than 2 nodes\n";
+    DEBUG(errs() << "Warning: Attempted to fuse fewer than 2 nodes\n");
     return;
   }
 
@@ -792,17 +792,17 @@ void FuseHPVMTensorNodes::run(Module &M, FusionTargets &FTs) {
 // Print Fusion Targets. The argument vector contains createNode intrinsics
 // of nodes to be fused).
 void FuseHPVMTensorNodes::printFusionTargets(FusionTargets &FTs) {
-  errs() << "Print Fusion Targets\n";
-  errs() << "Found " << FTs.size() << " targets\n";
+  DEBUG(errs() << "Print Fusion Targets\n");
+  DEBUG(errs() << "Found " << FTs.size() << " targets\n");
   for (FuseHPVMTensorNodes::FusionTargets::iterator ii = FTs.begin(),
                                                     ie = FTs.end();
        ii != ie; ++ii) {
-    errs() << "Target:\n";
+    DEBUG(errs() << "Target:\n");
     std::vector<IntrinsicInst *> IIv = *ii;
     for (std::vector<IntrinsicInst *>::iterator pi = IIv.begin(),
                                                 pe = IIv.end();
          pi != pe; ++pi) {
-      errs() << "\t" << *((*pi)->getOperand(0)) << "\n";
+      DEBUG(errs() << "\t" << *((*pi)->getOperand(0)) << "\n");
     }
   }
   return;
@@ -817,19 +817,19 @@ void FindFusionTargetsTraversal::codeGen(DFInternalNode *N) {
 void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
   DEBUG(errs() << "Inside leaf node: " << N->getFuncPointer()->getName()
                << "\n");
-  errs() << "FUSE TARGETS AT LEAF NODE\n";
+  DEBUG(errs() << "FUSE TARGETS AT LEAF NODE\n");
   // Skip fusion check if it is a dummy node
   if (N->isDummyNode()) {
     DEBUG(errs() << "Skipping dummy node\n");
     return;
   }
-  errs() << "THIS IS NOT A DUMMY NODE\n";
-  errs() << "INTRINSIC: " << *isValidHPVMTensorNode(N) << "\n";
+  DEBUG(errs() << "THIS IS NOT A DUMMY NODE\n");
+  DEBUG(errs() << "INTRINSIC: " << *isValidHPVMTensorNode(N) << "\n");
   if (!preferredTargetIncludes(N, hpvm::TENSOR_TARGET)) {
     // Only fuse if we plan to target PROMISE/Layers API
     // The CUDNN backend would be able to generate calls for the fused node,
     // but not the other way around
-    errs() << "NO PROMISE HINT. SKIPPING NODE.\n";
+    DEBUG(errs() << "NO PROMISE HINT. SKIPPING NODE.\n");
     DEBUG(errs() << "No PROMISE hint. Skipping node: "
                  << N->getFuncPointer()->getName() << "\n");
     return;
@@ -852,7 +852,7 @@ void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
     */
 
   case Intrinsic::hpvm_tensor_convolution: {
-    errs() << "INSTRUCTION: " << *II << "\n";
+    DEBUG(errs() << "INSTRUCTION: " << *II << "\n");
 
     // Found beginning of pattern conv-bias-activation-pooling.
     // Look for the rest
@@ -861,20 +861,20 @@ void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
     // Look for bias
     DFNode *SN = findNextNodeInSequence(N);
     if (!SN) {
-      errs() << "DID NOT FIND ADD IN NODE SEQUENCE\n";
+      DEBUG(errs() << "DID NOT FIND ADD IN NODE SEQUENCE\n");
       return; // Did not find a node sequence starting at N. Simpy return.
     }
     if (getPreferredTarget(SN) != StartNodePreferredTarget) {
-      errs() << "NODE IN SEQUENCE HAS DIFFERENT HINT\n";
+      DEBUG(errs() << "NODE IN SEQUENCE HAS DIFFERENT HINT\n");
       return; // Node in sequence has different hint. Simpy return.
     }
     IntrinsicInst *SII = isValidHPVMTensorNode(SN);
     if (SII->getIntrinsicID() != Intrinsic::hpvm_tensor_add) {
-      errs() << "SUCCESSOR IS NOT A BIAS OPERATION\n";
+      DEBUG(errs() << "SUCCESSOR IS NOT A BIAS OPERATION\n");
       // Successor is not the bias operation, thus does not fit the pattern.
       return;
     }
-    errs() << "SUCCESSOR IS A BIAS OPERATION\n";
+    DEBUG(errs() << "SUCCESSOR IS A BIAS OPERATION\n");
     // Otherwise, push this node to the current sequence
     CurrentNodeSequence.push_back(SN->getInstruction());
 
@@ -884,15 +884,15 @@ void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
     // tanh)
     SN = findNextNodeInSequence(SN);
     if (!SN) {
-      errs() << "DID NOT FIND POOLING AND ACTIVATION NODE SEQUENCE\n";
+      DEBUG(errs() << "DID NOT FIND POOLING AND ACTIVATION NODE SEQUENCE\n");
       // Did not find a node sequence starting at N.Use current sequence.
       break;
     }
     if (getPreferredTarget(SN) != StartNodePreferredTarget) {
-      errs() << "NODE IN SEQUENCE HAS DIFFERENT HINT\n";
+      DEBUG(errs() << "NODE IN SEQUENCE HAS DIFFERENT HINT\n");
       break; // Node in sequence has different hint. Use current sequence.
     }
-    errs() << "SUCCESSOR IS A ACTIVATION OR POOLING  OPERATION\n";
+    DEBUG(errs() << "SUCCESSOR IS AN ACTIVATION OR POOLING OPERATION\n");
     SII = isValidHPVMTensorNode(SN);
 
     if ((SII->getIntrinsicID() == Intrinsic::hpvm_tensor_clipped_relu) ||
@@ -900,15 +900,15 @@ void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
         (SII->getIntrinsicID() == Intrinsic::hpvm_tensor_tanh)) {
       // Successor is activation. Push this node to the current sequence.
       CurrentNodeSequence.push_back(SN->getInstruction());
-      errs() << "SUCCESSOR IS AN ACTIVATION OPERATION\n";
+      DEBUG(errs() << "SUCCESSOR IS AN ACTIVATION OPERATION\n");
       // Will continue, looking for pooling in the next node
       SN = findNextNodeInSequence(SN);
       if (!SN) {
-        errs() << "DID NOT FIND POOLING NODE SEQUENCE\n";
+        DEBUG(errs() << "DID NOT FIND POOLING NODE SEQUENCE\n");
         break; // No node in sequence. Use currently found sequence.
       }
       if (getPreferredTarget(SN) != StartNodePreferredTarget) {
-        errs() << "NODE IN SEQUENCE HAS DIFFERENT HINT\n";
+        DEBUG(errs() << "NODE IN SEQUENCE HAS DIFFERENT HINT\n");
         break; // Node in sequence has different hint. Use current sequence.
       }
       SII = isValidHPVMTensorNode(SN);
@@ -917,7 +917,7 @@ void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
     if ((SII->getIntrinsicID() == Intrinsic::hpvm_tensor_pool_max) ||
         (SII->getIntrinsicID() == Intrinsic::hpvm_tensor_pool_min) ||
         (SII->getIntrinsicID() == Intrinsic::hpvm_tensor_pool_mean)) {
-      errs() << "SUCCESSOR IS A POOLING OPERATION\n";
+      DEBUG(errs() << "SUCCESSOR IS A POOLING OPERATION\n");
       // Successor is a pool operation. Use currently found sequence.
       CurrentNodeSequence.push_back(SN->getInstruction());
     }
@@ -928,20 +928,20 @@ void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
     // Look for bias
     DFNode *SN = findNextNodeInSequence(N);
     if (!SN) {
-      errs() << "DID NOT FIND ADD IN NODE SEQUENCE\n";
+      DEBUG(errs() << "DID NOT FIND ADD IN NODE SEQUENCE\n");
       return; // Did not find a node sequence starting at N. Simpy return.
     }
     if (getPreferredTarget(SN) != StartNodePreferredTarget) {
-      errs() << "HINT DO NOT MATCH IN NODE SEQUENCE\n";
+      DEBUG(errs() << "HINTS DO NOT MATCH IN NODE SEQUENCE\n");
       return; // Node in sequence has different hint. Simpy return.
     }
     IntrinsicInst *SII = isValidHPVMTensorNode(SN);
     if (SII->getIntrinsicID() != Intrinsic::hpvm_tensor_add) {
-      errs() << "SUCCESSOR IS NOT IS BIAS OPERATION\n";
+      DEBUG(errs() << "SUCCESSOR IS NOT A BIAS OPERATION\n");
       // Successor is not the bias operation, thus does not fit the pattern.
       return;
     }
-    errs() << "SUCCESSOR IS BIAS OPERATION\n";
+    DEBUG(errs() << "SUCCESSOR IS A BIAS OPERATION\n");
     // Otherwise, push this node to the current sequence
     CurrentNodeSequence.push_back(SN->getInstruction());
     // This is a possible fuse target, gemm-add.
@@ -958,7 +958,7 @@ void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
         if ((SII->getIntrinsicID() == Intrinsic::hpvm_tensor_clipped_relu) ||
             (SII->getIntrinsicID() == Intrinsic::hpvm_tensor_relu) ||
             (SII->getIntrinsicID() == Intrinsic::hpvm_tensor_tanh)) {
-          errs() << "SUCCESSOR IS ACTIVATION OPERATION\n";
+          DEBUG(errs() << "SUCCESSOR IS ACTIVATION OPERATION\n");
           // We found activation in sequence. Push in vector as well.
           CurrentNodeSequence.push_back(SN->getInstruction());
         }
@@ -980,7 +980,7 @@ void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
 
 bool FuseHPVMTensorNodesWrapper::runOnModule(Module &M) {
 
-  errs() << "\nFUSE HPVM TENSOR NODES PASS\n";
+  DEBUG(errs() << "\nFUSE HPVM TENSOR NODES PASS\n");
   // Get the BuildDFG Analysis Results:
   // - Dataflow graph
   BuildDFG &DFG = getAnalysis<BuildDFG>();
@@ -993,7 +993,7 @@ bool FuseHPVMTensorNodesWrapper::runOnModule(Module &M) {
   // Visit each DFG only once
   std::set<Function *> Visited;
 
-  errs() << "Find targets\n";
+  DEBUG(errs() << "Find targets\n");
   // Iterate over all the DFGs and produce code for each one of them
   for (auto rootNode : Roots) {
 
@@ -1007,7 +1007,7 @@ bool FuseHPVMTensorNodesWrapper::runOnModule(Module &M) {
     Visited.insert(rootFunc);
   }
 
-  errs() << "Finished visiting DFGs ...\n";
+  DEBUG(errs() << "Finished visiting DFGs ...\n");
   FuseHPVMTensorNodes::FusionTargets &FTs = FTTVisitor->getFusionTargets();
 
   FuseHPVMTensorNodes Fuse;
diff --git a/hpvm/lib/Transforms/GenHPVM/GenHPVM.cpp b/hpvm/lib/Transforms/GenHPVM/GenHPVM.cpp
index 12f6abc340..eda655e319 100644
--- a/hpvm/lib/Transforms/GenHPVM/GenHPVM.cpp
+++ b/hpvm/lib/Transforms/GenHPVM/GenHPVM.cpp
@@ -382,7 +382,7 @@ bool GenHPVM::runOnModule(Module &M) {
         assert(isa<ConstantInt>(CI->getArgOperand(0)) &&
                "Argument to hint must be constant integer!");
         ConstantInt *hint = cast<ConstantInt>(CI->getArgOperand(0));
-        errs() << "HINT INSTRUCTION: " << *I << "\n";
+        DEBUG(errs() << "HINT INSTRUCTION: " << *I << "\n");
         hpvm::Target t = (hpvm::Target)hint->getZExtValue();
         addHint(CI->getParent()->getParent(), t);
         DEBUG(errs() << "Found hpvm hint call: " << *CI << "\n");
diff --git a/hpvm/lib/Transforms/InPlaceDFG/InPlaceDFGAnalysis.cpp b/hpvm/lib/Transforms/InPlaceDFG/InPlaceDFGAnalysis.cpp
index db5a1f5fe0..dcef54fb26 100644
--- a/hpvm/lib/Transforms/InPlaceDFG/InPlaceDFGAnalysis.cpp
+++ b/hpvm/lib/Transforms/InPlaceDFG/InPlaceDFGAnalysis.cpp
@@ -152,7 +152,7 @@ bool InPlaceDFGAnalysisWrapper::runOnModule(Module &M) {
 /*** Methods of InPlaceDFGAnalysis ***/
 void InPlaceDFGAnalysis::run(Module &M, BuildDFG &DFG, InPlaceDFGParameter &IPP) {
 
-  errs() << "\nIN PLACE ANALYSIS PASS\n";
+  DEBUG(errs() << "\nIN PLACE ANALYSIS PASS\n");
 
   std::vector<DFInternalNode*> Roots = DFG.getRoots();
 
-- 
GitLab