diff --git a/llvm/include/llvm/IR/IntrinsicsVISC.td b/llvm/include/llvm/IR/IntrinsicsVISC.td
index 22b84c0ace3be26914a6820be33b8ce3667d476e..c6ce86c504efc6a56b3f6888977265335d5cc31e 100644
--- a/llvm/include/llvm/IR/IntrinsicsVISC.td
+++ b/llvm/include/llvm/IR/IntrinsicsVISC.td
@@ -245,6 +245,11 @@ let TargetPrefix = "visc" in {
    */
   def int_visc_tensor_relu : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
 
+  /* Tensor tanh intrinsic
+   * i8* llvm.visc.tensor.tanh(i8*);
+   */
+  def int_visc_tensor_tanh : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+
   /* Tensor softmax intrinsic
    * i8* llvm.visc.tensor.softmax(i8*);
    */
@@ -279,12 +284,12 @@ let TargetPrefix = "visc" in {
                                                            llvm_i32_ty,
                                                            llvm_i32_ty,
                                                            llvm_i32_ty], []>;
-  def int_visc_tensor_pool_average : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
-                                                              llvm_i32_ty,
-                                                              llvm_i32_ty,
-                                                              llvm_i32_ty,
-                                                              llvm_i32_ty,
-                                                              llvm_i32_ty,
-                                                              llvm_i32_ty], []>;
+  def int_visc_tensor_pool_mean : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                                            llvm_i32_ty,
+                                                            llvm_i32_ty,
+                                                            llvm_i32_ty,
+                                                            llvm_i32_ty,
+                                                            llvm_i32_ty,
+                                                            llvm_i32_ty], []>;
 
 }
diff --git a/llvm/lib/Transforms/FuseHPVMTensorNodes/FuseHPVMTensorNodes.cpp b/llvm/lib/Transforms/FuseHPVMTensorNodes/FuseHPVMTensorNodes.cpp
index 0597f7878659096e582c98377660edfecac9439b..06916b969c1c88ddf6e824dee9fcfa89baccd8aa 100644
--- a/llvm/lib/Transforms/FuseHPVMTensorNodes/FuseHPVMTensorNodes.cpp
+++ b/llvm/lib/Transforms/FuseHPVMTensorNodes/FuseHPVMTensorNodes.cpp
@@ -808,7 +808,8 @@ void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
 
   switch(II->getIntrinsicID()) {
     case Intrinsic::visc_tensor_convolution:
-      { // Found beginning of pattern conv-bias-relu-pooling. Look for the rest
+      { // Found beginning of pattern conv-bias-activation-pooling.
+        // Look for the rest
         CurrentNodeSequence.push_back(N->getInstruction());
 
         // Look for bias
@@ -827,43 +828,45 @@ void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
         // Otherwise, push this node to the current sequence
         CurrentNodeSequence.push_back(SN->getInstruction());
 
-        // Continue with next node, looking for relu
+        // This is a valid sequence.
+        // We still need to fuse activation and/or pooling if we find them
+        // Continue with next node, looking for activation (relu, tanh)
         SN = findNextNodeInSequence(SN);
         if (!SN) {
-          return; // Did not find a node sequence starting at N. Simpy return.
+          // Did not find a node sequence starting at N. Use current sequence.
+          break;
         }
         if (getPreferredTarget(SN) != StartNodePreferredTarget) {
-          return; // Node in sequence has different hint. Simpy return.
+          break; // Node in sequence has different hint. Use current sequence.
         }
         SII = isValidHPVMTensorNode(SN);
-        if (SII->getIntrinsicID() != Intrinsic::visc_tensor_relu) {
-          // Successor is not the relu operation, thus does not fit the pattern.
-          return;
-        }
-        // Otherwise, push this node to the current sequence
-        CurrentNodeSequence.push_back(SN->getInstruction());
 
-        // Continue with next node, looking for pooling
-        SN = findNextNodeInSequence(SN);
-        if (!SN) {
-          return; // Did not find a node sequence starting at N. Simpy return.
-        }
-        if (getPreferredTarget(SN) != StartNodePreferredTarget) {
-          return; // Node in sequence has different hint. Simpy return.
-        }
-        SII = isValidHPVMTensorNode(SN);
-        if ((SII->getIntrinsicID() != Intrinsic::visc_tensor_pool_max) &&
-            (SII->getIntrinsicID() != Intrinsic::visc_tensor_pool_min) &&
-            (SII->getIntrinsicID() != Intrinsic::visc_tensor_pool_average)) {
-          // Successor is not a pool operation, thus does not fit the pattern.
-          return;
+        if ((SII->getIntrinsicID() == Intrinsic::visc_tensor_relu) ||
+            (SII->getIntrinsicID() == Intrinsic::visc_tensor_tanh)) {
+          // Successor is activation. Push this node to the current sequence.
+          CurrentNodeSequence.push_back(SN->getInstruction());
+
+          // Will continue, looking for pooling in the next node
+          SN = findNextNodeInSequence(SN);
+          if (!SN) {
+            break; // No node in sequence. Use currently found sequence.
+          }
+          if (getPreferredTarget(SN) != StartNodePreferredTarget) {
+            break; // Node in sequence has different hint. Use current sequence.
+          }
+          SII = isValidHPVMTensorNode(SN);
+        } // else: look for pooling in this (same) node instead
+
+        if ((SII->getIntrinsicID() == Intrinsic::visc_tensor_pool_max) ||
+            (SII->getIntrinsicID() == Intrinsic::visc_tensor_pool_min) ||
+            (SII->getIntrinsicID() == Intrinsic::visc_tensor_pool_mean)) {
+          // Successor is a pool operation. Push it onto the current sequence.
+          CurrentNodeSequence.push_back(SN->getInstruction());
         }
-        // Otherwise, push this node to the current sequence
-        CurrentNodeSequence.push_back(SN->getInstruction());      
       }
       break;
     case Intrinsic::visc_tensor_mul:
-      { // Found beginning of pattern gemm-bias-relu. Look for the rest
+      { // Found beginning of pattern gemm-bias-activation. Look for the rest
         CurrentNodeSequence.push_back(N->getInstruction());
         // Look for bias
         DFNode *SN = findNextNodeInSequence(N);
@@ -884,14 +887,15 @@ void FindFusionTargetsTraversal::codeGen(DFLeafNode *N) {
         // We need to reach the end of the function, where the found sequence
         // is added.
 
-        // If the next operation is a relu, we fuse that as well.
-        // Continue with next node, looking for relu
+        // If the next operation is activation, we fuse that as well.
+        // Continue with next node, looking for activation (relu, tanh)
         SN = findNextNodeInSequence(SN);
         if (SN) {
           if (getPreferredTarget(SN) == StartNodePreferredTarget) {
             SII = isValidHPVMTensorNode(SN);
-            if (SII->getIntrinsicID() == Intrinsic::visc_tensor_relu) {
-              // We found a relu operation in sequence. Push in vector as well.
+            if ((SII->getIntrinsicID() == Intrinsic::visc_tensor_relu) ||
+                (SII->getIntrinsicID() == Intrinsic::visc_tensor_tanh)) {
+              // We found activation in sequence. Push in vector as well.
               CurrentNodeSequence.push_back(SN->getInstruction());
             }
           }