diff --git a/hpvm/projects/onnx/frontend/operators.py b/hpvm/projects/onnx/frontend/operators.py
index 743c9761c8485d258f1e0b8d757e73b66b701539..f5851e8955540ec74768b88bb2125dc25bf82527 100644
--- a/hpvm/projects/onnx/frontend/operators.py
+++ b/hpvm/projects/onnx/frontend/operators.py
@@ -32,7 +32,39 @@ class DepthwiseConv2DNode(DFGNode):
         print("\t", self.strides)
         print("\tPadding = ", self.padding)
     def codegen(self):
-        pass
+        input_var_name = self.getSingleInputName(self)  # cur_node -> self: codegen now lives on the node
+        weights = self.weights
+        strides = self.strides
+        padding = 0
+        if self.padding.strip() == "valid":
+            padding = 0
+        else:
+            # "same" padding: symmetric pad derived from the kernel size
+            padding = int((weights.shape[0] - 1) / 2)
+        prev_padding = self.getPrevLayerPadding(self)
+        if prev_padding is not None:
+            # FIXME: currently only supporting symmetric padding
+            padding = prev_padding[0][0]
+
+        out_var_name1 = self.layer_name + "_out"  # FIXME(review): confirm output-variable naming scheme
+        inst_str = "void* " + out_var_name1 + " = "
+        inst_str += "tensorConvolution(" + input_var_name + ", "
+        inst_str += self.layer_name + "_w, "
+        inst_str += str(padding) + ", "
+        inst_str += str(padding) + ", "
+        inst_str += str(strides[0]) + ", "
+        inst_str += str(strides[1]) + ", "
+        inst_str += "1, "
+        if type(self).__name__ == "DepthwiseConv2DNode":  # layer_type was undefined; dispatch on the class
+            C = weights.shape[2]
+            inst_str += str(C) + "); \n"
+        else:
+            inst_str += "1); \n"
+        if strides[0] > 1 and self.padding.strip() == "same":
+            print("!ERROR: Same Padding not supported for Conv with Stride > 1")
+            print("Use: ZeroPadding2D(padding=(" + str(padding) + "," + str(padding) + "));\n")
+            sys.exit(1)  # NOTE(review): nonzero exit on error; needs `import sys` at module top -- confirm
+        return inst_str
+
 class DenseNode(DFGNode):
     def __init__(self, layer):
         DFGNode.__init__(self, layer)
@@ -40,7 +72,14 @@ class DenseNode(DFGNode):
         print("\t", self.weights.shape)
         self.use_bias = layer.use_bias
     def codegen(self):
-        pass
+        input_var_name = self.getSingleInputName(self)  # cur_node -> self: codegen now lives on the node
+        out_var_name1 = self.layer_name + "_out"  # FIXME(review): confirm output-variable naming scheme
+        inst_str = "void* " + out_var_name1 + " = "
+        inst_str += "tensorGemmGPU(" + input_var_name + ", "
+        inst_str += self.layer_name + "_w"
+        inst_str += "); \n"
+        return inst_str
+
 class MaxPool2DNode(DFGNode):
     def __init__(self, layer):
         DFGNode.__init__(self, layer)
@@ -76,4 +115,12 @@ class BatchNormalizationNode(DFGNode):
         self.moving_mean = layer.moving_mean
         self.moving_variance = layer.moving_variance
     def codegen(self):
-        pass
+        input_var_name = self.getSingleInputName(self)  # cur_node -> self: codegen now lives on the node
+        out_var_name1 = self.layer_name + "_out"  # FIXME(review): confirm output-variable naming scheme
+        inst_str = "void* " + out_var_name1 + " = "
+        inst_str += "tensorBatchNorm(" + input_var_name + ", "
+        inst_str += self.layer_name + "_gamma, " + self.layer_name + "_beta, "
+        inst_str += self.layer_name + "_mean, " + self.layer_name + "_variance, "
+        inst_str += str(self.epsilon)
+        inst_str += "); \n"
+        return inst_str