diff --git a/hpvm/projects/onnx/frontend/codegen_hpvm.py b/hpvm/projects/onnx/frontend/codegen_hpvm.py
index 205dad9e8a7bd1e9ff1e42d724405aa57f97d994..a044fb36d1d2e49b8440d71375afeca63f3d1652 100644
--- a/hpvm/projects/onnx/frontend/codegen_hpvm.py
+++ b/hpvm/projects/onnx/frontend/codegen_hpvm.py
@@ -118,7 +118,7 @@ def emit_weights(tensors: Dict[str, Tensor]) -> List[dict]:
         if not isinstance(tensor, WeightTensor):
             continue
         name = make_c_identifier(name)
-        file_path = f"{tensor.get_mapped_name()}_path.bin"
+        file_path = f"{tensor.new_name}_path.bin"
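+        # the weight file itself is written by GraphBuilder.dump_weights() using the same <new_name>_path.bin convention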
         ret.append({"name": name, "shape": tensor.shape, "filename": file_path})
     return ret
 
diff --git a/hpvm/projects/onnx/frontend/graph_builder.py b/hpvm/projects/onnx/frontend/graph_builder.py
index e6088fb86d993fae1b74cae8e3291c8d6abf4096..3e6a3a95fd627eaac7c09f4ef3e2838d38e7eba2 100644
--- a/hpvm/projects/onnx/frontend/graph_builder.py
+++ b/hpvm/projects/onnx/frontend/graph_builder.py
@@ -42,20 +42,14 @@ class GraphBuilder:
         # parse weight
         weight_cnt = 0
         for weight_tensor in onnx_graph.initializer:
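+            # give each initializer a sequential codegen-friendly name ("weight_0", "weight_1", ...)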
-            tensors[weight_tensor.name] = WeightTensor(weight_tensor)
-            tensors[weight_tensor.name].set_mapped_name("weight_" + str(weight_cnt))
+            tensors[weight_tensor.name] = WeightTensor(weight_tensor, f"weight_{weight_cnt}")
             weight_cnt += 1
         # parse input
         input_cnt = 0
-        for i in onnx_graph.input:
-            if i.name in tensors:
+        for input_ in onnx_graph.input:
+            if input_.name in tensors:
                 continue
-            # get type of input tensor
-            tensor_type = i.type.tensor_type
-            # check if it has a shape:
-            shape = tensor_type.shape if tensor_type.HasField("shape") else None
-            tensors[i.name] = InputTensor(i.name, shape)
-            tensors[i.name].set_mapped_name("input_" + str(input_cnt))
+            tensors[input_.name] = InputTensor(input_, f"input_{input_cnt}")
             input_cnt += 1
         return tensors
 
@@ -64,7 +58,7 @@ class GraphBuilder:
         for tensor in self.tensors.values():
             if not isinstance(tensor, WeightTensor):
                 continue
-            tensor.dump_weight(output_dir / (tensor.get_mapped_name() + "_path.bin"))
+            tensor.dump_weight(output_dir / (tensor.new_name + "_path.bin"))
 
 
 class DFG(object):
diff --git a/hpvm/projects/onnx/frontend/graph_ir.py b/hpvm/projects/onnx/frontend/graph_ir.py
index 467ed63ccb6f292c7c7f7bc9f8a1f1c9e3b8d167..43cdf10643dec84e4d344e0e7f3b5ff85d5ab94f 100644
--- a/hpvm/projects/onnx/frontend/graph_ir.py
+++ b/hpvm/projects/onnx/frontend/graph_ir.py
@@ -3,8 +3,12 @@
 ################################################
 
 
-class DFGNode(object):
-    def __init__(self, onnx_node):
+from typing import List
+import onnx
+
+
+class DFGNode:
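+    """Base class for dataflow-graph nodes.
+
+    Subclasses that emit code return a (function_name, argument_list)
+    pair from codegen() and hpvm_codegen().
+    """
+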
+    def __init__(self, onnx_node: onnx.NodeProto):
         self.name = onnx_node.name
         self.op_type = onnx_node.op_type
         self.input = onnx_node.input
@@ -19,41 +23,22 @@ class DFGNode(object):
     def __repr__(self):
         return f"{self.__class__.__name__}({self.input}) -> {self.output}"
 
-"""
-Element wise operators that is for activation function
-e.g. HardSigmoid, LeakyRelu, PRelu, Pow, Reciprocal,
-Relu, Selu, Sigmoid, Softplus, Sqrt, ThresholdedRelu,
-Abs, Ceil, Elu, Floor, Neg
-"""
-class ActivationNode(DFGNode):
-    pass
-
-
-"""
-ELement wise operators that is not for activation function.
-In other words, they are logical comparison operators
-e.g. And, Equal, Greater, GreaterOrEqual, Less, LessOrEqual,
-Or, Xor
-"""
-class LogicalOpNode(DFGNode):
-    pass
 
 ################################################
-# Actually Implementation of Operators
+# Actual Implementation of Operators
 ################################################
 
 
 class AddNode(DFGNode):
-
     def codegen(self):
         return "tensorAdd", []
 
     def hpvm_codegen(self):
         return "__hpvm__tensor_add", []
 
-class BiasAddNode(DFGNode):
 
-    def __init__(self, onnx_conv_node):
+class BiasAddNode(DFGNode):
+    def __init__(self, onnx_conv_node: onnx.NodeProto):
         super().__init__(onnx_conv_node)
         self.op_type = "BiasAdd"
         self.input = list()
@@ -66,6 +51,7 @@ class BiasAddNode(DFGNode):
     def hpvm_codegen(self):
         return "__hpvm__tensor_add", []
 
+
 class MatMulNode(DFGNode):
     def codegen(self):
         return "tensorGemmGPU", []
@@ -83,15 +69,14 @@ class SoftMaxNode(DFGNode):
 
 
 class Conv2DNode(DFGNode):
-
-    def __init__(self, onnx_node):
+    def __init__(self, onnx_node: onnx.NodeProto):
         super().__init__(onnx_node)
         if len(self.input) == 3:
             tmp_input = list()
             for i in self.input:
                 tmp_input.append(i)
             self.input = tmp_input
-            self.input.pop() # remove the last index for bias add
+            self.input.pop()  # drop the last input (the bias); it is emitted as a separate BiasAdd node
         self.padding = 0
         self.strides = list()
         for attr in onnx_node.attribute:
@@ -102,14 +87,20 @@ class Conv2DNode(DFGNode):
                     self.strides.append(stride)
 
     def codegen(self):
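+        # call arguments: [pad, pad, stride_h, stride_w]; padding is assumed symmetric (pads[0])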
-        return "tensorConvolution", [self.padding, self.padding, self.strides[0], self.strides[1]]
+        return (
+            "tensorConvolution",
+            [self.padding, self.padding, self.strides[0], self.strides[1]],
+        )
 
     def hpvm_codegen(self):
-        return "__hpvm__tensor_convolution", [self.padding, self.padding, self.strides[0], self.strides[1]]
+        return (
+            "__hpvm__tensor_convolution",
+            [self.padding, self.padding, self.strides[0], self.strides[1]],
+        )
 
-class MaxPool2DNode(DFGNode):
 
-    def __init__(self, onnx_node):
+class MaxPool2DNode(DFGNode):
+    def __init__(self, onnx_node: onnx.NodeProto):
         super().__init__(onnx_node)
         self.strides = list()
         self.pool_size = list()
@@ -123,17 +114,27 @@ class MaxPool2DNode(DFGNode):
                 for stride in attr.ints:
                     self.strides.append(stride)
 
-
     def codegen(self):
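+        # call arguments: [pool_type, *pool_size, pad, pad, *strides]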
-        return "tensorPooling", [self.pool_type, *self.pool_size, self.padding, self.padding, *self.strides]
+        return (
+            "tensorPooling",
+            [
+                self.pool_type,
+                *self.pool_size,
+                self.padding,
+                self.padding,
+                *self.strides,
+            ],
+        )
 
     def hpvm_codegen(self):
-        return "__hpvm__tensor_pool_max", [*self.pool_size, self.padding, self.padding, *self.strides]
+        return (
+            "__hpvm__tensor_pool_max",
+            [*self.pool_size, self.padding, self.padding, *self.strides],
+        )
 
 
 class AveragePool2DNode(DFGNode):
-
-    def __init__(self, onnx_node):
+    def __init__(self, onnx_node: onnx.NodeProto):
         super().__init__(onnx_node)
         self.strides = list()
         self.pool_size = list()
@@ -148,22 +149,33 @@ class AveragePool2DNode(DFGNode):
                     self.strides.append(stride)
 
     def codegen(self):
-        return "tensorPooling", [self.pool_type, *self.pool_size, self.padding, self.padding, *self.strides]
+        return (
+            "tensorPooling",
+            [
+                self.pool_type,
+                *self.pool_size,
+                self.padding,
+                self.padding,
+                *self.strides,
+            ],
+        )
 
     def hpvm_codegen(self):
-        return "__hpvm__tensor_pool_avg", [*self.pool_size, self.padding, self.padding, *self.strides]
+        return (
+            "__hpvm__tensor_pool_avg",
+            [*self.pool_size, self.padding, self.padding, *self.strides],
+        )
 
 
 class ReluNode(DFGNode):
-
     def codegen(self):
         return "tensorRelu", []
 
     def hpvm_codegen(self):
         return "__hpvm__tensor_relu", []
 
-class TanhNode(DFGNode):
 
+class TanhNode(DFGNode):
     def codegen(self):
         return "tensorTanh", []
 
@@ -172,8 +184,7 @@ class TanhNode(DFGNode):
 
 
 class BatchNormalizationNode(DFGNode):
-
-    def __init__(self, onnx_node):
+    def __init__(self, onnx_node: onnx.NodeProto):
         super().__init__(onnx_node)
         self.epsilon = ""
         for attr in onnx_node.attribute:
@@ -195,20 +206,45 @@ class FlattenNode(DFGNode):
         self.output = output
 
     @classmethod
-    def from_single_node(cls, n):
+    def from_single_node(cls, n: onnx.NodeProto):
         return cls(n.name, n.op_type, n.input, n.output)
 
     @classmethod
-    def from_onnx_idiom(cls, nodes):
-        _, suffix = nodes[0].name.split('_')
-        return cls(f'Flatten_{suffix}', 'Flatten', nodes[0].input, nodes[-1].output)
+    def from_onnx_idiom(cls, nodes: List[onnx.NodeProto]):
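+        # collapse a multi-node ONNX flatten idiom into a single Flatten node,
+        # reusing the numeric suffix of the first node's name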
+        _, suffix = nodes[0].name.split("_")
+        return cls(f"Flatten_{suffix}", "Flatten", nodes[0].input, nodes[-1].output)
+
+
+class ActivationNode(DFGNode):
+    """
+    Element-wise operators that serve as activation functions,
+    e.g. HardSigmoid, LeakyRelu, PRelu, Pow, Reciprocal,
+    Relu, Selu, Sigmoid, Softplus, Sqrt, ThresholdedRelu,
+    Abs, Ceil, Elu, Floor, Neg.
+    """
+
+
+class LogicalOpNode(DFGNode):
+    """
+    Element-wise operators that are not activation functions;
+    in other words, logical comparison operators,
+    e.g. And, Equal, Greater, GreaterOrEqual, Less, LessOrEqual,
+    Or, Xor.
+    """
+
 
 class ZeroPadding2DNode(DFGNode):
     pass
 
+
 class DepthwiseConv2DNode(DFGNode):
     pass
 
+
 class DenseNode(DFGNode):
     pass
 
@@ -216,5 +252,6 @@ class DenseNode(DFGNode):
 class PadNode(DFGNode):
     pass
 
+
 class IdentityNode(DFGNode):
-    pass
\ No newline at end of file
+    pass
diff --git a/hpvm/projects/onnx/frontend/tensor.py b/hpvm/projects/onnx/frontend/tensor.py
index 3bc6eae4f5dd75750a4892e4d13bca92627cf0cc..f313bd779d26e1df79619b457faddc5c41246c19 100644
--- a/hpvm/projects/onnx/frontend/tensor.py
+++ b/hpvm/projects/onnx/frontend/tensor.py
@@ -1,70 +1,46 @@
-import sys
-import os
-from onnx import numpy_helper
+from os import PathLike
 import onnx
-from utils import dumpConvWeights, dumpFcWeights, dumpFcBias
+from onnx import numpy_helper
 
 
 class Tensor(object):
-	def __init__(self, proto):
-		if not proto.name.strip():
-			raise ValueError("Tensor's name is required.")
-		self.name = proto.name
-		self.mapped_name = None
+    def __init__(self, proto: onnx.TensorProto, new_name: str):
+        if not proto.name.strip():
+            raise ValueError("Tensor's name is required.")
+        self.name = proto.name
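+        # new_name: codegen-facing identifier assigned by GraphBuilder, e.g. "weight_0" / "input_0"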
+        self.new_name = new_name
 
-	def set_mapped_name(self, mapped_name):
-		self.mapped_name = mapped_name
+    def __str__(self):
+        return f"{self.__class__.__name__}: {self.name}"
 
-	def get_mapped_name(self):
-		if self.mapped_name != None:
-			return self.mapped_name
-		else:
-			raise ValueError("Var name not mapped before use!")
+    __repr__ = __str__
 
-	def __str__(self):
-		return f"{self.__class__.__name__}: {self.name}"
-	__repr__ = __str__
 
 class InputTensor(Tensor):
-	def __init__(self, input_name, shape: onnx.TensorShapeProto):
-		self.name = input_name
-		self.shape = [d.dim_value for d in shape.dim]
+    def __init__(self, input_proto: onnx.ValueInfoProto, new_name: str):
+        super().__init__(input_proto, new_name)
+        # get type of input tensor
+        tensor_type = input_proto.type.tensor_type
+        # check if it has a shape:
+        shape = tensor_type.shape if tensor_type.HasField("shape") else None
+        self.shape = [d.dim_value for d in shape.dim] if shape else None
+
 
 # Can be either input or weight tensor
 class WeightTensor(Tensor):
-	def __init__(self, weight_proto):
-		Tensor.__init__(self, weight_proto)
-		self.shape = list()
-		self.input_data = numpy_helper.to_array(weight_proto)#.reshape(tuple(input_proto.dims))
-		if len(self.input_data.shape) == 1:
-			self.shape.append(1)
-			self.shape.append(self.input_data.shape[0])
-			self.shape.append(1)
-			self.shape.append(1)
-		elif len(self.input_data.shape) == 2:
-			self.shape.append(1)
-			self.shape.append(1)
-			self.shape.append(self.input_data.shape[0])
-			self.shape.append(self.input_data.shape[1])
-		elif len(self.input_data.shape) == 4:
-			self.shape.append(self.input_data.shape[0])
-			self.shape.append(self.input_data.shape[1])
-			self.shape.append(self.input_data.shape[2])
-			self.shape.append(self.input_data.shape[3])
-		else:
-			print(weight_proto.name)
-			self.shape.append(1)
-			self.shape.append(1)
-			self.shape.append(1)
-			self.shape.append(1)
-			#raise ValueError("Dimensions of weight not equals to 1,2 or 4")
-
-	def dump_weight(self, file_name):
-		if len(self.input_data.shape) == 1:
-			dumpFcBias(file_name, self.input_data)
-		elif len(self.input_data.shape) == 2:
-			dumpFcWeights(file_name, self.input_data)
-		elif len(self.input_data.shape) == 4:
-			dumpConvWeights(file_name, self.input_data)
-
-
+    def __init__(self, weight_proto: onnx.TensorProto, new_name: str):
+        super().__init__(weight_proto, new_name)
+        self.input_data = numpy_helper.to_array(weight_proto)
+        sh = self.input_data.shape
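+        # normalize to 4-D: 1-D bias -> [1, C, 1, 1], 2-D FC weight -> [1, 1, H, W],
+        # 4-D conv weight kept as-is; anything else falls back to [1, 1, 1, 1]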
+        if len(sh) == 1:
+            self.shape = [1, sh[0], 1, 1]
+        elif len(sh) == 2:
+            self.shape = [1, 1, sh[0], sh[1]]
+        elif len(sh) == 4:
+            self.shape = [sh[0], sh[1], sh[2], sh[3]]
+        else:
+            self.shape = [1] * 4
+
+    def dump_weight(self, file_name: PathLike):
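+        # raw row-major dump; replaces the per-layout dump helpers removed from utils.py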
+        self.input_data.tofile(file_name)
diff --git a/hpvm/projects/onnx/frontend/utils.py b/hpvm/projects/onnx/frontend/utils.py
deleted file mode 100644
index 891f40fae7b3653cedefb4cf60cb3f9f80cf0aa0..0000000000000000000000000000000000000000
--- a/hpvm/projects/onnx/frontend/utils.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import numpy as np
-
-skip_layer = ["Identity", "Flatten", "Pad"]
-
-def dumpLabels(file_name, Y_test):
-    
-    f = open(file_name, "wb")
-    
-    labels_map = {}    
-    for label in Y_test:
-        label_val = 0
-        if len(Y_test.shape) > 1:        
-          #label_val = np.int8(label[0])
-          label_val = np.int32(label[0])
-        else:
-          #label_val = np.int8(label)
-          label_val = np.int32(label)
-         
-        if label_val not in labels_map:
-            labels_map[label_val] = 0
-            labels_map[label_val] += 1
-
-        f.write(label_val)
-
-    f.close()
-    
-
-    
-"""def dumpData(file_name, X_test):
-
-    N = X_test.shape[0]
-    C = X_test.shape[1]
-    H = X_test.shape[2]
-    W = X_test.shape[3]
- 
-    print ("*DumpData")
-    print("-min_val = ", np.amin(X_test))
-    print("-max_val = ", np.amax(X_test))
-    
-    f = open(file_name, "wb")
-    for i in range(N):
-        for j in range(C):
-            for k in range(H):
-                for l in range(W):
-                    val = struct.unpack("f", struct.pack("f", X_test[i][j][k][l]))
-                    f.write(np.float32(val[0]))
-
-    f.close()
-"""
-
-
-def dumpData(file_name, X_test):
-
-    N = X_test.shape[0]
-    C = X_test.shape[1]
-    H = X_test.shape[2]
-    W = X_test.shape[3]
- 
-    print ("*DumpData")
-    print("-min_val = ", np.amin(X_test))
-    print("-max_val = ", np.amax(X_test))
-
-    f = open(file_name, "wb")
-
-    X_test.tofile(f)    
-
-    f.close()
-
-    
-    
-def dumpConvWeights(file_name, weights):
-
-    print (weights.shape)
-    print ("*DumpConvWeights")
-    print("-min_val = ", np.amin(weights))
-    print("-max_val = ", np.amax(weights))
-    weights_keras = np.einsum('NCWH->WHCN', weights)
-    print ("Convert shape for conv weights: " + str(weights_keras.shape))
-    N = weights_keras.shape[3]
-    C = weights_keras.shape[2]
-    H = weights_keras.shape[1]
-    W = weights_keras.shape[0]
-    f = open(file_name, "wb")
-    for i in range(N):
-        for j in range(C):
-            for k in range(H):
-                for l in range(W):
-                    # FIXME: Legacy code from Keras frontend 
-                    # should actually interchange k with l
-                    f.write(weights_keras[k][l][j][i])
-
-    f.close()
-
-
-    
-def dumpFcWeights(file_name, weights):
-
-    print (weights.shape)
-    print ("*DumpFcWeights")
-    print("-min_val = ", np.amin(weights))
-    print("-max_val = ", np.amax(weights))
-
-    H = weights.shape[0]
-    W = weights.shape[1]
-    f = open(file_name, "wb")
-    for i in range(H):
-        for j in range(W):
-            f.write(weights[i][j])
-
-    f.close()        
-
-
-    
-def dumpFcBias(file_name, bias):
-
-    print (bias.shape)
-    print ("*DumpFcBias")
-    print("-min_val = ", np.amin(bias))
-    print("-max_val = ", np.amax(bias))
-
-    W = bias.shape[0]
-    f = open(file_name, "wb")
-    for i in range(W):
-        f.write(bias[i])
-
-    f.close()
-
-
-
-def dumpCalibrationData(file_name, X_train, labels_fname, train_labels):
-
-  combined_list = []
-  for i in range(len(X_train)):
-    tup = (X_train[i], train_labels[i])
-    combined_list.append(tup)       
-  
-  np.random.shuffle(combined_list)
-  #X_calibration = X_train[0:5000]
-
-  data_list = []
-  labels_list = []
-  for i in range(5000):
-    tup = combined_list[i]
-    data_list.append(tup[0])
-    labels_list.append(tup[1])
-
-  data_list = np.array(data_list)
-  labels_list = np.array(labels_list)
-  
-  dumpData(file_name, data_list)
-  dumpLabels(labels_fname, labels_list)
-