diff --git a/llvm/projects/hpvm-tensor-rt/build_pldi/table_generator.py b/llvm/projects/hpvm-tensor-rt/build_pldi/table_generator.py
index ff0f9b37c0f51ed8461e8b226660bde9b27de729..e7177d1ca2d632a629178926f23a7827a1c71564 100644
--- a/llvm/projects/hpvm-tensor-rt/build_pldi/table_generator.py
+++ b/llvm/projects/hpvm-tensor-rt/build_pldi/table_generator.py
@@ -6,111 +6,117 @@ import shutil
 from collections import defaultdict
 
 class TableGenerator: 
-    
+   
+    '''
+    Stores all precision conversions used. 
+    '''
     precision_conversions = frozenset(["h2f", "f2h"]) 
 
-    def __init__(self, dir_name, iters, profiler_binary_name):
-        self.__dir_name = dir_name
+    def __init__(self, dir_path, iters, profiler_binary_name):
+        '''
+        Args:
+            dir_path:               Path of directory containing network binaries
+            iters:                  Number of iterations to run each binary for
+            profiler_binary_name:   Name of offline profiler binary to run 
+        '''
+        self.__dir_path = dir_path
 
-        # directory/path/network_name (last item in path)  
-        self.__network_name = os.path.split(dir_name)[-1]
+        # Name of the actual directory 
+        self.__network_name = os.path.split(dir_path)[-1]
 
         self.__iters = iters 
         self.__profiler_binary_name = profiler_binary_name
-        self.__results_dir_name = "%s_results" % self.__dir_name
 
-        self.__table_filename = "%s_tensors.txt" % self.__network_name
+        # Path to results directory 
+        self.__results_dir_path = "%s_results" % self.__dir_path
 
+        # Outputted table file
+        self.__table_filename = "%s_tensors.txt" % self.__network_name
 
-    def __is_binary(self, file_path):
-        # Binary name must start with the network name as per our naming standards
-        return os.path.isfile(file_path) and os.access(file_path, os.X_OK) and \
-                file_path.find(self.__network_name) != -1
+        # Nested default dictionary of default dicts
+        self.__table = self.__build_nested_default_dict()
 
 
-    def run_binaries_in_input_dir(self):
-        if not os.path.isdir(self.__dir_name):
-            print("ERROR: Directory %s not found" % self.__dir_name)
+    def generate_table(self):
+        '''
+        Generates a table file called <network_name>_tensors.txt in the following 
+        steps:
+        1. Runs the offline profiler against the inputted binaries to generate
+        results files
+        2. Builds an internal table storing all data from the parsed results files
+        the offline profiler generated
+        3. Writes the internal table to <network_name>_tensors.txt file and uses the 
+        <network_name>_ops.txt file as a guideline in terms of row order 
+        '''
+        #self.__run_inputted_binaries()  # TODO(review): step 1 disabled — re-enable or update docstring
+        self.__build_internal_table()
+        self.__output_table_to_file()
+
+
+    def __run_inputted_binaries(self):
+        '''
+        Invokes the profiler to run all appropriate binaries (must start with the network 
+        name) in the inputted directory. Result files generated by the profiler are 
+        stored in the results file directory and are named <binary_name>.txt. These results
+        files are then parsed in a later step to generate the table
+        '''
+        if not os.path.isdir(self.__dir_path):
+            print("ERROR: Directory %s not found" % self.__dir_path)
             exit(1)
 
         try:
-            os.mkdir(self.__results_dir_name)
+            os.mkdir(self.__results_dir_path)
         except OSError:
-            if os.path.isdir(self.__results_dir_name):
+            if os.path.isdir(self.__results_dir_path):
                 print("Directory already exists. Clearing directory.")
-                for old_file in glob.glob(os.path.join(self.__results_dir_name, "*")):
+                for old_file in glob.glob(os.path.join(self.__results_dir_path, "*")):
                     os.remove(old_file)
             else:
                 print("ERROR: Directory doesn't exist but failed to create dir")
 
-        for binary_name in os.listdir(self.__dir_name):
-            binary_path = os.path.join(self.__dir_name, binary_name)
+        for binary_name in os.listdir(self.__dir_path):
+            binary_path = os.path.join(self.__dir_path, binary_name)
 
-            if not self.__is_binary(binary_path):
+            if not self.__should_execute_file(binary_path):
                 continue
 
-            if not os.path.isfile(binary_path):
-                print("ERROR: Binary %s not found" % binary_path)
-                exit(1)
-
-            output_file = os.path.join(self.__results_dir_name, binary_name + ".txt")
+            output_file = os.path.join(self.__results_dir_path, binary_name + ".txt")
             # No stdout/stderr piping needed for now
-            subprocess.Popen([profiler_binary_name, binary_path, str(self.__iters), \
+            subprocess.Popen([self.__profiler_binary_name, binary_path, str(self.__iters), \
                         output_file]).communicate()
 
 
-    def __get_approximation_type(self, results_filename):
-        approx_type_start_ind = results_filename.find(self.__network_name) \
-                + len(self.__network_name) + 1 # + 1 to account for _ delimiter
-        approx_type_end_ind = results_filename.find(".txt")
-        return results_filename[approx_type_start_ind : approx_type_end_ind] 
-   
-
-    def __parse_tensor_operation_line(self, tensor_op_line):
-        print(tensor_op_line)
-        line_as_list = tensor_op_line.split(",")
-        return line_as_list[0], line_as_list[1], line_as_list[2] 
-
-
-    def __build_nested_default_dict(self):
-        return defaultdict(self.__build_nested_default_dict)
-
-    # h2f or f2h
-    def __get_original_operation_name(self, op_name):
-        underscore_ind = op_name.find("_")
-        return op_name[ : underscore_ind], op_name[underscore_ind + 1 : ]
-
-
-    def generate_table(self):
-        self.__table = self.__build_nested_default_dict()
-        self.__build_internal_table()
-        self.__output_table()
-
-
     def __build_internal_table(self):
-        for results_file_name in os.listdir(self.__results_dir_name):
+        '''
+        Iterates through each results file generated by the runs of the offline
+        profiler and stores the data in a dictionary in the following format:
+            [operation name][approximation type OR conversion type][time/energy]
+        '''
+        for results_file_name in os.listdir(self.__results_dir_path):
             # Ignore if it's not a results file
             if results_file_name == self.__table_filename or \
                         not results_file_name.startswith(self.__network_name):
                 continue
 
             approx_type = self.__get_approximation_type(results_file_name)
-            results_file = open(os.path.join(self.__results_dir_name, results_file_name), "r")
+            results_file = open(os.path.join(self.__results_dir_path, results_file_name), "r")
 
             for line in results_file:
                 line = line.strip()
                 op_name, total_time, total_energy = self.__parse_tensor_operation_line(line)
 
-                # Handle _f2h and _h2f output for tensor operation
-                # Store as columns of original operation rather than independent rows 
+                # If the current operation is f2h or h2f  
                 if any(op_name.endswith(prec_conv) for prec_conv in TableGenerator.precision_conversions):
+                    # Get the original operation name (without the f2h/h2f) and the conversion type 
                     orig_op_name, conversion_type = self.__get_original_operation_name(op_name)
-                    # Error bc original op name should ALWAYS be in the table
-                    if orig_op_name not in self.__table: 
+
+                    if orig_op_name not in self.__table:
                         print("ERROR: Conversion found but original %s is not in the table" % orig_op_name)
                         exit(1)
-                    self.__table[orig_op_name][conversion_type]["time"] = total_time
-                    self.__table[orig_op_name][conversion_type]["energy"] = total_energy 
+
+                    # Store f2h and h2f as columns in the row belonging to the original operation
+                    self.__table[orig_op_name][conversion_type]["time"] = total_time
+                    self.__table[orig_op_name][conversion_type]["energy"] = total_energy
 
                 # Create a new row in the dictionary
                 else:
@@ -119,67 +125,162 @@ class TableGenerator:
 
             results_file.close()
 
-    def __output_table(self):
-        table_file_path = os.path.join(self.__results_dir_name, self.__table_filename)
-        # TODO un hard code this 
-        soc_operations_file_name = os.path.join("/home/nvidia/soc_simulator", "%s_cifar10" % self.__network_name, "%s_ops.txt" % self.__network_name)
 
-		# Don't need to copy the file over --> can use the original file as a reference
+    def __output_table_to_file(self):
+        '''
+        Outputs the internally stored table to a file using the <network_name>_ops.txt file as
+        a guideline in the following steps:
+        1. Opens the ops file and the file to output the table to
+        2. Reads a line from the ops file (guaranteed to be the layers/NML header)
+        3. For each operation in the layer (or 1 operation if the "layer" is a NML), we store the
+        time and the energy
+        '''
+        table_file_path = os.path.join(self.__results_dir_path, self.__table_filename)
+        soc_operations_file_name = os.path.join("/", "home", "nvidia", "soc_simulator", \
+                        "%s_cifar10" % self.__network_name, "%s_ops.txt" % self.__network_name)
+
         soc_operations_file = open(soc_operations_file_name, "r")
         table_file = open(table_file_path, "w")
 
-        # TODO possible for operations in the same layer to not have the same # of cols? 
-
         curr_line = soc_operations_file.readline().strip()
 
         while curr_line:
             # First line is always the layers line (#layer_name,num_ops)
             layer_name, num_ops = self.__parse_layer_info_line(curr_line)
-            print("FIRST LINE", layer_name, num_ops)
-            
-            # Get each operation in the layer
+
+            # List of strings, where each string is a row corresponding to an operation
+            # in the layer
             ops_in_layer = []
-            header = ["**", layer_name, str(num_ops), "_"]
-            
-            for op_in_layer_count in range(num_ops): 
-                # Each line consists of operation name  
+
+            # Stores a list of elements in the header, which will be joined into a string
+            # The header is only generated for the first operation in the layer
+            # CRITICAL ASSUMPTION: All operations within a layer have the same # columns
+            # or everything breaks bc the header is per layer, not per operation
+            header = ["**", layer_name, str(num_ops)]
+
+            # Iterate through all operations within the layer 
+            for op_in_layer_count in range(num_ops):
+                # Contains the operation name 
                 curr_line = soc_operations_file.readline().strip()
-                curr_op = [curr_line] # Join into a string later
+
+                # Stores a list of elements that will be joined to make up a row 
+                curr_op = [curr_line]
                 operation_data = self.__table[curr_line]
 
-                # Iterate through time/energy data for each approx type
+                # Iterate through time/energy data for each approximation type corresponding
+                # to the current operation
                 for approx_type in operation_data:
-                    curr_op.append(operation_data[approx_type]["time"]) 
-                    curr_op.append(operation_data[approx_type]["energy"])
+                    op_time = operation_data[approx_type]["time"]
+                    op_energy = operation_data[approx_type]["energy"]
+
+                    curr_op.append(op_time)
+                    curr_op.append(op_energy)
 
-                    # CRITICAL ASSUMPTION: All ops within a layer have the same # cols
-                    # Only fill out the header once for the layer
                     if op_in_layer_count == 0:
-                        header.append(approx_type)    
-               
+                        header.append("%s_time" % approx_type)
+                        header.append("%s_energy" % approx_type)
+
                 ops_in_layer.append(' '.join(curr_op))
+
             # Getting all operation rows and then writing everything because
             # calls to write() are slow (memory vs time tradeoff)
-            print("%s" % ' '.join(header))
-            print("%s" % '\n'.join(ops_in_layer))
             table_file.write("%s\n%s\n" % (' '.join(header), '\n'.join(ops_in_layer)))
 
             curr_line = soc_operations_file.readline().strip()
 
+
+    def __should_execute_file(self, file_path):
+        '''
+        Checks if the file at the given file path is a binary that should be run
+        by the profiler. Must exist, be a binary, and must start with the network
+        name as per our naming standards.
+
+        Args:
+            file_path:          Path of the file to check 
+        '''
+        return os.path.isfile(file_path) and os.access(file_path, os.X_OK) and \
+                file_path.find(self.__network_name) != -1
+
+
+    def __get_approximation_type(self, results_filename):
+        '''
+        Parses a given results filename for the approximation type. 
+        Format assumption: <network_name>_<approx_type>.txt
+            
+        Args:
+            results_filename:      Name of results file
+
+        Returns:
+            the approximation technique (ex: fp16) 
+        '''
+        approx_type_start_ind = results_filename.find(self.__network_name) \
+                + len(self.__network_name) + 1 # + 1 to account for _ delimiter
+        approx_type_end_ind = results_filename.find(".txt")
+        return results_filename[approx_type_start_ind : approx_type_end_ind] 
+   
+
+    def __parse_tensor_operation_line(self, tensor_op_line):
+        '''
+        Parses a tensor operation line (within a output file from the offline
+        profiler for the operation name, the total time used, and the total
+        energy used
+
+        Args:
+            tensor_op_line:        Tensor operation line from output file
+
+        Returns:
+            operation name
+            total time used
+            total energy used
+        '''
+        line_as_list = tensor_op_line.split(",")
+        return line_as_list[0], line_as_list[1], line_as_list[2] 
+
+
+    def __build_nested_default_dict(self):
+        '''
+        Builds a nested default dictionary with an arbitrary number of levels
+        '''
+        return defaultdict(self.__build_nested_default_dict)
+
+    def __get_original_operation_name(self, op_name):
+        '''
+        Parses an operation name containing _<conversion type> for the original
+        operation name.
+        Format assumption: <original_op_name>_<conversion type>
+
+        Args:
+            op_name:        Name of the operation
+        
+        Returns:
+            the original operation name 
+        '''
+        underscore_ind = op_name.find("_")
+        return op_name[ : underscore_ind], op_name[underscore_ind + 1 : ]
+
+
     def __parse_layer_info_line(self, layer_info_line): #layer_name,num_ops
+        '''
+        Parses a layer header (from the original ops.txt file) into the layer name
+        and the number of operations
+        Assumed format: #layer_name,num_ops
+
+        Args:
+            layer_info_line:    Line at the beginning of each layer in the ops file
+
+        Returns:
+            layer name
+            number of ops in the layer
+        '''
         comma_ind = layer_info_line.find(",")
         return layer_info_line[layer_info_line.find("#") + 1 : comma_ind], \
                     int(layer_info_line[comma_ind + 1 : ])
 
-    def __generate_header(self, table):
-        # <approx type time/energy> <conversion type at very end> 
-        # should the header be per tensor op or per layer?
-        # Try doing this per layer first
-        pass            
-
-binary_dir_name = "/home/nvidia/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/build_pldi/mobilenet"
-num_iters = 1 
-profiler_binary_name = "/home/nvidia/awesome_profiler/pp"
-table_gen = TableGenerator(binary_dir_name, num_iters, profiler_binary_name)
-#table_gen.run_binaries_in_input_dir()
-table_gen.generate_table()
+
+if __name__ == "__main__":
+    binary_dir_path = "/home/nvidia/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/build_pldi/mobilenet"
+    num_iters = 1 
+    profiler_binary_name = "/home/nvidia/awesome_profiler/pp"
+    table_gen = TableGenerator(binary_dir_path, num_iters, profiler_binary_name)
+    # NOTE: the profiling run (__run_inputted_binaries) is private and currently disabled inside generate_table()
+    table_gen.generate_table()