diff --git a/hpvm/projects/keras/frontend/approxhpvm_translator.py b/hpvm/projects/keras/frontend/approxhpvm_translator.py
index 852b3e89f4ca98d072e846af1cd4aed66bd97f8c..ff75415a0f47eefa88f43ccbd01f42f02ae28dd9 100644
--- a/hpvm/projects/keras/frontend/approxhpvm_translator.py
+++ b/hpvm/projects/keras/frontend/approxhpvm_translator.py
@@ -1117,6 +1117,10 @@ def translate_to_approxhpvm(model,
   hpvmTranslator = HPVMTranslator(dfg, weight_str, input_str, filter_names)    
   hpvmTranslator.translate(model, src_dir, test_data, tuner_data, batch_size)
 
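+  # Emit the network's per-layer composition (tuner_confs.txt) via the PROMISE translator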
+  promiseTranslator = PromiseRtTranslator(dfg, weight_str)
+  promiseTranslator.translate(model, src_dir, test_data)
+
   if reload_weights:
     print ("NOTE: Using existing pretrained weights \n")
   else:
diff --git a/hpvm/projects/keras/frontend/promise_translator.py b/hpvm/projects/keras/frontend/promise_translator.py
index 30acf47cd8aeedaaae8ee1ba0fda637a7d931940..f655174c746fbfdac01f10ca344317e5cd27f7d5 100644
--- a/hpvm/projects/keras/frontend/promise_translator.py
+++ b/hpvm/projects/keras/frontend/promise_translator.py
@@ -185,6 +185,7 @@ class PromiseRtTranslator:
     self.quant_ranges = {}
     # Used to generate PromiseSim Info
     self.layer_str = ""
+    self.cur_layer_id = 1    # running layer id prefixed to each tuner_confs.txt line
     self.layer_size_str = "" 
     self.layer_input_sizes = {}
     self.unique_op_types = {}
@@ -238,9 +239,7 @@ class PromiseRtTranslator:
       
     first_op = state.getFirstOp()
     layer_name = first_op.layer_name
-
-    #print("** layer_name = ", layer_name)    
-          
+
     unique_id = 0
     if promise_layer_type not in self.unique_op_types:
       self.unique_op_types[promise_layer_type] = 1
@@ -261,7 +260,6 @@ class PromiseRtTranslator:
     
     weights_shape = central_op.weights.shape
     input_size = self.layer_input_sizes[layer_name]  
-    #print ("layer_name = ", layer_name, " input_size = ", input_size)
     N = self.batch_size
     C = input_size[1]
 
@@ -294,41 +292,50 @@
 
   def appendLayerString(self, promise_layer_type, state):
 
-    layer_str = ""
+
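+    # Each layer emits one config line: "<id> gpu <op> fp32 1 [<op> fp32 1 ...]",
+    # e.g. "1 gpu conv fp32 1 add fp32 1 relu fp32 1" for a biased Conv2D + ReLU
+    # ("fp32 1" is assumed to select the baseline 32-bit knob for each fused op)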
+    layer_str = str(self.cur_layer_id) + " gpu "
+    self.cur_layer_id += 1
+
     for op in state.ops:
       op_type = op.layer_type    
       if op_type == "Conv2D":
-        layer_str += "conv  "
+        layer_str += "conv fp32 1 "
         if op.use_bias:
-          layer_str += "add  "
+          layer_str += "add fp32 1 "
         if op.activation_type != "linear":
-          layer_str += "activation  "
+          layer_str += op.activation_type + " fp32 1 "
 
       if op_type == "DepthwiseConv2D":
-        layer_str += "depthwise_conv  "
+        layer_str += "depthwise_conv fp32 1 "
         if op.use_bias:
-          layer_str += "add  "
+          layer_str += "add fp32 1 "
         if op.activation_type != "linear":
-          layer_str += "activation  "
+          layer_str += op.activation_type + " fp32 1 "
 
       if op_type == "BatchNormalization":
-        layer_str += "batchnorm  "
+        layer_str += "batchnorm fp32 1 "
           
       if op_type == "Dense":
-        layer_str += "dense  "
+        layer_str += "mul fp32 1 "
         if op.use_bias:
-          layer_str += "add  "
+          layer_str += "add fp32 1 "
         if op.activation_type != "linear":
-          layer_str += "activation  "
-      
-      if "Pooling" in op_type:
-        layer_str += "pool  "
+          layer_str += op.activation_type + " fp32 1 "
+
+      if op_type == "MaxPooling2D":
+        layer_str += "pool_max fp32 1 "
+
+      if op_type == "AveragePooling2D":
+        layer_str += "pool_mean fp32 1 "
       
       if op_type == "Add":    
-        layer_str += "add  "
+        layer_str += "add fp32 1 "
 
       if op_type == "Activation":
-        layer_str += "activation  "
+        layer_str += op.activation_type + " fp32 1 "
 
     layer_str += "\n"
 
@@ -355,13 +360,10 @@ class PromiseRtTranslator:
   # Retrieve input name of the previous layer
   def getInputLayerName(self, cur_node):
 
-    #print (cur_node.layer_name)
     # Assumption: If no inputs, the previous layer must be input layer
     if len(cur_node.inputs) == 0:
       return "input"
 
-    #print ("Input_type = ", cur_node.inputs[0].layer_type)
-
     pred_layer_type = cur_node.inputs[0].layer_type
     # FIXME: Assuming the 'inference' phase - hence skipping Dropout
     #if pred_layer_type == "Flatten" or pred_layer_type == "Dropout":
@@ -381,7 +383,6 @@ class PromiseRtTranslator:
   # Retrieve input name of the previous layer
   def getSingleInputName(self, cur_node):
 
-    #print (cur_node.layer_name)
     # Assumption: If no inputs, the previous layer must be input layer
     if len(cur_node.inputs) == 0:
       return "input"
@@ -396,7 +397,6 @@ class PromiseRtTranslator:
   
     # get input to the layer
     input_node_name = cur_node.inputs[0].layer_name  # get the input layer ID
-
     
     input_var_name = ""
     if input_node_name in self.output_map:
@@ -450,12 +450,6 @@ class PromiseRtTranslator:
 
     weights = cur_node.weights
 
-    #min_val = np.amin(weights)
-    #max_val = np.amax(weights)
-
-    #min_val = np.percentile(weights, 0.5)
-    #max_val = np.percentile(weights, 99.5)
-
     (min_val, max_val) = get_best_quant_range(weights)
     
     
@@ -488,8 +482,6 @@ class PromiseRtTranslator:
     prev_layer_name = self.getInputLayerName(first_op)
     cur_layer_name = last_op.layer_name
 
-    # print ("prev_layer_name ", prev_layer_name , " cur_layer_name = ", cur_layer_name)
-
     if prev_layer_name not in self.quant_ranges or cur_layer_name not in self.quant_ranges:
       print ("ERROR: Layer_name = ", prev_layer_name ," or ", cur_layer_name, " not found in quant_range")
       sys.exit(0)
@@ -506,8 +498,6 @@ class PromiseRtTranslator:
     
   def genDenseLayer(self, state):
     
-    print ("\n\n Layer = ", state.op_string, "\n\n")
-
     first_op = state.getFirstOp()
     dense_op = state.getDenseOp()
     last_op = state.getLastOp()
@@ -520,40 +510,16 @@ class PromiseRtTranslator:
     b_min, b_max = self.getBiasRange(dense_op)   
     
     activation_id = state.getActivationID()
-
-    # NOTE: retrieve the quantization ranges for inputs and ouputs
-    input_quant_range, output_quant_range = self.getQuantRange(state)
-    
-    promise_layer_str = "void* " + output_var + " = FCLayer_PROMISE(" + input_var + ", "
-    promise_layer_str += str(input_quant_range[0]) + ", "  + str(input_quant_range[1]) + ", "
-    promise_layer_str += w_name + ", " + str(w_min) + ", " + str(w_max) + ", "
-    promise_layer_str += b_name + ", " + str(b_min) + ", " + str(b_max) + ", "
-    promise_layer_str += str(activation_id) + ", "
-    promise_layer_str += str(output_quant_range[0]) + ", "  + str(output_quant_range[1]) + ", "
-    promise_layer_str += str(self.swing_value) 
-    promise_layer_str += "); \n"
-    
-    print (promise_layer_str)
-
-    self.program_str += promise_layer_str
-
     
     self.appendLayerString("FC", state)
     
     state.clear()
 
 
-    # NOTE: This dumps quantization range files needed for HPVM wrapper backend
-    dumpQuantizeRanges(self.weights_dir, input_quant_range[0], input_quant_range[1],\
-                       w_min, w_max, b_min, b_max, \
-                       output_quant_range[0], output_quant_range[1])
-
 
     
   def genConvLayer(self, state):
     
-    print ("\n\n Layer = ", state.op_string, "\n\n")
-
     first_op = state.getFirstOp()
     conv_op = state.getConvOp()
     last_op = state.getLastOp()
@@ -570,41 +536,15 @@ class PromiseRtTranslator:
     pool_id, pool_size = state.getPoolInfo()
     strides = state.getStrides()
 
-    # NOTE: retrieve the quantization ranges for inputs and ouputs
-    input_quant_range, output_quant_range = self.getQuantRange(state)
-
-    # NOTE: Assuming symmetric K*K pool size
-    promise_layer_str = "void* " + output_var + " = ConvLayer_PROMISE(" + input_var + ", "
-    promise_layer_str += str(input_quant_range[0]) + ", "  + str(input_quant_range[1]) + ", "
-    promise_layer_str += w_name + ", " + str(w_min) + ", " + str(w_max) + ", "
-    promise_layer_str += b_name + ", " + str(b_min) + ", " + str(b_max) + ", "
-    promise_layer_str += str(padding) + ", " + str(padding) + ", "
-    promise_layer_str += str(strides[0]) + ", " + str(strides[1]) + ", "
-    promise_layer_str += str(pool_id) + ", " + str(pool_size[0]) + ", "
-    promise_layer_str += str(activation_id) + ", "
-    promise_layer_str += str(output_quant_range[0]) + ", "  + str(output_quant_range[1]) + ", "    
-    promise_layer_str += str(self.swing_value) 
-    promise_layer_str += "); \n"
-
-    print (promise_layer_str)
-    
-    self.program_str += promise_layer_str
-
     self.appendLayerString("Conv", state)
 
     state.clear()
 
-
-    # NOTE: This dumps quantization range files needed for HPVM wrapper backend
-    dumpQuantizeRanges(self.weights_dir, input_quant_range[0], input_quant_range[1],\
-                       w_min, w_max, b_min, b_max, \
-                       output_quant_range[0], output_quant_range[1])
     
 
 
   def genDepthwiseConvLayer(self, state):
-    print ("\n\n Layer = ", state.op_string, "\n\n")
-
+
     conv_op = state.getDepthwiseConvOp()
     first_op = state.getFirstOp()
     last_op = state.getLastOp()
@@ -618,44 +558,7 @@ class PromiseRtTranslator:
     padding = state.getPadding()
     pool_id, pool_size = state.getPoolInfo()
     strides = state.getStrides()
-
-    promise_layer_str = "void* " + output_var + " = "
-    promise_layer_str += "tensorConvolution(" + input_var + ", "
-    promise_layer_str += w_name  + ", "
-    promise_layer_str += str(padding) + ", "
-    promise_layer_str += str(padding) + ", "
-    promise_layer_str += str(strides[0]) + ", "
-    promise_layer_str += str(strides[1]) + ", "
-    promise_layer_str += "1, "
-
-    C = conv_op.weights.shape[2]
-    promise_layer_str += str(C) + "); \n"
-
-    # FIX: ADD code for TensorAdd and ACTIVATION
-    # TODO: ADD code for TensorAdd and ACTIVATION
-
-    input_var = output_var
-    if nodeHasBias(conv_op):
-      output_var2 = self.getVariableName(conv_op)    
-      promise_layer_str += "void* " + output_var2 + " = "
-      promise_layer_str += "tensorAdd(" + input_var + ", "
-      promise_layer_str += conv_op.layer_name + "_b"
-      promise_layer_str += "); \n"
-
-      # Update variable that holds input for next operation
-      input_var = output_var2
-
-
-    if nodeHasActivation(conv_op):
-      activation_type = conv_op.activation_type
-      output_var = self.getVariableName(conv_op)    
-      promise_layer_str += genActivationCallStr(input_var, output_var, activation_type)  
-
-      
-    print (promise_layer_str)    
-    self.program_str += promise_layer_str
-
-       
+
     self.appendLayerString("DepthwiseConv", state)
 
     state.clear()
@@ -667,20 +570,6 @@ class PromiseRtTranslator:
     first_op = state.getFirstOp()
     last_op = state.getFirstOp()
 
-    input_var = self.getSingleInputName(first_op)
-    output_var = self.getVariableName(last_op)
-
-    promise_layer_str = "void* " + output_var + " = "
-    promise_layer_str += "tensorBatchNorm(" + input_var + ", "
-    promise_layer_str += first_op.layer_name + "_gamma, "
-    promise_layer_str += first_op.layer_name + "_beta, "
-    promise_layer_str += first_op.layer_name + "_mean, "
-    promise_layer_str += first_op.layer_name + "_variance, "
-    promise_layer_str += str(first_op.epsilon)
-    promise_layer_str += "); \n"
-
-    self.program_str += promise_layer_str
-
     self.appendLayerString("BatchNorm", state)
 
     state.clear()
@@ -689,25 +578,19 @@
     
 
   def genSoftmaxLayer(self, state):
-    print ("\n\n Layer = ", state.op_string, "\n\n")
-
+
     first_op = state.getFirstOp()
     last_op = state.getLastOp()
 
-    input_var = self.getSingleInputName(first_op)
-    output_var = self.getVariableName(last_op)
-    
-    promise_layer_str = "void* " + output_var + " = tensorSoftmax(" + input_var + "); \n"
-    print (promise_layer_str)
-
-    self.program_str += promise_layer_str
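+    # The terminal softmax gets its own config line, pinned to fp32 on the GPU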
+    self.layer_str += str(self.cur_layer_id) + " gpu softmax fp32 1\n"
+    self.cur_layer_id += 1
     
     state.clear()
 
 
   def genAddLayer(self, state):
-    print ("\n\n Layer = ", state.op_string, "\n\n")
-
+
     first_op = state.getFirstOp()
     last_op = state.getLastOp()
 
@@ -729,8 +610,7 @@ class PromiseRtTranslator:
     
     
   def genActivationLayer(self, state):
-    print ("\n\n Layer = ", state.op_string, "\n\n")
-
+
     first_op = state.getFirstOp()
     input_var = self.getSingleInputName(first_op)
     output_var = self.getVariableName(first_op)
@@ -830,7 +710,6 @@ class PromiseRtTranslator:
       return  
 
     layer_name = cur_node.layer_name
-    print (layer_name)
     self.visited_nodes[layer_name] = True
 
     self.genPreviousLayer(state)
@@ -847,7 +726,6 @@ class PromiseRtTranslator:
       return  
 
     layer_name = cur_node.layer_name
-    print ("handle_conv", layer_name)
     self.visited_nodes[layer_name] = True
 
     self.genPreviousLayer(state)
@@ -864,7 +742,6 @@ class PromiseRtTranslator:
       return  
 
     layer_name = cur_node.layer_name
-    print ("handle_depthwise_conv", layer_name)
     self.visited_nodes[layer_name] = True
 
     self.genPreviousLayer(state)
@@ -881,7 +758,7 @@ class PromiseRtTranslator:
       return  
 
     layer_name = cur_node.layer_name
-    print ("handle_batchnorm", layer_name)
+    #print ("handle_batchnorm", layer_name)
     self.visited_nodes[layer_name] = True
 
     self.genPreviousLayer(state)
@@ -901,7 +778,6 @@ class PromiseRtTranslator:
       return
 
     layer_name = cur_node.layer_name
-    print (layer_name)
     self.visited_nodes[layer_name] = True
 
     self.genPreviousLayer(state)
@@ -920,7 +796,6 @@ class PromiseRtTranslator:
       return
 
     layer_name = cur_node.layer_name
-    print (layer_name)
     self.visited_nodes[layer_name] = True
 
     # NOTE: If end of DNN
@@ -942,7 +817,6 @@ class PromiseRtTranslator:
       return
 
     layer_name = cur_node.layer_name
-    print (layer_name)
     self.visited_nodes[layer_name] = True
 
     self.genPreviousLayer(state)
@@ -957,7 +831,6 @@ class PromiseRtTranslator:
       return
   
     layer_name = cur_node.layer_name
-    print (layer_name)
     self.visited_nodes[layer_name] = True
 
     layer_type = cur_node.layer_type
@@ -978,7 +851,6 @@ class PromiseRtTranslator:
   def handleLayers(self, output_node, state):
 
     layer_type = output_node.layer_type
-    #print ("layer_type", layer_type)
 
     if layer_type == "ZeroPadding2D":
       self.handle_padding(output_node, state)
@@ -1006,7 +878,7 @@ class PromiseRtTranslator:
         
     if(self.isForwardLayer(layer_type)):
       layer_name = output_node.layer_name
-      print ("NOTE: Skippping = ", layer_name)
+      #print ("NOTE: Skipping = ", layer_name)
       self.visited_nodes[layer_name] = True
       self.traverseSuccessors(output_node, state)   
 
@@ -1036,7 +908,6 @@ class PromiseRtTranslator:
       layer_it += 1
 
     batch_size = 1000
-    #batch_size = len(x_test)
     input_size = len(x_test)
     num_batches = input_size // batch_size 
 
@@ -1048,8 +919,6 @@ class PromiseRtTranslator:
       
       start = i * batch_size
       end = (i + 1) * batch_size
-
-      print ("start = ", start, " end = , ", end)
       
       # Inference over test set
       layer_outs = functor([x_test[start:end], 1.])
@@ -1062,11 +931,9 @@ class PromiseRtTranslator:
       ind = 0
       for layer_out in layer_outs:
         layer_name = model.layers[ind].name
-        print ("layer_name = ", layer_name)
     
-        (min_val, max_val) = get_best_quant_range(layer_out)
-      
-        print ("min_val = ", min_val, " max_val = ", max_val)
+        (min_val, max_val) = get_best_quant_range(layer_out)
+        #print ("min_val = ", min_val, " max_val = ", max_val)
 
         layer_ranges[layer_name].append((min_val, max_val))
         #self.quant_ranges[layer_name] = (min_val, max_val)
@@ -1094,7 +961,7 @@ class PromiseRtTranslator:
         
       self.quant_ranges[layer_name] = (min_val, max_val)    
 
-      print ("---- min = ", min_val, "  max = ", max_val, " ----- \n\n")
+      #print ("---- min = ", min_val, "  max = ", max_val, " ----- \n\n")
 
       ind += 1
 
@@ -1111,8 +978,8 @@ class PromiseRtTranslator:
         continue
 
       layer_name = layer.name
-      print ("layer_name = ", layer_name)
-      print ("layer_shape = ", layer.input.shape)
+      #print ("layer_name = ", layer_name)
+      #print ("layer_shape = ", layer.input.shape)
       self.layer_input_sizes[layer_name] = layer.input.shape
 
 
@@ -1163,18 +1030,14 @@ class PromiseRtTranslator:
   def endBatchLoop(self):
 
     end_loop_str = ""
-    #end_loop_str += "\nuint8_t* labels = readLabelsBatch2(labels_path.c_str(),start,end); \n"
-    #end_loop_str += "\nuint32_t* labels = readLabelsBatch2(labels_path.c_str(),start,end); \n"
     end_loop_str += "\nuint32_t* labels = readLabelsBatch3(labels_path.c_str(),start,end); \n"
 
     
     last_node = self.dfg.last_node
     output_var = self.output_map[last_node.layer_name]
-    #accuracy_call = "\nfloat accuracy = computeAccuracy2(labels, batch_size, " + output_var + "); \n"
     accuracy_call = "\nfloat accuracy = computeAccuracy3(labels, " + output_var + "); \n"
     end_loop_str += accuracy_call
  
-    #end_loop_str += "float accuracy = computeAccuracy2(labels, batch_size, var_60); "
     end_loop_str += "final_accuracy += accuracy; \n"
     end_loop_str += "freeBatchMemory(); \n "
     end_loop_str += "\n}\n\n"
@@ -1220,9 +1083,7 @@ class PromiseRtTranslator:
     if test_data is not None and self.dfg.last_node is not None:
       last_node = self.dfg.last_node
       output_var = self.output_map[last_node.layer_name]
-      #accuracy_call = "\ncomputeAccuracy2(labels," + str(len(test_data)) + "," + output_var + "); \n"
-      #footer_str += accuracy_call
-
+
     accuracy_call =  "\ndumpExecutionAccuracies(); \n"
     footer_str += accuracy_call
     
@@ -1238,14 +1099,23 @@
 
   def dumpLayerStr(self, dir_prefix):
 
-    f = open(dir_prefix + "/layer_composition.txt", "w+")
-    f.write(self.layer_str)
-    f.close()
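+    # tuner_confs.txt layout: a baseline-time line ("0"), a "+++++" separator,
+    # one conf header (fields assumed: name, speedup, energy, accuracy,
+    # accuracy loss), the per-layer knob lines, then a "-----" terminator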
+    config_str = "0\n"
+    config_str += "+++++\n"
+    config_str += "conf1 1 1 100 0\n"
+    config_str += self.layer_str
+    config_str += "-----\n"
 
-    f = open(dir_prefix + "/layers.txt", "w+")
-    f.write(self.layer_size_str)
+    f = open(dir_prefix + "/tuner_confs.txt", "w+")
+    f.write(config_str)
     f.close()
 
+    #f = open(dir_prefix + "/layers.txt", "w+")
+    #f.write(self.layer_size_str)
+    #f.close()
+
     
       
   def dumpProgramString(self, final_str, dir_prefix):
@@ -1258,8 +1125,6 @@ class PromiseRtTranslator:
     
   def generateSourceProgram(self, weights_dir, x_test):
 
-    print(self.program_str)
-    
     final_str = ""
     header_str = self.genHeader()
     final_str += header_str
@@ -1270,7 +1135,6 @@ class PromiseRtTranslator:
     loop_str = self.genBatchLoop(x_test)
     final_str += loop_str
     
-    #final_str += "\n\n" + self.weight_str + "\n\n"
     final_str += self.program_str
 
     end_loop_str = self.endBatchLoop()
@@ -1281,7 +1145,7 @@ class PromiseRtTranslator:
 
     footer_str = self.genFooter(x_test)
     final_str += footer_str    
-    print (final_str)
+    #print (final_str)
     
     self.dumpProgramString(final_str, weights_dir)
     
@@ -1299,13 +1163,10 @@ class PromiseRtTranslator:
     
     self.findLayerInputSizes(model, x_test)
     
-    self.findQuantizeRanges(model, x_test)
-    
     self.handleLayers(root_node, state)
 
-    print ("\n *** Generated PROMISE Layers **** \n ")
-    
-    self.generateSourceProgram(weights_dir, x_test)
+    # PROMISE source code-gen is disabled - not needed in this release version
+    #self.generateSourceProgram(weights_dir, x_test)
 
     self.dumpLayerStr(weights_dir)