diff --git a/llvm/projects/keras/frontend/weight_utils.py b/llvm/projects/keras/frontend/weight_utils.py
index fefd48e7781712fde47d0e5a918a8c0afbecb2db..9d5bf75d6de9ccdb3d264542a231668944d668d7 100644
--- a/llvm/projects/keras/frontend/weight_utils.py
+++ b/llvm/projects/keras/frontend/weight_utils.py
@@ -2,6 +2,7 @@
 import numpy as np
 import struct
 import random
+from keras.optimizers import Adam
 
 
 def dumpLabels(file_name, Y_test):
@@ -148,9 +149,53 @@ def dumpCalibrationData(file_name, X_train, labels_fname, train_labels):
 
 
 def dumpCalibrationData2(file_name, test_data, labels_fname, test_labels):
-
-    
+   
   dumpData(file_name, test_data)
   dumpLabels(labels_fname, test_labels)
   
   
+
+
+# Loads existing HPVM FP32 weights into a Keras model and saves it to output_model
+def dumpHPVMToKerasModel(model, reload_dir, output_model):
+
+  print ("***** Reloading pre-trained HPVM weights *****")
+  
+  for i in range(len(model.layers)):
+    layer = model.layers[i]
+    layer_name = layer.name
+    print ("*layer_name = ", layer_name)
+    if "conv" not in layer_name and "dense" not in layer_name:
+      continue
+    
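+    # HPVM dumps each layer's parameters as raw float32 binaries named <layer_name>_w.bin / <layer_name>_b.bin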
+    w_path = reload_dir + layer_name + "_w.bin"
+    print ("** w_path = ", w_path)    
+    w_arr = np.fromfile(w_path, dtype='float32')
+    
+    b_path = reload_dir + layer_name + "_b.bin"
+    b_arr = np.fromfile(b_path, dtype='float32')
+
+    w_shape = layer.get_weights()[0].shape    
+    if "conv" in layer_name:      
+      w_nchw_shape = (w_shape[3], w_shape[2], w_shape[0], w_shape[1])      
+      w_arr = np.reshape(w_arr, w_nchw_shape)
+      w_arr = np.transpose(w_arr, (2,3,1,0))
+
+    if "dense" in layer_name:      
+      w_arr = np.reshape(w_arr, w_shape)
+
+    weights = []
+    weights.append(w_arr)
+    weights.append(b_arr)
+    
+    # Overriding model weights
+    layer.set_weights(weights)
+
+  # Model recompilation needed after resetting weights
+  model.compile(loss='categorical_crossentropy',
+                optimizer=Adam(lr=0.0001, decay=1e-6),
+                metrics=['accuracy'])    
+
+  model.save(output_model)
+
+  return model
diff --git a/llvm/projects/keras/src/alexnet.py b/llvm/projects/keras/src/alexnet.py
index f1337f432944b6159b6be54f11d1fc3384933e38..fa9abcfdd9a5324af948d18af791fc21524b842f 100644
--- a/llvm/projects/keras/src/alexnet.py
+++ b/llvm/projects/keras/src/alexnet.py
@@ -3,6 +3,7 @@ import numpy as np
 
 from keras.datasets import cifar10
 from keras.models import Sequential
+from keras.models import load_model
 from keras.layers.core import Dense, Dropout, Flatten, Activation
 from keras.layers.convolutional import Conv2D
 from keras.optimizers import Adam
@@ -19,7 +20,7 @@ import numpy as np
 import os
 from frontend.approxhpvm_translator import translate_to_approxhpvm
 from frontend.weight_utils import dumpCalibrationData
-
+from frontend.weight_utils import dumpHPVMToKerasModel
 
 
 def lr_schedule(epoch):
@@ -161,61 +162,22 @@ def trainModel(model):
 
 
 
-# Loads Existing HPVM FP32 weights
-def reloadFP32HPVMModel(model, reload_dir):
-
-  print ("\n\n*****NOTE: Reloading pre-trained weights \n")
-
-  for i in range(len(model.layers)):
-    layer = model.layers[i]
-    layer_name = layer.name
-    print ("*layer_name = ", layer_name)
-    if "conv" not in layer_name and "dense" not in layer_name:
-      continue
-    
-    w_path = reload_dir + layer_name + "_w.bin"
-    print ("** w_path = ", w_path)
-    b_path = reload_dir + layer_name + "_b.bin"
-       
-    w_arr = np.fromfile(w_path, dtype='float32')
-    print ("w_arr = ", w_arr)
-
-    b_arr = np.fromfile(b_path, dtype='float32')
-
-    w_shape = layer.get_weights()[0].shape    
-    if "conv" in layer_name:      
-      w_nchw_shape = (w_shape[3], w_shape[2], w_shape[0], w_shape[1])      
-      w_arr = np.reshape(w_arr, w_nchw_shape)
-      w_arr = np.transpose(w_arr, (2,3,1,0))
-      print ("old_shape = ", w_shape, " new_shape = ", w_arr.shape)
+  
 
-    if "dense" in layer_name:      
-      w_arr = np.reshape(w_arr, w_shape)
+def reloadKerasModel(model_path, X_test, Y_test):
 
-    weights = []
-    weights.append(w_arr)
-    weights.append(b_arr)
+  model = load_model(model_path)
     
-    # NOTE: overriding weights
-    layer.set_weights(weights)
-
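+  # Evaluate the reloaded model to confirm the saved weights round-trip correctly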
+  score = model.evaluate(X_test, to_categorical(Y_test, 10), verbose=0)
+  print('Test loss (reloaded model):', score[0])
+  print('Test accuracy (reloaded model):', score[1])
 
-  model.compile(loss='categorical_crossentropy',
-                  optimizer=Adam(lr=0.0001, decay=1e-6),
-                  #optimizer = opt_rms,
-                  metrics=['accuracy'])
 
   
-  #input_path = reload_dir +  "input.bin"
-  #input_arr = np.fromfile(input_path, dtype='float32')
-  #input_arr = np.reshape(input_arr, (10000, 3, 32, 32))
+def data_preprocess():
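+  # Returns CIFAR-10 scaled to [0,1] and standardized with the training-set mean/std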
 
   (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
-  test_labels = Y_test
-  train_labels = Y_train
 
-  #X_train = X_train.astype('float32')
-  #X_test = X_test.astype('float32')
   X_train = X_train / 255.0
   X_test = X_test / 255.0
     
@@ -223,13 +185,11 @@ def reloadFP32HPVMModel(model, reload_dir):
   std = np.std(X_train,axis=(0,1,2,3))   
   X_train = (X_train-mean)/(std+1e-7)
   X_test = (X_test-mean)/(std+1e-7)  
-  
-  score = model.evaluate(X_test, to_categorical(Y_test, 10), verbose=0)
-  print('Test loss2:', score[0])
-  print('Test accuracy2:', score[1])
 
+  return X_train, Y_train, X_test, Y_test
   
 
+  
 
     
 if __name__ == "__main__":
@@ -240,7 +200,18 @@ if __name__ == "__main__":
 
     model = buildModel2()
 
-    reloadFP32HPVMModel(model, "/home/hsharif3/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/model_params/alexnet_cifar10/")
+    X_train, Y_train, X_test, Y_test = data_preprocess()   
+    reload_dir = "/home/hsharif3/Gitlab/hpvm/llvm/projects/hpvm-tensor-rt/model_params/alexnet_cifar10/"
+    keras_model_file = "alexnet.h5"
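+    # Load the HPVM-dumped FP32 weights into the Keras model and save it as a standard .h5 file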
+    model = dumpHPVMToKerasModel(model, reload_dir, keras_model_file)
+
+
+    num_classes = 10
+    score = model.evaluate(X_test, to_categorical(Y_test, num_classes), verbose=0)
+    print('Test accuracy (reloaded HPVM weights):', score[1])
+
+    reloadKerasModel(keras_model_file, X_test, Y_test)
     
     ### trainModel(model)