diff --git a/llvm/projects/keras/src/mobilenet_imagenet.py b/llvm/projects/keras/src/mobilenet_imagenet.py
index c88e72967db82520fc6824375dcdeb3523fffb59..34a3ccaf7b2aac699441632e23759bb1a889a9f0 100644
--- a/llvm/projects/keras/src/mobilenet_imagenet.py
+++ b/llvm/projects/keras/src/mobilenet_imagenet.py
@@ -45,7 +45,7 @@ def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
     channel_axis = 1
     filters = int(filters * alpha)
         
-    x = ZeroPadding2D(padding=((1, 1), (1, 1)))(inputs)
+    x = ZeroPadding2D(padding=((0, 1), (0, 1)))(inputs)  # pad bottom/right only, matching TF 'same' padding for a stride-2 conv
     x = Conv2D(filters, kernel,
                       padding='valid',
                       use_bias=False,
@@ -60,7 +60,7 @@ def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
     pointwise_conv_filters = int(pointwise_conv_filters * alpha)
 
     if strides != (1, 1):
-        x = ZeroPadding2D(padding=((1, 1), (1, 1)))(inputs)
+        x = ZeroPadding2D(padding=((0, 1), (0, 1)))(inputs)  # bottom/right-only padding, as TF 'same' does for stride-2 convs
     else:
         x = inputs
     
@@ -210,59 +210,12 @@ y_true = np.array(y_true)
 
 
 
-def train_helper(x):
-    
-    try:
-        x = x.decode('utf-8')
-    except:
-        pass
-    
-    image = load_image(x)
-    
-    y = np.zeros(1000, dtype=np.uint8)
-        
-    y[synset_to_keras_idx[x.split('/')[-2]]]= 1
-        
-    return image, y
-
-
-
-
-train_images = glob.glob(IMAGENET_DIR + 'train/*/*')
-random.shuffle(train_images)
-
-dataset = tf.data.Dataset().from_tensor_slices(train_images)
-dataset = dataset.map(
-    lambda x : tf.py_func(train_helper, [x], [tf.float32, tf.uint8]), 
-    num_parallel_calls=16
-)
-
-dataset = dataset.shuffle(buffer_size=1000)
-dataset = dataset.batch(64)
-dataset = dataset.repeat()
-
-next_element = dataset.make_one_shot_iterator().get_next()
-
-sess = tf.Session()
-
-def generate():
-    while True:
-        yield sess.run(next_element)
-    
-
-
-model.compile(optimizer=keras.optimizers.Adam(lr=0.00001), loss='categorical_crossentropy', metrics=['acc'])
-
-
-model.fit_generator(generate(), steps_per_epoch=1000, validation_data=(X_test, to_categorical(y_true, num_classes=1000)), epochs=5)
-
-
 translate_to_approxhpvm(model, OUTPUT_DIR, X_test[:VAL_SIZE], y_true[:VAL_SIZE], 1000)
 
 dumpCalibrationData(OUTPUT_DIR + 'test_input.bin', X_test, OUTPUT_DIR + 'test_labels.bin', y_true)
 
 
-pred = np.argmax(model.predict(X_test), axis=1)
-print ('val accuracy', np.sum(pred == y_true.ravel()) / len(X_test))    
+# pred = np.argmax(model.predict(X_test), axis=1)
+# print ('val accuracy', np.sum(pred == y_true.ravel()) / len(X_test))
 
     
\ No newline at end of file