diff --git a/examples/baseline_networks/imagenet/mobilenet_imagenet_baseline_training.yaml b/examples/baseline_networks/imagenet/mobilenet_imagenet_baseline_training.yaml
deleted file mode 120000
index 582d1417248f8ccb4c2fb4806fa3abc973c6f119..0000000000000000000000000000000000000000
--- a/examples/baseline_networks/imagenet/mobilenet_imagenet_baseline_training.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../agp-pruning/mobilenet_imagenet_baseline_training.yaml
\ No newline at end of file
diff --git a/examples/baseline_networks/imagenet/mobilenet_imagenet_baseline_training.yaml b/examples/baseline_networks/imagenet/mobilenet_imagenet_baseline_training.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bd9402e5adfaff2b4b9df6fa6d900305b6422db0
--- /dev/null
+++ b/examples/baseline_networks/imagenet/mobilenet_imagenet_baseline_training.yaml
@@ -0,0 +1,35 @@
+#
+# This YAML file contains the configuration and command-line arguments for training MobileNet v1 from scratch.
+# Top1: 71.156    Top5: 89.972
+#
+# compress_classifier.py --arch=mobilenet ../../../data.imagenet --lr=0.045 --batch=256 -j=32 --vs=0 --name=mobilenet_v1_training -p=50 --wd=1e-4 --epochs=200 --compress=../baseline_networks/imagenet/mobilenet_imagenet_baseline_training.yaml
+#
+#
+# 2019-07-01 19:22:09,917 - ==> Best [Top1: 71.156   Top5: 89.972   Sparsity:0.00   Params: 4209088 on epoch: 199]
+# 2019-07-01 19:22:09,917 - Saving checkpoint to: logs/mobilenet_v1_training___2019.06.29-122534/mobilenet_v1_training_checkpoint.pth.tar
+# 2019-07-01 19:22:10,145 - --- test ---------------------
+# 2019-07-01 19:22:10,145 - 50000 samples (256 per mini-batch)
+# 2019-07-01 19:22:28,635 - Test: [   50/  195]    Loss 1.189988    Top1 70.539062    Top5 89.781250
+# 2019-07-01 19:22:35,567 - Test: [  100/  195]    Loss 1.182166    Top1 70.851562    Top5 89.792969
+# 2019-07-01 19:22:43,253 - Test: [  150/  195]    Loss 1.177892    Top1 70.927083    Top5 89.903646
+# 2019-07-01 19:22:50,377 - ==> Top1: 71.156    Top5: 89.972    Loss: 1.175
+#
+
+lr_schedulers:
+  training_lr:
+    class: ExponentialLR
+    gamma: 0.98
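+# ExponentialLR is assumed here to resolve to torch.optim.lr_scheduler.ExponentialLR,
+# which multiplies the current learning rate by gamma on every scheduler step.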
+
+policies:
+    - lr_scheduler:
+        instance_name: training_lr
+      starting_epoch: 0
+      ending_epoch: 200
+      frequency: 1
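+# This policy steps training_lr once per epoch (frequency: 1) from epoch 0 to 200,
+# so the effective schedule is lr(epoch) ~= 0.045 * 0.98^epoch: roughly 6.0e-3 at
+# epoch 100 and 7.9e-4 at epoch 200, starting from --lr=0.045 above.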
+
+