diff --git a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala
index ad27a9ab781d28468f5ebbdec3f372a40dbd086b..fc3095301181212dc72b99e53797b5c6a7f1db25 100644
--- a/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala
+++ b/yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/YarnRMClientImpl.scala
@@ -18,6 +18,8 @@
 package org.apache.spark.deploy.yarn
 
+import java.net.URI
+
 import scala.collection.{Map, Set}
 
 import org.apache.hadoop.net.NetUtils
 import org.apache.hadoop.yarn.api._
@@ -97,7 +98,8 @@ private class YarnRMClientImpl(args: ApplicationMasterArguments) extends YarnRMC
     // Users can then monitor stderr/stdout on that node if required.
     appMasterRequest.setHost(Utils.localHostName())
     appMasterRequest.setRpcPort(0)
-    appMasterRequest.setTrackingUrl(uiAddress)
+    // Remove the scheme from the URL since Hadoop does not expect it in the tracking URL.
+    appMasterRequest.setTrackingUrl(new URI(uiAddress).getAuthority())
     resourceManager.registerApplicationMaster(appMasterRequest)
   }
 
diff --git a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
index a879c833a014f03d0a0a2505782540d69561e21a..5756263e89e21036ac7f6fddc1e2a39e3fdd9f88 100644
--- a/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
+++ b/yarn/common/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
@@ -189,7 +189,7 @@ private[spark] class ApplicationMaster(args: ApplicationMasterArguments,
     if (sc == null) {
       finish(FinalApplicationStatus.FAILED, "Timed out waiting for SparkContext.")
     } else {
-      registerAM(sc.ui.appUIHostPort, securityMgr)
+      registerAM(sc.ui.appUIAddress, securityMgr)
       try {
         userThread.join()
       } finally {