diff --git a/bin/start-all.sh b/bin/start-all.sh
index 9bd6c5065445126d2cf2d94041b77364da2ddfe9..b9891ad2f6ea5673e0044f7db754529107aea7a6 100755
--- a/bin/start-all.sh
+++ b/bin/start-all.sh
@@ -11,7 +11,7 @@ bin=`cd "$bin"; pwd`
 . "$bin/spark-config.sh"
 
 # Start Master
-"$bin"/start-master.sh --config $SPARK_CONF_DIR
+"$bin"/start-master.sh
 
 # Start Workers
-"$bin"/start-slaves.sh --config $SPARK_CONF_DIR
\ No newline at end of file
+"$bin"/start-slaves.sh
diff --git a/bin/start-master.sh b/bin/start-master.sh
index ad19d483310d16a40bca517c2cedc284d4507632..a901b1c26068e47ad0eb476aacf4928b0124c0b9 100755
--- a/bin/start-master.sh
+++ b/bin/start-master.sh
@@ -7,13 +7,28 @@ bin=`cd "$bin"; pwd`
 
 . "$bin/spark-config.sh"
 
+if [ -f "${SPARK_CONF_DIR}/spark-env.sh" ]; then
+  . "${SPARK_CONF_DIR}/spark-env.sh"
+fi
+
+if [ "$SPARK_MASTER_PORT" = "" ]; then
+  SPARK_MASTER_PORT=7077
+fi
+
+if [ "$SPARK_MASTER_IP" = "" ]; then
+  SPARK_MASTER_IP=`hostname`
+fi
+
+if [ "$SPARK_MASTER_WEBUI_PORT" = "" ]; then
+  SPARK_MASTER_WEBUI_PORT=8080
+fi
+
 # Set SPARK_PUBLIC_DNS so the master reports the correct webUI address to the slaves
 if [ "$SPARK_PUBLIC_DNS" = "" ]; then
     # If we appear to be running on EC2, use the public address by default:
     if [[ `hostname` == *ec2.internal ]]; then
-        echo "RUNNING ON EC2"
         export SPARK_PUBLIC_DNS=`wget -q -O - http://instance-data.ec2.internal/latest/meta-data/public-hostname`
     fi
 fi
 
-"$bin"/spark-daemon.sh start spark.deploy.master.Master
+"$bin"/spark-daemon.sh start spark.deploy.master.Master --ip $SPARK_MASTER_IP --port $SPARK_MASTER_PORT --webui-port $SPARK_MASTER_WEBUI_PORT
diff --git a/bin/start-slave.sh b/bin/start-slave.sh
index 10cce9c17bb3a4224d0ae8da90459cd3722bf4c1..45a0cf7a6b7ac526fb0651e6a3f12f4cbfca8b51 100755
--- a/bin/start-slave.sh
+++ b/bin/start-slave.sh
@@ -7,7 +7,6 @@ bin=`cd "$bin"; pwd`
 if [ "$SPARK_PUBLIC_DNS" = "" ]; then
     # If we appear to be running on EC2, use the public address by default:
     if [[ `hostname` == *ec2.internal ]]; then
-        echo "RUNNING ON EC2"
         export SPARK_PUBLIC_DNS=`wget -q -O - http://instance-data.ec2.internal/latest/meta-data/public-hostname`
     fi
 fi
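Only the debug echo is removed here; the metadata lookup itself is unchanged. For
reference, the same public-hostname query can be made against the well-known instance
metadata address 169.254.169.254, which does not depend on the ec2.internal DNS zone
(a sketch under that assumption, not part of the patch):

    # Query EC2 instance metadata by IP rather than by internal DNS name
    SPARK_PUBLIC_DNS=$(wget -q -O - http://169.254.169.254/latest/meta-data/public-hostname)
    export SPARK_PUBLIC_DNS
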
diff --git a/docs/spark-standalone.md b/docs/spark-standalone.md
index ae630a037184017a0b4f98b99a8121747a985174..e0ba7c35cb1fa4f21d77030e5c4fcb0734a13943 100644
--- a/docs/spark-standalone.md
+++ b/docs/spark-standalone.md
@@ -68,7 +68,7 @@ Finally, the following configuration options can be passed to the master and wor
 
 To launch a Spark standalone cluster with the deploy scripts, you need to set up two files, `conf/spark-env.sh` and `conf/slaves`. The `conf/spark-env.sh` file lets you specify global settings for the master and slave instances, such as memory, or port numbers to bind to, while `conf/slaves` is a list of slave nodes. The system requires that all the slave machines have the same configuration files, so *copy these files to each machine*.
 
-In `conf/spark-env.sh`, you can set the following parameters, in addition to the [standard Spark configuration settongs](configuration.html):
+In `conf/spark-env.sh`, you can set the following parameters, in addition to the [standard Spark configuration settings](configuration.html):
 
 <table class="table">
   <tr><th style="width:21%">Environment Variable</th><th>Meaning</th></tr>