diff --git a/docs/_config.yml b/docs/_config.yml
index 88567403a04080ce932c368ce8b96a343c9abb87..7c18ab3fa92a7aedd10997b2dd9c84c8988fe3ec 100644
--- a/docs/_config.yml
+++ b/docs/_config.yml
@@ -5,4 +5,4 @@ markdown: kramdown
 # of Spark, Scala, and Mesos.
 SPARK_VERSION: 0.6.0
 SCALA_VERSION: 2.9.2
-MESOS_VERSION: 0.9.0
+MESOS_VERSION: 0.9.0-incubating
diff --git a/docs/_layouts/global.html b/docs/_layouts/global.html
index d185790bb5dfd7e14f417833bffdc99744102c0d..e9637dc1508dbc81301ed625c03773344ba9cc18 100755
--- a/docs/_layouts/global.html
+++ b/docs/_layouts/global.html
@@ -43,8 +43,8 @@
                             <a href="#" class="dropdown-toggle" data-toggle="dropdown">Programming Guides<b class="caret"></b></a>
                             <ul class="dropdown-menu">
                                 <li><a href="quick-start.html">Quick Start</a></li>
-                                <li><a href="scala-programming-guide.html">Scala</a></li>
-                                <li><a href="java-programming-guide.html">Java</a></li>
+                                <li><a href="scala-programming-guide.html">Scala Programming Guide</a></li>
+                                <li><a href="java-programming-guide.html">Java Programming Guide</a></li>
                             </ul>
                         </li>
                         
@@ -55,8 +55,8 @@
                             <ul class="dropdown-menu">
                                 <li><a href="ec2-scripts.html">Amazon EC2</a></li>
                                 <li><a href="spark-standalone.html">Standalone Mode</a></li>
-                                <li><a href="running-on-mesos.html">Mesos</a></li>
-                                <li><a href="running-on-yarn.html">YARN</a></li>
+                                <li><a href="running-on-mesos.html">On Mesos</a></li>
+                                <li><a href="running-on-yarn.html">On YARN</a></li>
                             </ul>
                         </li>
 
@@ -69,6 +69,7 @@
                                 <li><a href="contributing-to-spark.html">Contributing to Spark</a></li>
                             </ul>
                         </li>
+                        <li><p class="navbar-text pull-right"><span class="version-text">v{{site.SPARK_VERSION}}</span></p></li>
                     </ul>
                 </div>
             </div>
diff --git a/docs/css/main.css b/docs/css/main.css
index 13fe0b819514c5145530b22790441ecb40aab8e3..83fc7c8ec9f621e173348022a26eba0ad68fc0aa 100755
--- a/docs/css/main.css
+++ b/docs/css/main.css
@@ -20,6 +20,17 @@
   font-size: 15px;
 }
 
+.navbar .divider-vertical {
+  border-right-color: lightgray;
+}
+
+.navbar-text .version-text {
+  border: solid thin lightgray;
+  border-radius: 6px;
+  padding: 5px;
+  margin-left: 10px;
+}
+
 body #content {
   line-height: 1.6; /* Inspired by Github's wiki style */
 }
diff --git a/docs/index.md b/docs/index.md
index b6f08b53774a3bf97fb8399f0112a22480b1e445..028668e03f85974d6bc164eb89519a6955e3a390 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -19,7 +19,12 @@ Get Spark by checking out the master branch of the Git repository, using `git cl
 
 # Building
 
-Spark requires [Scala 2.9.2](http://www.scala-lang.org/). You will need to have Scala's `bin` directory in your `PATH`,
+Spark requires [Scala {{site.SCALA_VERSION}}](http://www.scala-lang.org/). You will need to have Scala's `bin` directory in your `PATH`,
 or you will need to set the `SCALA_HOME` environment variable to point
 to where you've installed Scala. Scala must also be accessible through one
 of these methods on slave nodes on your cluster.
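+
+For example, if you have unpacked Scala under `/home/user/scala-{{site.SCALA_VERSION}}` (a hypothetical location), you could add the following to your shell profile:
+
+    export SCALA_HOME=/home/user/scala-{{site.SCALA_VERSION}}
+    export PATH=$PATH:$SCALA_HOME/bin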
diff --git a/docs/java-programming-guide.md b/docs/java-programming-guide.md
index 24aa2d5c6bf549f63fccd6d84e0cef27801e5881..9b870e40814c102b19d8deaaae4803a99e2506a6 100644
--- a/docs/java-programming-guide.md
+++ b/docs/java-programming-guide.md
@@ -1,6 +1,6 @@
 ---
 layout: global
-title: Java Programming Guide
+title: Spark Java Programming Guide
 ---
 
 The Spark Java API exposes all the Spark features available in the Scala version to Java.
diff --git a/docs/quick-start.md b/docs/quick-start.md
index d28e78823988e4a56ea386a8d930f0b2a72abfe6..51e60426b5ac010f0b769d23c1a08130529091bc 100644
--- a/docs/quick-start.md
+++ b/docs/quick-start.md
@@ -119,7 +119,7 @@ import SparkContext._
 object SimpleJob extends Application {
   val logFile = "/var/log/syslog" // Should be some log file on your system
   val sc = new SparkContext("local", "Simple Job", "$YOUR_SPARK_HOME", 
-    "target/scala-2.9.2/simple-project_2.9.2-1.0.jar")
+    "target/scala-{{site.SCALA_VERSION}}/simple-project_{{site.SCALA_VERSION}}-1.0.jar")
   val logData = sc.textFile(logFile, 2).cache()
   val numAs = logData.filter(line => line.contains("a")).count()
   val numBs = logData.filter(line => line.contains("b")).count()
@@ -136,9 +136,19 @@ name := "Simple Project"
 
 version := "1.0"
 
-scalaVersion := "2.9.2"
+scalaVersion := "{{site.SCALA_VERSION}}"
 
-libraryDependencies += "org.spark-project" %% "spark-core" % "0.6.0-SNAPSHOT"
+libraryDependencies += "org.spark-project" %% "spark-core" % "{{site.SPARK_VERSION}}"
 {% endhighlight %}
 
-Of course, for sbt to work correctly, we'll need to layout `SimpleJob.scala` and `simple.sbt` according to the typical directory structure. Once that is in place, we can create a jar package containing the job's code, then use `sbt run` to execute our example job. 
+Of course, for sbt to work correctly, we'll need to lay out `SimpleJob.scala` and `simple.sbt` according to the typical directory structure. Once that is in place, we can create a jar package containing the job's code, then use `sbt run` to execute our example job.
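+
+For example, a minimal layout might look like the listing below (`find .` output from the project root; the directory names follow sbt's conventions):
+
+    $ find .
+    .
+    ./simple.sbt
+    ./src
+    ./src/main
+    ./src/main/scala
+    ./src/main/scala/SimpleJob.scala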
@@ -209,8 +219,8 @@ Our Maven `pom.xml` file will list Spark as a dependency. Note that Spark artifa
   <dependencies>
     <dependency> <!-- Spark dependency -->
       <groupId>org.spark-project</groupId>
-      <artifactId>spark-core_2.9.2</artifactId>
-      <version>0.6.0-SNAPSHOT</version>
+      <artifactId>spark-core_{{site.SCALA_VERSION}}</artifactId>
+      <version>{{site.SPARK_VERSION}}</version>
     </dependency>
   </dependencies>
 </project>
diff --git a/docs/running-on-mesos.md b/docs/running-on-mesos.md
index 0fc71bfbd5b01fb4e6af30adaee1c33ca0575695..97564d74267782a48435189298af2655d84633fe 100644
--- a/docs/running-on-mesos.md
+++ b/docs/running-on-mesos.md
@@ -6,7 +6,7 @@ title: Running Spark on Mesos
 Spark can run on private clusters managed by the [Apache Mesos](http://incubator.apache.org/mesos/) resource manager. Follow the steps below to install Mesos and Spark:
 
 1. Download and build Spark using the instructions [here](index.html).
-2. Download Mesos 0.9.0 from a [mirror](http://www.apache.org/dyn/closer.cgi/incubator/mesos/mesos-0.9.0-incubating/).
+2. Download Mesos {{site.MESOS_VERSION}} from a [mirror](http://www.apache.org/dyn/closer.cgi/incubator/mesos/mesos-{{site.MESOS_VERSION}}/).
 3. Configure Mesos using the `configure` script, passing the location of your `JAVA_HOME` using `--with-java-home`. Mesos comes with "template" configure scripts for different platforms, such as `configure.macosx`, that you can run. See the README file in Mesos for other options. **Note:** If you want to run Mesos without installing it into the default paths on your system (e.g. if you don't have administrative privileges to install it), you should also pass the `--prefix` option to `configure` to tell it where to install. For example, pass `--prefix=/home/user/mesos`. By default the prefix is `/usr/local`.
 4. Build Mesos using `make`, and then install it using `make install`.
 5. Create a file called `spark-env.sh` in Spark's `conf` directory, by copying `conf/spark-env.sh.template`, and add the following lines in it:
diff --git a/docs/running-on-yarn.md b/docs/running-on-yarn.md
index 501b19b79e45f7ee9c4dd28fec1f422b4f17af7e..dd094ab131bb733c86777ec94c76c7aefa4a8d0d 100644
--- a/docs/running-on-yarn.md
+++ b/docs/running-on-yarn.md
@@ -3,10 +3,11 @@ layout: global
 title: Launching Spark on YARN
 ---
 
-Spark 0.6 adds experimental support for running over a [YARN (Hadoop 
-NextGen)](http://hadoop.apache.org/docs/r2.0.1-alpha/hadoop-yarn/hadoop-yarn-site/YARN.html) cluster. 
-Because YARN depends on version 2.0 of the Hadoop libraries, this currently requires checking out a
-separate branch of Spark, called `yarn`, which you can do as follows:
+Experimental support for running on a [YARN (Hadoop
+NextGen)](http://hadoop.apache.org/docs/r2.0.1-alpha/hadoop-yarn/hadoop-yarn-site/YARN.html)
+cluster was added to Spark in version 0.6.0. Because YARN depends on version
+2.0 of the Hadoop libraries, this currently requires checking out a separate
+branch of Spark, called `yarn`, which you can do as follows:
 
     git clone git://github.com/mesos/spark
     cd spark
@@ -18,7 +19,12 @@ separate branch of Spark, called `yarn`, which you can do as follows:
 - In order to distribute Spark within the cluster, it must be packaged into a single JAR file. This can be done by running `sbt/sbt assembly`
 - Your application code must be packaged into a separate JAR file.
 
-If you want to test out the YARN deployment mode, you can use the current Spark examples. A `spark-examples_2.9.2-0.6.0-SNAPSHOT.jar` file can be generated by running `sbt/sbt package`.
+If you want to test out the YARN deployment mode, you can use the current Spark examples. A `spark-examples_{{site.SCALA_VERSION}}-{{site.SPARK_VERSION}}-SNAPSHOT.jar` file can be generated by running `sbt/sbt package`. **Note:** this documentation is for Spark version {{site.SPARK_VERSION}}, so it assumes that you have downloaded Spark {{site.SPARK_VERSION}} or checked it out of source control. If you are using a different version of Spark, the version numbers in the jar generated by `sbt/sbt package` will differ accordingly.
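+
+For example, assuming a Spark {{site.SPARK_VERSION}} checkout:
+
+    sbt/sbt package
+    # produces examples/target/scala-{{site.SCALA_VERSION}}/spark-examples_{{site.SCALA_VERSION}}-{{site.SPARK_VERSION}}-SNAPSHOT.jar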
 
 # Launching Spark on YARN
 
@@ -34,8 +40,8 @@ The command to launch the YARN Client is as follows:
 
 For example:
 
-    SPARK_JAR=./core/target/spark-core-assembly-0.6.0-SNAPSHOT.jar ./run spark.deploy.yarn.Client \
-      --jar examples/target/scala-2.9.2/spark-examples_2.9.2-0.6.0-SNAPSHOT.jar \
+    SPARK_JAR=./core/target/spark-core-assembly-{{site.SPARK_VERSION}}-SNAPSHOT.jar ./run spark.deploy.yarn.Client \
+      --jar examples/target/scala-{{site.SCALA_VERSION}}/spark-examples_{{site.SCALA_VERSION}}-{{site.SPARK_VERSION}}-SNAPSHOT.jar \
       --class spark.examples.SparkPi \
       --args standalone \
       --num-workers 3 \
diff --git a/docs/scala-programming-guide.md b/docs/scala-programming-guide.md
index 70d1dc988cbaec03d9b21bfb5eac19227ab5128b..76a1957efa1ed5f9780b75ea99b6d176523228c1 100644
--- a/docs/scala-programming-guide.md
+++ b/docs/scala-programming-guide.md
@@ -1,6 +1,6 @@
 ---
 layout: global
-title: Spark Programming Guide
+title: Spark Scala Programming Guide
 ---
 
 * This will become a table of contents (this text will be scraped).