diff --git a/docs/graphx-programming-guide.md b/docs/graphx-programming-guide.md
index bf4b968eb8b787341499355e972da63f340ff5eb..6f738f05998481b8ea99c0d035f09adcac240fea 100644
--- a/docs/graphx-programming-guide.md
+++ b/docs/graphx-programming-guide.md
@@ -421,15 +421,15 @@ val graph = Graph(users, relationships, defaultUser)
 // Notice that there is a user 0 (for which we have no information) connected to users
 // 4 (peter) and 5 (franklin).
 graph.triplets.map(
-    triplet => triplet.srcAttr._1 + " is the " + triplet.attr + " of " + triplet.dstAttr._1
-  ).collect.foreach(println(_))
+  triplet => triplet.srcAttr._1 + " is the " + triplet.attr + " of " + triplet.dstAttr._1
+).collect.foreach(println(_))
 // Remove missing vertices as well as the edges connected to them
 val validGraph = graph.subgraph(vpred = (id, attr) => attr._2 != "Missing")
 // The valid subgraph will disconnect users 4 and 5 by removing user 0
 validGraph.vertices.collect.foreach(println(_))
 validGraph.triplets.map(
-    triplet => triplet.srcAttr._1 + " is the " + triplet.attr + " of " + triplet.dstAttr._1
-  ).collect.foreach(println(_))
+  triplet => triplet.srcAttr._1 + " is the " + triplet.attr + " of " + triplet.dstAttr._1
+).collect.foreach(println(_))
 {% endhighlight %}
 
 > Note in the above example only the vertex predicate is provided.  The `subgraph` operator defaults
diff --git a/docs/programming-guide.md b/docs/programming-guide.md
index f82832905ef4480dec71e07ba3b26b8369e9aca5..40287d7702bd5745c01059278f39ff45d4c72ad2 100644
--- a/docs/programming-guide.md
+++ b/docs/programming-guide.md
@@ -1516,8 +1516,8 @@ data.map(x -> { accum.add(x); return f(x); });
 {% highlight python %}
 accum = sc.accumulator(0)
 def g(x):
-  accum.add(x)
-  return f(x)
+    accum.add(x)
+    return f(x)
 data.map(g)
 # Here, accum is still 0 because no actions have caused the `map` to be computed.
 {% endhighlight %}
diff --git a/docs/spark-standalone.md b/docs/spark-standalone.md
index 5ae63fe4e6e072c3727e0d0db2d3d10f672abb60..1097f1fabef6c5a5c0490be475ab9a171b7a8a4f 100644
--- a/docs/spark-standalone.md
+++ b/docs/spark-standalone.md
@@ -298,9 +298,9 @@ application at a time. You can cap the number of cores by setting `spark.cores.m
 
 {% highlight scala %}
 val conf = new SparkConf()
-             .setMaster(...)
-             .setAppName(...)
-             .set("spark.cores.max", "10")
+  .setMaster(...)
+  .setAppName(...)
+  .set("spark.cores.max", "10")
 val sc = new SparkContext(conf)
 {% endhighlight %}
 
diff --git a/docs/streaming-custom-receivers.md b/docs/streaming-custom-receivers.md
index 479140f519103ff9501860107ca1ee6c7967a9b2..fae5901e8dcefbb36ad87219d9d0df058b2507f6 100644
--- a/docs/streaming-custom-receivers.md
+++ b/docs/streaming-custom-receivers.md
@@ -59,8 +59,8 @@ class CustomReceiver(host: String, port: Int)
   }
 
   def onStop() {
-   // There is nothing much to do as the thread calling receive()
-   // is designed to stop by itself if isStopped() returns false
+    // There is nothing much to do as the thread calling receive()
+    // is designed to stop by itself if isStopped() returns true
   }
 
   /** Create a socket connection and receive data until receiver is stopped */
@@ -68,29 +68,29 @@ class CustomReceiver(host: String, port: Int)
     var socket: Socket = null
     var userInput: String = null
     try {
-     // Connect to host:port
-     socket = new Socket(host, port)
-
-     // Until stopped or connection broken continue reading
-     val reader = new BufferedReader(
-       new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))
-     userInput = reader.readLine()
-     while(!isStopped && userInput != null) {
-       store(userInput)
-       userInput = reader.readLine()
-     }
-     reader.close()
-     socket.close()
-
-     // Restart in an attempt to connect again when server is active again
-     restart("Trying to connect again")
+      // Connect to host:port
+      socket = new Socket(host, port)
+
+      // Until stopped or connection broken, continue reading
+      val reader = new BufferedReader(
+        new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))
+      userInput = reader.readLine()
+      while(!isStopped && userInput != null) {
+        store(userInput)
+        userInput = reader.readLine()
+      }
+      reader.close()
+      socket.close()
+
+      // Restart in an attempt to reconnect when the server is active again
+      restart("Trying to connect again")
     } catch {
-     case e: java.net.ConnectException =>
-       // restart if could not connect to server
-       restart("Error connecting to " + host + ":" + port, e)
-     case t: Throwable =>
-       // restart if there is any other error
-       restart("Error receiving data", t)
+      case e: java.net.ConnectException =>
+        // restart if could not connect to server
+        restart("Error connecting to " + host + ":" + port, e)
+      case t: Throwable =>
+        // restart if there is any other error
+        restart("Error receiving data", t)
     }
   }
 }
diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md
index 3d40b2c3136eba38f0d44b088c0a65ecab6cff01..aef62ea9000bbaf2361d4e056daf9ed515268fd8 100644
--- a/docs/streaming-programming-guide.md
+++ b/docs/streaming-programming-guide.md
@@ -863,7 +863,7 @@ Java code, take a look at the example
 {% highlight python %}
 def updateFunction(newValues, runningCount):
     if runningCount is None:
-       runningCount = 0
+        runningCount = 0
     return sum(newValues, runningCount)  # add the new values with the previous running count to get the new count
 {% endhighlight %}
 
@@ -903,10 +903,10 @@ spam information (maybe generated with Spark as well) and then filtering based o
 {% highlight scala %}
 val spamInfoRDD = ssc.sparkContext.newAPIHadoopRDD(...) // RDD containing spam information
 
-val cleanedDStream = wordCounts.transform(rdd => {
+val cleanedDStream = wordCounts.transform { rdd =>
   rdd.join(spamInfoRDD).filter(...) // join data stream with spam information to do data cleaning
   ...
-})
+}
 {% endhighlight %}
 
 </div>
@@ -1142,12 +1142,12 @@ val joinedStream = windowedStream.transform { rdd => rdd.join(dataset) }
 JavaPairRDD<String, String> dataset = ...
 JavaPairDStream<String, String> windowedStream = stream.window(Durations.seconds(20));
 JavaPairDStream<String, String> joinedStream = windowedStream.transform(
-    new Function<JavaRDD<Tuple2<String, String>>, JavaRDD<Tuple2<String, String>>>() {
-        @Override 
-        public JavaRDD<Tuple2<String, String>> call(JavaRDD<Tuple2<String, String>> rdd) {
-            return rdd.join(dataset);
-        }
+  new Function<JavaRDD<Tuple2<String, String>>, JavaRDD<Tuple2<String, String>>>() {
+    @Override
+    public JavaRDD<Tuple2<String, String>> call(JavaRDD<Tuple2<String, String>> rdd) {
+      return rdd.join(dataset);
     }
+  }
 );
 {% endhighlight %}
 </div>
@@ -1611,7 +1611,7 @@ words.foreachRDD(
 
       // Do word count on table using SQL and print it
       DataFrame wordCountsDataFrame =
-          spark.sql("select word, count(*) as total from words group by word");
+        spark.sql("select word, count(*) as total from words group by word");
       wordCountsDataFrame.show();
       return null;
     }
@@ -1759,11 +1759,11 @@ This behavior is made simple by using `StreamingContext.getOrCreate`. This is us
 {% highlight scala %}
 // Function to create and setup a new StreamingContext
 def functionToCreateContext(): StreamingContext = {
-    val ssc = new StreamingContext(...)   // new context
-    val lines = ssc.socketTextStream(...) // create DStreams
-    ...
-    ssc.checkpoint(checkpointDirectory)   // set checkpoint directory
-    ssc
+  val ssc = new StreamingContext(...)   // new context
+  val lines = ssc.socketTextStream(...) // create DStreams
+  ...
+  ssc.checkpoint(checkpointDirectory)   // set checkpoint directory
+  ssc
 }
 
 // Get StreamingContext from checkpoint data or create a new one