diff --git a/docs/index.md b/docs/index.md
index e3647717a1f187387677aae31cd46f8b2dbf2923..a2f1a84371ff428785cf87edf7766abd171f9173 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -24,11 +24,11 @@ right version of Scala from [scala-lang.org](http://www.scala-lang.org/download/
 
 # Running the Examples and Shell
 
-Spark comes with several sample programs.  Scala and Java examples are in the `examples` directory, and Python examples are in `python/examples`.
+Spark comes with several sample programs.  Scala, Java and Python examples are in the `examples/src/main` directory.
 To run one of the Java or Scala sample programs, use `./bin/run-example <class> <params>` in the top-level Spark directory
 (the `bin/run-example` script sets up the appropriate paths and launches that program).
 For example, try `./bin/run-example org.apache.spark.examples.SparkPi local`.
-To run a Python sample program, use `./bin/pyspark <sample-program> <params>`.  For example, try `./bin/pyspark ./python/examples/pi.py local`.
+To run a Python sample program, use `./bin/pyspark <sample-program> <params>`.  For example, try `./bin/pyspark ./examples/src/main/python/pi.py local`.
 
 Each example prints usage help when run with no parameters.
 
diff --git a/docs/python-programming-guide.md b/docs/python-programming-guide.md
index 8ea22e15a4b69cd5b7249a6bfd028fcfaaaccd37..6813963bb080cef80a68e885ee5b47762b1799fc 100644
--- a/docs/python-programming-guide.md
+++ b/docs/python-programming-guide.md
@@ -161,9 +161,9 @@ some example applications.
 
 # Where to Go from Here
 
-PySpark also includes several sample programs in the [`python/examples` folder](https://github.com/apache/spark/tree/master/python/examples).
+PySpark also includes several sample programs in the [`examples/src/main/python` folder](https://github.com/apache/spark/tree/master/examples/src/main/python).
 You can run them by passing the files to `spark-submit`; e.g.:
 
-    ./bin/spark-submit python/examples/wordcount.py
+    ./bin/spark-submit examples/src/main/python/wordcount.py
 
 Each program prints usage help when run without arguments.
diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md
index e8b718b30356013e62c1287d09373f58beef098c..939599aa6855b2fd06e744c49a8e1769ca47ce87 100644
--- a/docs/streaming-programming-guide.md
+++ b/docs/streaming-programming-guide.md
@@ -129,7 +129,7 @@ ssc.awaitTermination()  // Wait for the computation to terminate
 {% endhighlight %}
 
 The complete code can be found in the Spark Streaming example
-[NetworkWordCount]({{site.SPARK_GITHUB_URL}}/blob/master/examples/src/main/scala/org/apache/spark/streaming/examples/NetworkWordCount.scala).
+[NetworkWordCount]({{site.SPARK_GITHUB_URL}}/blob/master/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala).
 <br>
 
 </div>
@@ -215,7 +215,7 @@ jssc.awaitTermination();   // Wait for the computation to terminate
 {% endhighlight %}
 
 The complete code can be found in the Spark Streaming example
-[JavaNetworkWordCount]({{site.SPARK_GITHUB_URL}}/blob/master/examples/src/main/java/org/apache/spark/streaming/examples/JavaNetworkWordCount.java).
+[JavaNetworkWordCount]({{site.SPARK_GITHUB_URL}}/blob/master/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java).
 <br>
 
 </div>
@@ -234,12 +234,12 @@ Then, in a different terminal, you can start the example by using
 <div class="codetabs">
 <div data-lang="scala" markdown="1">
 {% highlight bash %}
-$ ./bin/run-example org.apache.spark.streaming.examples.NetworkWordCount local[2] localhost 9999
+$ ./bin/run-example org.apache.spark.examples.streaming.NetworkWordCount local[2] localhost 9999
 {% endhighlight %}
 </div>
 <div data-lang="java" markdown="1">
 {% highlight bash %}
-$ ./bin/run-example org.apache.spark.streaming.examples.JavaNetworkWordCount local[2] localhost 9999
+$ ./bin/run-example org.apache.spark.examples.streaming.JavaNetworkWordCount local[2] localhost 9999
 {% endhighlight %}
 </div>
 </div>
@@ -268,7 +268,7 @@ hello world
 {% highlight bash %}
 # TERMINAL 2: RUNNING NetworkWordCount or JavaNetworkWordCount
 
-$ ./bin/run-example org.apache.spark.streaming.examples.NetworkWordCount local[2] localhost 9999
+$ ./bin/run-example org.apache.spark.examples.streaming.NetworkWordCount local[2] localhost 9999
 ...
 -------------------------------------------
 Time: 1357008430000 ms
@@ -609,7 +609,7 @@ JavaPairDStream<String, Integer> runningCounts = pairs.updateStateByKey(updateFu
 The update function will be called for each word, with `newValues` being a sequence of 1's (from
 the `(word, 1)` pairs) and `runningCount` holding the previous count. For the complete
 Scala code, take a look at the example
-[StatefulNetworkWordCount]({{site.SPARK_GITHUB_URL}}/blob/master/examples/src/main/scala/org/apache/spark/streaming/examples/StatefulNetworkWordCount.scala).
+[StatefulNetworkWordCount]({{site.SPARK_GITHUB_URL}}/blob/master/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala).
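+
+As a minimal Scala sketch of such an update function (integer counts are assumed, and `pairs` is the DStream of `(word, 1)` pairs from above):
+
+{% highlight scala %}
+def updateFunction(newValues: Seq[Int], runningCount: Option[Int]): Option[Int] = {
+  // Sum this batch's 1's and add them to the previous running count, if any.
+  Some(newValues.sum + runningCount.getOrElse(0))
+}
+
+val runningCounts = pairs.updateStateByKey[Int](updateFunction _)
+{% endhighlight %}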
 
 <h4>Transform Operation</h4>
 
@@ -1135,7 +1135,7 @@ If the `checkpointDirectory` exists, then the context will be recreated from the
 If the directory does not exist (i.e., running for the first time),
 then the function `functionToCreateContext` will be called to create a new
 context and set up the DStreams. See the Scala example
-[RecoverableNetworkWordCount]({{site.SPARK_GITHUB_URL}}/tree/master/examples/src/main/scala/org/apache/spark/streaming/examples/RecoverableNetworkWordCount.scala).
+[RecoverableNetworkWordCount]({{site.SPARK_GITHUB_URL}}/tree/master/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala).
 This example appends the word counts of network data into a file.
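+
+A minimal Scala sketch of this pattern (the master, app name, and batch interval are illustrative, the DStream definitions are elided, and `checkpointDirectory` is the string path named above):
+
+{% highlight scala %}
+def functionToCreateContext(): StreamingContext = {
+  val ssc = new StreamingContext("local[2]", "RecoverableWordCount", Seconds(1))
+  // ... define the input and transformed DStreams on ssc here ...
+  ssc.checkpoint(checkpointDirectory)   // write checkpoint data into checkpointDirectory
+  ssc
+}
+
+// Recreated from the checkpoint data if checkpointDirectory exists,
+// otherwise built fresh by calling functionToCreateContext.
+val context = StreamingContext.getOrCreate(checkpointDirectory, functionToCreateContext _)
+{% endhighlight %}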
 
 You can also explicitly create a `StreamingContext` from the checkpoint data and start the
@@ -1174,7 +1174,7 @@ If the `checkpointDirectory` exists, then the context will be recreated from the
 If the directory does not exist (i.e., running for the first time),
 then the function `contextFactory` will be called to create a new
 context and set up the DStreams. See the Scala example
-[JavaRecoverableWordCount]({{site.SPARK_GITHUB_URL}}/tree/master/examples/src/main/scala/org/apache/spark/streaming/examples/JavaRecoverableWordCount.scala)
+[JavaRecoverableWordCount]({{site.SPARK_GITHUB_URL}}/tree/master/examples/src/main/scala/org/apache/spark/examples/streaming/JavaRecoverableWordCount.scala)
 (note that this example is missing in the 0.9 release, so you can test it using the master branch).
 This example appends the word counts of network data into a file.
 
@@ -1374,7 +1374,6 @@ package and renamed for better clarity.
     [ZeroMQUtils](api/java/org/apache/spark/streaming/zeromq/ZeroMQUtils.html), and
     [MQTTUtils](api/java/org/apache/spark/streaming/mqtt/MQTTUtils.html)
 
-* More examples in [Scala]({{site.SPARK_GITHUB_URL}}/tree/master/examples/src/main/scala/org/apache/spark/streaming/examples)
-  and [Java]({{site.SPARK_GITHUB_URL}}/tree/master/examples/src/main/java/org/apache/spark/streaming/examples)
-* [Paper](http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-259.pdf) and
-[video](http://youtu.be/g171ndOHgJ0) describing Spark Streaming.
+* More examples in [Scala]({{site.SPARK_GITHUB_URL}}/tree/master/examples/src/main/scala/org/apache/spark/examples/streaming)
+  and [Java]({{site.SPARK_GITHUB_URL}}/tree/master/examples/src/main/java/org/apache/spark/examples/streaming)
+* [Paper](http://www.eecs.berkeley.edu/Pubs/TechRpts/2012/EECS-2012-259.pdf) and [video](http://youtu.be/g171ndOHgJ0) describing Spark Streaming.
diff --git a/examples/src/main/java/org/apache/spark/mllib/examples/JavaALS.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaALS.java
similarity index 98%
rename from examples/src/main/java/org/apache/spark/mllib/examples/JavaALS.java
rename to examples/src/main/java/org/apache/spark/examples/mllib/JavaALS.java
index c516199d61c72a174e88d2def0c650b1ac1fc91d..4533c4c5f241a7c89e7ba027eee0dd6822550569 100644
--- a/examples/src/main/java/org/apache/spark/mllib/examples/JavaALS.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaALS.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.mllib.examples;
+package org.apache.spark.examples.mllib;
 
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
diff --git a/examples/src/main/java/org/apache/spark/mllib/examples/JavaKMeans.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaKMeans.java
similarity index 98%
rename from examples/src/main/java/org/apache/spark/mllib/examples/JavaKMeans.java
rename to examples/src/main/java/org/apache/spark/examples/mllib/JavaKMeans.java
index 7461609ab9e8f462e37bf3d5dffd103e5c5bddf8..0cfb8e69ed28f4e9d89ea7b264346aa2ffcffd36 100644
--- a/examples/src/main/java/org/apache/spark/mllib/examples/JavaKMeans.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaKMeans.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.mllib.examples;
+package org.apache.spark.examples.mllib;
 
 import java.util.regex.Pattern;
 
diff --git a/examples/src/main/java/org/apache/spark/mllib/examples/JavaLR.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaLR.java
similarity index 98%
rename from examples/src/main/java/org/apache/spark/mllib/examples/JavaLR.java
rename to examples/src/main/java/org/apache/spark/examples/mllib/JavaLR.java
index e3ab87cc722f325584925b048a46e1f80cd26cbb..f6e48b498727b47808bde88410c568be1bb756bf 100644
--- a/examples/src/main/java/org/apache/spark/mllib/examples/JavaLR.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaLR.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.mllib.examples;
+package org.apache.spark.examples.mllib;
 
 import java.util.regex.Pattern;
 
diff --git a/examples/src/main/java/org/apache/spark/streaming/examples/JavaFlumeEventCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java
similarity index 96%
rename from examples/src/main/java/org/apache/spark/streaming/examples/JavaFlumeEventCount.java
rename to examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java
index c59f7538f8a826dc2712b38373efe0cf65fafcf1..a5ece68cef870507095c9fad791f3c94de21ee40 100644
--- a/examples/src/main/java/org/apache/spark/streaming/examples/JavaFlumeEventCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaFlumeEventCount.java
@@ -15,9 +15,10 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples;
+package org.apache.spark.examples.streaming;
 
 import org.apache.spark.api.java.function.Function;
+import org.apache.spark.examples.streaming.StreamingExamples;
 import org.apache.spark.streaming.*;
 import org.apache.spark.streaming.api.java.*;
 import org.apache.spark.streaming.flume.FlumeUtils;
diff --git a/examples/src/main/java/org/apache/spark/streaming/examples/JavaKafkaWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java
similarity index 96%
rename from examples/src/main/java/org/apache/spark/streaming/examples/JavaKafkaWordCount.java
rename to examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java
index 8da9bcd05ac381f819a652590435cc4ac9d07960..da51eb189a64931a0d9dedef2bb564e96caff453 100644
--- a/examples/src/main/java/org/apache/spark/streaming/examples/JavaKafkaWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples;
+package org.apache.spark.examples.streaming;
 
 import java.util.Map;
 import java.util.HashMap;
@@ -26,6 +26,7 @@ import org.apache.spark.api.java.function.FlatMapFunction;
 import org.apache.spark.api.java.function.Function;
 import org.apache.spark.api.java.function.Function2;
 import org.apache.spark.api.java.function.PairFunction;
+import org.apache.spark.examples.streaming.StreamingExamples;
 import org.apache.spark.streaming.Duration;
 import org.apache.spark.streaming.api.java.JavaDStream;
 import org.apache.spark.streaming.api.java.JavaPairDStream;
@@ -44,7 +45,7 @@ import scala.Tuple2;
  *   <numThreads> is the number of threads the kafka consumer should use
  *
  * Example:
- *    `./bin/run-example org.apache.spark.streaming.examples.JavaKafkaWordCount local[2] zoo01,zoo02,
+ *    `./bin/run-example org.apache.spark.examples.streaming.JavaKafkaWordCount local[2] zoo01,zoo02,
  *    zoo03 my-consumer-group topic1,topic2 1`
  */
 
diff --git a/examples/src/main/java/org/apache/spark/streaming/examples/JavaNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
similarity index 95%
rename from examples/src/main/java/org/apache/spark/streaming/examples/JavaNetworkWordCount.java
rename to examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
index 098c329ff6808356ca39643857015e96f882e991..ac84991d87b8b6caeff975cecc6d30df96467be1 100644
--- a/examples/src/main/java/org/apache/spark/streaming/examples/JavaNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples;
+package org.apache.spark.examples.streaming;
 
 import com.google.common.collect.Lists;
 import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
@@ -23,6 +23,7 @@ import scala.Tuple2;
 import org.apache.spark.api.java.function.FlatMapFunction;
 import org.apache.spark.api.java.function.Function2;
 import org.apache.spark.api.java.function.PairFunction;
+import org.apache.spark.examples.streaming.StreamingExamples;
 import org.apache.spark.streaming.Duration;
 import org.apache.spark.streaming.api.java.JavaDStream;
 import org.apache.spark.streaming.api.java.JavaPairDStream;
@@ -39,7 +40,7 @@ import java.util.regex.Pattern;
  * To run this on your local machine, you need to first run a Netcat server
  *    `$ nc -lk 9999`
  * and then run the example
- *    `$ ./run org.apache.spark.streaming.examples.JavaNetworkWordCount local[2] localhost 9999`
+ *    `$ ./bin/run-example org.apache.spark.examples.streaming.JavaNetworkWordCount local[2] localhost 9999`
  */
 public final class JavaNetworkWordCount {
   private static final Pattern SPACE = Pattern.compile(" ");
diff --git a/examples/src/main/java/org/apache/spark/streaming/examples/JavaQueueStream.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
similarity index 96%
rename from examples/src/main/java/org/apache/spark/streaming/examples/JavaQueueStream.java
rename to examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
index 88ad341641e0a925ba377451052b34576750789f..819311968fac57b025d4a8adc6136522eade9fb0 100644
--- a/examples/src/main/java/org/apache/spark/streaming/examples/JavaQueueStream.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
@@ -15,13 +15,14 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples;
+package org.apache.spark.examples.streaming;
 
 import com.google.common.collect.Lists;
 import scala.Tuple2;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.function.Function2;
 import org.apache.spark.api.java.function.PairFunction;
+import org.apache.spark.examples.streaming.StreamingExamples;
 import org.apache.spark.streaming.Duration;
 import org.apache.spark.streaming.api.java.JavaDStream;
 import org.apache.spark.streaming.api.java.JavaPairDStream;
diff --git a/python/examples/als.py b/examples/src/main/python/als.py
similarity index 100%
rename from python/examples/als.py
rename to examples/src/main/python/als.py
diff --git a/python/examples/kmeans.py b/examples/src/main/python/kmeans.py
similarity index 98%
rename from python/examples/kmeans.py
rename to examples/src/main/python/kmeans.py
index d8387b0b183e6ae663b7cb250a5dfbf04252a216..e3596488faf9e26c2b3e65b792da692bd0a4d640 100755
--- a/python/examples/kmeans.py
+++ b/examples/src/main/python/kmeans.py
@@ -18,7 +18,7 @@
 """
 A K-means implementation written from scratch using PySpark. In practice,
 one may prefer to use the KMeans algorithm in MLlib, as shown in
-python/examples/mllib/kmeans.py.
+examples/src/main/python/mllib/kmeans.py.
 
 This example requires NumPy (http://www.numpy.org/).
 """
diff --git a/python/examples/logistic_regression.py b/examples/src/main/python/logistic_regression.py
similarity index 97%
rename from python/examples/logistic_regression.py
rename to examples/src/main/python/logistic_regression.py
index 28d52e6a40b4511ce6772f4d3977210cea8f5ac7..fe5373cf799b1c884cbd76583718f0a54eb62b4b 100755
--- a/python/examples/logistic_regression.py
+++ b/examples/src/main/python/logistic_regression.py
@@ -20,7 +20,7 @@ A logistic regression implementation that uses NumPy (http://www.numpy.org)
 to act on batches of input data using efficient matrix operations.
 
 In practice, one may prefer to use the LogisticRegression algorithm in
-MLlib, as shown in python/examples/mllib/logistic_regression.py.
+MLlib, as shown in examples/src/main/python/mllib/logistic_regression.py.
 """
 
 from collections import namedtuple
diff --git a/python/examples/mllib/kmeans.py b/examples/src/main/python/mllib/kmeans.py
similarity index 100%
rename from python/examples/mllib/kmeans.py
rename to examples/src/main/python/mllib/kmeans.py
diff --git a/python/examples/mllib/logistic_regression.py b/examples/src/main/python/mllib/logistic_regression.py
similarity index 100%
rename from python/examples/mllib/logistic_regression.py
rename to examples/src/main/python/mllib/logistic_regression.py
diff --git a/python/examples/pagerank.py b/examples/src/main/python/pagerank.py
similarity index 100%
rename from python/examples/pagerank.py
rename to examples/src/main/python/pagerank.py
diff --git a/python/examples/pi.py b/examples/src/main/python/pi.py
similarity index 100%
rename from python/examples/pi.py
rename to examples/src/main/python/pi.py
diff --git a/python/examples/sort.py b/examples/src/main/python/sort.py
similarity index 100%
rename from python/examples/sort.py
rename to examples/src/main/python/sort.py
diff --git a/python/examples/transitive_closure.py b/examples/src/main/python/transitive_closure.py
similarity index 100%
rename from python/examples/transitive_closure.py
rename to examples/src/main/python/transitive_closure.py
diff --git a/python/examples/wordcount.py b/examples/src/main/python/wordcount.py
similarity index 100%
rename from python/examples/wordcount.py
rename to examples/src/main/python/wordcount.py
diff --git a/examples/src/main/scala/org/apache/spark/sql/examples/RDDRelation.scala b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
similarity index 98%
rename from examples/src/main/scala/org/apache/spark/sql/examples/RDDRelation.scala
rename to examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index 8210ad977f066425b6b8293d533f75e664f87d6e..ff9254b044c24aed2a9ed7f78a0e4696d3ed1ae6 100644
--- a/examples/src/main/scala/org/apache/spark/sql/examples/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.sql.examples
+package org.apache.spark.examples.sql
 
 import org.apache.spark.SparkContext
 import org.apache.spark.sql.SQLContext
diff --git a/examples/src/main/scala/org/apache/spark/sql/examples/HiveFromSpark.scala b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
similarity index 98%
rename from examples/src/main/scala/org/apache/spark/sql/examples/HiveFromSpark.scala
rename to examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
index 62329bde8448105239f3c9aa0efabaf77a9b080d..66ce93a26ef42b70cff5227113177206a2eacf05 100644
--- a/examples/src/main/scala/org/apache/spark/sql/examples/HiveFromSpark.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.sql.hive.examples
+package org.apache.spark.examples.sql.hive
 
 import org.apache.spark.SparkContext
 import org.apache.spark.sql._
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/ActorWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
similarity index 95%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/ActorWordCount.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
index c845dd8904c90cae4937f402b11035fd59310078..84cf43df0f96c3cf8fc95cbfeff6c6c7e8e11409 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/ActorWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import scala.collection.mutable.LinkedList
 import scala.reflect.ClassTag
@@ -78,7 +78,7 @@ class FeederActor extends Actor {
  * goes and subscribe to a typical publisher/feeder actor and receives
  * data.
  *
- * @see [[org.apache.spark.streaming.examples.FeederActor]]
+ * @see [[org.apache.spark.examples.streaming.FeederActor]]
  */
 class SampleActorReceiver[T: ClassTag](urlOfPublisher: String)
 extends Actor with ActorHelper {
@@ -131,9 +131,9 @@ object FeederActor {
  *   <hostname> and <port> describe the Akka ActorSystem that the Spark sample feeder is running on.
  *
  * To run this example locally, you may run Feeder Actor as
- *    `$ ./bin/run-example org.apache.spark.streaming.examples.FeederActor 127.0.1.1 9999`
+ *    `$ ./bin/run-example org.apache.spark.examples.streaming.FeederActor 127.0.1.1 9999`
  * and then run the example
- *    `./bin/run-example org.apache.spark.streaming.examples.ActorWordCount local[2] 127.0.1.1 9999`
+ *    `./bin/run-example org.apache.spark.examples.streaming.ActorWordCount local[2] 127.0.1.1 9999`
  */
 object ActorWordCount {
   def main(args: Array[String]) {
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/FlumeEventCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/FlumeEventCount.scala
similarity index 98%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/FlumeEventCount.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/FlumeEventCount.scala
index 26b60245341244eb8334ad1276d38342ab867f45..5b2a1035fc779c2c28412bdc8b9dca5955546e43 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/FlumeEventCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/FlumeEventCount.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import org.apache.spark.storage.StorageLevel
 import org.apache.spark.streaming._
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/HdfsWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/HdfsWordCount.scala
similarity index 95%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/HdfsWordCount.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/HdfsWordCount.scala
index 7f86fc792aacf6a46e3a81a09bee42363017a474..b440956ba31375f2511836f5f27cece7fcec06aa 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/HdfsWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/HdfsWordCount.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import org.apache.spark.streaming.{Seconds, StreamingContext}
 import org.apache.spark.streaming.StreamingContext._
@@ -27,7 +27,7 @@ import org.apache.spark.streaming.StreamingContext._
  *   <directory> is the directory that Spark Streaming will use to find and read new text files.
  *
  * To run this on your local machine on directory `localdir`, run this example
- *    `$ ./bin/run-example org.apache.spark.streaming.examples.HdfsWordCount local[2] localdir`
+ *    `$ ./bin/run-example org.apache.spark.examples.streaming.HdfsWordCount local[2] localdir`
  * Then create a text file in `localdir` and the words in the file will get counted.
  */
 object HdfsWordCount {
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/KafkaWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/KafkaWordCount.scala
similarity index 93%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/KafkaWordCount.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/KafkaWordCount.scala
index 2aa4f1474a59e904b7d7e2c763f68cddc2aba438..c3aae5af05b1c030d870295156ce6202c740e433 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/KafkaWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/KafkaWordCount.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import java.util.Properties
 
@@ -24,7 +24,6 @@ import kafka.producer._
 import org.apache.spark.streaming._
 import org.apache.spark.streaming.StreamingContext._
 import org.apache.spark.streaming.kafka._
-import org.apache.spark.streaming.util.RawTextHelper._
 
 // scalastyle:off
 /**
@@ -37,7 +36,7 @@ import org.apache.spark.streaming.util.RawTextHelper._
  *   <numThreads> is the number of threads the kafka consumer should use
  *
  * Example:
- *    `./bin/run-example org.apache.spark.streaming.examples.KafkaWordCount local[2] zoo01,zoo02,zoo03 my-consumer-group topic1,topic2 1`
+ *    `./bin/run-example org.apache.spark.examples.streaming.KafkaWordCount local[2] zoo01,zoo02,zoo03 my-consumer-group topic1,topic2 1`
  */
 // scalastyle:on
 object KafkaWordCount {
@@ -59,7 +58,7 @@ object KafkaWordCount {
     val lines = KafkaUtils.createStream(ssc, zkQuorum, group, topicpMap).map(_._2)
     val words = lines.flatMap(_.split(" "))
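+    // Count each word over a 10-minute window sliding every 2 seconds; the inverse
+    // function (_ - _) subtracts counts for data leaving the window instead of
+    // recomputing the whole window.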
     val wordCounts = words.map(x => (x, 1L))
-      .reduceByKeyAndWindow(add _, subtract _, Minutes(10), Seconds(2), 2)
+      .reduceByKeyAndWindow(_ + _, _ - _, Minutes(10), Seconds(2), 2)
     wordCounts.print()
 
     ssc.start()
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/MQTTWordCount.scala
similarity index 95%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/MQTTWordCount.scala
index 62aef0fb471070f5cd2b483e6d2f0656ea867fdf..47bf1e5a064399ba84b84a0346b657478963e5c6 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/MQTTWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/MQTTWordCount.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import org.eclipse.paho.client.mqttv3.{MqttClient, MqttClientPersistence, MqttException, MqttMessage, MqttTopic}
 import org.eclipse.paho.client.mqttv3.persist.MqttDefaultFilePersistence
@@ -79,9 +79,9 @@ object MQTTPublisher {
  *   <MqttbrokerUrl> and <topic> describe where the MQTT publisher is running.
  *
  * To run this example locally, you may run publisher as
- *    `$ ./bin/run-example org.apache.spark.streaming.examples.MQTTPublisher tcp://localhost:1883 foo`
+ *    `$ ./bin/run-example org.apache.spark.examples.streaming.MQTTPublisher tcp://localhost:1883 foo`
  * and run the example as
- *    `$ ./bin/run-example org.apache.spark.streaming.examples.MQTTWordCount local[2] tcp://localhost:1883 foo`
+ *    `$ ./bin/run-example org.apache.spark.examples.streaming.MQTTWordCount local[2] tcp://localhost:1883 foo`
  */
 // scalastyle:on
 object MQTTWordCount {
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/NetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
similarity index 95%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/NetworkWordCount.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
index 272ab11212451ce8b2132df0f6d5706870bcf429..acfe9a4da35966ff01c7e514d21420f5f08738c3 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/NetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/NetworkWordCount.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import org.apache.spark.streaming.{Seconds, StreamingContext}
 import org.apache.spark.streaming.StreamingContext._
@@ -32,7 +32,7 @@ import org.apache.spark.storage.StorageLevel
  * To run this on your local machine, you need to first run a Netcat server
  *    `$ nc -lk 9999`
  * and then run the example
- *    `$ ./bin/run-example org.apache.spark.streaming.examples.NetworkWordCount local[2] localhost 9999`
+ *    `$ ./bin/run-example org.apache.spark.examples.streaming.NetworkWordCount local[2] localhost 9999`
  */
 // scalastyle:on
 object NetworkWordCount {
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/QueueStream.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/QueueStream.scala
similarity index 97%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/QueueStream.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/QueueStream.scala
index ff2a205ec1c153d2bc89cab9deee7c54e33b6e61..f92f72f2de87646b4880361a2387b3e11d03fca0 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/QueueStream.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/QueueStream.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import scala.collection.mutable.SynchronizedQueue
 
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/RawNetworkGrep.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/RawNetworkGrep.scala
similarity index 92%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/RawNetworkGrep.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/RawNetworkGrep.scala
index d915c0c39b3344c6ce29f85e2d8b88b1c751e585..1b0319a0464334330cf8f437e9c89f41c8e74dfc 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/RawNetworkGrep.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/RawNetworkGrep.scala
@@ -15,11 +15,10 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import org.apache.spark.storage.StorageLevel
 import org.apache.spark.streaming._
-import org.apache.spark.streaming.util.RawTextHelper
 import org.apache.spark.util.IntParam
 
 /**
@@ -52,9 +51,6 @@ object RawNetworkGrep {
     val ssc = new StreamingContext(master, "RawNetworkGrep", Milliseconds(batchMillis),
       System.getenv("SPARK_HOME"), StreamingContext.jarOfClass(this.getClass).toSeq)
 
-    // Warm up the JVMs on master and slave for JIT compilation to kick in
-    RawTextHelper.warmUp(ssc.sparkContext)
-
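+    // Open numStreams raw socket streams in parallel and union them into one DStream.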
     val rawStreams = (1 to numStreams).map(_ =>
       ssc.rawSocketStream[String](host, port, StorageLevel.MEMORY_ONLY_SER_2)).toArray
     val union = ssc.union(rawStreams)
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/RecoverableNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
similarity index 96%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/RecoverableNetworkWordCount.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
index 4aacbb199141855630f764ad04f095f9693aad0e..b0bc31cc66ab58520a895f9b7e61c6f0c352bf5c 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/RecoverableNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/RecoverableNetworkWordCount.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import org.apache.spark.streaming.{Time, Seconds, StreamingContext}
 import org.apache.spark.streaming.StreamingContext._
@@ -44,7 +44,7 @@ import java.nio.charset.Charset
  *
  * and run the example as
  *
- *      `$ ./run-example org.apache.spark.streaming.examples.RecoverableNetworkWordCount \
+ *      `$ ./bin/run-example org.apache.spark.examples.streaming.RecoverableNetworkWordCount \
  *              local[2] localhost 9999 ~/checkpoint/ ~/out`
  *
  * If the directory ~/checkpoint/ does not exist (e.g. running for the first time), it will create
@@ -56,7 +56,7 @@ import java.nio.charset.Charset
  *
  *      `$ ./bin/spark-class org.apache.spark.deploy.Client -s launch <cluster-url> \
  *              <path-to-examples-jar> \
- *              org.apache.spark.streaming.examples.RecoverableNetworkWordCount <cluster-url> \
+ *              org.apache.spark.examples.streaming.RecoverableNetworkWordCount <cluster-url> \
  *              localhost 9999 ~/checkpoint ~/out`
  *
  * <path-to-examples-jar> would typically be
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/StatefulNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
similarity index 96%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/StatefulNetworkWordCount.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
index ef94c9298dd934b7eaa6971162db4ec9210b2a44..8001d56c98d8641f1afe473e1017f487845a5c10 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/StatefulNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/StatefulNetworkWordCount.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import org.apache.spark.streaming._
 import org.apache.spark.streaming.StreamingContext._
@@ -31,7 +31,7 @@ import org.apache.spark.streaming.StreamingContext._
  * To run this on your local machine, you need to first run a Netcat server
  *    `$ nc -lk 9999`
  * and then run the example
- *    `$ ./bin/run-example org.apache.spark.streaming.examples.StatefulNetworkWordCount local[2] localhost 9999`
+ *    `$ ./bin/run-example org.apache.spark.examples.streaming.StatefulNetworkWordCount local[2] localhost 9999`
  */
 // scalastyle:on
 object StatefulNetworkWordCount {
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/StreamingExamples.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/StreamingExamples.scala
similarity index 97%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/StreamingExamples.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/StreamingExamples.scala
index 99f1502046f53072635a40023511f350713f42a4..8396e65d0d5880daaf2a999894896c8dd25c37da 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/StreamingExamples.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/StreamingExamples.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import org.apache.spark.Logging
 
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/TwitterAlgebirdCMS.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
similarity index 99%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/TwitterAlgebirdCMS.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
index c38905e8f3663d45769da06539aa82eb8e644830..b12617d8817871500eec1e28c1d32536af994b7f 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/TwitterAlgebirdCMS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdCMS.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import com.twitter.algebird._
 
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/TwitterAlgebirdHLL.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdHLL.scala
similarity index 98%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/TwitterAlgebirdHLL.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdHLL.scala
index c067046f9020e63a90785eb508d3d6df3e6b1cdc..22f232c72545c01a327941f4d059c8df3f376c70 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/TwitterAlgebirdHLL.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterAlgebirdHLL.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import com.twitter.algebird.HyperLogLogMonoid
 import com.twitter.algebird.HyperLogLog._
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/TwitterPopularTags.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
similarity index 98%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/TwitterPopularTags.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
index 2597c8178862ac3863eaadff5ee67b7c60b9e9a6..5b58e94600a1627c21cc84ca5f22a00b8f5cf5e8 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/TwitterPopularTags.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import org.apache.spark.streaming.{Seconds, StreamingContext}
 import StreamingContext._
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/ZeroMQWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala
similarity index 95%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/ZeroMQWordCount.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala
index 109ff855b5228e560333a00c68e27dc3a98b95c9..de46e5f5b10b610ebea93cef9e4930a07bac558c 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/ZeroMQWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/ZeroMQWordCount.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples
+package org.apache.spark.examples.streaming
 
 import akka.actor.ActorSystem
 import akka.actor.actorRef2Scala
@@ -68,9 +68,9 @@ object SimpleZeroMQPublisher {
  *   <zeroMQurl> and <topic> describe where the ZeroMQ publisher is running.
  *
  * To run this example locally, you may run publisher as
- *    `$ ./bin/run-example org.apache.spark.streaming.examples.SimpleZeroMQPublisher tcp://127.0.1.1:1234 foo.bar`
+ *    `$ ./bin/run-example org.apache.spark.examples.streaming.SimpleZeroMQPublisher tcp://127.0.1.1:1234 foo.bar`
  * and run the example as
- *    `$ ./bin/run-example org.apache.spark.streaming.examples.ZeroMQWordCount local[2] tcp://127.0.1.1:1234 foo`
+ *    `$ ./bin/run-example org.apache.spark.examples.streaming.ZeroMQWordCount local[2] tcp://127.0.1.1:1234 foo`
  */
 // scalastyle:on
 object ZeroMQWordCount {
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/clickstream/PageViewGenerator.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala
similarity index 95%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/clickstream/PageViewGenerator.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala
index 251f65fe4df9c7706d397ce503dab9b8783c2a2e..97e0cb92078dc95306a9300c08343fafb6f64152 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/clickstream/PageViewGenerator.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewGenerator.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples.clickstream
+package org.apache.spark.examples.streaming.clickstream
 
 import java.net.ServerSocket
 import java.io.PrintWriter
@@ -40,8 +40,8 @@ object PageView extends Serializable {
 /** Generates streaming events to simulate page views on a website.
   *
   * This should be used in tandem with PageViewStream.scala. Example:
-  * $ ./bin/run-example org.apache.spark.streaming.examples.clickstream.PageViewGenerator 44444 10
-  * $ ./bin/run-example org.apache.spark.streaming.examples.clickstream.PageViewStream errorRatePerZipCode localhost 44444
+  * $ ./bin/run-example org.apache.spark.examples.streaming.clickstream.PageViewGenerator 44444 10
+  * $ ./bin/run-example org.apache.spark.examples.streaming.clickstream.PageViewStream errorRatePerZipCode localhost 44444
   *
   * When running this, you may want to set the root logging level to ERROR in
   * conf/log4j.properties to reduce the verbosity of the output.
diff --git a/examples/src/main/scala/org/apache/spark/streaming/examples/clickstream/PageViewStream.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala
similarity index 94%
rename from examples/src/main/scala/org/apache/spark/streaming/examples/clickstream/PageViewStream.scala
rename to examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala
index 673013f7cf9484c86b54b0a62a358b8d8fedb26e..d30ceffbe29cbe4661cc24309713db70e0a8527c 100644
--- a/examples/src/main/scala/org/apache/spark/streaming/examples/clickstream/PageViewStream.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/clickstream/PageViewStream.scala
@@ -15,19 +15,19 @@
  * limitations under the License.
  */
 
-package org.apache.spark.streaming.examples.clickstream
+package org.apache.spark.examples.streaming.clickstream
 
 import org.apache.spark.SparkContext._
 import org.apache.spark.streaming.{Seconds, StreamingContext}
 import org.apache.spark.streaming.StreamingContext._
-import org.apache.spark.streaming.examples.StreamingExamples
+import org.apache.spark.examples.streaming.StreamingExamples
 // scalastyle:off
 /** Analyses a streaming dataset of web page views. This class demonstrates several types of
   * operators available in Spark streaming.
   *
  * This should be used in tandem with PageViewGenerator.scala. Example:
-  * $ ./bin/run-example org.apache.spark.streaming.examples.clickstream.PageViewGenerator 44444 10
-  * $ ./bin/run-example org.apache.spark.streaming.examples.clickstream.PageViewStream errorRatePerZipCode localhost 44444
+  * $ ./bin/run-example org.apache.spark.examples.streaming.clickstream.PageViewGenerator 44444 10
+  * $ ./bin/run-example org.apache.spark.examples.streaming.clickstream.PageViewStream errorRatePerZipCode localhost 44444
   */
 // scalastyle:on
 object PageViewStream {
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextHelper.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextHelper.scala
index bbf57ef9275c0a9682e658e6691414348cb30c21..a73d6f3bf06611cb232a7a749c04dcb9c1443cc3 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextHelper.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/util/RawTextHelper.scala
@@ -25,7 +25,7 @@ import scala.collection.JavaConversions.mapAsScalaMap
 private[streaming]
 object RawTextHelper {
 
-  /** 
+  /**
    * Splits lines and counts the words.
    */
   def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, Long)] = {
@@ -114,4 +114,3 @@ object RawTextHelper {
 
   def max(v1: Long, v2: Long) = math.max(v1, v2)
 }
-