diff --git a/core/src/main/scala/org/apache/spark/io/LZ4BlockInputStream.java b/core/src/main/java/org/apache/spark/io/LZ4BlockInputStream.java
similarity index 94%
rename from core/src/main/scala/org/apache/spark/io/LZ4BlockInputStream.java
rename to core/src/main/java/org/apache/spark/io/LZ4BlockInputStream.java
index 27b6f0d4a38851098c91a74c5c659c9bb03ddb35..8783b5f56ebae528f0edeefef94f46da2304f3f9 100644
--- a/core/src/main/scala/org/apache/spark/io/LZ4BlockInputStream.java
+++ b/core/src/main/java/org/apache/spark/io/LZ4BlockInputStream.java
@@ -20,20 +20,17 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.zip.Checksum;
 
-import net.jpountz.lz4.LZ4BlockOutputStream;
 import net.jpountz.lz4.LZ4Exception;
 import net.jpountz.lz4.LZ4Factory;
 import net.jpountz.lz4.LZ4FastDecompressor;
 import net.jpountz.util.SafeUtils;
-import net.jpountz.xxhash.StreamingXXHash32;
-import net.jpountz.xxhash.XXHash32;
 import net.jpountz.xxhash.XXHashFactory;
 
 /**
  * {@link InputStream} implementation to decode data written with
- * {@link LZ4BlockOutputStream}. This class is not thread-safe and does not
+ * {@link net.jpountz.lz4.LZ4BlockOutputStream}. This class is not thread-safe and does not
  * support {@link #mark(int)}/{@link #reset()}.
- * @see LZ4BlockOutputStream
+ * @see net.jpountz.lz4.LZ4BlockOutputStream
  *
  * This is based on net.jpountz.lz4.LZ4BlockInputStream
  *
@@ -90,12 +87,13 @@ public final class LZ4BlockInputStream extends FilterInputStream {
   }
 
   /**
-   * Create a new instance using {@link XXHash32} for checksuming.
+   * Create a new instance using {@link net.jpountz.xxhash.XXHash32} for checksumming.
    * @see #LZ4BlockInputStream(InputStream, LZ4FastDecompressor, Checksum)
-   * @see StreamingXXHash32#asChecksum()
+   * @see net.jpountz.xxhash.StreamingXXHash32#asChecksum()
    */
   public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor) {
-    this(in, decompressor, XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum());
+    this(in, decompressor,
+      XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum());
   }
 
   /**
diff --git a/external/java8-tests/src/test/java/org/apache/spark/streaming/Java8APISuite.java b/external/java8-tests/src/test/java/org/apache/spark/streaming/Java8APISuite.java
index 67bc64a44466cbccf20f733af974c7ce3281f8f0..d0fed303e659c965a589f6d7a7ab47b70c86b563 100644
--- a/external/java8-tests/src/test/java/org/apache/spark/streaming/Java8APISuite.java
+++ b/external/java8-tests/src/test/java/org/apache/spark/streaming/Java8APISuite.java
@@ -377,7 +377,9 @@ public class Java8APISuite extends LocalJavaStreamingContext implements Serializ
     });
 
     // This is a test to make sure foreachRDD(VoidFunction2) can be called from Java
-    stream.foreachRDD((rdd, time) -> { return; });
+    stream.foreachRDD((rdd, time) -> {
+      return;
+    });
 
     JavaTestUtils.runStreams(ssc, 2, 2);
 
@@ -873,7 +875,7 @@ public class Java8APISuite extends LocalJavaStreamingContext implements Serializ
 
     JavaMapWithStateDStream<String, Integer, Boolean, Double> stateDstream =
         wordsDstream.mapWithState(
-            StateSpec.<String, Integer, Boolean, Double> function((time, key, value, state) -> {
+            StateSpec.<String, Integer, Boolean, Double>function((time, key, value, state) -> {
               // Use all State's methods here
               state.exists();
               state.get();
diff --git a/pom.xml b/pom.xml
index 3f9e4abc3224e9a199ad9255cca4671c21fd8c7f..4cbc6a2f116138db8b2ee73a5220ea6dfe185ae0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2253,7 +2253,7 @@
           <failOnViolation>false</failOnViolation>
           <includeTestSourceDirectory>true</includeTestSourceDirectory>
           <failOnWarning>false</failOnWarning>
-          <sourceDirectory>${basedir}/src/main/java</sourceDirectory>
+          <sourceDirectories>${basedir}/src/main/java,${basedir}/src/main/scala</sourceDirectories>
           <testSourceDirectory>${basedir}/src/test/java</testSourceDirectory>
           <configLocation>dev/checkstyle.xml</configLocation>
           <outputFile>${basedir}/target/checkstyle-output.xml</outputFile>
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/BufferedRowIterator.java b/sql/core/src/main/java/org/apache/spark/sql/execution/BufferedRowIterator.java
similarity index 97%
rename from sql/core/src/main/scala/org/apache/spark/sql/execution/BufferedRowIterator.java
rename to sql/core/src/main/java/org/apache/spark/sql/execution/BufferedRowIterator.java
index c2633a9f8cd48bb34055367ff725ee290492de5d..086547c793e3b4eee5ace1e3f42df7cc3200f154 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/BufferedRowIterator.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/BufferedRowIterator.java
@@ -60,7 +60,7 @@ public abstract class BufferedRowIterator {
   /**
    * Initializes from array of iterators of InternalRow.
    */
-  public abstract void init(int index, Iterator<InternalRow> iters[]);
+  public abstract void init(int index, Iterator<InternalRow>[] iters);
 
   /**
    * Append a row to currentRows.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/expressions/java/typed.java b/sql/core/src/main/java/org/apache/spark/sql/expressions/java/typed.java
similarity index 83%
rename from sql/core/src/main/scala/org/apache/spark/sql/expressions/java/typed.java
rename to sql/core/src/main/java/org/apache/spark/sql/expressions/java/typed.java
index 8ff7b6549b5f466e0fe3eabff1e256e5564bd9fc..c7c6e3868f9bbbc45d9cec0b61784b4868df46f4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/expressions/java/typed.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/expressions/java/typed.java
@@ -19,7 +19,6 @@ package org.apache.spark.sql.expressions.java;
 
 import org.apache.spark.annotation.Experimental;
 import org.apache.spark.api.java.function.MapFunction;
-import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.TypedColumn;
 import org.apache.spark.sql.execution.aggregate.TypedAverage;
 import org.apache.spark.sql.execution.aggregate.TypedCount;
@@ -28,7 +27,7 @@ import org.apache.spark.sql.execution.aggregate.TypedSumLong;
 
 /**
  * :: Experimental ::
- * Type-safe functions available for {@link Dataset} operations in Java.
+ * Type-safe functions available for {@link org.apache.spark.sql.Dataset} operations in Java.
  *
  * Scala users should use {@link org.apache.spark.sql.expressions.scala.typed}.
  *
@@ -43,7 +42,7 @@ public class typed {
    *
    * @since 2.0.0
    */
-  public static<T> TypedColumn<T, Double> avg(MapFunction<T, Double> f) {
+  public static <T> TypedColumn<T, Double> avg(MapFunction<T, Double> f) {
     return new TypedAverage<T>(f).toColumnJava();
   }
 
@@ -52,7 +51,7 @@ public class typed {
    *
    * @since 2.0.0
    */
-  public static<T> TypedColumn<T, Long> count(MapFunction<T, Object> f) {
+  public static <T> TypedColumn<T, Long> count(MapFunction<T, Object> f) {
     return new TypedCount<T>(f).toColumnJava();
   }
 
@@ -61,7 +60,7 @@ public class typed {
    *
    * @since 2.0.0
    */
-  public static<T> TypedColumn<T, Double> sum(MapFunction<T, Double> f) {
+  public static <T> TypedColumn<T, Double> sum(MapFunction<T, Double> f) {
     return new TypedSumDouble<T>(f).toColumnJava();
   }
 
@@ -70,7 +69,7 @@ public class typed {
    *
    * @since 2.0.0
    */
-  public static<T> TypedColumn<T, Long> sumLong(MapFunction<T, Long> f) {
+  public static <T> TypedColumn<T, Long> sumLong(MapFunction<T, Long> f) {
     return new TypedSumLong<T>(f).toColumnJava();
   }
 }