diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
index 61bafc8380049d023db2615f89127fa215172a63..1008c67de3491780a6fb8518d79e5487769101f9 100644
--- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
+++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java
@@ -194,8 +194,8 @@ public class TransportClientFactory implements Closeable {
       .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionTimeoutMs())
       .option(ChannelOption.ALLOCATOR, pooledAllocator);
 
-    final AtomicReference<TransportClient> clientRef = new AtomicReference<TransportClient>();
-    final AtomicReference<Channel> channelRef = new AtomicReference<Channel>();
+    final AtomicReference<TransportClient> clientRef = new AtomicReference<>();
+    final AtomicReference<Channel> channelRef = new AtomicReference<>();
 
     bootstrap.handler(new ChannelInitializer<SocketChannel>() {
       @Override
diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
index f97e76d7ed0d994432467f528685c746bc037fae..7a114df2d6857bea8983974202aca2fca645e895 100644
--- a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
+++ b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java
@@ -84,9 +84,9 @@ final class ShuffleExternalSorter extends MemoryConsumer {
    * this might not be necessary if we maintained a pool of re-usable pages in the TaskMemoryManager
    * itself).
    */
-  private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<MemoryBlock>();
+  private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<>();
 
-  private final LinkedList<SpillInfo> spills = new LinkedList<SpillInfo>();
+  private final LinkedList<SpillInfo> spills = new LinkedList<>();
 
   /** Peak memory used by this sorter so far, in bytes. **/
   private long peakMemoryUsedBytes;
diff --git a/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java b/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
index f19ed01d5aebfda79843289b04791b621fe9befa..0cf84d5f9b7161a6804d168c214d009791adb01c 100644
--- a/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
+++ b/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java
@@ -29,7 +29,7 @@ public enum TaskSorting {
 
   private final Set<String> alternateNames;
   private TaskSorting(String... names) {
-    alternateNames = new HashSet<String>();
+    alternateNames = new HashSet<>();
     for (String n: names) {
       alternateNames.add(n);
     }
diff --git a/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java b/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java
index 1692df7d30f0d79781973fdc0d287e78462ef86f..3e47bfc274cb13bb22c5f0fd42932f704a54d433 100644
--- a/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java
+++ b/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java
@@ -88,7 +88,7 @@ public class SparkLauncherSuite {
   @Test
   public void testChildProcLauncher() throws Exception {
     SparkSubmitOptionParser opts = new SparkSubmitOptionParser();
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     env.put("SPARK_PRINT_LAUNCH_COMMAND", "1");
 
     SparkLauncher launcher = new SparkLauncher(env)
diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
index add9d937d387bd1672436124790b7badee398f22..ddea6f5a69b18ea0ac9ca5d53ebb775eeaaeb349 100644
--- a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
+++ b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
@@ -67,7 +67,7 @@ public class UnsafeShuffleWriterSuite {
   File mergedOutputFile;
   File tempDir;
   long[] partitionSizesInMergedFile;
-  final LinkedList<File> spillFilesCreated = new LinkedList<File>();
+  final LinkedList<File> spillFilesCreated = new LinkedList<>();
   SparkConf conf;
   final Serializer serializer = new KryoSerializer(new SparkConf());
   TaskMetrics taskMetrics;
@@ -217,7 +217,7 @@ public class UnsafeShuffleWriterSuite {
   }
 
   private List<Tuple2<Object, Object>> readRecordsFromFile() throws IOException {
-    final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<Tuple2<Object, Object>>();
+    final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<>();
     long startOffset = 0;
     for (int i = 0; i < NUM_PARTITITONS; i++) {
       final long partitionSize = partitionSizesInMergedFile[i];
@@ -286,8 +286,7 @@ public class UnsafeShuffleWriterSuite {
   @Test
   public void writeWithoutSpilling() throws Exception {
     // In this example, each partition should have exactly one record:
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     for (int i = 0; i < NUM_PARTITITONS; i++) {
       dataToWrite.add(new Tuple2<Object, Object>(i, i));
     }
@@ -325,8 +324,7 @@ public class UnsafeShuffleWriterSuite {
       conf.set("spark.shuffle.compress", "false");
     }
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(transferToEnabled);
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     for (int i : new int[] { 1, 2, 3, 4, 4, 2 }) {
       dataToWrite.add(new Tuple2<Object, Object>(i, i));
     }
@@ -403,7 +401,7 @@ public class UnsafeShuffleWriterSuite {
   public void writeEnoughDataToTriggerSpill() throws Exception {
     memoryManager.limit(PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES);
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
-    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     final byte[] bigByteArray = new byte[PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES / 10];
     for (int i = 0; i < 10 + 1; i++) {
       dataToWrite.add(new Tuple2<Object, Object>(i, bigByteArray));
@@ -445,8 +443,7 @@ public class UnsafeShuffleWriterSuite {
   @Test
   public void writeRecordsThatAreBiggerThanDiskWriteBufferSize() throws Exception {
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     final byte[] bytes = new byte[(int) (ShuffleExternalSorter.DISK_WRITE_BUFFER_SIZE * 2.5)];
     new Random(42).nextBytes(bytes);
     dataToWrite.add(new Tuple2<Object, Object>(1, ByteBuffer.wrap(bytes)));
@@ -461,7 +458,7 @@ public class UnsafeShuffleWriterSuite {
   @Test
   public void writeRecordsThatAreBiggerThanMaxRecordSize() throws Exception {
     final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
-    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
     dataToWrite.add(new Tuple2<Object, Object>(1, ByteBuffer.wrap(new byte[1])));
     // We should be able to write a record that's right _at_ the max record size
     final byte[] atMaxRecordSize = new byte[(int) taskMemoryManager.pageSizeBytes() - 4];
@@ -498,7 +495,7 @@ public class UnsafeShuffleWriterSuite {
     taskMemoryManager = spy(taskMemoryManager);
     when(taskMemoryManager.pageSizeBytes()).thenReturn(pageSizeBytes);
     final UnsafeShuffleWriter<Object, Object> writer =
-      new UnsafeShuffleWriter<Object, Object>(
+      new UnsafeShuffleWriter<>(
         blockManager,
         shuffleBlockResolver,
         taskMemoryManager,
diff --git a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java
index 61b94b736d38efe8c2210685fab2f908edef5bcb..9aab2265c98928d5e475f65e97de76f54bb0420c 100644
--- a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java
+++ b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java
@@ -66,7 +66,7 @@ public abstract class AbstractBytesToBytesMapSuite {
   private TaskMemoryManager taskMemoryManager;
   private static final long PAGE_SIZE_BYTES = 1L << 26; // 64 megabytes
 
-  final LinkedList<File> spillFilesCreated = new LinkedList<File>();
+  final LinkedList<File> spillFilesCreated = new LinkedList<>();
   File tempDir;
 
   @Mock(answer = RETURNS_SMART_NULLS) BlockManager blockManager;
@@ -397,7 +397,7 @@ public abstract class AbstractBytesToBytesMapSuite {
     final int size = 65536;
     // Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
     // into ByteBuffers in order to use them as keys here.
-    final Map<ByteBuffer, byte[]> expected = new HashMap<ByteBuffer, byte[]>();
+    final Map<ByteBuffer, byte[]> expected = new HashMap<>();
     final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, size, PAGE_SIZE_BYTES);
     try {
       // Fill the map to 90% full so that we can trigger probing
@@ -453,7 +453,7 @@ public abstract class AbstractBytesToBytesMapSuite {
     final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 64, pageSizeBytes);
     // Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
     // into ByteBuffers in order to use them as keys here.
-    final Map<ByteBuffer, byte[]> expected = new HashMap<ByteBuffer, byte[]>();
+    final Map<ByteBuffer, byte[]> expected = new HashMap<>();
     try {
       for (int i = 0; i < 1000; i++) {
         final byte[] key = getRandomByteArray(rand.nextInt(128));
diff --git a/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java b/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java
index 45772b6d3c20d4719866a361a61217c1f1e402f8..e884b1bc123b8a223d87962cfd04b3ee109de914 100644
--- a/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java
+++ b/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java
@@ -76,7 +76,7 @@ public class TestTimSort {
    * @param length The sum of all run lengths that will be added to <code>runs</code>.
    */
   private static List<Long> runsJDKWorstCase(int minRun, int length) {
-    List<Long> runs = new ArrayList<Long>();
+    List<Long> runs = new ArrayList<>();
 
     long runningTotal = 0, Y = minRun + 4, X = minRun;
 
diff --git a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
index 492fe49ba4c4fc88355511741a6a63a7c334db7f..b757ddc3b37f91e2aee50eda3a1fc8b803db4e6a 100644
--- a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
+++ b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java
@@ -56,7 +56,7 @@ import static org.mockito.Mockito.*;
 
 public class UnsafeExternalSorterSuite {
 
-  final LinkedList<File> spillFilesCreated = new LinkedList<File>();
+  final LinkedList<File> spillFilesCreated = new LinkedList<>();
   final TestMemoryManager memoryManager =
     new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false"));
   final TaskMemoryManager taskMemoryManager = new TaskMemoryManager(memoryManager, 0);
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index c4d277f9bf95817351670d97c1dfea842b20152f..89fe873851d8c9663f0ee00a941ca65ea25d8730 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -760,7 +760,7 @@ JavaRDD<String> people = sc.textFile("examples/src/main/resources/people.txt");
 String schemaString = "name age";
 
 // Generate the schema based on the string of schema
-List<StructField> fields = new ArrayList<StructField>();
+List<StructField> fields = new ArrayList<>();
 for (String fieldName: schemaString.split(" ")) {
   fields.add(DataTypes.createStructField(fieldName, DataTypes.StringType, true));
 }
@@ -1935,7 +1935,7 @@ val jdbcDF = sqlContext.read.format("jdbc").options(
 
 {% highlight java %}
 
-Map<String, String> options = new HashMap<String, String>();
+Map<String, String> options = new HashMap<>();
 options.put("url", "jdbc:postgresql:dbserver");
 options.put("dbtable", "schema.tablename");
 
diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md
index e92b01aa7774a2c4af6cb5185b9e0d49befeb005..998644f2e23db2ad5e6ec3689bd4707781578e89 100644
--- a/docs/streaming-programming-guide.md
+++ b/docs/streaming-programming-guide.md
@@ -186,7 +186,7 @@ Next, we want to count these words.
 JavaPairDStream<String, Integer> pairs = words.mapToPair(
   new PairFunction<String, String, Integer>() {
     @Override public Tuple2<String, Integer> call(String s) {
-      return new Tuple2<String, Integer>(s, 1);
+      return new Tuple2<>(s, 1);
     }
   });
 JavaPairDStream<String, Integer> wordCounts = pairs.reduceByKey(
@@ -2095,7 +2095,7 @@ unifiedStream.print()
 <div data-lang="java" markdown="1">
 {% highlight java %}
 int numStreams = 5;
-List<JavaPairDStream<String, String>> kafkaStreams = new ArrayList<JavaPairDStream<String, String>>(numStreams);
+List<JavaPairDStream<String, String>> kafkaStreams = new ArrayList<>(numStreams);
 for (int i = 0; i < numStreams; i++) {
   kafkaStreams.add(KafkaUtils.createStream(...));
 }
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
index 1a6caa8cf86471d868bfb54957cba193a649c68c..8abc03e73d965517ef38a599d210016617f2f670 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java
@@ -108,7 +108,7 @@ public final class JavaLogQuery {
     JavaPairRDD<Tuple3<String, String, String>, Stats> extracted = dataSet.mapToPair(new PairFunction<String, Tuple3<String, String, String>, Stats>() {
       @Override
       public Tuple2<Tuple3<String, String, String>, Stats> call(String s) {
-        return new Tuple2<Tuple3<String, String, String>, Stats>(extractKey(s), extractStats(s));
+        return new Tuple2<>(extractKey(s), extractStats(s));
       }
     });
 
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java b/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
index 635fb6a373c47d528831d32a4285186a4df725ee..c3ef93c5b63250db8611a1177950b387cd27cf8a 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java
@@ -88,7 +88,7 @@ public final class JavaPageRank {
       @Override
       public Tuple2<String, String> call(String s) {
         String[] parts = SPACES.split(s);
-        return new Tuple2<String, String>(parts[0], parts[1]);
+        return new Tuple2<>(parts[0], parts[1]);
       }
     }).distinct().groupByKey().cache();
 
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java b/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java
index af874887445b1864a04cb45cf39980138f23caa1..04a57a6bfb58b7059902345898471347d96294e0 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java
@@ -38,7 +38,7 @@ public final class JavaSparkPi {
 
     int slices = (args.length == 1) ? Integer.parseInt(args[0]) : 2;
     int n = 100000 * slices;
-    List<Integer> l = new ArrayList<Integer>(n);
+    List<Integer> l = new ArrayList<>(n);
     for (int i = 0; i < n; i++) {
       l.add(i);
     }
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaTC.java b/examples/src/main/java/org/apache/spark/examples/JavaTC.java
index 2563fcdd234bbf3383f39fe22af1b6956ed0b7fe..ca10384212da2dd3d5fac48ac8198a28bb4cd470 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaTC.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaTC.java
@@ -41,16 +41,16 @@ public final class JavaTC {
   private static final Random rand = new Random(42);
 
   static List<Tuple2<Integer, Integer>> generateGraph() {
-    Set<Tuple2<Integer, Integer>> edges = new HashSet<Tuple2<Integer, Integer>>(numEdges);
+    Set<Tuple2<Integer, Integer>> edges = new HashSet<>(numEdges);
     while (edges.size() < numEdges) {
       int from = rand.nextInt(numVertices);
       int to = rand.nextInt(numVertices);
-      Tuple2<Integer, Integer> e = new Tuple2<Integer, Integer>(from, to);
+      Tuple2<Integer, Integer> e = new Tuple2<>(from, to);
       if (from != to) {
         edges.add(e);
       }
     }
-    return new ArrayList<Tuple2<Integer, Integer>>(edges);
+    return new ArrayList<>(edges);
   }
 
   static class ProjectFn implements PairFunction<Tuple2<Integer, Tuple2<Integer, Integer>>,
@@ -59,7 +59,7 @@ public final class JavaTC {
 
     @Override
     public Tuple2<Integer, Integer> call(Tuple2<Integer, Tuple2<Integer, Integer>> triple) {
-      return new Tuple2<Integer, Integer>(triple._2()._2(), triple._2()._1());
+      return new Tuple2<>(triple._2()._2(), triple._2()._1());
     }
   }
 
@@ -79,7 +79,7 @@ public final class JavaTC {
       new PairFunction<Tuple2<Integer, Integer>, Integer, Integer>() {
         @Override
         public Tuple2<Integer, Integer> call(Tuple2<Integer, Integer> e) {
-          return new Tuple2<Integer, Integer>(e._2(), e._1());
+          return new Tuple2<>(e._2(), e._1());
         }
     });
 
diff --git a/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java b/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
index d746a3d2b677353ddd24a173605cef4f3c19e344..84dbea5caa13501490c8f467065401f87186cde0 100644
--- a/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java
@@ -55,7 +55,7 @@ public final class JavaWordCount {
     JavaPairRDD<String, Integer> ones = words.mapToPair(new PairFunction<String, String, Integer>() {
       @Override
       public Tuple2<String, Integer> call(String s) {
-        return new Tuple2<String, Integer>(s, 1);
+        return new Tuple2<>(s, 1);
       }
     });
 
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java
index 2898accec61b0781c244680d7a78fdbdfacbd99b..c1f00dde0e60a274cee7504b08d8f6c0ac1add7c 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java
@@ -52,7 +52,7 @@ public class JavaElementwiseProductExample {
       RowFactory.create("b", Vectors.dense(4.0, 5.0, 6.0))
     ));
 
-    List<StructField> fields = new ArrayList<StructField>(2);
+    List<StructField> fields = new ArrayList<>(2);
     fields.add(DataTypes.createStructField("id", DataTypes.StringType, false));
     fields.add(DataTypes.createStructField("vector", new VectorUDT(), false));
 
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java
index 5839b0cf8a8f8c388c52ec65fac6469b90906135..66387b9df51c78f1b779826bc404ea63cf40cb98 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java
@@ -54,7 +54,7 @@ class JavaDecisionTreeClassificationExample {
     // Set parameters.
     //  Empty categoricalFeaturesInfo indicates all features are continuous.
     Integer numClasses = 2;
-    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     String impurity = "gini";
     Integer maxDepth = 5;
     Integer maxBins = 32;
@@ -68,7 +68,7 @@ class JavaDecisionTreeClassificationExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     Double testErr =
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java
index ccde578249f7c9b740390212af2bdf8e910ef3dc..904e7f7e9505e221980b20214f043ba4a254c3f3 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java
@@ -54,7 +54,7 @@ class JavaDecisionTreeRegressionExample {
 
     // Set parameters.
     // Empty categoricalFeaturesInfo indicates all features are continuous.
-    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     String impurity = "variance";
     Integer maxDepth = 5;
     Integer maxBins = 32;
@@ -68,7 +68,7 @@ class JavaDecisionTreeRegressionExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
       @Override
       public Tuple2<Double, Double> call(LabeledPoint p) {
-        return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+        return new Tuple2<>(model.predict(p.features()), p.label());
       }
     });
     Double testMSE =
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java
index 0c2e4c928bb2c1a290f0067bbdbd7a150272bc1d..213949e525dc2e6dc0c22761ff3ef89a9cbc6ff0 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java
@@ -58,7 +58,7 @@ public class JavaGradientBoostingClassificationExample {
     boostingStrategy.getTreeStrategy().setNumClasses(2);
     boostingStrategy.getTreeStrategy().setMaxDepth(5);
     // Empty categoricalFeaturesInfo indicates all features are continuous.
-    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo);
 
     final GradientBoostedTreesModel model =
@@ -69,7 +69,7 @@ public class JavaGradientBoostingClassificationExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     Double testErr =
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java
index c1bc2098dcd7e5307c0ae8377f11024ebe13ca78..78db442dbc99d4a99dcc6145dfbd2253d43c0c52 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java
@@ -57,7 +57,7 @@ public class JavaGradientBoostingRegressionExample {
     boostingStrategy.setNumIterations(3); // Note: Use more iterations in practice.
     boostingStrategy.getTreeStrategy().setMaxDepth(5);
     // Empty categoricalFeaturesInfo indicates all features are continuous.
-    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo);
 
     final GradientBoostedTreesModel model =
@@ -68,7 +68,7 @@ public class JavaGradientBoostingRegressionExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     Double testMSE =
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java
index e632e35110920f6213f2696852a493ed3f910b80..0e15f755083bf71751adcfc777348e1ee8591607 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java
@@ -62,7 +62,7 @@ public class JavaIsotonicRegressionExample {
         @Override
         public Tuple2<Double, Double> call(Tuple3<Double, Double, Double> point) {
           Double predictedLabel = model.predict(point._2());
-          return new Tuple2<Double, Double>(predictedLabel, point._1());
+          return new Tuple2<>(predictedLabel, point._1());
         }
       }
     );
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java
index 3e50118c0d9ec71a6c93e679500b2636fa168784..9ca9a7847c463cb4547b3c7562d45021008308d6 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java
@@ -70,7 +70,7 @@ public class JavaLinearRegressionWithSGDExample {
       new Function<LabeledPoint, Tuple2<Double, Double>>() {
         public Tuple2<Double, Double> call(LabeledPoint point) {
           double prediction = model.predict(point.features());
-          return new Tuple2<Double, Double>(prediction, point.label());
+          return new Tuple2<>(prediction, point.label());
         }
       }
     );
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java
index 478e615123e6d5ad6504ed4d76f7e0fecdbf05a9..2b17dbb96365e8978a58ad3a09c49a267b937609 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java
@@ -46,7 +46,7 @@ public class JavaNaiveBayesExample {
       test.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     double accuracy = predictionAndLabel.filter(new Function<Tuple2<Double, Double>, Boolean>() {
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java
index faf76a9540e774e9e316b4c4207e59e1ba574177..a42c29f52fb65de09a010e5fc22b303c78a0ddfb 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java
@@ -42,7 +42,7 @@ public class JavaPCAExample {
 
     // $example on$
     double[][] array = {{1.12, 2.05, 3.12}, {5.56, 6.28, 8.94}, {10.2, 8.0, 20.5}};
-    LinkedList<Vector> rowsList = new LinkedList<Vector>();
+    LinkedList<Vector> rowsList = new LinkedList<>();
     for (int i = 0; i < array.length; i++) {
       Vector currentRow = Vectors.dense(array[i]);
       rowsList.add(currentRow);
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java
index f4c9d8a35dc96c948a756411187360b307f5f9ed..24af5d0180ce4ed8ffa9f80b8bb7bce0f1ccd828 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java
@@ -50,7 +50,7 @@ public class JavaRandomForestClassificationExample {
     // Train a RandomForest model.
     // Empty categoricalFeaturesInfo indicates all features are continuous.
     Integer numClasses = 2;
-    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     Integer numTrees = 3; // Use more in practice.
     String featureSubsetStrategy = "auto"; // Let the algorithm choose.
     String impurity = "gini";
@@ -67,7 +67,7 @@ public class JavaRandomForestClassificationExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     Double testErr =
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java
index c71125ce414a094d51d7e9ae48d2f78853fc1e19..afa9045878db3787533a9fa86a41ff2d1065d362 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java
@@ -51,7 +51,7 @@ public class JavaRandomForestRegressionExample {
 
     // Set parameters.
     // Empty categoricalFeaturesInfo indicates all features are continuous.
-    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     Integer numTrees = 3; // Use more in practice.
     String featureSubsetStrategy = "auto"; // Let the algorithm choose.
     String impurity = "variance";
@@ -67,7 +67,7 @@ public class JavaRandomForestRegressionExample {
       testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
         @Override
         public Tuple2<Double, Double> call(LabeledPoint p) {
-          return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
+          return new Tuple2<>(model.predict(p.features()), p.label());
         }
       });
     Double testMSE =
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java
index 5e643420ada152ae4a6cb554c05ef1c0e5dc2913..f69aa4b75a56cdbf8ba64f0ff732fa4e9bab79af 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java
@@ -64,8 +64,7 @@ public class JavaRecommendationExample {
       model.predict(JavaRDD.toRDD(userProducts)).toJavaRDD().map(
         new Function<Rating, Tuple2<Tuple2<Integer, Integer>, Double>>() {
           public Tuple2<Tuple2<Integer, Integer>, Double> call(Rating r){
-            return new Tuple2<Tuple2<Integer, Integer>, Double>(
-              new Tuple2<Integer, Integer>(r.user(), r.product()), r.rating());
+            return new Tuple2<>(new Tuple2<>(r.user(), r.product()), r.rating());
           }
         }
       ));
@@ -73,8 +72,7 @@ public class JavaRecommendationExample {
       JavaPairRDD.fromJavaRDD(ratings.map(
         new Function<Rating, Tuple2<Tuple2<Integer, Integer>, Double>>() {
           public Tuple2<Tuple2<Integer, Integer>, Double> call(Rating r){
-            return new Tuple2<Tuple2<Integer, Integer>, Double>(
-              new Tuple2<Integer, Integer>(r.user(), r.product()), r.rating());
+            return new Tuple2<>(new Tuple2<>(r.user(), r.product()), r.rating());
           }
         }
       )).join(predictions).values();
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java
index b417da8f85cf543126f64d0eb3462300199d1f84..3730e60f68803876fa60bd7a6b6fa813cc2a164b 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java
@@ -44,7 +44,7 @@ public class JavaSVDExample {
 
     // $example on$
     double[][] array = {{1.12, 2.05, 3.12}, {5.56, 6.28, 8.94}, {10.2, 8.0, 20.5}};
-    LinkedList<Vector> rowsList = new LinkedList<Vector>();
+    LinkedList<Vector> rowsList = new LinkedList<>();
     for (int i = 0; i < array.length; i++) {
       Vector currentRow = Vectors.dense(array[i]);
       rowsList.add(currentRow);
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java
index 7bb70d0c067dffd09cfa745677f08ce7abd0e388..7884b8cdfff84145dcf41676d3079a27c471265e 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java
@@ -129,7 +129,7 @@ public class JavaActorWordCount {
     }).mapToPair(new PairFunction<String, String, Integer>() {
       @Override
       public Tuple2<String, Integer> call(String s) {
-        return new Tuple2<String, Integer>(s, 1);
+        return new Tuple2<>(s, 1);
       }
     }).reduceByKey(new Function2<Integer, Integer, Integer>() {
       @Override
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
index 3d668adcf815f433eb992e4baa21ed6b5075bdcb..5de56340c6d2234435a65a5acbe3cc71cb0886b6 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java
@@ -82,7 +82,7 @@ public class JavaCustomReceiver extends Receiver<String> {
     JavaPairDStream<String, Integer> wordCounts = words.mapToPair(
       new PairFunction<String, String, Integer>() {
         @Override public Tuple2<String, Integer> call(String s) {
-          return new Tuple2<String, Integer>(s, 1);
+          return new Tuple2<>(s, 1);
         }
       }).reduceByKey(new Function2<Integer, Integer, Integer>() {
         @Override
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java
index 5107500a127c5c44e66736eb6405ae0569dce2e4..bfbad91e4fdfaac4f32832d90963ae3107d95ec5 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java
@@ -63,8 +63,8 @@ public final class JavaDirectKafkaWordCount {
     SparkConf sparkConf = new SparkConf().setAppName("JavaDirectKafkaWordCount");
     JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.seconds(2));
 
-    HashSet<String> topicsSet = new HashSet<String>(Arrays.asList(topics.split(",")));
-    HashMap<String, String> kafkaParams = new HashMap<String, String>();
+    HashSet<String> topicsSet = new HashSet<>(Arrays.asList(topics.split(",")));
+    HashMap<String, String> kafkaParams = new HashMap<>();
     kafkaParams.put("metadata.broker.list", brokers);
 
     // Create direct kafka stream with brokers and topics
@@ -95,7 +95,7 @@ public final class JavaDirectKafkaWordCount {
       new PairFunction<String, String, Integer>() {
         @Override
         public Tuple2<String, Integer> call(String s) {
-          return new Tuple2<String, Integer>(s, 1);
+          return new Tuple2<>(s, 1);
         }
       }).reduceByKey(
         new Function2<Integer, Integer, Integer>() {
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java
index 0df4cb40a9a76e1e6c546534ee365284c5974a34..655da6840cc57bb2941f6fac4b9e51c7c8b3c48a 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java
@@ -69,7 +69,7 @@ public final class JavaKafkaWordCount {
     JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));
 
     int numThreads = Integer.parseInt(args[3]);
-    Map<String, Integer> topicMap = new HashMap<String, Integer>();
+    Map<String, Integer> topicMap = new HashMap<>();
     String[] topics = args[2].split(",");
     for (String topic: topics) {
       topicMap.put(topic, numThreads);
@@ -96,7 +96,7 @@ public final class JavaKafkaWordCount {
       new PairFunction<String, String, Integer>() {
         @Override
         public Tuple2<String, Integer> call(String s) {
-          return new Tuple2<String, Integer>(s, 1);
+          return new Tuple2<>(s, 1);
         }
       }).reduceByKey(new Function2<Integer, Integer, Integer>() {
         @Override
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
index b82b319acb735e4205fa614b9895dea36f7d048f..5761da684b4673334d19e15359a4b6aa2153fa76 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java
@@ -76,7 +76,7 @@ public final class JavaNetworkWordCount {
       new PairFunction<String, String, Integer>() {
         @Override
         public Tuple2<String, Integer> call(String s) {
-          return new Tuple2<String, Integer>(s, 1);
+          return new Tuple2<>(s, 1);
         }
       }).reduceByKey(new Function2<Integer, Integer, Integer>() {
         @Override
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
index 4ce8437f827051e29e829ae9fed3c1b14937aa9b..426eaa5f0adea50a63574a1ee88ba9beb5ef0fa4 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java
@@ -50,7 +50,7 @@ public final class JavaQueueStream {
 
     // Create the queue through which RDDs can be pushed to
     // a QueueInputDStream
-    Queue<JavaRDD<Integer>> rddQueue = new LinkedList<JavaRDD<Integer>>();
+    Queue<JavaRDD<Integer>> rddQueue = new LinkedList<>();
 
     // Create and push some RDDs into the queue
     List<Integer> list = Lists.newArrayList();
@@ -68,7 +68,7 @@ public final class JavaQueueStream {
         new PairFunction<Integer, Integer, Integer>() {
           @Override
           public Tuple2<Integer, Integer> call(Integer i) {
-            return new Tuple2<Integer, Integer>(i % 10, 1);
+            return new Tuple2<>(i % 10, 1);
           }
         });
     JavaPairDStream<Integer, Integer> reducedStream = mappedStream.reduceByKey(
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
index f9929fc86dc7f8e98ff870fb1110ce202fda81e4..a597ecbc5bcb369a5400811340474cc8093b0941 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java
@@ -142,7 +142,7 @@ public final class JavaRecoverableNetworkWordCount {
       new PairFunction<String, String, Integer>() {
         @Override
         public Tuple2<String, Integer> call(String s) {
-          return new Tuple2<String, Integer>(s, 1);
+          return new Tuple2<>(s, 1);
         }
       }).reduceByKey(new Function2<Integer, Integer, Integer>() {
         @Override
diff --git a/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java b/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
index 64e044aa8e4a4bafa5a311cebfc10f0a491f8e43..5dc825dfdc9112d35a239e2631b370bfdaab39cd 100644
--- a/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
+++ b/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java
@@ -136,7 +136,7 @@ public final class JavaKinesisWordCountASL { // needs to be public for access fr
     JavaStreamingContext jssc = new JavaStreamingContext(sparkConfig, batchInterval);
 
     // Create the Kinesis DStreams
-    List<JavaDStream<byte[]>> streamsList = new ArrayList<JavaDStream<byte[]>>(numStreams);
+    List<JavaDStream<byte[]>> streamsList = new ArrayList<>(numStreams);
     for (int i = 0; i < numStreams; i++) {
       streamsList.add(
           KinesisUtils.createStream(jssc, kinesisAppName, streamName, endpointUrl, regionName,
diff --git a/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java
index c7ab51357c1fbe80393321e1a92ea84bc0fc7cff..46410327a5d723f4ded86e79cd7aaec5cc26cb39 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java
@@ -58,12 +58,12 @@ abstract class AbstractCommandBuilder {
   private Map<String, String> effectiveConfig;
 
   public AbstractCommandBuilder() {
-    this.appArgs = new ArrayList<String>();
-    this.childEnv = new HashMap<String, String>();
-    this.conf = new HashMap<String, String>();
-    this.files = new ArrayList<String>();
-    this.jars = new ArrayList<String>();
-    this.pyFiles = new ArrayList<String>();
+    this.appArgs = new ArrayList<>();
+    this.childEnv = new HashMap<>();
+    this.conf = new HashMap<>();
+    this.files = new ArrayList<>();
+    this.jars = new ArrayList<>();
+    this.pyFiles = new ArrayList<>();
   }
 
   /**
@@ -87,7 +87,7 @@ abstract class AbstractCommandBuilder {
    * class.
    */
   List<String> buildJavaCommand(String extraClassPath) throws IOException {
-    List<String> cmd = new ArrayList<String>();
+    List<String> cmd = new ArrayList<>();
     String envJavaHome;
 
     if (javaHome != null) {
@@ -134,7 +134,7 @@ abstract class AbstractCommandBuilder {
   List<String> buildClassPath(String appClassPath) throws IOException {
     String sparkHome = getSparkHome();
 
-    List<String> cp = new ArrayList<String>();
+    List<String> cp = new ArrayList<>();
     addToClassPath(cp, getenv("SPARK_CLASSPATH"));
     addToClassPath(cp, appClassPath);
 
diff --git a/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java b/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java
index e328c8a341c286d32db35934f44d33bd3d8287a0..7942d7372faff4688f1f88571bf33542a550b5c3 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java
@@ -147,7 +147,7 @@ class CommandBuilderUtils {
    * Output: [ "ab cd", "efgh", "i \" j" ]
    */
   static List<String> parseOptionString(String s) {
-    List<String> opts = new ArrayList<String>();
+    List<String> opts = new ArrayList<>();
     StringBuilder opt = new StringBuilder();
     boolean inOpt = false;
     boolean inSingleQuote = false;
diff --git a/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java b/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java
index 414ffc2c84e520e8aad4b97a824733e6359747e0..69fbf4387bdfb03d0f52d1f923e3f4ddbef91ecc 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java
@@ -129,7 +129,7 @@ class LauncherServer implements Closeable {
       server.setReuseAddress(true);
       server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
 
-      this.clients = new ArrayList<ServerConnection>();
+      this.clients = new ArrayList<>();
       this.threadIds = new AtomicLong();
       this.factory = new NamedThreadFactory(THREAD_NAME_FMT);
       this.pending = new ConcurrentHashMap<>();
diff --git a/launcher/src/main/java/org/apache/spark/launcher/Main.java b/launcher/src/main/java/org/apache/spark/launcher/Main.java
index e751e948e3561632f9ae5a9cc435371af5c9b4ef..1e34bb8c73279353c22e0b6e1ace237216e0f7a3 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/Main.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/Main.java
@@ -50,7 +50,7 @@ class Main {
   public static void main(String[] argsArray) throws Exception {
     checkArgument(argsArray.length > 0, "Not enough arguments: missing class name.");
 
-    List<String> args = new ArrayList<String>(Arrays.asList(argsArray));
+    List<String> args = new ArrayList<>(Arrays.asList(argsArray));
     String className = args.remove(0);
 
     boolean printLaunchCommand = !isEmpty(System.getenv("SPARK_PRINT_LAUNCH_COMMAND"));
@@ -70,7 +70,7 @@ class Main {
           // Ignore parsing exceptions.
         }
 
-        List<String> help = new ArrayList<String>();
+        List<String> help = new ArrayList<>();
         if (parser.className != null) {
           help.add(parser.CLASS);
           help.add(parser.className);
@@ -82,7 +82,7 @@ class Main {
       builder = new SparkClassCommandBuilder(className, args);
     }
 
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     List<String> cmd = builder.buildCommand(env);
     if (printLaunchCommand) {
       System.err.println("Spark Command: " + join(" ", cmd));
@@ -130,7 +130,7 @@ class Main {
       return cmd;
     }
 
-    List<String> newCmd = new ArrayList<String>();
+    List<String> newCmd = new ArrayList<>();
     newCmd.add("env");
 
     for (Map.Entry<String, String> e : childEnv.entrySet()) {
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
index e575fd33080a21a70d3ca9bd27754be033c2663f..40187236f2f2dd31c81567186fd79f90c6324ddd 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
@@ -44,7 +44,7 @@ class SparkClassCommandBuilder extends AbstractCommandBuilder {
 
   @Override
   public List<String> buildCommand(Map<String, String> env) throws IOException {
-    List<String> javaOptsKeys = new ArrayList<String>();
+    List<String> javaOptsKeys = new ArrayList<>();
     String memKey = null;
     String extraClassPath = null;
 
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
index 20e6003a00c19493a8c153f9513f0e3b6a15bdd2..a54215990137ef5ea08fcdcc1baa58d37ebcf461 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
@@ -75,7 +75,7 @@ public class SparkLauncher {
   /** Used internally to create unique logger names. */
   private static final AtomicInteger COUNTER = new AtomicInteger();
 
-  static final Map<String, String> launcherConfig = new HashMap<String, String>();
+  static final Map<String, String> launcherConfig = new HashMap<>();
 
   /**
    * Set a configuration value for the launcher library. These config values do not affect the
@@ -428,7 +428,7 @@ public class SparkLauncher {
   }
 
   private ProcessBuilder createBuilder() {
-    List<String> cmd = new ArrayList<String>();
+    List<String> cmd = new ArrayList<>();
     String script = isWindows() ? "spark-submit.cmd" : "spark-submit";
     cmd.add(join(File.separator, builder.getSparkHome(), "bin", script));
     cmd.addAll(builder.buildSparkSubmitArgs());
@@ -437,7 +437,7 @@ public class SparkLauncher {
     // preserved, otherwise the batch interpreter will mess up the arguments. Batch scripts are
     // weird.
     if (isWindows()) {
-      List<String> winCmd = new ArrayList<String>();
+      List<String> winCmd = new ArrayList<>();
       for (String arg : cmd) {
         winCmd.add(quoteForBatchScript(arg));
       }
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
index 269c89c310550f44ce7663c55d95029b8f9a0e2d..b2dd6ac4c39823937e708617047a9e50468ba3ab 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
@@ -67,7 +67,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
    * command line parsing works. This maps the class name to the resource to use when calling
    * spark-submit.
    */
-  private static final Map<String, String> specialClasses = new HashMap<String, String>();
+  private static final Map<String, String> specialClasses = new HashMap<>();
   static {
     specialClasses.put("org.apache.spark.repl.Main", "spark-shell");
     specialClasses.put("org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver",
@@ -87,12 +87,12 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
   private boolean allowsMixedArguments;
 
   SparkSubmitCommandBuilder() {
-    this.sparkArgs = new ArrayList<String>();
+    this.sparkArgs = new ArrayList<>();
     this.printInfo = false;
   }
 
   SparkSubmitCommandBuilder(List<String> args) {
-    this.sparkArgs = new ArrayList<String>();
+    this.sparkArgs = new ArrayList<>();
     List<String> submitArgs = args;
     if (args.size() > 0 && args.get(0).equals(PYSPARK_SHELL)) {
       this.allowsMixedArguments = true;
@@ -123,7 +123,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
   }
 
   List<String> buildSparkSubmitArgs() {
-    List<String> args = new ArrayList<String>();
+    List<String> args = new ArrayList<>();
     SparkSubmitOptionParser parser = new SparkSubmitOptionParser();
 
     if (verbose) {
@@ -244,7 +244,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
 
     // The executable is the PYSPARK_DRIVER_PYTHON env variable set by the pyspark script,
     // followed by PYSPARK_DRIVER_PYTHON_OPTS.
-    List<String> pyargs = new ArrayList<String>();
+    List<String> pyargs = new ArrayList<>();
     pyargs.add(firstNonEmpty(System.getenv("PYSPARK_DRIVER_PYTHON"), "python"));
     String pyOpts = System.getenv("PYSPARK_DRIVER_PYTHON_OPTS");
     if (!isEmpty(pyOpts)) {
@@ -270,7 +270,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
     env.put("R_PROFILE_USER",
             join(File.separator, sparkHome, "R", "lib", "SparkR", "profile", "shell.R"));
 
-    List<String> args = new ArrayList<String>();
+    List<String> args = new ArrayList<>();
     args.add(firstNonEmpty(System.getenv("SPARKR_DRIVER_R"), "R"));
     return args;
   }
diff --git a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
index 6aad47adbcc82c4a1c454d325e2df925b630aef5..d36731840b1a1e72741d038bd463ae671405d9d7 100644
--- a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
+++ b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java
@@ -73,7 +73,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
       "spark.randomOption=foo",
       parser.CONF,
       SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH + "=/driverLibPath");
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     List<String> cmd = buildCommand(sparkSubmitArgs, env);
 
     assertTrue(findInStringList(env.get(CommandBuilderUtils.getLibPathEnvName()),
@@ -125,7 +125,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
       "--master=foo",
       "--deploy-mode=bar");
 
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     List<String> cmd = buildCommand(sparkSubmitArgs, env);
     assertEquals("python", cmd.get(cmd.size() - 1));
     assertEquals(
@@ -142,7 +142,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
       "script.py",
       "arg1");
 
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     List<String> cmd = buildCommand(sparkSubmitArgs, env);
 
     assertEquals("foo", findArgValue(cmd, "--master"));
@@ -178,7 +178,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
           + "/launcher/src/test/resources");
     }
 
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     List<String> cmd = launcher.buildCommand(env);
 
     // Checks below are different for driver and non-driver mode.
@@ -258,7 +258,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
   }
 
   private Map<String, String> parseConf(List<String> cmd, SparkSubmitOptionParser parser) {
-    Map<String, String> conf = new HashMap<String, String>();
+    Map<String, String> conf = new HashMap<>();
     for (int i = 0; i < cmd.size(); i++) {
       if (cmd.get(i).equals(parser.CONF)) {
         String[] val = cmd.get(i + 1).split("=", 2);
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java
index 60f25e5cce437ffec1a5120ad451a9f1a853c037..40b9c35adc431e7da49b9d17c6e142ae613a3623 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java
@@ -56,7 +56,7 @@ public class JavaDecisionTreeClassifierSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
 
     // This tests setters. Training with various options is tested in Scala.
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java
index 3c69467fa119ee823c57250e3ace62539b3322d9..59b6fba7a928adc6b52be3057fea3c2f3920ff37 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java
@@ -56,7 +56,7 @@ public class JavaGBTClassifierSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
 
     // This tests setters. Training with various options is tested in Scala.
diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java
index a66a1e12927beaf1fbb1fa269c4f4a439b6e4150..5485fcbf01bda8631893e2413d8be882bd33df16 100644
--- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java
@@ -57,7 +57,7 @@ public class JavaRandomForestClassifierSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
 
     // This tests setters. Training with various options is tested in Scala.
diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java
index ebe800e749e05d20c61cbf049c10e8b8789a964d..d5c9d120c592c7a0351c3e6d640761bd26011b46 100644
--- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java
@@ -56,7 +56,7 @@ public class JavaDecisionTreeRegressorSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
 
     // This tests setters. Training with various options is tested in Scala.
diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java
index fc8c13db07e6f2fb16520c434d4c1b27a3ea3282..38d15dc2b7c78cf79927b99b8f3520f34e1803a8 100644
--- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java
@@ -56,7 +56,7 @@ public class JavaGBTRegressorSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
 
     GBTRegressor rf = new GBTRegressor()
diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java
index a00ce5e249c3453a674a267fb8b1ace998ac1fd4..31be8880c25e191d593945e6ec3460b23dd124cb 100644
--- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java
@@ -57,7 +57,7 @@ public class JavaRandomForestRegressorSuite implements Serializable {
 
     JavaRDD<LabeledPoint> data = sc.parallelize(
       LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
-    Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
+    Map<Integer, Integer> categoricalFeatures = new HashMap<>();
     DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
 
     // This tests setters. Training with various options is tested in Scala.
diff --git a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
index 225a216270b3bbe6005e70c870a14f0cc995760a..db19b309f65aec55a7da5c14aa880f5a6e54e3b8 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
@@ -45,9 +45,9 @@ public class JavaLDASuite implements Serializable {
   @Before
   public void setUp() {
     sc = new JavaSparkContext("local", "JavaLDA");
-    ArrayList<Tuple2<Long, Vector>> tinyCorpus = new ArrayList<Tuple2<Long, Vector>>();
+    ArrayList<Tuple2<Long, Vector>> tinyCorpus = new ArrayList<>();
     for (int i = 0; i < LDASuite.tinyCorpus().length; i++) {
-      tinyCorpus.add(new Tuple2<Long, Vector>((Long)LDASuite.tinyCorpus()[i]._1(),
+      tinyCorpus.add(new Tuple2<>((Long)LDASuite.tinyCorpus()[i]._1(),
           LDASuite.tinyCorpus()[i]._2()));
     }
     JavaRDD<Tuple2<Long, Vector>> tmpCorpus = sc.parallelize(tinyCorpus, 2);
@@ -189,8 +189,8 @@ public class JavaLDASuite implements Serializable {
     double logPerplexity = toyModel.logPerplexity(pairedDocs);
 
     // check: logLikelihood.
-    ArrayList<Tuple2<Long, Vector>> docsSingleWord = new ArrayList<Tuple2<Long, Vector>>();
-    docsSingleWord.add(new Tuple2<Long, Vector>(0L, Vectors.dense(1.0, 0.0, 0.0)));
+    ArrayList<Tuple2<Long, Vector>> docsSingleWord = new ArrayList<>();
+    docsSingleWord.add(new Tuple2<>(0L, Vectors.dense(1.0, 0.0, 0.0)));
     JavaPairRDD<Long, Vector> single = JavaPairRDD.fromJavaRDD(sc.parallelize(docsSingleWord));
     double logLikelihood = toyModel.logLikelihood(single);
   }
diff --git a/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java b/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java
index 9925aae441af9180c567f404d008726573ca07e5..8dd29061daaad1d857459cf31f72a78134eec0c8 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java
@@ -64,7 +64,7 @@ public class JavaDecisionTreeSuite implements Serializable {
   public void runDTUsingConstructor() {
     List<LabeledPoint> arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList();
     JavaRDD<LabeledPoint> rdd = sc.parallelize(arr);
-    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories
 
     int maxDepth = 4;
@@ -84,7 +84,7 @@ public class JavaDecisionTreeSuite implements Serializable {
   public void runDTUsingStaticMethods() {
     List<LabeledPoint> arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList();
     JavaRDD<LabeledPoint> rdd = sc.parallelize(arr);
-    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
+    HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
     categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories
 
     int maxDepth = 4;
diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java b/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java
index 17659d7d960b04be91072435461f20435e39efbf..24adeadf95675098f4872a6dfccd08a2546425ba 100644
--- a/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java
+++ b/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java
@@ -201,7 +201,7 @@ public class DataTypes {
     if (fields == null) {
       throw new IllegalArgumentException("fields should not be null.");
     }
-    Set<String> distinctNames = new HashSet<String>();
+    Set<String> distinctNames = new HashSet<>();
     for (StructField field : fields) {
       if (field == null) {
         throw new IllegalArgumentException(
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
index 6bcd155ccdc4930a8ab8f6142dc08f609b6a32d7..5c257bc2608730a11a5b322a0e3aa54481f6543c 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
@@ -149,7 +149,7 @@ public abstract class SpecificParquetRecordReaderBase<T> extends RecordReader<Vo
    * by MapReduce.
    */
   public static List<String> listDirectory(File path) throws IOException {
-    List<String> result = new ArrayList<String>();
+    List<String> result = new ArrayList<>();
     if (path.isDirectory()) {
       for (File f: path.listFiles()) {
         result.addAll(listDirectory(f));
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
index 640efcc737eaa54c3323bdba1a6c304ff16233ff..51f987fda9de71693f8a44d30f1353118bf396de 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java
@@ -111,7 +111,7 @@ public class JavaApplySchemaSuite implements Serializable {
     df.registerTempTable("people");
     Row[] actual = sqlContext.sql("SELECT * FROM people").collect();
 
-    List<Row> expected = new ArrayList<Row>(2);
+    List<Row> expected = new ArrayList<>(2);
     expected.add(RowFactory.create("Michael", 29));
     expected.add(RowFactory.create("Yin", 28));
 
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
index 9b624f318c6f88591c95f02e5705aab8f7f1b01d..b054b1095b2b06e23b101de0603fbed2943d0f5a 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java
@@ -67,7 +67,7 @@ public class JavaDatasetSuite implements Serializable {
   }
 
   private <T1, T2> Tuple2<T1, T2> tuple2(T1 t1, T2 t2) {
-    return new Tuple2<T1, T2>(t1, t2);
+    return new Tuple2<>(t1, t2);
   }
 
   @Test
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java
index 5a167edd89592e51efd59c43513aed653717f869..ae0c097c362ab95fd81d4a1ca4832a733d4e226d 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java
@@ -42,14 +42,14 @@ public class MyDoubleAvg extends UserDefinedAggregateFunction {
   private DataType _returnDataType;
 
   public MyDoubleAvg() {
-    List<StructField> inputFields = new ArrayList<StructField>();
+    List<StructField> inputFields = new ArrayList<>();
     inputFields.add(DataTypes.createStructField("inputDouble", DataTypes.DoubleType, true));
     _inputDataType = DataTypes.createStructType(inputFields);
 
     // The buffer has two values, bufferSum for storing the current sum and
     // bufferCount for storing the number of non-null input values that have been contributed
     // to the current sum.
-    List<StructField> bufferFields = new ArrayList<StructField>();
+    List<StructField> bufferFields = new ArrayList<>();
     bufferFields.add(DataTypes.createStructField("bufferSum", DataTypes.DoubleType, true));
     bufferFields.add(DataTypes.createStructField("bufferCount", DataTypes.LongType, true));
     _bufferSchema = DataTypes.createStructType(bufferFields);
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java
index c3b7768e71bf8768c3feda6c79be4e15fd1fc79b..d17fb3e5194f3e41c25a42c766a5a815deb83ab2 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java
@@ -41,11 +41,11 @@ public class MyDoubleSum extends UserDefinedAggregateFunction {
   private DataType _returnDataType;
 
   public MyDoubleSum() {
-    List<StructField> inputFields = new ArrayList<StructField>();
+    List<StructField> inputFields = new ArrayList<>();
     inputFields.add(DataTypes.createStructField("inputDouble", DataTypes.DoubleType, true));
     _inputDataType = DataTypes.createStructType(inputFields);
 
-    List<StructField> bufferFields = new ArrayList<StructField>();
+    List<StructField> bufferFields = new ArrayList<>();
     bufferFields.add(DataTypes.createStructField("bufferDouble", DataTypes.DoubleType, true));
     _bufferSchema = DataTypes.createStructType(bufferFields);
 
diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/test/Complex.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/test/Complex.java
index 4ef1f276d1bbbc1fe26551fc09dace15af948490..fc24600a1e4a723d3e67eba2a7250a6e710d895b 100644
--- a/sql/hive/src/test/java/org/apache/spark/sql/hive/test/Complex.java
+++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/test/Complex.java
@@ -50,7 +50,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
   private static final org.apache.thrift.protocol.TField LINT_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("lintString", org.apache.thrift.protocol.TType.LIST, (short)5);
   private static final org.apache.thrift.protocol.TField M_STRING_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("mStringString", org.apache.thrift.protocol.TType.MAP, (short)6);
 
-  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<>();
   static {
     schemes.put(StandardScheme.class, new ComplexStandardSchemeFactory());
     schemes.put(TupleScheme.class, new ComplexTupleSchemeFactory());
@@ -72,7 +72,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
     LINT_STRING((short)5, "lintString"),
     M_STRING_STRING((short)6, "mStringString");
 
-    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+    private static final Map<String, _Fields> byName = new HashMap<>();
 
     static {
       for (_Fields field : EnumSet.allOf(_Fields.class)) {
@@ -141,7 +141,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
   private byte __isset_bitfield = 0;
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
-    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<>(_Fields.class);
     tmpMap.put(_Fields.AINT, new org.apache.thrift.meta_data.FieldMetaData("aint", org.apache.thrift.TFieldRequirementType.DEFAULT,
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     tmpMap.put(_Fields.A_STRING, new org.apache.thrift.meta_data.FieldMetaData("aString", org.apache.thrift.TFieldRequirementType.DEFAULT,
@@ -194,28 +194,28 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
       this.aString = other.aString;
     }
     if (other.isSetLint()) {
-      List<Integer> __this__lint = new ArrayList<Integer>();
+      List<Integer> __this__lint = new ArrayList<>();
       for (Integer other_element : other.lint) {
         __this__lint.add(other_element);
       }
       this.lint = __this__lint;
     }
     if (other.isSetLString()) {
-      List<String> __this__lString = new ArrayList<String>();
+      List<String> __this__lString = new ArrayList<>();
       for (String other_element : other.lString) {
         __this__lString.add(other_element);
       }
       this.lString = __this__lString;
     }
     if (other.isSetLintString()) {
-      List<IntString> __this__lintString = new ArrayList<IntString>();
+      List<IntString> __this__lintString = new ArrayList<>();
       for (IntString other_element : other.lintString) {
         __this__lintString.add(new IntString(other_element));
       }
       this.lintString = __this__lintString;
     }
     if (other.isSetMStringString()) {
-      Map<String,String> __this__mStringString = new HashMap<String,String>();
+      Map<String,String> __this__mStringString = new HashMap<>();
       for (Map.Entry<String, String> other_element : other.mStringString.entrySet()) {
 
         String other_element_key = other_element.getKey();
@@ -339,7 +339,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
 
   public void addToLString(String elem) {
     if (this.lString == null) {
-      this.lString = new ArrayList<String>();
+      this.lString = new ArrayList<>();
     }
     this.lString.add(elem);
   }
@@ -411,7 +411,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
 
   public void putToMStringString(String key, String val) {
     if (this.mStringString == null) {
-      this.mStringString = new HashMap<String,String>();
+      this.mStringString = new HashMap<>();
     }
     this.mStringString.put(key, val);
   }
@@ -876,7 +876,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
                 org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
-                struct.lint = new ArrayList<Integer>(_list0.size);
+                struct.lint = new ArrayList<>(_list0.size);
                 for (int _i1 = 0; _i1 < _list0.size; ++_i1)
                 {
                   int _elem2; // required
@@ -894,7 +894,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
                 org.apache.thrift.protocol.TList _list3 = iprot.readListBegin();
-                struct.lString = new ArrayList<String>(_list3.size);
+                struct.lString = new ArrayList<>(_list3.size);
                 for (int _i4 = 0; _i4 < _list3.size; ++_i4)
                 {
                   String _elem5; // required
@@ -912,7 +912,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
                 org.apache.thrift.protocol.TList _list6 = iprot.readListBegin();
-                struct.lintString = new ArrayList<IntString>(_list6.size);
+                struct.lintString = new ArrayList<>(_list6.size);
                 for (int _i7 = 0; _i7 < _list6.size; ++_i7)
                 {
                   IntString _elem8; // required
@@ -1114,7 +1114,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
       if (incoming.get(2)) {
         {
           org.apache.thrift.protocol.TList _list21 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32());
-          struct.lint = new ArrayList<Integer>(_list21.size);
+          struct.lint = new ArrayList<>(_list21.size);
           for (int _i22 = 0; _i22 < _list21.size; ++_i22)
           {
             int _elem23; // required
@@ -1127,7 +1127,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
       if (incoming.get(3)) {
         {
           org.apache.thrift.protocol.TList _list24 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.lString = new ArrayList<String>(_list24.size);
+          struct.lString = new ArrayList<>(_list24.size);
           for (int _i25 = 0; _i25 < _list24.size; ++_i25)
           {
             String _elem26; // required
@@ -1140,7 +1140,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
       if (incoming.get(4)) {
         {
           org.apache.thrift.protocol.TList _list27 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.lintString = new ArrayList<IntString>(_list27.size);
+          struct.lintString = new ArrayList<>(_list27.size);
           for (int _i28 = 0; _i28 < _list27.size; ++_i28)
           {
             IntString _elem29; // required
@@ -1154,7 +1154,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
       if (incoming.get(5)) {
         {
           org.apache.thrift.protocol.TMap _map30 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.mStringString = new HashMap<String,String>(2*_map30.size);
+          struct.mStringString = new HashMap<>(2*_map30.size);
           for (int _i31 = 0; _i31 < _map30.size; ++_i31)
           {
             String _key32; // required