From 88547a09fcc25df132b401ecec4ebe1ef6778576 Mon Sep 17 00:00:00 2001
From: Sandy Ryza <sandy@cloudera.com>
Date: Tue, 9 Sep 2014 10:23:28 -0700
Subject: [PATCH] SPARK-3422. JavaAPISuite.getHadoopInputSplits isn't used
 anywhere.

Author: Sandy Ryza <sandy@cloudera.com>

Closes #2324 from sryza/sandy-spark-3422 and squashes the following commits:

6446175 [Sandy Ryza] SPARK-3422. JavaAPISuite.getHadoopInputSplits isn't used anywhere.
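
The removed method was declared public void but never annotated with JUnit's
@Test, so the JUnit 4 runner silently skipped it, and nothing else in the
suite called it; deleting it removes dead code, not active test coverage.
A minimal sketch of the distinction, using a hypothetical class
(AnnotationSketch is illustrative, not part of Spark):

    import org.junit.Assert;
    import org.junit.Test;

    public class AnnotationSketch {
      @Test
      public void discovered() {       // @Test present: the JUnit 4 runner executes this
        Assert.assertTrue(true);
      }

      public void ignored() {          // no @Test: the runner never invokes it, so even
        Assert.fail("never reached");  //   a failing body goes unnoticed
      }
    }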
---
 .../java/org/apache/spark/JavaAPISuite.java   | 25 -------------------
 1 file changed, 25 deletions(-)

diff --git a/core/src/test/java/org/apache/spark/JavaAPISuite.java b/core/src/test/java/org/apache/spark/JavaAPISuite.java
index be99dc501c..b8574dfb42 100644
--- a/core/src/test/java/org/apache/spark/JavaAPISuite.java
+++ b/core/src/test/java/org/apache/spark/JavaAPISuite.java
@@ -29,19 +29,14 @@ import com.google.common.collect.Iterables;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
 import com.google.common.base.Optional;
 import com.google.common.base.Charsets;
 import com.google.common.io.Files;
 import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.compress.DefaultCodec;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
-import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.mapreduce.Job;
 import org.junit.After;
 import org.junit.Assert;
@@ -49,7 +44,6 @@ import org.junit.Before;
 import org.junit.Test;
 
 import org.apache.spark.api.java.JavaDoubleRDD;
-import org.apache.spark.api.java.JavaHadoopRDD;
 import org.apache.spark.api.java.JavaPairRDD;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
@@ -1313,23 +1307,4 @@ public class JavaAPISuite implements Serializable {
     SomeCustomClass[] collected = (SomeCustomClass[]) rdd.rdd().retag(SomeCustomClass.class).collect();
     Assert.assertEquals(data.size(), collected.length);
   }
-
-  public void getHadoopInputSplits() {
-    String outDir = new File(tempDir, "output").getAbsolutePath();
-    sc.parallelize(Arrays.asList(1, 2, 3, 4, 5), 2).saveAsTextFile(outDir);
-
-    JavaHadoopRDD<LongWritable, Text> hadoopRDD = (JavaHadoopRDD<LongWritable, Text>)
-        sc.hadoopFile(outDir, TextInputFormat.class, LongWritable.class, Text.class);
-    List<String> inputPaths = hadoopRDD.mapPartitionsWithInputSplit(
-        new Function2<InputSplit, Iterator<Tuple2<LongWritable, Text>>, Iterator<String>>() {
-      @Override
-      public Iterator<String> call(InputSplit split, Iterator<Tuple2<LongWritable, Text>> it)
-          throws Exception {
-        FileSplit fileSplit = (FileSplit) split;
-        return Lists.newArrayList(fileSplit.getPath().toUri().getPath()).iterator();
-      }
-    }, true).collect();
-    Assert.assertEquals(Sets.newHashSet(inputPaths),
-        Sets.newHashSet(outDir + "/part-00000", outDir + "/part-00001"));
-  }
 }
-- 
GitLab