diff --git a/core/src/test/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleWriterSuite.java b/core/src/test/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleWriterSuite.java
index 730d265c87f882b048caa3c82e34305b2155078c..03116d8fc2b213949f63d0e3679ea6c409fcc979 100644
--- a/core/src/test/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleWriterSuite.java
+++ b/core/src/test/java/org/apache/spark/shuffle/unsafe/UnsafeShuffleWriterSuite.java
@@ -252,20 +252,6 @@ public class UnsafeShuffleWriterSuite {
     createWriter(false).stop(false);
   }
 
-  @Test
-  public void writeEmptyIterator() throws Exception {
-    final UnsafeShuffleWriter<Object, Object> writer = createWriter(true);
-    writer.write(Collections.<Product2<Object, Object>>emptyIterator());
-    final Option<MapStatus> mapStatus = writer.stop(true);
-    assertTrue(mapStatus.isDefined());
-    assertTrue(mergedOutputFile.exists());
-    assertArrayEquals(new long[NUM_PARTITITONS], partitionSizesInMergedFile);
-    assertEquals(0, taskMetrics.shuffleWriteMetrics().get().shuffleRecordsWritten());
-    assertEquals(0, taskMetrics.shuffleWriteMetrics().get().shuffleBytesWritten());
-    assertEquals(0, taskMetrics.diskBytesSpilled());
-    assertEquals(0, taskMetrics.memoryBytesSpilled());
-  }
-
   @Test
   public void writeWithoutSpilling() throws Exception {
     // In this example, each partition should have exactly one record:
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcRelation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcRelation.scala
index 58b97adb4616597a9be48f61844c836f280fe2bf..b69e14a179d0adcb471b074a244706088a6b4311 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcRelation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcRelation.scala
@@ -17,8 +17,9 @@
 
 package org.apache.spark.sql.hive.orc
 
-import java.util.{Objects, Properties}
+import java.util.Properties
 
+import com.google.common.base.Objects
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.{FileStatus, Path}
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars