From e85af507671d417724c28ee2db499fc019feb1d8 Mon Sep 17 00:00:00 2001
From: Binh Nguyen <bnguyen@palantir.com>
Date: Tue, 10 Dec 2013 11:01:56 -0800
Subject: [PATCH] Leave the default value of numPartitions to the Scala code.

---
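Note: a minimal caller sketch (not part of the applied diff), assuming an existing
JavaPairRDD[String, Int] named `pairs`; the helper name `sortBoth` is hypothetical
and only illustrates the two overloads this patch touches.

    import java.util.Comparator
    import org.apache.spark.api.java.JavaPairRDD

    def sortBoth(pairs: JavaPairRDD[String, Int]): Unit = {
      // Explicit comparator for the String keys.
      val comp: Comparator[String] = new Comparator[String] {
        override def compare(x: String, y: String): Int = x.compareTo(y)
      }
      // Two-argument overload: after this patch, numPartitions is left to the
      // default chosen by the Scala OrderedRDDFunctions.sortByKey, instead of
      // being forced to rdd.partitions.size.
      val byDefault = pairs.sortByKey(comp, true)
      // Three-argument overload: the caller still pins the partition count.
      val byFour = pairs.sortByKey(comp, true, 4)
    }
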
 .../scala/org/apache/spark/api/java/JavaPairRDD.scala  | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
index 36bd3e673f..2d2b3847de 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
@@ -584,7 +584,9 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])(implicit val kManifest: ClassManif
    * order of the keys).
    */
   def sortByKey(comp: Comparator[K], ascending: Boolean): JavaPairRDD[K, V] = {
-    sortByKey(comp, ascending, rdd.partitions.size)
+    // numPartitions should never be negative in practice, so -1 is used here as a sentinel to
+    // indicate that the Scala implementation's default value should be used.
+    sortByKey(comp, ascending, -1)
   }
 
   /**
@@ -598,7 +600,11 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])(implicit val kManifest: ClassManif
       override def compare(b: K) = comp.compare(a, b)
     }
     implicit def toOrdered(x: K): Ordered[K] = new KeyOrdering(x)
-    fromRDD(new OrderedRDDFunctions[K, V, (K, V)](rdd).sortByKey(ascending, numPartitions))
+    if (numPartitions < 0) {
+      fromRDD(new OrderedRDDFunctions[K, V, (K, V)](rdd).sortByKey(ascending))
+    } else {
+      fromRDD(new OrderedRDDFunctions[K, V, (K, V)](rdd).sortByKey(ascending, numPartitions))
+    }
   }
 
   /**
-- 
GitLab