From ffe272d97c22955fe7744b1c0132cd9877b6df96 Mon Sep 17 00:00:00 2001
From: Aaron Davidson <aaron@databricks.com>
Date: Wed, 19 Mar 2014 17:56:48 -0700
Subject: [PATCH] Revert "SPARK-1099: Spark's local mode should probably
 respect spark.cores.max by default"

This reverts commit 16789317a34c1974f7b35960f06a7b51d8e0f29f, because Jenkins was not run for that PR.
---
 .../scala/org/apache/spark/SparkContext.scala |  5 +----
 .../scala/org/apache/spark/FileSuite.scala    |  4 ++--
 .../SparkContextSchedulerCreationSuite.scala  | 19 +++----------------
 3 files changed, 6 insertions(+), 22 deletions(-)
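
Note for reviewers: with this revert, a bare "local" master goes back to a
single-core LocalBackend, and "spark.cores.max" is once again ignored in
local mode. A minimal sketch of the restored behavior, using the same
SparkContext constructor the tests below use (app names here are
illustrative):

    import org.apache.spark.{SparkConf, SparkContext}

    // After the revert, a bare "local" master always gets exactly one core;
    // spark.cores.max no longer influences local mode.
    val conf = new SparkConf().set("spark.cores.max", "8") // ignored for "local"
    val sc = new SparkContext("local", "demo", conf)       // LocalBackend with totalCores == 1

    // To run on multiple local threads, encode the count in the master URL:
    //   new SparkContext("local[4]", "demo")              // LocalBackend with totalCores == 4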

diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 8f74607278..a1003b7925 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -1262,10 +1262,7 @@ object SparkContext extends Logging {
     master match {
       case "local" =>
         val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
-        // Use user specified in config, up to all available cores
-        val realCores = Runtime.getRuntime.availableProcessors()
-        val toUseCores = math.min(sc.conf.getInt("spark.cores.max", realCores), realCores)
-        val backend = new LocalBackend(scheduler, toUseCores)
+        val backend = new LocalBackend(scheduler, 1)
         scheduler.initialize(backend)
         scheduler
 
diff --git a/core/src/test/scala/org/apache/spark/FileSuite.scala b/core/src/test/scala/org/apache/spark/FileSuite.scala
index b4a5881cd9..01af940771 100644
--- a/core/src/test/scala/org/apache/spark/FileSuite.scala
+++ b/core/src/test/scala/org/apache/spark/FileSuite.scala
@@ -34,7 +34,7 @@ import org.apache.spark.SparkContext._
 class FileSuite extends FunSuite with LocalSparkContext {
 
   test("text files") {
-    sc = new SparkContext("local[1]", "test")
+    sc = new SparkContext("local", "test")
     val tempDir = Files.createTempDir()
     val outputDir = new File(tempDir, "output").getAbsolutePath
     val nums = sc.makeRDD(1 to 4)
@@ -176,7 +176,7 @@ class FileSuite extends FunSuite with LocalSparkContext {
 
   test("write SequenceFile using new Hadoop API") {
     import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat
-    sc = new SparkContext("local[1]", "test")
+    sc = new SparkContext("local", "test")
     val tempDir = Files.createTempDir()
     val outputDir = new File(tempDir, "output").getAbsolutePath
     val nums = sc.makeRDD(1 to 3).map(x => (new IntWritable(x), new Text("a" * x)))
diff --git a/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala b/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala
index 9dd42be1d7..b543471a5d 100644
--- a/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala
+++ b/core/src/test/scala/org/apache/spark/SparkContextSchedulerCreationSuite.scala
@@ -27,10 +27,10 @@ import org.apache.spark.scheduler.local.LocalBackend
 class SparkContextSchedulerCreationSuite
   extends FunSuite with PrivateMethodTester with LocalSparkContext with Logging {
 
-  def createTaskScheduler(master: String, conf: SparkConf = new SparkConf()): TaskSchedulerImpl = {
+  def createTaskScheduler(master: String): TaskSchedulerImpl = {
     // Create local SparkContext to setup a SparkEnv. We don't actually want to start() the
     // real schedulers, so we don't want to create a full SparkContext with the desired scheduler.
-    sc = new SparkContext("local", "test", conf)
+    sc = new SparkContext("local", "test")
     val createTaskSchedulerMethod = PrivateMethod[TaskScheduler]('createTaskScheduler)
     val sched = SparkContext invokePrivate createTaskSchedulerMethod(sc, master)
     sched.asInstanceOf[TaskSchedulerImpl]
@@ -44,26 +44,13 @@ class SparkContextSchedulerCreationSuite
   }
 
   test("local") {
-    var conf = new SparkConf()
-    conf.set("spark.cores.max", "1")
-    val sched = createTaskScheduler("local", conf)
+    val sched = createTaskScheduler("local")
     sched.backend match {
       case s: LocalBackend => assert(s.totalCores === 1)
       case _ => fail()
     }
   }
 
-  test("local-cores-exceed") {
-    val cores = Runtime.getRuntime.availableProcessors() + 1
-    var conf = new SparkConf()
-    conf.set("spark.cores.max", cores.toString)
-    val sched = createTaskScheduler("local", conf)
-    sched.backend match {
-      case s: LocalBackend => assert(s.totalCores === Runtime.getRuntime.availableProcessors())
-      case _ => fail()
-    }
-  }
-
   test("local-n") {
     val sched = createTaskScheduler("local[5]")
     assert(sched.maxTaskFailures === 1)
-- 
GitLab