From 05ab2948ab357fc07222bb3505df80b1886f7310 Mon Sep 17 00:00:00 2001
From: Yonathan Randolph <yonathan@liftigniter.com>
Date: Wed, 16 Mar 2016 09:34:04 +0000
Subject: [PATCH] [SPARK-13906] Ensure that there are at least 2 dispatcher
 threads.

## What changes were proposed in this pull request?

Force at least two dispatcher-event-loop threads. Since SparkDeploySchedulerBackend (in AppClient) calls askWithRetry to CoarseGrainedScheduler in the same process, the driver needs at least two dispatcher threads; with only one, that thread blocks waiting for a reply that can only be delivered by the same thread, and the dispatcher hangs.
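
The snippet below is not part of the patch; it is a standalone Scala sketch of the deadlock class this change guards against. A task running on a single-thread pool submits a second task to the same pool and blocks on its result, which can never be computed because the only thread is the one blocking. This mirrors a single dispatcher-event-loop thread blocking on an ask to an endpoint served by that same thread.

```scala
import java.util.concurrent.{Executors, TimeUnit}

object SingleThreadDeadlockDemo {
  def main(args: Array[String]): Unit = {
    // A 1-thread pool standing in for a dispatcher with numThreads = 1.
    val pool = Executors.newFixedThreadPool(1)

    val outer = pool.submit(new Runnable {
      override def run(): Unit = {
        // The running task submits another task to the same pool and then
        // blocks waiting for its result, like a dispatcher thread doing a
        // blocking ask to an endpoint dispatched by the same pool.
        val inner = pool.submit(new Runnable {
          override def run(): Unit = println("inner task ran")
        })
        inner.get() // never completes: the pool's only thread is blocked here
        println("outer task finished")
      }
    })

    pool.awaitTermination(5, TimeUnit.SECONDS)
    println(s"outer done? ${outer.isDone}") // false: deadlocked
    pool.shutdownNow()
  }
}
```

With two or more threads in the pool, the inner task gets a free thread, the outer task unblocks, and the demo completes, which is exactly the effect of bumping the dispatcher minimum to 2.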

## How was this patch tested?

Manual.
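
One way to exercise the old behavior by hand (an assumption of this write-up, not described in the PR) is to pin the dispatcher to a single thread via the existing `spark.rpc.netty.dispatcher.numThreads` setting and start a standalone-mode driver; without this fix the registration ask can hang the lone dispatcher thread.

```scala
import org.apache.spark.{SparkConf, SparkContext}

object DispatcherHangRepro {
  def main(args: Array[String]): Unit = {
    // Hypothetical repro: force a single dispatcher-event-loop thread so the
    // driver's ask to CoarseGrainedScheduler has no spare thread to run on.
    val conf = new SparkConf()
      .setMaster("local-cluster[1,1,1024]") // standalone-style test master
      .setAppName("SPARK-13906-repro")
      .set("spark.rpc.netty.dispatcher.numThreads", "1")

    val sc = new SparkContext(conf) // expected to hang before this patch
    println(sc.parallelize(1 to 10).count())
    sc.stop()
  }
}
```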

Author: Yonathan Randolph <yonathan@gmail.com>

Author: Yonathan Randolph <yonathan@liftigniter.com>

Closes #11728 from yonran/SPARK-13906.
---
 core/src/main/scala/org/apache/spark/rpc/netty/Dispatcher.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/src/main/scala/org/apache/spark/rpc/netty/Dispatcher.scala b/core/src/main/scala/org/apache/spark/rpc/netty/Dispatcher.scala
index f092f1d7f6..613d6ee781 100644
--- a/core/src/main/scala/org/apache/spark/rpc/netty/Dispatcher.scala
+++ b/core/src/main/scala/org/apache/spark/rpc/netty/Dispatcher.scala
@@ -192,7 +192,7 @@ private[netty] class Dispatcher(nettyEnv: NettyRpcEnv) extends Logging {
   /** Thread pool used for dispatching messages. */
   private val threadpool: ThreadPoolExecutor = {
     val numThreads = nettyEnv.conf.getInt("spark.rpc.netty.dispatcher.numThreads",
-      Runtime.getRuntime.availableProcessors())
+      math.max(2, Runtime.getRuntime.availableProcessors()))
     val pool = ThreadUtils.newDaemonFixedThreadPool(numThreads, "dispatcher-event-loop")
     for (i <- 0 until numThreads) {
       pool.execute(new MessageLoop)
-- 
GitLab