From cfb25b27c0b32a8a70a518955fb269314b1fd716 Mon Sep 17 00:00:00 2001
From: jinxing <jinxing6042@126.com>
Date: Thu, 27 Jul 2017 11:55:48 +0800
Subject: [PATCH] [SPARK-21530] Update description of
 spark.shuffle.maxChunksBeingTransferred.

## What changes were proposed in this pull request?

Update the description of `spark.shuffle.maxChunksBeingTransferred` to note that new incoming connections will be closed when the max is hit and that the client should have a retry mechanism.
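
For reference, a minimal sketch (not part of this patch) of how an application might tune the chunk limit together with the client-side retry settings; the specific values below are illustrative only:

```java
import org.apache.spark.SparkConf;

public class ShuffleLimitExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf()
        .setAppName("shuffle-limit-example")
        // Cap concurrent chunk transfers on the shuffle service; connections
        // arriving past this limit are closed and the client must retry.
        // (Illustrative value; the default is Long.MAX_VALUE.)
        .set("spark.shuffle.maxChunksBeingTransferred", "4096")
        // Number of retries the client performs before the task fails
        // with a fetch failure (3 is the Spark default).
        .set("spark.shuffle.io.maxRetries", "3")
        // Wait between retries (5s is the Spark default).
        .set("spark.shuffle.io.retryWait", "5s");
  }
}
```

With these settings, a client whose connection is closed retries for roughly `maxRetries * retryWait` (about 15 seconds here) before the task fails with a fetch failure.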

Author: jinxing <jinxing6042@126.com>

Closes #18735 from jinxing64/SPARK-21530.
---
 .../java/org/apache/spark/network/util/TransportConf.java   | 6 +++++-
 docs/configuration.md                                       | 6 +++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java b/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
index ea52e9fe6c..88256b810b 100644
--- a/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
+++ b/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java
@@ -258,7 +258,11 @@ public class TransportConf {
   }
 
   /**
-   * The max number of chunks allowed to being transferred at the same time on shuffle service.
+   * The max number of chunks allowed to be transferred at the same time on the shuffle service.
+   * Note that new incoming connections will be closed when the max number is hit. The client will
+   * retry according to the shuffle retry configs (see `spark.shuffle.io.maxRetries` and
+   * `spark.shuffle.io.retryWait`); if those limits are reached, the task will fail with a fetch
+   * failure.
    */
   public long maxChunksBeingTransferred() {
     return conf.getLong("spark.shuffle.maxChunksBeingTransferred", Long.MAX_VALUE);
diff --git a/docs/configuration.md b/docs/configuration.md
index f4b6f46db5..500f980455 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -635,7 +635,11 @@ Apart from these, the following properties are also available, and may be useful
   <td><code>spark.shuffle.maxChunksBeingTransferred</code></td>
   <td>Long.MAX_VALUE</td>
   <td>
-    The max number of chunks allowed to being transferred at the same time on shuffle service.
+    The max number of chunks allowed to be transferred at the same time on the shuffle service.
+    Note that new incoming connections will be closed when the max number is hit. The client will
+    retry according to the shuffle retry configs (see <code>spark.shuffle.io.maxRetries</code> and
+    <code>spark.shuffle.io.retryWait</code>); if those limits are reached, the task will fail
+    with a fetch failure.
   </td>
 </tr>
 <tr>
-- 
GitLab