Skip to content
Snippets Groups Projects
Commit dfd40e9f authored by Matei Zaharia's avatar Matei Zaharia
Browse files

Merge pull request #175 from kayousterhout/no_retry_not_serializable

Don't retry tasks when they fail due to a NotSerializableException

As with my previous pull request, this will be unit tested once the Cluster and Local schedulers get merged.
parents ed25105f 29c88e40
No related branches found
No related tags found
Loading
@@ -17,6 +17,7 @@
 package org.apache.spark.scheduler.cluster
+import java.io.NotSerializableException
 import java.util.Arrays
 import scala.collection.mutable.ArrayBuffer
@@ -484,6 +485,14 @@ private[spark] class ClusterTaskSetManager(
       case ef: ExceptionFailure =>
         sched.dagScheduler.taskEnded(tasks(index), ef, null, null, info, ef.metrics.getOrElse(null))
if (ef.className == classOf[NotSerializableException].getName()) {
// If the task result wasn't serializable, there's no point in trying to re-execute it.
logError("Task %s:%s had a not serializable result: %s; not retrying".format(
taskSet.id, index, ef.description))
abort("Task %s:%s had a not serializable result: %s".format(
taskSet.id, index, ef.description))
return
}
        val key = ef.description
        val now = clock.getTime()
        val (printFull, dupCount) = {
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment