diff --git a/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala b/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala
index ab200decb18c1ee9211a4e92b3919245b0d9f670..20f6e65020f54d87418ffb8a1441a886f759c754 100644
--- a/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala
+++ b/core/src/main/scala/spark/scheduler/cluster/ClusterScheduler.scala
@@ -249,7 +249,7 @@ private[spark] class ClusterScheduler(val sc: SparkContext)
     }
   }
 
-  def slaveLost(slaveId: String, reason: ExecutorLostReason) {
+  def slaveLost(slaveId: String, reason: ExecutorLossReason) {
     var failedHost: Option[String] = None
     synchronized {
       val host = slaveIdToHost(slaveId)
diff --git a/core/src/main/scala/spark/scheduler/cluster/ExecutorLostReason.scala b/core/src/main/scala/spark/scheduler/cluster/ExecutorLossReason.scala
similarity index 70%
rename from core/src/main/scala/spark/scheduler/cluster/ExecutorLostReason.scala
rename to core/src/main/scala/spark/scheduler/cluster/ExecutorLossReason.scala
index 8976b3969d5ca7266307e780d3e0d38b845153d1..bba7de6a65c3d17aab47bdfa07c464ee7e801604 100644
--- a/core/src/main/scala/spark/scheduler/cluster/ExecutorLostReason.scala
+++ b/core/src/main/scala/spark/scheduler/cluster/ExecutorLossReason.scala
@@ -6,16 +6,16 @@ import spark.executor.ExecutorExitCode
  * Represents an explanation for a executor or whole slave failing or exiting.
  */
 private[spark]
-class ExecutorLostReason(val message: String) {
+class ExecutorLossReason(val message: String) {
   override def toString: String = message
 }
 
 private[spark]
 case class ExecutorExited(val exitCode: Int)
-  extends ExecutorLostReason(ExecutorExitCode.explainExitCode(exitCode)) {
+  extends ExecutorLossReason(ExecutorExitCode.explainExitCode(exitCode)) {
 }
 
 private[spark]
 case class SlaveLost(_message: String = "Slave lost")
-  extends ExecutorLostReason(_message) {
+  extends ExecutorLossReason(_message) {
 }
diff --git a/core/src/main/scala/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala b/core/src/main/scala/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala
index f5056287530d96bc3edf13631abdc8104818364d..efaf2d330c4fa25f846df304653e6ed7b2e38932 100644
--- a/core/src/main/scala/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala
+++ b/core/src/main/scala/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala
@@ -72,7 +72,7 @@ private[spark] class SparkDeploySchedulerBackend(
   }
 
   def executorRemoved(id: String, message: String) {
-    var reason: ExecutorLostReason = SlaveLost(message)
+    var reason: ExecutorLossReason = SlaveLost(message)
     if (message.startsWith("Command exited with code ")) {
       try {
         reason = ExecutorExited(message.substring("Command exited with code ".length).toInt)
diff --git a/core/src/main/scala/spark/scheduler/mesos/MesosSchedulerBackend.scala b/core/src/main/scala/spark/scheduler/mesos/MesosSchedulerBackend.scala
index b0d4315f0574514298815f803302ad9c7573fb82..8c7a1dfbc0570b80af2517a7a572eef106c9badd 100644
--- a/core/src/main/scala/spark/scheduler/mesos/MesosSchedulerBackend.scala
+++ b/core/src/main/scala/spark/scheduler/mesos/MesosSchedulerBackend.scala
@@ -267,7 +267,7 @@ private[spark] class MesosSchedulerBackend(
 
   override def frameworkMessage(d: SchedulerDriver, e: ExecutorID, s: SlaveID, b: Array[Byte]) {}
 
-  private def recordSlaveLost(d: SchedulerDriver, slaveId: SlaveID, reason: ExecutorLostReason) {
+  private def recordSlaveLost(d: SchedulerDriver, slaveId: SlaveID, reason: ExecutorLossReason) {
     logInfo("Mesos slave lost: " + slaveId.getValue)
     synchronized {
       slaveIdsWithExecutors -= slaveId.getValue
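
Note (not part of the patch): the sketch below is a minimal, self-contained illustration of how the renamed ExecutorLossReason hierarchy can be consumed. The classes are simplified stand-ins for the ones in ExecutorLossReason.scala (ExecutorExitCode.explainExitCode is replaced by a plain string), and parseReason is a hypothetical helper that mirrors the message parsing done in SparkDeploySchedulerBackend.executorRemoved above.

// Simplified stand-ins for the classes defined in ExecutorLossReason.scala.
class ExecutorLossReason(val message: String) {
  override def toString: String = message
}

case class ExecutorExited(exitCode: Int)
  extends ExecutorLossReason("Executor exited with code " + exitCode)

case class SlaveLost(_message: String = "Slave lost")
  extends ExecutorLossReason(_message)

object LossReasonExample {
  // Hypothetical helper mirroring SparkDeploySchedulerBackend.executorRemoved:
  // turn the raw executor-removed message into a typed loss reason.
  def parseReason(message: String): ExecutorLossReason = {
    val prefix = "Command exited with code "
    if (message.startsWith(prefix)) {
      try {
        ExecutorExited(message.substring(prefix.length).toInt)
      } catch {
        case _: NumberFormatException => SlaveLost(message)
      }
    } else {
      SlaveLost(message)
    }
  }

  def main(args: Array[String]) {
    // Prints "Executor exited with code 52" and "Worker disconnected".
    println(parseReason("Command exited with code 52").message)
    println(parseReason("Worker disconnected").message)
  }
}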