From 04ad78b09d195e52d2747c18fe2e3a4640abf838 Mon Sep 17 00:00:00 2001
From: Patrick Wendell <pwendell@gmail.com>
Date: Wed, 14 Aug 2013 14:55:24 -0700
Subject: [PATCH] Style cleanup based on Matei feedback

Use for-comprehensions instead of Option.foreach when updating task
metrics in Executor and LocalScheduler, drop the management.ManagementFactory
import from LocalScheduler, and remove redundant braces around an
if-expression in StagePage's XML literal.
---
 core/src/main/scala/spark/executor/Executor.scala             | 4 ++--
 .../src/main/scala/spark/scheduler/local/LocalScheduler.scala | 3 +--
 core/src/main/scala/spark/ui/jobs/StagePage.scala             | 2 +-
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/core/src/main/scala/spark/executor/Executor.scala b/core/src/main/scala/spark/executor/Executor.scala
index c5202d94b0..05a960d7c5 100644
--- a/core/src/main/scala/spark/executor/Executor.scala
+++ b/core/src/main/scala/spark/executor/Executor.scala
@@ -130,7 +130,7 @@ private[spark] class Executor(executorId: String, slaveHostname: String, propert
         taskStart = System.currentTimeMillis()
         val value = task.run(taskId.toInt)
         val taskFinish = System.currentTimeMillis()
-        task.metrics.foreach{ m =>
+        for (m <- task.metrics) {
           m.hostname = Utils.localHostName
           m.executorDeserializeTime = (taskStart - startTime).toInt
           m.executorRunTime = (taskFinish - taskStart).toInt
@@ -158,7 +158,7 @@ private[spark] class Executor(executorId: String, slaveHostname: String, propert
         case t: Throwable => {
           val serviceTime = (System.currentTimeMillis() - taskStart).toInt
           val metrics = attemptedTask.flatMap(t => t.metrics)
-          metrics.foreach {m =>
+          for (m <- metrics) {
             m.executorRunTime = serviceTime
             m.jvmGCTime = getTotalGCTime - startGCTime
           }
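
For reference, the two styles are interchangeable here: a for-comprehension
over an Option with no yield desugars to exactly the foreach call it
replaces. A minimal standalone sketch (Metrics is a hypothetical stand-in,
not Spark's TaskMetrics):

    // A for-comprehension over an Option runs the body once if a value
    // is present and zero times if it is None, just like Option.foreach.
    case class Metrics(var executorRunTime: Int = 0)

    object ForVsForeach {
      def main(args: Array[String]): Unit = {
        val metrics: Option[Metrics] = Some(Metrics())

        // Closure style, as before this patch:
        metrics.foreach { m => m.executorRunTime = 42 }

        // for-comprehension style, as after this patch; the compiler
        // rewrites it to the foreach call above.
        for (m <- metrics) {
          m.executorRunTime = 42
        }

        println(metrics)  // Some(Metrics(42))
      }
    }
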
diff --git a/core/src/main/scala/spark/scheduler/local/LocalScheduler.scala b/core/src/main/scala/spark/scheduler/local/LocalScheduler.scala
index 33e7a10ea4..6c43928bc8 100644
--- a/core/src/main/scala/spark/scheduler/local/LocalScheduler.scala
+++ b/core/src/main/scala/spark/scheduler/local/LocalScheduler.scala
@@ -34,7 +34,6 @@ import spark.scheduler._
 import spark.scheduler.cluster._
 import spark.scheduler.cluster.SchedulingMode.SchedulingMode
 import akka.actor._
-import management.ManagementFactory
 
 /**
  * A FIFO or Fair TaskScheduler implementation that runs tasks locally in a thread pool. Optionally
@@ -218,7 +217,7 @@ private[spark] class LocalScheduler(threads: Int, val maxFailures: Int, val sc:
       case t: Throwable => {
         val serviceTime = System.currentTimeMillis() - taskStart
         val metrics = attemptedTask.flatMap(t => t.metrics)
-        metrics.foreach {m =>
+        for (m <- metrics) {
           m.executorRunTime = serviceTime.toInt
           m.jvmGCTime = getTotalGCTime - startGCTime
         }
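
The getTotalGCTime helper called in this hunk is defined outside the diff,
but it presumably sums cumulative collection time from the JVM's GC MXBeans,
the standard JMX pattern the removed import pointed at. A standalone sketch
of that pattern (an assumption about the helper, not Spark's actual code):

    import java.lang.management.ManagementFactory
    import scala.collection.JavaConverters._

    object GcTime {
      // Sum cumulative collection time (ms) across all collectors;
      // getCollectionTime returns -1 for collectors that do not report it.
      def getTotalGCTime: Long =
        ManagementFactory.getGarbageCollectorMXBeans.asScala
          .map(_.getCollectionTime)
          .filter(_ >= 0)
          .sum

      def main(args: Array[String]): Unit =
        println("Total GC time so far: " + getTotalGCTime + " ms")
    }
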
diff --git a/core/src/main/scala/spark/ui/jobs/StagePage.scala b/core/src/main/scala/spark/ui/jobs/StagePage.scala
index 28a6d7b179..f91a415e37 100644
--- a/core/src/main/scala/spark/ui/jobs/StagePage.scala
+++ b/core/src/main/scala/spark/ui/jobs/StagePage.scala
@@ -166,7 +166,7 @@ private[spark] class StagePage(parent: JobProgressUI) {
           Utils.memoryBytesToString(s.shuffleBytesWritten)}.getOrElse("")}</td>
       }}
       <td sorttable_customkey={gcTime.toString}>
-        {if (gcTime > 0) {parent.formatDuration(gcTime)} else ""}
+        {if (gcTime > 0) parent.formatDuration(gcTime) else ""}
       </td>
       <td>{exception.map(e =>
         <span>
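
On the StagePage change: inside a Scala XML literal, {...} already switches
back to expression context, so the inner braces around the if were
redundant. A standalone sketch (formatDuration is a stand-in, not the real
Spark UI helper):

    object XmlBraces {
      def formatDuration(ms: Long): String = ms + " ms"

      def main(args: Array[String]): Unit = {
        val gcTime = 120L
        // {...} embeds one Scala expression; no extra braces needed.
        val cell = <td>{if (gcTime > 0) formatDuration(gcTime) else ""}</td>
        println(cell)  // prints <td>120 ms</td>
      }
    }
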
-- 
GitLab