From de6e633420aba1fe5d806a2725a95e610699ae7d Mon Sep 17 00:00:00 2001
From: Azeem Jiva <azeemj@gmail.com>
Date: Tue, 26 Apr 2016 11:49:04 +0100
Subject: [PATCH] [SPARK-14756][CORE] Use parseLong instead of valueOf

## What changes were proposed in this pull request?

Use Long.parseLong, which returns a primitive long rather than a boxed
Long. Also use a series of append() calls instead of string concatenation
inside append(), which avoids creating an extra StringBuilder and a
temporary String.

## How was this patch tested?

Unit tests

Author: Azeem Jiva <azeemj@gmail.com>

Closes #12520 from javawithjiva/minor.
---
 .../org/apache/spark/unsafe/types/CalendarInterval.java | 8 ++++----
 .../scala/org/apache/spark/deploy/SparkHadoopUtil.scala | 2 +-
 .../spark/examples/mllib/JavaStreamingTestExample.java  | 8 ++++----
 .../sql/execution/datasources/PartitioningUtils.scala   | 6 +++---
 .../apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala  | 2 +-
 5 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/common/unsafe/src/main/java/org/apache/spark/unsafe/types/CalendarInterval.java b/common/unsafe/src/main/java/org/apache/spark/unsafe/types/CalendarInterval.java
index 62edf6c64b..518ed6470a 100644
--- a/common/unsafe/src/main/java/org/apache/spark/unsafe/types/CalendarInterval.java
+++ b/common/unsafe/src/main/java/org/apache/spark/unsafe/types/CalendarInterval.java
@@ -62,7 +62,7 @@ public final class CalendarInterval implements Serializable {
     if (s == null) {
       return 0;
     } else {
-      return Long.valueOf(s);
+      return Long.parseLong(s);
     }
   }
 
@@ -91,7 +91,7 @@ public final class CalendarInterval implements Serializable {
       String s, long minValue, long maxValue) throws IllegalArgumentException {
     long result = 0;
     if (s != null) {
-      result = Long.valueOf(s);
+      result = Long.parseLong(s);
       if (result < minValue || result > maxValue) {
         throw new IllegalArgumentException(String.format("%s %d outside range [%d, %d]",
           fieldName, result, minValue, maxValue));
@@ -218,7 +218,7 @@ public final class CalendarInterval implements Serializable {
         result = new CalendarInterval(0, millisecond * MICROS_PER_MILLI);
 
       } else if (unit.equals("microsecond")) {
-        long micros = Long.valueOf(m.group(1));
+        long micros = Long.parseLong(m.group(1));
         result = new CalendarInterval(0, micros);
       }
     } catch (Exception e) {
@@ -318,7 +318,7 @@ public final class CalendarInterval implements Serializable {
 
   private void appendUnit(StringBuilder sb, long value, String unit) {
     if (value != 0) {
-      sb.append(" " + value + " " + unit + "s");
+      sb.append(' ').append(value).append(' ').append(unit).append('s');
     }
   }
 }
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
index cda9d38c6a..2e9e45a155 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala
@@ -384,7 +384,7 @@ object SparkHadoopUtil {
 
   def get: SparkHadoopUtil = {
     // Check each time to support changing to/from YARN
-    val yarnMode = java.lang.Boolean.valueOf(
+    val yarnMode = java.lang.Boolean.parseBoolean(
       System.getProperty("SPARK_YARN_MODE", System.getenv("SPARK_YARN_MODE")))
     if (yarnMode) {
       yarn
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java
index df901997e1..cfaa577b51 100644
--- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaStreamingTestExample.java
@@ -66,8 +66,8 @@ public class JavaStreamingTestExample {
     }
 
     String dataDir = args[0];
-    Duration batchDuration = Seconds.apply(Long.valueOf(args[1]));
-    int numBatchesTimeout = Integer.valueOf(args[2]);
+    Duration batchDuration = Seconds.apply(Long.parseLong(args[1]));
+    int numBatchesTimeout = Integer.parseInt(args[2]);
 
     SparkConf conf = new SparkConf().setMaster("local").setAppName("StreamingTestExample");
     JavaStreamingContext ssc = new JavaStreamingContext(conf, batchDuration);
@@ -80,8 +80,8 @@ public class JavaStreamingTestExample {
         @Override
         public BinarySample call(String line) {
           String[] ts = line.split(",");
-          boolean label = Boolean.valueOf(ts[0]);
-          double value = Double.valueOf(ts[1]);
+          boolean label = Boolean.parseBoolean(ts[0]);
+          double value = Double.parseDouble(ts[1]);
           return new BinarySample(label, value);
         }
       });
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
index 3ac2ff494f..1065bb1047 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
@@ -441,9 +441,9 @@ private[sql] object PartitioningUtils {
       val c = path.charAt(i)
       if (c == '%' && i + 2 < path.length) {
         val code: Int = try {
-          Integer.valueOf(path.substring(i + 1, i + 3), 16)
-        } catch { case e: Exception =>
-          -1: Integer
+          Integer.parseInt(path.substring(i + 1, i + 3), 16)
+        } catch {
+          case _: Exception => -1
         }
         if (code >= 0) {
           sb.append(code.asInstanceOf[Char])
diff --git a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
index 4b36da309d..ee002f6223 100644
--- a/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
+++ b/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
@@ -345,7 +345,7 @@ object YarnSparkHadoopUtil {
   val RM_REQUEST_PRIORITY = Priority.newInstance(1)
 
   def get: YarnSparkHadoopUtil = {
-    val yarnMode = java.lang.Boolean.valueOf(
+    val yarnMode = java.lang.Boolean.parseBoolean(
       System.getProperty("SPARK_YARN_MODE", System.getenv("SPARK_YARN_MODE")))
     if (!yarnMode) {
       throw new SparkException("YarnSparkHadoopUtil is not available in non-YARN mode!")
--
GitLab
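
To make the rationale in the description concrete, here is a minimal,
self-contained Java sketch of the two patterns involved. It is illustrative
only and not part of the patch; the class name ParsePatternsDemo and all
local variable names are hypothetical.

    // ParsePatternsDemo.java -- hypothetical illustration, not part of the patch.
    public class ParsePatternsDemo {
      public static void main(String[] args) {
        // Long.valueOf(String) returns a boxed java.lang.Long that is
        // auto-unboxed on assignment; Long.parseLong(String) yields the
        // primitive long directly, with no boxed intermediary.
        long viaValueOf = Long.valueOf("1234");
        long viaParseLong = Long.parseLong("1234");

        // String concatenation inside append() compiles to a hidden
        // StringBuilder that produces a temporary String, which is then
        // copied into sb:
        StringBuilder sb = new StringBuilder();
        long value = 5;
        String unit = "hour";
        sb.append(" " + value + " " + unit + "s");

        // Chaining append() calls writes each piece straight into the
        // existing builder, avoiding the temporary objects:
        sb.append(' ').append(value).append(' ').append(unit).append('s');

        System.out.println(viaValueOf + " " + viaParseLong + sb);
      }
    }

The same reasoning applies to the Integer.parseInt, Boolean.parseBoolean,
and Double.parseDouble substitutions in the diff above: each parseXxx
method returns the primitive type, whereas valueOf returns the wrapper
object.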