spark-issues mailing list archives

From "Reynold Xin (JIRA)" <j...@apache.org>
Subject [jira] [Commented] (SPARK-1436) Compression code broke in-memory store
Date Mon, 07 Apr 2014 19:13:14 GMT

    [ https://issues.apache.org/jira/browse/SPARK-1436?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13962155#comment-13962155 ]

Reynold Xin commented on SPARK-1436:
------------------------------------

Try running the following code:

{code}

package org.apache.spark.sql

import org.apache.spark.sql.test.TestSQLContext._
import org.apache.spark.sql.catalyst.util._

case class Data(a: Int, b: Long)

object AggregationBenchmark {
  def main(args: Array[String]): Unit = {
    val rdd =
      sparkContext.parallelize(1 to 20).flatMap(_ => (1 to 500000).map(i => Data(i % 100, i)))
    rdd.registerAsTable("data")
    cacheTable("data")

    (1 to 10).foreach { i =>
      println(s"=== ITERATION $i ===")

      benchmark { println("SELECT COUNT() FROM data:" + sql("SELECT COUNT(*) FROM data").collect().head)
}

      println("SELECT a, SUM(b) FROM data GROUP BY a")
      benchmark { sql("SELECT a, SUM(b) FROM data GROUP BY a").count() }

      println("SELECT SUM(b) FROM data")
      benchmark { sql("SELECT SUM(b) FROM data").count() }
    }
  }
}
{code}
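
For reference, the {{benchmark}} helper imported from org.apache.spark.sql.catalyst.util._ is a simple wall-clock timer around the block it wraps. A minimal sketch of an equivalent helper (an assumption; the actual catalyst implementation may differ in output format) is:

{code}
// Hypothetical stand-in for the benchmark helper: runs the block once
// and prints the elapsed wall-clock time in milliseconds.
def benchmark[A](f: => A): A = {
  val start = System.nanoTime()
  val result = f
  println(s"Elapsed: ${(System.nanoTime() - start) / 1e6} ms")
  result
}
{code}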

The following exception is thrown:
{code}
java.nio.BufferUnderflowException
	at java.nio.Buffer.nextGetIndex(Buffer.java:498)
	at java.nio.HeapByteBuffer.getInt(HeapByteBuffer.java:355)
	at org.apache.spark.sql.columnar.ColumnAccessor$.apply(ColumnAccessor.scala:103)
	at org.apache.spark.sql.columnar.InMemoryColumnarTableScan$$anonfun$execute$1$$anon$1$$anonfun$3.apply(InMemoryColumnarTableScan.scala:61)
	at org.apache.spark.sql.columnar.InMemoryColumnarTableScan$$anonfun$execute$1$$anon$1$$anonfun$3.apply(InMemoryColumnarTableScan.scala:61)
	at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
	at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
	at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:108)
	at scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
	at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:108)
	at org.apache.spark.sql.columnar.InMemoryColumnarTableScan$$anonfun$execute$1$$anon$1.<init>(InMemoryColumnarTableScan.scala:61)
	at org.apache.spark.sql.columnar.InMemoryColumnarTableScan$$anonfun$execute$1.apply(InMemoryColumnarTableScan.scala:60)
	at org.apache.spark.sql.columnar.InMemoryColumnarTableScan$$anonfun$execute$1.apply(InMemoryColumnarTableScan.scala:56)
	at org.apache.spark.rdd.RDD$$anonfun$3.apply(RDD.scala:504)
	at org.apache.spark.rdd.RDD$$anonfun$3.apply(RDD.scala:504)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:229)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:220)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:229)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:220)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:229)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:220)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:161)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:102)
	at org.apache.spark.scheduler.Task.run(Task.scala:52)
	at org.apache.spark.executor.Executor$TaskRunner$$anonfun$run$1.apply$mcV$sp(Executor.scala:211)
	at org.apache.spark.deploy.SparkHadoopUtil.runAsUser(SparkHadoopUtil.scala:46)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:176)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:744)
14/04/07 12:07:38 WARN TaskSetManager: Lost TID 3 (task 4.0:0)
14/04/07 12:07:38 WARN TaskSetManager: Loss was due to java.nio.BufferUnderflowException

{code}
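
The trace shows ColumnAccessor$.apply calling ByteBuffer.getInt past the end of the cached column buffer, which is how a BufferUnderflowException arises: the read side expects more header or data bytes than the compression path actually wrote. A minimal sketch of that failure mode (plain JVM code, not the actual Spark classes; the header layout is a hypothetical illustration) is:

{code}
import java.nio.ByteBuffer

// Hypothetical illustration of a writer/reader header mismatch:
// the writer emits one header Int (say, a column type id), while the
// reader assumes two (say, type id plus a compression scheme id).
object UnderflowDemo extends App {
  val buf = ByteBuffer.allocate(4)
  buf.putInt(0)  // writer: single header field
  buf.flip()     // switch from writing to reading

  buf.getInt()   // reader: first header field -- fine
  buf.getInt()   // second field -> java.nio.BufferUnderflowException
}
{code}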

> Compression code broke in-memory store
> --------------------------------------
>
>                 Key: SPARK-1436
>                 URL: https://issues.apache.org/jira/browse/SPARK-1436
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 1.0.0
>            Reporter: Reynold Xin
>            Assignee: Cheng Lian
>            Priority: Blocker
>             Fix For: 1.0.0
>
>
> See my following comment...



--
This message was sent by Atlassian JIRA
(v6.2#6252)
