carbondata-commits mailing list archives

From gvram...@apache.org
Subject [1/2] incubator-carbondata git commit: Fixed compilation issue caused by CARBONDATA-212 PR: https://github.com/apache/incubator-carbondata/pull/126
Date Thu, 15 Sep 2016 16:36:30 GMT
Repository: incubator-carbondata
Updated Branches:
  refs/heads/master 57ea8c5e8 -> 6224d127b


Fixed compilation issue caused by CARBONDATA-212
PR: https://github.com/apache/incubator-carbondata/pull/126


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/31b355f5
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/31b355f5
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/31b355f5

Branch: refs/heads/master
Commit: 31b355f558754a53ff86d74116d3d5429ed5512d
Parents: 57ea8c5
Author: kumarvishal <kumarvishal.1802@gmail.com>
Authored: Thu Sep 15 14:29:28 2016 +0530
Committer: Venkata Ramana G <ramana.gollamudi@huawei.com>
Committed: Thu Sep 15 21:59:20 2016 +0530

----------------------------------------------------------------------
 .../carbondata/hadoop/util/SchemaReader.java    |  1 -
 .../sql/CarbonDatasourceHadoopRelation.scala    | 85 +++++++++++---------
 2 files changed, 46 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/31b355f5/hadoop/src/main/java/org/apache/carbondata/hadoop/util/SchemaReader.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/util/SchemaReader.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/util/SchemaReader.java
index e8c088f..5d7e125 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/util/SchemaReader.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/util/SchemaReader.java
@@ -21,7 +21,6 @@ package org.apache.carbondata.hadoop.util;
 import java.io.IOException;
 
 import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier;
-import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
 import org.apache.carbondata.core.carbon.metadata.CarbonMetadata;
 import org.apache.carbondata.core.carbon.metadata.converter.SchemaConverter;
 import org.apache.carbondata.core.carbon.metadata.converter.ThriftWrapperSchemaConverterImpl;

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/31b355f5/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
index 7a8397c..bb83cee 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
@@ -20,20 +20,13 @@ package org.apache.spark.sql
 import java.text.SimpleDateFormat
 import java.util.Date
 
-import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier
-import org.apache.carbondata.core.carbon.path.{CarbonStorePath, CarbonTablePath}
-import org.apache.carbondata.hadoop.util.SchemaReader
-import org.apache.carbondata.hadoop.{CarbonInputFormat, CarbonInputSplit, CarbonProjection}
-import org.apache.carbondata.scan.expression.logical.AndExpression
-import org.apache.carbondata.spark.readsupport.SparkRowReadSupportImpl
-import org.apache.carbondata.spark.util.CarbonScalaUtil.CarbonSparkUtil
-import org.apache.carbondata.spark.util.QueryPlanUtil
-import org.apache.carbondata.spark.{CarbonFilters, CarbonOption}
+import scala.reflect.ClassTag
+
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.{FileStatus, Path}
 import org.apache.hadoop.mapred.JobConf
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
 import org.apache.hadoop.mapreduce.{Job, JobID}
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
 import org.apache.spark._
 import org.apache.spark.annotation.DeveloperApi
 import org.apache.spark.mapreduce.SparkHadoopMapReduceUtil
@@ -44,14 +37,23 @@ import org.apache.spark.sql.sources.{Filter, HadoopFsRelation, OutputWriterFacto
 import org.apache.spark.sql.types.StructType
 import org.apache.spark.util.SerializableConfiguration
 
-import scala.reflect.ClassTag
+import org.apache.carbondata.core.carbon.AbsoluteTableIdentifier
+import org.apache.carbondata.core.carbon.path.{CarbonStorePath, CarbonTablePath}
+import org.apache.carbondata.hadoop.{CarbonInputFormat, CarbonInputSplit, CarbonProjection}
+import org.apache.carbondata.hadoop.util.SchemaReader
+import org.apache.carbondata.scan.expression.logical.AndExpression
+import org.apache.carbondata.spark.{CarbonFilters, CarbonOption}
+import org.apache.carbondata.spark.readsupport.SparkRowReadSupportImpl
+import org.apache.carbondata.spark.util.CarbonScalaUtil.CarbonSparkUtil
+import org.apache.carbondata.spark.util.QueryPlanUtil
+
 
 private[sql] case class CarbonDatasourceHadoopRelation(
-    sqlContext: SQLContext,
-    paths: Array[String],
-    parameters: Map[String, String],
-    tableSchema: Option[StructType])
-    extends HadoopFsRelation {
+  sqlContext: SQLContext,
+  paths: Array[String],
+  parameters: Map[String, String],
+  tableSchema: Option[StructType])
+  extends HadoopFsRelation {
 
   lazy val schemaPath = new Path(CarbonTablePath.getSchemaFilePath(paths.head))
   if (!schemaPath.getFileSystem(new Configuration).exists(schemaPath)) {
@@ -65,7 +67,8 @@ private[sql] case class CarbonDatasourceHadoopRelation(
   lazy val relationRaw: CarbonRelation = {
     val carbonTable = SchemaReader.readCarbonTableFromStore(
       CarbonStorePath.getCarbonTablePath(absIdentifier.getStorePath, identifier),
-      absIdentifier)
+      absIdentifier
+    )
     if (carbonTable == null) {
       sys.error(s"CarbonData file path ${paths.head} is not valid")
     }
@@ -94,14 +97,14 @@ private[sql] case class CarbonDatasourceHadoopRelation(
   }
 
   override def buildScan(
-      requiredColumns: Array[String],
-      filters: Array[Filter],
-      inputFiles: Array[FileStatus]): RDD[Row] = {
+    requiredColumns: Array[String],
+    filters: Array[Filter],
+    inputFiles: Array[FileStatus]): RDD[Row] = {
     val conf = new Configuration(job.getConfiguration)
     filters.flatMap { filter =>
-        CarbonFilters.createCarbonFilter(dataSchema, filter)
-      }.reduceOption(new AndExpression(_, _))
-        .foreach(CarbonInputFormat.setFilterPredicates(conf, _))
+      CarbonFilters.createCarbonFilter(dataSchema, filter)
+    }.reduceOption(new AndExpression(_, _))
+      .foreach(CarbonInputFormat.setFilterPredicates(conf, _))
 
     val projection = new CarbonProjection
     requiredColumns.foreach(projection.addColumn)
@@ -119,8 +122,8 @@ private[sql] case class CarbonDatasourceHadoopRelation(
 }
 
 class CarbonHadoopFSPartition(rddId: Int, val idx: Int,
-    val carbonSplit: SerializableWritable[CarbonInputSplit])
-    extends Partition {
+  val carbonSplit: SerializableWritable[CarbonInputSplit])
+  extends Partition {
 
   override val index: Int = idx
 
@@ -128,14 +131,14 @@ class CarbonHadoopFSPartition(rddId: Int, val idx: Int,
 }
 
 class CarbonHadoopFSRDD[V: ClassTag](
-    @transient sc: SparkContext,
-    conf: SerializableConfiguration,
-    identifier: AbsoluteTableIdentifier,
-    inputFormatClass: Class[_ <: CarbonInputFormat[V]],
-    valueClass: Class[V])
-    extends RDD[V](sc, Nil)
-        with SparkHadoopMapReduceUtil
-        with Logging {
+  @transient sc: SparkContext,
+  conf: SerializableConfiguration,
+  identifier: AbsoluteTableIdentifier,
+  inputFormatClass: Class[_ <: CarbonInputFormat[V]],
+  valueClass: Class[V])
+  extends RDD[V](sc, Nil)
+    with SparkHadoopMapReduceUtil
+    with Logging {
 
   private val jobTrackerId: String = {
     val formatter = new SimpleDateFormat("yyyyMMddHHmm")
@@ -145,17 +148,20 @@ class CarbonHadoopFSRDD[V: ClassTag](
 
   @DeveloperApi
   override def compute(split: Partition,
-      context: TaskContext): Iterator[V] = {
+    context: TaskContext): Iterator[V] = {
     val attemptId = newTaskAttemptID(jobTrackerId, id, isMap = true, split.index, 0)
     val hadoopAttemptContext = newTaskAttemptContext(conf.value, attemptId)
     val inputFormat = QueryPlanUtil.createCarbonInputFormat(identifier,
-      hadoopAttemptContext.getConfiguration)
+      hadoopAttemptContext.getConfiguration
+    )
     hadoopAttemptContext.getConfiguration.set(FileInputFormat.INPUT_DIR, identifier.getStorePath)
     val reader =
       inputFormat.createRecordReader(split.asInstanceOf[CarbonHadoopFSPartition].carbonSplit.value,
-        hadoopAttemptContext)
+        hadoopAttemptContext
+      )
     reader.initialize(split.asInstanceOf[CarbonHadoopFSPartition].carbonSplit.value,
-      hadoopAttemptContext)
+      hadoopAttemptContext
+    )
     new Iterator[V] {
       private[this] var havePair = false
       private[this] var finished = false
@@ -187,11 +193,12 @@ class CarbonHadoopFSRDD[V: ClassTag](
   override protected def getPartitions: Array[Partition] = {
     val jobContext = newJobContext(conf.value, jobId)
     val carbonInputFormat = QueryPlanUtil.createCarbonInputFormat(identifier,
-      jobContext.getConfiguration)
+      jobContext.getConfiguration
+    )
     jobContext.getConfiguration.set(FileInputFormat.INPUT_DIR, identifier.getStorePath)
     val splits = carbonInputFormat.getSplits(jobContext).toArray
     val carbonInputSplits = splits
-        .map(f => new SerializableWritable(f.asInstanceOf[CarbonInputSplit]))
+      .map(f => new SerializableWritable(f.asInstanceOf[CarbonInputSplit]))
     carbonInputSplits.zipWithIndex.map(f => new CarbonHadoopFSPartition(id, f._2, f._1))
   }
 }
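
----------------------------------------------------------------------
The reformatted buildScan above keeps the filter-pushdown pattern intact: each Spark
Filter that CarbonFilters.createCarbonFilter can translate (it returns an Option) is
AND-ed into a single predicate via reduceOption(new AndExpression(_, _)) and then set
on the Hadoop configuration with CarbonInputFormat.setFilterPredicates. Below is a
minimal, self-contained sketch of that same combine-or-skip pattern in plain Scala;
Expr, Leaf, And, toCarbonExpr and combine are illustrative stand-ins, not Carbon or
Spark APIs.

sealed trait Expr
case class Leaf(name: String) extends Expr
case class And(left: Expr, right: Expr) extends Expr

object FilterPushdownSketch {

  // Stand-in for CarbonFilters.createCarbonFilter: not every filter can be
  // translated, hence the Option (untranslatable filters are simply not pushed down).
  def toCarbonExpr(filter: String): Option[Expr] =
    if (filter.nonEmpty) Some(Leaf(filter)) else None

  // Same shape as the buildScan code: translate what we can, AND the rest into
  // one predicate if at least one filter survived translation.
  def combine(filters: Seq[String]): Option[Expr] =
    filters
      .flatMap(toCarbonExpr)
      .reduceOption(And(_, _))

  def main(args: Array[String]): Unit = {
    // Prints Some(And(And(Leaf(a=1),Leaf(b>2)),Leaf(c<3))); the empty filter is dropped.
    println(combine(Seq("a=1", "", "b>2", "c<3")))
  }
}
----------------------------------------------------------------------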

