carbondata-commits mailing list archives

From chenliang...@apache.org
Subject [1/3] incubator-carbondata git commit: fix testcase
Date Wed, 04 Jan 2017 09:15:46 GMT
Repository: incubator-carbondata
Updated Branches:
  refs/heads/master b96604334 -> af956f533


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index ee96c35..09cde7b 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -20,6 +20,7 @@ package org.apache.spark.sql.catalyst
 import java.util.regex.{Matcher, Pattern}
 
 import scala.collection.JavaConverters._
+import scala.collection.mutable
 import scala.collection.mutable.{LinkedHashSet, Map}
 import scala.language.implicitConversions
 import scala.util.matching.Regex
@@ -223,7 +224,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   def prepareTableModel(ifNotExistPresent: Boolean, dbName: Option[String]
       , tableName: String, fields: Seq[Field],
       partitionCols: Seq[PartitionerField],
-      tableProperties: Map[String, String],
+      tableProperties: mutable.Map[String, String],
       bucketFields: Option[BucketFields]): TableModel = {
 
     fields.zipWithIndex.foreach { x =>
@@ -279,7 +280,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
    * @param tableProperties
    * @return
    */
-  protected def updateColumnGroupsInField(tableProperties: Map[String, String],
+  protected def updateColumnGroupsInField(tableProperties: mutable.Map[String, String],
       noDictionaryDims: Seq[String],
       msrs: Seq[Field],
       dims: Seq[Field]): Seq[String] = {
@@ -446,7 +447,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
     if (tableProperties.get("NO_INVERTED_INDEX").isDefined) {
       noInvertedIdxColsProps =
         tableProperties.get("NO_INVERTED_INDEX").get.split(',').map(_.trim)
-      noInvertedIdxColsProps.map { noInvertedIdxColProp =>
+      noInvertedIdxColsProps.foreach { noInvertedIdxColProp =>
           if (!fields.exists(x => x.column.equalsIgnoreCase(noInvertedIdxColProp))) {
             val errormsg = "NO_INVERTED_INDEX column: " + noInvertedIdxColProp +
                            " does not exist in table. Please check create table statement."
@@ -487,7 +488,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       dictExcludeCols =
         tableProperties.get(CarbonCommonConstants.DICTIONARY_EXCLUDE).get.split(',').map(_.trim)
       dictExcludeCols
-        .map { dictExcludeCol =>
+        .foreach { dictExcludeCol =>
           if (!fields.exists(x => x.column.equalsIgnoreCase(dictExcludeCol))) {
             val errormsg = "DICTIONARY_EXCLUDE column: " + dictExcludeCol +
                            " does not exist in table. Please check create table statement."
@@ -510,8 +511,8 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
     // All included cols should be there in create table cols
     if (tableProperties.get(CarbonCommonConstants.DICTIONARY_INCLUDE).isDefined) {
       dictIncludeCols =
-        tableProperties.get(CarbonCommonConstants.DICTIONARY_INCLUDE).get.split(",").map(_.trim)
-      dictIncludeCols.map { distIncludeCol =>
+        tableProperties(CarbonCommonConstants.DICTIONARY_INCLUDE).split(",").map(_.trim)
+      dictIncludeCols.foreach { distIncludeCol =>
         if (!fields.exists(x => x.column.equalsIgnoreCase(distIncludeCol.trim))) {
           val errormsg = "DICTIONARY_INCLUDE column: " + distIncludeCol.trim +
                          " does not exist in table. Please check create table statement."
@@ -531,21 +532,19 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
 
     // by default consider all String cols as dims and if any dictionary exclude is present then
     // add it to noDictionaryDims list. consider all dictionary excludes/include cols as dims
-    fields.foreach(field => {
-
+    fields.foreach { field =>
       if (dictExcludeCols.toSeq.exists(x => x.equalsIgnoreCase(field.column))) {
         val dataType = DataTypeUtil.getDataType(field.dataType.get.toUpperCase())
-        if (dataType != DataType.TIMESTAMP && dataType != DataType.DATE ) {
+        if (dataType != DataType.TIMESTAMP && dataType != DataType.DATE) {
           noDictionaryDims :+= field.column
         }
         dimFields += field
       } else if (dictIncludeCols.exists(x => x.equalsIgnoreCase(field.column))) {
-        dimFields += (field)
+        dimFields += field
       } else if (isDetectAsDimentionDatatype(field.dataType.get)) {
-        dimFields += (field)
+        dimFields += field
       }
     }
-    )
 
     (dimFields.toSeq, noDictionaryDims)
   }
@@ -700,7 +699,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       case Token("TOK_TABLEPROPLIST", list) =>
         list.map {
           case Token("TOK_TABLEPROPERTY", Token(key, Nil) :: Token(value, Nil) :: Nil) =>
-            (unquoteString(key) -> unquoteString(value))
+            unquoteString(key) -> unquoteString(value)
         }
     }
   }
@@ -747,7 +746,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
     }
 
     if (options.exists(_._1.equalsIgnoreCase("MAXCOLUMNS"))) {
-      val maxColumns: String = options.get("maxcolumns").get(0)._2
+      val maxColumns: String = options.get("maxcolumns").get.head._2
       try {
         maxColumns.toInt
       } catch {
@@ -773,7 +772,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   }
 
   protected lazy val dbTableIdentifier: Parser[Seq[String]] =
-    (ident <~ ".").? ~ (ident) ^^ {
+    (ident <~ ".").? ~ ident ^^ {
       case databaseName ~ tableName =>
         if (databaseName.isDefined) {
           Seq(databaseName.get, tableName)
@@ -819,13 +818,13 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
                                                  primitiveFieldType
 
   lazy val anyFieldDef: Parser[Field] =
-    (ident | stringLit) ~ ((":").? ~> nestedType) ~ (IN ~> (ident | stringLit)).? ^^ {
+    (ident | stringLit) ~ (":".? ~> nestedType) ~ (IN ~> (ident | stringLit)).? ^^ {
       case e1 ~ e2 ~ e3 =>
         Field(e1, e2.dataType, Some(e1), e2.children, null, e3)
     }
 
   protected lazy val primitiveFieldType: Parser[Field] =
-    (primitiveTypes) ^^ {
+    primitiveTypes ^^ {
       case e1 =>
         Field("unknown", Some(e1), Some("unknown"), Some(null))
     }
@@ -898,7 +897,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       // checking if the nested data type contains the child type as decimal(10,0),
       // if it is present then extracting the precision and scale. resetting the data type
       // with Decimal.
-      case _ if (dataType.startsWith("decimal")) =>
+      case _ if dataType.startsWith("decimal") =>
         val (precision, scale) = getScaleAndPrecision(dataType)
         Field(field.column,
           Some("Decimal"),

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonTableIdentifierImplicit.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonTableIdentifierImplicit.scala
b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonTableIdentifierImplicit.scala
index d607523..55441cf 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonTableIdentifierImplicit.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonTableIdentifierImplicit.scala
@@ -21,7 +21,7 @@ package org.apache.spark.sql.catalyst
  * Implicit functions for [TableIdentifier]
  */
 object CarbonTableIdentifierImplicit {
-  def apply(tableName: String): TableIdentifier = new TableIdentifier(tableName)
+  def apply(tableName: String): TableIdentifier = TableIdentifier(tableName)
 
   implicit def toTableIdentifier(tableIdentifier: Seq[String]): TableIdentifier = {
     tableIdentifier match {

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 1bfd0c7..1f8af66 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -219,7 +219,6 @@ object CarbonDataRDDFactory {
     val executor: ExecutorService = Executors.newFixedThreadPool(1)
     // update the updated table status.
     CommonUtil.readLoadMetadataDetails(carbonLoadModel, storePath)
-    var segList: util.List[LoadMetadataDetails] = carbonLoadModel.getLoadMetadataDetails
 
     // clean up of the stale segments.
     try {

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
index a534ab0..20569df 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.sql
 
+import java.util
 import java.util.LinkedHashSet
 
 import scala.collection.JavaConverters._
@@ -205,7 +206,7 @@ case class CarbonRelation(
   }
 
   val dimensionsAttr = {
-    val sett = new LinkedHashSet(
+    val sett = new util.LinkedHashSet(
       tableMeta.carbonTable.getDimensionByTableName(tableMeta.carbonTableIdentifier.getTableName)
           .asScala.asJava)
     sett.asScala.toSeq.filter(!_.getColumnSchema.isInvisible).map(dim => {
@@ -231,7 +232,7 @@ case class CarbonRelation(
 
   val measureAttr = {
     val factTable = tableMeta.carbonTable.getFactTableName
-    new LinkedHashSet(
+    new util.LinkedHashSet(
       tableMeta.carbonTable.
           getMeasureByTableName(tableMeta.carbonTable.getFactTableName).
           asScala.asJava).asScala.toSeq.filter(!_.getColumnSchema.isInvisible)
@@ -247,9 +248,8 @@ case class CarbonRelation(
   }
 
   override val output = {
-    val factTable = tableMeta.carbonTable.getFactTableName
-    var columns = tableMeta.carbonTable.getCreateOrderColumn(tableMeta.carbonTable.getFactTableName)
-      .asScala
+    val columns = tableMeta.carbonTable.getCreateOrderColumn(tableMeta.carbonTable.getFactTableName)
+        .asScala
     columns.filter(!_.getColumnSchema.isInvisible).map { column =>
       if (column.isDimesion()) {
         val output: DataType = column.getDataType.toString.toLowerCase match {

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala
b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala
index dfc1bc9..11c1726 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala
@@ -174,7 +174,7 @@ case class CarbonDictionaryDecoder(
           dictionaryTaskCleaner.addTaskCompletionListener(context =>
             dicts.foreach { dictionary =>
               if (null != dictionary) {
-                dictionary.clear
+                dictionary.clear()
               }
             }
           )

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/main/scala/org/apache/spark/sql/CarbonScan.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonScan.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonScan.scala
index bdc223a..c351c30 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonScan.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonScan.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.sql
 
+import java.util
 import java.util.ArrayList
 
 import scala.collection.JavaConverters._
@@ -51,7 +52,7 @@ case class CarbonScan(
   val buildCarbonPlan: CarbonQueryPlan = {
     val plan: CarbonQueryPlan = new CarbonQueryPlan(relationRaw.databaseName, relationRaw.tableName)
 
-    plan.setSortedDimemsions(new ArrayList[QueryDimension])
+    plan.setSortedDimemsions(new util.ArrayList[QueryDimension])
 
     plan.setOutLocationPath(
       CarbonProperties.getInstance().getProperty(CarbonCommonConstants.STORE_LOCATION_HDFS))

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/main/scala/org/apache/spark/sql/SparkUnknownExpression.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/SparkUnknownExpression.scala
b/integration/spark/src/main/scala/org/apache/spark/sql/SparkUnknownExpression.scala
index bf8d9b5..dd51184 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/SparkUnknownExpression.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/SparkUnknownExpression.scala
@@ -17,12 +17,13 @@
 
 package org.apache.spark.sql
 
-import java.util.{ArrayList, List}
+import java.util
 
 import scala.collection.JavaConverters._
 
 import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.expressions.{Expression => SparkExpression, GenericMutableRow}
+import org.apache.spark.sql.catalyst.expressions.{Expression => SparkExpression}
+import org.apache.spark.sql.catalyst.expressions.GenericMutableRow
 
 import org.apache.carbondata.core.carbon.metadata.encoder.Encoding
 import org.apache.carbondata.scan.expression.{ColumnExpression, ExpressionResult, UnknownExpression}
@@ -120,7 +121,7 @@ class SparkUnknownExpression(var sparkExp: SparkExpression)
 
 
   def getAllColumnListFromExpressionTree(sparkCurrentExp: SparkExpression,
-      list: List[ColumnExpression]): List[ColumnExpression] = {
+      list: util.List[ColumnExpression]): util.List[ColumnExpression] = {
     sparkCurrentExp match {
       case carbonBoundRef: CarbonBoundReference => list.add(carbonBoundRef.colExp)
       case _ => sparkCurrentExp.children.foreach(getColumnListFromExpressionTree(_, list))
@@ -129,7 +130,7 @@ class SparkUnknownExpression(var sparkExp: SparkExpression)
   }
 
   def isDirectDictionaryColumns: Boolean = {
-    val lst = new ArrayList[ColumnExpression]()
+    val lst = new util.ArrayList[ColumnExpression]()
     getAllColumnListFromExpressionTree(sparkExp, lst)
     if (lst.get(0).getCarbonColumn.hasEncoding(Encoding.DIRECT_DICTIONARY)) {
       true

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 30978e4..62ea24d 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -238,7 +238,7 @@ private[sql] case class LoadTableByInsert(relation: CarbonDatasourceRelation,
       relation.carbonRelation.tableName,
       null,
       Seq(),
-      scala.collection.immutable.Map(("fileheader" -> header)),
+      scala.collection.immutable.Map("fileheader" -> header),
       false,
       null,
       Some(df)).run(sqlContext)
@@ -563,7 +563,7 @@ case class LoadTable(
         throw new MalformedCarbonCommandException("Error: Option DateFormat is set an empty " +
                                                   "string.")
       } else {
-        var dateFormats: Array[String] = dateFormat.split(CarbonCommonConstants.COMMA)
+        val dateFormats: Array[String] = dateFormat.split(CarbonCommonConstants.COMMA)
         for (singleDateFormat <- dateFormats) {
           val dateFormatSplits: Array[String] = singleDateFormat.split(":", 2)
           val columnName = dateFormatSplits(0).trim.toLowerCase
@@ -669,7 +669,6 @@ private[sql] case class DescribeCommandFormatted(
           relation.tableMeta.carbonTableIdentifier.getTableName,
           field.name)
         if (null != dimension.getColumnProperties && dimension.getColumnProperties.size() > 0) {
-          val colprop = mapper.writeValueAsString(dimension.getColumnProperties)
           colProps.append(field.name).append(".")
             .append(mapper.writeValueAsString(dimension.getColumnProperties))
             .append(",")
@@ -681,7 +680,7 @@ private[sql] case class DescribeCommandFormatted(
           "KEY COLUMN"
         }
       } else {
-        ("MEASURE")
+        "MEASURE"
       }
       (field.name, field.dataType.simpleString, comment)
     }
@@ -723,7 +722,7 @@ private[sql] case class DescribeCommandFormatted(
       colGroups._2.map(dim => dim.getColName).mkString(", ")
     })
     var index = 1
-    groups.map { x =>
+    groups.foreach { x =>
       results = results :+ (s"Column Group $index", x, "")
       index = index + 1
     }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
index 1d52762..481ce54 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala
@@ -399,13 +399,12 @@ class ResolveCarbonFunctions(relations: Seq[CarbonDecoderRelation])
                   attrsOnProjects.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
               }
           }
-          wd.windowExpressions.map {
-            case others =>
-              others.collect {
-                case attr: AttributeReference
-                  if isDictionaryEncoded(attr, attrMap, aliasMap) =>
-                  attrsOnProjects.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
-              }
+          wd.windowExpressions.map { others =>
+            others.collect {
+              case attr: AttributeReference
+                if isDictionaryEncoded(attr, attrMap, aliasMap) =>
+                attrsOnProjects.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
+            }
           }
           wd.partitionSpec.map{
             case attr: AttributeReference =>
@@ -617,7 +616,7 @@ class ResolveCarbonFunctions(relations: Seq[CarbonDecoderRelation])
       case a@Alias(exp, name) =>
         exp match {
           case attr: Attribute => aliasMap.put(a.toAttribute, attr)
-          case _ => aliasMap.put(a.toAttribute, new AttributeReference("", StringType)())
+          case _ => aliasMap.put(a.toAttribute, AttributeReference("", StringType)())
         }
         a
     }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBlockletBoundryTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBlockletBoundryTest.scala
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBlockletBoundryTest.scala
index f7056a9..71b4247 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBlockletBoundryTest.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBlockletBoundryTest.scala
@@ -97,7 +97,8 @@ class DataCompactionBlockletBoundryTest extends QueryTest with BeforeAndAfterAll
   )
 
   override def afterAll {
-    /* sql("drop table blocklettest") */
+    sql("drop table if exists  blocklettest")
+    sql("drop table if exists  Carbon_automation_hive")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
     CarbonProperties.getInstance()

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBoundaryConditionsTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBoundaryConditionsTest.scala
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBoundaryConditionsTest.scala
index f5039a7..97a5bdd 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBoundaryConditionsTest.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionBoundaryConditionsTest.scala
@@ -95,6 +95,7 @@ class DataCompactionBoundaryConditionsTest extends QueryTest with BeforeAndAfter
 
 
   override def afterAll {
+    sql("drop table if exists  boundarytest")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
   }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
index f77ec9b..e1e1412 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
@@ -37,7 +37,7 @@ import scala.collection.JavaConverters._
 class DataCompactionCardinalityBoundryTest extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    CarbonProperties.getInstance().addProperty("carbon.enable.load.merge", "true")
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
     sql("drop table if exists  cardinalityTest")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
@@ -62,15 +62,9 @@ class DataCompactionCardinalityBoundryTest extends QueryTest with BeforeAndAfter
     sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE cardinalityTest OPTIONS"
+
       "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
     )
-    CarbonProperties.getInstance().addProperty("carbon.enable.load.merge", "true")
     sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE cardinalityTest  OPTIONS"
+
       "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
     )
-    CarbonProperties.getInstance().addProperty("carbon.enable.load.merge", "true")
-    System.out
-      .println("load merge status is " + CarbonProperties.getInstance()
-        .getProperty("carbon.enable.load.merge")
-      )
     // compaction will happen here.
     sql("LOAD DATA LOCAL INPATH '" + csvFilePath3 + "' INTO TABLE cardinalityTest  OPTIONS"
+
       "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
@@ -125,10 +119,10 @@ class DataCompactionCardinalityBoundryTest extends QueryTest with BeforeAndAfter
   }
 
   override def afterAll {
-    /* sql("drop table cardinalityTest") */
+    sql("drop table if exists  cardinalityTest")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-    CarbonProperties.getInstance().addProperty("carbon.enable.load.merge", "false")
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
index 7ec6431..0b228c5 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
@@ -54,7 +54,6 @@ class DataCompactionLockTest extends QueryTest with BeforeAndAfterAll {
     CarbonLockFactory
       .getCarbonLockObj(absoluteTableIdentifier.getCarbonTableIdentifier, LockUsage.COMPACTION_LOCK)
 
-
   override def beforeAll {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.ENABLE_CONCURRENT_COMPACTION, "true")
@@ -116,7 +115,7 @@ class DataCompactionLockTest extends QueryTest with BeforeAndAfterAll {
 
 
   override def afterAll {
-    /* sql("drop table compactionlocktesttable") */
+    sql("drop table if exists  compactionlocktesttable")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
     carbonLock.unlock()

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
index 570bb72..00d295a 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
@@ -54,6 +54,7 @@ class DataCompactionNoDictionaryTest extends QueryTest with BeforeAndAfterAll {
   override def beforeAll {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
+    sql("DROP TABLE IF EXISTS nodictionaryCompaction")
     sql(
       "CREATE TABLE nodictionaryCompaction (country String, ID Int, date Timestamp, name
" +
         "String, " +
@@ -168,10 +169,10 @@ class DataCompactionNoDictionaryTest extends QueryTest with BeforeAndAfterAll {
   }
 
   override def afterAll {
-    sql("drop table nodictionaryCompaction")
+    sql("DROP TABLE IF EXISTS nodictionaryCompaction")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-    CarbonProperties.getInstance().addProperty("carbon.enable.load.merge", "false")
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
index 137cebc..3c18bb7 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
@@ -38,7 +38,7 @@ import org.apache.carbondata.lcm.status.SegmentStatusManager
 class DataCompactionTest extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    CarbonProperties.getInstance().addProperty("carbon.enable.load.merge", "true")
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
     sql("drop table if exists  normalcompaction")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
@@ -60,15 +60,9 @@ class DataCompactionTest extends QueryTest with BeforeAndAfterAll {
     sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE normalcompaction OPTIONS"
+
       "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
     )
-    CarbonProperties.getInstance().addProperty("carbon.enable.load.merge", "true")
     sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE normalcompaction  OPTIONS"
+
       "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
     )
-    CarbonProperties.getInstance().addProperty("carbon.enable.load.merge", "true")
-    System.out
-      .println("load merge status is " + CarbonProperties.getInstance()
-        .getProperty("carbon.enable.load.merge")
-      )
     // compaction will happen here.
     sql("LOAD DATA LOCAL INPATH '" + csvFilePath3 + "' INTO TABLE normalcompaction  OPTIONS"
+
       "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
@@ -166,10 +160,10 @@ class DataCompactionTest extends QueryTest with BeforeAndAfterAll {
   }
 
   override def afterAll {
-    /* sql("drop table normalcompaction") */
+    sql("drop table if exists  normalcompaction")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-    CarbonProperties.getInstance().addProperty("carbon.enable.load.merge", "false")
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
   }
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionIgnoreInMinorTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionIgnoreInMinorTest.scala
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionIgnoreInMinorTest.scala
index 37ce089..4e3d2f3 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionIgnoreInMinorTest.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionIgnoreInMinorTest.scala
@@ -200,6 +200,7 @@ class MajorCompactionIgnoreInMinorTest extends QueryTest with BeforeAndAfterAll
   }
 
   override def afterAll {
+    sql("drop table if exists  ignoremajor")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
   }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionStopsAfterCompaction.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionStopsAfterCompaction.scala
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionStopsAfterCompaction.scala
index 25087a7..f5ff191 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionStopsAfterCompaction.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionStopsAfterCompaction.scala
@@ -135,6 +135,7 @@ class MajorCompactionStopsAfterCompaction extends QueryTest with BeforeAndAfterA
   }
 
   override def afterAll {
+    sql("drop table if exists  stopmajor")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
   }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala
b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala
index c1fdfe1..c375f23 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala
@@ -44,8 +44,7 @@ object DictionaryTestCaseUtil {
       dimension.getColumnIdentifier, dimension.getDataType
     )
     val dict = CarbonLoaderUtil.getDictionary(columnIdentifier,
-      CarbonHiveContext.hdfsCarbonBasePath
-    )
+      CarbonHiveContext.hdfsCarbonBasePath)
     assert(dict.getSurrogateKey(value) != CarbonCommonConstants.INVALID_SURROGATE_KEY)
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/7b8b1959/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 0f32ad9..96c4a08 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -216,7 +216,6 @@ object CarbonDataRDDFactory {
     val executor: ExecutorService = Executors.newFixedThreadPool(1)
     // update the updated table status.
     CommonUtil.readLoadMetadataDetails(carbonLoadModel, storePath)
-    var segList: util.List[LoadMetadataDetails] = carbonLoadModel.getLoadMetadataDetails
 
     // clean up of the stale segments.
     try {

