carbondata-commits mailing list archives

From jack...@apache.org
Subject [21/38] incubator-carbondata git commit: reuse test case for integration module
Date Sat, 07 Jan 2017 16:36:55 GMT
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common-test/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
b/integration/spark-common-test/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
new file mode 100644
index 0000000..9912ec4
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.common.util
+
+import java.util.{Locale, TimeZone}
+
+import org.apache.carbondata.common.logging.LogServiceFactory
+import scala.collection.JavaConversions._
+
+import org.apache.spark.sql.catalyst.plans._
+import org.apache.spark.sql.catalyst.util._
+import org.apache.spark.sql.test.TestQueryExecutor
+import org.apache.spark.sql.{DataFrame, Row, SQLContext}
+
+class QueryTest extends PlanTest {
+
+  val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+
+  // Timezone is fixed to America/Los_Angeles for those timezone sensitive tests (timestamp_*)
+  TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"))
+  // Add Locale setting
+  Locale.setDefault(Locale.US)
+
+  /**
+   * Runs the plan and makes sure the answer either contains all of the keywords or
+   * contains none of them.
+   * @param df the [[DataFrame]] to be executed
+   * @param exists true to make sure the keywords are listed in the output, false to
+   *               make sure none of the keywords appear in the output
+   * @param keywords the keywords to check for
+   */
+  def checkExistence(df: DataFrame, exists: Boolean, keywords: String*) {
+    val outputs = df.collect().map(_.mkString).mkString
+    for (key <- keywords) {
+      if (exists) {
+        assert(outputs.contains(key), s"Failed for $df ($key doesn't exist in result)")
+      } else {
+        assert(!outputs.contains(key), s"Failed for $df ($key existed in the result)")
+      }
+    }
+  }
+
+  def sqlTest(sqlString: String, expectedAnswer: Seq[Row])(implicit sqlContext: SQLContext) {
+    test(sqlString) {
+      checkAnswer(sqlContext.sql(sqlString), expectedAnswer)
+    }
+  }
+
+  /**
+   * Runs the plan and makes sure the answer matches the expected result.
+   * @param df the [[DataFrame]] to be executed
+   * @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s.
+   */
+  protected def checkAnswer(df: DataFrame, expectedAnswer: Seq[Row]): Unit = {
+    QueryTest.checkAnswer(df, expectedAnswer) match {
+      case Some(errorMessage) => fail(errorMessage)
+      case None =>
+    }
+  }
+
+  protected def checkAnswer(df: DataFrame, expectedAnswer: Row): Unit = {
+    checkAnswer(df, Seq(expectedAnswer))
+  }
+
+  protected def checkAnswer(df: DataFrame, expectedAnswer: DataFrame): Unit = {
+    checkAnswer(df, expectedAnswer.collect())
+  }
+
+  def sql(sqlText: String): DataFrame = TestQueryExecutor.INSTANCE.sql(sqlText)
+
+  val sqlContext: SQLContext = TestQueryExecutor.INSTANCE.sqlContext
+
+  val storeLocation = TestQueryExecutor.storeLocation
+  val resourcesPath = TestQueryExecutor.resourcesPath
+  val integrationPath = TestQueryExecutor.integrationPath
+}
+
+object QueryTest {
+
+  def checkAnswer(df: DataFrame, expectedAnswer: java.util.List[Row]): String = {
+    checkAnswer(df, expectedAnswer.toSeq) match {
+      case Some(errorMessage) => errorMessage
+      case None => null
+    }
+  }
+
+  /**
+   * Runs the plan and makes sure the answer matches the expected result.
+   * If there was an exception during the execution, or the contents of the DataFrame do not
+   * match the expected result, an error message will be returned. Otherwise, a [[None]] will
+   * be returned.
+   * @param df the [[DataFrame]] to be executed
+   * @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s.
+   */
+  def checkAnswer(df: DataFrame, expectedAnswer: Seq[Row]): Option[String] = {
+    val isSorted = df.logicalPlan.collect { case s: logical.Sort => s }.nonEmpty
+    def prepareAnswer(answer: Seq[Row]): Seq[Row] = {
+      // Converts data to types that we can compare for equality using Scala collections.
+      // For BigDecimal type, the Scala type has a better definition of equality test (similar to
+      // Java's java.math.BigDecimal.compareTo).
+      // For binary arrays, we convert them to Seq to avoid calling java.util.Arrays.equals for
+      // the equality test.
+      val converted: Seq[Row] = answer.map { s =>
+        Row.fromSeq(s.toSeq.map {
+          case d: java.math.BigDecimal => BigDecimal(d)
+          case b: Array[Byte] => b.toSeq
+          case o => o
+        })
+      }
+      if (!isSorted) converted.sortBy(_.toString()) else converted
+    }
+    val sparkAnswer = try df.collect().toSeq catch {
+      case e: Exception =>
+        val errorMessage =
+          s"""
+             |Exception thrown while executing query:
+             |${df.queryExecution}
+             |== Exception ==
+             |$e
+             |${org.apache.spark.sql.catalyst.util.stackTraceToString(e)}
+          """.stripMargin
+        return Some(errorMessage)
+    }
+
+    if (prepareAnswer(expectedAnswer) != prepareAnswer(sparkAnswer)) {
+      val errorMessage =
+        s"""
+           |Results do not match for query:
+           |${df.queryExecution}
+           |== Results ==
+           |${
+          sideBySide(
+            s"== Correct Answer - ${expectedAnswer.size} ==" +:
+              prepareAnswer(expectedAnswer).map(_.toString()),
+            s"== Spark Answer - ${sparkAnswer.size} ==" +:
+              prepareAnswer(sparkAnswer).map(_.toString())).mkString("\n")
+        }
+      """.stripMargin
+      return Some(errorMessage)
+    }
+
+    return None
+  }
+}
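
Note on usage: suites in spark-common-test extend this QueryTest and go only through sql()/checkAnswer(), so the same test source can run against whichever engine registers a TestQueryExecutor (see the next files in this commit). Below is a minimal illustrative sketch, not part of the commit: the table name, CSV file and expected row are hypothetical, and it assumes the ScalaTest-style test() and BeforeAndAfterAll available through PlanTest.

  import org.apache.spark.sql.Row
  import org.apache.spark.sql.common.util.QueryTest
  import org.scalatest.BeforeAndAfterAll

  class SampleQueryTestCase extends QueryTest with BeforeAndAfterAll {

    override def beforeAll(): Unit = {
      sql("DROP TABLE IF EXISTS sample_table")
      sql("CREATE TABLE sample_table (id int, name string) STORED BY 'carbondata'")
      // resourcesPath is provided by QueryTest; sample.csv is a placeholder file name
      sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/sample.csv' INTO TABLE sample_table")
    }

    test("filter by id returns the matching name") {
      // the expected value depends on the placeholder CSV contents
      checkAnswer(
        sql("SELECT name FROM sample_table WHERE id = 1"),
        Seq(Row("alice")))
    }

    override def afterAll(): Unit = {
      sql("DROP TABLE IF EXISTS sample_table")
    }
  }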

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common/pom.xml
----------------------------------------------------------------------
diff --git a/integration/spark-common/pom.xml b/integration/spark-common/pom.xml
index 4bb4e0a..efe35ae 100644
--- a/integration/spark-common/pom.xml
+++ b/integration/spark-common/pom.xml
@@ -83,7 +83,6 @@
   </dependencies>
 
   <build>
-    <testSourceDirectory>src/test/scala</testSourceDirectory>
     <resources>
       <resource>
         <directory>src/resources</directory>
@@ -130,55 +129,7 @@
           <target>1.7</target>
         </configuration>
       </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <version>2.18</version>
-        <!-- Note config is repeated in scalatest config -->
-        <configuration>
-          <includes>
-            <include>**/Test*.java</include>
-            <include>**/*Test.java</include>
-            <include>**/*TestCase.java</include>
-            <include>**/*Suite.java</include>
-          </includes>
-          <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
-          <argLine>-Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
-          <systemProperties>
-            <java.awt.headless>true</java.awt.headless>
-          </systemProperties>
-          <failIfNoTests>false</failIfNoTests>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.scalatest</groupId>
-        <artifactId>scalatest-maven-plugin</artifactId>
-        <version>1.0</version>
-        <!-- Note config is repeated in surefire config -->
-        <configuration>
-          <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
-          <junitxml>.</junitxml>
-          <filereports>CarbonTestSuite.txt</filereports>
-          <argLine>-ea -Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m
-          </argLine>
-          <stderr />
-          <environmentVariables>
-          </environmentVariables>
-          <systemProperties>
-            <java.awt.headless>true</java.awt.headless>
-            <use.kettle>${use.kettle}</use.kettle>
-          </systemProperties>
-        </configuration>
-        <executions>
-          <execution>
-            <id>test</id>
-            <goals>
-              <goal>test</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
     </plugins>
   </build>
 
-</project>
\ No newline at end of file
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 7336440..b581a9f 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -789,7 +789,6 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       case _ => ("", "")
     }
 
-
   protected lazy val dimCol: Parser[Field] = anyFieldDef
 
   protected lazy val primitiveTypes =
@@ -919,10 +918,10 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
     field.dataType.getOrElse("NIL") match {
       case "Array" => Field(field.column, Some("Array"), field.name,
         field.children.map(f => f.map(appendParentForEachChild(_, field.column))), field.parent,
-        field.storeType, field.schemaOrdinal)
+        field.storeType, field.schemaOrdinal, rawSchema = field.rawSchema)
       case "Struct" => Field(field.column, Some("Struct"), field.name,
         field.children.map(f => f.map(appendParentForEachChild(_, field.column))), field.parent,
-        field.storeType, field.schemaOrdinal)
+        field.storeType, field.schemaOrdinal, rawSchema = field.rawSchema)
       case _ => field
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala
b/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala
new file mode 100644
index 0000000..1c0bc61
--- /dev/null
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.test
+
+import java.io.File
+import java.util.ServiceLoader
+
+import org.apache.spark.sql.{DataFrame, SQLContext}
+import org.apache.spark.util.Utils
+
+import org.apache.carbondata.common.logging.LogServiceFactory
+
+/**
+ * The SQL executor interface used by the spark-common-test module.
+ */
+trait TestQueryExecutorRegister {
+  def sql(sqlText: String): DataFrame
+
+  def sqlContext: SQLContext
+}
+
+object TestQueryExecutor {
+
+  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+
+  val projectPath = new File(this.getClass.getResource("/").getPath + "../../../..")
+    .getCanonicalPath
+  LOGGER.info(s"project path: $projectPath")
+  val integrationPath = s"$projectPath/integration"
+  val resourcesPath = s"$integrationPath/spark-common-test/src/test/resources"
+  val storeLocation = s"$integrationPath/spark-common/target/store"
+  val warehouse = s"$integrationPath/spark-common/target/warehouse"
+  val metastoredb = s"$integrationPath/spark-common/target/metastore_db"
+  val kettleHome = s"$projectPath/processing/carbonplugins"
+  val timestampFormat = "dd-MM-yyyy"
+
+  val INSTANCE = lookupQueryExecutor.newInstance().asInstanceOf[TestQueryExecutorRegister]
+
+  private def lookupQueryExecutor: Class[_] = {
+    ServiceLoader.load(classOf[TestQueryExecutorRegister], Utils.getContextOrSparkClassLoader)
+      .iterator().next().getClass
+  }
+
+}
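
The executor is resolved through Java's ServiceLoader, so each integration module only has to provide an implementation of TestQueryExecutorRegister plus a provider file on its test classpath. A hedged sketch of what such a plug-in looks like follows; the class and module names are made up for illustration, and the real Spark 1.x implementation (SparkTestQueryExecutor) appears later in this commit.

  package org.apache.spark.sql.test

  import org.apache.spark.{SparkConf, SparkContext}
  import org.apache.spark.sql.{DataFrame, SQLContext}

  // Hypothetical executor for some other integration module: it only needs to route
  // sql() and sqlContext through the TestQueryExecutorRegister contract.
  class MyModuleTestQueryExecutor extends TestQueryExecutorRegister {

    override def sql(sqlText: String): DataFrame = MyModuleTestQueryExecutor.ctx.sql(sqlText)

    override def sqlContext: SQLContext = MyModuleTestQueryExecutor.ctx
  }

  object MyModuleTestQueryExecutor {
    // a plain local SQLContext just for the sketch; a real module builds the context
    // it actually wants the shared tests to run against
    private val sc = new SparkContext(
      new SparkConf().setAppName("MyModuleTestQueryExecutor").setMaster("local[2]"))

    val ctx: SQLContext = new SQLContext(sc)
  }

The module then registers the class by shipping META-INF/services/org.apache.spark.sql.test.TestQueryExecutorRegister with the implementation's fully qualified name as its only line, exactly as the spark module does further down in this diff.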

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/main/scala/org/apache/spark/sql/CarbonContext.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonContext.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonContext.scala
index 2ec68ab..ff3b898 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonContext.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonContext.scala
@@ -110,7 +110,7 @@ class CarbonContext(
       val metaStorePathAbsolute = new File(metaStorePath).getCanonicalPath
       val hiveMetaStoreDB = metaStorePathAbsolute + "/metastore_db"
       logDebug(s"metastore db is going to be created in location: $hiveMetaStoreDB")
-      super.configure() ++ Map((CarbonCommonConstants.HIVE_CONNECTION_URL,
+      super.configure() ++ Map[String, String]((CarbonCommonConstants.HIVE_CONNECTION_URL,
         s"jdbc:derby:;databaseName=$hiveMetaStoreDB;create=true"),
         ("hive.metastore.warehouse.dir", metaStorePathAbsolute + "/hivemetadata"))
     } else {
@@ -119,7 +119,7 @@ class CarbonContext(
   }
 
   @transient
-  val LOGGER = LogServiceFactory.getLogService(CarbonContext.getClass.getName)
+  private val LOGGER = LogServiceFactory.getLogService(CarbonContext.getClass.getName)
 
   var queryId: String = ""
 
@@ -153,7 +153,7 @@ object CarbonContext {
   val datasourceShortName: String = "carbondata"
 
   @transient
-  val LOGGER = LogServiceFactory.getLogService(CarbonContext.getClass.getName)
+  private val LOGGER = LogServiceFactory.getLogService(CarbonContext.getClass.getName)
 
   final def updateCarbonPorpertiesPath(hiveContext: HiveContext) {
     val carbonPropertiesFilePath = hiveContext.getConf("carbon.properties.filepath", null)

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 2dbe817..865a0cf 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -316,6 +316,11 @@ case class LoadTable(
 
   val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
 
+  private def checkDefaultValue(value: String, default: String) = if (StringUtils.isEmpty(value)) {
+    default
+  } else {
+    value
+  }
 
   def run(sqlContext: SQLContext): Seq[Row] = {
     if (dataFrame.isDefined && !updateModel.isDefined) {
@@ -422,10 +427,11 @@ case class LoadTable(
           throw new MalformedCarbonCommandException(errorMessage)
       }
       val maxColumns = options.getOrElse("maxcolumns", null)
-      carbonLoadModel.setMaxColumns(maxColumns)
-      carbonLoadModel.setEscapeChar(escapeChar)
-      carbonLoadModel.setQuoteChar(quoteChar)
-      carbonLoadModel.setCommentChar(commentchar)
+
+      carbonLoadModel.setMaxColumns(checkDefaultValue(maxColumns, null))
+      carbonLoadModel.setEscapeChar(checkDefaultValue(escapeChar, "\\"))
+      carbonLoadModel.setQuoteChar(checkDefaultValue(quoteChar, "\""))
+      carbonLoadModel.setCommentChar(checkDefaultValue(commentchar, "#"))
       carbonLoadModel.setDateFormat(dateFormat)
       carbonLoadModel
         .setSerializationNullFormat(
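
The net effect of the checkDefaultValue change above is that empty or missing load options now fall back to fixed defaults instead of being pushed into the load model as-is. A standalone sketch of that behaviour, not part of the commit (commons-lang3's StringUtils is used here only so the snippet compiles on its own; the defaults shown are the ones wired in by the diff):

  import org.apache.commons.lang3.StringUtils

  def checkDefaultValue(value: String, default: String): String =
    if (StringUtils.isEmpty(value)) default else value

  // empty or null options fall back to the defaults chosen above
  assert(checkDefaultValue(null, "\\") == "\\")   // escapechar
  assert(checkDefaultValue("", "\"") == "\"")     // quotechar
  // an explicitly supplied option is passed through unchanged
  assert(checkDefaultValue(";", "#") == ";")      // commentchar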

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
b/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
new file mode 100644
index 0000000..d9c6fbb
--- /dev/null
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.test
+
+import org.apache.spark.{SparkConf, SparkContext}
+import org.apache.spark.sql.{CarbonContext, DataFrame, SQLContext}
+
+import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+/**
+ * This class is the SQL executor used by unit test cases on Spark 1.x.
+ */
+
+class SparkTestQueryExecutor extends TestQueryExecutorRegister {
+  override def sql(sqlText: String): DataFrame = SparkTestQueryExecutor.cc.sql(sqlText)
+
+  override def sqlContext: SQLContext = SparkTestQueryExecutor.cc
+}
+
+object SparkTestQueryExecutor {
+  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+  LOGGER.info("use TestQueryExecutorImplV1")
+  CarbonProperties.getInstance()
+    .addProperty("carbon.kettle.home", TestQueryExecutor.kettleHome)
+    .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, TestQueryExecutor.timestampFormat)
+    .addProperty(CarbonCommonConstants.STORE_LOCATION_TEMP_PATH,
+      System.getProperty("java.io.tmpdir"))
+    .addProperty(CarbonCommonConstants.LOCK_TYPE, CarbonCommonConstants.CARBON_LOCK_TYPE_LOCAL)
+
+  val sc = new SparkContext(new SparkConf()
+    .setAppName("CarbonSpark")
+    .setMaster("local[2]")
+    .set("spark.sql.shuffle.partitions", "20")
+    .set("use_kettle_default", "true"))
+  sc.setLogLevel("ERROR")
+
+  val cc = new CarbonContext(sc, TestQueryExecutor.storeLocation, TestQueryExecutor.metastoredb)
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/resources/META-INF/services/org.apache.spark.sql.test.TestQueryExecutorRegister
----------------------------------------------------------------------
diff --git a/integration/spark/src/resources/META-INF/services/org.apache.spark.sql.test.TestQueryExecutorRegister
b/integration/spark/src/resources/META-INF/services/org.apache.spark.sql.test.TestQueryExecutorRegister
new file mode 100644
index 0000000..a03832b
--- /dev/null
+++ b/integration/spark/src/resources/META-INF/services/org.apache.spark.sql.test.TestQueryExecutorRegister
@@ -0,0 +1 @@
+org.apache.spark.sql.test.SparkTestQueryExecutor
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/java/org/carbondata/integration/spark/load/CarbonLoaderUtilTest.java
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/java/org/carbondata/integration/spark/load/CarbonLoaderUtilTest.java
b/integration/spark/src/test/java/org/carbondata/integration/spark/load/CarbonLoaderUtilTest.java
deleted file mode 100644
index ed4f95b..0000000
--- a/integration/spark/src/test/java/org/carbondata/integration/spark/load/CarbonLoaderUtilTest.java
+++ /dev/null
@@ -1,419 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.carbondata.integration.spark.load;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.carbondata.core.carbon.datastore.block.Distributable;
-import org.apache.carbondata.core.carbon.datastore.block.TableBlockInfo;
-import org.apache.carbondata.spark.load.CarbonLoaderUtil;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test class to test block distribution functionality
- */
-public class CarbonLoaderUtilTest {
-  List<Distributable> blockInfos = null;
-  int noOfNodesInput = -1;
-  List<String> activeNode = null;
-  Map<String, List<Distributable>> expected = null;
-  Map<String, List<Distributable>> mapOfNodes = null;
-
-  @Test public void nodeBlockMapping() throws Exception {
-
-    // scenario when the 3 nodes and 3 executors
-    initSet1();
-    Map<String, List<Distributable>> mapOfNodes =
-            CarbonLoaderUtil.nodeBlockMapping(blockInfos, noOfNodesInput, activeNode);
-    // node allocation
-    Assert.assertTrue("Node Allocation", expected.size() == mapOfNodes.size());
-    // block allocation
-    boolean isEqual = compareResult(expected, mapOfNodes);
-    Assert.assertTrue("Block Allocation", isEqual);
-
-    // 2 node and 3 executors
-    initSet2();
-    mapOfNodes = CarbonLoaderUtil.nodeBlockMapping(blockInfos, noOfNodesInput, activeNode);
-    // node allocation
-    Assert.assertTrue("Node Allocation", expected.size() == mapOfNodes.size());
-    // block allocation
-    isEqual = compareResult(expected, mapOfNodes);
-    Assert.assertTrue("Block Allocation", isEqual);
-
-    // 3 data node and 2 executors
-    initSet3();
-    mapOfNodes = CarbonLoaderUtil.nodeBlockMapping(blockInfos, noOfNodesInput, activeNode);
-    // node allocation
-    Assert.assertTrue("Node Allocation", expected.size() == mapOfNodes.size());
-    // block allocation
-    isEqual = compareResult(expected, mapOfNodes);
-    Assert.assertTrue("Block Allocation", isEqual);
-  }
-
-  /**
-   * compares the blocks allocation
-   *
-   * @param expectedResult
-   * @param actualResult
-   * @return
-   */
-  private boolean compareResult(Map<String, List<Distributable>> expectedResult,
-                                Map<String, List<Distributable>> actualResult) {
-    expectedResult = sortByListSize(expectedResult);
-    actualResult = sortByListSize(actualResult);
-    List<List<Distributable>> expectedList = new LinkedList(expectedResult.entrySet());
-    List<List<Distributable>> mapOfNodesList = new LinkedList(actualResult.entrySet());
-    boolean isEqual = expectedList.size() == mapOfNodesList.size();
-    if (isEqual) {
-      for (int i = 0; i < expectedList.size(); i++) {
-        int size1 = ((List) ((Map.Entry) (expectedList.get(i))).getValue()).size();
-        int size2 = ((List) ((Map.Entry) (mapOfNodesList.get(i))).getValue()).size();
-        isEqual = size1 == size2;
-        if (!isEqual) {
-          break;
-        }
-      }
-    }
-    return isEqual;
-  }
-
-  /**
-   * sort by list size
-   *
-   * @param map
-   * @return
-   */
-  private static Map<String, List<Distributable>> sortByListSize(
-          Map<String, List<Distributable>> map) {
-    List<List<Distributable>> list = new LinkedList(map.entrySet());
-    Collections.sort(list, new Comparator() {
-      public int compare(Object obj1, Object obj2) {
-        if (obj1 == null && obj2 == null) {
-          return 0;
-        } else if (obj1 == null) {
-          return 1;
-        } else if (obj2 == null) {
-          return -1;
-        }
-        int size1 = ((List) ((Map.Entry) (obj1)).getValue()).size();
-        int size2 = ((List) ((Map.Entry) (obj2)).getValue()).size();
-        return size2 - size1;
-      }
-    });
-
-    Map res = new LinkedHashMap();
-    for (Iterator it = list.iterator(); it.hasNext(); ) {
-      Map.Entry entry = (Map.Entry) it.next();
-      res.put(entry.getKey(), entry.getValue());
-    }
-    return res;
-  }
-
-  void initSet1() {
-    blockInfos = new ArrayList<>();
-    activeNode = new ArrayList<>();
-    activeNode.add("node-7");
-    activeNode.add("node-9");
-    activeNode.add("node-11");
-    String[] location = { "node-7", "node-9", "node-11" };
-    blockInfos.add(new TableBlockInfo("node", 1, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 2, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 3, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 4, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 5, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 6, "1", location, 0));
-    expected = new HashMap<>();
-    expected.put("node-7", blockInfos.subList(0, 2));
-    expected.put("node-9", blockInfos.subList(2, 4));
-    expected.put("node-11", blockInfos.subList(4, 6));
-  }
-
-  void initSet2() {
-    blockInfos = new ArrayList<>();
-    activeNode = new ArrayList<>();
-    activeNode.add("node-7");
-    activeNode.add("node-9");
-    activeNode.add("node-11");
-    String[] location = { "node-7", "node-11" };
-    blockInfos.add(new TableBlockInfo("node", 1, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 2, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 3, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 4, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 5, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 6, "1", location, 0));
-    expected = new HashMap<>();
-    expected.put("node-7", blockInfos.subList(0, 2));
-    expected.put("node-9", blockInfos.subList(2, 4));
-    expected.put("node-11", blockInfos.subList(4, 6));
-  }
-
-  void initSet3() {
-    blockInfos = new ArrayList<>();
-    activeNode = new ArrayList<>();
-    activeNode.add("node-7");
-    activeNode.add("node-11");
-    String[] location = { "node-7", "node-9", "node-11" };
-    blockInfos.add(new TableBlockInfo("node", 1, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 2, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 3, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 4, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 5, "1", location, 0));
-    blockInfos.add(new TableBlockInfo("node", 6, "1", location, 0));
-    expected = new HashMap<>();
-    expected.put("node-7", blockInfos.subList(0, 3));
-    expected.put("node-11", blockInfos.subList(3, 6));
-  }
-
-
-  /**
-   * Test case with 4 blocks and 4 nodes with a replication factor of 3.
-   *
-   * @throws Exception
-   */
-  @Test public void nodeBlockMapping() throws Exception {
-
-    Map<TableBlockInfo, List<String>> inputMap = new HashMap<TableBlockInfo, List<String>>(5);
-
-    TableBlockInfo block1 =
-        new TableBlockInfo("path1", 123, "1", new String[] { "1", "2", "3" }, 111);
-    TableBlockInfo block2 =
-        new TableBlockInfo("path2", 123, "2", new String[] { "2", "3", "4" }, 111);
-    TableBlockInfo block3 =
-        new TableBlockInfo("path3", 123, "3", new String[] { "3", "4", "1" }, 111);
-    TableBlockInfo block4 =
-        new TableBlockInfo("path4", 123, "4", new String[] { "1", "2", "4" }, 111);
-
-    inputMap.put(block1, Arrays.asList(new String[]{"1","2","3"}));
-    inputMap.put(block2, Arrays.asList(new String[]{"2","3","4"}));
-    inputMap.put(block3, Arrays.asList(new String[]{"3","4","1"}));
-    inputMap.put(block4, Arrays.asList(new String[]{"1","2","4"}));
-
-    List<TableBlockInfo> inputBlocks = new ArrayList(6);
-    inputBlocks.add(block1);
-    inputBlocks.add(block2);
-    inputBlocks.add(block3);
-    inputBlocks.add(block4);
-
-    Map<String, List<TableBlockInfo>> outputMap
-        = CarbonLoaderUtil.nodeBlockMapping(inputBlocks, 4);
-
-    Assert.assertTrue(calculateBlockDistribution(inputMap, outputMap, 4, 4));
-
-    Assert.assertTrue(calculateBlockLocality(inputMap, outputMap, 4, 4));
-  }
-
-  private boolean calculateBlockLocality(Map<TableBlockInfo, List<String>> inputMap,
-      Map<String, List<TableBlockInfo>> outputMap, int numberOfBlocks, int numberOfNodes) {
-
-    double notInNodeLocality = 0;
-    for (Map.Entry<String, List<TableBlockInfo>> entry : outputMap.entrySet()) {
-
-      List<TableBlockInfo> blockListOfANode = entry.getValue();
-
-      for (TableBlockInfo eachBlock : blockListOfANode) {
-
-        // for each block check the node locality
-
-        List<String> blockLocality = inputMap.get(eachBlock);
-        if (!blockLocality.contains(entry.getKey())) {
-          notInNodeLocality++;
-        }
-      }
-    }
-
-    System.out.println(
-        ((notInNodeLocality / numberOfBlocks) * 100) + " " + "is the node locality mismatch");
-    if ((notInNodeLocality / numberOfBlocks) * 100 > 30) {
-      return false;
-    }
-    return true;
-  }
-
-  private boolean calculateBlockDistribution(Map<TableBlockInfo, List<String>> inputMap,
-      Map<String, List<TableBlockInfo>> outputMap, int numberOfBlocks, int numberOfNodes) {
-
-    int nodesPerBlock = numberOfBlocks / numberOfNodes;
-
-    for (Map.Entry<String, List<TableBlockInfo>> entry : outputMap.entrySet()) {
-
-      if (entry.getValue().size() < nodesPerBlock) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Test case with 5 blocks and 3 nodes
-   *
-   * @throws Exception
-   */
-  @Test public void nodeBlockMappingTestWith5blocks3nodes() throws Exception {
-
-    Map<TableBlockInfo, List<String>> inputMap = new HashMap<TableBlockInfo, List<String>>(5);
-
-    TableBlockInfo block1 =
-        new TableBlockInfo("part-0-0-1462341987000", 123, "1", new String[] { "1", "2", "3"
}, 111);
-    TableBlockInfo block2 =
-        new TableBlockInfo("part-1-0-1462341987000", 123, "2", new String[] { "1", "2", "3"
}, 111);
-    TableBlockInfo block3 =
-        new TableBlockInfo("part-2-0-1462341987000", 123, "3", new String[] { "1", "2", "3"
}, 111);
-    TableBlockInfo block4 =
-        new TableBlockInfo("part-3-0-1462341987000", 123, "4", new String[] { "1", "2", "3"
}, 111);
-    TableBlockInfo block5 =
-        new TableBlockInfo("part-4-0-1462341987000", 123, "5", new String[] { "1", "2", "3"
}, 111);
-
-    inputMap.put(block1, Arrays.asList(new String[]{"1","2","3"}));
-    inputMap.put(block2, Arrays.asList(new String[]{"1","2","3"}));
-    inputMap.put(block3, Arrays.asList(new String[]{"1","2","3"}));
-    inputMap.put(block4, Arrays.asList(new String[]{"1","2","3"}));
-    inputMap.put(block5, Arrays.asList(new String[]{"1","2","3"}));
-
-    List<TableBlockInfo> inputBlocks = new ArrayList(6);
-    inputBlocks.add(block1);
-    inputBlocks.add(block2);
-    inputBlocks.add(block3);
-    inputBlocks.add(block4);
-    inputBlocks.add(block5);
-
-    Map<String, List<TableBlockInfo>> outputMap = CarbonLoaderUtil.nodeBlockMapping(inputBlocks, 3);
-
-    Assert.assertTrue(calculateBlockDistribution(inputMap, outputMap, 5, 3));
-
-    Assert.assertTrue(calculateBlockLocality(inputMap, outputMap, 5, 3));
-
-  }
-
-  /**
-   * Test case with 6 blocks and 4 nodes where the 4th node doesn't have any local data.
-   *
-   * @throws Exception
-   */
-  @Test public void nodeBlockMappingTestWith6Blocks4nodes() throws Exception {
-
-    Map<TableBlockInfo, List<String>> inputMap = new HashMap<TableBlockInfo, List<String>>(5);
-
-    TableBlockInfo block1 =
-        new TableBlockInfo("part-0-0-1462341987000", 123, "1", new String[] { "1", "2", "3"
}, 111);
-    TableBlockInfo block2 =
-        new TableBlockInfo("part-1-0-1462341987000", 123, "2", new String[] { "1", "2", "3"
}, 111);
-    TableBlockInfo block3 =
-        new TableBlockInfo("part-2-0-1462341987000", 123, "3", new String[] { "1", "2", "3"
}, 111);
-    TableBlockInfo block4 =
-        new TableBlockInfo("part-3-0-1462341987000", 123, "4", new String[] { "1", "2", "3"
}, 111);
-    TableBlockInfo block5 =
-        new TableBlockInfo("part-4-0-1462341987000", 123, "5", new String[] { "1", "2", "3"
}, 111);
-    TableBlockInfo block6 =
-        new TableBlockInfo("part-5-0-1462341987000", 123, "6", new String[] { "1", "2", "3"
}, 111);
-
-    inputMap.put(block1, Arrays.asList(new String[]{"1","2","3"}));
-    inputMap.put(block2, Arrays.asList(new String[]{"1","2","3"}));
-    inputMap.put(block3, Arrays.asList(new String[]{"1","2","3"}));
-    inputMap.put(block4, Arrays.asList(new String[]{"1","2","3"}));
-    inputMap.put(block5, Arrays.asList(new String[]{"1","2","3"}));
-    inputMap.put(block6, Arrays.asList(new String[]{"1","2","3"}));
-
-
-    List<TableBlockInfo> inputBlocks = new ArrayList(6);
-    inputBlocks.add(block1);
-    inputBlocks.add(block2);
-    inputBlocks.add(block3);
-    inputBlocks.add(block4);
-    inputBlocks.add(block5);
-    inputBlocks.add(block6);
-
-    Map<String, List<TableBlockInfo>> outputMap = CarbonLoaderUtil.nodeBlockMapping(inputBlocks, 4);
-
-    Assert.assertTrue(calculateBlockDistribution(inputMap, outputMap, 6, 4));
-
-    Assert.assertTrue(calculateBlockLocality(inputMap, outputMap, 6, 4));
-
-  }
-
-  /**
-   * Test case with 10 blocks and 4 nodes with 10,60,30 % distribution
-   *
-   * @throws Exception
-   */
-  @Test public void nodeBlockMappingTestWith10Blocks4nodes() throws Exception {
-
-    Map<TableBlockInfo, List<String>> inputMap = new HashMap<TableBlockInfo, List<String>>(5);
-
-    TableBlockInfo block1 =
-        new TableBlockInfo("part-1-0-1462341987000", 123, "1", new String[] { "2", "4" },
111);
-    TableBlockInfo block2 =
-        new TableBlockInfo("part-2-0-1462341987000", 123, "2", new String[] { "2", "4" },
111);
-    TableBlockInfo block3 =
-        new TableBlockInfo("part-3-0-1462341987000", 123, "3", new String[] { "2", "4" },
111);
-    TableBlockInfo block4 =
-        new TableBlockInfo("part-4-0-1462341987000", 123, "4", new String[] { "2", "4" },
111);
-    TableBlockInfo block5 =
-        new TableBlockInfo("part-5-0-1462341987000", 123, "5", new String[] { "2", "4" },
111);
-    TableBlockInfo block6 =
-        new TableBlockInfo("part-6-0-1462341987000", 123, "6", new String[] { "2", "4" },
111);
-    TableBlockInfo block7 =
-        new TableBlockInfo("part-7-0-1462341987000", 123, "7", new String[] { "3", "4" },
111);
-    TableBlockInfo block8 =
-        new TableBlockInfo("part-8-0-1462341987000", 123, "8", new String[] { "3", "4" },
111);
-    TableBlockInfo block9 =
-        new TableBlockInfo("part-9-0-1462341987000", 123, "9", new String[] { "3", "4" },
111);
-    TableBlockInfo block10 =
-        new TableBlockInfo("part-10-0-1462341987000", 123, "9", new String[] { "1", "4" },
111);
-
-    inputMap.put(block1, Arrays.asList(new String[]{"2","4"}));
-    inputMap.put(block2, Arrays.asList(new String[]{"2","4"}));
-    inputMap.put(block3, Arrays.asList(new String[]{"2","4"}));
-    inputMap.put(block4, Arrays.asList(new String[]{"2","4"}));
-    inputMap.put(block5, Arrays.asList(new String[]{"2","4"}));
-    inputMap.put(block6, Arrays.asList(new String[]{"2","4"}));
-    inputMap.put(block7, Arrays.asList(new String[]{"3","4"}));
-    inputMap.put(block8, Arrays.asList(new String[]{"3","4"}));
-    inputMap.put(block9, Arrays.asList(new String[]{"3","4"}));
-    inputMap.put(block10, Arrays.asList(new String[]{"1","4"}));
-
-    List<TableBlockInfo> inputBlocks = new ArrayList(6);
-    inputBlocks.add(block1);
-    inputBlocks.add(block2);
-    inputBlocks.add(block3);
-    inputBlocks.add(block4);
-    inputBlocks.add(block5);
-    inputBlocks.add(block6);
-    inputBlocks.add(block7);
-    inputBlocks.add(block8);
-    inputBlocks.add(block9);
-    inputBlocks.add(block10);
-
-    Map<String, List<TableBlockInfo>> outputMap = CarbonLoaderUtil.nodeBlockMapping(inputBlocks, 4);
-
-    Assert.assertTrue(calculateBlockDistribution(inputMap, outputMap, 10, 4));
-
-    Assert.assertTrue(calculateBlockLocality(inputMap, outputMap, 10, 4));
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/af2f204e/integration/spark/src/test/java/org/carbondata/integration/spark/testsuite/validation/FileFooterValidator.java
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/java/org/carbondata/integration/spark/testsuite/validation/FileFooterValidator.java
b/integration/spark/src/test/java/org/carbondata/integration/spark/testsuite/validation/FileFooterValidator.java
deleted file mode 100644
index 76906c6..0000000
--- a/integration/spark/src/test/java/org/carbondata/integration/spark/testsuite/validation/FileFooterValidator.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.carbondata.integration.spark.testsuite.validation;
-
-import org.apache.spark.sql.common.util.CarbonHiveContext;
-import org.apache.carbondata.core.carbon.CarbonTableIdentifier;
-import org.apache.carbondata.core.carbon.path.CarbonStorePath;
-import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.datastorage.store.FileHolder;
-import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile;
-import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFileFilter;
-import org.apache.carbondata.core.datastorage.store.impl.FileFactory;
-import org.apache.carbondata.core.reader.CarbonFooterReader;
-import org.apache.carbondata.core.util.CarbonProperties;
-import org.apache.carbondata.format.BlockletIndex;
-import org.apache.carbondata.format.BlockletInfo;
-import org.apache.carbondata.format.DataChunk;
-import org.apache.carbondata.format.Encoding;
-import org.apache.carbondata.format.FileFooter;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.Test;
-
-import static org.junit.Assert.assertTrue;
-
-public class FileFooterValidator {
-
-  private static FileFooter fileFooter;
-
-  private static boolean setUpIsDone;
-
-  @Before public void setUp() throws Exception {
-
-    if (setUpIsDone) {
-      return;
-    }
-    CarbonHiveContext.sql(
-            "CREATE CUBE validatefooter DIMENSIONS (empno Integer, empname String,"
-            + " designation String,"
-            + " doj Timestamp, workgroupcategory Integer, workgroupcategoryname String, "
-            + "deptno Integer, deptname String, projectcode Integer, projectjoindate Timestamp,"
-            + " projectenddate Timestamp) MEASURES (attendance Integer,utilization Integer,"
-            + "salary Integer) OPTIONS (PARTITIONER [PARTITION_COUNT=1])");
-    CarbonHiveContext.sql(
-            "LOAD DATA fact from './src/test/resources/data.csv' INTO CUBE validatefooter
"
-                + "PARTITIONDATA(DELIMITER ',', QUOTECHAR '\"')");
-    String storePath =
-        CarbonProperties.getInstance().getProperty(CarbonCommonConstants.STORE_LOCATION);
-    CarbonTableIdentifier tableIdentifier =
-            new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "validatefooter", "1");
-    String segmentPath = CarbonStorePath.getCarbonTablePath(storePath, tableIdentifier)
-        .getCarbonDataDirectoryPath("0", "0");
-    CarbonFile carbonFile =
-        FileFactory.getCarbonFile(segmentPath, FileFactory.getFileType(segmentPath));
-    CarbonFile[] list = carbonFile.listFiles(new CarbonFileFilter() {
-      @Override public boolean accept(CarbonFile file) {
-        if (file.getName().endsWith(CarbonCommonConstants.FACT_FILE_EXT)) {
-          return true;
-        }
-        return false;
-      }
-    });
-
-    for (CarbonFile file : list) {
-      String fileLocation = file.getAbsolutePath();
-      CarbonFile factFile =
-          FileFactory.getCarbonFile(fileLocation, FileFactory.getFileType(fileLocation));
-      long offset = factFile.getSize() - CarbonCommonConstants.LONG_SIZE_IN_BYTE;
-      FileHolder fileHolder = FileFactory.getFileHolder(FileFactory.getFileType(fileLocation));
-      offset = fileHolder.readLong(fileLocation, offset);
-      CarbonFooterReader metaDataReader = new CarbonFooterReader(fileLocation, offset);
-      fileFooter = metaDataReader.readFooter();
-    }
-    setUpIsDone = true;
-  }
-
-  @AfterClass public static void tearDownAfterClass() {
-    CarbonHiveContext.sql("drop CUBE validatefooter");
-  }
-
-  @Test public void testFileFooterExist() {
-    assertTrue(fileFooter != null);
-  }
-
-  @Test public void testFileFooterVersion() {
-    assertTrue(fileFooter.getVersion() >= 0);
-  }
-
-  @Test public void testFileFooterNumRows() {
-    assertTrue(fileFooter.getNum_rows() > 0);
-  }
-
-  @Test public void testFileFooterTableColumns() {
-    assertTrue(fileFooter.getTable_columns() != null && fileFooter.getTable_columns().size() > 0);
-  }
-
-  @Test public void testFileFooterSegmentInfo() {
-    assertTrue(
-        fileFooter.getSegment_info() != null && fileFooter.getSegment_info().getNum_cols() > 0
-            && fileFooter.getSegment_info().getColumn_cardinalities().size() > 0);
-  }
-
-  @Test public void testFileFooterBlockletIndex() {
-    assertTrue(fileFooter.getBlocklet_index_list() != null
-        && fileFooter.getBlocklet_index_list().size() > 0);
-    for (BlockletIndex blockletIndex : fileFooter.getBlocklet_index_list()) {
-      assertTrue(blockletIndex.getMin_max_index().getMin_values() != null
-          && blockletIndex.getMin_max_index().getMin_values().size() > 0
-          && blockletIndex.getMin_max_index().getMax_values() != null
-          && blockletIndex.getMin_max_index().getMax_values().size() > 0
-          && blockletIndex.getMin_max_index().getMin_values().size() == blockletIndex
-          .getMin_max_index().getMax_values().size());
-      assertTrue(blockletIndex.getB_tree_index().getStart_key() != null
-          && blockletIndex.getB_tree_index().getEnd_key() != null);
-    }
-  }
-
-  @Test public void testFileFooterBlockletInfo() {
-    assertTrue(fileFooter.getBlocklet_info_list() != null
-        && fileFooter.getBlocklet_info_list().size() > 0);
-    for (BlockletInfo blockletInfo : fileFooter.getBlocklet_info_list()) {
-      assertTrue(blockletInfo.getNum_rows() > 0 && blockletInfo.getColumn_data_chunks() != null
-          && blockletInfo.getColumn_data_chunks().size() > 0);
-      for (DataChunk columnDataChunk : blockletInfo.getColumn_data_chunks()) {
-        testColumnDataChunk(columnDataChunk);
-      }
-    }
-  }
-
-  private void testColumnDataChunk(DataChunk columnDatachunk) {
-    assertTrue(columnDatachunk.getEncoders() != null && columnDatachunk.getChunk_meta() != null
-        && columnDatachunk.getChunk_meta().getCompression_codec() != null);
-    // For Measure
-    if (columnDatachunk.getEncoders().contains(Encoding.DELTA)) {
-      assertTrue(
-          columnDatachunk.getPresence() != null && columnDatachunk.getEncoder_meta() != null);
-    } else {
-      assertTrue(columnDatachunk.getSort_state() != null);
-    }
-  }
-}
\ No newline at end of file


