carbondata-commits mailing list archives

From ravipes...@apache.org
Subject [1/2] carbondata git commit: added validation for partition columns
Date Wed, 07 Jun 2017 13:23:40 GMT
Repository: carbondata
Updated Branches:
  refs/heads/master 34fbafc2d -> 3e102dc73


added validation for partition columns

fixed_partition_validation_issues
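
A quick sketch of the behaviour this adds (spark-shell style, reusing the table
definition from the new test below): dropping a partition column is now rejected.

    sql("create table test(a int, b string) partitioned by (c int) stored by 'carbondata' " +
        "tblproperties('PARTITION_TYPE'='LIST','list_info'='0,10,5,20')")
    sql("alter table test drop columns(c)")  // now throws UnsupportedOperationException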


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/0473d1b5
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/0473d1b5
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/0473d1b5

Branch: refs/heads/master
Commit: 0473d1b5e1c71bc925cc79e6784145a582878b5a
Parents: 34fbafc
Author: kunal642 <kunalkapoor642@gmail.com>
Authored: Wed May 31 16:33:58 2017 +0530
Committer: ravipesala <ravi.pesala@gmail.com>
Committed: Wed Jun 7 18:52:46 2017 +0530

----------------------------------------------------------------------
 .../testsuite/partition/TestDDLForPartitionTable.scala      | 8 ++++++++
 .../org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala  | 4 ++--
 .../spark/sql/execution/command/AlterTableCommands.scala    | 9 +++++++++
 .../restructure/vectorreader/AddColumnTestCases.scala       | 2 +-
 4 files changed, 20 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/0473d1b5/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
index 00c4df8..97798c7 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
@@ -114,6 +114,14 @@ class TestDDLForPartitionTable  extends QueryTest with BeforeAndAfterAll {
     assert(partitionInfo.getListInfo.get(2).get(1).equals("3"))
   }
 
+  test("test exception if partition column is dropped") {
+    sql("drop table if exists test")
+    sql(
+      "create table test(a int, b string) partitioned by (c int) stored by 'carbondata' "
+
+      "tblproperties('PARTITION_TYPE'='LIST','list_info'='0,10,5,20')")
+    intercept[Exception] { sql("alter table test drop columns(c)") }
+  }
+
   override def afterAll = {
     dropTable
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0473d1b5/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 7e73629..7237f2f 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -930,9 +930,9 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   private lazy val decimalType =
   DECIMAL ~ (("(" ~> numericLit <~ ",") ~ (numericLit <~ ")")).? ^^ {
     case decimal ~ precisionAndScale => if (precisionAndScale.isDefined) {
-      s"$decimal(${ precisionAndScale.get._1 }, ${ precisionAndScale.get._2 })"
+      s"decimal(${ precisionAndScale.get._1 }, ${ precisionAndScale.get._2 })"
     } else {
-      s"$decimal(10,0)"
+      s"decimal(10,0)"
     }
   }
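
The fix emits the literal "decimal" instead of interpolating the matched keyword
token, so the produced type string is canonical however the user spells DECIMAL.
A self-contained sketch of the same idea using scala.util.parsing.combinator
(illustrative only, not the CarbonData parser itself):

    import scala.util.parsing.combinator.JavaTokenParsers

    object DecimalTypeSketch extends JavaTokenParsers {
      // Case-insensitive keyword, standing in for the parser's DECIMAL token.
      private val DECIMAL = "(?i)decimal".r

      // s"$decimal(...)" would echo the user's spelling (e.g. "DECIMAL(5,2)");
      // the literal "decimal" prefix keeps the output lowercase and uniform.
      private val decimalType: Parser[String] =
        DECIMAL ~ (("(" ~> wholeNumber <~ ",") ~ (wholeNumber <~ ")")).? ^^ {
          case _ ~ Some(precision ~ scale) => s"decimal($precision, $scale)"
          case _ ~ None                    => "decimal(10,0)"
        }

      def main(args: Array[String]): Unit = {
        println(parseAll(decimalType, "DECIMAL(5,2)").get)  // decimal(5, 2)
        println(parseAll(decimalType, "decimal").get)       // decimal(10,0)
      }
    }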
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0473d1b5/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/AlterTableCommands.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/AlterTableCommands.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/AlterTableCommands.scala
index 2438ef6..d6b4838 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/AlterTableCommands.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/AlterTableCommands.scala
@@ -282,7 +282,16 @@ private[sql] case class AlterTableDropColumns(
       carbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
         .lookupRelation(Some(dbName), tableName)(sparkSession).asInstanceOf[CarbonRelation]
         .tableMeta.carbonTable
+      val columnNames = carbonTable.getPartitionInfo(tableName).getColumnSchemaList.asScala
+        .map(_.getColumnName)
       // check each column existence in the table
+      val partitionColumns = alterTableDropColumnModel.columns.filter {
+        tableColumn => columnNames.contains(tableColumn)
+      }
+      if (partitionColumns.nonEmpty) {
+        throw new UnsupportedOperationException("Partition columns cannot be dropped: " +
+                                                s"$partitionColumns")
+      }
       val tableColumns = carbonTable.getCreateOrderColumn(tableName).asScala
       var dictionaryColumns = Seq[org.apache.carbondata.core.metadata.schema.table.column
       .ColumnSchema]()
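
The new guard, reduced to a standalone sketch (plain Seq[String] values stand in
for the CarbonTable partition metadata; the function name is illustrative):

    // Reject an ALTER TABLE ... DROP COLUMNS request that touches a partition column.
    def validateNoPartitionColumnsDropped(columnsToDrop: Seq[String],
        partitionColumnNames: Seq[String]): Unit = {
      val partitionColumns = columnsToDrop.filter(partitionColumnNames.contains)
      if (partitionColumns.nonEmpty) {
        throw new UnsupportedOperationException(
          s"Partition columns cannot be dropped: $partitionColumns")
      }
    }

    validateNoPartitionColumnsDropped(Seq("b"), Seq("c"))       // passes
    validateNoPartitionColumnsDropped(Seq("b", "c"), Seq("c"))  // throws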

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0473d1b5/integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AddColumnTestCases.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AddColumnTestCases.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AddColumnTestCases.scala
index 60c0412..52abbe8 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AddColumnTestCases.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AddColumnTestCases.scala
@@ -70,7 +70,7 @@ class AddColumnTestCases extends QueryTest with BeforeAndAfterAll {
 
   test("test add msr column and check aggregate") {
     sql(
-      "alter table addcolumntest add columns(msrField decimal(5,2))TBLPROPERTIES ('DEFAULT.VALUE"
+
+      "alter table addcolumntest add columns(msrField DECIMAL(5,2))TBLPROPERTIES ('DEFAULT.VALUE"
+
       ".msrfield'= '123.45')")
     checkAnswer(sql("select sum(msrField) from addcolumntest"),
       Row(new BigDecimal("246.90").setScale(2, RoundingMode.HALF_UP)))
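
Only the spelling of the type changes here, which exercises the case
normalization fixed in CarbonDDLSqlParser above. The expected sum follows from
the default value (a sketch, assuming the fixture holds two rows that each pick
up the 123.45 default for the new column):

    import java.math.{BigDecimal, RoundingMode}

    // 2 rows x 123.45 default = 246.90, matching the checkAnswer expectation.
    val expectedSum = new BigDecimal("123.45")
      .multiply(new BigDecimal(2))
      .setScale(2, RoundingMode.HALF_UP)
    assert(expectedSum.compareTo(new BigDecimal("246.90")) == 0)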

