carbondata-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jack...@apache.org
Subject [1/2] incubator-carbondata git commit: Added support for creating tables with column name containing spaces within.
Date Tue, 21 Feb 2017 01:54:47 GMT
Repository: incubator-carbondata
Updated Branches:
  refs/heads/master 57f22e834 -> 1d925a656


Added support for creating tables with column name containing spaces within.

*Test case added for create table with space in column names.


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/589b00a1
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/589b00a1
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/589b00a1

Branch: refs/heads/master
Commit: 589b00a1de65e63200a97b067f51e8b5b45ad702
Parents: 57f22e8
Author: Himani Arora <1himani.arora@gmail.com>
Authored: Mon Dec 19 17:44:01 2016 +0530
Committer: jackylk <jacky.likun@huawei.com>
Committed: Tue Feb 21 09:40:34 2017 +0800

----------------------------------------------------------------------
 .../TestCreateTableWithSpaceInColumnName.scala  | 61 ++++++++++++++++++++
 .../org/apache/spark/sql/CarbonSqlParser.scala  |  2 +-
 .../spark/sql/parser/CarbonSparkSqlParser.scala |  4 +-
 3 files changed, 64 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/589b00a1/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithSpaceInColumnName.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithSpaceInColumnName.scala
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithSpaceInColumnName.scala
new file mode 100644
index 0000000..13740c7
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithSpaceInColumnName.scala
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.createTable
+
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+ * test functionality related to spaces in column names for create table
+ */
+class TestCreateTableWithSpaceInColumnName extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    sql("use default")
+    sql("drop database if exists dbColumnSpace cascade")
+  }
+
+  test("test create table space in column names") {
+    // This test verifies that a table whose column names contain spaces (escaped with
+    // backticks) can be created, dropped, and then re-created successfully. A failure
+    // on the second creation would indicate stale metadata or leftover folders in HDFS
+    // after the drop.
+    sql("create database dbColumnSpace")
+    sql("use dbColumnSpace")
+    sql("create table carbonTable(`my id` int, `full name` string)stored by 'carbondata'")
+    sql("drop table carbonTable")
+    sql("use default")
+    sql("use dbColumnSpace")
+    try {
+      // table creation should be successful
+      sql("create table carbonTable(`my id` int, `full name` string)stored by 'carbondata'")
+      assert(true)
+    } catch {
+      case ex: Exception =>
+        assert(false)
+    }
+  }
+
+  override def afterAll {
+    sql("use default")
+    sql("drop database if exists dbColumnSpace cascade")
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/589b00a1/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
index 532918d..7d5ce4f 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
@@ -195,7 +195,7 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
                   val dataType = Option(col.getType)
                   val name = Option(col.getName())
                   // This is to parse complex data types
-                  val x = col.getName + ' ' + col.getType
+                  val x = '`' + col.getName + '`' + ' ' + col.getType
                   val f: Field = anyFieldDef(new lexical.Scanner(x))
                   match {
                     case Success(field, _) => field

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/589b00a1/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
index 3bcf165..2273689 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
@@ -109,10 +109,10 @@ class CarbonSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder(conf)
{
 
       val fields = schema.map { col =>
         val x = if (col.dataType.catalogString == "float") {
-          col.name + " double"
+          '`' + col.name + '`' + " double"
         }
         else {
-          col.name + ' ' + col.dataType.catalogString
+          '`' + col.name + '`' + ' ' + col.dataType.catalogString
         }
         val f: Field = parser.anyFieldDef(new parser.lexical.Scanner(x))
         match {


Mime
View raw message