carbondata-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ravipes...@apache.org
Subject [1/2] incubator-carbondata git commit: support for float datatype
Date Mon, 16 Jan 2017 11:27:31 GMT
Repository: incubator-carbondata
Updated Branches:
  refs/heads/master 396d0d6b0 -> b5c20a80a


support for float datatype

remove style checks for float support

fix ordering imports for style checks

add testcases and example for float datatype


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/54e83045
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/54e83045
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/54e83045

Branch: refs/heads/master
Commit: 54e83045ee29ecbb67ab08eaaab480fa6e7b09a4
Parents: 396d0d6
Author: sandy <phalodi@gmail.com>
Authored: Wed Jan 11 15:07:38 2017 +0530
Committer: ravipesala <ravi.pesala@gmail.com>
Committed: Mon Jan 16 16:56:00 2017 +0530

----------------------------------------------------------------------
 examples/spark/src/main/resources/data.csv      | 22 ++++----
 .../examples/AllDictionaryExample.scala         |  6 ++-
 examples/spark2/src/main/resources/data.csv     | 20 +++----
 .../examples/CarbonSessionExample.scala         | 11 +++-
 .../src/test/resources/floatSample.csv          | 11 ++++
 .../primitiveTypes/FloatDataTypeTestCase.scala  | 50 +++++++++++++++++
 .../spark/util/DataTypeConverterUtil.scala      |  3 ++
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala |  9 +++-
 .../spark/sql/CarbonDatasourceRelation.scala    |  2 +
 .../org/apache/spark/sql/CarbonSqlParser.scala  | 56 ++++++++++----------
 .../spark/sql/parser/CarbonSparkSqlParser.scala | 14 +++--
 11 files changed, 149 insertions(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/54e83045/examples/spark/src/main/resources/data.csv
----------------------------------------------------------------------
diff --git a/examples/spark/src/main/resources/data.csv b/examples/spark/src/main/resources/data.csv
index 76aec2d..5696978 100644
--- a/examples/spark/src/main/resources/data.csv
+++ b/examples/spark/src/main/resources/data.csv
@@ -1,11 +1,11 @@
-ID,date,country,name,phonetype,serialname,salary
-1,2015/7/23,china,aaa1,phone197,ASD69643,15000
-2,2015/7/24,china,aaa2,phone756,ASD42892,15001
-3,2015/7/25,china,aaa3,phone1904,ASD37014,15002
-4,2015/7/26,china,aaa4,phone2435,ASD66902,15003
-5,2015/7/27,china,aaa5,phone2441,ASD90633,15004
-6,2015/7/28,china,aaa6,phone294,ASD59961,15005
-7,2015/7/29,china,aaa7,phone610,ASD14875,15006
-8,2015/7/30,china,aaa8,phone1848,ASD57308,15007
-9,2015/7/18,china,aaa9,phone706,ASD86717,15008
-10,2015/7/19,usa,aaa10,phone685,ASD30505,15009
\ No newline at end of file
+ID,date,country,name,phonetype,serialname,salary,floatField
+1,2015/7/23,china,aaa1,phone197,ASD69643,15000,2.34
+2,2015/7/24,china,aaa2,phone756,ASD42892,15001,2.34
+3,2015/7/25,china,aaa3,phone1904,ASD37014,15002,2.34
+4,2015/7/26,china,aaa4,phone2435,ASD66902,15003,2.34
+5,2015/7/27,china,aaa5,phone2441,ASD90633,15004,2.34
+6,2015/7/28,china,aaa6,phone294,ASD59961,15005,3.5
+7,2015/7/29,china,aaa7,phone610,ASD14875,15006,2.34
+8,2015/7/30,china,aaa8,phone1848,ASD57308,15007,2.34
+9,2015/7/18,china,aaa9,phone706,ASD86717,15008,2.34
+10,2015/7/19,usa,aaa10,phone685,ASD30505,15009,2.34
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/54e83045/examples/spark/src/main/scala/org/apache/carbondata/examples/AllDictionaryExample.scala
----------------------------------------------------------------------
diff --git a/examples/spark/src/main/scala/org/apache/carbondata/examples/AllDictionaryExample.scala b/examples/spark/src/main/scala/org/apache/carbondata/examples/AllDictionaryExample.scala
index 4dcc868..558139a 100644
--- a/examples/spark/src/main/scala/org/apache/carbondata/examples/AllDictionaryExample.scala
+++ b/examples/spark/src/main/scala/org/apache/carbondata/examples/AllDictionaryExample.scala
@@ -40,7 +40,7 @@ object AllDictionaryExample {
     cc.sql("""
            CREATE TABLE IF NOT EXISTS t3
            (ID Int, date Timestamp, country String,
-           name String, phonetype String, serialname String, salary Int)
+           name String, phonetype String, serialname String, salary Int,floatField float)
            STORED BY 'carbondata'
            """)
 
@@ -53,6 +53,10 @@ object AllDictionaryExample {
            SELECT * FROM t3
            """).show()
 
+    cc.sql("""
+           SELECT * FROM t3 where floatField=3.5
+           """).show()
+
     cc.sql("DROP TABLE IF EXISTS t3")
 
     // clean local dictionary files

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/54e83045/examples/spark2/src/main/resources/data.csv
----------------------------------------------------------------------
diff --git a/examples/spark2/src/main/resources/data.csv b/examples/spark2/src/main/resources/data.csv
index 2722edd..a9cdf9e 100644
--- a/examples/spark2/src/main/resources/data.csv
+++ b/examples/spark2/src/main/resources/data.csv
@@ -1,10 +1,10 @@
-1,10,100,48.4,spark,2015/4/23 12:01:01,1.23,2015/4/23 11:01:01,aaa
-5,17,140,43.4,spark,2015/7/27 12:01:02,3.45,2015/7/27 11:01:02,bbb
-1,11,100,44.4,flink,2015/5/23 12:01:03,23.23,2015/5/23 11:01:03,ccc
-1,10,150,43.4,spark,2015/7/24 12:01:04,254.12,2015/7/24 11:01:04,ddd
-1,10,100,47.4,spark,2015/7/23 12:01:05,876.14,2015/7/23 11:01:05,eeee
-3,14,160,43.4,hive,2015/7/26 12:01:06,3454.32,2015/7/26 11:01:06,ff
-2,10,100,43.4,impala,2015/7/23 12:01:07,456.98,2015/7/23 11:01:07,ggg
-1,10,100,43.4,spark,2015/5/23 12:01:08,32.53,2015/5/23 11:01:08,hhh
-4,16,130,42.4,impala,2015/7/23 12:01:09,67.23,2015/7/23 11:01:09,iii
-1,10,100,43.4,spark,2015/7/23 12:01:10,832.23,2015/7/23 11:01:10,jjj
+1,10,100,48.4,spark,2015/4/23 12:01:01,1.23,2015/4/23 11:01:01,aaa,2.5
+5,17,140,43.4,spark,2015/7/27 12:01:02,3.45,2015/7/27 11:01:02,bbb,2.5
+1,11,100,44.4,flink,2015/5/23 12:01:03,23.23,2015/5/23 11:01:03,ccc,2.5
+1,10,150,43.4,spark,2015/7/24 12:01:04,254.12,2015/7/24 11:01:04,ddd,2.5
+1,10,100,47.4,spark,2015/7/23 12:01:05,876.14,2015/7/23 11:01:05,eeee,3.5
+3,14,160,43.4,hive,2015/7/26 12:01:06,3454.32,2015/7/26 11:01:06,ff,2.5
+2,10,100,43.4,impala,2015/7/23 12:01:07,456.98,2015/7/23 11:01:07,ggg,2.5
+1,10,100,43.4,spark,2015/5/23 12:01:08,32.53,2015/5/23 11:01:08,hhh,2.5
+4,16,130,42.4,impala,2015/7/23 12:01:09,67.23,2015/7/23 11:01:09,iii,2.5
+1,10,100,43.4,spark,2015/7/23 12:01:10,832.23,2015/7/23 11:01:10,jjj,2.5

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/54e83045/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonSessionExample.scala
----------------------------------------------------------------------
diff --git a/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonSessionExample.scala b/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonSessionExample.scala
index 4923e5b..0d9c43f 100644
--- a/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonSessionExample.scala
+++ b/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonSessionExample.scala
@@ -75,7 +75,8 @@ object CarbonSessionExample {
          |    timestampField timestamp,
          |    decimalField decimal(18,2),
          |    dateField date,
-         |    charField char(5)
+         |    charField char(5),
+         |    floatField float
          | )
          | STORED BY 'carbondata'
          | TBLPROPERTIES('DICTIONARY_INCLUDE'='dateField, charField')
@@ -88,7 +89,7 @@ object CarbonSessionExample {
       s"""
          | LOAD DATA LOCAL INPATH '$path'
          | INTO TABLE carbon_table
-         | options('FILEHEADER'='shortField,intField,bigintField,doubleField,stringField,timestampField,decimalField,dateField,charField')
+         | options('FILEHEADER'='shortField,intField,bigintField,doubleField,stringField,timestampField,decimalField,dateField,charField,floatField')
        """.stripMargin)
     // scalastyle:on
 
@@ -137,6 +138,12 @@ object CarbonSessionExample {
         |where t1.stringField = t2.stringField
       """.stripMargin).show
 
+    spark.sql("""
+             SELECT *
+             FROM carbon_table
+             where stringfield = 'spark' and floatField > 2.8
+              """).show
+
     // Drop table
     spark.sql("DROP TABLE IF EXISTS carbon_table")
   }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/54e83045/integration/spark-common-test/src/test/resources/floatSample.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/floatSample.csv b/integration/spark-common-test/src/test/resources/floatSample.csv
new file mode 100644
index 0000000..7cb46ba
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/floatSample.csv
@@ -0,0 +1,11 @@
+ID,date,country,name,phonetype,serialname,salary,rating
+1,2015/7/23,china,aaa1,phone197,ASD69643,15000,2.34
+2,2015/7/24,china,aaa2,phone756,ASD42892,15001,2.34
+3,2015/7/25,china,aaa3,phone1904,ASD37014,15002,2.34
+4,2015/7/26,china,aaa4,phone2435,ASD66902,15003,2.34
+5,2015/7/27,china,aaa5,phone2441,ASD90633,15004,2.34
+6,2015/7/28,china,aaa6,phone294,ASD59961,15005,3.5
+7,2015/7/29,china,aaa7,phone610,ASD14875,15006,2.34
+8,2015/7/30,china,aaa8,phone1848,ASD57308,15007,2.34
+9,2015/7/18,china,aaa9,phone706,ASD86717,15008,2.34
+10,2015/7/19,usa,aaa10,phone685,ASD30505,15009,2.34
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/54e83045/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/primitiveTypes/FloatDataTypeTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/primitiveTypes/FloatDataTypeTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/primitiveTypes/FloatDataTypeTestCase.scala
new file mode 100644
index 0000000..41f3b40
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/primitiveTypes/FloatDataTypeTestCase.scala
@@ -0,0 +1,50 @@
+package org.apache.carbondata.integration.spark.testsuite.primitiveTypes
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+ * Test Class for filter query on Float datatypes
+ *
+ * @author N00902756
+ *
+ */
+class FloatDataTypeTestCase extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    sql("DROP TABLE IF EXISTS tfloat")
+    sql("""
+           CREATE TABLE IF NOT EXISTS tfloat
+           (ID Int, date Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int,rating float)
+           STORED BY 'carbondata'
+           """)
+    sql(s"""
+           LOAD DATA LOCAL INPATH '$resourcesPath/floatSample.csv' into table tfloat
+           """)
+
+  }
+
+  test("select row whose rating is more than 2.8 from tfloat") {
+    checkAnswer(
+      sql("SELECT ID FROM tfloat where rating>2.8"),
+      Seq(Row(6)))
+  }
+
+  test("select row whose rating is 3.5 from tfloat") {
+    checkAnswer(
+      sql("SELECT ID FROM tfloat where rating=3.5"),
+      Seq(Row(6)))
+  }
+
+  test("select sum of rating column from tfloat") {
+    checkAnswer(
+      sql("SELECT sum(rating) FROM tfloat"),
+      Seq(Row(24.56)))
+  }
+
+  override def afterAll {
+    sql("DROP TABLE IF EXISTS tfloat")
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/54e83045/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
index 570d8ed..0364371 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
@@ -31,6 +31,7 @@ object DataTypeConverterUtil {
       case "bigint" => DataType.LONG
       case "numeric" => DataType.DOUBLE
       case "double" => DataType.DOUBLE
+      case "float" => DataType.DOUBLE
       case "decimal" => DataType.DECIMAL
       case "timestamp" => DataType.TIMESTAMP
       case "date" => DataType.DATE
@@ -51,6 +52,7 @@ object DataTypeConverterUtil {
       case "biginttype" => DataType.LONG
       case "numerictype" => DataType.DOUBLE
       case "doubletype" => DataType.DOUBLE
+      case "floattype" => DataType.DOUBLE
       case "decimaltype" => DataType.DECIMAL
       case "timestamptype" => DataType.TIMESTAMP
       case "datetype" => DataType.DATE
@@ -67,6 +69,7 @@ object DataTypeConverterUtil {
       case DataType.INT => "int"
       case DataType.LONG => "bigint"
       case DataType.DOUBLE => "double"
+      case DataType.FLOAT => "double"
       case DataType.DECIMAL => "decimal"
       case DataType.TIMESTAMP => "timestamp"
       case DataType.DATE => "date"

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/54e83045/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 1c1d5f1..c775d57 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -146,6 +146,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   protected val NUMERIC = carbonKeyWord("NUMERIC")
   protected val DECIMAL = carbonKeyWord("DECIMAL")
   protected val DOUBLE = carbonKeyWord("DOUBLE")
+  protected val FLOAT = carbonKeyWord("FLOAT")
   protected val SHORT = carbonKeyWord("SMALLINT")
   protected val INT = carbonKeyWord("INT")
   protected val BIGINT = carbonKeyWord("BIGINT")
@@ -818,7 +819,8 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
     STRING ^^^ "string" | INTEGER ^^^ "integer" |
     TIMESTAMP ^^^ "timestamp" | NUMERIC ^^^ "numeric" |
     BIGINT ^^^ "bigint" | SHORT ^^^ "smallint" |
-    INT ^^^ "int" | DOUBLE ^^^ "double" | decimalType | DATE ^^^ "date" | charType
+    INT ^^^ "int" | DOUBLE ^^^ "double" | FLOAT ^^^ "double" | decimalType |
+    DATE ^^^ "date" | charType
 
   /**
    * Matching the decimal(10,0) data type and returning the same.
@@ -895,6 +897,9 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       case "double" => Field(field.column, Some("Double"), field.name, Some(null), field.parent,
         field.storeType, field.schemaOrdinal, field.precision, field.scale, field.rawSchema
       )
+      case "float" => Field(field.column, Some("Double"), field.name, Some(null), field.parent,
+        field.storeType, field.schemaOrdinal, field.precision, field.scale, field.rawSchema
+      )
       case "timestamp" =>
         Field(field.column, Some("Timestamp"), field.name, Some(null),
           field.parent, field.storeType, field.schemaOrdinal,
@@ -961,6 +966,8 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
         Some(parentName + "." + field.name.getOrElse(None)), Some(null), parentName)
       case "Double" => Field(parentName + "." + field.column, Some("Double"),
         Some(parentName + "." + field.name.getOrElse(None)), Some(null), parentName)
+      case "Float" => Field(parentName + "." + field.column, Some("Double"),
+        Some(parentName + "." + field.name.getOrElse(None)), Some(null), parentName)
       case "Timestamp" => Field(parentName + "." + field.column, Some("Timestamp"),
         Some(parentName + "." + field.name.getOrElse(None)), Some(null), parentName)
       case "Numeric" => Field(parentName + "." + field.column, Some("Numeric"),

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/54e83045/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
index 5ea7129..67a0e3e 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
@@ -242,6 +242,7 @@ case class CarbonRelation(
               .toLowerCase match {
             case "int" => "long"
             case "short" => "long"
+            case "float" => "double"
             case "decimal" => "decimal(" + x.getPrecision + "," + x.getScale + ")"
             case others => others
           }),
@@ -271,6 +272,7 @@ case class CarbonRelation(
             .toLowerCase match {
             case "int" => "long"
             case "short" => "long"
+            case "float" => "double"
            case "decimal" => "decimal(" + column.getColumnSchema.getPrecision + "," + column
              .getColumnSchema.getScale + ")"
             case others => others

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/54e83045/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
index e9738cb..532918d 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
@@ -121,22 +121,22 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
    * For handling the create table DDl systax compatible to Hive syntax
    */
   protected lazy val createTable: Parser[LogicalPlan] =
-  restInput ^^ {
-
-    case statement =>
-      try {
-        // DDl will be parsed and we get the AST tree from the HiveQl
-        val node = HiveQlWrapper.getAst(statement)
-        // processing the AST tree
-        nodeToPlan(node)
-      } catch {
-        // MalformedCarbonCommandException need to be throw directly, parser will catch it
-        case ce: MalformedCarbonCommandException =>
-          throw ce
-        case e: Exception =>
-          sys.error("Parsing error") // no need to do anything.
-      }
-  }
+    restInput ^^ {
+
+      case statement =>
+        try {
+          // DDl will be parsed and we get the AST tree from the HiveQl
+          val node = HiveQlWrapper.getAst(statement)
+          // processing the AST tree
+          nodeToPlan(node)
+        } catch {
+          // MalformedCarbonCommandException need to be throw directly, parser will catch it
+          case ce: MalformedCarbonCommandException =>
+            throw ce
+          case e: Exception =>
+            sys.error("Parsing error") // no need to do anything.
+        }
+    }
 
   /**
    * This function will traverse the tree and logical plan will be formed using that.
@@ -211,8 +211,10 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
                     f.scale = scale
                     f.dataType = Some("decimal")
                   }
-                  if(f.dataType.getOrElse("").startsWith("char")) {
+                  if (f.dataType.getOrElse("").startsWith("char")) {
                     f.dataType = Some("char")
+                  } else if (f.dataType.getOrElse("").startsWith("float")) {
+                    f.dataType = Some("float")
                   }
                   f.rawSchema = x
                   fields ++= Seq(f)
@@ -254,7 +256,7 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
             case Token("TOK_LIKETABLE", child :: Nil) =>
               likeTableName = child.getChild(0).getText()
             case Token("TOK_ALTERTABLE_BUCKETS",
-            Token("TOK_TABCOLNAME", list)::numberOfBuckets) =>
+            Token("TOK_TABCOLNAME", list) :: numberOfBuckets) =>
               val cols = list.map(_.getText)
               if (cols != null) {
                 bucketFields = Some(BucketFields(cols,
@@ -417,13 +419,13 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
     }
 
   protected lazy val deleteRecords: Parser[LogicalPlan] =
-    (DELETE ~> FROM ~> table) ~ (WHERE ~> restInput).? <~  opt(";") ^^ {
+    (DELETE ~> FROM ~> table) ~ (WHERE ~> restInput).? <~ opt(";") ^^ {
       case table ~ condition =>
         val tableName = getTableName(table.tableIdentifier)
         val alias = table.alias.getOrElse("")
         val stmt = condition match {
           case Some(cond) =>
-            "select tupleId from " + tableName  + " " + alias + " where " + cond
+            "select tupleId from " + tableName + " " + alias + " where " + cond
           case _ =>
             "select tupleId from " + tableName + " " + alias
         }
@@ -432,10 +434,10 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
 
   protected lazy val updateTable: Parser[LogicalPlan] =
     UPDATE ~> table ~
-    (SET ~> "(" ~>  repsep(element, ",") <~ ")") ~
-    ( "=" ~> restInput ) <~ opt(";")  ^^ {
-      case  tab~ columns ~ rest =>
-        val (sel, where ) = splitQuery(rest)
+    (SET ~> "(" ~> repsep(element, ",") <~ ")") ~
+    ("=" ~> restInput) <~ opt(";") ^^ {
+      case tab ~ columns ~ rest =>
+        val (sel, where) = splitQuery(rest)
         val (selectStmt, relation) =
           if (!sel.toLowerCase.startsWith("select ")) {
             if (sel.trim.isEmpty) {
@@ -483,7 +485,7 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
             } else if (ch == '\"') {
               foundDoubleQuotes = !foundDoubleQuotes
             }
-            else if (ch == '(' &&  !foundSingleQuotes && !foundDoubleQuotes) {
+            else if (ch == '(' && !foundSingleQuotes && !foundDoubleQuotes) {
              bracketCount = bracketCount + 1
              stack.push(ch)
            } else if (ch == ')' && !foundSingleQuotes && !foundDoubleQuotes) {
@@ -507,7 +509,7 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
 
 
   protected lazy val table: Parser[UnresolvedRelation] = {
-    rep1sep(attributeName, ".")  ~  opt(ident)^^ {
+    rep1sep(attributeName, ".") ~ opt(ident) ^^ {
       case tableIdent ~ alias => UnresolvedRelation(tableIdent, alias)
     }
   }
@@ -541,7 +543,7 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
   }
 
   protected lazy val element: Parser[String] =
-    (ident <~ ".").? ~ ident  ^^ {
+    (ident <~ ".").? ~ ident ^^ {
       case table ~ column => column.toLowerCase
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/54e83045/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
index 04de23d..3bcf165 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
@@ -21,9 +21,9 @@ import scala.collection.mutable
 
 import org.apache.spark.sql.catalyst.parser.{AbstractSqlParser, ParseException, SqlBaseParser}
 import org.apache.spark.sql.catalyst.parser.ParserUtils._
-import org.apache.spark.sql.catalyst.parser.SqlBaseParser.{CreateTableContext, TablePropertyListContext}
+import org.apache.spark.sql.catalyst.parser.SqlBaseParser.{CreateTableContext,
+TablePropertyListContext}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
-import org.apache.spark.sql.catalyst.trees.Origin
 import org.apache.spark.sql.execution.SparkSqlAstBuilder
 import org.apache.spark.sql.execution.command.{BucketFields, CreateTable, Field, TableModel}
 import org.apache.spark.sql.internal.{SQLConf, VariableSubstitution}
@@ -108,7 +108,12 @@ class CarbonSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder(conf) {
       val schema = cols ++ partitionCols
 
       val fields = schema.map { col =>
-        val x = col.name + ' ' + col.dataType.catalogString
+        val x = if (col.dataType.catalogString == "float") {
+          col.name + " double"
+        }
+        else {
+          col.name + ' ' + col.dataType.catalogString
+        }
         val f: Field = parser.anyFieldDef(new parser.lexical.Scanner(x)) match {
           case parser.Success(field, _) => field.asInstanceOf[Field]
@@ -127,6 +132,9 @@ class CarbonSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder(conf) {
         if (f.dataType.getOrElse("").startsWith("char")) {
           f.dataType = Some("char")
         }
+        else if (f.dataType.getOrElse("").startsWith("float")) {
+          f.dataType = Some("double")
+        }
         f.rawSchema = x
         f
       }


Mime
View raw message