carbondata-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ravipes...@apache.org
Subject [carbondata] 21/41: [CARBONDATA-3314] Fix for Index Cache Size in SHOW METACACHE DDL
Date Tue, 02 Apr 2019 02:41:41 GMT
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 3f6a8534e30a356af2fcd4883f7ed27e5f8c6d79
Author: shivamasn <shivamasn17@gmail.com>
AuthorDate: Tue Mar 12 14:40:13 2019 +0530

    [CARBONDATA-3314] Fix for Index Cache Size in SHOW METACACHE DDL
    
    Problem :
    Index Cache Size printed in SHOW METACACHE on TABLE DDL is not accurate.
    
    Solution :
    Added a utility function in CommonUtil which will convert the bytes count
    to display size and display the accurate cache size upto 2 decimal places.
    
    This closes #3143
---
 .../sql/commands/TestCarbonShowCacheCommand.scala  |  4 +--
 .../apache/carbondata/spark/util/CommonUtil.scala  | 39 +++++++++++++++++++-
 .../command/cache/CarbonShowCacheCommand.scala     | 41 +++++++++++-----------
 3 files changed, 60 insertions(+), 24 deletions(-)

diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/sql/commands/TestCarbonShowCacheCommand.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/sql/commands/TestCarbonShowCacheCommand.scala
index 69c5f7e..e7fd5fa 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/sql/commands/TestCarbonShowCacheCommand.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/sql/commands/TestCarbonShowCacheCommand.scala
@@ -151,7 +151,7 @@ class TestCarbonShowCacheCommand extends QueryTest with BeforeAndAfterAll {
     sql("use cache_empty_db").collect()
     val result1 = sql("show metacache").collect()
     assertResult(2)(result1.length)
-    assertResult(Row("cache_empty_db", "ALL", "0 bytes", "0 bytes", "0 bytes"))(result1(1))
+    assertResult(Row("cache_empty_db", "ALL", "0 B", "0 B", "0 B"))(result1(1))
 
     sql("use cache_db").collect()
     val result2 = sql("show metacache").collect()
@@ -174,7 +174,7 @@ class TestCarbonShowCacheCommand extends QueryTest with BeforeAndAfterAll {
     assertResult(2)(result2.length)
 
     checkAnswer(sql("show metacache on table cache_db.cache_3"),
-      Seq(Row("Index", "0 bytes", "0/1 index files cached"), Row("Dictionary", "0 bytes", "")))
+      Seq(Row("Index", "0 B", "0/1 index files cached"), Row("Dictionary", "0 B", "")))
 
     val result4 = sql("show metacache on table default.cache_4").collect()
     assertResult(3)(result4.length)
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
index 34813ca..7887d87 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
@@ -19,13 +19,14 @@ package org.apache.carbondata.spark.util
 
 
 import java.io.File
+import java.math.BigDecimal
 import java.text.SimpleDateFormat
 import java.util
 import java.util.regex.{Matcher, Pattern}
 
 import scala.collection.JavaConverters._
 import scala.collection.mutable.Map
-import scala.util.Random
+import scala.math.BigDecimal.RoundingMode
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
@@ -62,6 +63,19 @@ object CommonUtil {
   val FIXED_DECIMAL = """decimal\(\s*(\d+)\s*,\s*(\-?\d+)\s*\)""".r
   val FIXED_DECIMALTYPE = """decimaltype\(\s*(\d+)\s*,\s*(\-?\d+)\s*\)""".r
 
+  val ONE_KB: Long = 1024L
+  val ONE_KB_BI: BigDecimal = BigDecimal.valueOf(ONE_KB)
+  val ONE_MB: Long = ONE_KB * ONE_KB
+  val ONE_MB_BI: BigDecimal = BigDecimal.valueOf(ONE_MB)
+  val ONE_GB: Long = ONE_KB * ONE_MB
+  val ONE_GB_BI: BigDecimal = BigDecimal.valueOf(ONE_GB)
+  val ONE_TB: Long = ONE_KB * ONE_GB
+  val ONE_TB_BI: BigDecimal = BigDecimal.valueOf(ONE_TB)
+  val ONE_PB: Long = ONE_KB * ONE_TB
+  val ONE_PB_BI: BigDecimal = BigDecimal.valueOf(ONE_PB)
+  val ONE_EB: Long = ONE_KB * ONE_PB
+  val ONE_EB_BI: BigDecimal = BigDecimal.valueOf(ONE_EB)
+
   def getColumnProperties(column: String,
       tableProperties: Map[String, String]): Option[util.List[ColumnProperty]] = {
     val fieldProps = new util.ArrayList[ColumnProperty]()
@@ -862,4 +876,27 @@ object CommonUtil {
       }
     }
   }
+
+  def bytesToDisplaySize(size: Long): String = bytesToDisplaySize(BigDecimal.valueOf(size))
+
+  // This method converts the bytes count to display size upto 2 decimal places
+  def bytesToDisplaySize(size: BigDecimal): String = {
+    var displaySize: String = null
+    if (size.divideToIntegralValue(ONE_EB_BI).compareTo(BigDecimal.ZERO) > 0) {
+      displaySize = size.divide(ONE_EB_BI).setScale(2, RoundingMode.HALF_DOWN).doubleValue() + " EB"
+    } else if (size.divideToIntegralValue(ONE_PB_BI).compareTo(BigDecimal.ZERO) > 0) {
+      displaySize = size.divide(ONE_PB_BI).setScale(2, RoundingMode.HALF_DOWN).doubleValue() + " PB"
+    } else if (size.divideToIntegralValue(ONE_TB_BI).compareTo(BigDecimal.ZERO) > 0) {
+      displaySize = size.divide(ONE_TB_BI).setScale(2, RoundingMode.HALF_DOWN).doubleValue() + " TB"
+    } else if (size.divideToIntegralValue(ONE_GB_BI).compareTo(BigDecimal.ZERO) > 0) {
+      displaySize = size.divide(ONE_GB_BI).setScale(2, RoundingMode.HALF_DOWN).doubleValue() + " GB"
+    } else if (size.divideToIntegralValue(ONE_MB_BI).compareTo(BigDecimal.ZERO) > 0) {
+      displaySize = size.divide(ONE_MB_BI).setScale(2, RoundingMode.HALF_DOWN).doubleValue() + " MB"
+    } else if (size.divideToIntegralValue(ONE_KB_BI).compareTo(BigDecimal.ZERO) > 0) {
+      displaySize = size.divide(ONE_KB_BI).setScale(2, RoundingMode.HALF_DOWN).doubleValue() + " KB"
+    } else {
+      displaySize = size + " B"
+    }
+    displaySize
+  }
 }
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
index e5f89d8..462be83 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
@@ -20,13 +20,11 @@ package org.apache.spark.sql.execution.command.cache
 import scala.collection.mutable
 import scala.collection.JavaConverters._
 
-import org.apache.commons.io.FileUtils.byteCountToDisplaySize
 import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
-import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
-import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
+import org.apache.spark.sql.catalyst.expressions.AttributeReference
 import org.apache.spark.sql.execution.command.MetadataCommand
-import org.apache.spark.sql.types.{LongType, StringType}
+import org.apache.spark.sql.types.StringType
 
 import org.apache.carbondata.core.cache.CacheProvider
 import org.apache.carbondata.core.cache.dictionary.AbstractColumnDictionaryInfo
@@ -37,6 +35,7 @@ import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, DataMapSchema}
 import org.apache.carbondata.datamap.bloom.BloomCacheKeyValue
 import org.apache.carbondata.processing.merger.CarbonDataMergerUtil
+import org.apache.carbondata.spark.util.CommonUtil.bytesToDisplaySize
 
 /**
  * SHOW CACHE
@@ -67,10 +66,10 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier])
     val cache = CacheProvider.getInstance().getCarbonCache()
     if (cache == null) {
       Seq(
-        Row("ALL", "ALL", byteCountToDisplaySize(0L),
-          byteCountToDisplaySize(0L), byteCountToDisplaySize(0L)),
-        Row(currentDatabase, "ALL", byteCountToDisplaySize(0L),
-          byteCountToDisplaySize(0L), byteCountToDisplaySize(0L)))
+        Row("ALL", "ALL", bytesToDisplaySize(0L),
+          bytesToDisplaySize(0L), bytesToDisplaySize(0L)),
+        Row(currentDatabase, "ALL", bytesToDisplaySize(0L),
+          bytesToDisplaySize(0L), bytesToDisplaySize(0L)))
     } else {
       val carbonTables = CarbonEnv.getInstance(sparkSession).carbonMetaStore
         .listAllTables(sparkSession)
@@ -152,10 +151,10 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier])
       }
       if (tableMapIndexSize.isEmpty && tableMapDatamapSize.isEmpty && tableMapDictSize.isEmpty) {
         Seq(
-          Row("ALL", "ALL", byteCountToDisplaySize(allIndexSize),
-            byteCountToDisplaySize(allDatamapSize), byteCountToDisplaySize(allDictSize)),
-          Row(currentDatabase, "ALL", byteCountToDisplaySize(0),
-            byteCountToDisplaySize(0), byteCountToDisplaySize(0)))
+          Row("ALL", "ALL", bytesToDisplaySize(allIndexSize),
+            bytesToDisplaySize(allDatamapSize), bytesToDisplaySize(allDictSize)),
+          Row(currentDatabase, "ALL", bytesToDisplaySize(0),
+            bytesToDisplaySize(0), bytesToDisplaySize(0)))
       } else {
         val tableList = tableMapIndexSize
           .map(_._1)
@@ -168,15 +167,15 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier])
             val indexSize = tableMapIndexSize.getOrElse(uniqueName, 0L)
             val datamapSize = tableMapDatamapSize.getOrElse(uniqueName, 0L)
             val dictSize = tableMapDictSize.getOrElse(uniqueName, 0L)
-            Row(values(0), values(1), byteCountToDisplaySize(indexSize),
-              byteCountToDisplaySize(datamapSize), byteCountToDisplaySize(dictSize))
+            Row(values(0), values(1), bytesToDisplaySize(indexSize),
+              bytesToDisplaySize(datamapSize), bytesToDisplaySize(dictSize))
           }
 
         Seq(
-          Row("ALL", "ALL", byteCountToDisplaySize(allIndexSize),
-            byteCountToDisplaySize(allDatamapSize), byteCountToDisplaySize(allDictSize)),
-          Row(currentDatabase, "ALL", byteCountToDisplaySize(dbIndexSize),
-            byteCountToDisplaySize(dbDatamapSize), byteCountToDisplaySize(dbDictSize))
+          Row("ALL", "ALL", bytesToDisplaySize(allIndexSize),
+            bytesToDisplaySize(allDatamapSize), bytesToDisplaySize(allDictSize)),
+          Row(currentDatabase, "ALL", bytesToDisplaySize(dbIndexSize),
+            bytesToDisplaySize(dbDatamapSize), bytesToDisplaySize(dbDictSize))
         ) ++ tableList
       }
     }
@@ -274,14 +273,14 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier])
       }.size
 
       var result = Seq(
-        Row("Index", byteCountToDisplaySize(datamapSize.get(tablePath).get),
+        Row("Index", bytesToDisplaySize(datamapSize.get(tablePath).get),
           numIndexFilesCached + "/" + numIndexFilesAll + " index files cached"),
-        Row("Dictionary", byteCountToDisplaySize(dictSize), "")
+        Row("Dictionary", bytesToDisplaySize(dictSize), "")
       )
       for ((path, size) <- datamapSize) {
         if (path != tablePath) {
           val (dmName, dmType) = datamapName.get(path).get
-          result = result :+ Row(dmName, byteCountToDisplaySize(size), dmType)
+          result = result :+ Row(dmName, bytesToDisplaySize(size), dmType)
         }
       }
       result


Mime
View raw message