carbondata-commits mailing list archives

From ravipes...@apache.org
Subject [carbondata] 22/41: [CARBONDATA-3317] Fix NPE when executing 'show segments' command for stream table
Date Tue, 02 Apr 2019 02:41:42 GMT
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 6975346f5857732b9530b33407c7f9f702f363c0
Author: Zhang Zhichao <441586683@qq.com>
AuthorDate: Sat Mar 16 23:49:24 2019 +0800

    [CARBONDATA-3317] Fix NPE when executing 'show segments' command for stream table
    
    When a Spark Streaming app starts creating a new stream segment, the stream index file
    is not written until the data has been written successfully; if the 'show segments'
    command runs in that window, it throws an NPE.
    
    This closes #3149
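
For readers skimming the patch below: the fix simply guards the index read behind an
existence check. A minimal, self-contained Scala sketch of that pattern follows, using
java.io.File as a stand-in for CarbonData's FileFactory/CarbonFile API and a hypothetical
readFileSizesFromIndex helper in place of StreamSegment.readIndexFile:

object StreamSegmentSizeSketch {
  import java.io.File

  // Hypothetical helper standing in for StreamSegment.readIndexFile: returns
  // the recorded size of each data file listed in the segment's index.
  def readFileSizesFromIndex(indexFile: File): Seq[Long] = Seq.empty

  // Returns (dataSize, indexSize) for a stream segment. While the segment is
  // still receiving data, its index file may not exist yet, so fall back to
  // (-1L, -1L) instead of dereferencing a missing file and throwing an NPE.
  def streamSegmentSizes(indexPath: String): (Long, Long) = {
    val indexFile = new File(indexPath)
    if (indexFile.exists()) {
      (readFileSizesFromIndex(indexFile).sum, indexFile.length())
    } else {
      (-1L, -1L)
    }
  }
}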
---
 .../src/main/scala/org/apache/carbondata/api/CarbonStore.scala | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
index 11db430..f5e429e 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
@@ -103,8 +103,14 @@ object CarbonStore {
             // since it is continuously inserting data
             val segmentDir = CarbonTablePath.getSegmentPath(tablePath, load.getLoadName)
             val indexPath = CarbonTablePath.getCarbonStreamIndexFilePath(segmentDir)
-            val indices = StreamSegment.readIndexFile(indexPath, FileFactory.getFileType(indexPath))
-            (indices.asScala.map(_.getFile_size).sum, FileFactory.getCarbonFile(indexPath).getSize)
+            val indexFile = FileFactory.getCarbonFile(indexPath)
+            if (indexFile.exists()) {
+              val indices =
+                StreamSegment.readIndexFile(indexPath, FileFactory.getFileType(indexPath))
+              (indices.asScala.map(_.getFile_size).sum, indexFile.getSize)
+            } else {
+              (-1L, -1L)
+            }
           } else {
             // for batch segment, we can get the data size from table status file directly
             (if (load.getDataSize == null) -1L else load.getDataSize.toLong,

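A note on the design choice: the (-1L, -1L) fallback mirrors the batch-segment branch
directly below it, which already reports -1L when the table status file carries no data
size, so 'show segments' displays a consistent unknown-size placeholder for an in-flight
stream segment rather than failing.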
