carbondata-commits mailing list archives

From chenliang...@apache.org
Subject [11/50] [abbrv] incubator-carbondata git commit: Removed unused properties from carbon (#739)
Date Thu, 30 Jun 2016 17:41:58 GMT
Removed unused properties from carbon (#739)

* Removed unused properties from carbon

* Removed unused carbon properties

* Fixed review comments


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/60490179
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/60490179
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/60490179

Branch: refs/heads/master
Commit: 60490179674681985aad2465afd8c0f5b025e185
Parents: e96de9f
Author: nareshpr <prnaresh.naresh@gmail.com>
Authored: Sat Jun 25 14:28:35 2016 +0530
Committer: Ravindra Pesala <ravi.pesala@gmail.com>
Committed: Sat Jun 25 14:28:35 2016 +0530

----------------------------------------------------------------------
 conf/carbon.properties.template                 |  94 ++++++
 .../core/constants/CarbonCommonConstants.java   | 226 +------------
 .../core/datastorage/util/StoreFactory.java     |  36 +--
 .../carbondata/core/util/CarbonProperties.java  | 314 -------------------
 .../org/carbondata/core/util/CarbonUtil.java    |  35 +--
 dev/molap.properties.template                   |  94 ------
 dev/molap.properties_spark                      |  90 ------
 .../spark/sql/hive/CarbonMetastoreCatalog.scala |  82 +----
 .../store/CarbonFactDataHandlerColumnar.java    |  10 +-
 9 files changed, 113 insertions(+), 868 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/60490179/conf/carbon.properties.template
----------------------------------------------------------------------
diff --git a/conf/carbon.properties.template b/conf/carbon.properties.template
new file mode 100644
index 0000000..314320c
--- /dev/null
+++ b/conf/carbon.properties.template
@@ -0,0 +1,94 @@
+#################### System Configuration ##################
+#Mandatory. Carbon Store path
+carbon.storelocation=hdfs://hacluster/Opt/CarbonStore
+#Base directory for Data files
+carbon.ddl.base.hdfs.url=hdfs://hacluster/opt/data
+#Path where the bad records are stored
+carbon.badRecords.location=/opt/Carbon/Spark/badrecords
+#Mandatory. path to kettle home
+carbon.kettle.home=$<SPARK_HOME>/carbonlib/carbonplugins
+
+#################### Performance Configuration ##################
+######## DataLoading Configuration ########
+#File read buffer size used during sorting:MIN=:MAX=
+carbon.sort.file.buffer.size=20
+#Rowset size exchanged between data load graph steps.:MIN=:MAX=
+carbon.graph.rowset.size=100000
+#Number of cores to be used while data loading:MIN=:MAX=
+carbon.number.of.cores.while.loading=6
+#CARBON sort size.:MIN=:MAX=
+carbon.sort.size=500000
+#Algorithm for hashmap for hashkey calculation
+carbon.enableXXHash=true
+#Number of cores to be used for block sort while dataloading
+#carbon.number.of.cores.block.sort=7
+#max level cache size upto which level cache will be loaded in memory
+#carbon.max.level.cache.size=-1
+#enable prefetch of data during merge sort while reading data from sort temp files in data loading
+#carbon.merge.sort.prefetch=true
+######## Compaction Configuration ########
+#Number of cores to be used while compacting:MIN=:MAX=
+carbon.number.of.cores.while.compacting=2
+#default minor compaction in MBs
+carbon.minor.compaction.size=256
+#default major compaction in MBs
+carbon.major.compaction.size=1024
+######## Query Configuration ########
+#Number of cores to be used.:MIN=:MAX=
+carbon.number.of.cores=4
+#Carbon Inmemory record size:MIN=:MAX=
+carbon.inmemory.record.size=100000
+#Improves the performance of filter query
+carbon.enable.quick.filter=false
+
+#################### Extra Configuration ##################
+##Timestamp format of input data used for timestamp data type.
+#carbon.timestamp.format=yyyy-MM-dd HH:mm:ss
+######## Dataload Configuration ########
+######File write buffer size used during sorting.
+#carbon.sort.file.write.buffer.size=10485760
+##Minimum no of intermediate files after which sort merged to be started.
+#carbon.sort.intermediate.files.limit=20
+##space reserved in percentage for writing block meta data in carbon data file
+#carbon.block.meta.size.reserved.percentage=10
+##csv reading buffer size.
+#carbon.csv.read.buffersize.byte=1048576
+##space reserved in percentage for writing block meta data in carbon data file
+#carbon.block.meta.size.reserved.percentage=10
+##High Cardinality value
+#high.cardinality.value=100000
+##CARBON maximum no of threads used for sorting.
+#carbon.max.thread.for.sorting=3
+##Carbon blocklet size. Note: this configuration cannot be changed once the store is generated
+#carbon.blocklet.size=120000
+##How many times to retry to get the lock
+#carbon.load.metadata.lock.retries=3
+##Maximum number of blocklets written in a single file.:Min=1:Max=1000
+#carbon.max.file.size=100
+##Interval between the retries to get the lock
+#carbon.load.metadata.lock.retry.timeout.sec=5
+##Temporary store location, By default it will take System.getProperty("java.io.tmpdir")
+#carbon.tempstore.location=/opt/Carbon/TempStoreLoc
+##data loading records count logger
+#carbon.load.log.counter=500000
+######## Compaction Configuration ########
+##to specify number of segments to be preserved from compaction
+#carbon.numberof.preserve.segments=0
+##To determine the number of days of loads to be compacted
+#carbon.allowed.compaction.days=0
+##To enable compaction while data loading
+#carbon.enable.auto.load.merge=false
+######## Query Configuration ########
+##Maximum time allowed for one query to be executed.
+#max.query.execution.time=60
+##Min max is feature added to enhance query performance. To disable this feature, make it false.
+#carbon.enableMinMax=true
+##number of core to load the blocks in driver
+#no.of.cores.to.load.blocks.in.driver=10
+######## Global Dictionary Configurations ########
+##To enable/disable identify high cardinality during first data loading
+#high.cardinality.identify.enable=true
+##threshold to identify whether high cardinality column
+#high.cardinality.threshold=1000000
+##Percentage to identify whether column cardinality is more than configured percent of total row count
+#high.cardinality.row.count.percentage=80
\ No newline at end of file

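For illustration only (not part of the commit above): a minimal Java sketch of reading a properties file shaped like the new conf/carbon.properties.template, using plain java.util.Properties rather than CarbonData's own CarbonProperties class; the relative path used here is an assumption for the example.

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

// Sketch only: load a carbon.properties-style file and read a mandatory and an optional key.
public final class CarbonPropertiesSketch {
  public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    // The path is an assumption for the example; deployments point this at their own conf dir.
    try (FileInputStream in = new FileInputStream("conf/carbon.properties")) {
      props.load(in);
    }
    String storeLocation = props.getProperty("carbon.storelocation");   // mandatory key in the template
    String sortSize = props.getProperty("carbon.sort.size", "100000");  // optional key, with a fallback
    System.out.println(storeLocation + ", carbon.sort.size=" + sortSize);
  }
}
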
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/60490179/core/src/main/java/org/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/carbondata/core/constants/CarbonCommonConstants.java
index cd25b88..a120d00 100644
--- a/core/src/main/java/org/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/carbondata/core/constants/CarbonCommonConstants.java
@@ -21,11 +21,6 @@ package org.carbondata.core.constants;
 
 public final class CarbonCommonConstants {
   /**
-   * MERGER_FOLDER_EXT
-   */
-  public static final String MERGER_FOLDER_EXT = ".merge";
-
-  /**
    * integer size in bytes
    */
   public static final int INT_SIZE_IN_BYTE = 4;
@@ -38,22 +33,6 @@ public final class CarbonCommonConstants {
    */
   public static final int DOUBLE_SIZE_IN_BYTE = 8;
   /**
-   * ONLINE_MERGE_MIN_VALUE
-   */
-  public static final int ONLINE_MERGE_MIN_VALUE = 10;
-  /**
-   * ONLINE_MERGE_MAX_VALUE
-   */
-  public static final int ONLINE_MERGE_MAX_VALUE = 100;
-  /**
-   * OFFLINE_MERGE_MIN_VALUE
-   */
-  public static final int OFFLINE_MERGE_MIN_VALUE = 100;
-  /**
-   * OFFLINE_MERGE_MAX_VALUE
-   */
-  public static final int OFFLINE_MERGE_MAX_VALUE = 500;
-  /**
    * LONG size in bytes
    */
   public static final int LONG_SIZE_IN_BYTE = 8;
@@ -74,30 +53,6 @@ public final class CarbonCommonConstants {
    */
   public static final String STORE_LOCATION = "carbon.storelocation";
   /**
-   * The keystore type
-   */
-  public static final String KEYSTORE_TYPE = "carbon.keystore.type";
-  /**
-   * The value store type
-   */
-  public static final String VALUESTORE_TYPE = "carbon.valuestore.type";
-  /**
-   * online merge file size
-   */
-  public static final String ONLINE_MERGE_FILE_SIZE = "carbon.online.merge.file.size";
-  /**
-   * online merge file size default value
-   */
-  public static final String ONLINE_MERGE_FILE_SIZE_DEFAULT_VALUE = "10";
-  /**
-   * offline merge file size
-   */
-  public static final String OFFLINE_MERGE_FILE_SIZE = "carbon.offline.merge.file.size";
-  /**
-   * offline merge file size default value
-   */
-  public static final String OFFLINE_MERGE_FILE_SIZE_DEFAULT_VALUE = "100";
-  /**
    * blocklet size in carbon file
    */
   public static final String BLOCKLET_SIZE = "carbon.blocklet.size";
@@ -110,41 +65,17 @@ public final class CarbonCommonConstants {
    */
   public static final String NUM_CORES = "carbon.number.of.cores";
   /**
-   * carbon batchsize
-   */
-  public static final String BATCH_SIZE = "carbon.batch.size";
-  /**
-   * CARDINALITY_INCREMENT_VALUE
-   */
-  public static final String CARDINALITY_INCREMENT_VALUE = "carbon.cardinality.increment.value";
-  /**
    * carbon sort size
    */
   public static final String SORT_SIZE = "carbon.sort.size";
   /**
    * default location of the carbon member, hierarchy and fact files
    */
-  public static final String STORE_LOCATION_DEFAULT_VAL = "../unibi-solutions/system/carbon/store";
-  /**
-   * default keystore type
-   */
-  public static final String KEYSTORE_TYPE_DEFAULT_VAL = "COMPRESSED_SINGLE_ARRAY";
-  /**
-   * default value store type
-   */
-  public static final String VALUESTORE_TYPE_DEFAULT_VAL = "HEAVY_VALUE_COMPRESSION";
-  /**
-   * CARDINALITY_INCREMENT_DEFAULT_VALUE
-   */
-  public static final String CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL = "10";
-  /**
-   * CARDINALITY_INCREMENT_MIN_VALUE
-   */
-  public static final int CARDINALITY_INCREMENT_MIN_VAL = 5;
+  public static final String STORE_LOCATION_DEFAULT_VAL = "../carbon.store";
   /**
    * CARDINALITY_INCREMENT_DEFAULT_VALUE
    */
-  public static final int CARDINALITY_INCREMENT_MAX_VAL = 30;
+  public static final int CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL = 10;
   /**
    * default blocklet size
    */
@@ -183,18 +114,6 @@ public final class CarbonCommonConstants {
    */
   public static final int NUM_CORES_MAX_VAL = 32;
   /**
-   * default carbon batchsize
-   */
-  public static final String BATCH_SIZE_DEFAULT_VAL = "1000";
-  /**
-   * min carbon batchsize
-   */
-  public static final int BATCH_SIZE_MIN_VAL = 500;
-  /**
-   * max carbon batchsize
-   */
-  public static final int BATCH_SIZE_MAX_VAL = 100000;
-  /**
    * default carbon sort size
    */
   public static final String SORT_SIZE_DEFAULT_VAL = "100000";
@@ -207,19 +126,10 @@ public final class CarbonCommonConstants {
    */
   public static final String CARBON_PROPERTIES_FILE_PATH = "../../../conf/carbon.properties";
   /**
-   * CARBON_BADRECORDS_ENCRYPTION
-   */
-  public static final String CARBON_BADRECORDS_ENCRYPTION = "carbon.badRecords.encryption";
-  /**
    * CARBON_DDL_BASE_HDFS_URL
    */
   public static final String CARBON_DDL_BASE_HDFS_URL = "carbon.ddl.base.hdfs.url";
   /**
-   * CARBON_BADRECORDS_ENCRYPTION_DEFAULT_VAL
-   */
-  public static final String CARBON_BADRECORDS_ENCRYPTION_DEFAULT_VAL = "false";
-
-  /**
    * Slice Meta data file.
    */
   public static final String SLICE_METADATA_FILENAME = "sliceMetaData";
@@ -298,57 +208,6 @@ public final class CarbonCommonConstants {
    */
   public static final String SORT_TEMP_FILE_LOCATION = "sortrowtmp";
   /**
-   * SORT_BUFFER_SIZE
-   */
-  public static final String SORT_BUFFER_SIZE = "carbon.sort.buffer.size";
-  /**
-   * SORT_BUFFER_SIZE_DEFAULT_SIZE
-   */
-  public static final String SORT_BUFFER_SIZE_DEFAULT_VALUE = "5000";
-  /**
-   * SORT_BUFFER_SIZE_MIN_SIZE
-   */
-  public static final int SORT_BUFFER_SIZE_MIN_VALUE = 5;
-  /**
-   * DATA_LOAD_Q_SIZE
-   */
-  public static final String DATA_LOAD_Q_SIZE = "carbon.dataload.queuesize";
-  /**
-   * DATA_LOAD_Q_SIZE_DEFAULT
-   */
-  public static final String DATA_LOAD_Q_SIZE_DEFAULT = "100";
-  /**
-   * DATA_LOAD_Q_SIZE_MIN
-   */
-  public static final int DATA_LOAD_Q_SIZE_MIN = 1;
-  /**
-   * DATA_LOAD_Q_SIZE_MAX
-   */
-  public static final int DATA_LOAD_Q_SIZE_MAX = 100;
-
-  /**
-   * DATA_LOAD_CONC_EXE_SIZE
-   */
-  public static final String DATA_LOAD_CONC_EXE_SIZE = "carbon.dataload.concurrent.execution.size";
-  /**
-   * DATA_LOAD_CONC_EXE_SIZE_DEFAULT
-   */
-  public static final String DATA_LOAD_CONC_EXE_SIZE_DEFAULT = "1";
-  /**
-   * DATA_LOAD_CONC_EXE_SIZE_MIN
-   */
-  public static final int DATA_LOAD_CONC_EXE_SIZE_MIN = 1;
-  /**
-   * DATA_LOAD_CONC_EXE_SIZE_MAX
-   */
-  public static final int DATA_LOAD_CONC_EXE_SIZE_MAX = 5;
-  /**
-   * CARBON_Realtime_data
-   */
-  public static final String CARBON_REALTIMEDATA_FILE =
-      "../unibi-solutions/system/carbon/realtimedata.properties";
-
-  /**
    * CARBON_RESULT_SIZE_DEFAULT
    */
   public static final String LEVEL_FILE_EXTENSION = ".level";
@@ -418,10 +277,6 @@ public final class CarbonCommonConstants {
    */
   public static final String CARBON_SORT_FILE_WRITE_BUFFER_SIZE_DEFAULT_VALUE = "50000";
   /**
-   * WRITE_ALL_NODE_IN_SINGLE_TIME_DEFAULT_VALUE
-   */
-  public static final String WRITE_ALL_NODE_IN_SINGLE_TIME_DEFAULT_VALUE = "true";
-  /**
    * Number of cores to be used while loading
    */
   public static final String NUM_CORES_LOADING = "carbon.number.of.cores.while.loading";
@@ -486,35 +341,14 @@ public final class CarbonCommonConstants {
    */
   public static final String HYPHEN_SPC_CHARACTER = "-#!:HYPHEN:!#-";
   /**
-   * CARBON_DECIMAL_POINTERS
-   */
-  public static final String CARBON_DECIMAL_POINTERS = "carbon.decimal.pointers";
-  /**
    * CARBON_DECIMAL_POINTERS_DEFAULT
    */
-  public static final String CARBON_DECIMAL_POINTERS_DEFAULT = "5";
-  /**
-   * CARBON_DECIMAL_POINTERS_AGG
-   */
-  public static final String CARBON_DECIMAL_POINTERS_AGG = "carbon.decimal.pointers.agg";
-  /**
-   * CARBON_DECIMAL_POINTERS_AGG_DEFAULT
-   */
-  public static final String CARBON_DECIMAL_POINTERS_AGG_DEFAULT = "4";
+  public static final byte CARBON_DECIMAL_POINTERS_DEFAULT = 5;
   /**
    * SORT_TEMP_FILE_EXT
    */
   public static final String SORT_TEMP_FILE_EXT = ".sorttemp";
   /**
-   * CARBON_SEQ_GEN_INMEMORY_LRU_CACHE_ENABLED
-   */
-  public static final String CARBON_SEQ_GEN_INMEMORY_LRU_CACHE_ENABLED =
-      "carbon.seqgen.inmemory.lru.cache.enabled";
-  /**
-   * CARBON_SEQ_GEN_INMEMORY_LRU_CACHE_ENABLED_DEFAULT_VALUE
-   */
-  public static final String CARBON_SEQ_GEN_INMEMORY_LRU_CACHE_ENABLED_DEFAULT_VALUE = "false";
-  /**
    * CARBON_MAX_THREAD_FOR_SORTING
    */
   public static final String CARBON_MAX_THREAD_FOR_SORTING = "carbon.max.thread.for.sorting";
@@ -545,15 +379,6 @@ public final class CarbonCommonConstants {
    */
   public static final int DEFAULT_COLLECTION_SIZE = 16;
   /**
-   * CARBON_DATALOAD_VALID_CSVFILE_SIZE
-   */
-  public static final String CARBON_DATALOAD_VALID_CSVFILE_SIZE =
-      "carbon.dataload.valid.csvfile.size(in GB)";
-  /**
-   * CARBON_DATALOAD_VALID_CSVFILE_SIZE_DEFAULTVALUE
-   */
-  public static final String CARBON_DATALOAD_VALID_CSVFILE_SIZE_DEFAULTVALUE = "5";
-  /**
    * CARBON_TIMESTAMP_DEFAULT_FORMAT
    */
   public static final String CARBON_TIMESTAMP_DEFAULT_FORMAT = "yyyy-MM-dd HH:mm:ss";
@@ -562,15 +387,6 @@ public final class CarbonCommonConstants {
    */
   public static final String CARBON_TIMESTAMP_FORMAT = "carbon.timestamp.format";
   /**
-   * CARBON_DATALOAD_VALID_CSVFILE_SIZE
-   */
-  public static final String CARBON_DATALOAD_VALID_NUMBAER_OF_CSVFILE =
-      "carbon.dataload.csv.filecount";
-  /**
-   * CARBON_DATALOAD_VALID_CSVFILE_SIZE_DEFAULTVALUE
-   */
-  public static final String CARBON_DATALOAD_VALID_NUMBAER_OF_CSVFILE_DEFAULTVALUE = "100";
-  /**
    * STORE_LOCATION_HDFS
    */
   public static final String STORE_LOCATION_HDFS = "carbon.storelocation.hdfs";
@@ -751,14 +567,6 @@ public final class CarbonCommonConstants {
    */
   public static final String CARBON_MAX_LEVEL_CACHE_SIZE_DEFAULT = "-1";
   /**
-   * retry interval after which loading a level file will be retried
-   */
-  public static final String CARBON_LOAD_LEVEL_RETRY_INTERVAL = "Carbon.load.level.retry.interval";
-  /**
-   * retry interval default value
-   */
-  public static final String CARBON_LOAD_LEVEL_RETRY_INTERVAL_DEFAULT = "12";
-  /**
    * DOUBLE_VALUE_MEASURE
    */
   public static final char SUM_COUNT_VALUE_MEASURE = 'n';
@@ -781,8 +589,6 @@ public final class CarbonCommonConstants {
    * for dimensions , one of ignore dictionary dimensions , one for measures.
    */
   public static final int ARRAYSIZE = 3;
-  public static final String CARBON_UNIFIED_STORE_PATH = "carbon.unified.store.path";
-  public static final String CARBON_UNIFIED_STORE_PATH_DEFAULT = "false";
   /**
    * CARBON_PREFETCH_BUFFERSIZE
    */
@@ -796,22 +602,6 @@ public final class CarbonCommonConstants {
    */
   public static final String TEMPWRITEFILEEXTENSION = ".write";
   /**
-   * MERGE_THRESHOLD_VALUE
-   */
-  public static final String MERGE_THRESHOLD_VALUE = "carbon.merge.threshold";
-  /**
-   * MERGE_THRESHOLD_DEFAULT_VAL
-   */
-  public static final String MERGE_THRESHOLD_DEFAULT_VAL = "10";
-  /**
-   * TO_LOAD_MERGE_MAX_SIZE
-   */
-  public static final String TO_LOAD_MERGE_MAX_SIZE = "to.merge.load.max.size";
-  /**
-   * TO_LOAD_MERGE_MAX_SIZE_DEFAULT
-   */
-  public static final String TO_LOAD_MERGE_MAX_SIZE_DEFAULT = "1";
-  /**
    * ENABLE_AUTO_LOAD_MERGE
    */
   public static final String ENABLE_AUTO_LOAD_MERGE = "carbon.enable.auto.load.merge";
@@ -868,16 +658,6 @@ public final class CarbonCommonConstants {
    */
   public static final int INVALID_SURROGATE_KEY = -1;
 
-  /**
-   * table split partition
-   */
-  public static final String TABLE_SPLIT_PARTITION = "carbon.table.split.partition.enable";
-
-  /**
-   * table split partition default value
-   */
-  public static final String TABLE_SPLIT_PARTITION_DEFAULT_VALUE = "false";
-
   public static final String INVALID_SEGMENT_ID = "-1";
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/60490179/core/src/main/java/org/carbondata/core/datastorage/util/StoreFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/datastorage/util/StoreFactory.java b/core/src/main/java/org/carbondata/core/datastorage/util/StoreFactory.java
index 725ee56..63f8157 100644
--- a/core/src/main/java/org/carbondata/core/datastorage/util/StoreFactory.java
+++ b/core/src/main/java/org/carbondata/core/datastorage/util/StoreFactory.java
@@ -19,7 +19,6 @@
 
 package org.carbondata.core.datastorage.util;
 
-import org.carbondata.core.constants.CarbonCommonConstants;
 import org.carbondata.core.datastorage.store.FileHolder;
 import org.carbondata.core.datastorage.store.NodeKeyStore;
 import org.carbondata.core.datastorage.store.NodeMeasureDataStore;
@@ -38,22 +37,9 @@ import org.carbondata.core.datastorage.store.impl.key.compressed.CompressedSingl
 import org.carbondata.core.datastorage.store.impl.key.compressed.CompressedSingleArrayKeyInMemoryStore;
 import org.carbondata.core.datastorage.store.impl.key.uncompressed.SingleArrayKeyFileStore;
 import org.carbondata.core.datastorage.store.impl.key.uncompressed.SingleArrayKeyInMemoryStore;
-import org.carbondata.core.util.CarbonProperties;
 
 public final class StoreFactory {
   /**
-   * Single Array Key store.
-   */
-  private static final String SINGLE_ARRAY = "SINGLE_ARRAY";
-  /**
-   * Compressed single array key store.
-   */
-  private static final String COMPRESSED_SINGLE_ARRAY = "COMPRESSED_SINGLE_ARRAY";
-  /**
-   * Double array data store.
-   */
-  private static final String COMPRESSED_DOUBLE_ARRAY = "COMPRESSED_DOUBLE_ARRAY";
-  /**
    * key type.
    */
   private static StoreType keyType;
@@ -63,26 +49,8 @@ public final class StoreFactory {
   private static StoreType valueType;
 
   static {
-    String keytype = CarbonProperties.getInstance().getProperty(CarbonCommonConstants.KEYSTORE_TYPE,
-        CarbonCommonConstants.KEYSTORE_TYPE_DEFAULT_VAL);
-    String valuetype = CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.VALUESTORE_TYPE,
-            CarbonCommonConstants.VALUESTORE_TYPE_DEFAULT_VAL);
-
-    // set key type
-    if (COMPRESSED_SINGLE_ARRAY.equals(keytype)) {
-      keyType = StoreType.COMPRESSED_SINGLE_ARRAY;
-    } else if (SINGLE_ARRAY.equals(keytype)) {
-      keyType = StoreType.SINGLE_ARRAY;
-    } else {
-      keyType = StoreType.COMPRESSED_SINGLE_ARRAY;
-    }
-    // set value type
-    if (COMPRESSED_DOUBLE_ARRAY.equals(valuetype)) {
-      valueType = StoreType.COMPRESSED_DOUBLE_ARRAY;
-    } else {
-      valueType = StoreType.HEAVY_VALUE_COMPRESSION;
-    }
+    keyType = StoreType.COMPRESSED_SINGLE_ARRAY;
+    valueType = StoreType.HEAVY_VALUE_COMPRESSION;
   }
 
   private StoreFactory() {

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/60490179/core/src/main/java/org/carbondata/core/util/CarbonProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/CarbonProperties.java b/core/src/main/java/org/carbondata/core/util/CarbonProperties.java
index a337ac4..e6a1ce9 100644
--- a/core/src/main/java/org/carbondata/core/util/CarbonProperties.java
+++ b/core/src/main/java/org/carbondata/core/util/CarbonProperties.java
@@ -75,34 +75,12 @@ public final class CarbonProperties {
           CarbonCommonConstants.STORE_LOCATION_DEFAULT_VAL);
     }
 
-    if (null == carbonProperties.getProperty(CarbonCommonConstants.VALUESTORE_TYPE)) {
-      carbonProperties.setProperty(CarbonCommonConstants.VALUESTORE_TYPE,
-          CarbonCommonConstants.VALUESTORE_TYPE_DEFAULT_VAL);
-    }
-
-    if (null == carbonProperties.getProperty(CarbonCommonConstants.KEYSTORE_TYPE)) {
-      carbonProperties.setProperty(CarbonCommonConstants.KEYSTORE_TYPE,
-          CarbonCommonConstants.KEYSTORE_TYPE_DEFAULT_VAL);
-    }
-
     validateBlockletSize();
     validateMaxFileSize();
     validateNumCores();
     validateNumCoresBlockSort();
-    validateBatchSize();
     validateSortSize();
-    validateCardinalityIncrementValue();
-    validateOnlineMergerSize();
-    validateOfflineMergerSize();
-    validateSortBufferSize();
-    validateDataLoadQSize();
-    validateDataLoadConcExecSize();
-    validateDecimalPointers();
-    validateDecimalPointersAgg();
-    validateCsvFileSize();
-    validateNumberOfCsvFile();
     validateBadRecordsLocation();
-    validateBadRecordsEncryption();
     validateHighCardinalityIdentify();
     validateHighCardinalityThreshold();
     validateHighCardinalityInRowCountPercentage();
@@ -117,168 +95,6 @@ public final class CarbonProperties {
     }
   }
 
-  private void validateBadRecordsEncryption() {
-    String badRecordsEncryption =
-        carbonProperties.getProperty(CarbonCommonConstants.CARBON_BADRECORDS_ENCRYPTION);
-    if (null == badRecordsEncryption || badRecordsEncryption.length() == 0) {
-      carbonProperties.setProperty(CarbonCommonConstants.CARBON_BADRECORDS_ENCRYPTION,
-          CarbonCommonConstants.CARBON_BADRECORDS_ENCRYPTION_DEFAULT_VAL);
-    }
-  }
-
-  private void validateCsvFileSize() {
-    try {
-      int csvFileSizeProperty = Integer.parseInt(carbonProperties
-          .getProperty(CarbonCommonConstants.CARBON_DATALOAD_VALID_CSVFILE_SIZE,
-              CarbonCommonConstants.CARBON_DATALOAD_VALID_CSVFILE_SIZE_DEFAULTVALUE));
-      if (csvFileSizeProperty < 1) {
-        LOGGER.info("Invalid value for " + CarbonCommonConstants.CARBON_DATALOAD_VALID_CSVFILE_SIZE
-                + "\"Only Positive Integer(greater than zero) is allowed. Using the default value\""
-                + CarbonCommonConstants.CARBON_DATALOAD_VALID_CSVFILE_SIZE_DEFAULTVALUE);
-
-        carbonProperties.setProperty(CarbonCommonConstants.CARBON_DATALOAD_VALID_CSVFILE_SIZE,
-            CarbonCommonConstants.CARBON_DATALOAD_VALID_CSVFILE_SIZE_DEFAULTVALUE);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("Invalid value for " + CarbonCommonConstants.CARBON_DATALOAD_VALID_CSVFILE_SIZE
-              + "\"Only Positive Integer(greater than zero) is allowed. Using the default value\""
-              + CarbonCommonConstants.CARBON_DATALOAD_VALID_CSVFILE_SIZE_DEFAULTVALUE);
-
-      carbonProperties.setProperty(CarbonCommonConstants.CARBON_DATALOAD_VALID_CSVFILE_SIZE,
-          CarbonCommonConstants.CARBON_DATALOAD_VALID_CSVFILE_SIZE_DEFAULTVALUE);
-    }
-  }
-
-  private void validateNumberOfCsvFile() {
-    String count = CarbonCommonConstants.CARBON_DATALOAD_VALID_NUMBAER_OF_CSVFILE;
-    try {
-      int csvFileSizeProperty = Integer.parseInt(carbonProperties
-          .getProperty(count,
-              CarbonCommonConstants.CARBON_DATALOAD_VALID_NUMBAER_OF_CSVFILE_DEFAULTVALUE));
-      if (csvFileSizeProperty < 1) {
-        LOGGER.info("Invalid value for " + count
-                + "\"Only Positive Integer(greater than zero) is allowed. Using the default value\""
-                + CarbonCommonConstants.CARBON_DATALOAD_VALID_NUMBAER_OF_CSVFILE_DEFAULTVALUE);
-
-        carbonProperties.setProperty(count,
-            CarbonCommonConstants.CARBON_DATALOAD_VALID_NUMBAER_OF_CSVFILE_DEFAULTVALUE);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("Invalid value for " + count
-              + "\"Only Positive Integer(greater than zero) is allowed. Using the default value\""
-              + CarbonCommonConstants.CARBON_DATALOAD_VALID_NUMBAER_OF_CSVFILE_DEFAULTVALUE);
-
-      carbonProperties.setProperty(count,
-          CarbonCommonConstants.CARBON_DATALOAD_VALID_NUMBAER_OF_CSVFILE_DEFAULTVALUE);
-    }
-  }
-
-  /**
-   * This method validates the batch size
-   */
-  private void validateOnlineMergerSize() {
-    String onlineMergeSize = carbonProperties
-        .getProperty(CarbonCommonConstants.ONLINE_MERGE_FILE_SIZE,
-            CarbonCommonConstants.ONLINE_MERGE_FILE_SIZE_DEFAULT_VALUE);
-    try {
-      int offlineMergerSize = Integer.parseInt(onlineMergeSize);
-
-      if (offlineMergerSize < CarbonCommonConstants.ONLINE_MERGE_MIN_VALUE
-          || offlineMergerSize > CarbonCommonConstants.ONLINE_MERGE_MAX_VALUE) {
-        LOGGER.info("The online Merge Size value \"" + onlineMergeSize
-                + "\" is invalid. Using the default value \""
-                + CarbonCommonConstants.ONLINE_MERGE_FILE_SIZE_DEFAULT_VALUE);
-        carbonProperties.setProperty(CarbonCommonConstants.ONLINE_MERGE_FILE_SIZE,
-            CarbonCommonConstants.ONLINE_MERGE_FILE_SIZE_DEFAULT_VALUE);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The online Merge Size value \"" + onlineMergeSize
-              + "\" is invalid. Using the default value \""
-              + CarbonCommonConstants.ONLINE_MERGE_FILE_SIZE_DEFAULT_VALUE);
-      carbonProperties.setProperty(CarbonCommonConstants.ONLINE_MERGE_FILE_SIZE,
-          CarbonCommonConstants.ONLINE_MERGE_FILE_SIZE_DEFAULT_VALUE);
-    }
-  }
-
-  /**
-   * This method validates the batch size
-   */
-  private void validateOfflineMergerSize() {
-    String offLineMergerSize = carbonProperties
-        .getProperty(CarbonCommonConstants.OFFLINE_MERGE_FILE_SIZE,
-            CarbonCommonConstants.OFFLINE_MERGE_FILE_SIZE_DEFAULT_VALUE);
-    try {
-      int offLineMergeSize = Integer.parseInt(offLineMergerSize);
-
-      if (offLineMergeSize < CarbonCommonConstants.OFFLINE_MERGE_MIN_VALUE
-          || offLineMergeSize > CarbonCommonConstants.OFFLINE_MERGE_MAX_VALUE) {
-        LOGGER.info("The offline Merge Size value \"" + offLineMergerSize
-                + "\" is invalid. Using the default value \""
-                + CarbonCommonConstants.OFFLINE_MERGE_FILE_SIZE_DEFAULT_VALUE);
-        carbonProperties.setProperty(CarbonCommonConstants.OFFLINE_MERGE_FILE_SIZE,
-            CarbonCommonConstants.OFFLINE_MERGE_FILE_SIZE_DEFAULT_VALUE);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The offline Merge Size value \"" + offLineMergerSize
-              + "\" is invalid. Using the default value \""
-              + CarbonCommonConstants.OFFLINE_MERGE_FILE_SIZE_DEFAULT_VALUE);
-      carbonProperties.setProperty(CarbonCommonConstants.OFFLINE_MERGE_FILE_SIZE,
-          CarbonCommonConstants.OFFLINE_MERGE_FILE_SIZE_DEFAULT_VALUE);
-    }
-  }
-
-  /**
-   * This method validates the batch size
-   */
-  private void validateBatchSize() {
-    String batchSizeStr = carbonProperties.getProperty(CarbonCommonConstants.BATCH_SIZE,
-        CarbonCommonConstants.BATCH_SIZE_DEFAULT_VAL);
-    try {
-      int batchSize = Integer.parseInt(batchSizeStr);
-
-      if (batchSize < CarbonCommonConstants.BATCH_SIZE_MIN_VAL
-          || batchSize > CarbonCommonConstants.BATCH_SIZE_MAX_VAL) {
-        LOGGER.info("The batch size value \"" + batchSizeStr + "\" is invalid. "
-            + "Using the default value \"" + CarbonCommonConstants.BATCH_SIZE_DEFAULT_VAL);
-        carbonProperties.setProperty(CarbonCommonConstants.BATCH_SIZE,
-            CarbonCommonConstants.BATCH_SIZE_DEFAULT_VAL);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The batch size value \"" + batchSizeStr
-          + "\" is invalid. Using the default value \""
-          + CarbonCommonConstants.BATCH_SIZE_DEFAULT_VAL);
-      carbonProperties.setProperty(CarbonCommonConstants.BATCH_SIZE,
-          CarbonCommonConstants.BATCH_SIZE_DEFAULT_VAL);
-    }
-  }
-
-  /**
-   * This method validates the batch size
-   */
-  private void validateCardinalityIncrementValue() {
-    String cardinalityIncr = carbonProperties
-        .getProperty(CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE,
-            CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL);
-    try {
-      int batchSize = Integer.parseInt(cardinalityIncr);
-
-      if (batchSize < CarbonCommonConstants.CARDINALITY_INCREMENT_MIN_VAL
-          || batchSize > CarbonCommonConstants.CARDINALITY_INCREMENT_MAX_VAL) {
-        LOGGER.info("The batch size value \"" + cardinalityIncr
-                + "\" is invalid. Using the default value \""
-                + CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL);
-        carbonProperties.setProperty(CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE,
-            CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The cardinality size value \"" + cardinalityIncr
-              + "\" is invalid. Using the default value \""
-              + CarbonCommonConstants.BATCH_SIZE_DEFAULT_VAL);
-      carbonProperties.setProperty(CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE,
-          CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL);
-    }
-  }
-
   /**
    * This method validates the blocklet size
    */
@@ -306,111 +122,6 @@ public final class CarbonProperties {
   }
 
   /**
-   * This method validates data load queue size
-   */
-  private void validateDataLoadQSize() {
-    String dataLoadQSize = carbonProperties.getProperty(CarbonCommonConstants.DATA_LOAD_Q_SIZE,
-        CarbonCommonConstants.DATA_LOAD_Q_SIZE_DEFAULT);
-    try {
-      int dataLoadQSizeInt = Integer.parseInt(dataLoadQSize);
-
-      if (dataLoadQSizeInt < CarbonCommonConstants.DATA_LOAD_Q_SIZE_MIN
-          || dataLoadQSizeInt > CarbonCommonConstants.DATA_LOAD_Q_SIZE_MAX) {
-        LOGGER.info("The data load queue size value \"" + dataLoadQSize
-                + "\" is invalid. Using the default value \""
-                + CarbonCommonConstants.DATA_LOAD_Q_SIZE_DEFAULT);
-        carbonProperties.setProperty(CarbonCommonConstants.DATA_LOAD_Q_SIZE,
-            CarbonCommonConstants.DATA_LOAD_Q_SIZE_DEFAULT);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The data load queue size value \"" + dataLoadQSize
-              + "\" is invalid. Using the default value \""
-              + CarbonCommonConstants.DATA_LOAD_Q_SIZE_DEFAULT);
-      carbonProperties.setProperty(CarbonCommonConstants.DATA_LOAD_Q_SIZE,
-          CarbonCommonConstants.DATA_LOAD_Q_SIZE_DEFAULT);
-    }
-  }
-
-  /**
-   * This method validates the data load concurrent exec size
-   */
-  private void validateDataLoadConcExecSize() {
-    String dataLoadConcExecSize = carbonProperties
-        .getProperty(CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE,
-            CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE_DEFAULT);
-    try {
-      int dataLoadConcExecSizeInt = Integer.parseInt(dataLoadConcExecSize);
-
-      if (dataLoadConcExecSizeInt < CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE_MIN
-          || dataLoadConcExecSizeInt > CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE_MAX) {
-        LOGGER.info("The data load concurrent exec size value \"" + dataLoadConcExecSize
-                + "\" is invalid. Using the default value \""
-                + CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE_DEFAULT);
-        carbonProperties.setProperty(CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE,
-            CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE_DEFAULT);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The data load concurrent exec size value \"" + dataLoadConcExecSize
-              + "\" is invalid. Using the default value \""
-              + CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE_DEFAULT);
-      carbonProperties.setProperty(CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE,
-          CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE_DEFAULT);
-    }
-  }
-
-  /**
-   * This method validates the decimal pointers size
-   */
-  private void validateDecimalPointers() {
-    String decimalPointers = carbonProperties
-        .getProperty(CarbonCommonConstants.CARBON_DECIMAL_POINTERS,
-            CarbonCommonConstants.CARBON_DECIMAL_POINTERS_DEFAULT);
-    try {
-      int decimalPointersInt = Integer.parseInt(decimalPointers);
-
-      if (decimalPointersInt < 0 || decimalPointersInt > 15) {
-        LOGGER.info("The decimal pointers agg \"" + decimalPointers
-                + "\" is invalid. Using the default value \""
-                + CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE_DEFAULT);
-        carbonProperties.setProperty(CarbonCommonConstants.CARBON_DECIMAL_POINTERS,
-            CarbonCommonConstants.CARBON_DECIMAL_POINTERS_DEFAULT);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The decimal pointers agg \"" + decimalPointers
-              + "\" is invalid. Using the default value \""
-              + CarbonCommonConstants.DATA_LOAD_CONC_EXE_SIZE_DEFAULT);
-      carbonProperties.setProperty(CarbonCommonConstants.CARBON_DECIMAL_POINTERS,
-          CarbonCommonConstants.CARBON_DECIMAL_POINTERS_DEFAULT);
-    }
-  }
-
-  /**
-   * This method validates the data load concurrent exec size
-   */
-  private void validateDecimalPointersAgg() {
-    String decimalPointers = carbonProperties
-        .getProperty(CarbonCommonConstants.CARBON_DECIMAL_POINTERS_AGG,
-            CarbonCommonConstants.CARBON_DECIMAL_POINTERS_AGG_DEFAULT);
-    try {
-      int decimalPointersInt = Integer.parseInt(decimalPointers);
-
-      if (decimalPointersInt < 0 || decimalPointersInt > 15) {
-        LOGGER.info("The decimal pointers agg \"" + decimalPointers
-                + "\" is invalid. Using the default value \""
-                + CarbonCommonConstants.CARBON_DECIMAL_POINTERS_AGG_DEFAULT);
-        carbonProperties.setProperty(CarbonCommonConstants.CARBON_DECIMAL_POINTERS_AGG,
-            CarbonCommonConstants.CARBON_DECIMAL_POINTERS_AGG_DEFAULT);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The decimal pointers agg \"" + decimalPointers
-              + "\" is invalid. Using the default value \""
-              + CarbonCommonConstants.CARBON_DECIMAL_POINTERS_AGG_DEFAULT);
-      carbonProperties.setProperty(CarbonCommonConstants.CARBON_DECIMAL_POINTERS_AGG,
-          CarbonCommonConstants.CARBON_DECIMAL_POINTERS_AGG_DEFAULT);
-    }
-  }
-
-  /**
    * TODO: This method validates the maximum number of blocklets per file ?
    */
   private void validateMaxFileSize() {
@@ -515,31 +226,6 @@ public final class CarbonProperties {
     }
   }
 
-  /**
-   * This method validates the sort size
-   */
-  private void validateSortBufferSize() {
-    String sortSizeStr = carbonProperties.getProperty(CarbonCommonConstants.SORT_BUFFER_SIZE,
-        CarbonCommonConstants.SORT_BUFFER_SIZE_DEFAULT_VALUE);
-    try {
-      int sortSize = Integer.parseInt(sortSizeStr);
-
-      if (sortSize < CarbonCommonConstants.SORT_BUFFER_SIZE_MIN_VALUE) {
-        LOGGER.info("The batch size value \"" + sortSizeStr
-            + "\" is invalid. Using the default value \""
-            + CarbonCommonConstants.SORT_BUFFER_SIZE_DEFAULT_VALUE);
-        carbonProperties.setProperty(CarbonCommonConstants.SORT_BUFFER_SIZE,
-            CarbonCommonConstants.SORT_BUFFER_SIZE_DEFAULT_VALUE);
-      }
-    } catch (NumberFormatException e) {
-      LOGGER.info("The batch size value \"" + sortSizeStr
-          + "\" is invalid. Using the default value \""
-          + CarbonCommonConstants.SORT_BUFFER_SIZE_DEFAULT_VALUE);
-      carbonProperties.setProperty(CarbonCommonConstants.SORT_BUFFER_SIZE,
-          CarbonCommonConstants.SORT_BUFFER_SIZE_DEFAULT_VALUE);
-    }
-  }
-
   private void validateHighCardinalityIdentify() {
     String highcardIdentifyStr = carbonProperties.getProperty(
         CarbonCommonConstants.HIGH_CARDINALITY_IDENTIFY_ENABLE,

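The validators that remain in CarbonProperties all follow the same parse-or-reset pattern visible in the surviving code above. As an illustration only (not CarbonData code), a generic sketch of that pattern; the key, bounds, and default here are chosen arbitrarily.

import java.util.Properties;

// Sketch only: parse the configured value and fall back to the default
// when it is missing, non-numeric, or out of range.
public final class ValidationSketch {
  static void validateIntProperty(Properties props, String key, String defaultVal, int min, int max) {
    String raw = props.getProperty(key, defaultVal);
    try {
      int value = Integer.parseInt(raw);
      if (value < min || value > max) {
        props.setProperty(key, defaultVal);  // out of range: reset to the default
      }
    } catch (NumberFormatException e) {
      props.setProperty(key, defaultVal);    // not a number: reset to the default
    }
  }

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("carbon.sort.size", "not-a-number");
    validateIntProperty(props, "carbon.sort.size", "100000", 1000, 10000000);
    System.out.println(props.getProperty("carbon.sort.size"));  // prints 100000
  }
}
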
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/60490179/core/src/main/java/org/carbondata/core/util/CarbonUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/carbondata/core/util/CarbonUtil.java
index 2d50f07..0f94c5a 100644
--- a/core/src/main/java/org/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/carbondata/core/util/CarbonUtil.java
@@ -299,9 +299,7 @@ public final class CarbonUtil {
    */
   public static int[] getIncrementedCardinality(int[] dimCardinality) {
     // get the cardinality incr factor
-    final int incrValue = Integer.parseInt(CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE,
-            CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL));
+    final int incrValue = CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL;
 
     int perIncr = 0;
     int remainder = 0;
@@ -333,9 +331,7 @@ public final class CarbonUtil {
 
   public static int getIncrementedCardinality(int dimCardinality) {
     // get the cardinality incr factor
-    final int incrValue = Integer.parseInt(CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE,
-            CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL));
+    final int incrValue = CarbonCommonConstants.CARDINALITY_INCREMENT_VALUE_DEFAULT_VAL;
 
     int perIncr = 0;
     int remainder = 0;
@@ -1254,26 +1250,6 @@ public final class CarbonUtil {
   }
 
   /**
-   * This method will read the retry time interval for loading level files in
-   * memory
-   *
-   * @return
-   */
-  public static long getRetryIntervalForLoadingLevelFile() {
-    long retryInterval = 0;
-    try {
-      retryInterval = Long.parseLong(CarbonProperties.getInstance()
-          .getProperty(CarbonCommonConstants.CARBON_LOAD_LEVEL_RETRY_INTERVAL,
-              CarbonCommonConstants.CARBON_LOAD_LEVEL_RETRY_INTERVAL_DEFAULT));
-    } catch (NumberFormatException e) {
-      retryInterval = Long.parseLong(CarbonProperties.getInstance()
-          .getProperty(CarbonCommonConstants.CARBON_LOAD_LEVEL_RETRY_INTERVAL_DEFAULT));
-    }
-    retryInterval = retryInterval * 1000;
-    return retryInterval;
-  }
-
-  /**
    * Below method will be used to get the aggregator type
    * CarbonCommonConstants.SUM_COUNT_VALUE_MEASURE will return when value is double measure
    * CarbonCommonConstants.BYTE_VALUE_MEASURE will be returned when value is byte array
@@ -1296,13 +1272,6 @@ public final class CarbonUtil {
     }
     String basePath = prop.getProperty(CarbonCommonConstants.STORE_LOCATION,
         CarbonCommonConstants.STORE_LOCATION_DEFAULT_VAL);
-    String useUniquePath = CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.CARBON_UNIFIED_STORE_PATH,
-            CarbonCommonConstants.CARBON_UNIFIED_STORE_PATH_DEFAULT);
-    if (null != schemaName && !schemaName.isEmpty() && null != cubeName && !cubeName.isEmpty()
-        && "true".equals(useUniquePath)) {
-      basePath = basePath + File.separator + schemaName + File.separator + cubeName;
-    }
     return basePath;
   }
 

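With the property removed, the cardinality increment is now the fixed default of 10. For illustration only, and assuming that value is applied as a percentage, a rough sketch of a fixed-percentage increment; this is not the exact CarbonUtil arithmetic.

// Sketch only: grow a dimension cardinality by a fixed percentage, with a floor of one.
public final class CardinalityIncrementSketch {
  static int incrementCardinality(int cardinality, int incrPercent) {
    int increment = (cardinality * incrPercent) / 100;
    if (increment == 0) {
      increment = 1;  // always leave some headroom, even for tiny cardinalities
    }
    return cardinality + increment;
  }

  public static void main(String[] args) {
    System.out.println(incrementCardinality(1000, 10));  // 1100
    System.out.println(incrementCardinality(5, 10));     // 6
  }
}
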
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/60490179/dev/molap.properties.template
----------------------------------------------------------------------
diff --git a/dev/molap.properties.template b/dev/molap.properties.template
deleted file mode 100644
index 7aa69d4..0000000
--- a/dev/molap.properties.template
+++ /dev/null
@@ -1,94 +0,0 @@
-#################### Performance Configuration ##################
-#File read buffer size used during sorting:MIN=:MAX=
-carbon.sort.file.buffer.size=20
-#Rowset size exchanged between data load graph steps.:MIN=:MAX=
-carbon.graph.rowset.size=100000
-#Number of cores to be used.:MIN=:MAX=
-carbon.number.of.cores=4
-#Number of cores to be used while data loading:MIN=:MAX=
-carbon.number.of.cores.while.loading=6
-#Carbon Inmemory record size:MIN=:MAX=
-carbon.inmemory.record.size=100000
-#CARBON sort size.:MIN=:MAX=
-carbon.sort.size=500000
-#Improves the performance of filter query
-carbon.enable.quick.filter=false
-#Algorithm for hashmap for hashkey calculation
-carbon.enableXXHash=true
-
-#################### System Configuration ##################
-#Mandatory. Carbon Store path
-carbon.storelocation=hdfs://hacluster/Opt/CarbonStore
-#Base directory for Data files
-carbon.ddl.base.hdfs.url=hdfs://hacluster/opt/data
-#Path where the bad records are stored
-carbon.badRecords.location=/opt/Carbon/Spark/badrecords
-#To unify the carbon Cube and store path.
-carbon.unified.store.path=true
-#Mandatory. path to kettle home
-carbon.kettle.home=<SPARK_HOME>/lib/carbonplugins
-
-#################### Extra Configuration ##################
-######Carbon level write buffers in KB:MIN=:MAX=
-#carbon.level.write.bufferinkb=12238
-######File write buffer size used during sorting.
-#carbon.sort.file.write.buffer.size=10485760
-######Minimum no of intermediate files after which sort merged to be started.
-#carbon.sort.intermediate.files.limit=20
-######Number of threads for intermediate merging.
-#carbon.sort.intermedaite.number.of.threads=5
-######csv reading buffer size.
-#carbon.csv.read.buffersize.byte=1048576
-######High Cardinality value
-#high.cardinality.value =50000
-######Carbon blocklet size. Note: this configuration cannot be change once store is generated
-#carbon.blocklet.size=120000
-######CARBON maximum no of threads used for sorting.
-#carbon.max.thread.for.sorting=3
-######Maximum time allowed for one query to be executed.
-#max.query.execution.time=60
-######Aggregate table suggestion takes by number load for data sampling.
-#carbon.agg.loadCount=2
-######Number of partition to read
-#carbon.agg.partitionCount=1
-######Aggregate table suggestion takes number of fact file per load for data sampling.
-#carbon.agg.factCount=2
-######Aggregate table suggestion takes number of records per fact for data sampling.
-#carbon.agg.recordCount=5
-######benefitRatio =total records/no of records in aggregate table.if benefit ratio for aggregate combination is greater than configured value than it is selected for suggestion
-#carbon.agg.benefitRatio=10
-######Whether to cahe Datastats suggestion
-#carbon.agg.datastats.cache=false
-######Any query which takes more than configured value in seconds are considered for Aggregate suggestion.
-#carbon.agg.query.performance.goal=3
-######If this parameter is set to true, Carbon will cache the metadata on Server start up and reduce the first query execution time.NOTE: Curent descriptions not applicable when level cache is enabled.
-#carbon.agg.querystats.expiryday=30
-######If this parameter is set to true, Carbon will cache the metadata on Server start up and reduce the first query execution time.NOTE: Curent descriptions not applicable when level cache is enabled.
-#carbon.is.loadcube.startup=false
-######If this parameter is set to true, Carbon will cache the metadata after the successful data loading and reduce the first query execution time.NOTE: Curent descriptions not applicable when level cache is enabled.
-#carbon.is.loadcube.dataload=false
-######How to times retry to get the lock
-#carbon.load.metadata.lock.retries=3
-######Interval between the retries to get the lock
-#carbon.load.metadata.lock.retry.timeout.sec=5
-######Maximum number of blocklets written in a single file.:Min=1:Max=1000
-#carbon.max.file.size=100
-######Sort buffer size:MI=5:MAX=
-#carbon.sort.buffer.size=5000
-######Timestamp format of input data used for timestamp data type.
-#carbon.timestamp.format=yyyy-MM-dd HH:mm:ss
-######Cube is completely kept in memory.
-#carbon.forced.in.memory.cube=false
-######Maintains the complete cube cache in memory while data loading. Useful for increasing data load performance in case of history data loading. Set it to true for data load performance tuning.
-#carbon.seqgen.inmemory.lru.cache.enabled=false
-######Min max is feature added to enhance query performance. To disable this feature, make it false.
-#carbon.enableMinMax=true
-######Temporary store location, By default it will take System.getProperty("java.io.tmpdir")
-#carbon.tempstore.location=/opt/Carbon/TempStoreLoc
-
-
-#################### AUDIT LOGGING(Used when it is used without FI) ##################
-#carbon.auditlog.file.path=logs/CarbonAudit.log
-#carbon.auditlog.max.file.size=10MB
-#carbon.auditlog.max.backup.files=10
-#carbon.logging.level=INFO

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/60490179/dev/molap.properties_spark
----------------------------------------------------------------------
diff --git a/dev/molap.properties_spark b/dev/molap.properties_spark
deleted file mode 100644
index 0c564ff..0000000
--- a/dev/molap.properties_spark
+++ /dev/null
@@ -1,90 +0,0 @@
-carbon.schema.maxFileSize=50
-dataload.taskstatus.retention=2
-carbon.numberOfCubesToLoadConcurrent=5
-max.memory.threshold=60
-min.memory.threshold=50
-#1 means every day
-carbon.retention.schedule=1
-carbon.dataload.queuesize=100
-carbon.dataload.concurrent.execution.size=1
-carbon.result.limit=100000000
-mysql.null.value=\\N
-mssql.null.value=
-oracle.null.value=NULL
-carbon.sort.size=1000000
-carbon.queryexecutor.concurrent.execution.size=3
-#################### EXECUTION THREADS ##################
-carbon.number.of.cores=4
-carbon.smartJump.avoid.percent=70
-carbon.agg.enableXXHash=true
-carbon.spark.resultlimit=20000
-carbon.cache.used=false
-
-mysql.resultset.cursor.moveonly.forward=false
-
-carbon.level.write.bufferinkb=12238
-carbon.graph.rowset.size=100000
-carbon.sort.file.write.buffer.size=10485760
-carbon.sort.intermediate.files.limit=50
-carbon.sort.file.buffer.size=20
-carbon.sort.intermedaite.number.of.therads=5
-carbon.csv.read.buffersize.byte=1048576
-carbon.csv.read.copies=6
-carbon.datawriter.write.all.node=true
-carbon.data.load.log.counter=500000
-carbon.number.of.cores.while.loading=6
-carbon.prefetch.in.merge=true
-carbon.prefetch.bufferSize=20000
-carbon.inmemory.cache.use=true
-carbon.dataload.log.enabled=true
-
-
-## Spark CARBON related Properties
-#spark.dataset.location=../datasets_test/
-#spark.dp.location=../datapipelines_test/
-#spark.sqlconnections.location=../unibi-solutions/system/dbconnection/sqlconnections_test.xml
-#spark.url=local
-
-#carbon.storelocation=hdfs://master:54310/opt/ravi/store
-#carbon.storelocation=/opt/ravi/store1day
-carbon.storelocation=hdfs://master:54310/opt/ravi/perfstore
-#carbon.storelocation=/opt/ravi/store1day
-#carbon.storelocation=/opt/ravi/storebasecarbon
-#carbon.storelocation=/opt/ravi/storesinglenode
-
-
-
-
-spark.dataset.location=hdfs://master:54310/opt/ravi/sparkcarbon/datasets/
-spark.dp.location=hdfs://master:54310/opt/ravi/sparkcarbon/datapipelines/
-spark.sqlconnections.location=hdfs://master:54310/opt/ravi/sparkcarbon/sqlconnections/sqlconnections_test.xml
-spark.url=spark://master:7077
-spark.home=/opt/spark-1.0.0-rc3
-#spark.schema.path=/opt/ravi/steelwheels.carbon.xml
-spark.schema.path=/opt/ravi/PCC_Java.xml
-spark.schema.name=PCC
-spark.cube.name=ODM
-
-spark.executor.memory=200g
-spark.cores.max=76
-spark.usekryo.serializer=true
-spark.eventLog.enabled=true
-spark.sql.shuffle.partitions=200
-
-##### New properties for columnar ####################################################################
-# Enbale Columnar
-carbon.is.columnar.storage=true
-#Int or Short based indexes. use Int now (TODO  Short is not working) 
-is.int.based.indexer=true			
-#Store Unique Values for a column if not high cardinality dimension 
-aggregate.columnar.keyblock=true
-#Threshold for a dimension be considered High Cardinality 
-high.cardinality.value=100000
-#Numbers of tuples in Leaf  ( this can be 15x for columar store comared to row based store since each column is sperately read/decompressed) 
-carbon.leaf.node.size=120000
-#Use multiple of 8 bits for a colmn value
-carbon.is.fullyfilled.bits=true
-#To use NumberCompressor.java for compression . Since no benefit was found, keep it false
-is.compressed.keyblock=false
-#How many levels will be combined into one column .TODO only one supported
-carbon.dimension.split.value.in.columnar=1

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/60490179/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
index b9a6649..56c6574 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
@@ -112,15 +112,6 @@ class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: C
 
   val metadata = loadMetadata(storePath)
 
-  lazy val useUniquePath = if ("true".equalsIgnoreCase(CarbonProperties.getInstance().
-    getProperty(
-      CarbonCommonConstants.CARBON_UNIFIED_STORE_PATH,
-      CarbonCommonConstants.CARBON_UNIFIED_STORE_PATH_DEFAULT))) {
-    true
-  } else {
-    false
-  }
-
   def lookupRelation1(
       databaseName: Option[String],
       tableName: String,
@@ -230,34 +221,8 @@ class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: C
     }
     val fileType = FileFactory.getFileType(metadataPath)
     val metaDataBuffer = new ArrayBuffer[TableMeta]
-    if (useUniquePath) {
-      if (FileFactory.isFileExist(metadataPath, fileType)) {
-        val file = FileFactory.getCarbonFile(metadataPath, fileType)
-        val schemaFolders = file.listFiles()
-
-        schemaFolders.foreach(schemaFolder => {
-          if (schemaFolder.isDirectory) {
-            val cubeFolders = schemaFolder.listFiles()
-
-            cubeFolders.foreach(cubeFolder => {
-              val schemaPath = metadataPath + "/" + schemaFolder.getName + "/" + cubeFolder.getName
-              try {
-                fillMetaData(schemaPath, fileType, metaDataBuffer)
-                updateSchemasUpdatedTime(schemaFolder.getName, cubeFolder.getName)
-              } catch {
-                case ex: org.apache.hadoop.security.AccessControlException =>
-                // Ingnore Access control exception and get only accessible cube details
-              }
-            })
-          }
-        })
-      }
-
-    } else {
-
-      fillMetaData(metadataPath, fileType, metaDataBuffer)
-      updateSchemasUpdatedTime("", "")
-    }
+    fillMetaData(metadataPath, fileType, metaDataBuffer)
+    updateSchemasUpdatedTime("", "")
     MetaData(metaDataBuffer)
 
   }
@@ -605,13 +570,7 @@ class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: C
 
   private def getTimestampFileAndType(schemaName: String, cubeName: String) = {
 
-    val timestampFile = if (useUniquePath) {
-      storePath + "/" + schemaName + "/" + cubeName + "/" +
-      CarbonCommonConstants.SCHEMAS_MODIFIED_TIME_FILE
-    }
-    else {
-      storePath + "/" + CarbonCommonConstants.SCHEMAS_MODIFIED_TIME_FILE
-    }
+    val timestampFile = storePath + "/" + CarbonCommonConstants.SCHEMAS_MODIFIED_TIME_FILE
 
     val timestampFileType = FileFactory.getFileType(timestampFile)
     (timestampFile, timestampFileType)
@@ -627,14 +586,8 @@ class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: C
 
     touchSchemasTimestampFile(schemaName, cubeName)
 
-    if (useUniquePath) {
-      cubeModifiedTimeStore.put(schemaName + '_' + cubeName,
-        FileFactory.getCarbonFile(timestampFile, timestampFileType).getLastModifiedTime)
-    }
-    else {
-      cubeModifiedTimeStore.put("default",
-        FileFactory.getCarbonFile(timestampFile, timestampFileType).getLastModifiedTime)
-    }
+    cubeModifiedTimeStore.put("default",
+      FileFactory.getCarbonFile(timestampFile, timestampFileType).getLastModifiedTime)
 
   }
 
@@ -645,26 +598,11 @@ class CarbonMetastoreCatalog(hive: HiveContext, val storePath: String, client: C
   }
 
   def checkSchemasModifiedTimeAndReloadCubes() {
-    if (useUniquePath) {
-      metadata.cubesMeta.foreach(c => {
-        val (timestampFile, timestampFileType) = getTimestampFileAndType(
-          c.carbonTableIdentifier.getDatabaseName, c.carbonTableIdentifier.getTableName)
-
-        if (FileFactory.isFileExist(timestampFile, timestampFileType)) {
-          if (!(FileFactory.getCarbonFile(timestampFile, timestampFileType).getLastModifiedTime ==
-                cubeModifiedTimeStore.get(c.carbonTableIdentifier.getDatabaseName + "_" +
-                                          c.carbonTableIdentifier.getTableName))) {
-            refreshCache()
-          }
-        }
-      })
-    } else {
-      val (timestampFile, timestampFileType) = getTimestampFileAndType("", "")
-      if (FileFactory.isFileExist(timestampFile, timestampFileType)) {
-        if (!(FileFactory.getCarbonFile(timestampFile, timestampFileType).
-          getLastModifiedTime == cubeModifiedTimeStore.get("default"))) {
-          refreshCache()
-        }
+    val (timestampFile, timestampFileType) = getTimestampFileAndType("", "")
+    if (FileFactory.isFileExist(timestampFile, timestampFileType)) {
+      if (!(FileFactory.getCarbonFile(timestampFile, timestampFileType).
+        getLastModifiedTime == cubeModifiedTimeStore.get("default"))) {
+        refreshCache()
       }
     }
   }

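After the removal of carbon.unified.store.path, the catalog keeps a single timestamp entry keyed "default" and refreshes its cache when the shared schema timestamp file changes. For illustration only, a small Java sketch of that last-modified-time check; the class and method names are invented, and the real logic is the Scala shown above.

import java.io.File;
import java.util.concurrent.ConcurrentHashMap;

// Sketch only: remember the last-modified time of the shared timestamp file under one
// "default" key and signal a refresh when the file on disk is newer than the remembered value.
public final class SchemaTimestampSketch {
  private final ConcurrentHashMap<String, Long> modifiedTimeStore = new ConcurrentHashMap<>();

  void recordTimestamp(File timestampFile) {
    modifiedTimeStore.put("default", timestampFile.lastModified());
  }

  boolean needsRefresh(File timestampFile) {
    if (!timestampFile.exists()) {
      return false;  // nothing to compare against yet
    }
    Long cached = modifiedTimeStore.get("default");
    return cached == null || cached != timestampFile.lastModified();
  }
}
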
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/60490179/processing/src/main/java/org/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/processing/store/CarbonFactDataHandlerColumnar.java b/processing/src/main/java/org/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
index d77f8b5..4c4652c 100644
--- a/processing/src/main/java/org/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
+++ b/processing/src/main/java/org/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
@@ -86,12 +86,6 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
   private static final LogService LOGGER =
       LogServiceFactory.getLogService(CarbonFactDataHandlerColumnar.class.getName());
   /**
-   * decimalPointers
-   */
-  private final byte decimalPointers = Byte.parseByte(CarbonProperties.getInstance()
-      .getProperty(CarbonCommonConstants.CARBON_DECIMAL_POINTERS,
-          CarbonCommonConstants.CARBON_DECIMAL_POINTERS_DEFAULT));
-  /**
    * data writer
    */
   private CarbonFactDataWriter dataWriter;
@@ -870,7 +864,7 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
           double minVal = (double) min[count];
           max[count] = (maxVal > value ? max[count] : value);
           min[count] = (minVal < value ? min[count] : value);
-          int num = (value % 1 == 0) ? 0 : decimalPointers;
+          int num = (value % 1 == 0) ? 0 : CarbonCommonConstants.CARBON_DECIMAL_POINTERS_DEFAULT;
           decimal[count] = (decimal[count] > num ? decimal[count] : num);
         } else if (type[count] == CarbonCommonConstants.BIG_INT_MEASURE) {
           long value = (long) row[count];
@@ -878,7 +872,7 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
           long minVal = (long) min[count];
           max[count] = (maxVal > value ? max[count] : value);
           min[count] = (minVal < value ? min[count] : value);
-          int num = (value % 1 == 0) ? 0 : decimalPointers;
+          int num = (value % 1 == 0) ? 0 : CarbonCommonConstants.CARBON_DECIMAL_POINTERS_DEFAULT;
           decimal[count] = (decimal[count] > num ? decimal[count] : num);
         } else if (type[count] == CarbonCommonConstants.BIG_DECIMAL_MEASURE) {
           byte[] buff = (byte[]) row[count];

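The handler now reads the decimal width directly from CARBON_DECIMAL_POINTERS_DEFAULT (5 in the constants diff above) when accumulating per-measure statistics. For illustration only, a compact sketch of that min/max/decimal bookkeeping; the class itself is invented.

// Sketch only: per-measure statistics for double values, using a fixed decimal width
// for fractional values, mirroring the change in the diff above.
public final class MeasureStatsSketch {
  static final int DECIMAL_POINTERS_DEFAULT = 5;

  double min = Double.MAX_VALUE;
  double max = -Double.MAX_VALUE;
  int decimal = 0;

  void update(double value) {
    max = Math.max(max, value);
    min = Math.min(min, value);
    int num = (value % 1 == 0) ? 0 : DECIMAL_POINTERS_DEFAULT;  // whole values need no decimal digits
    decimal = Math.max(decimal, num);
  }
}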
