carbondata-commits mailing list archives

From man...@apache.org
Subject [carbondata] branch master updated: [CARBONDATA-3889] Cleanup code typo in carbondata-spark module
Date Thu, 30 Jul 2020 12:18:59 GMT
This is an automated email from the ASF dual-hosted git repository.

manhua pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new e92348e  [CARBONDATA-3889] Cleanup code typo in carbondata-spark module
e92348e is described below

commit e92348e95fc22f7edcf57cf2a4a7bb956fe86a1f
Author: QiangCai <qiangcai@qq.com>
AuthorDate: Tue Jul 28 21:34:03 2020 +0800

    [CARBONDATA-3889] Cleanup code typo in carbondata-spark module
    
    Why is this PR needed?
    There are many typos in carbondata-spark module.
    
    What changes were proposed in this PR?
    Cleanup code typo in carbondata-spark module
    
    Does this PR introduce any user interface change?
    No, it does not change any interface names.
    
    Is any new testcase added?
    No, it does not impact any functionality.
    
    This closes #3867
---
 .../org/apache/carbondata/api/CarbonStore.scala    | 18 +++---
 .../carbondata/events/AlterTableEvents.scala       |  4 +-
 .../carbondata/events/DeleteSegmentEvents.scala    | 12 ++--
 .../org/apache/carbondata/events/Events.scala      |  6 +-
 .../carbondata/events/IndexServerEvents.scala      |  4 +-
 .../index/CarbonMergeBloomIndexFilesRDD.scala      |  2 +-
 .../apache/carbondata/index/IndexRebuildRDD.scala  |  6 +-
 .../indexserver/DistributedRDDUtils.scala          |  2 +-
 .../apache/carbondata/indexserver/IndexJobs.scala  | 31 +++++-----
 .../carbondata/indexserver/IndexServer.scala       | 10 ++--
 .../scala/org/apache/carbondata/spark/KeyVal.scala |  6 +-
 .../carbondata/spark/load/CsvRDDHelper.scala       |  2 +-
 .../spark/load/DataLoadProcessBuilderOnSpark.scala | 18 +++---
 .../spark/load/DataLoadProcessorStepOnSpark.scala  |  4 +-
 .../carbondata/spark/load/GlobalSortHelper.scala   |  2 +-
 .../spark/rdd/CarbonDataRDDFactory.scala           | 28 ++++-----
 .../carbondata/spark/rdd/CarbonMergerRDD.scala     | 22 +++----
 .../carbondata/spark/rdd/CarbonScanRDD.scala       |  8 +--
 .../spark/rdd/CarbonTableCompactor.scala           |  8 +--
 .../carbondata/spark/rdd/CompactionFactory.scala   |  2 +-
 .../spark/rdd/NewCarbonDataLoadRDD.scala           | 24 ++++----
 .../carbondata/spark/rdd/StreamHandoffRDD.scala    | 18 +++---
 .../carbondata/spark/util/CarbonScalaUtil.scala    | 32 +++++-----
 .../apache/carbondata/spark/util/CommonUtil.scala  |  4 +-
 .../carbondata/spark/util/DataGenerator.scala      |  2 +-
 .../vectorreader/VectorizedCarbonRecordReader.java |  2 +-
 .../carbondata/stream/StreamJobManager.scala       |  2 +-
 .../carbondata/streaming/StreamSinkFactory.scala   |  4 +-
 .../org/apache/carbondata/view/MVRefresher.scala   | 14 ++---
 .../org/apache/spark/CarbonInputMetrics.scala      |  2 +-
 .../apache/spark/DataSkewRangePartitioner.scala    | 12 ++--
 .../spark/rdd/DataLoadPartitionCoalescer.scala     | 20 +++----
 .../apache/spark/sql/CarbonDataFrameWriter.scala   |  2 +-
 .../spark/sql/CarbonDatasourceHadoopRelation.scala |  4 +-
 .../scala/org/apache/spark/sql/CarbonEnv.scala     |  6 +-
 .../org/apache/spark/sql/CarbonExpressions.scala   |  4 +-
 .../scala/org/apache/spark/sql/CarbonSession.scala |  8 +--
 .../org/apache/spark/sql/CarbonVectorProxy.java    |  4 +-
 .../spark/sql/CustomDeterministicExpression.scala  |  2 +-
 .../execution/datasources/CarbonFileIndex.scala    |  2 +-
 .../datasources/CarbonFileIndexReplaceRule.scala   |  2 +-
 .../datasources/SparkCarbonFileFormat.scala        |  4 +-
 ...uport.scala => SparkUnsafeRowReadSupport.scala} |  2 +-
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala    | 14 ++---
 .../spark/sql/catalyst/CarbonParserUtil.scala      | 12 ++--
 .../sql/events/MergeBloomIndexEventListener.scala  |  6 +-
 .../sql/execution/CastExpressionOptimization.scala |  9 +--
 .../command/cache/CarbonShowCacheCommand.scala     |  2 +-
 .../command/carbonTableSchemaCommon.scala          | 34 +++++------
 .../command/index/CarbonCreateIndexCommand.scala   |  2 +-
 .../execution/command/index/DropIndexCommand.scala |  2 +-
 .../command/management/CarbonAddLoadCommand.scala  |  6 +-
 .../CarbonAlterTableCompactionCommand.scala        |  2 +-
 .../management/CarbonInsertFromStageCommand.scala  | 18 +++---
 .../management/CarbonInsertIntoCommand.scala       |  2 +-
 .../management/CarbonInsertIntoWithDf.scala        |  2 +-
 .../command/management/CarbonLoadDataCommand.scala |  2 +-
 .../CarbonShowSegmentsAsSelectCommand.scala        |  4 +-
 .../command/management/CommonLoadUtils.scala       | 24 ++++----
 .../management/RefreshCarbonTableCommand.scala     |  6 +-
 .../mutation/CarbonProjectForDeleteCommand.scala   |  4 +-
 .../mutation/CarbonProjectForUpdateCommand.scala   |  4 +-
 .../command/mutation/DeleteExecution.scala         | 16 ++---
 .../execution/command/mutation/IUDCommonUtil.scala |  4 +-
 .../mutation/merge/CarbonMergeDataSetCommand.scala | 14 ++---
 .../mutation/merge/HistoryTableLoadHelper.scala    |  6 +-
 .../mutation/merge/MergeDataSetBuilder.scala       |  2 +-
 .../command/mutation/merge/MergeProjection.scala   |  6 +-
 .../command/mutation/merge/MutationAction.scala    | 10 ++--
 .../command/mutation/merge/interfaces.scala        |  2 +-
 .../CarbonAlterTableAddHivePartitionCommand.scala  |  6 +-
 .../CarbonAlterTableDropHivePartitionCommand.scala |  2 +-
 .../schema/CarbonAlterTableDropColumnCommand.scala |  2 +-
 .../command/table/CarbonCreateTableCommand.scala   |  3 +-
 .../command/table/CarbonExplainCommand.scala       |  4 +-
 .../command/view/CarbonCreateMVCommand.scala       |  6 +-
 .../datasources/SparkCarbonTableFormat.scala       | 12 ++--
 .../sql/execution/strategy/CarbonPlanHelper.scala  |  2 +-
 .../spark/sql/execution/strategy/DDLHelper.scala   |  8 +--
 .../execution/strategy/MixedFormatHandler.scala    | 14 ++---
 .../sql/execution/strategy/PushDownHelper.scala    |  6 +-
 .../spark/sql/hive/CarbonAnalysisRules.scala       |  4 +-
 .../spark/sql/hive/CarbonFileMetastore.scala       |  2 +-
 .../sql/hive/CarbonHiveIndexMetadataUtil.scala     |  4 +-
 .../spark/sql/hive/CarbonHiveMetaStore.scala       |  4 +-
 .../apache/spark/sql/hive/CarbonMetaStore.scala    |  4 +-
 .../org/apache/spark/sql/hive/CarbonRelation.scala | 12 ++--
 .../spark/sql/hive/CarbonSessionCatalog.scala      |  2 +-
 .../spark/sql/hive/CarbonSessionCatalogUtil.scala  |  6 +-
 .../apache/spark/sql/hive/CarbonSessionUtil.scala  |  4 +-
 .../CreateCarbonSourceTableAsSelectCommand.scala   |  2 +-
 .../apache/spark/sql/hive/DistributionUtil.scala   | 18 +++---
 .../spark/sql/hive/SqlAstBuilderHelper.scala       |  2 +-
 .../execution/command/CarbonHiveCommands.scala     |  2 +-
 .../apache/spark/sql/index/CarbonIndexUtil.scala   |  4 +-
 .../apache/spark/sql/listeners/MVListeners.scala   |  2 +-
 .../spark/sql/listeners/PrePrimingListener.scala   | 10 ++--
 .../sql/optimizer/CarbonUDFTransformRule.scala     |  4 +-
 .../org/apache/spark/sql/optimizer/MVMatcher.scala | 68 +++++++++++-----------
 .../apache/spark/sql/optimizer/MVRewriteRule.scala |  2 +-
 .../spark/sql/parser/CarbonSpark2SqlParser.scala   | 18 +++---
 .../sql/parser/CarbonSparkSqlParserUtil.scala      | 24 ++++----
 .../apache/spark/sql/profiler/ProfilerLogger.scala |  4 +-
 .../Jobs/BlockletIndexDetailsWithSchema.java       |  2 +-
 .../Jobs/CarbonBlockLoaderHelper.java              |  4 +-
 .../Jobs/SparkBlockletIndexLoaderJob.scala         |  2 +-
 .../secondaryindex/command/SICreationCommand.scala | 20 +++----
 .../AlterTableColumnRenameEventListener.scala      |  2 +-
 .../AlterTableCompactionPostEventListener.scala    |  2 +-
 .../SILoadEventListenerForFailedSegments.scala     | 14 ++---
 .../hive/CarbonInternalMetastore.scala             |  2 +-
 .../joins/BroadCastSIFilterPushJoin.scala          | 22 +++----
 .../load/CarbonInternalLoaderUtil.java             | 44 +++++++-------
 ...aratorWithOutKettle.java => RowComparator.java} | 14 ++---
 .../optimizer/CarbonSITransformationRule.scala     |  4 +-
 .../optimizer/CarbonSecondaryIndexOptimizer.scala  | 31 +++++-----
 .../query/SecondaryIndexQueryResultProcessor.java  |  6 +-
 .../secondaryindex/rdd/CarbonSIRebuildRDD.scala    |  6 +-
 .../secondaryindex/rdd/SecondaryIndexCreator.scala | 10 ++--
 .../secondaryindex/util/SecondaryIndexUtil.scala   |  6 +-
 .../org/apache/spark/sql/test/util/QueryTest.scala |  2 +-
 .../org/apache/spark/sql/util/SparkSQLUtil.scala   |  8 +--
 .../apache/spark/sql/util/SparkTypeConverter.scala |  6 +-
 .../org/apache/spark/util/AlterTableUtil.scala     | 18 +++---
 .../apache/spark/util/CarbonReflectionUtils.scala  | 10 ++--
 .../scala/org/apache/spark/util/SparkUtil.scala    |  6 +-
 .../scala/org/apache/spark/util/TableLoader.scala  | 10 ++--
 .../apache/spark/sql/CarbonToSparkAdapter.scala    |  2 +-
 .../spark/sql/hive/CarbonSessionStateBuilder.scala |  4 +-
 .../testsuite/binary/TestBinaryDataType.scala      |  2 +-
 .../TestCreateHiveTableWithCarbonDS.scala          |  2 +-
 .../TestNonTransactionalCarbonTableForBinary.scala |  4 +-
 .../dataload/TestRangeColumnDataLoad.scala         |  4 +-
 .../dblocation/DBLocationCarbonTableTestCase.scala | 32 +++++-----
 .../testsuite/iud/UpdateCarbonTableTestCase.scala  |  6 +-
 .../StandardPartitionTableQueryTestCase.scala      |  2 +-
 .../register/TestRegisterCarbonTable.scala         | 48 +++++++--------
 .../AlterTableColumnRenameTestCase.scala           |  2 +-
 .../SparkCarbonDataSourceBinaryTest.scala          |  2 +-
 .../datasource/SparkCarbonDataSourceTest.scala     | 28 ++++-----
 ...TestCreateTableUsingSparkCarbonFileFormat.scala | 14 ++---
 .../org/apache/spark/util/SparkUtilTest.scala      | 22 +++----
 142 files changed, 637 insertions(+), 641 deletions(-)

diff --git a/integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala b/integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
index 6f259b0..c45143a 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
@@ -117,7 +117,7 @@ object CarbonStore {
 
   /*
    * Collect all stage files and matched success files and loading files.
-   * return unloaded stagefiles and loading stagefiles in the end.
+   * return unloaded stage files and loading stage files in the end.
    */
   def listStageFiles(
         loadDetailsDir: String): (Array[CarbonFile], Array[CarbonFile]) = {
@@ -343,7 +343,7 @@ object CarbonStore {
         if (metadataDetail.getSegmentStatus.equals(SegmentStatus.MARKED_FOR_DELETE) &&
             metadataDetail.getSegmentFile == null) {
           val loadStartTime: Long = metadataDetail.getLoadStartTime
-          // delete all files of @loadStartTime from tablepath
+          // delete all files of @loadStartTime from table path
           cleanCarbonFilesInFolder(listOfDefaultPartFilesIterator, loadStartTime)
           partitionSpecList.foreach {
             partitionSpec =>
@@ -352,7 +352,7 @@ object CarbonStore {
               if (!partitionLocation.toString.startsWith(carbonTable.getTablePath)) {
                 val partitionCarbonFile = FileFactory
                   .getCarbonFile(partitionLocation.toString)
-                // list all files from partitionLoacation
+                // list all files from partitionLocation
                 val listOfExternalPartFilesIterator = partitionCarbonFile.listFiles(true)
                 // delete all files of @loadStartTime from externalPath
                 cleanCarbonFilesInFolder(listOfExternalPartFilesIterator, loadStartTime)
@@ -382,8 +382,8 @@ object CarbonStore {
   }
 
   // validates load ids
-  private def validateLoadIds(loadids: Seq[String]): Unit = {
-    if (loadids.isEmpty) {
+  private def validateLoadIds(loadIds: Seq[String]): Unit = {
+    if (loadIds.isEmpty) {
       val errorMessage = "Error: Segment id(s) should not be empty."
       throw new MalformedCarbonCommandException(errorMessage)
     }
@@ -391,20 +391,20 @@ object CarbonStore {
 
   // TODO: move dbName and tableName to caller, caller should handle the log and error
   def deleteLoadById(
-      loadids: Seq[String],
+      loadIds: Seq[String],
       dbName: String,
       tableName: String,
       carbonTable: CarbonTable): Unit = {
 
-    validateLoadIds(loadids)
+    validateLoadIds(loadIds)
 
     val path = carbonTable.getMetadataPath
 
     try {
       val invalidLoadIds = SegmentStatusManager.updateDeletionStatus(
-        carbonTable.getAbsoluteTableIdentifier, loadids.asJava, path).asScala
+        carbonTable.getAbsoluteTableIdentifier, loadIds.asJava, path).asScala
       if (invalidLoadIds.isEmpty) {
-        LOGGER.info(s"Delete segment by Id is successfull for $dbName.$tableName.")
+        LOGGER.info(s"Delete segment by Id is successful for $dbName.$tableName.")
       } else {
         sys.error(s"Delete segment by Id is failed. Invalid ID is: ${invalidLoadIds.mkString(",")}")
       }
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala b/integration/spark/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala
index 2b50118..d891c0c 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/events/AlterTableEvents.scala
@@ -155,7 +155,7 @@ case class AlterTableCompactionPreEvent(sparkSession: SparkSession,
     mergedLoadName: String) extends Event with AlterTableCompactionEventInfo
 
 /**
- * Compaction Event for handling pre update status file opeartions, lister has to implement this
+ * Compaction Event for handling pre update status file operations, lister has to implement this
  * event before updating the table status file
  * @param sparkSession
  * @param carbonTable
@@ -166,7 +166,7 @@ case class AlterTableCompactionPostEvent(sparkSession: SparkSession,
     carbonMergerMapping: CarbonMergerMapping,
     compactedLoads: java.util.List[String]) extends Event with AlterTableCompactionEventInfo
 /**
- * Compaction Event for handling pre update status file opeartions, lister has to implement this
+ * Compaction Event for handling pre update status file operations, lister has to implement this
  * event before updating the table status file
  * @param sparkSession
  * @param carbonTable
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/events/DeleteSegmentEvents.scala b/integration/spark/src/main/scala/org/apache/carbondata/events/DeleteSegmentEvents.scala
index 0008492..73daff1 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/events/DeleteSegmentEvents.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/events/DeleteSegmentEvents.scala
@@ -27,7 +27,7 @@ import org.apache.carbondata.core.metadata.schema.table.CarbonTable
  * @param sparkSession
  */
 case class DeleteSegmentByIdPreEvent(carbonTable: CarbonTable, loadIds: Seq[String],
-    sparkSession: SparkSession) extends Event with DeleteSegmentbyIdEventInfo
+    sparkSession: SparkSession) extends Event with DeleteSegmentByIdEventInfo
 
 
 /**
@@ -37,7 +37,7 @@ case class DeleteSegmentByIdPreEvent(carbonTable: CarbonTable, loadIds: Seq[Stri
  * @param sparkSession
  */
 case class DeleteSegmentByIdPostEvent(carbonTable: CarbonTable, loadIds: Seq[String],
-    sparkSession: SparkSession) extends Event with DeleteSegmentbyIdEventInfo
+    sparkSession: SparkSession) extends Event with DeleteSegmentByIdEventInfo
 
 
 /**
@@ -47,7 +47,7 @@ case class DeleteSegmentByIdPostEvent(carbonTable: CarbonTable, loadIds: Seq[Str
  * @param sparkSession
  */
 case class DeleteSegmentByIdAbortEvent(carbonTable: CarbonTable, loadIds: Seq[String],
-    sparkSession: SparkSession) extends Event with DeleteSegmentbyIdEventInfo
+    sparkSession: SparkSession) extends Event with DeleteSegmentByIdEventInfo
 
 /**
  *
@@ -56,7 +56,7 @@ case class DeleteSegmentByIdAbortEvent(carbonTable: CarbonTable, loadIds: Seq[St
  * @param sparkSession
  */
 case class DeleteSegmentByDatePreEvent(carbonTable: CarbonTable, loadDates: String,
-    sparkSession: SparkSession) extends Event with DeleteSegmentbyDateEventInfo
+    sparkSession: SparkSession) extends Event with DeleteSegmentByDateEventInfo
 
 /**
  *
@@ -65,7 +65,7 @@ case class DeleteSegmentByDatePreEvent(carbonTable: CarbonTable, loadDates: Stri
  * @param sparkSession
  */
 case class DeleteSegmentByDatePostEvent(carbonTable: CarbonTable, loadDates: String,
-    sparkSession: SparkSession) extends Event with DeleteSegmentbyDateEventInfo
+    sparkSession: SparkSession) extends Event with DeleteSegmentByDateEventInfo
 
 /**
  *
@@ -74,4 +74,4 @@ case class DeleteSegmentByDatePostEvent(carbonTable: CarbonTable, loadDates: Str
  * @param sparkSession
  */
 case class DeleteSegmentByDateAbortEvent(carbonTable: CarbonTable, loadDates: String,
-    sparkSession: SparkSession) extends Event with DeleteSegmentbyDateEventInfo
+    sparkSession: SparkSession) extends Event with DeleteSegmentByDateEventInfo
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/events/Events.scala b/integration/spark/src/main/scala/org/apache/carbondata/events/Events.scala
index a6f0457..50de629 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/events/Events.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/events/Events.scala
@@ -141,7 +141,7 @@ trait AlterTableHivePartitionInfo {
 /**
  * event for DeleteSegmentById
  */
-trait DeleteSegmentbyIdEventInfo {
+trait DeleteSegmentByIdEventInfo {
   val carbonTable: CarbonTable
   val loadIds: Seq[String]
 }
@@ -149,7 +149,7 @@ trait DeleteSegmentbyIdEventInfo {
 /**
  * event for DeleteSegmentByDate
  */
-trait DeleteSegmentbyDateEventInfo {
+trait DeleteSegmentByDateEventInfo {
   val carbonTable: CarbonTable
   val loadDates: String
 }
@@ -208,7 +208,7 @@ trait BuildIndexEventsInfo {
 }
 
 /**
- * EventInfo for prepriming on IndexServer. This event is used to
+ * EventInfo for pre-priming on IndexServer. This event is used to
  * fire a call to the index server when the load is complete.
  */
 trait IndexServerEventInfo {
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/events/IndexServerEvents.scala b/integration/spark/src/main/scala/org/apache/carbondata/events/IndexServerEvents.scala
index 48369cc..94b12ca 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/events/IndexServerEvents.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/events/IndexServerEvents.scala
@@ -21,11 +21,11 @@ import org.apache.spark.sql.SparkSession
 import org.apache.carbondata.core.index.Segment
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 
-// Event for Prepriming in cache
+// Event for Pre-priming in cache
 case class IndexServerLoadEvent(sparkSession: SparkSession,
     carbonTable: CarbonTable,
     segment: List[Segment],
-    invalidsegment: List[String]) extends Event with IndexServerEventInfo
+    invalidSegment: List[String]) extends Event with IndexServerEventInfo
 
 case class IndexServerEvent(sparkSession: SparkSession,
     carbonTable: CarbonTable,
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/index/CarbonMergeBloomIndexFilesRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/index/CarbonMergeBloomIndexFilesRDD.scala
index 1056ddd..5ed3585 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/index/CarbonMergeBloomIndexFilesRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/index/CarbonMergeBloomIndexFilesRDD.scala
@@ -31,7 +31,7 @@ import org.apache.carbondata.spark.rdd.CarbonRDD
 
 
 /**
- * RDD to merge all bloomindex files of specified segment for bloom index
+ * RDD to merge all bloom index files of specified segment for bloom index
  */
 class CarbonMergeBloomIndexFilesRDD(
   @transient private val ss: SparkSession,
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/index/IndexRebuildRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/index/IndexRebuildRDD.scala
index b756b52..85c20ea 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/index/IndexRebuildRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/index/IndexRebuildRDD.scala
@@ -196,7 +196,7 @@ class RawBytesReadSupport(segmentProperties: SegmentProperties, indexColumns: Ar
    */
   override def readRow(data: Array[Object]): Array[Object] = {
 
-    val surrogatKeys = if (segmentProperties.getNumberOfDictDimensions > 0) {
+    val surrogateKeys = if (segmentProperties.getNumberOfDictDimensions > 0) {
       ByteUtil.convertBytesToLongArray(
         data(0).asInstanceOf[ByteArrayWrapper].getDictionaryKey)
     } else {
@@ -207,7 +207,7 @@ class RawBytesReadSupport(segmentProperties: SegmentProperties, indexColumns: Ar
     val rtn = new Array[Object](indexColumns.length + 3)
     indexColumns.zipWithIndex.foreach { case (col, i) =>
       rtn(i) = if (indexCol2IdxInDictArray.contains(col.getColName)) {
-        surrogatKeys(indexCol2IdxInDictArray(col.getColName)).toInt.asInstanceOf[Integer]
+        surrogateKeys(indexCol2IdxInDictArray(col.getColName)).toInt.asInstanceOf[Integer]
       } else if (indexCol2IdxInNoDictArray.contains(col.getColName)) {
         val bytes = data(0).asInstanceOf[ByteArrayWrapper].getNoDictionaryKeyByIndex(
           indexCol2IdxInNoDictArray(col.getColName))
@@ -316,7 +316,7 @@ class IndexRebuildRDD[K, V](
         reader = new CarbonRecordReader[Array[Object]](model, readSupport, inputMetrics,
           attemptContext.getConfiguration)
         reader.initialize(inputSplit, attemptContext)
-        // skip clear indexSchema and we will do this adter rebuild
+        // skip clear indexSchema and we will do this after rebuild
         reader.setSkipClearIndexAtClose(true)
 
         // Note that indexSchema rebuilding is based on query, the blockletId in rowWithPosition
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/indexserver/DistributedRDDUtils.scala b/integration/spark/src/main/scala/org/apache/carbondata/indexserver/DistributedRDDUtils.scala
index 5f30fc9..615f280 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/indexserver/DistributedRDDUtils.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/indexserver/DistributedRDDUtils.scala
@@ -350,7 +350,7 @@ object DistributedRDDUtils {
   }
 
   /**
-   * This function creates an event for prepriming of the index server
+   * This function creates an event for pre-priming of the index server
    */
   def triggerPrepriming(sparkSession: SparkSession,
       carbonTable: CarbonTable,
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexJobs.scala b/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexJobs.scala
index 0d85fad..8d927ea 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexJobs.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexJobs.scala
@@ -52,7 +52,7 @@ class DistributedIndexJob extends AbstractIndexJob {
       .createTempFolderForIndexServer(indexFormat.getQueryId)
     LOGGER
       .info("Temp folder path for Query ID: " + indexFormat.getQueryId + " is " + splitFolderPath)
-    val (resonse, time) = logTime {
+    val (response, time) = logTime {
       try {
         val spark = SparkSQLUtil.getSparkSession
         indexFormat.setTaskGroupId(SparkSQLUtil.getTaskGroupId(spark))
@@ -73,34 +73,33 @@ class DistributedIndexJob extends AbstractIndexJob {
       }
     }
     LOGGER.info(s"Time taken to get response from server: $time ms")
-    resonse
+    response
   }
 
   /**
-   * Iterate over FiltersReslover,
-   *   a. Change only RowLevelFilterResolverImpl because SparkUnkown is part of it
-   * and others FilterReslover like ConditionalFilterResolverImpl so directly return.
-   *     b. Change SparkUnkownExpression to TrueExpression so that isScanRequired
+   * Iterate over FilterResolver,
+   * a. Change only RowLevelFilterResolverImpl because SparkUnknown is part of it;
+   * for other FilterResolver implementations like ConditionalFilterResolverImpl, return directly.
+   * b. Change SparkUnknownExpression to TrueExpression so that isScanRequired
    * selects block/blocklet.
    *
-   * @param filterInf       FiltersReslover to be changed
-   * @param tableIdentifer  AbsoluteTableIdentifier object
-   * @param filterProcessor changed FiltersReslover.
-   * @return
+   * @param filterInf       FilterResolver to be changed
+   * @param tableIdentifier AbsoluteTableIdentifier object
+   * @param filterProcessor FilterExpressionProcessor
+   * @return changed FilterResolver.
    */
-  def removeSparkUnknown(filterInf: FilterResolverIntf,
-      tableIdentifer: AbsoluteTableIdentifier,
-                         filterProcessor: FilterExpressionProcessor): FilterResolverIntf = {
+  def removeSparkUnknown(filterInf: FilterResolverIntf, tableIdentifier: AbsoluteTableIdentifier,
+      filterProcessor: FilterExpressionProcessor): FilterResolverIntf = {
     if (filterInf.isInstanceOf[LogicalFilterResolverImpl]) {
       return new LogicalFilterResolverImpl(
-        removeSparkUnknown(filterInf.getLeft, tableIdentifer, filterProcessor),
-        removeSparkUnknown(filterInf.getRight, tableIdentifer, filterProcessor),
+        removeSparkUnknown(filterInf.getLeft, tableIdentifier, filterProcessor),
+        removeSparkUnknown(filterInf.getRight, tableIdentifier, filterProcessor),
         filterProcessor.removeUnknownExpression(filterInf.getFilterExpression).
           asInstanceOf[BinaryExpression])
     }
     if (filterInf.isInstanceOf[RowLevelFilterResolverImpl] &&
         filterInf.getFilterExpression.getFilterExpressionType == ExpressionType.UNKNOWN) {
-      return filterProcessor.changeUnknownResolverToTrue(tableIdentifer)
+      return filterProcessor.changeUnknownResolverToTrue(tableIdentifier)
     }
     filterInf
   }
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexServer.scala b/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexServer.scala
index e8768fd..4eab3e4 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexServer.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexServer.scala
@@ -192,8 +192,8 @@ object IndexServer extends ServerInterface {
       val sparkSession = SparkSQLUtil.getSparkSession
       val databaseName = carbonTable.getDatabaseName
       val tableName = carbonTable.getTableName
-      val jobgroup: String = " Invalided Segment Cache for " + databaseName + "." + tableName
-      sparkSession.sparkContext.setLocalProperty("spark.job.description", jobgroup)
+      val jobGroup: String = " Invalided Segment Cache for " + databaseName + "." + tableName
+      sparkSession.sparkContext.setLocalProperty("spark.job.description", jobGroup)
       sparkSession.sparkContext.setLocalProperty("spark.jobGroup.id", jobGroupId)
       if (!isFallBack) {
         val indexServerEvent = IndexServerEvent(sparkSession,
@@ -213,7 +213,7 @@ object IndexServer extends ServerInterface {
 
   override def showCache(tableId: String = "", executorCache: Boolean): Array[String] = {
     doAs {
-      val jobgroup: String = "Show Cache " + (tableId match {
+      val jobGroup: String = "Show Cache " + (tableId match {
         case "" =>
           if (executorCache) {
             "for all the Executors."
@@ -224,7 +224,7 @@ object IndexServer extends ServerInterface {
       })
       val sparkSession = SparkSQLUtil.getSparkSession
       sparkSession.sparkContext.setLocalProperty("spark.jobGroup.id", UUID.randomUUID().toString)
-      sparkSession.sparkContext.setLocalProperty("spark.job.description", jobgroup)
+      sparkSession.sparkContext.setLocalProperty("spark.job.description", jobGroup)
       new DistributedShowCacheRDD(sparkSession, tableId, executorCache).collect()
     }
   }
@@ -306,7 +306,7 @@ object IndexServer extends ServerInterface {
   }
 
   /**
-   * This class to define the acl for indexserver ,similar to HDFSPolicyProvider.
+   * This class defines the ACL for the index server, similar to HDFSPolicyProvider.
    * key in Service can be configured in hadoop-policy.xml or in  Configuration().This ACL
    * will be used for Authorization in
    * org.apache.hadoop.security.authorize.ServiceAuthorizationManager#authorize
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/KeyVal.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/KeyVal.scala
index 92f656c..f4de2ea 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/KeyVal.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/KeyVal.scala
@@ -75,12 +75,12 @@ class updateResultImpl
   }
 }
 
-trait DeleteDelataResult[K, V] extends Serializable {
+trait DeleteDeltaResult[K, V] extends Serializable {
   def getKey(key: SegmentStatus, value: (SegmentUpdateDetails, ExecutionErrors, Long)): (K, V)
 }
 
-class DeleteDelataResultImpl
-  extends DeleteDelataResult[SegmentStatus, (SegmentUpdateDetails, ExecutionErrors, Long)] {
+class DeleteDeltaResultImpl
+  extends DeleteDeltaResult[SegmentStatus, (SegmentUpdateDetails, ExecutionErrors, Long)] {
   override def getKey(key: SegmentStatus,
       value: (SegmentUpdateDetails, ExecutionErrors, Long)): (SegmentStatus, (SegmentUpdateDetails,
     ExecutionErrors, Long)) = {
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/CsvRDDHelper.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/CsvRDDHelper.scala
index 2548110..fde8d45 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/CsvRDDHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/CsvRDDHelper.scala
@@ -50,7 +50,7 @@ object CsvRDDHelper {
   private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
 
   /**
-   * createsw a RDD that does reading of multiple CSV files
+   * creates an RDD that reads multiple CSV files
    */
   def csvFileScanRDD(
       spark: SparkSession,
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
index e7e1baf..419e0b6 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
@@ -174,7 +174,7 @@ object DataLoadProcessBuilderOnSpark {
 
     // clean cache only if persisted and keeping unpersist non-blocking as non-blocking call will
     // not have any functional impact as spark automatically monitors the cache usage on each node
-    // and drops out old data partiotions in a least-recently used (LRU) fashion.
+    // and drops out old data partitions in a least-recently used (LRU) fashion.
     if (numPartitions > 1) {
       convertRDD.unpersist(false)
     }
@@ -267,7 +267,7 @@ object DataLoadProcessBuilderOnSpark {
     })
     // clean cache only if persisted and keeping unpersist non-blocking as non-blocking call will
     // not have any functional impact as spark automatically monitors the cache usage on each node
-    // and drops out old data partiotions in a least-recently used (LRU) fashion.
+    // and drops out old data partitions in a least-recently used (LRU) fashion.
     if (numPartitions > 1) {
       rdd.unpersist(false)
     }
@@ -382,7 +382,7 @@ object DataLoadProcessBuilderOnSpark {
 
   /**
    * provide RDD for sample
-   * CSVRecordReader(univocity parser) will output only one column
+   * CSVRecordReader(Univocity parser) will output only one column
    */
   private def getSampleRDD(
       sparkSession: SparkSession,
@@ -446,13 +446,13 @@ object DataLoadProcessBuilderOnSpark {
         // better to generate a CarbonData file for each partition
         val totalSize = model.getTotalSize.toDouble
         val table = model.getCarbonDataLoadSchema.getCarbonTable
-        numPartitions = getNumPatitionsBasedOnSize(totalSize, table, model, false)
+        numPartitions = getNumPartitionsBasedOnSize(totalSize, table, model, false)
       }
     }
     numPartitions
   }
 
-  def getNumPatitionsBasedOnSize(totalSize: Double,
+  def getNumPartitionsBasedOnSize(totalSize: Double,
       table: CarbonTable,
       model: CarbonLoadModel,
       mergerFlag: Boolean): Int = {
@@ -482,16 +482,16 @@ object DataLoadProcessBuilderOnSpark {
     if (column.isDimension) {
       val dimension = column.asInstanceOf[CarbonDimension]
       if (dimension.getDataType == DataTypes.DATE) {
-        new PrimtiveOrdering(DataTypes.INT)
+        new PrimitiveOrdering(DataTypes.INT)
       } else {
         if (DataTypeUtil.isPrimitiveColumn(column.getDataType)) {
-          new PrimtiveOrdering(column.getDataType)
+          new PrimitiveOrdering(column.getDataType)
         } else {
           new ByteArrayOrdering()
         }
       }
     } else {
-      new PrimtiveOrdering(column.getDataType)
+      new PrimitiveOrdering(column.getDataType)
     }
   }
 
@@ -574,7 +574,7 @@ object DataLoadProcessBuilderOnSpark {
   }
 }
 
-class PrimtiveOrdering(dataType: DataType) extends Ordering[Object] {
+class PrimitiveOrdering(dataType: DataType) extends Ordering[Object] {
   val comparator = org.apache.carbondata.core.util.comparator.Comparator
     .getComparator(dataType)
 
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala
index 40b8ad0..05b9ac3 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala
@@ -165,7 +165,7 @@ object DataLoadProcessorStepOnSpark {
     }
   }
 
-  def inputAndconvertFunc(
+  def inputAndConvertFunc(
       rows: Iterator[Array[AnyRef]],
       index: Int,
       modelBroadcast: Broadcast[CarbonLoadModel],
@@ -467,7 +467,7 @@ object DataLoadProcessorStepOnSpark {
 class NewInputProcessorStepImpl(configuration: CarbonDataLoadConfiguration,
     rows: Iterator[CarbonRow]) extends AbstractDataLoadProcessorStep(configuration, null) {
   /**
-   * Tranform the data as per the implementation.
+   * Transform the data as per the implementation.
    *
    * @return Array of Iterator with data. It can be processed parallel if implementation class wants
    * @throws CarbonDataLoadingException
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/GlobalSortHelper.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/GlobalSortHelper.scala
index 00891b9..8029808 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/GlobalSortHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/GlobalSortHelper.scala
@@ -35,7 +35,7 @@ object GlobalSortHelper {
    *
    * @param loadModel       Carbon load model instance
    * @param badRecordsAccum Accumulator to maintain the load state if 0 then success id !0 then
-   *                        partial successfull
+   *                        partially successful
    * @param hasBadRecord    if <code>true<code> then load bad records vice versa.
    */
   def badRecordsLogger(loadModel: CarbonLoadModel,
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index daa4ca2..540de75 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -206,7 +206,7 @@ object CarbonDataRDDFactory {
               val compactionSize = CarbonDataMergerUtil
                 .getCompactionSize(CompactionType.MAJOR, carbonLoadModel)
 
-              val newcompactionModel = CompactionModel(
+              val newCompactionModel = CompactionModel(
                 compactionSize,
                 compactionType,
                 table,
@@ -218,7 +218,7 @@ object CarbonDataRDDFactory {
               try {
                 CompactionFactory.getCompactor(
                   newCarbonLoadModel,
-                  newcompactionModel,
+                  newCompactionModel,
                   executor,
                   sqlContext,
                   storeLocation,
@@ -484,7 +484,7 @@ object CarbonDataRDDFactory {
         LOGGER.error(ex)
     }
     try {
-      // handle the status file updation for the update cmd.
+      // handle the status file update for the update cmd.
       if (updateModel.isDefined && !updateModel.get.loadAsNewSegment) {
         if (loadStatus == SegmentStatus.LOAD_FAILURE) {
           CarbonScalaUtil.updateErrorInUpdateModel(updateModel.get, executorMessage)
@@ -494,7 +494,7 @@ object CarbonDataRDDFactory {
                    carbonLoadModel.getBadRecordsAction.split(",")(1) == LoggerAction.FAIL.name) {
           return null
         } else {
-          // in success case handle updation of the table status file.
+          // in success case handle update of the table status file.
           // success case.
           val segmentDetails = new util.HashSet[Segment]()
           var resultSize = 0
@@ -520,7 +520,7 @@ object CarbonDataRDDFactory {
             segmentMetaDataInfoMap.asJava)
 
           // this means that the update doesnt have any records to update so no need to do table
-          // status file updation.
+          // status file update.
           if (resultSize == 0) {
             return null
           }
@@ -531,7 +531,7 @@ object CarbonDataRDDFactory {
             true,
             new util.ArrayList[Segment](0),
             new util.ArrayList[Segment](segmentFiles), "")) {
-            LOGGER.error("Data update failed due to failure in table status updation.")
+            LOGGER.error("Data update failed due to failure in table status update.")
             updateModel.get.executorErrors.errorMsg = errorMessage
             updateModel.get.executorErrors.failureCauses = FailureCauses
               .STATUS_FILE_UPDATION_FAILURE
@@ -652,8 +652,8 @@ object CarbonDataRDDFactory {
             clearIndexFiles(carbonTable, carbonLoadModel.getSegmentId)
           }
           LOGGER.info("********clean up done**********")
-          LOGGER.error("Data load failed due to failure in table status updation.")
-          throw new Exception("Data load failed due to failure in table status updation.")
+          LOGGER.error("Data load failed due to failure in table status update.")
+          throw new Exception("Data load failed due to failure in table status update.")
         }
         if (SegmentStatus.LOAD_PARTIAL_SUCCESS == loadStatus) {
           LOGGER.info("Data load is partially successful for " +
@@ -714,8 +714,8 @@ object CarbonDataRDDFactory {
   }
   /**
    * Add and update the segment files. In case of update scenario the carbonindex files are written
-   * to the same segment so we need to update old segment file. So this ethod writes the latest data
-   * to new segment file and merges this file old file to get latest updated files.
+   * to the same segment so we need to update the old segment file. So this method writes the
+   * latest data to a new segment file and merges it with the old file to get the latest files.
    * @param carbonTable
    * @param segmentDetails
    * @return
@@ -1064,10 +1064,10 @@ object CarbonDataRDDFactory {
   }
 
   /**
-   * Execute load process to load from input dataframe
+   * Execute load process to load from input DataFrame
    *
    * @param sqlContext sql context
-   * @param dataFrame optional dataframe for insert
+   * @param dataFrame optional DataFrame for insert
    * @param scanResultRDD optional internal row rdd for direct insert
    * @param carbonLoadModel load model
    * @return Return an array that contains all of the elements in NewDataFrameLoaderRDD.
@@ -1194,8 +1194,8 @@ object CarbonDataRDDFactory {
       str = str + "#Node: " + entry._1 + ", no.of.blocks: " + tableBlock.size() +
             f", totalsize.of.blocks: ${totalSize * 0.1 * 10 / 1024 /1024}%.2fMB"
       tableBlock.asScala.foreach(tableBlockInfo =>
-        if (!tableBlockInfo.getLocations.exists(hostentry =>
-          hostentry.equalsIgnoreCase(entry._1)
+        if (!tableBlockInfo.getLocations.exists(hostEntry =>
+          hostEntry.equalsIgnoreCase(entry._1)
         )) {
           str = str + " , mismatch locations: " + tableBlockInfo.getLocations
             .foldLeft("")((a, b) => a + "," + b)
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
index fff5863..c849a54 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
@@ -67,7 +67,7 @@ import org.apache.carbondata.processing.loading.model.CarbonLoadModel
 import org.apache.carbondata.processing.merger._
 import org.apache.carbondata.processing.util.{CarbonDataProcessorUtil, CarbonLoaderUtil}
 import org.apache.carbondata.spark.MergeResult
-import org.apache.carbondata.spark.load.{DataLoadProcessBuilderOnSpark, PrimtiveOrdering, StringOrdering}
+import org.apache.carbondata.spark.load.{DataLoadProcessBuilderOnSpark, PrimitiveOrdering, StringOrdering}
 import org.apache.carbondata.spark.util.{CarbonScalaUtil, CommonUtil}
 
 class CarbonMergerRDD[K, V](
@@ -141,12 +141,12 @@ class CarbonMergerRDD[K, V](
 
         // During UPDATE DELTA COMPACTION case all the blocks received in compute belongs to
         // one segment, so max cardinality will be calculated from first block of segment
-        if (CompactionType.IUD_UPDDEL_DELTA == carbonMergerMapping.campactionType) {
+        if (CompactionType.IUD_UPDDEL_DELTA == carbonMergerMapping.compactionType) {
           var dataFileFooter: DataFileFooter = null
           try {
             // As the tableBlockInfoList is sorted take the ColCardinality from the last
             // Block of the sorted list as it will have the last updated cardinality.
-            // Blocks are sorted by order of updation using TableBlockInfo.compare method so
+            // Blocks are sorted by order of the update using TableBlockInfo.compare method so
             // the last block after the sort will be the latest one.
             dataFileFooter = CarbonUtil
               .readMetadataFile(tableBlockInfoList.get(tableBlockInfoList.size() - 1))
@@ -160,7 +160,7 @@ class CarbonMergerRDD[K, V](
           carbonMergerMapping.maxSegmentColumnSchemaList = dataFileFooter.getColumnInTable.asScala
             .toList
         }
-        mergeNumber = if (CompactionType.IUD_UPDDEL_DELTA == carbonMergerMapping.campactionType) {
+        mergeNumber = if (CompactionType.IUD_UPDDEL_DELTA == carbonMergerMapping.compactionType) {
           tableBlockInfoList.get(0).getSegment.toString
         } else {
           mergedLoadName.substring(
@@ -247,7 +247,7 @@ class CarbonMergerRDD[K, V](
             segmentProperties,
             tempStoreLoc,
             carbonLoadModel,
-            carbonMergerMapping.campactionType,
+            carbonMergerMapping.compactionType,
             partitionSpec)
 
         } else {
@@ -257,7 +257,7 @@ class CarbonMergerRDD[K, V](
             carbonLoadModel,
             carbonTable,
             segmentProperties,
-            carbonMergerMapping.campactionType,
+            carbonMergerMapping.compactionType,
             factTableName,
             partitionSpec)
 
@@ -408,7 +408,7 @@ class CarbonMergerRDD[K, V](
       val numOfPartitions = Math
         .max(CarbonCommonConstants.NUM_CORES_DEFAULT_VAL.toInt,
           Math.min(totalTaskCount, DataLoadProcessBuilderOnSpark
-            .getNumPatitionsBasedOnSize(totalSize, carbonTable, carbonLoadModel, true)))
+            .getNumPartitionsBasedOnSize(totalSize, carbonTable, carbonLoadModel, true)))
       val colName = rangeColumn.getColName
       LOGGER.info(s"Compacting on range column: $colName")
       allRanges = getRangesFromRDD(rangeColumn,
@@ -524,7 +524,7 @@ class CarbonMergerRDD[K, V](
               // Creating FilterExpression for the range column
               var minVal: Object = null
               var maxVal: Object = null
-              // For first task we will create an Or Filter and also accomodate null values
+              // For first task we will create an Or Filter and also accommodate null values
               // For last task we will take as GreaterThan Expression of last value
               if (i != 0) {
                 minVal = newRanges(i - 1)
@@ -688,16 +688,16 @@ class CarbonMergerRDD[K, V](
     if (column.isDimension) {
       val dimension = column.asInstanceOf[CarbonDimension]
       if (dimension.getDataType == DataTypes.DATE) {
-        new PrimtiveOrdering(DataTypes.INT)
+        new PrimitiveOrdering(DataTypes.INT)
       } else {
         if (DataTypeUtil.isPrimitiveColumn(column.getDataType)) {
-          new PrimtiveOrdering(column.getDataType)
+          new PrimitiveOrdering(column.getDataType)
         } else {
           new StringOrdering()
         }
       }
     } else {
-      new PrimtiveOrdering(column.getDataType)
+      new PrimitiveOrdering(column.getDataType)
     }
   }
 
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
index 4d1f3a3..32186df 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
@@ -169,7 +169,7 @@ class CarbonScanRDD[T: ClassTag](
       val batchPartitions = distributeColumnarSplits(columnarSplits)
       distributeEndTime = System.currentTimeMillis()
       // check and remove InExpression from filterExpression
-      checkAndRemoveInExpressinFromFilterExpression(batchPartitions)
+      checkAndRemoveInExpressionFromFilter(batchPartitions)
       if (streamSplits.isEmpty) {
         partitions = batchPartitions.toArray
       } else {
@@ -195,7 +195,7 @@ class CarbonScanRDD[T: ClassTag](
              | Identified no.of.streaming splits/tasks: ${ streamPartitions.size },
              | no.of.streaming files: ${format.getHitStreamFiles},
              | no.of.total streaming files: ${format.getNumStreamFiles},
-             | no.of.total streaming segement: ${format.getNumStreamSegments}
+             | no.of.total streaming segment: ${format.getNumStreamSegments}
           """.stripMargin)
 
       }
@@ -576,7 +576,7 @@ class CarbonScanRDD[T: ClassTag](
     model.setFreeUnsafeMemory(!isAdded)
     // add task completion before calling initialize as initialize method will internally
     // call for usage of unsafe method for processing of one blocklet and if there is any
-    // exceptionwhile doing that the unsafe memory occupied for that task will not
+    // exception while doing that the unsafe memory occupied for that task will not
     // get cleared
     context.addTaskCompletionListener {
       new QueryTaskCompletionListener(!isAdded,
@@ -685,7 +685,7 @@ class CarbonScanRDD[T: ClassTag](
    *
    * @param identifiedPartitions
    */
-  private def checkAndRemoveInExpressinFromFilterExpression(
+  private def checkAndRemoveInExpressionFromFilter(
       identifiedPartitions: mutable.Buffer[Partition]) = {
     if (null != indexFilter) {
       if (identifiedPartitions.nonEmpty &&
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonTableCompactor.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonTableCompactor.scala
index 3187f14..84d5189 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonTableCompactor.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonTableCompactor.scala
@@ -332,7 +332,7 @@ class CarbonTableCompactor(carbonLoadModel: CarbonLoadModel,
 
       val endTime = System.nanoTime()
       LOGGER.info(s"time taken to merge $mergedLoadName is ${ endTime - startTime }")
-      val statusFileUpdation =
+      val statusFileUpdate =
         ((compactionType == CompactionType.IUD_UPDDEL_DELTA) &&
          CarbonDataMergerUtil
            .updateLoadMetadataIUDUpdateDeltaMergeStatus(loadsToMerge,
@@ -374,10 +374,10 @@ class CarbonTableCompactor(carbonLoadModel: CarbonLoadModel,
         true
       }
       // here either of the conditions can be true, when delete segment is fired after compaction
-      // has started, statusFileUpdation will be false , but at the same time commitComplete can be
+      // has started, statusFileUpdate will be false, but at the same time commitComplete can be
       // true because compaction for all indexes will be finished at a time to the maximum level
       // possible (level 1, 2 etc). so we need to check for either condition
-      if (!statusFileUpdation || !commitComplete) {
+      if (!statusFileUpdate || !commitComplete) {
         LOGGER.error(s"Compaction request failed for table ${ carbonLoadModel.getDatabaseName }." +
                      s"${ carbonLoadModel.getTableName }")
         throw new Exception(s"Compaction failed to update metadata for table" +
@@ -387,7 +387,7 @@ class CarbonTableCompactor(carbonLoadModel: CarbonLoadModel,
         LOGGER.info(s"Compaction request completed for table " +
                     s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
 
-        // Prepriming index for compaction
+        // Pre-priming index for compaction
         val segmentsForPriming = if (compactionType.equals(CompactionType.IUD_DELETE_DELTA) ||
             compactionType.equals(CompactionType.IUD_UPDDEL_DELTA)) {
             validSegments.asScala.map(_.getSegmentNo).toList
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CompactionFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CompactionFactory.scala
index 12f0f98..dc0deb9 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CompactionFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CompactionFactory.scala
@@ -28,7 +28,7 @@ import org.apache.carbondata.processing.loading.model.CarbonLoadModel
 object CompactionFactory {
 
   /**
-   *  Returns appropriate Compactable object.
+   *  Returns appropriate Compactor object.
    */
   def getCompactor(carbonLoadModel: CarbonLoadModel,
       compactionModel: CompactionModel,
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
index 5670c1f..7661d30 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
@@ -121,7 +121,7 @@ class NewCarbonDataLoadRDD[K, V](
   }
 
   override def checkpoint() {
-    // Do nothing. Hadoop RDD should not be checkpointed.
+    // Do nothing. Hadoop RDD should not do checkpoint.
   }
 
   override def internalCompute(theSplit: Partition, context: TaskContext): Iterator[(K, V)] = {
@@ -311,7 +311,7 @@ class NewDataFrameLoaderRDD[K, V](
         // Initialize to set carbon properties
         loader.initialize()
         val executor = new DataLoadExecutor
-        // in case of success, failure or cancelation clear memory and stop execution
+        // in case of success, failure or cancellation clear memory and stop execution
         context
           .addTaskCompletionListener(new InsertTaskCompletionListener(executor,
             executionErrors,
@@ -368,10 +368,10 @@ class NewRddIterator(rddIter: Iterator[Row],
     carbonLoadModel: CarbonLoadModel,
     context: TaskContext) extends CarbonIterator[Array[AnyRef]] {
 
-  private var timeStampformatString = carbonLoadModel.getTimestampFormat
+  private var timestampFormatString = carbonLoadModel.getTimestampFormat
   private var dateFormatString = carbonLoadModel.getDateFormat
-  if (timeStampformatString.isEmpty) {
-    timeStampformatString = CarbonProperties.getInstance()
+  if (timestampFormatString.isEmpty) {
+    timestampFormatString = CarbonProperties.getInstance()
       .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
         CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
   }
@@ -380,7 +380,7 @@ class NewRddIterator(rddIter: Iterator[Row],
       .getProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
         CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
   }
-  private val timeStampFormat = new SimpleDateFormat(timeStampformatString)
+  private val timeStampFormat = new SimpleDateFormat(timestampFormatString)
   private val dateFormat = new SimpleDateFormat(dateFormatString)
   private val complexDelimiters = carbonLoadModel.getComplexDelimiters
   private val serializationNullFormat =
@@ -437,10 +437,10 @@ class LazyRddIterator(serializer: SerializerInstance,
     carbonLoadModel: CarbonLoadModel,
     context: TaskContext) extends CarbonIterator[Array[AnyRef]] {
 
-  private var timeStampformatString = carbonLoadModel.getTimestampFormat
+  private var timestampFormatString = carbonLoadModel.getTimestampFormat
   private var dateFormatString = carbonLoadModel.getDateFormat
-  if (timeStampformatString.isEmpty) {
-    timeStampformatString = CarbonProperties.getInstance()
+  if (timestampFormatString.isEmpty) {
+    timestampFormatString = CarbonProperties.getInstance()
       .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
         CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
   }
@@ -449,13 +449,13 @@ class LazyRddIterator(serializer: SerializerInstance,
       .getProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
         CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
   }
-  private val timeStampFormat = new SimpleDateFormat(timeStampformatString)
+  private val timeStampFormat = new SimpleDateFormat(timestampFormatString)
   private val dateFormat = new SimpleDateFormat(dateFormatString)
   private val complexDelimiters = carbonLoadModel.getComplexDelimiters
   private val serializationNullFormat =
     carbonLoadModel.getSerializationNullFormat.split(CarbonCommonConstants.COMMA, 2)(1)
-  // the order of fields in dataframe and createTable may be different, here we need to know whether
-  // each fields in dataframe is Varchar or not.
+  // the order of fields in the DataFrame and createTable may be different, so here we need to
+  // know whether each field in the DataFrame is Varchar or not.
   import scala.collection.JavaConverters._
   private val isVarcharTypeMapping = {
     val col2VarcharType = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala
index fd5a1a8..8a17da2 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala
@@ -298,7 +298,7 @@ object StreamHandoffRDD {
       carbonLoadModel: CarbonLoadModel,
       sparkSession: SparkSession,
       operationContext: OperationContext,
-      handoffSegmenId: String): Unit = {
+      handoffSegmentId: String): Unit = {
     var loadStatus = SegmentStatus.SUCCESS
     var errorMessage: String = "Handoff failure"
     try {
@@ -317,7 +317,7 @@ object StreamHandoffRDD {
         sparkSession,
         new HandoffResultImpl(),
         carbonLoadModel,
-        handoffSegmenId).collect()
+        handoffSegmentId).collect()
 
       status.foreach { x =>
         if (!x._2) {
@@ -327,7 +327,7 @@ object StreamHandoffRDD {
     } catch {
       case ex: Exception =>
         loadStatus = SegmentStatus.LOAD_FAILURE
-        LOGGER.error(s"Handoff failed on streaming segment $handoffSegmenId", ex)
+        LOGGER.error(s"Handoff failed on streaming segment $handoffSegmentId", ex)
         errorMessage = errorMessage + ": " + ex.getCause.getMessage
         LOGGER.error(errorMessage)
     }
@@ -349,14 +349,14 @@ object StreamHandoffRDD {
           carbonLoadModel)
       OperationListenerBus.getInstance().fireEvent(loadTablePreStatusUpdateEvent, operationContext)
 
-      val done = updateLoadMetadata(handoffSegmenId, carbonLoadModel)
+      val done = updateLoadMetadata(handoffSegmentId, carbonLoadModel)
 
       val loadTablePostStatusUpdateEvent: LoadTablePostStatusUpdateEvent =
         new LoadTablePostStatusUpdateEvent(carbonLoadModel)
       OperationListenerBus.getInstance()
         .fireEvent(loadTablePostStatusUpdateEvent, operationContext)
       if (!done) {
-        LOGGER.error("Handoff failed due to failure in table status updation.")
+        LOGGER.error("Handoff failed due to failure in table status update.")
         throw new Exception(errorMessage)
       }
     }
@@ -384,7 +384,7 @@ object StreamHandoffRDD {
       if (carbonLock.lockWithRetries()) {
         LOGGER.info(
           "Acquired lock for table" + loadModel.getDatabaseName() + "." + loadModel.getTableName()
-          + " for table status updation")
+          + " for table status update")
         val listOfLoadFolderDetailsArray =
           SegmentStatusManager.readLoadMetadata(metaDataFilepath)
 
@@ -414,16 +414,16 @@ object StreamHandoffRDD {
         SegmentStatusManager.writeLoadDetailsIntoFile(tableStatusPath, listOfLoadFolderDetailsArray)
         status = true
       } else {
-        LOGGER.error("Not able to acquire the lock for Table status updation for table " + loadModel
+        LOGGER.error("Not able to acquire the lock for Table status update for table " + loadModel
           .getDatabaseName() + "." + loadModel.getTableName())
       }
     } finally {
       if (carbonLock.unlock()) {
-        LOGGER.info("Table unlocked successfully after table status updation" +
+        LOGGER.info("Table unlocked successfully after table status update" +
                     loadModel.getDatabaseName() + "." + loadModel.getTableName())
       } else {
         LOGGER.error("Unable to unlock Table lock for table" + loadModel.getDatabaseName() +
-                     "." + loadModel.getTableName() + " during table status updation")
+                     "." + loadModel.getTableName() + " during table status update")
       }
     }
     status
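
The updateLoadMetadata path above follows the usual acquire-write-unlock discipline around the table status file. A rough, self-contained sketch of that discipline, with a simplified lock trait standing in for ICarbonLock and the status write passed in as a function (the names here are assumptions, not CarbonData API):

    // Simplified stand-in for ICarbonLock; the real implementation retries with backoff.
    trait SimpleLock {
      def lockWithRetries(): Boolean
      def unlock(): Boolean
    }

    def updateTableStatus(lock: SimpleLock)(writeStatus: () => Unit): Boolean = {
      var status = false
      try {
        if (lock.lockWithRetries()) {
          writeStatus()   // e.g. SegmentStatusManager.writeLoadDetailsIntoFile(...)
          status = true
        } else {
          println("Not able to acquire the lock for table status update")
        }
      } finally {
        if (!lock.unlock()) {
          println("Unable to unlock table lock after table status update")
        }
      }
      status
    }
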
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
index 0433d37..9ad76a7 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
@@ -92,7 +92,7 @@ object CarbonScalaUtil {
    *
    * @param value           Input value to convert
    * @param dataType        Datatype to convert and then convert to String
-   * @param timeStampFormat Timestamp format to convert in case of timestamp datatypes
+   * @param timeStampFormat Timestamp format to convert in case of timestamp data types
    * @param dateFormat      DataFormat to convert in case of DateType datatype
    * @return converted String
    */
@@ -145,7 +145,7 @@ object CarbonScalaUtil {
    *
    * @param value           Input value to convert
    * @param dataType        Datatype to convert and then convert to String
-   * @param timeStampFormat Timestamp format to convert in case of timestamp datatypes
+   * @param timeStampFormat Timestamp format to convert in case of timestamp data types
    * @param dateFormat      DataFormat to convert in case of DateType datatype
    * @return converted String
    */
@@ -344,7 +344,7 @@ object CarbonScalaUtil {
           specLinkedMap,
           table).toMap
       f.copy(spec = changedSpec)
-    }.groupBy(p => p.spec).map(f => f._2.head).toSeq // Avoid duplicates by do groupby
+    }.groupBy(p => p.spec).map(f => f._2.head).toSeq // Avoid duplicates by doing group by
   }
 
   /**
@@ -399,7 +399,7 @@ object CarbonScalaUtil {
       } else {
         finalProperties.put("tableName", dbAndTableName(0))
       }
-      // Overriding the tablePath in case tablepath already exists. This will happen when old
+      // Overriding the 'tablePath' in case 'tablepath' already exists. This will happen when old
       // table schema is updated by the new code then both `path` and `tablepath` will exist. In
       // this case use tablepath
       parameters.get("tablepath") match {
@@ -599,8 +599,7 @@ object CarbonScalaUtil {
    *
    * @param tableProperties
    */
-  def validateDuplicateLocalDictIncludeExcludeColmns(tableProperties: mutable.Map[String,
-    String]): Unit = {
+  def validateDuplicateColumnsForLocalDict(tableProperties: mutable.Map[String, String]): Unit = {
     val isLocalDictIncludeDefined = tableProperties
       .get(CarbonCommonConstants.LOCAL_DICTIONARY_INCLUDE)
       .isDefined
@@ -666,24 +665,24 @@ object CarbonScalaUtil {
     }
 
     // check if column is other than STRING or VARCHAR datatype
-    localDictColumns.foreach { dictColm =>
+    localDictColumns.foreach { dictColumn =>
       if (fields
-        .exists(x => x.column.equalsIgnoreCase(dictColm) &&
+        .exists(x => x.column.equalsIgnoreCase(dictColumn) &&
                      !x.dataType.get.equalsIgnoreCase("STRING") &&
                      !x.dataType.get.equalsIgnoreCase("VARCHAR") &&
                      !x.dataType.get.equalsIgnoreCase("STRUCT") &&
                      !x.dataType.get.equalsIgnoreCase("MAP") &&
                      !x.dataType.get.equalsIgnoreCase("ARRAY"))) {
-        if (fields.exists(x => x.column.equalsIgnoreCase(dictColm)
+        if (fields.exists(x => x.column.equalsIgnoreCase(dictColumn)
                 && x.dataType.get.equalsIgnoreCase("BINARY"))
                 && tableProperties.get("local_dictionary_exclude").nonEmpty
-                && tableProperties.get("local_dictionary_exclude").get.contains(dictColm)
+                && tableProperties.get("local_dictionary_exclude").get.contains(dictColumn)
                 && (tableProperties.get("local_dictionary_include").isEmpty
-                || (!tableProperties.get("local_dictionary_include").get.contains(dictColm)))) {
+                || (!tableProperties.get("local_dictionary_include").get.contains(dictColumn)))) {
           LOGGER.info("Local_dictionary_exclude supports binary")
         } else {
           val errorMsg = "LOCAL_DICTIONARY_INCLUDE/LOCAL_DICTIONARY_EXCLUDE column: " +
-                  dictColm.trim +
+                  dictColumn.trim +
                   " is not a string/complex/varchar datatype column. LOCAL_DICTIONARY_COLUMN" +
                   " should be no dictionary string/complex/varchar datatype column." +
                   "Please check the DDL."
@@ -693,13 +692,12 @@ object CarbonScalaUtil {
     }
 
     // Validate whether any of the child columns of complex dataType column is a string column
-    localDictColumns.foreach { dictColm =>
+    localDictColumns.foreach { dictColumn =>
       if (fields
-        .exists(x => x.column.equalsIgnoreCase(dictColm) && x.children.isDefined &&
-                     null != x.children.get &&
-                     !validateChildColumnsRecursively(x))) {
+        .exists(x => x.column.equalsIgnoreCase(dictColumn) && x.children.isDefined &&
+                     null != x.children.get && !validateChildColumnsRecursively(x))) {
         val errMsg =
-          s"None of the child columns of complex dataType column $dictColm specified in " +
+          s"None of the child columns of complex dataType column $dictColumn specified in " +
           "local_dictionary_include are not of string dataType."
         throw new MalformedCarbonCommandException(errMsg)
       }
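
Apart from the binary special case, the renamed validation above reduces to one rule: every column listed in LOCAL_DICTIONARY_INCLUDE/EXCLUDE must be a string, varchar or complex (struct/map/array) column. A condensed sketch of that check over a plain field list, throwing a generic exception where the real code raises MalformedCarbonCommandException:

    // Simplified stand-in for the DDL parser's Field class.
    case class Field(column: String, dataType: String)

    val allowedTypes = Set("STRING", "VARCHAR", "STRUCT", "MAP", "ARRAY")

    def validateLocalDictColumns(localDictColumns: Seq[String], fields: Seq[Field]): Unit = {
      localDictColumns.foreach { dictColumn =>
        fields.find(_.column.equalsIgnoreCase(dictColumn)) match {
          case None =>
            throw new IllegalArgumentException(s"column $dictColumn does not exist in the table")
          case Some(f) if !allowedTypes.contains(f.dataType.toUpperCase) =>
            throw new IllegalArgumentException(
              s"$dictColumn is not a string/complex/varchar datatype column")
          case _ => // valid local dictionary column
        }
      }
    }

    validateLocalDictColumns(Seq("name"), Seq(Field("name", "string"), Field("age", "int")))
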
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
index 3114db1..e55f205 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
@@ -668,7 +668,7 @@ object CommonUtil {
     } else {
       val trimmedCacheLevel = cacheLevel.trim.toUpperCase
       if (!supportedCacheLevel.contains(trimmedCacheLevel)) {
-        val errorMessage = s"Invalid value: Allowed vaLues for ${
+        val errorMessage = s"Invalid value: Allowed values for ${
           CarbonCommonConstants.CACHE_LEVEL} are BLOCK AND BLOCKLET"
         throw new MalformedCarbonCommandException(errorMessage)
       }
@@ -702,7 +702,7 @@ object CommonUtil {
   }
 
   /**
-   * Validate the column_meta_cache option in tableproperties
+   * Validate the column_meta_cache option in table properties
    *
    * @param tableIdentifier
    * @param databaseName
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/DataGenerator.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/DataGenerator.scala
index 64c4e14..85d4eef 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/DataGenerator.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/DataGenerator.scala
@@ -47,7 +47,7 @@ object DataGenerator {
    * generate DataFrame with above table schema
    *
    * @param spark SparkSession
-   * @return Dataframe of test data
+   * @return DataFrame of test data
    */
   def generateDataFrame(spark: SparkSession, totalNum: Int): DataFrame = {
     val rdd = spark.sparkContext
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java b/integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
index bcea438..7d7cd0e 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
@@ -85,7 +85,7 @@ public class VectorizedCarbonRecordReader extends AbstractRecordReader<Object> {
   private boolean[] isNoDictStringField;
 
   /**
-   * The default config on whether columnarBatch should be onheap.
+   * The default config on whether columnarBatch should be on-heap.
    */
   private static final MemoryMode DEFAULT_MEMORY_MODE = MemoryMode.ON_HEAP;
 
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/stream/StreamJobManager.scala b/integration/spark/src/main/scala/org/apache/carbondata/stream/StreamJobManager.scala
index a36f561..1bbb6a2 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/stream/StreamJobManager.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/stream/StreamJobManager.scala
@@ -80,7 +80,7 @@ object StreamJobManager {
    * @param sourceTable stream source table
    * @param sinkTable sink table to insert to
    * @param query query string
-   * @param streamDf dataframe that containing the query from stream source table
+   * @param streamDf DataFrame containing the query from the stream source table
    * @param options options provided by user
    * @return Job ID
    */
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/streaming/StreamSinkFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/streaming/StreamSinkFactory.scala
index 9a1d2d4..95ebd78 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/streaming/StreamSinkFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/streaming/StreamSinkFactory.scala
@@ -92,7 +92,7 @@ object StreamSinkFactory {
       carbonTable,
       parameters,
       "")
-    // fire pre event before streamin is started
+    // fire pre event before streaming is started
     // in case of streaming options and optionsFinal can be same
     val operationContext = new OperationContext
     val (tableIndexes, indexOperationContext) = CommonLoadUtils.firePreLoadEvents(sparkSession,
@@ -118,7 +118,7 @@ object StreamSinkFactory {
       carbonLoadModel,
       operationContext)
 
-    // fire post event before streamin is started
+    // fire post event before streaming is started
     CommonLoadUtils.firePostLoadEvents(sparkSession,
       carbonLoadModel,
       tableIndexes,
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/view/MVRefresher.scala b/integration/spark/src/main/scala/org/apache/carbondata/view/MVRefresher.scala
index 60d3692..00a2f00 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/view/MVRefresher.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/view/MVRefresher.scala
@@ -68,19 +68,19 @@ object MVRefresher {
     // Clean up the old invalid segment data before creating a new entry for new load.
     SegmentStatusManager.deleteLoadsAndUpdateMetadata(viewTable, false, null)
     val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(viewTableIdentifier)
-    // Acquire table status lock to handle concurrent dataloading
+    // Acquire table status lock to handle concurrent data loading
     val lock: ICarbonLock = segmentStatusManager.getTableStatusLock
     val segmentMapping: util.Map[String, util.List[String]] =
       new util.HashMap[String, util.List[String]]
     val viewManager = MVManagerInSpark.get(session)
     try if (lock.lockWithRetries) {
-      LOGGER.info("Acquired lock for mv " + viewIdentifier + " for table status updation")
+      LOGGER.info("Acquired lock for mv " + viewIdentifier + " for table status update")
       val viewTableMetadataPath: String =
         CarbonTablePath.getMetadataPath(viewIdentifier.getTablePath)
       val loadMetadataDetails = SegmentStatusManager.readLoadMetadata(viewTableMetadataPath)
       val loadMetadataDetailList: util.List[LoadMetadataDetails] =
         new util.ArrayList[LoadMetadataDetails](CarbonCommonConstants.DEFAULT_COLLECTION_SIZE)
-      // Mark for delete all stale loadMetadetail
+      // Mark all stale loadMetadataDetail entries for delete
       for (loadMetadataDetail <- loadMetadataDetails) {
         if (((loadMetadataDetail.getSegmentStatus eq SegmentStatus.INSERT_IN_PROGRESS) ||
              (loadMetadataDetail.getSegmentStatus eq SegmentStatus.INSERT_OVERWRITE_IN_PROGRESS)) &&
@@ -122,7 +122,7 @@ object MVRefresher {
           }
         }
       segmentMap = new Gson().toJson(segmentMapping)
-      // To handle concurrent dataloading to mv, create new loadMetaEntry and
+      // To handle concurrent data loading to mv, create new loadMetaEntry and
       // set segmentMap to new loadMetaEntry and pass new segmentId with load command
       val loadMetadataDetail: LoadMetadataDetails = new LoadMetadataDetails
       val segmentId: String = String.valueOf(
@@ -137,21 +137,21 @@ object MVRefresher {
         loadMetadataDetailList.toArray(new Array[LoadMetadataDetails](loadMetadataDetailList
           .size)))
     } else {
-      LOGGER.error("Not able to acquire the lock for Table status updation for table " +
+      LOGGER.error("Not able to acquire the lock for table status update for table " +
                    viewSchema.getIdentifier.getDatabaseName + "." +
                    viewSchema.getIdentifier.getTableName)
       viewManager.setStatus(viewSchema.getIdentifier, MVStatus.DISABLED)
       return false
     } finally {
       if (lock.unlock) {
-        LOGGER.info("Table unlocked successfully after table status updation" +
+        LOGGER.info("Table unlocked successfully after table status update" +
                     viewSchema.getIdentifier.getDatabaseName + "." +
                     viewSchema.getIdentifier.getTableName)
       } else {
         LOGGER.error("Unable to unlock Table lock for table" +
                      viewSchema.getIdentifier.getDatabaseName + "." +
                      viewSchema.getIdentifier.getTableName +
-                     " during table status updation")
+                     " during table status update")
       }
     }
     refreshInternal(viewManager, viewSchema, viewTable, newLoadName, segmentMapping, session)
diff --git a/integration/spark/src/main/scala/org/apache/spark/CarbonInputMetrics.scala b/integration/spark/src/main/scala/org/apache/spark/CarbonInputMetrics.scala
index 69a9bfd..2842d2b 100644
--- a/integration/spark/src/main/scala/org/apache/spark/CarbonInputMetrics.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/CarbonInputMetrics.scala
@@ -33,7 +33,7 @@ import org.apache.carbondata.spark.InitInputMetrics
 class CarbonInputMetrics extends InitInputMetrics{
   @transient val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
     var inputMetrics: InputMetrics = _
-    // bytes read before compute by other map rdds in lineage
+    // bytes read before compute by other map RDDs in lineage
     var existingBytesRead: Long = _
     var recordCount: Long = _
     var inputMetricsInterval: Long = _
diff --git a/integration/spark/src/main/scala/org/apache/spark/DataSkewRangePartitioner.scala b/integration/spark/src/main/scala/org/apache/spark/DataSkewRangePartitioner.scala
index e72e78e..836ee16 100644
--- a/integration/spark/src/main/scala/org/apache/spark/DataSkewRangePartitioner.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/DataSkewRangePartitioner.scala
@@ -319,8 +319,8 @@ class DataSkewRangePartitioner[K: Ordering : ClassTag, V](
   @throws(classOf[IOException])
   private def writeObject(out: ObjectOutputStream): Unit = {
     Utils.tryOrIOException {
-      val sfactory = SparkEnv.get.serializer
-      sfactory match {
+      val serializer = SparkEnv.get.serializer
+      serializer match {
         case js: JavaSerializer => out.defaultWriteObject()
         case _ =>
           out.writeInt(skewCount)
@@ -332,7 +332,7 @@ class DataSkewRangePartitioner[K: Ordering : ClassTag, V](
           out.writeObject(ordering)
           out.writeObject(binarySearch)
 
-          val ser = sfactory.newInstance()
+          val ser = serializer.newInstance()
           Utils.serializeViaNestedStream(out, ser) { stream =>
             stream.writeObject(scala.reflect.classTag[Array[K]])
             stream.writeObject(rangeBounds)
@@ -345,8 +345,8 @@ class DataSkewRangePartitioner[K: Ordering : ClassTag, V](
   private def readObject(in: ObjectInputStream): Unit = {
     Utils.tryOrIOException {
       needInit = true
-      val sfactory = SparkEnv.get.serializer
-      sfactory match {
+      val serializer = SparkEnv.get.serializer
+      serializer match {
         case js: JavaSerializer => in.defaultReadObject()
         case _ =>
           skewCount = in.readInt()
@@ -358,7 +358,7 @@ class DataSkewRangePartitioner[K: Ordering : ClassTag, V](
           ordering = in.readObject().asInstanceOf[Ordering[K]]
           binarySearch = in.readObject().asInstanceOf[(Array[K], K) => Int]
 
-          val ser = sfactory.newInstance()
+          val ser = serializer.newInstance()
           Utils.deserializeViaNestedStream(in, ser) { ds =>
             implicit val classTag = ds.readObject[ClassTag[Array[K]]]()
             rangeBounds = ds.readObject[Array[K]]()
diff --git a/integration/spark/src/main/scala/org/apache/spark/rdd/DataLoadPartitionCoalescer.scala b/integration/spark/src/main/scala/org/apache/spark/rdd/DataLoadPartitionCoalescer.scala
index 6d45cf4..77d3304 100644
--- a/integration/spark/src/main/scala/org/apache/spark/rdd/DataLoadPartitionCoalescer.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/rdd/DataLoadPartitionCoalescer.scala
@@ -28,7 +28,7 @@ import org.apache.carbondata.common.logging.LogServiceFactory
 /**
  * DataLoadPartitionCoalescer
  * Repartition the partitions of rdd to few partitions, one partition per node.
- * exmaple:
+ * example:
  * blk_hst  host1 host2 host3 host4 host5
  * block1   host1 host2 host3
  * block2         host2       host4 host5
@@ -106,7 +106,7 @@ class DataLoadPartitionCoalescer(prev: RDD[_], nodeList: Array[String]) {
         // if a partition has no location, add to noLocalityPartitions
         tmpNoLocalityPartitions += p.index
       } else {
-        // add partion to hostMapPartitionIds and partitionIdMapHosts
+        // add partition to hostMapPartitionIds and partitionIdMapHosts
         locs.foreach { loc =>
           val host = loc.host
           hostMapPartitionIds.get(host) match {
@@ -265,14 +265,14 @@ class DataLoadPartitionCoalescer(prev: RDD[_], nodeList: Array[String]) {
   private def repartitionNoLocality(): Array[Partition] = {
     // no locality repartition
     LOGGER.info("no locality partition")
-    val prevPartIndexs = new Array[ArrayBuffer[Int]](numOfParts)
+    val prevPartIndexes = new Array[ArrayBuffer[Int]](numOfParts)
     for (i <- 0 until numOfParts) {
-      prevPartIndexs(i) = new ArrayBuffer[Int]
+      prevPartIndexes(i) = new ArrayBuffer[Int]
     }
     for (i <- 0 until prevPartitions.length) {
-      prevPartIndexs(i % numOfParts) += prevPartitions(i).index
+      prevPartIndexes(i % numOfParts) += prevPartitions(i).index
     }
-    prevPartIndexs.filter(_.nonEmpty).zipWithIndex.map { x =>
+    prevPartIndexes.filter(_.nonEmpty).zipWithIndex.map { x =>
       CoalescedRDDPartition(x._2, prev, x._1.toArray, getLocation(x._2))
     }
   }
@@ -289,16 +289,16 @@ class DataLoadPartitionCoalescer(prev: RDD[_], nodeList: Array[String]) {
     // sort host and partitions
     tempNoEmptyHosts = sortHostAndPartitions(tempNoEmptyHosts)
     // assign locality partition to non empty hosts
-    val templocalityResult = assignPartitionNodeLocality(tempNoEmptyHosts)
+    val tempLocalityResult = assignPartitionNodeLocality(tempNoEmptyHosts)
     // collect non empty hosts and empty hosts
     val noEmptyHosts = mutable.Buffer[String]()
     val localityResult = mutable.Buffer[ArrayBuffer[Int]]()
-    for(index <- 0 until templocalityResult.size) {
-      if (templocalityResult(index).isEmpty) {
+    for(index <- 0 until tempLocalityResult.size) {
+      if (tempLocalityResult(index).isEmpty) {
         emptyHosts += tempNoEmptyHosts(index)._1
       } else {
         noEmptyHosts += tempNoEmptyHosts(index)._1
-        localityResult += templocalityResult(index)
+        localityResult += tempLocalityResult(index)
       }
     }
     // 2. do no locality repartition
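
The repartitionNoLocality hunk above simply deals the previous partition indexes round-robin into numOfParts buckets and drops the empty ones. The same idea in isolation, with plain Ints in place of Spark Partition objects:

    import scala.collection.mutable.ArrayBuffer

    // Deal previous partition indexes round-robin into numOfParts coalesced partitions.
    def coalesceRoundRobin(prevPartitionIndexes: Seq[Int], numOfParts: Int): Array[Array[Int]] = {
      val buckets = Array.fill(numOfParts)(new ArrayBuffer[Int])
      prevPartitionIndexes.zipWithIndex.foreach { case (partitionIndex, i) =>
        buckets(i % numOfParts) += partitionIndex
      }
      buckets.filter(_.nonEmpty).map(_.toArray)
    }

    // 7 input partitions dealt into 3 coalesced partitions: 0,3,6 / 1,4 / 2,5
    coalesceRoundRobin(0 until 7, 3).foreach(b => println(b.mkString(",")))
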
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
index 7cafa59..d07dbb4 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
@@ -32,7 +32,7 @@ class CarbonDataFrameWriter(sqlContext: SQLContext, val dataFrame: DataFrame) {
   private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
 
   def saveAsCarbonFile(parameters: Map[String, String] = Map()): Unit = {
-    // create a new table using dataframe's schema and write its content into the table
+    // create a new table using DataFrame's schema and write its content into the table
     sqlContext.sparkSession.sql(
       makeCreateTableString(dataFrame.schema, new CarbonOption(parameters))).collect()
     writeToCarbonFile(parameters)
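
saveAsCarbonFile above first issues a CREATE TABLE built from the DataFrame's schema and only then writes the data. A rough sketch of how such a DDL string can be derived from a StructType; the type mapping below is illustrative and much smaller than what makeCreateTableString actually handles:

    import org.apache.spark.sql.types._

    // Illustrative Spark-to-SQL type mapping; the real writer covers more cases and options.
    def toSqlType(dt: DataType): String = dt match {
      case StringType    => "STRING"
      case IntegerType   => "INT"
      case LongType      => "BIGINT"
      case DoubleType    => "DOUBLE"
      case TimestampType => "TIMESTAMP"
      case other         => other.simpleString.toUpperCase
    }

    def makeCreateTableString(schema: StructType, tableName: String): String = {
      val columns = schema.fields.map(f => s"${f.name} ${toSqlType(f.dataType)}").mkString(", ")
      s"CREATE TABLE IF NOT EXISTS $tableName ($columns) STORED AS carbondata"
    }
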
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
index 026f291..b3d17f5 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
@@ -79,8 +79,8 @@ case class CarbonDatasourceHadoopRelation(
 
     val projection = new CarbonProjection
 
-    // As Filter pushdown for Complex datatype is not supported, if filter is applied on complex
-    // column, then Projection pushdown on Complex Columns will not take effect. Hence, check if
+    // As Filter push down for Complex datatype is not supported, if filter is applied on complex
+    // column, then Projection push down on Complex Columns will not take effect. Hence, check if
     // filter contains Struct Complex Column.
     val complexFilterExists = filterComplex.map(col =>
       col.map(_.isInstanceOf[GetStructField]))
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
index fbe9e32..cb6060d 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
@@ -102,14 +102,14 @@ class CarbonEnv {
     sparkSession.udf.register(MVFunctions.TIME_SERIES_FUNCTION, new TimeSeriesFunction)
 
     // update carbon session parameters , preserve thread parameters
-    val currentThreadSesssionInfo = ThreadLocalSessionInfo.getCarbonSessionInfo
+    val currentThreadSessionInfo = ThreadLocalSessionInfo.getCarbonSessionInfo
     carbonSessionInfo = new CarbonSessionInfo()
     // We should not corrupt the information in carbonSessionInfo object which is at the
     // session level. Instead create a new object and in that set the user specified values in
     // thread/session params
     val threadLevelCarbonSessionInfo = new CarbonSessionInfo()
-    if (currentThreadSesssionInfo != null) {
-      threadLevelCarbonSessionInfo.setThreadParams(currentThreadSesssionInfo.getThreadParams)
+    if (currentThreadSessionInfo != null) {
+      threadLevelCarbonSessionInfo.setThreadParams(currentThreadSessionInfo.getThreadParams)
     }
     ThreadLocalSessionInfo.setCarbonSessionInfo(threadLevelCarbonSessionInfo)
     ThreadLocalSessionInfo.setConfigurationToCurrentThread(
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonExpressions.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonExpressions.scala
index d473bc4..d168fc6 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonExpressions.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonExpressions.scala
@@ -28,7 +28,7 @@ import org.apache.spark.sql.types.DataType
 /**
  * This class contains the wrappers of all the case classes which are common
  * across spark version 2.1 and 2.2 but have change in parameter list.
- * Below are the overriden unapply methods in order to make it work
+ * Below are the overridden unapply methods in order to make it work
  * across both the version of spark2.1 and spark 2.2
  */
 object CarbonExpressions {
@@ -86,7 +86,7 @@ object CarbonExpressions {
   }
 
   /**
-   * uapply method of UnresolvedRelation
+   * unapply method of UnresolvedRelation
    */
   object CarbonUnresolvedRelation {
     def unapply(plan: LogicalPlan): Option[(TableIdentifier)] = {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSession.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSession.scala
index af2ff3c..4700582 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSession.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSession.scala
@@ -155,14 +155,14 @@ object CarbonSession {
 
   private val statementId = new AtomicLong(0)
 
-  private var enableInMemCatlog: Boolean = false
+  private var isInMemoryCatalog: Boolean = false
 
   private[sql] val threadStatementId = new ThreadLocal[Long]()
 
   implicit class CarbonBuilder(builder: Builder) {
 
     def enableInMemoryCatalog(): Builder = {
-      enableInMemCatlog = true
+      isInMemoryCatalog = true
       builder
     }
     def getOrCreateCarbonSession(): SparkSession = {
@@ -177,7 +177,7 @@ object CarbonSession {
 
     def getOrCreateCarbonSession(storePath: String,
         metaStorePath: String): SparkSession = synchronized {
-      if (!enableInMemCatlog) {
+      if (!isInMemoryCatalog) {
         builder.enableHiveSupport()
       }
       val options =
@@ -252,7 +252,7 @@ object CarbonSession {
           sc
         }
 
-        session = new CarbonSession(sparkContext, None, !enableInMemCatlog)
+        session = new CarbonSession(sparkContext, None, !isInMemoryCatalog)
 
         val carbonProperties = CarbonProperties.getInstance()
         if (StringUtils.isNotBlank(storePath)) {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonVectorProxy.java b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonVectorProxy.java
index ca1ea95..158b3e2 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonVectorProxy.java
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonVectorProxy.java
@@ -50,7 +50,7 @@ public class CarbonVectorProxy {
    * handles the complexity of spark 2.3 version related api changes since
    * spark ColumnVector and ColumnarBatch interfaces are still evolving.
    *
-   * @param memMode       which represent the type onheap or offheap vector.
+   * @param memMode       which represents the on-heap or off-heap vector type.
    * @param outputSchema, metadata related to current schema of table.
    * @param rowNum        rows number for vector reading
    * @param useLazyLoad   Whether to use lazy load while getting the data.
@@ -79,7 +79,7 @@ public class CarbonVectorProxy {
     }
 
     /**
-     * This API will return a columnvector from a batch of column vector rows
+     * This API will return a column vector from a batch of column vector rows
      * based on the ordinal
      *
      * @param ordinal
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CustomDeterministicExpression.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CustomDeterministicExpression.scala
index 233f28d..8f8b196 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CustomDeterministicExpression.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CustomDeterministicExpression.scala
@@ -34,7 +34,7 @@ case class CustomDeterministicExpression(nonDt: Expression ) extends Expression
 
   override def children: Seq[Expression] = nonDt.children
 
-  def childexp: Expression = nonDt
+  def childExp: Expression = nonDt
 
   override def genCode(ctx: CodegenContext): ExprCode = nonDt.genCode(ctx)
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonFileIndex.scala b/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonFileIndex.scala
index e0c7c9c..b6feab4 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonFileIndex.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonFileIndex.scala
@@ -52,7 +52,7 @@ case class CarbonFileIndex(
   extends FileIndex with AbstractCarbonFileIndex {
 
   // When this flag is set it just returns empty files during pruning. It is needed for carbon
-  // session partition flow as we handle directly through indexSchema pruining.
+  // session partition flow as we handle directly through indexSchema pruning.
   private var actAsDummy = false
 
   override def rootPaths: Seq[Path] = fileIndex.rootPaths
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonFileIndexReplaceRule.scala b/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonFileIndexReplaceRule.scala
index ebce50f..2592a6a 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonFileIndexReplaceRule.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonFileIndexReplaceRule.scala
@@ -87,7 +87,7 @@ class CarbonFileIndexReplaceRule extends Rule[LogicalPlan] {
   }
 
   /**
-   * Get datafolders recursively
+   * Get data folders recursively
    */
   private def getDataFolders(
       tableFolder: CarbonFile,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala b/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
index a9a01ee..015af6c 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
@@ -32,7 +32,7 @@ import org.apache.spark.internal.Logging
 import org.apache.spark.internal.io.FileCommitProtocol
 import org.apache.spark.memory.MemoryMode
 import org.apache.spark.sql._
-import org.apache.spark.sql.carbondata.execution.datasources.readsupport.SparkUnsafeRowReadSuport
+import org.apache.spark.sql.carbondata.execution.datasources.readsupport.SparkUnsafeRowReadSupport
 import org.apache.spark.sql.carbondata.execution.datasources.tasklisteners.{CarbonLoadTaskCompletionListenerImpl, CarbonQueryTaskCompletionListenerImpl}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.JoinedRow
@@ -435,7 +435,7 @@ class SparkCarbonFileFormat extends FileFormat
           vectorizedReader
         } else {
           val reader = new CarbonRecordReader(model,
-            new SparkUnsafeRowReadSuport(requiredSchema), broadcastedHadoopConf.value.value)
+            new SparkUnsafeRowReadSupport(requiredSchema), broadcastedHadoopConf.value.value)
           reader.initialize(split, hadoopAttemptContext)
           reader
         }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/readsupport/SparkUnsafeRowReadSuport.scala b/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/readsupport/SparkUnsafeRowReadSupport.scala
similarity index 94%
rename from integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/readsupport/SparkUnsafeRowReadSuport.scala
rename to integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/readsupport/SparkUnsafeRowReadSupport.scala
index cffde6c..5a4d572 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/readsupport/SparkUnsafeRowReadSuport.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/readsupport/SparkUnsafeRowReadSupport.scala
@@ -28,7 +28,7 @@ import org.apache.carbondata.hadoop.readsupport.CarbonReadSupport
 /**
  * Read support class which converts carbon row array format to sparks Internal row.
  */
-class SparkUnsafeRowReadSuport(requiredSchema: StructType) extends CarbonReadSupport[InternalRow] {
+class SparkUnsafeRowReadSupport(requiredSchema: StructType) extends CarbonReadSupport[InternalRow] {
   private val unsafeProjection = UnsafeProjection.create(requiredSchema)
   override def initialize(carbonColumns: Array[CarbonColumn],
       carbonTable: CarbonTable): Unit = {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index d266680..9928f48 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -192,9 +192,9 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       .map(_.invoke(this).asInstanceOf[Keyword].str)
 
   override val lexical = {
-    val sqllex = new SqlLexical()
-    sqllex.initialize(newReservedWords)
-    sqllex
+    val sqlLex = new SqlLexical()
+    sqlLex.initialize(newReservedWords)
+    sqlLex
 
   }
 
@@ -229,7 +229,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
 
   protected lazy val options: Parser[(String, String)] =
     (stringLit <~ "=") ~ stringLit ^^ {
-      case opt ~ optvalue => (opt.trim.toLowerCase(), optvalue)
+      case opt ~ optValue => (opt.trim.toLowerCase(), optValue)
       case _ => ("", "")
     }
 
@@ -242,19 +242,19 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
 
   protected lazy val partitions: Parser[(String, Option[String])] =
     (ident <~ "=".?) ~ stringLit.? ^^ {
-      case opt ~ optvalue => (opt.trim, optvalue)
+      case opt ~ optValue => (opt.trim, optValue)
       case _ => ("", None)
     }
 
   protected lazy val valueOptions: Parser[(Int, Int)] =
     (numericLit <~ ",") ~ numericLit ^^ {
-      case opt ~ optvalue => (opt.toInt, optvalue.toInt)
+      case opt ~ optValue => (opt.toInt, optValue.toInt)
       case _ => (0, 0)
     }
 
   protected lazy val columnOptions: Parser[(String, String)] =
     (stringLit <~ ",") ~ stringLit ^^ {
-      case opt ~ optvalue => (opt, optvalue)
+      case opt ~ optValue => (opt, optValue)
       case _ =>
         throw new MalformedCarbonCommandException(s"value cannot be empty")
     }
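
The options/partitions/valueOptions productions renamed above all share one parser-combinator shape: parse a token, a separator and a second token, then map the opt ~ optValue pair into a tuple. A tiny stand-alone equivalent using scala-parser-combinators, with string-literal handling simplified for the example:

    import scala.util.parsing.combinator.JavaTokenParsers

    // Minimal key = 'value' option parser in the same style as the DDL parser above.
    object OptionParser extends JavaTokenParsers {
      private def quoted: Parser[String] =
        stringLiteral ^^ (s => s.substring(1, s.length - 1))   // strip surrounding quotes

      def option: Parser[(String, String)] =
        (quoted <~ "=") ~ quoted ^^ { case opt ~ optValue => (opt.trim.toLowerCase, optValue) }

      def parseOption(in: String): (String, String) = parseAll(option, in) match {
        case Success(result, _) => result
        case failure            => throw new IllegalArgumentException(failure.toString)
      }
    }

    // e.g. parsing the input  "DELIMITER" = ","  yields ("delimiter", ",")
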
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonParserUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonParserUtil.scala
index 7619afd..0c67aeb 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonParserUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonParserUtil.scala
@@ -302,7 +302,7 @@ object CarbonParserUtil {
       }
 
       // validate if both local dictionary include and exclude contains same column
-      CarbonScalaUtil.validateDuplicateLocalDictIncludeExcludeColmns(tableProperties)
+      CarbonScalaUtil.validateDuplicateColumnsForLocalDict(tableProperties)
     }
 
     // get no inverted index columns from table properties.
@@ -375,12 +375,12 @@ object CarbonParserUtil {
     }
     // long_string_columns columns cannot be in no_inverted_index columns
     var longStringColumns = varcharColumns.map(_.toUpperCase)
-    var noInvColIntersecLongStrCols = longStringColumns
+    var noInvColIntersectLongStrCols = longStringColumns
       .intersect(noInvertedIdxCols.map(_.toUpperCase))
-    if (!noInvColIntersecLongStrCols.isEmpty) {
+    if (!noInvColIntersectLongStrCols.isEmpty) {
       throw new MalformedCarbonCommandException(
         s"Column(s): ${
-          noInvColIntersecLongStrCols.mkString(",")
+          noInvColIntersectLongStrCols.mkString(",")
         } both in no_inverted_index and long_string_columns which is not allowed.")
     }
     // long_string_columns columns cannot be in partition columns
@@ -644,7 +644,7 @@ object CarbonParserUtil {
       validateLongStringColumns(fields, varcharCols)
     }
 
-    // All columns in sortkey should be there in create table cols
+    // All columns in sort_columns should be there in create table cols
     var sortKeyOption = tableProperties.get(CarbonCommonConstants.SORT_COLUMNS)
     if (!sortKeyOption.isDefined) {
       // default no columns are selected for sorting in no_sort scope
@@ -710,7 +710,7 @@ object CarbonParserUtil {
         dimFields += field
       } else if (isDetectAsDimensionDataType(field.dataType.get)) {
         dimFields += field
-        // consider all String and binary cols as noDicitonaryDims by default
+        // consider all String and binary cols as noDictionaryDims by default
         if ((DataTypes.STRING.getName.equalsIgnoreCase(field.dataType.get)) ||
             (DataTypes.BINARY.getName.equalsIgnoreCase(field.dataType.get))) {
           noDictionaryDims :+= field.column
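
The noInvColIntersectLongStrCols rename above is a plain set-intersection validation: a column may not appear in both long_string_columns and no_inverted_index. Reduced to its core, again with a generic exception standing in for MalformedCarbonCommandException:

    def validateDisjoint(longStringColumns: Seq[String], noInvertedIdxCols: Seq[String]): Unit = {
      val overlap = longStringColumns.map(_.toUpperCase)
        .intersect(noInvertedIdxCols.map(_.toUpperCase))
      if (overlap.nonEmpty) {
        throw new IllegalArgumentException(
          s"Column(s): ${overlap.mkString(",")} both in no_inverted_index and " +
          "long_string_columns which is not allowed.")
      }
    }

    validateDisjoint(Seq("comment"), Seq("id"))          // passes
    // validateDisjoint(Seq("comment"), Seq("comment"))  // would throw
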
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/events/MergeBloomIndexEventListener.scala b/integration/spark/src/main/scala/org/apache/spark/sql/events/MergeBloomIndexEventListener.scala
index 8b032ff..3c1aea3 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/events/MergeBloomIndexEventListener.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/events/MergeBloomIndexEventListener.scala
@@ -61,14 +61,14 @@ class MergeBloomIndexEventListener extends OperationEventListener with Logging {
         if (index.size > 0 && segmentIds.size > 0) {
           // we extract bloom indexSchema name and index columns here
           // because TableIndex is not serializable
-          val bloomDMnames = ListBuffer.empty[String]
+          val bloomIndexNames = ListBuffer.empty[String]
           val bloomIndexColumns = ListBuffer.empty[Seq[String]]
           index.foreach( dm => {
-            bloomDMnames += dm.getIndexSchema.getIndexName
+            bloomIndexNames += dm.getIndexSchema.getIndexName
             bloomIndexColumns += dm.getIndexSchema.getIndexColumns.map(_.trim.toLowerCase)
           })
           new CarbonMergeBloomIndexFilesRDD(sparkSession, carbonTable,
-            segmentIds, bloomDMnames, bloomIndexColumns).collect()
+            segmentIds, bloomIndexNames, bloomIndexColumns).collect()
         }
     }
   }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/CastExpressionOptimization.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/CastExpressionOptimization.scala
index 90558ac..cd882e3 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/CastExpressionOptimization.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/CastExpressionOptimization.scala
@@ -65,8 +65,8 @@ object CastExpressionOptimization {
         } catch {
           case e: ParseException =>
             try {
-              val parsenew: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSz")
-              parsenew.parse(v.toString).getTime() * 1000L
+              val format: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSz")
+              format.parse(v.toString).getTime() * 1000L
             } catch {
               case e: ParseException =>
                 val gmtDay = new SimpleDateFormat("yyyy-MM-dd", Locale.US)
@@ -140,7 +140,8 @@ object CastExpressionOptimization {
 
   /**
    * This routines tries to apply rules on Cast Filter Predicates and if the rules applied and the
-   * values can be toss back to native datatypes the cast is removed. Current two rules are applied
+   * values can be tossed back to native data types, the cast is removed.
+   * Currently two rules are applied:
    * a) Left : timestamp column      Right : String Value
    * Input from Spark : cast (col as string) <> 'String Literal'
    * Change to        : Column <> 'Long value of Timestamp String'
@@ -394,7 +395,7 @@ object CastExpressionOptimization {
 
   /**
    * Spark compares data based on double also.
-   * Ex. slect * ...where time >0 , this will return all data
+   * Ex. select * ...where time >0 , this will return all data
    * So better  give to Spark as Cast Expression.
    *
    * @param numericTimeValue
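
The first hunk in this file shows the cast optimization trying progressively looser SimpleDateFormat patterns and converting the parsed time to microseconds (getTime * 1000L). A stand-alone sketch of that fallback chain, with the pattern list trimmed to three entries for brevity:

    import java.text.{ParseException, SimpleDateFormat}
    import java.util.Locale

    // Try each pattern in order; return microseconds since epoch for the first one that parses.
    def parseTimestampMicros(value: String): Option[Long] = {
      val patterns = Seq(
        "yyyy-MM-dd HH:mm:ss",
        "yyyy-MM-dd'T'HH:mm:ss.SSSz",
        "yyyy-MM-dd")
      patterns.view.flatMap { pattern =>
        try {
          Some(new SimpleDateFormat(pattern, Locale.US).parse(value).getTime * 1000L)
        } catch {
          case _: ParseException => None
        }
      }.headOption
    }

    parseTimestampMicros("2020-07-30")   // Some(...), matched by the date-only pattern
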
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
index e7fd07e..6d3b550 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
@@ -136,7 +136,7 @@ case class CarbonShowCacheCommand(showExecutorCache: Boolean,
     if (!isDistributedPruningEnabled) {
       // Block here. this feature is only with index server enabled
       throw new UnsupportedOperationException(
-        "Show Executor Metacache is only avalable with Index Server Enabled")
+        "Show Executor MetaCache is only available with Index Server Enabled")
     } else {
       // get all the executor details from the index server
       try {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
index 751a6c3..e551301 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
@@ -56,9 +56,9 @@ case class TableModel(
     msrCols: Seq[Field],
     sortKeyDims: Option[Seq[String]],
     varcharCols: Option[Seq[String]],
-    highcardinalitydims: Option[Seq[String]],
+    highCardinalityDims: Option[Seq[String]],
     noInvertedIdxCols: Option[Seq[String]],
-    innvertedIdxCols: Option[Seq[String]],
+    invertedIdxCols: Option[Seq[String]],
     colProps: Option[util.Map[String, util.List[ColumnProperty]]] = None,
     bucketFields: Option[BucketFields],
     partitionInfo: Option[PartitionInfo],
@@ -100,7 +100,7 @@ case class CarbonMergerMapping(
     factTableName: String,
     validSegments: Array[Segment],
     tableId: String,
-    campactionType: CompactionType,
+    compactionType: CompactionType,
     // maxSegmentColumnSchemaList is list of column schema of last segment of compaction
     var maxSegmentColumnSchemaList: List[ColumnSchema],
     @transient currentPartitions: Option[Seq[PartitionSpec]])
@@ -238,9 +238,9 @@ class AlterTableColumnSchemaGenerator(
       (x.isDimensionColumn && !x.getDataType.isComplexType()
           && x.getSchemaOrdinal != -1 && (x.getDataType != DataTypes.VARCHAR)))
     var newCols = Seq[ColumnSchema]()
-    var invertedIndxCols: Array[String] = Array[String]()
+    var invertedIndexCols: Array[String] = Array[String]()
     if (alterTableModel.tableProperties.get(CarbonCommonConstants.INVERTED_INDEX).isDefined) {
-      invertedIndxCols = alterTableModel.tableProperties(CarbonCommonConstants.INVERTED_INDEX)
+      invertedIndexCols = alterTableModel.tableProperties(CarbonCommonConstants.INVERTED_INDEX)
         .split(',').map(_.trim)
     }
 
@@ -294,9 +294,9 @@ class AlterTableColumnSchemaGenerator(
       newCols ++= Seq(columnSchema)
     })
 
-    if (invertedIndxCols.nonEmpty) {
+    if (invertedIndexCols.nonEmpty) {
       for (column <- newCols) {
-        if (invertedIndxCols.contains(column.getColumnName) && column.isDimensionColumn) {
+        if (invertedIndexCols.contains(column.getColumnName) && column.isDimensionColumn) {
           column.setUseInvertedIndex(true)
         }
       }
@@ -441,7 +441,7 @@ class AlterTableColumnSchemaGenerator(
       }
 
       // validate if both local dictionary include and exclude contains same column
-      CarbonScalaUtil.validateDuplicateLocalDictIncludeExcludeColmns(alterMutableTblProperties)
+      CarbonScalaUtil.validateDuplicateColumnsForLocalDict(alterMutableTblProperties)
 
       CarbonUtil
         .setLocalDictColumnsToWrapperSchema(newCols.asJava,
@@ -627,7 +627,7 @@ class TableNewProcessor(cm: TableModel) {
     val columnSchema = new ColumnSchema()
     columnSchema.setDataType(dataType)
     columnSchema.setColumnName(colName)
-    val highCardinalityDims = cm.highcardinalitydims.getOrElse(Seq())
+    val highCardinalityDims = cm.highCardinalityDims.getOrElse(Seq())
     if (highCardinalityDims.contains(colName)) {
       encoders.remove(Encoding.DICTIONARY)
     }
@@ -719,10 +719,10 @@ class TableNewProcessor(cm: TableModel) {
           cm.tableProperties(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE))
     }
     cm.msrCols.foreach { field =>
-      // if aggregate function is defined in case of preaggregate and agg function is sum or avg
+      // if aggregate function is defined in case of pre-aggregate and agg function is sum or avg
       // then it can be stored as measure
       var isAggFunPresent = false
-      // getting the encoder from maintable so whatever encoding is applied in maintable
+      // getting the encoder from main table so whatever encoding is applied in main table
       // same encoder can be applied on aggregate table
       val encoders = new java.util.ArrayList[Encoding]()
 
@@ -750,7 +750,7 @@ class TableNewProcessor(cm: TableModel) {
       }
     }
 
-    val invertedIndexCols = cm.innvertedIdxCols.getOrElse(Seq())
+    val invertedIndexCols = cm.invertedIdxCols.getOrElse(Seq())
     for (column <- allColumns) {
       // When the column is measure or the specified no inverted index column in DDL,
       // set useInvertedIndex to false, otherwise true.
@@ -863,12 +863,12 @@ class TableNewProcessor(cm: TableModel) {
   }
 
   //  For checking if the specified col group columns are specified in fields list.
-  protected def checkColGroupsValidity(colGrps: Seq[String],
+  protected def checkColGroupsValidity(columnGroups: Seq[String],
       allCols: Seq[ColumnSchema],
       highCardCols: Seq[String]): Unit = {
-    if (null != colGrps) {
-      colGrps.foreach(columngroup => {
-        val rowCols = columngroup.split(",")
+    if (null != columnGroups) {
+      columnGroups.foreach(columnGroup => {
+        val rowCols = columnGroup.split(",")
         rowCols.foreach(colForGrouping => {
           var found: Boolean = false
           // check for dimensions + measures
@@ -877,7 +877,7 @@ class TableNewProcessor(cm: TableModel) {
               found = true
             }
           })
-          // check for No Dicitonary dimensions
+          // check for No Dictionary dimensions
           highCardCols.foreach(noDicCol => {
             if (colForGrouping.trim.equalsIgnoreCase(noDicCol)) {
               found = true
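
The invertedIndexCols rename above covers a simple pattern: split the comma-separated INVERTED_INDEX table property, trim each name, and flag every matching dimension column. The same logic in isolation, with a tiny stand-in for ColumnSchema:

    // Minimal stand-in for ColumnSchema: a name, a dimension flag and a mutable index flag.
    case class Column(name: String, isDimension: Boolean, var useInvertedIndex: Boolean = false)

    def applyInvertedIndexProperty(invertedIndexProperty: Option[String],
        columns: Seq[Column]): Unit = {
      val invertedIndexCols =
        invertedIndexProperty.map(_.split(',').map(_.trim)).getOrElse(Array.empty[String])
      columns.foreach { column =>
        if (invertedIndexCols.contains(column.name) && column.isDimension) {
          column.useInvertedIndex = true
        }
      }
    }

    val cols = Seq(Column("city", isDimension = true), Column("sales", isDimension = false))
    applyInvertedIndexProperty(Some("city, sales"), cols)   // only 'city' gets the flag
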
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/index/CarbonCreateIndexCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/index/CarbonCreateIndexCommand.scala
index 83f593e..16df7bb 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/index/CarbonCreateIndexCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/index/CarbonCreateIndexCommand.scala
@@ -149,7 +149,7 @@ case class CarbonCreateIndexCommand(
                   column.getColName
                 }")
             }
-            // For bloomfilter, the index column datatype cannot be complex type
+            // For bloom filter, the index column datatype cannot be complex type
             if (column.isComplex) {
               throw new MalformedIndexCommandException(
                 s"BloomFilter does not support complex datatype column: ${
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/index/DropIndexCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/index/DropIndexCommand.scala
index d87cf0a..3d4d32c 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/index/DropIndexCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/index/DropIndexCommand.scala
@@ -241,7 +241,7 @@ private[sql] case class DropIndexCommand(
   }
 
   /**
-   * AcquireLock and remove indexInfo from parent tabe
+   * AcquireLock and remove indexInfo from parent table
    */
   private def removeIndexInfoFromParentTable(sparkSession: SparkSession,
       dbName: String,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddLoadCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddLoadCommand.scala
index a98cec5..b5c2dfe 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddLoadCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddLoadCommand.scala
@@ -276,7 +276,7 @@ case class CarbonAddLoadCommand(
         model.getSegmentId)
       // when this event is triggered, SI load listener will be called for all the SI tables under
       // this main table, no need to load the SI tables for add load command, so add this property
-      // to check in SI loadevent listener to avoid loading to SI.
+      // to check in SI load event listener to avoid loading to SI.
       operationContext.setProperty("isAddLoad", "true")
       val loadTablePreStatusUpdateEvent: LoadTablePreStatusUpdateEvent =
         new LoadTablePreStatusUpdateEvent(
@@ -325,8 +325,8 @@ case class CarbonAddLoadCommand(
       FileFactory.deleteFile(segmentFile)
       clearIndexFiles(carbonTable, model.getSegmentId)
       LOGGER.info("********clean up done**********")
-      LOGGER.error("Data load failed due to failure in table status updation.")
-      throw new Exception("Data load failed due to failure in table status updation.")
+      LOGGER.error("Data load failed due to failure in table status update.")
+      throw new Exception("Data load failed due to failure in table status update.")
     }
     val viewManager = MVManagerInSpark.get(sparkSession)
     val viewSchemas = new util.ArrayList[MVSchema]()
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
index dc50cf5..05534e0 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
@@ -124,7 +124,7 @@ case class CarbonAlterTableCompactionCommand(
             .getTableStatusFilePath(table.getTablePath), loadMetaDataDetails)
         } else {
           throw new ConcurrentOperationException(table.getDatabaseName,
-            table.getTableName, "table status updation", "upgrade segments")
+            table.getTableName, "table status update", "upgrade segments")
         }
       } finally {
         tableStatusLock.unlock()
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala
index e8331f0..52c7807 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertFromStageCommand.scala
@@ -152,9 +152,9 @@ case class CarbonInsertFromStageCommand(
       }
 
       // We add a tag 'loading' to the stages in process.
-      // different insertstage processes can load different data separately
+      // different insert stage processes can load different data separately
       // by choose the stages without 'loading' tag or stages loaded timeout.
-      // which avoid loading the same data between concurrent insertstage processes.
+      // which avoids loading the same data between concurrent insert stage processes.
       // The 'loading' tag is actually an empty file with
       // '.loading' suffix filename
       val numThreads = Math.min(Math.max(stageFiles.length, 1), 10)
@@ -512,7 +512,7 @@ case class CarbonInsertFromStageCommand(
           // make isFailed to be true if createNewFile return false.
           // the reason can be file exists or exceptions.
           var isFailed = !stageLoadingFile.createNewFile()
-          // if file exists, modify the lastmodifiedtime of the file.
+          // if file exists, modify the lastModifiedTime of the file.
           if (isFailed) {
             // make isFailed to be true if setLastModifiedTime return false.
             isFailed = !stageLoadingFile.setLastModifiedTime(System.currentTimeMillis());
@@ -523,7 +523,7 @@ case class CarbonInsertFromStageCommand(
     }.map { future =>
       future.get()
     }.filter { files =>
-      // keep the files when isFailed is ture. so we can retry on these files.
+      // keep the files when isFailed is true, so we can retry on these files.
       files._3
     }.map { files =>
       (files._1, files._2)
@@ -568,7 +568,7 @@ case class CarbonInsertFromStageCommand(
               + CarbonTablePath.LOADING_FILE_SUFFIX);
           var isFailed = false
           // If delete() return false, maybe the reason is FileNotFount or FileFailedClean.
-          // Considering FileNotFound means FileCleanSucessfully.
+          // Considering FileNotFound means the file was cleaned successfully.
           // We need double check the file exists or not when delete() return false.
           if (!(files._1.delete() && files._2.delete() && stageLoadingFile.delete())) {
             // If the file still exists,  make isFailed to be true
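
The delete-and-verify idiom described here can be reduced to a small sketch (plain java.io.File, illustrative names):

    import java.io.File

    object DeleteOrVerifySketch {
      // delete() returning false may just mean the file was already gone,
      // so only report failure when the file still exists afterwards.
      def deletedOrAlreadyGone(file: File): Boolean = {
        if (file.delete()) true else !file.exists()
      }

      def main(args: Array[String]): Unit = {
        println(deletedOrAlreadyGone(new File("/tmp/stage_00001.loading")))
      }
    }
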
@@ -581,7 +581,7 @@ case class CarbonInsertFromStageCommand(
     }.map { future =>
       future.get()
     }.filter { files =>
-      // keep the files when isFailed is ture. so we can retry on these files.
+      // keep the files when isFailed is true. so we can retry on these files.
       files._3
     }.map { files =>
       (files._1, files._2)
@@ -620,7 +620,7 @@ case class CarbonInsertFromStageCommand(
       snapshotFilePath: String): Boolean = {
     val snapshotFile = FileFactory.getCarbonFile(snapshotFilePath)
     // If delete() return false, maybe the reason is FileNotFount or FileFailedClean.
-    // Considering FileNotFound means FileCleanSucessfully.
+    // Considering FileNotFound means the file was cleaned successfully.
     // We need double check the file exists or not when delete() return false.
     if (!snapshotFile.delete()) {
       return snapshotFile.exists()
@@ -662,9 +662,9 @@ case class CarbonInsertFromStageCommand(
         (file.getName.substring(0, file.getName.indexOf(".")), file)
       }.toMap
 
-      // different insertstage processes can load different data separately
+      // different insert stage processes can load different data separately
       // by choose the stages without 'loading' tag or stages loaded timeout.
-      // which avoid loading the same data between concurrent insertstage processes.
+      // which avoids loading the same data between concurrent insert stage processes.
       // Overall, There are two conditions to choose stages to process:
       // 1) stages never loaded, choose the stages without '.loading' tag.
       // 2) stages loaded timeout, the timeout threshold depends on INSERT_STAGE_TIMEOUT
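
Putting the two selection conditions into a compact sketch; the timeout value and the marker lookup are assumptions for illustration, not Carbon's actual INSERT_STAGE_TIMEOUT handling:

    import java.io.File

    object StageSelectionSketch {
      // Assumed example threshold; the real value comes from INSERT_STAGE_TIMEOUT.
      val insertStageTimeoutMs: Long = 8L * 60 * 60 * 1000

      // Keep stages that were never tagged '.loading' (condition 1), or whose
      // tag is older than the timeout (condition 2). Keying the marker map by
      // stage file name simplifies the real lookup.
      def selectStages(stageFiles: Seq[File], loadingMarkers: Map[String, File]): Seq[File] = {
        val now = System.currentTimeMillis()
        stageFiles.filter { stage =>
          loadingMarkers.get(stage.getName) match {
            case None => true
            case Some(marker) => now - marker.lastModified() > insertStageTimeoutMs
          }
        }
      }
    }
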
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
index 8c14917..01afa5d 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
@@ -137,7 +137,7 @@ case class CarbonInsertIntoCommand(databaseNameOp: Option[String],
     val hadoopConf = sparkSession.sessionState.newHadoopConf()
     CarbonProperties.getInstance().addProperty("zookeeper.enable.lock", "false")
     val factPath = ""
-    currPartitions = CommonLoadUtils.getCurrentParitions(sparkSession, table)
+    currPartitions = CommonLoadUtils.getCurrentPartitions(sparkSession, table)
     CommonLoadUtils.setNumberOfCoresWhileLoading(sparkSession)
     val optionsFinal: util.Map[String, String] = CommonLoadUtils.getFinalLoadOptions(table, options)
     val carbonLoadModel: CarbonLoadModel = CommonLoadUtils.prepareLoadModel(
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoWithDf.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoWithDf.scala
index fe9f3a2..2496b96 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoWithDf.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoWithDf.scala
@@ -71,7 +71,7 @@ case class CarbonInsertIntoWithDf(databaseNameOp: Option[String],
     val hadoopConf = sparkSession.sessionState.newHadoopConf()
     CarbonProperties.getInstance().addProperty("zookeeper.enable.lock", "false")
     val factPath = ""
-    val currPartitions = CommonLoadUtils.getCurrentParitions(sparkSession, table)
+    val currPartitions = CommonLoadUtils.getCurrentPartitions(sparkSession, table)
     CommonLoadUtils.setNumberOfCoresWhileLoading(sparkSession)
     val optionsFinal: util.Map[String, String] =
       CommonLoadUtils.getFinalLoadOptions(table, options)
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
index e50f575..a331e7b 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
@@ -95,7 +95,7 @@ case class CarbonLoadDataCommand(databaseNameOp: Option[String],
     val dbName = CarbonEnv.getDatabaseName(databaseNameOp)(sparkSession)
     CarbonProperties.getInstance().addProperty("zookeeper.enable.lock", "false")
     val factPath = FileUtils.getPaths(factPathFromUser, hadoopConf)
-    currPartitions = CommonLoadUtils.getCurrentParitions(sparkSession, table)
+    currPartitions = CommonLoadUtils.getCurrentPartitions(sparkSession, table)
     CommonLoadUtils.setNumberOfCoresWhileLoading(sparkSession)
     val optionsFinal: util.Map[String, String] =
       CommonLoadUtils.getFinalLoadOptions(table, options)
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSegmentsAsSelectCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSegmentsAsSelectCommand.scala
index f1e668e..66a8b9a 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSegmentsAsSelectCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSegmentsAsSelectCommand.scala
@@ -125,7 +125,7 @@ case class CarbonShowSegmentsAsSelectCommand(
       segments: Array[LoadMetadataDetails],
       rows: Seq[SegmentRow]): Unit = {
 
-    // populate a dataframe containing all segment information
+    // populate a DataFrame containing all segment information
     val tablePath = carbonTable.getTablePath
     val segmentRowView = rows ++ segments.toSeq.map { segment =>
       val mergedToId = CarbonStore.getMergeTo(segment)
@@ -150,7 +150,7 @@ case class CarbonShowSegmentsAsSelectCommand(
         if (segment.getSegmentFile == null) "NA" else segment.getSegmentFile)
     }
 
-    // create a temp view using the populated dataframe and execute the query on it
+    // create a temp view using the populated DataFrame and execute the query on it
     val df = sparkSession.createDataFrame(segmentRowView)
     checkIfTableExist(sparkSession, tempViewName)
     df.createOrReplaceTempView(tempViewName)
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CommonLoadUtils.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CommonLoadUtils.scala
index fa2d178..15a0823 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CommonLoadUtils.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CommonLoadUtils.scala
@@ -191,7 +191,7 @@ object CommonLoadUtils {
       .addProperty(CarbonCommonConstants.NUM_CORES_LOADING, numCoresLoading)
   }
 
-  def getCurrentParitions(sparkSession: SparkSession,
+  def getCurrentPartitions(sparkSession: SparkSession,
       table: CarbonTable): util.List[PartitionSpec] = {
     val currPartitions = if (table.isHivePartitionTable) {
       CarbonFilters.getCurrentPartitions(
@@ -392,7 +392,7 @@ object CommonLoadUtils {
     val finalRDD = convertRDD.mapPartitionsWithIndex { case(index, rows) =>
       DataTypeUtil.setDataTypeConverter(new SparkDataTypeConverterImpl)
       ThreadLocalSessionInfo.setConfigurationToCurrentThread(conf.value.value)
-      DataLoadProcessorStepOnSpark.inputAndconvertFunc(
+      DataLoadProcessorStepOnSpark.inputAndConvertFunc(
         rows,
         index,
         modelBroadcast,
@@ -405,7 +405,7 @@ object CommonLoadUtils {
   }
 
   /**
-   * Transform the rdd to logical plan as per the sortscope. If it is global sort scope then it
+   * Transform the rdd to logical plan as per the sort_scope. If it is global sort scope then it
    * will convert to sort logical plan otherwise project plan.
    */
   def transformQueryWithRow(rdd: RDD[Row],
@@ -453,7 +453,7 @@ object CommonLoadUtils {
       catalogAttributes.find(_.name.equalsIgnoreCase(a.name)).get
     })
     attributes = attributes.map { attr =>
-      // Update attribute datatypes in case of dictionary columns, in case of dictionary columns
+      // Update attribute data types in case of dictionary columns, in case of dictionary columns
       // datatype is always int
       val column = table.getColumnByName(attr.name)
       val updatedDataType = if (column.getDataType ==
@@ -505,7 +505,7 @@ object CommonLoadUtils {
     }
     val partitionsLen = updatedRdd.partitions.length
 
-    // If it is global sort scope then appl sort logical plan on the sort columns
+    // If it is global sort scope then apply sort logical plan on the sort columns
     if (sortScope == SortScopeOptions.SortScope.GLOBAL_SORT) {
       // Because if the number of partitions greater than 1, there will be action operator(sample)
       // in sortBy operator. So here we cache the rdd to avoid do input and convert again.
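
The caching rationale in this comment is easy to demonstrate: RDD.sortBy range-partitions its input, which runs a sampling job before the real shuffle, so an uncached upstream RDD would be recomputed. A self-contained sketch on toy data in local mode:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.storage.StorageLevel

    object GlobalSortCacheSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().master("local[2]").appName("sketch").getOrCreate()
        // Pretend the map below is the expensive input-and-convert step.
        val converted = spark.sparkContext
          .parallelize(1 to 10000, 4)
          .map(i => (i % 97, i))
          .persist(StorageLevel.MEMORY_AND_DISK_SER)

        // sortBy triggers a sampling pass plus the shuffle; both read from the cache.
        val sorted = converted.sortBy(_._1)
        println(sorted.take(5).mkString(", "))

        converted.unpersist()
        spark.stop()
      }
    }
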
@@ -558,7 +558,7 @@ object CommonLoadUtils {
   }
 
   /**
-   * Transform the rdd to logical plan as per the sortscope. If it is global sort scope then it
+   * Transform the rdd to logical plan as per the sort_scope. If it is global sort scope then it
    * will convert to sort logical plan otherwise project plan.
    */
   def transformQueryWithInternalRow(rdd: RDD[InternalRow],
@@ -615,7 +615,7 @@ object CommonLoadUtils {
       df: DataFrame,
       carbonLoadModel: CarbonLoadModel): LogicalPlan = {
     SparkUtil.setNullExecutionId(sparkSession)
-    // In case of update, we don't need the segmrntid column in case of partitioning
+    // In case of update, we don't need the segmentId column in case of partitioning
     val dropAttributes = df.logicalPlan.output.dropRight(1)
     val finalOutput = catalogTable.schema.map { attr =>
       dropAttributes.find { d =>
@@ -808,11 +808,11 @@ object CommonLoadUtils {
 
   def getTimeAndDateFormatFromLoadModel(loadModel: CarbonLoadModel): (SimpleDateFormat,
     SimpleDateFormat) = {
-    var timeStampformatString = loadModel.getTimestampFormat
-    if (timeStampformatString.isEmpty) {
-      timeStampformatString = loadModel.getDefaultTimestampFormat
+    var timestampFormatString = loadModel.getTimestampFormat
+    if (timestampFormatString.isEmpty) {
+      timestampFormatString = loadModel.getDefaultTimestampFormat
     }
-    val timeStampFormat = new SimpleDateFormat(timeStampformatString)
+    val timeStampFormat = new SimpleDateFormat(timestampFormatString)
     var dateFormatString = loadModel.getDateFormat
     if (dateFormatString.isEmpty) {
       dateFormatString = loadModel.getDefaultDateFormat
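
The fallback logic in this hunk reduces to choosing a pattern string and building a SimpleDateFormat; a sketch with assumed default patterns (not necessarily Carbon's defaults):

    import java.text.SimpleDateFormat
    import java.util.Date

    object LoadFormatSketch {
      def formats(timestampFormatOption: String, dateFormatOption: String)
          : (SimpleDateFormat, SimpleDateFormat) = {
        val timestampPattern =
          if (timestampFormatOption.isEmpty) "yyyy-MM-dd HH:mm:ss" else timestampFormatOption
        val datePattern =
          if (dateFormatOption.isEmpty) "yyyy-MM-dd" else dateFormatOption
        (new SimpleDateFormat(timestampPattern), new SimpleDateFormat(datePattern))
      }

      def main(args: Array[String]): Unit = {
        val (timestampFormat, dateFormat) = formats("", "")
        println(timestampFormat.format(new Date()) + " / " + dateFormat.format(new Date()))
      }
    }
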
@@ -1082,7 +1082,7 @@ object CommonLoadUtils {
         }
       }
 
-      // Prepriming for Partition table here
+      // Pre-priming for Partition table here
       if (!StringUtils.isEmpty(loadParams.carbonLoadModel.getSegmentId)) {
         DistributedRDDUtils.triggerPrepriming(loadParams.sparkSession,
           table,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/RefreshCarbonTableCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/RefreshCarbonTableCommand.scala
index 8cf3e7b..a86a874 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/RefreshCarbonTableCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/RefreshCarbonTableCommand.scala
@@ -103,7 +103,7 @@ case class RefreshCarbonTableCommand(
   /**
    * Refresh the sort_column flag in column schema in case of old store. Before V3, sort_column
    * option is not set but by default all dimension columns should be treated
-   * as sort columns if SORT_COLUMNS property is not defined in tblproperties
+   * as sort columns if SORT_COLUMNS property is not defined in table properties
    *
    * @param tableInfo
    */
@@ -163,7 +163,7 @@ case class RefreshCarbonTableCommand(
       val refreshTablePreExecutionEvent: RefreshTablePreExecutionEvent =
         new RefreshTablePreExecutionEvent(sparkSession,
           tableInfo.getOrCreateAbsoluteTableIdentifier())
-      if (SparkUtil.isSparkVersionXandAbove("2.4")) {
+      if (SparkUtil.isSparkVersionXAndAbove("2.4")) {
         // During refresh table, when this option is set to true, creating managed tables with
         // nonempty location is allowed. Otherwise, an analysis exception is thrown.
         // https://kb.databricks.com/jobs/spark-overwrite-cancel.html
@@ -178,7 +178,7 @@ case class RefreshCarbonTableCommand(
       case e: AnalysisException => throw e
       case e: Exception => throw e
     } finally {
-      if (SparkUtil.isSparkVersionXandAbove("2.4")) {
+      if (SparkUtil.isSparkVersionXAndAbove("2.4")) {
         // Set it back to default
         sparkSession.sessionState.conf
           .setConfString(allowCreateTableNonEmptyLocationConf, allowCreateTableNonEmptyLocation)
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForDeleteCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForDeleteCommand.scala
index 5eed830..cf81200 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForDeleteCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForDeleteCommand.scala
@@ -120,7 +120,7 @@ private[sql] case class CarbonProjectForDeleteCommand(
         isUpdateOperation = false,
         executorErrors)
 
-      // Check for any failures occured during delete delta execution
+      // Check for any failures occurred during delete delta execution
       if (executorErrors.failureCauses != FailureCauses.NONE) {
         throw new Exception(executorErrors.errorMsg)
       }
@@ -136,7 +136,7 @@ private[sql] case class CarbonProjectForDeleteCommand(
         viewManager.onTruncate(viewSchemas)
       }
 
-      // prepriming for delete command
+      // pre-priming for delete command
       DeleteExecution.reloadDistributedSegmentCache(carbonTable,
         deletedSegments, operationContext)(sparkSession)
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForUpdateCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForUpdateCommand.scala
index 43ef3f3..7d3225d 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForUpdateCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/CarbonProjectForUpdateCommand.scala
@@ -120,7 +120,7 @@ private[sql] case class CarbonProjectForUpdateCommand(
         logInfo("Successfully able to get the table metadata file lock")
       }
       else {
-        throw new Exception("Table is locked for updation. Please try after some time")
+        throw new Exception("Table is locked for update. Please try after some time")
       }
 
       val executionErrors = new ExecutionErrors(FailureCauses.NONE, "")
@@ -180,7 +180,7 @@ private[sql] case class CarbonProjectForUpdateCommand(
             executionErrors,
             segmentsToBeDeleted)
 
-          // prepriming for update command
+          // pre-priming for update command
           DeleteExecution.reloadDistributedSegmentCache(carbonTable,
             segmentsToBeDeleted, operationContext)(sparkSession)
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala
index 384fecc..b120ba8 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala
@@ -50,7 +50,7 @@ import org.apache.carbondata.events.{IndexServerLoadEvent, OperationContext, Ope
 import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
 import org.apache.carbondata.processing.exception.MultipleMatchingException
 import org.apache.carbondata.processing.loading.FailureCauses
-import org.apache.carbondata.spark.DeleteDelataResultImpl
+import org.apache.carbondata.spark.DeleteDeltaResultImpl
 
 object DeleteExecution {
   val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
@@ -203,7 +203,7 @@ object DeleteExecution {
         load: LoadMetadataDetails, isPartitionTable: Boolean
     ): Iterator[(SegmentStatus, (SegmentUpdateDetails, ExecutionErrors, Long))] = {
 
-      val result = new DeleteDelataResultImpl()
+      val result = new DeleteDeltaResultImpl()
       var deleteStatus = SegmentStatus.LOAD_FAILURE
       val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
       // here key = segment/blockName
@@ -266,9 +266,9 @@ object DeleteExecution {
                 CarbonUpdateUtil.getRequiredFieldFromTID(TID, TupleIdEnum.BLOCK_ID) +
                 CarbonCommonConstants.FACT_FILE_EXT)
           }
-          val deleteDeletaPath = CarbonUpdateUtil
+          val deleteDeltaPath = CarbonUpdateUtil
             .getDeleteDeltaFilePath(blockPath, blockName, timestamp)
-          val carbonDeleteWriter = new CarbonDeleteDeltaWriterImpl(deleteDeletaPath)
+          val carbonDeleteWriter = new CarbonDeleteDeltaWriterImpl(deleteDeltaPath)
 
 
 
@@ -324,7 +324,7 @@ object DeleteExecution {
     (res, blockMappingVO)
   }
 
-  // all or none : update status file, only if complete delete opeartion is successfull.
+  // all or none : update status file, only if complete delete operation is successful.
   def checkAndUpdateStatusFiles(
       executorErrors: ExecutionErrors,
       res: Array[List[(SegmentStatus, (SegmentUpdateDetails, ExecutionErrors, Long))]],
@@ -384,15 +384,15 @@ object DeleteExecution {
       // In case of failure , clean all related delete delta files
       CarbonUpdateUtil.cleanStaleDeltaFiles(carbonTable, timestamp)
       val errorMessage = "Delete data operation is failed due to failure " +
-                         "in table status updation."
-      LOGGER.error("Delete data operation is failed due to failure in table status updation.")
+                         "in table status update."
+      LOGGER.error("Delete data operation is failed due to failure in table status update.")
       executorErrors.failureCauses = FailureCauses.STATUS_FILE_UPDATION_FAILURE
       executorErrors.errorMsg = errorMessage
     }
     segmentsTobeDeleted
   }
 
-  // all or none : update status file, only if complete delete opeartion is successfull.
+  // all or none : update status file, only if complete delete operation is successful.
   def processSegments(executorErrors: ExecutionErrors,
       res: Array[List[(SegmentStatus, (SegmentUpdateDetails, ExecutionErrors, Long))]],
       carbonTable: CarbonTable,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/IUDCommonUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/IUDCommonUtil.scala
index 220d75d..658f172 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/IUDCommonUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/IUDCommonUtil.scala
@@ -61,9 +61,9 @@ object IUDCommonUtil {
             .getDatabaseName + "." +
           logicalRelation.relation.asInstanceOf[CarbonDatasourceHadoopRelation].carbonTable
             .getTableName
-        val sementProperty = carbonProperties
+        val segmentProperty = carbonProperties
           .getProperty(CarbonCommonConstants.CARBON_INPUT_SEGMENTS + dbAndTb, "")
-        if (!(sementProperty.equals("") || sementProperty.trim.equals("*"))) {
+        if (!(segmentProperty.equals("") || segmentProperty.trim.equals("*"))) {
           throw new MalformedCarbonCommandException("carbon.input.segments." + dbAndTb +
                                                     "should not be set for table used in UPDATE " +
                                                     "query. Please reset the property to carbon" +
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/CarbonMergeDataSetCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/CarbonMergeDataSetCommand.scala
index fb5b200..7e02f4a 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/CarbonMergeDataSetCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/CarbonMergeDataSetCommand.scala
@@ -56,7 +56,7 @@ import org.apache.carbondata.processing.loading.FailureCauses
 
 /**
  * This command will merge the data of source dataset to target dataset backed by carbon table.
- * @param targetDsOri Target dataset to merge the data. This dataset should be backed by carbontable
+ * @param targetDsOri Target dataset to merge the data. It should be backed by a carbon table
  * @param srcDS  Source dataset, it can be any data.
  * @param mergeMatches It contains the join condition and list match conditions to apply.
  */
@@ -105,7 +105,7 @@ case class CarbonMergeDataSetCommand(
     // decide join type based on match conditions
     val joinType = decideJoinType
 
-    // Add the tupleid udf to get the tupleid to generate delete delta.
+    // Add the getTupleId() udf to get the tuple id to generate delete delta.
     val frame =
       targetDs
         .withColumn(CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID, expr("getTupleId()"))
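
For readers unfamiliar with the withColumn/expr idiom used here, a generic Spark example with a made-up UDF; getTupleId() itself is Carbon's implicit tuple-id function, so a stand-in is registered instead:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.functions.expr

    object ExprUdfSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().master("local[1]").appName("sketch").getOrCreate()
        import spark.implicits._

        // Stand-in for getTupleId(): tag every row from one of its columns.
        spark.udf.register("rowTag", (id: Long) => s"tag-$id")

        val tagged = Seq((1L, "a"), (2L, "b")).toDF("id", "value")
          .withColumn("tupleTag", expr("rowTag(id)"))
        tagged.show(false)

        spark.stop()
      }
    }
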
@@ -146,9 +146,9 @@ case class CarbonMergeDataSetCommand(
 
     val st = System.currentTimeMillis()
     // Create accumulators to log the stats
-    val stats = Stats(createLongAccumalator("insertedRows"),
-      createLongAccumalator("updatedRows"),
-      createLongAccumalator("deletedRows"))
+    val stats = Stats(createLongAccumulator("insertedRows"),
+      createLongAccumulator("updatedRows"),
+      createLongAccumulator("deletedRows"))
     val targetSchema = StructType(tableCols.map { f =>
       rltn.head.carbonRelation.schema.find(_.name.equalsIgnoreCase(f)).get
     } ++ Seq(StructField(status_on_mergeds, IntegerType)))
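
The stats accumulators can also be created through the public accumulator API; a simplified sketch (the command above wires them up via AccumulatorMetadata instead):

    import org.apache.spark.sql.SparkSession

    object MergeStatsSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().master("local[2]").appName("sketch").getOrCreate()
        val sc = spark.sparkContext

        val insertedRows = sc.longAccumulator("insertedRows")
        val updatedRows  = sc.longAccumulator("updatedRows")
        val deletedRows  = sc.longAccumulator("deletedRows")

        sc.parallelize(1 to 100).foreach { i =>
          if (i % 2 == 0) insertedRows.add(1) else updatedRows.add(1)
        }
        println(s"inserted=${insertedRows.value}, updated=${updatedRows.value}, deleted=${deletedRows.value}")
        spark.stop()
      }
    }
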
@@ -214,7 +214,7 @@ case class CarbonMergeDataSetCommand(
     LOGGER.info(
       " Time taken to merge data  :: " + (System.currentTimeMillis() - st))
 
-    // Load the history table if the inserthistorytable action is added by user.
+    // Load the history table if the insert history table action is added by user.
     HistoryTableLoadHelper.loadHistoryTable(sparkSession, rltn.head, carbonTable,
       trxMgr, mutationAction, mergeMatches)
     // Do IUD Compaction.
@@ -362,7 +362,7 @@ case class CarbonMergeDataSetCommand(
     }, path)
   }
 
-  private def createLongAccumalator(name: String) = {
+  private def createLongAccumulator(name: String) = {
     val acc = new LongAccumulator
     acc.setValue(0)
     acc.metadata = AccumulatorMetadata(AccumulatorContext.newId(), Some(name), false)
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/HistoryTableLoadHelper.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/HistoryTableLoadHelper.scala
index ffd6ca1..2a88456 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/HistoryTableLoadHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/HistoryTableLoadHelper.scala
@@ -56,7 +56,7 @@ object HistoryTableLoadHelper {
         .find(_.isInstanceOf[InsertInHistoryTableAction])
         .get
         .asInstanceOf[InsertInHistoryTableAction]
-      // Get the history table dataframe.
+      // Get the history table DataFrame.
       val histDataFrame: Dataset[Row] = sparkSession.table(insert.historyTable)
       // check if the user wants to insert update history records into history table.
       val updateDataFrame = if (trxMgr.getUpdateTrx != -1) {
@@ -66,7 +66,7 @@ object HistoryTableLoadHelper {
           f.getActions.exists(_.isInstanceOf[UpdateAction])
         }.head.getActions.filter(_.isInstanceOf[InsertInHistoryTableAction]).head.
           asInstanceOf[InsertInHistoryTableAction]
-        // Create the dataframe to fetch history updated records.
+        // Create the DataFrame to fetch history updated records.
         Some(createHistoryDataFrame(sparkSession, rltn, carbonTable, insertHist,
           histDataFrame, trxMgr.getUpdateTrx))
       } else {
@@ -102,7 +102,7 @@ object HistoryTableLoadHelper {
   }
 
   /**
-   * It creates the dataframe to fetch deleted/updated records in the particular transaction.
+   * It creates the DataFrame to fetch deleted/updated records in the particular transaction.
    */
   private def createHistoryDataFrame(sparkSession: SparkSession,
       rltn: CarbonDatasourceHadoopRelation,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MergeDataSetBuilder.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MergeDataSetBuilder.scala
index 2525dcd..214f224 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MergeDataSetBuilder.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MergeDataSetBuilder.scala
@@ -126,7 +126,7 @@ class MergeDataSetBuilder(existingDsOri: Dataset[Row], currDs: Dataset[Row],
 
   private def checkBuilder(): Unit = {
     if (matchList.size() == 0) {
-      throw new AnalysisException("Atleast one matcher should be called before calling an action")
+      throw new AnalysisException("At least one matcher should be called before calling an action")
     }
   }
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MergeProjection.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MergeProjection.scala
index f0c6945..04ed2c7 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MergeProjection.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MergeProjection.scala
@@ -64,7 +64,7 @@ case class MergeProjection(
     }
     if (colsMap != null) {
       val output = new Array[Expression](tableCols.length)
-      val expecOutput = new Array[Expression](tableCols.length)
+      val expectOutput = new Array[Expression](tableCols.length)
       colsMap.foreach { case (k, v) =>
         val tableIndex = tableCols.indexOf(k.toString().toLowerCase)
         if (tableIndex < 0) {
@@ -75,7 +75,7 @@ case class MergeProjection(
             ds.queryExecution.analyzed.resolveQuoted(a.name,
               sparkSession.sessionState.analyzer.resolver).get
         }
-        expecOutput(tableIndex) =
+        expectOutput(tableIndex) =
           existingDsOutput.find(_.name.equalsIgnoreCase(tableCols(tableIndex))).get
       }
       if (output.contains(null)) {
@@ -84,7 +84,7 @@ case class MergeProjection(
       (new InterpretedMutableProjection(output++Seq(
         ds.queryExecution.analyzed.resolveQuoted(statusCol,
         sparkSession.sessionState.analyzer.resolver).get),
-        ds.queryExecution.analyzed.output), expecOutput)
+        ds.queryExecution.analyzed.output), expectOutput)
     } else {
       (null, null)
     }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MutationAction.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MutationAction.scala
index 556423f..d6ea288 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MutationAction.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MutationAction.scala
@@ -36,7 +36,7 @@ import org.apache.carbondata.processing.loading.FailureCauses
 abstract class MutationAction(sparkSession: SparkSession, carbonTable: CarbonTable) {
 
   /**
-   * The RDD of tupleids and delta status will be processed here to write the delta on store
+   * The RDD of tupleIds and delta status will be processed here to write the delta on store
    */
   def handleAction(dataRDD: RDD[Row],
       executorErrors: ExecutionErrors,
@@ -141,12 +141,12 @@ object MutationActionFactory {
       carbonTable: CarbonTable,
       hasDelAction: Boolean,
       hasUpAction: Boolean,
-      hasInsrtHistUpd: Boolean,
-      hasInsrtHistDel: Boolean): MutationAction = {
+      hasInsertHistUpd: Boolean,
+      hasInsertHistDel: Boolean): MutationAction = {
     var actions = Seq.empty[MutationAction]
     // If the merge has history insert action then write the delete delta in two separate actions.
     // As it is needed to know which are deleted records and which are insert records.
-    if (hasInsrtHistDel || hasInsrtHistUpd) {
+    if (hasInsertHistDel || hasInsertHistUpd) {
       if (hasUpAction) {
         actions ++= Seq(HandleUpdateAction(sparkSession, carbonTable))
       }
@@ -166,7 +166,7 @@ object MutationActionFactory {
   }
 
   def checkErrors(executorErrors: ExecutionErrors): Unit = {
-    // Check for any failures occured during delete delta execution
+    // Check for any failures occurred during delete delta execution
     if (executorErrors.failureCauses != FailureCauses.NONE) {
       throw new CarbonMergeDataSetException(executorErrors.errorMsg)
     }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/interfaces.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/interfaces.scala
index 91f0322..cba30ef 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/interfaces.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/interfaces.scala
@@ -23,7 +23,7 @@ import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.util.LongAccumulator
 
 /**
- * It describes the type of match like whenmatched or whennotmatched etc., it holds all the actions
+ * It describes the type of match like whenMatched or whenNotMatched etc. It holds all the actions
  * to be done when this match passes.
  */
 abstract class MergeMatch extends Serializable {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableAddHivePartitionCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableAddHivePartitionCommand.scala
index 09614a8..46d6cdc 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableAddHivePartitionCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableAddHivePartitionCommand.scala
@@ -115,11 +115,11 @@ case class CarbonAlterTableAddHivePartitionCommand(
         partitionSpecsAndLocsTobeAdded)
       if (segmentFile != null) {
         val indexToSchemas = SegmentFileStore.getSchemaFiles(segmentFile, table.getTablePath)
-        val tableColums = table.getTableInfo.getFactTable.getListOfColumns.asScala
+        val tableColumns = table.getTableInfo.getFactTable.getListOfColumns.asScala
         val isSameSchema = indexToSchemas.asScala.exists{ case(key, columnSchemas) =>
           columnSchemas.asScala.exists { col =>
-            tableColums.exists(p => p.getColumnUniqueId.equals(col.getColumnUniqueId))
-          } && columnSchemas.size() == tableColums.length
+            tableColumns.exists(p => p.getColumnUniqueId.equals(col.getColumnUniqueId))
+          } && columnSchemas.size() == tableColumns.length
         }
         if (!isSameSchema) {
           throw new UnsupportedOperationException(
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropHivePartitionCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropHivePartitionCommand.scala
index 2c34f1e..a3f4351 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropHivePartitionCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableDropHivePartitionCommand.scala
@@ -41,7 +41,7 @@ import org.apache.carbondata.spark.rdd.CarbonDropPartitionRDD
 /**
  * Drop the partitions from hive and carbon store. It drops the partitions in following steps
  * 1. Drop the partitions from carbon store, it just create one new mapper file in each segment
- * with uniqueid.
+ * with unique id.
  * 2. Drop partitions from hive.
  * 3. In any above step fails then roll back the newly created files
  * 4. After success of steps 1 and 2 , it commits the files by removing the old fails.
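
The four steps in the comment above amount to a do-both-or-roll-back flow; a very rough sketch with stand-in step functions (not Carbon APIs):

    object DropPartitionFlowSketch {
      def run(dropFromCarbonStore: () => Unit,
          dropFromHive: () => Unit,
          rollbackNewFiles: () => Unit,
          commitByRemovingOldFiles: () => Unit): Unit = {
        try {
          dropFromCarbonStore()      // step 1: write the new mapper file with a unique id
          dropFromHive()             // step 2: drop the partitions from hive
        } catch {
          case e: Exception =>
            rollbackNewFiles()       // step 3: roll back the newly created files
            throw e
        }
        commitByRemovingOldFiles()   // step 4: commit by removing the old files
      }

      def main(args: Array[String]): Unit = {
        run(() => println("drop from store"), () => println("drop from hive"),
          () => println("rollback"), () => println("commit"))
      }
    }
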
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDropColumnCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDropColumnCommand.scala
index fd7c147..5c03871 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDropColumnCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDropColumnCommand.scala
@@ -104,7 +104,7 @@ private[sql] case class CarbonAlterTableDropColumnCommand(
 
       var dictionaryColumns = Seq[org.apache.carbondata.core.metadata.schema.table.column
       .ColumnSchema]()
-      // TODO: if deleted column list includes bucketted column throw an error
+      // TODO: if deleted column list includes bucketed column throw an error
       alterTableDropColumnModel.columns.foreach { column =>
         var columnExist = false
         tableColumns.foreach { tableColumn =>
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
index 87dfba0..4c9d660 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
@@ -146,7 +146,8 @@ case class CarbonCreateTableCommand(
           }
         } catch {
           case e: AnalysisException =>
-            // AnalysisException thrown with table already exists msg incase of conurrent drivers
+            // AnalysisException thrown with table already exists message in case of
+            // concurrent drivers
             if (e.getMessage().contains("already exists")) {
 
               // Clear the cache first
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala
index f4a4ca3..d8da00c 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonExplainCommand.scala
@@ -36,7 +36,7 @@ case class CarbonExplainCommand(
     setAuditInfo(Map("query" -> explainCommand.logicalPlan.simpleString))
     val isCommand = explainCommand.logicalPlan match {
       case _: Command => true
-      case Union(childern) if childern.forall(_.isInstanceOf[Command]) => true
+      case Union(children) if children.forall(_.isInstanceOf[Command]) => true
       case _ => false
     }
 
@@ -76,7 +76,7 @@ object CarbonExplainCommand {
           sparkSession.sessionState.executePlan(explain.logicalPlan)
         queryExecution.toRdd.partitions
         // For count(*) queries the explain collector will be disabled, so profiler
-        // informations not required in such scenarios.
+        // information is not required in such scenarios.
         if (null == ExplainCollector.getFormattedOutput) {
           Seq.empty
         }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonCreateMVCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonCreateMVCommand.scala
index 1106870..5be4ef7 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonCreateMVCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/view/CarbonCreateMVCommand.scala
@@ -459,7 +459,7 @@ case class CarbonCreateMVCommand(
               Seq.empty
             }
           } else {
-            // if not found then countinue search for the rest of the elements. Because the rest
+            // if not found then continue search for the rest of the elements. Because the rest
             // of the elements can also decide if the table has to be partitioned or not.
             generatePartitionerField(tail, partitionerFields)
           }
@@ -472,7 +472,7 @@ case class CarbonCreateMVCommand(
   }
 
   private def checkQuery(logicalPlan: LogicalPlan): ModularPlan = {
-    // if there is limit in query string, throw exception, as its not a valid usecase
+    // if there is a limit in the query string, throw an exception, as it's not a valid use case
     logicalPlan match {
       case Limit(_, _) =>
         throw new MalformedCarbonCommandException("Materialized view does not support the query " +
@@ -589,7 +589,7 @@ case class CarbonCreateMVCommand(
         }.isDefined || needFullRefresh
         expression
     }
-    // TODO:- Remove this case when incremental datalaoding is supported for multiple tables
+    // TODO:- Remove this case when incremental data loading is supported for multiple tables
     logicalPlan.transformDown {
       case join@Join(_, _, _, _) =>
         needFullRefresh = true
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
index 1bf3946..ecb8547 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
@@ -580,7 +580,7 @@ private class CarbonOutputWriter(path: String,
     }.getRecordWriter(context).asInstanceOf[CarbonRecordWriter]
   }
 
-  // TODO Implement writesupport interface to support writing Row directly to recordwriter
+  // TODO Implement write support interface to support writing Row directly to record writer
   def writeCarbon(row: InternalRow): Unit = {
     val numOfColumns = nonPartitionFieldTypes.length + partitionData.length
     val data: Array[AnyRef] = CommonUtil.getObjectArrayFromInternalRowAndConvertComplexType(
@@ -660,13 +660,13 @@ object CarbonOutputWriter {
       }
       if (staticPartition != null && staticPartition.get(col.getColumnName.toLowerCase)) {
         // TODO: why not use CarbonScalaUtil.convertToDateAndTimeFormats ?
-        val converetedVal =
+        val convertedValue =
           CarbonScalaUtil.convertStaticPartitions(partitionData(index), col)
         if (col.getDataType.equals(DataTypes.DATE)) {
-          converetedVal.toInt.asInstanceOf[AnyRef]
+          convertedValue.toInt.asInstanceOf[AnyRef]
         } else {
           DataTypeUtil.getDataBasedOnDataType(
-            converetedVal,
+            convertedValue,
             dataType,
             converter)
         }
@@ -686,11 +686,11 @@ object CarbonOutputWriter {
       val formattedPartitions = CarbonScalaUtil.updatePartitions(
         updatedPartitions.asInstanceOf[mutable.LinkedHashMap[String, String]],
         model.getCarbonDataLoadSchema.getCarbonTable)
-      val partitionstr = formattedPartitions.map { p =>
+      val partitionString = formattedPartitions.map { p =>
         ExternalCatalogUtils.escapePathName(p._1) + "=" + ExternalCatalogUtils.escapePathName(p._2)
       }.mkString(CarbonCommonConstants.FILE_SEPARATOR)
       model.getCarbonDataLoadSchema.getCarbonTable.getTablePath +
-      CarbonCommonConstants.FILE_SEPARATOR + partitionstr
+      CarbonCommonConstants.FILE_SEPARATOR + partitionString
     } else {
       var updatedPath = FileFactory.getUpdatedFilePath(path)
       updatedPath.substring(0, updatedPath.lastIndexOf("/"))
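
The partition path assembled here is the usual Hive-style key=value layout; a stripped-down sketch without the escaping done by ExternalCatalogUtils.escapePathName:

    object PartitionPathSketch {
      def partitionPath(tablePath: String, partitions: Seq[(String, String)]): String = {
        val partitionString = partitions.map { case (k, v) => s"$k=$v" }.mkString("/")
        s"$tablePath/$partitionString"
      }

      def main(args: Array[String]): Unit = {
        // prints /warehouse/t1/country=US/dt=2020-07-30
        println(partitionPath("/warehouse/t1", Seq("country" -> "US", "dt" -> "2020-07-30")))
      }
    }
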
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonPlanHelper.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonPlanHelper.scala
index b1e53c2..a73bac2 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonPlanHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonPlanHelper.scala
@@ -68,7 +68,7 @@ object CarbonPlanHelper {
         ExecutedCommandExec(addColumnCommand) :: Nil
       }
       // TODO: remove this else if check once the 2.1 version is unsupported by carbon
-    } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       val structField = (alterTableAddColumnsModel.dimCols ++ alterTableAddColumnsModel.msrCols)
         .map { f =>
           val structField = StructField(f.column,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala
index ad81e79..86fd4ce 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala
@@ -205,7 +205,7 @@ object DDLHelper {
       table.table.toLowerCase)(sparkSession)
     if (carbonTable != null && carbonTable.isFileLevelFormat) {
       throw new MalformedCarbonCommandException(
-        "Unsupported alter operation on Carbon external fileformat table")
+        "Unsupported alter operation on Carbon external file format table")
     } else if (carbonTable != null && !carbonTable.getTableInfo.isTransactionalTable) {
       throw new MalformedCarbonCommandException(
         "Unsupported operation on non transactional table")
@@ -225,7 +225,7 @@ object DDLHelper {
         Map.empty[String, String],
         tableModel.dimCols,
         tableModel.msrCols,
-        tableModel.highcardinalitydims.getOrElse(Seq.empty))
+        tableModel.highCardinalityDims.getOrElse(Seq.empty))
       CarbonAlterTableAddColumnCommand(alterTableAddColumnsModel)
     }
   }
@@ -240,7 +240,7 @@ object DDLHelper {
       tableName.table.toLowerCase)(sparkSession)
     if (carbonTable != null && carbonTable.isFileLevelFormat) {
       throw new MalformedCarbonCommandException(
-        "Unsupported alter operation on Carbon external fileformat table")
+        "Unsupported alter operation on Carbon external file format table")
     } else if (carbonTable != null && !carbonTable.getTableInfo.isTransactionalTable) {
       throw new MalformedCarbonCommandException(
         "Unsupported operation on non transactional table")
@@ -427,7 +427,7 @@ object DDLHelper {
   ): Seq[SparkPlan] = {
     val isCommand = explainCommand.logicalPlan match {
       case _: Command => true
-      case Union(childern) if childern.forall(_.isInstanceOf[Command]) => true
+      case Union(children) if children.forall(_.isInstanceOf[Command]) => true
       case _ => false
     }
     if (explainCommand.logicalPlan.isStreaming || isCommand) {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/MixedFormatHandler.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/MixedFormatHandler.scala
index 089bb72..84e59f1 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/MixedFormatHandler.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/MixedFormatHandler.scala
@@ -133,7 +133,7 @@ object MixedFormatHandler {
   }
 
   /**
-   * Generates the RDD for non carbon segments. It uses the spark underlying fileformats and
+   * Generates the RDD for non carbon segments. It uses the spark underlying file formats and
    * generates the RDD in its native format without changing any of its flow to keep the original
    * performance and features.
    *
@@ -145,10 +145,10 @@ object MixedFormatHandler {
       projects: Seq[NamedExpression],
       filters: Seq[Expression],
       readCommittedScope: ReadCommittedScope,
-      identier: AbsoluteTableIdentifier,
+      identifier: AbsoluteTableIdentifier,
       supportBatch: Boolean = true): Option[(RDD[InternalRow], Boolean)] = {
     val loadMetadataDetails = readCommittedScope.getSegmentList
-    val segsToAccess = getSegmentsToAccess(identier)
+    val segsToAccess = getSegmentsToAccess(identifier)
     val rdds = loadMetadataDetails.filter(metaDetail =>
       (metaDetail.getSegmentStatus.equals(SegmentStatus.SUCCESS) ||
        metaDetail.getSegmentStatus.equals(SegmentStatus.LOAD_PARTIAL_SUCCESS)))
@@ -157,9 +157,9 @@ object MixedFormatHandler {
         currLoad.getFileFormat.equals(FileFormatName.ROW_V1))
       .filter(l => segsToAccess.isEmpty || segsToAccess.contains(l.getLoadName))
       .groupBy(_.getFileFormat)
-      .map { case (format, detailses) =>
+      .map { case (format, details) =>
         // collect paths as input to scan RDD
-        val paths = detailses. flatMap { d =>
+        val paths = details. flatMap { d =>
           val segmentFile = SegmentFileStore.readSegmentFile(
             CarbonTablePath.getSegmentFilePath(readCommittedScope.getFilePath, d.getSegmentFile))
 
@@ -189,7 +189,7 @@ object MixedFormatHandler {
         Some(rdds.head)
       } else {
         if (supportBatch && rdds.exists(!_._2)) {
-          extraRDD(l, projects, filters, readCommittedScope, identier, false)
+          extraRDD(l, projects, filters, readCommittedScope, identifier, false)
         } else {
           var rdd: RDD[InternalRow] = null
           rdds.foreach { r =>
@@ -236,7 +236,7 @@ object MixedFormatHandler {
   }
 
   /**
-   * Generates the RDD using the spark fileformat.
+   * Generates the RDD using the spark file format.
    */
   private def getRDDForExternalSegments(l: LogicalRelation,
       projects: Seq[NamedExpression],
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/PushDownHelper.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/PushDownHelper.scala
index 335b3bc..8033d39 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/PushDownHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/PushDownHelper.scala
@@ -31,9 +31,9 @@ object PushDownHelper {
       projects: Seq[NamedExpression],
       projection: CarbonProjection
   ): Unit = {
-    // In case of Struct or StructofStruct Complex type, get the project column for given
-    // parent/child field and pushdown the corresponding project column. In case of Array, Map,
-    // ArrayofStruct, StructofArray, MapOfStruct or StructOfMap, pushdown parent column
+    // In case of Struct or StructOfStruct Complex type, get the project column for given
+    // parent/child field and push down the corresponding project column. In case of Array, Map,
+    // ArrayOfStruct, StructOfArray, MapOfStruct or StructOfMap, push down parent column
     val output = ArrayBuffer[String]()
     projects.foreach(PushDownHelper.collectColumns(_, output))
     if (output.isEmpty) {
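
The pushdown rule in this comment can be summarised as: push the parent.child path only for struct (or struct-of-struct) children, and push the whole parent column otherwise. A simplified sketch with hypothetical column metadata:

    object ProjectionPushDownSketch {
      sealed trait ColKind
      case object StructKind    extends ColKind
      case object ArrayKind     extends ColKind
      case object MapKind       extends ColKind
      case object PrimitiveKind extends ColKind

      // Decide which column name to hand to the scan for a referenced field.
      def columnToPush(parent: String, child: Option[String], parentKind: ColKind): String =
        (parentKind, child) match {
          case (StructKind, Some(c)) => s"$parent.$c" // struct / struct-of-struct: push the child path
          case _                     => parent        // array, map, and mixes of them: push the parent
        }

      def main(args: Array[String]): Unit = {
        println(columnToPush("address", Some("city"), StructKind)) // address.city
        println(columnToPush("phones", Some("0"), ArrayKind))      // phones
      }
    }
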
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
index 04996a0..46cb3c6 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
@@ -234,9 +234,9 @@ case class CarbonIUDAnalysisRule(sparkSession: SparkSession) extends Rule[Logica
       System.currentTimeMillis().toString)
   }
 
-  override def apply(logicalplan: LogicalPlan): LogicalPlan = {
+  override def apply(logicalPlan: LogicalPlan): LogicalPlan = {
 
-    logicalplan transform {
+    logicalPlan transform {
       case UpdateTable(t, cols, sel, alias, where) => processUpdateQuery(t, cols, sel, alias, where)
       case DeleteRecords(statement, alias, table) =>
         processDeleteRecordsQuery(
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
index 4c5f16d..0b898eb 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
@@ -278,7 +278,7 @@ class CarbonFileMetastore extends CarbonMetaStore {
 
         val thriftTableInfo: TableInfo = if (tblInfoFromCache != null) {
           // In case the TableInfo is present in the Carbon Metadata Cache
-          // then get the tableinfo from the cache rather than infering from
+          // then get the table info from the cache rather than inferring from
           // the CarbonData file.
           schemaConverter
             .fromWrapperToExternalTableInfo(tblInfoFromCache, dbName, tableName)
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveIndexMetadataUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveIndexMetadataUtil.scala
index a7871fb..b6fb6ee 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveIndexMetadataUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveIndexMetadataUtil.scala
@@ -48,8 +48,8 @@ object CarbonHiveIndexMetadataUtil {
       tableName: String,
       sparkSession: SparkSession): Unit = {
     try {
-      val tabelIdentifier = TableIdentifier(tableName, Some(databaseName))
-      sparkSession.sessionState.catalog.dropTable(tabelIdentifier, true, false)
+      val tableIdentifier = TableIdentifier(tableName, Some(databaseName))
+      sparkSession.sessionState.catalog.dropTable(tableIdentifier, true, false)
     } catch {
       case e: Exception =>
         LOGGER.error(
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetaStore.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetaStore.scala
index c510c7c..1f15f90 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetaStore.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetaStore.scala
@@ -33,14 +33,14 @@ import org.apache.carbondata.format.SchemaEvolutionEntry
 import org.apache.carbondata.spark.util.CarbonSparkUtil
 
 /**
- * Metastore to store carbonschema in hive
+ * Metastore to store carbon schema in hive
  */
 class CarbonHiveMetaStore extends CarbonFileMetastore {
 
   override def isReadFromHiveMetaStore: Boolean = true
 
   /**
-   * Create spark session from paramters.
+   * Create spark session from parameters.
    *
    * @param parameters
    * @param absIdentifier
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
index db71e53..75265c2 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetaStore.scala
@@ -30,7 +30,7 @@ import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.format.SchemaEvolutionEntry
 
 /**
- * Interface for Carbonmetastore
+ * Interface for CarbonMetastore
  */
 trait CarbonMetaStore {
 
@@ -44,7 +44,7 @@ trait CarbonMetaStore {
     (sparkSession: SparkSession): LogicalPlan
 
   /**
-   * Create spark session from paramters.
+   * Create spark session from parameters.
    *
    * @param parameters
    * @param absIdentifier
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala
index 02dc50f..508e910 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala
@@ -55,8 +55,8 @@ case class CarbonRelation(
       .filterNot(_.isSpatialColumn)
       .asJava)
     sett.asScala.toSeq.map(dim => {
-      val dimval = carbonTable.getDimensionByName(dim.getColName)
-      val output: DataType = dimval.getDataType.getName.toLowerCase match {
+      val dimension = carbonTable.getDimensionByName(dim.getColName)
+      val output: DataType = dimension.getDataType.getName.toLowerCase match {
         case "array" =>
           CarbonMetastoreTypes.toDataType(
             s"array<${SparkTypeConverter.getArrayChildren(carbonTable, dim.getColName)}>")
@@ -67,7 +67,7 @@ case class CarbonRelation(
           CarbonMetastoreTypes.toDataType(
             s"map<${SparkTypeConverter.getMapChildren(carbonTable, dim.getColName)}>")
         case dType =>
-          val dataType = addDecimalScaleAndPrecision(dimval, dType)
+          val dataType = addDecimalScaleAndPrecision(dimension, dType)
           CarbonMetastoreTypes.toDataType(dataType)
       }
 
@@ -171,11 +171,11 @@ case class CarbonRelation(
     }
   }
 
-  def addDecimalScaleAndPrecision(dimval: CarbonDimension, dataType: String): String = {
+  def addDecimalScaleAndPrecision(dimension: CarbonDimension, dataType: String): String = {
     var dType = dataType
-    if (DataTypes.isDecimal(dimval.getDataType)) {
+    if (DataTypes.isDecimal(dimension.getDataType)) {
       dType +=
-      "(" + dimval.getColumnSchema.getPrecision + "," + dimval.getColumnSchema.getScale + ")"
+      "(" + dimension.getColumnSchema.getPrecision + "," + dimension.getColumnSchema.getScale + ")"
     }
     dType
   }
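
The decimal handling above just appends "(precision,scale)" to the type name; a tiny sketch with placeholder values:

    object DecimalTypeSketch {
      def withScaleAndPrecision(typeName: String, isDecimal: Boolean,
          precision: Int, scale: Int): String = {
        if (isDecimal) s"$typeName($precision,$scale)" else typeName
      }

      def main(args: Array[String]): Unit = {
        println(withScaleAndPrecision("decimal", isDecimal = true, precision = 10, scale = 2)) // decimal(10,2)
        println(withScaleAndPrecision("string", isDecimal = false, precision = 0, scale = 0))  // string
      }
    }
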
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalog.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalog.scala
index 35e9c0b..316d329 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalog.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalog.scala
@@ -62,7 +62,7 @@ trait CarbonSessionCatalog {
       identifier: TableIdentifier): Seq[CatalogTablePartition]
 
   /**
-   * Update the storageformat with new location information
+   * Update the storage format with new location information
    */
   def updateStorageLocation(
       path: Path,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalogUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalogUtil.scala
index db66acc..7288239 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalogUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalogUtil.scala
@@ -62,7 +62,7 @@ object CarbonSessionCatalogUtil {
   }
 
   /**
-   * Below method will be used to update serd properties
+   * Below method will be used to update serde properties
    * @param tableIdentifier table identifier
    * @param schemaParts schema parts
    * @param cols cols
@@ -107,7 +107,7 @@ object CarbonSessionCatalogUtil {
     //    For Spark2.2 we need to use unified Spark thrift server instead of carbon thrift
     //    server. CarbonSession is not available anymore so HiveClient is created directly
     //    using sparkSession.sharedState which internally contains all required carbon rules,
-    //    optimizers pluged-in through SessionStateBuilder in spark-defaults.conf.
+    //    optimizers plugged in through SessionStateBuilder in spark-defaults.conf.
     //    spark.sql.session.state.builder=org.apache.spark.sql.hive.CarbonSessionStateBuilder
     CarbonToSparkAdapter.getHiveExternalCatalog(sparkSession).client
   }
@@ -163,7 +163,7 @@ object CarbonSessionCatalogUtil {
   }
 
   /**
-   * Update the storageformat with new location information
+   * Update the storage format with new location information
    */
   def updateStorageLocation(
       path: Path,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionUtil.scala
index b90b617..1ca5676 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionUtil.scala
@@ -33,7 +33,7 @@ import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema
 
 /**
- * This class refresh the relation from cache if the carbontable in
+ * This class refreshes the relation from the cache if the carbon table in
  * carbon catalog is not same as cached carbon relation's carbon table.
  */
 object CarbonSessionUtil {
@@ -53,7 +53,7 @@ object CarbonSessionUtil {
     var isRelationRefreshed = false
 
     /**
-     * Set the stats to none in case of carbontable
+     * Set the stats to none in case of a carbon table
      */
     def setStatsNone(catalogTable: CatalogTable): Unit = {
       if (CarbonSource.isCarbonDataSource(catalogTable)) {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CreateCarbonSourceTableAsSelectCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CreateCarbonSourceTableAsSelectCommand.scala
index d87daf1..41abfce 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CreateCarbonSourceTableAsSelectCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CreateCarbonSourceTableAsSelectCommand.scala
@@ -117,7 +117,7 @@ case class CreateCarbonSourceTableAsSelectCommand(
 
     try {
       val physicalPlan = session.sessionState.executePlan(data).executedPlan
-      CarbonReflectionUtils.invokewriteAndReadMethod(dataSource,
+      CarbonReflectionUtils.invokeWriteAndReadMethod(dataSource,
         Dataset.ofRows(session, query),
         data,
         session,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/DistributionUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/DistributionUtil.scala
index 04ff0ce..c37d44c 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/DistributionUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/DistributionUtil.scala
@@ -56,12 +56,12 @@ object DistributionUtil {
   }
 
   /*
-   * This method will return the list of executers in the cluster.
+   * This method will return the list of executors in the cluster.
    * For this we take the  memory status of all node with getExecutorMemoryStatus
    * and extract the keys. getExecutorMemoryStatus also returns the driver memory also
    * In client mode driver will run in the localhost
-   * There can be executor spawn in same drive node. So we can remove first occurance of
-   * localhost for retriving executor list
+   * There can be executors spawned in the same driver node, so we can remove the first occurrence of
+   * localhost when retrieving the executor list
    */
   def getNodeList(sparkContext: SparkContext): Array[String] = {
     val arr = sparkContext.getExecutorMemoryStatus.map { kv =>
@@ -70,19 +70,19 @@ object DistributionUtil {
     val localhostIPs = getLocalhostIPs
     val selectedLocalIPList = localhostIPs.filter(arr.contains(_))
 
-    val nodelist: List[String] = withoutDriverIP(arr.toList)(selectedLocalIPList.contains(_))
+    val nodeList: List[String] = withoutDriverIP(arr.toList)(selectedLocalIPList.contains(_))
     val masterMode = sparkContext.getConf.get("spark.master")
-    if (nodelist.nonEmpty) {
+    if (nodeList.nonEmpty) {
       // Specific for Yarn Mode
       if ("yarn-cluster".equals(masterMode) || "yarn-client".equals(masterMode)) {
-        val nodeNames = nodelist.map { x =>
+        val nodeNames = nodeList.map { x =>
           val addr = InetAddress.getByName(x)
           addr.getHostName
         }
         nodeNames.toArray
       } else {
         // For Standalone cluster, node IPs will be returned.
-        nodelist.toArray
+        nodeList.toArray
       }
     } else {
       Seq(InetAddress.getLocalHost.getHostName).toArray
@@ -108,11 +108,11 @@ object DistributionUtil {
   }
 
   /*
-   * This method will remove the first occurance of any of the ips  mentioned in the predicate.
+   * This method will remove the first occurrence of any of the IPs mentioned in the predicate.
    * Eg: l = List(Master,slave1,Master,slave2,slave3) is the list of nodes where first Master is
    * the Driver  node.
    * this method withoutFirst (l)(x=> x == 'Master') will remove the first occurance of Master.
-   * The resulting List containt List(slave1,Master,slave2,slave3)
+   * The resulting List is List(slave1,Master,slave2,slave3)
    */
   def withoutDriverIP[A](xs: List[A])(p: A => Boolean): List[A] = {
     xs match {
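
For reference, the withoutDriverIP behaviour documented above (drop only the first element matching a predicate, keep later matches) can be sketched as a standalone Scala helper; this is an illustrative sketch, not the CarbonData implementation:

    object WithoutFirstExample {
      // Drops only the first element satisfying p; later matches are kept.
      def withoutFirst[A](xs: List[A])(p: A => Boolean): List[A] = xs match {
        case head :: tail if p(head) => tail
        case head :: tail => head :: withoutFirst(tail)(p)
        case Nil => Nil
      }

      def main(args: Array[String]): Unit = {
        val nodes = List("Master", "slave1", "Master", "slave2", "slave3")
        // Only the first "Master" (the driver) is removed, matching the comment's example.
        println(withoutFirst(nodes)(_ == "Master")) // List(slave1, Master, slave2, slave3)
      }
    }
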
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala
index 7fb44b1..41c80d8 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala
@@ -82,7 +82,7 @@ trait SqlAstBuilderHelper extends SparkSqlAstBuilder {
       tblProperties.toMap,
       tableModel.dimCols,
       tableModel.msrCols,
-      tableModel.highcardinalitydims.getOrElse(Seq.empty))
+      tableModel.highCardinalityDims.getOrElse(Seq.empty))
 
     CarbonAlterTableAddColumnCommand(alterTableAddColumnsModel)
   }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/execution/command/CarbonHiveCommands.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/execution/command/CarbonHiveCommands.scala
index b4eae44..124bd63 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/execution/command/CarbonHiveCommands.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/execution/command/CarbonHiveCommands.scala
@@ -125,7 +125,7 @@ object CarbonSetCommand {
       else {
         throw new MalformedCarbonCommandException(
           "property should be in \" carbon.table.load.sort.scope.<database_name>" +
-          ".<table_name>=<sort_sope> \" format.")
+          ".<table_name>=<sort_scope> \" format.")
       }
     } else if (key.startsWith(CarbonCommonConstants.CARBON_ENABLE_INDEX_SERVER)) {
       val keySplits = key.split("\\.")
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/index/CarbonIndexUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/index/CarbonIndexUtil.scala
index 86576fa..f97e15e 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/index/CarbonIndexUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/index/CarbonIndexUtil.scala
@@ -211,7 +211,7 @@ object CarbonIndexUtil {
   }
 
   /**
-   * Get the column compressor for the index table. Check first in the index table tableproperties
+   * Get the column compressor for the index table. Check first in the index table properties
    * and then fall back to main table at last to the default compressor
    */
   def getCompressorForIndexTable(
@@ -375,7 +375,7 @@ object CarbonIndexUtil {
         carbonTable = carbonTable,
         thriftTable = thriftTable)(sparkSession)
       CarbonSessionCatalogUtil.alterTable(tableIdentifier, schemaParts, None, sparkSession)
-      // remove from the cache so that the table will be loaded again with the new tableproperties
+      // remove from the cache so that the table will be loaded again with the new table properties
       CarbonInternalMetastore
         .removeTableFromMetadataCache(carbonTable.getDatabaseName, tableName)(sparkSession)
       // refresh the parent table relation
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/listeners/MVListeners.scala b/integration/spark/src/main/scala/org/apache/spark/sql/listeners/MVListeners.scala
index 2177bc5..ce4a0d7 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/listeners/MVListeners.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/listeners/MVListeners.scala
@@ -52,7 +52,7 @@ object MVCompactionPostEventListener extends OperationEventListener {
    */
   override def onEvent(event: Event, operationContext: OperationContext): Unit = {
     val compactionEvent = event.asInstanceOf[AlterTableCompactionPreStatusUpdateEvent]
-    val compactionType = compactionEvent.carbonMergerMapping.campactionType
+    val compactionType = compactionEvent.carbonMergerMapping.compactionType
     if (compactionType == CompactionType.CUSTOM) {
       return
     }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/listeners/PrePrimingListener.scala b/integration/spark/src/main/scala/org/apache/spark/sql/listeners/PrePrimingListener.scala
index 2be59d4..492f244 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/listeners/PrePrimingListener.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/listeners/PrePrimingListener.scala
@@ -24,7 +24,7 @@ import org.apache.carbondata.core.index.IndexInputFormat
 import org.apache.carbondata.events.{Event, IndexServerLoadEvent, OperationContext, OperationEventListener}
 import org.apache.carbondata.indexserver.IndexServer
 
-// Listener for the PrePriming Event. This listener calls the index server using an Asynccall
+// Listener for the PrePriming Event. This listener calls the index server using an async call
 object PrePrimingEventListener extends OperationEventListener {
 
   private val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
@@ -36,7 +36,7 @@ object PrePrimingEventListener extends OperationEventListener {
     val indexInputFormat = new IndexInputFormat(carbonTable,
       null,
       prePrimingEvent.segment.asJava,
-      prePrimingEvent.invalidsegment.asJava,
+      prePrimingEvent.invalidSegment.asJava,
       null,
       false,
       null,
@@ -47,12 +47,12 @@ object PrePrimingEventListener extends OperationEventListener {
         IndexServer.getClient.getCount(indexInputFormat)
       }
       catch {
-        // Consider a scenario where prepriming is in progress and the index server crashes, in
+        // Consider a scenario where pre-priming is in progress and the index server crashes, in
         // this case since we should not fail the corresponding operation where pre-priming is
-        // triggered. Because prepriming is an optimization for cache loading prior to query,
+        // triggered. Because pre-priming is an optimization for cache loading prior to query,
         // so no exception should be thrown.
         case ex: Exception =>
-          LOGGER.error(s"Prepriming failed for table ${carbonTable.getTableName} ", ex)
+          LOGGER.error(s"Pre-priming failed for table ${carbonTable.getTableName} ", ex)
       }
     }
   }
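
The listener above treats pre-priming as best effort: the index server call is fired asynchronously and any failure is only logged, never propagated to the triggering operation. A minimal sketch of that fire-and-forget pattern (the call body here is a placeholder, not the real IndexServer client):

    import scala.concurrent.Future
    import scala.concurrent.ExecutionContext.Implicits.global

    object PrePrimingSketch {
      // Runs the given call asynchronously and swallows failures, because pre-priming
      // is only a cache-warming optimization and must not fail the caller.
      def firePrePriming(tableName: String)(call: => Unit): Future[Unit] = Future {
        try {
          call
        } catch {
          case ex: Exception =>
            println(s"Pre-priming failed for table $tableName: ${ex.getMessage}")
        }
      }
    }
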
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonUDFTransformRule.scala b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonUDFTransformRule.scala
index c080cd9..28bb44d 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonUDFTransformRule.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonUDFTransformRule.scala
@@ -34,7 +34,7 @@ class CarbonUDFTransformRule extends Rule[LogicalPlan] with PredicateHelper {
   private def pushDownUDFToJoinLeftRelation(plan: LogicalPlan): LogicalPlan = {
     val output = plan.transform {
       case proj@Project(cols, Join(
-      left, right, jointype: org.apache.spark.sql.catalyst.plans.JoinType, condition)) =>
+      left, right, joinType: org.apache.spark.sql.catalyst.plans.JoinType, condition)) =>
         var projectionToBeAdded: Seq[org.apache.spark.sql.catalyst.expressions.Alias] = Seq.empty
         var udfExists = false
         val newCols = cols.map {
@@ -56,7 +56,7 @@ class CarbonUDFTransformRule extends Rule[LogicalPlan] with PredicateHelper {
               Project(relation.output ++ projectionToBeAdded, relation)
             case other => other
           }
-          Project(newCols, Join(newLeft, right, jointype, condition))
+          Project(newCols, Join(newLeft, right, joinType, condition))
         } else {
           proj
         }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/MVMatcher.scala b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/MVMatcher.scala
index 4aee148..7f57bee 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/MVMatcher.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/MVMatcher.scala
@@ -143,7 +143,7 @@ private abstract class MVMatchPattern extends Logging {
     }
     if (aliasMapMain.size == 1) {
       val subsumerName: Option[String] = aliasMapMain.get(0)
-      // Replace all compensation1 attributes with refrences of subsumer attributeset
+      // Replace all compensation1 attributes with references of the subsumer attribute set
       val compensationFinal = compensation1.transformExpressions {
         case ref: Attribute if subqueryAttributeSet.contains(ref) =>
           CarbonToSparkAdapter.createAttributeReference(
@@ -174,8 +174,8 @@ private abstract class MVMatchPattern extends Logging {
     //                /      \
     //   AttributeReference   Literal
     subsume match {
-      case Alias(funcction: ScalaUDF, _) if funcction.function.isInstanceOf[TimeSeriesFunction] =>
-        val children = funcction.children
+      case Alias(function: ScalaUDF, _) if function.function.isInstanceOf[TimeSeriesFunction] =>
+        val children = function.children
         val subsumerTimeSeriesFunctions = subsumerList.filter(
           expression =>
             expression.isInstanceOf[Alias] &&
@@ -210,7 +210,7 @@ private abstract class MVMatchPattern extends Logging {
           }
         )
       case expression: Expression =>
-        val transformedExpwithLowerCase = expression.transform {
+        val transformedExpWithLowerCase = expression.transform {
           case function: ScalaUDF if function.function.isInstanceOf[TimeSeriesFunction] =>
             getTransformedTimeSeriesFunction(function)
           case other => other
@@ -223,14 +223,14 @@ private abstract class MVMatchPattern extends Logging {
               case other => other
             }
         }
-        transformedExprListWithLowerCase.exists(_.semanticEquals(transformedExpwithLowerCase))
+        transformedExprListWithLowerCase.exists(_.semanticEquals(transformedExpWithLowerCase))
       case _ => false
     }
   }
 
   /**
    * Check if expr1 and expr2 matches TimeSeriesUDF function. If both expressions are
-   * timeseries udf functions, then check it's childrens are same irrespective of case.
+   * timeseries udf functions, then check whether their children are the same irrespective of case.
    */
   protected def isExpressionMatches(expression1: Expression, expression2: Expression): Boolean = {
     (expression1, expression2) match {
@@ -292,7 +292,7 @@ private abstract class MVMatchPattern extends Logging {
                                alias_m(alias.toAttribute).child.isInstanceOf[AggregateExpression] &&
                                alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
                                  .aggregateFunction.isInstanceOf[Count] =>
-            // case for groupby
+            // case for group by
             val cnt_a = alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
             val exprs_a = cnt_a.aggregateFunction.asInstanceOf[Count].children
             if (cnt_a.isDistinct != cnt_q.isDistinct || exprs_q.length != exprs_a.length) {
@@ -429,7 +429,7 @@ private abstract class MVMatchPattern extends Logging {
           case alias: Alias if alias_m.contains(alias.toAttribute) &&
                                alias_m(alias.toAttribute).child.isInstanceOf[AggregateExpression] &&
                                alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
-                                 .aggregateFunction.isInstanceOf[Count] => // case for groupby
+                                 .aggregateFunction.isInstanceOf[Count] => // case for group by
             val cnt_a = alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
             val exprs_a = cnt_a.aggregateFunction.asInstanceOf[Count].children
             if (!cnt_a.isDistinct && exprs_a.sameElements(Set(expr_q))) {
@@ -677,12 +677,12 @@ private object SelectSelectNoChildDelta extends MVMatchPattern with PredicateHel
         if (isUniqueRmE.isEmpty && isUniqueEmR.isEmpty && extrajoin.isEmpty && isPredicateRmE &&
             isPredicateEmdR && isOutputEdR) {
           val mappings = sel_1a.children.zipWithIndex.map {
-            case (childr, fromIdx) if sel_1q.children.contains(childr) =>
-              val toIndx = sel_1q.children.indexWhere{
-                case relation: ModularRelation => relation.fineEquals(childr)
-                case other => other == childr
+            case (child, fromIndex) if sel_1q.children.contains(child) =>
+              val toIndex = sel_1q.children.indexWhere{
+                case relation: ModularRelation => relation.fineEquals(child)
+                case other => other == child
               }
-              (toIndx -> fromIdx)
+              (toIndex -> fromIndex)
 
           }
           val e2r = mappings.toMap
@@ -748,10 +748,10 @@ private object SelectSelectNoChildDelta extends MVMatchPattern with PredicateHel
               tAliasMap += (tChildren.indexOf(usel_1a) -> generator.newSubsumerName())
 
               sel_1q.children.zipWithIndex.foreach {
-                case (childe, idx) =>
+                case (child, idx) =>
                   if (e2r.get(idx).isEmpty) {
-                    tChildren += childe
-                    sel_1q.aliasMap.get(idx).map(x => tAliasMap += (tChildren.indexOf(childe) -> x))
+                    tChildren += child
+                    sel_1q.aliasMap.get(idx).map(x => tAliasMap += (tChildren.indexOf(child) -> x))
                   }
               }
 
@@ -920,7 +920,7 @@ private object GroupbyGroupbyNoChildDelta extends MVMatchPattern {
             tryMatch(
               gb_2a, gb_2q, aliasMap).flatMap {
               case g: GroupBy =>
-                // Check any agg function exists on outputlist, in case of expressions like
+                // Check whether any agg function exists on the output list, in case of expressions like
                 // sum(a), then create new alias and copy to group by node
                 val aggFunExists = g.outputList.exists { f =>
                   f.find {
@@ -990,8 +990,8 @@ private object GroupbyGroupbySelectOnlyChildDelta
 
   /**
    * org.apache.carbondata.mv.plans.MorePredicateHelper#canEvaluate will be checking the
-   * exprE.references as subset of AttibuteSet(exprListR), which will just take the column name from
-   * UDF, so it will be always false for ScalaUDF.
+   * exprE.references as a subset of AttributeSet(exprListR), which will just take the column name
+   * from the UDF, so it will always be false for ScalaUDF.
    * This method takes care of checking whether the exprE references can be derived form list
    *
    * Example:
@@ -1080,16 +1080,16 @@ private object GroupbyGroupbySelectOnlyChildDelta
         val needRegrouping = !gb_2a.predicateList
           .forall(f => gb_2q.predicateList.contains(f) ||
                        isExpressionMatches(f, gb_2q.predicateList))
-        val canPullup = sel_1c1.predicateList.forall(expr =>
+        val canPullUp = sel_1c1.predicateList.forall(expr =>
           isDerivable(expr, gb_2a.predicateList ++ rejoinOutputList, gb_2q, gb_2a, compensation))
         val isAggEmR = gb_2q.outputList.collect {
           case agg: aggregate.AggregateExpression =>
             gb_2a.outputList.exists(_.semanticEquals(agg))
         }.forall(identity)
 
-        if (isGroupingEdR && ((!needRegrouping && isAggEmR) || needRegrouping) && canPullup) {
+        if (isGroupingEdR && ((!needRegrouping && isAggEmR) || needRegrouping) && canPullUp) {
           // pull up
-          val pullupOutputList = gb_2a.outputList.map(_.toAttribute) ++ rejoinOutputList
+          val pullUpOutputList = gb_2a.outputList.map(_.toAttribute) ++ rejoinOutputList
           val myOutputList = gb_2a.outputList.filter {
             case alias: Alias =>
               val aliasList = gb_2q.outputList.filter(_.isInstanceOf[Alias])
@@ -1101,18 +1101,18 @@ private object GroupbyGroupbySelectOnlyChildDelta
           // TODO: find out if we really need to check needRegrouping or just use myOutputList
           val sel_2c1 = if (needRegrouping) {
             sel_1c1
-              .copy(outputList = pullupOutputList,
-                inputList = pullupOutputList,
+              .copy(outputList = pullUpOutputList,
+                inputList = pullUpOutputList,
                 children = sel_1c1.children
                   .map { case _: modular.Select => gb_2a; case other => other })
           } else {
             sel_1c1
               .copy(outputList = myOutputList,
-                inputList = pullupOutputList,
+                inputList = pullUpOutputList,
                 children = sel_1c1.children
                   .map { case _: modular.Select => gb_2a; case other => other })
           }
-          // sel_1c1.copy(outputList = pullupOutputList, inputList = pullupOutputList, children =
+          // sel_1c1.copy(outputList = pullUpOutputList, inputList = pullUpOutputList, children =
           // sel_1c1.children.map { _ match { case s: modular.Select => gb_2a; case other =>
           // other } })
 
@@ -1124,10 +1124,10 @@ private object GroupbyGroupbySelectOnlyChildDelta
               tryMatch(gb_2a, gb_2q, aliasMap).flatMap {
                 case g: GroupBy =>
 
-                  // Check any agg function exists on outputlist,
+                  // Check whether any agg function exists on the output list,
                   // in case of expressions like sum(a)+sum(b) ,
                   // output list directly replaces with alias with in place of function
-                  // so we should remove the groupby clause in those cases.
+                  // so we should remove the group by clause in those cases.
                   val aggFunExists = g.outputList.exists { f =>
                     f.find {
                       case _: AggregateExpression => true
@@ -1254,7 +1254,7 @@ private object SelectSelectGroupbyChildDelta
    * top of subsumer
    *
    * To simplify this we assume in subsumer outputList of top select 1-1 corresponds to the
-   * outputList of groupby
+   * outputList of group by
    * note that subsumer outputList is list of attributes and that of groupby is list of aliases
    *
    */
@@ -1266,7 +1266,7 @@ private object SelectSelectGroupbyChildDelta
     (subsumer, subsumee, compensation) match {
       // top selects whose children do not match exactly
       // for simplicity, we assume outputList of subsumer is 1-1 corresponding to that of its
-      // immediately groupby child
+      // immediate group by child
       case (
         _@modular.Select(
         _, _, _, _, _,
@@ -1446,7 +1446,7 @@ private object SelectSelectGroupbyChildDelta
               sel_3a,
               compensation))
 
-        val canSELPullup = gb_2c.child.isInstanceOf[Select] &&
+        val canSELPullUp = gb_2c.child.isInstanceOf[Select] &&
                            gb_2c.child.asInstanceOf[Select].predicateList
                              .forall(expr =>
                                isDerivable(
@@ -1455,7 +1455,7 @@ private object SelectSelectGroupbyChildDelta
                                  sel_3q,
                                  sel_3a,
                                  compensation))
-        val canGBPullup = gb_2c.predicateList
+        val canGBPullUp = gb_2c.predicateList
           .forall(expr =>
             isDerivable(
               expr,
@@ -1467,8 +1467,8 @@ private object SelectSelectGroupbyChildDelta
         if (extrajoin.isEmpty && isPredicateRmE &&
             isPredicateEmdR &&
             isOutputEdR &&
-            canSELPullup &&
-            canGBPullup) {
+            canSELPullUp &&
+            canGBPullUp) {
           gb_2c.child match {
             case s: Select =>
               val sel_3c1 = s.withNewChildren(
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/MVRewriteRule.scala b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/MVRewriteRule.scala
index db16767..20e33e7 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/MVRewriteRule.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/MVRewriteRule.scala
@@ -81,7 +81,7 @@ class MVRewriteRule(session: SparkSession) extends Rule[LogicalPlan] {
     }
     logicalPlan.transform {
       case Aggregate(groupBy, aggregations, child) =>
-        // check for if plan is for dataload for preaggregate table, then skip applying mv
+        // check if the plan is for a data load into a pre-aggregate table; if so, skip applying mv
         val haveDummyFunction = aggregations.exists {
           aggregation =>
             if (aggregation.isInstanceOf[UnresolvedAlias]) {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
index 97e1c31..9840381 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
@@ -194,7 +194,7 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
           // validate for supported table properties
           validateTableProperties(properties)
           // validate column_meta_cache property if defined
-          validateColumnMetaCacheAndCacheLevelProeprties(
+          validateColumnMetaCacheAndCacheLevelProperties(
             table.database, indexName.toLowerCase, tableColumns, properties)
           validateColumnCompressorProperty(
             properties.getOrElse(CarbonCommonConstants.COMPRESSOR, null))
@@ -275,8 +275,8 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
           }
           if (isJoinWithMainTable && isLimitPresent) {
             throw new UnsupportedOperationException(
-              "Update subquery has join with maintable and limit leads to multiple join for each " +
-              "limit for each row")
+              "Update subquery has join with main table and limit leads to multiple join for " +
+              "each limit for each row")
           }
           if (!isJoinWithMainTable) {
             // Should go as value update, not as join update. So execute the sub query.
@@ -461,8 +461,8 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
     DELETE ~> FROM ~ TABLE ~> (ident <~ ".").? ~ ident ~
     (WHERE ~> (SEGMENT ~ "." ~ ID) ~> IN ~> "(" ~> repsep(segmentId, ",")) <~ ")" ~
     opt(";") ^^ {
-      case dbName ~ tableName ~ loadids =>
-        CarbonDeleteLoadByIdCommand(loadids, dbName, tableName.toLowerCase())
+      case dbName ~ tableName ~ loadIds =>
+        CarbonDeleteLoadByIdCommand(loadIds, dbName, tableName.toLowerCase())
     }
 
   protected lazy val deleteSegmentByLoadDate: Parser[LogicalPlan] =
@@ -618,7 +618,7 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
         CarbonAlterTableDropColumnCommand(alterTableDropColumnModel)
     }
 
-  private def validateColumnMetaCacheAndCacheLevelProeprties(dbName: Option[String],
+  private def validateColumnMetaCacheAndCacheLevelProperties(dbName: Option[String],
       tableName: String,
       tableColumns: Seq[String],
       tableProperties: scala.collection.mutable.Map[String, String]): Unit = {
@@ -640,7 +640,7 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
   }
 
   private def validateColumnCompressorProperty(columnCompressor: String): Unit = {
-    // Add validatation for column compressor when creating index table
+    // Add validation for column compressor when creating index table
     try {
       if (null != columnCompressor) {
         CompressorFactory.getInstance().getCompressor(columnCompressor)
@@ -675,8 +675,8 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
   protected lazy val dropIndex: Parser[LogicalPlan] =
     DROP ~> INDEX ~> opt(IF ~> EXISTS) ~ ident ~
     ontable <~ opt(";") ^^ {
-      case ifexist ~ indexName ~ table =>
-        DropIndexCommand(ifexist.isDefined, table.database, table.table, indexName.toLowerCase)
+      case ifExist ~ indexName ~ table =>
+        DropIndexCommand(ifExist.isDefined, table.database, table.table, indexName.toLowerCase)
     }
 
   /**
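
As an illustration of the statements accepted by the parser rules above, the deleteSegmentById rule matches a DELETE FROM TABLE ... WHERE SEGMENT.ID IN (...) statement and maps it to CarbonDeleteLoadByIdCommand. A sketch (db1.t1 and the segment ids are placeholders; a SparkSession with the CarbonData extensions is assumed):

    import org.apache.spark.sql.SparkSession

    object DeleteSegmentByIdExample {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder()
          .appName("delete-segment-example")
          .master("local[1]")
          .getOrCreate()
        // Marks segments 0 and 1 of the placeholder table db1.t1 for delete.
        spark.sql("DELETE FROM TABLE db1.t1 WHERE SEGMENT.ID IN (0, 1)")
        spark.stop()
      }
    }
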
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
index 4b9a126..71e54ff 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
@@ -109,7 +109,7 @@ object CarbonSparkSqlParserUtil {
         tableInfo.getFactTable.getTableName, "Table should have at least one column.")
     }
 
-    // Add validatation for column compressor when create table
+    // Add validation for column compressor when create table
     val columnCompressor = tableInfo.getFactTable.getTableProperties.get(
       CarbonCommonConstants.COMPRESSOR)
     try {
@@ -131,7 +131,7 @@ object CarbonSparkSqlParserUtil {
    *                         TablePropertyListContext,
    *                         LocationSpecContext, Option[String], TerminalNode, QueryContext,
    *                         String)
-   * @param extraTableTuple  A tupple of (Seq[StructField], Boolean, TableIdentifier, Boolean,
+   * @param extraTableTuple  A tuple of (Seq[StructField], Boolean, TableIdentifier, Boolean,
    *                         Seq[String],
    *                         Option[String], mutable.Map[String, String], Map[String, String],
    *                         Seq[StructField],
@@ -259,15 +259,15 @@ object CarbonSparkSqlParserUtil {
         .get(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE)
       if (CarbonScalaUtil.validateLocalDictionaryEnable(isLocalDic_enabled) &&
           isLocalDic_enabled.toBoolean) {
-        val allcolumns = table.getFactTable.getListOfColumns
-        for (i <- 0 until allcolumns.size()) {
-          val cols = allcolumns.get(i)
+        val allColumns = table.getFactTable.getListOfColumns
+        for (i <- 0 until allColumns.size()) {
+          val cols = allColumns.get(i)
           if (cols.getDataType == DataTypes.STRING || cols.getDataType == DataTypes.VARCHAR) {
             cols.setLocalDictColumn(true)
           }
-          allcolumns.set(i, cols)
+          allColumns.set(i, cols)
         }
-        table.getFactTable.setListOfColumns(allcolumns)
+        table.getFactTable.setListOfColumns(allColumns)
       }
       table
     } else {
@@ -451,14 +451,14 @@ object CarbonSparkSqlParserUtil {
         .get(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE)
       if (CarbonScalaUtil.validateLocalDictionaryEnable(isLocalDic_enabled) &&
           isLocalDic_enabled.toBoolean) {
-        val allcolumns = tableInfo.getFactTable.getListOfColumns
-        for (i <- 0 until allcolumns.size()) {
-          val cols = allcolumns.get(i)
+        val allColumns = tableInfo.getFactTable.getListOfColumns
+        for (i <- 0 until allColumns.size()) {
+          val cols = allColumns.get(i)
           if (cols.getDataType == DataTypes.STRING || cols.getDataType == DataTypes.VARCHAR) {
             cols.setLocalDictColumn(true)
           }
         }
-        tableInfo.getFactTable.setListOfColumns(allcolumns)
+        tableInfo.getFactTable.setListOfColumns(allColumns)
       }
       tableInfo
     } else {
@@ -738,7 +738,7 @@ object CarbonSparkSqlParserUtil {
       tableProps.toMap,
       tableModel.dimCols,
       tableModel.msrCols,
-      tableModel.highcardinalitydims.getOrElse(Seq.empty))
+      tableModel.highCardinalityDims.getOrElse(Seq.empty))
     CarbonAlterTableAddColumnCommand(alterTableAddColumnsModel)
   }
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/profiler/ProfilerLogger.scala b/integration/spark/src/main/scala/org/apache/spark/sql/profiler/ProfilerLogger.scala
index 40ab68c..022e3e3 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/profiler/ProfilerLogger.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/profiler/ProfilerLogger.scala
@@ -110,7 +110,7 @@ private[profiler] class StatementSummary(
 
   override def toString: String = {
     if (isCommand) {
-      buildForComand()
+      buildForCommand()
     } else {
       buildForQuery()
     }
@@ -142,7 +142,7 @@ private[profiler] class StatementSummary(
    *   |__ 3.execution taken: 1051 ms
    *   |__ ...
    */
-  private def buildForComand(): String = {
+  private def buildForCommand(): String = {
     val builder = new JavaStringBuilder(1000)
     builder.append(s"\n[statement id]: ${ statementId }")
     builder.append(s"\n[sql text]:\n")
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexDetailsWithSchema.java b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexDetailsWithSchema.java
index ee7ff7e..43429e2 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexDetailsWithSchema.java
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexDetailsWithSchema.java
@@ -27,7 +27,7 @@ import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 
 /**
  * class that holds indexes, column cardinality, columnSchema and other related information for
- * BlockletIndexInputFormate return value
+ * BlockletIndexInputFormat return value
  * TODO: When this code is moved to open source, this class can be removed and the required code
  * can be added to BlockletIndexWrapper class
  */
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/CarbonBlockLoaderHelper.java b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/CarbonBlockLoaderHelper.java
index dbf0c12..e0003b8 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/CarbonBlockLoaderHelper.java
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/CarbonBlockLoaderHelper.java
@@ -49,7 +49,7 @@ public class CarbonBlockLoaderHelper {
     return carbonBlockLoaderHelper;
   }
 
-  private Set<String> getTableblocks(AbsoluteTableIdentifier absoluteTableIdentifier) {
+  private Set<String> getTableBlocks(AbsoluteTableIdentifier absoluteTableIdentifier) {
     CopyOnWriteArraySet<String> blockSet = tableBlockMap.get(absoluteTableIdentifier);
     if (null == blockSet) {
       CopyOnWriteArraySet<String> newBlockSet = new CopyOnWriteArraySet<String>();
@@ -71,7 +71,7 @@ public class CarbonBlockLoaderHelper {
    */
   public Boolean checkAlreadySubmittedBlock(final AbsoluteTableIdentifier absoluteTableIdentifier,
       final String uniqueBlockId) {
-    Set<String> tableBlocks = getTableblocks(absoluteTableIdentifier);
+    Set<String> tableBlocks = getTableBlocks(absoluteTableIdentifier);
     // tableBlocks is a type of CopyOnWriteArraySet, so avoided taking lock during write/add
     return tableBlocks.add(uniqueBlockId);
   }
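
checkAlreadySubmittedBlock above relies on CopyOnWriteArraySet.add returning false when the element is already present, so the first submitter wins without taking an explicit lock. A standalone sketch of that idea (names are illustrative, and it is written in Scala rather than the Java of the original class):

    import java.util.concurrent.CopyOnWriteArraySet

    object SubmitOnceExample {
      private val submittedBlocks = new CopyOnWriteArraySet[String]()

      // Returns true only for the first submission of a given block id.
      def checkAlreadySubmitted(uniqueBlockId: String): Boolean =
        submittedBlocks.add(uniqueBlockId)

      def main(args: Array[String]): Unit = {
        println(checkAlreadySubmitted("block-1")) // true: first submission wins
        println(checkAlreadySubmitted("block-1")) // false: already submitted
      }
    }
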
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/SparkBlockletIndexLoaderJob.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/SparkBlockletIndexLoaderJob.scala
index 288c1e0..b81cc56 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/SparkBlockletIndexLoaderJob.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/SparkBlockletIndexLoaderJob.scala
@@ -162,7 +162,7 @@ class IndexLoaderRDD(
     val inputSplit = split.asInstanceOf[IndexLoaderPartition].inputSplit
     val reader = indexFormat.createRecordReader(inputSplit, attemptContext)
     val iter = new Iterator[(TableBlockIndexUniqueIdentifier, BlockletIndexDetailsWithSchema)] {
-      // in case of success, failure or cancelation clear memory and stop execution
+      // in case of success, failure or cancellation clear memory and stop execution
       context.addTaskCompletionListener { _ =>
         reader.close()
       }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/command/SICreationCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/command/SICreationCommand.scala
index 2e979d6..a03345f 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/command/SICreationCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/command/SICreationCommand.scala
@@ -60,12 +60,12 @@ class ErrorMessage(message: String) extends Exception(message) {
 /**
  * Command for index table creation
  *
- * @param indexModel        SecondaryIndex model holding the index infomation
+ * @param indexModel        SecondaryIndex model holding the index information
  * @param tableProperties   SI table properties
  * @param ifNotExists       true if IF NOT EXISTS is set
  * @param isDeferredRefresh true if WITH DEFERRED REFRESH is set
- * @param isCreateSIndex    if false then will not create index table schema in the carbonstore
- *                          and will avoid dataload for SI creation.
+ * @param isCreateSIndex    if false, the index table schema will not be created in the carbon store
+ *                          and data load for SI creation will be skipped.
  */
 private[sql] case class CarbonCreateSecondaryIndexCommand(
     indexModel: IndexModel,
@@ -383,7 +383,7 @@ private[sql] case class CarbonCreateSecondaryIndexCommand(
 
       CarbonHiveIndexMetadataUtil.refreshTable(databaseName, tableName, sparkSession)
 
-      // refersh the parent table relation
+      // refresh the parent table relation
       sparkSession.sessionState.catalog.refreshTable(identifier)
       // load data for secondary index
       if (isCreateSIndex) {
@@ -395,10 +395,10 @@ private[sql] case class CarbonCreateSecondaryIndexCommand(
         SegmentStatusManager.readLoadMetadata(carbonTable.getMetadataPath)
       val siTblLoadMetadataDetails: Array[LoadMetadataDetails] =
         SegmentStatusManager.readLoadMetadata(indexTablePath)
-      val isMaintableSegEqualToSISegs = CarbonInternalLoaderUtil
+      val isMainTableSegEqualToSISegs = CarbonInternalLoaderUtil
         .checkMainTableSegEqualToSISeg(mainTblLoadMetadataDetails,
           siTblLoadMetadataDetails)
-      if (isMaintableSegEqualToSISegs) {
+      if (isMainTableSegEqualToSISegs) {
         // enable the SI table
         sparkSession.sql(
           s"""ALTER TABLE $databaseName.$indexTableName SET
@@ -452,7 +452,7 @@ private[sql] case class CarbonCreateSecondaryIndexCommand(
       0,
       0,
       schemaOrdinal)
-    // sort column proeprty should be true for implicit no dictionary column position reference
+    // sort column property should be true for implicit no dictionary column position reference
     // as there exist a same behavior for no dictionary columns by default
     blockletId.setSortColumn(true)
     // set the blockletId column as local dict column implicit no dictionary column position
@@ -518,12 +518,12 @@ private[sql] case class CarbonCreateSecondaryIndexCommand(
   def setLocalDictionaryConfigs(indexTblPropertiesMap: java.util.HashMap[String, String],
       parentTblPropertiesMap: java.util.Map[String, String],
       allColumns: List[ColumnSchema]): Unit = {
-    val isLocalDictEnabledFormainTable = parentTblPropertiesMap
+    val isLocalDictEnabledForMainTable = parentTblPropertiesMap
       .get(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE)
     indexTblPropertiesMap
       .put(
         CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE,
-        isLocalDictEnabledFormainTable)
+        isLocalDictEnabledForMainTable)
     indexTblPropertiesMap
       .put(
         CarbonCommonConstants.LOCAL_DICTIONARY_THRESHOLD,
@@ -537,7 +537,7 @@ private[sql] case class CarbonCreateSecondaryIndexCommand(
         localDictColumns :+= column.getColumnName
       }
     )
-    if (isLocalDictEnabledFormainTable != null && isLocalDictEnabledFormainTable.toBoolean) {
+    if (isLocalDictEnabledForMainTable != null && isLocalDictEnabledForMainTable.toBoolean) {
       indexTblPropertiesMap
         .put(
           CarbonCommonConstants.LOCAL_DICTIONARY_INCLUDE,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableColumnRenameEventListener.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableColumnRenameEventListener.scala
index 2215a9c..060c371 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableColumnRenameEventListener.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableColumnRenameEventListener.scala
@@ -132,7 +132,7 @@ class AlterTableColumnRenameEventListener extends OperationEventListener with Lo
             if (indexCarbonTable != null) {
               // failure tables will be automatically taken care in
               // CarbonAlterTableColRenameDataTypeChangeCommand, just need to revert the success
-              // tables, so get the latest timestamp for evolutionhistory
+              // tables, so get the latest timestamp for evolution history
               val thriftTable: TableInfo = catalog.getThriftTableInfo(indexCarbonTable)
               val evolutionEntryList = thriftTable.fact_table.schema_evolution
                 .schema_evolution_history
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableCompactionPostEventListener.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableCompactionPostEventListener.scala
index 75a6b3f..8beb776 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableCompactionPostEventListener.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableCompactionPostEventListener.scala
@@ -55,7 +55,7 @@ class AlterTableCompactionPostEventListener extends OperationEventListener with
         val carbonLoadModel = alterTableCompactionPostEvent.carbonLoadModel
         val sQLContext = alterTableCompactionPostEvent.sparkSession.sqlContext
         val compactionType: CompactionType = alterTableCompactionPostEvent.carbonMergerMapping
-          .campactionType
+          .compactionType
         if (compactionType.toString
           .equalsIgnoreCase(CompactionType.SEGMENT_INDEX.toString)) {
           val carbonMainTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/SILoadEventListenerForFailedSegments.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/SILoadEventListenerForFailedSegments.scala
index 71c6559..89d60f6 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/SILoadEventListenerForFailedSegments.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/SILoadEventListenerForFailedSegments.scala
@@ -106,7 +106,7 @@ class SILoadEventListenerForFailedSegments extends OperationEventListener with L
 
                   var details = SegmentStatusManager.readLoadMetadata(indexTable.getMetadataPath)
                   // If it empty, then no need to do further computations because the
-                  // tabletstatus might not have been created and hence next load will take care
+                  // table status might not have been created and hence the next load will take care
                   if (details.isEmpty) {
                     return
                   }
@@ -170,14 +170,14 @@ class SILoadEventListenerForFailedSegments extends OperationEventListener with L
                           detail(0).setSegmentStatus(SegmentStatus.SUCCESS)
                           // in concurrent scenario, if a compaction is going on table, then SI
                           // segments are updated first in table status and then the main table
-                          // segment, so in any load runs paralley this listener shouldn't consider
+                          // segment, so if any load runs in parallel this listener shouldn't consider
                           // those segments accidentally. So try to take the segment lock.
-                          val segmentLockOfProbableOngngCompactionSeg = CarbonLockFactory
+                          val segmentLockOfProbableOnCompactionSeg = CarbonLockFactory
                             .getCarbonLockObj(carbonTable.getAbsoluteTableIdentifier,
                               CarbonTablePath.addSegmentPrefix(mainTableDetail(0).getLoadName) +
                               LockUsage.LOCK)
-                          if (segmentLockOfProbableOngngCompactionSeg.lockWithRetries()) {
-                            segmentLocks += segmentLockOfProbableOngngCompactionSeg
+                          if (segmentLockOfProbableOnCompactionSeg.lockWithRetries()) {
+                            segmentLocks += segmentLockOfProbableOnCompactionSeg
                             LOGGER.error("Added in SILoadFailedSegment " + detail(0).getLoadName)
                             failedLoadMetadataDetails.add(detail(0))
                           }
@@ -189,7 +189,7 @@ class SILoadEventListenerForFailedSegments extends OperationEventListener with L
                       // in the case when in SI table a segment is deleted and it's entry is
                       // deleted from the tablestatus file, the corresponding .segment file from
                       // the metadata folder should also be deleted as it contains the
-                      // mergefilename which does not exist anymore as the segment is deleted.
+                      // merge file name which does not exist anymore as the segment is deleted.
                       deleteStaleSegmentFileIfPresent(carbonLoadModel,
                         indexTable,
                         failedLoadMetadataDetails)
@@ -213,7 +213,7 @@ class SILoadEventListenerForFailedSegments extends OperationEventListener with L
 
                     // check if main table has load in progress and SI table has no load
                     // in progress entry, then no need to enable the SI table
-                    // Only if the valid segments of maintable match the valid segments of SI
+                    // Only if the valid segments of main table match the valid segments of SI
                     // table then we can enable the SI for query
                     if (CarbonInternalLoaderUtil
                           .checkMainTableSegEqualToSISeg(mainTblLoadMetadataDetails,
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/hive/CarbonInternalMetastore.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/hive/CarbonInternalMetastore.scala
index 854cf3c..9049bbd 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/hive/CarbonInternalMetastore.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/hive/CarbonInternalMetastore.scala
@@ -150,7 +150,7 @@ object CarbonInternalMetastore {
     // then once the property has a value true/false, make decision based on the property value
     if (null != carbonTable && (null == indexTableExists || indexTableExists.toBoolean)) {
       // When Index information is not loaded in main table, then it will fetch
-      // index info from hivemetastore and set it in the carbon table.
+      // index info from hive metastore and set it in the carbon table.
       val indexTableMap =
       new ConcurrentHashMap[String, java.util.Map[String, java.util.Map[String, String]]]
       try {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/joins/BroadCastSIFilterPushJoin.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/joins/BroadCastSIFilterPushJoin.scala
index 1bbf0ae..e757955 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/joins/BroadCastSIFilterPushJoin.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/joins/BroadCastSIFilterPushJoin.scala
@@ -248,11 +248,11 @@ object BroadCastSIFilterPushJoin {
       logger.info("Pushing down filter for broadcast join. Filter size:" + filters(0).length)
       tableScan.get match {
         case scan: CarbonDataSourceScan =>
-          addPushdownToCarbonRDD(scan.rdd,
-            addPushdownFilters(filterKeys, filters))
+          addPushDownToCarbonRDD(scan.rdd,
+            addPushDownFilters(filterKeys, filters))
         case _ =>
-          addPushdownToCarbonRDD(tableScan.get.asInstanceOf[RowDataSourceScanExec].rdd,
-            addPushdownFilters(filterKeys, filters))
+          addPushDownToCarbonRDD(tableScan.get.asInstanceOf[RowDataSourceScanExec].rdd,
+            addPushDownFilters(filterKeys, filters))
       }
     }
   }
@@ -314,7 +314,7 @@ object BroadCastSIFilterPushJoin {
         logger.info(
           "Segments ignored are : " + util.Arrays.toString(filteredSegmentToAccessTemp.toArray))
       }
-      // if no valid segments after filteration
+      // if no valid segments remain after filtering
       if (filteredSegmentToAccess.size == 0) {
         return new util.ArrayList[Segment](0)
       } else {
@@ -496,14 +496,14 @@ object BroadCastSIFilterPushJoin {
           throw new UnsupportedOperationException(ex.getMessage)
       }
     val segmentToAccess = getFilteredSegments(scanRDD.head)
-    val segmentIdtoAccess = new Array[String](segmentToAccess.length)
+    val segmentIdToAccess = new Array[String](segmentToAccess.length)
     for (i <- segmentToAccess.indices) {
-      segmentIdtoAccess(i) = segmentToAccess(i).getSegmentNo
+      segmentIdToAccess(i) = segmentToAccess(i).getSegmentNo
     }
-    segmentIdtoAccess
+    segmentIdToAccess
   }
 
-  private def addPushdownToCarbonRDD(rdd: RDD[InternalRow],
+  private def addPushDownToCarbonRDD(rdd: RDD[InternalRow],
       expressions: Seq[Expression]): Unit = {
     rdd match {
       case value: CarbonScanRDD[InternalRow] =>
@@ -518,7 +518,7 @@ object BroadCastSIFilterPushJoin {
     }
   }
 
-  private def addPushdownFilters(keys: Seq[Expression],
+  private def addPushDownFilters(keys: Seq[Expression],
       filters: Array[Array[Expression]]): Seq[Expression] = {
 
     // TODO Values in the IN filter is duplicate. replace the list with set
@@ -527,7 +527,7 @@ object BroadCastSIFilterPushJoin {
       buffer += In(a._1, filters(a._2)).asInstanceOf[Expression]
     }
 
-    // Let's not pushdown condition. Only filter push down is sufficient.
+    // Let's not push down condition. Only filter push down is sufficient.
     // Conditions can be applied on hash join result.
     val cond = if (buffer.size > 1) {
       val e = buffer.remove(0)
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java
index f6a8835..827dba1 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java
@@ -61,7 +61,7 @@ public class CarbonInternalLoaderUtil {
   }
 
   /**
-   * This method will return the mapping of valid segments to segment laod start time
+   * This method will return the mapping of valid segments to segment load start time
    *
    */
   public static Map<String, Long> getSegmentToLoadStartTimeMapping(LoadMetadataDetails[] details) {
@@ -77,7 +77,7 @@ public class CarbonInternalLoaderUtil {
   }
 
   /**
-   * This API will write the load level metadata for the loadmanagement module inorder to
+   * This API will write the load level metadata for the load management module in order to
    * manage the load and query execution management smoothly.
    *
    * @return boolean which determines whether status update is done or not.
@@ -93,7 +93,7 @@ public class CarbonInternalLoaderUtil {
     try {
       if (carbonLock.lockWithRetries()) {
         LOGGER.info("Acquired lock for table" + databaseName + "." + tableName
-            + " for table status updation");
+            + " for table status update");
 
         if (isSegmentsAlreadyCompactedForNewMetaDataDetails(indexCarbonTables, tableName,
             newLoadMetadataDetails)) {
@@ -171,28 +171,28 @@ public class CarbonInternalLoaderUtil {
         status = true;
       } else {
         LOGGER.error(
-            "Not able to acquire the lock for Table status updation for table " + databaseName + "."
+            "Not able to acquire the lock for Table status update for table " + databaseName + "."
                 + tableName);
       }
     } catch (IOException e) {
       LOGGER.error(
-          "Not able to acquire the lock for Table status updation for table " + databaseName + "."
+          "Not able to acquire the lock for Table status update for table " + databaseName + "."
               + tableName);
     }
     finally {
       if (carbonLock.unlock()) {
-        LOGGER.info("Table unlocked successfully after table status updation" + databaseName + "."
+        LOGGER.info("Table unlocked successfully after table status update" + databaseName + "."
             + tableName);
       } else {
         LOGGER.error("Unable to unlock Table lock for table" + databaseName + "." + tableName
-            + " during table status updation");
+            + " during table status update");
       }
     }
     return status;
   }
 
   /**
-   * This method read the details of SI table and check whether new metadatadetails are already
+   * This method reads the details of the SI table and checks whether new metadata details are already
    * compacted, if it is, then already compaction for SI is completed and updating with new segment
    * status is useless, this can happen in case of updating the status of index while loading
    * segments for failed segments, so do not update anything, just exit gracefully
@@ -238,7 +238,7 @@ public class CarbonInternalLoaderUtil {
       String[] loadsToMerge, String mergedLoadNumber, Map<String, String> segmentToLoadStartTimeMap,
       long mergeLoadStartTime, SegmentStatus segmentStatus, long newLoadStartTime,
       List<String> rebuiltSegments) throws IOException {
-    boolean tableStatusUpdationStatus = false;
+    boolean tableStatusUpdateStatus = false;
     List<String> loadMergeList = new ArrayList<>(Arrays.asList(loadsToMerge));
     SegmentStatusManager segmentStatusManager =
         new SegmentStatusManager(indexCarbonTable.getAbsoluteTableIdentifier());
@@ -248,7 +248,7 @@ public class CarbonInternalLoaderUtil {
     try {
       if (carbonLock.lockWithRetries()) {
         LOGGER.info("Acquired lock for the table " + indexCarbonTable.getDatabaseName() + "."
-            + indexCarbonTable.getTableName() + " for table status updation ");
+            + indexCarbonTable.getTableName() + " for table status update ");
         LoadMetadataDetails[] loadDetails =
             SegmentStatusManager.readLoadMetadata(indexCarbonTable.getMetadataPath());
 
@@ -273,8 +273,8 @@ public class CarbonInternalLoaderUtil {
         // create entry for merged one.
         LoadMetadataDetails loadMetadataDetails = new LoadMetadataDetails();
         loadMetadataDetails.setSegmentStatus(segmentStatus);
-        long loadEnddate = CarbonUpdateUtil.readCurrentTime();
-        loadMetadataDetails.setLoadEndTime(loadEnddate);
+        long loadEndDate = CarbonUpdateUtil.readCurrentTime();
+        loadMetadataDetails.setLoadEndTime(loadEndDate);
         loadMetadataDetails.setLoadName(mergedLoadNumber);
         loadMetadataDetails.setSegmentFile(SegmentFileStore.genSegmentFileName(mergedLoadNumber,
             String.valueOf(segmentToLoadStartTimeMap.get(mergedLoadNumber)))
@@ -300,23 +300,23 @@ public class CarbonInternalLoaderUtil {
         SegmentStatusManager.writeLoadDetailsIntoFile(
             CarbonTablePath.getTableStatusFilePath(indexCarbonTable.getTablePath()),
             updatedDetailsList.toArray(new LoadMetadataDetails[0]));
-        tableStatusUpdationStatus = true;
+        tableStatusUpdateStatus = true;
       } else {
         LOGGER.error(
             "Could not able to obtain lock for table" + indexCarbonTable.getDatabaseName() + "."
-                + indexCarbonTable.getTableName() + "for table status updation");
+                + indexCarbonTable.getTableName() + "for table status update");
       }
     } finally {
       if (carbonLock.unlock()) {
-        LOGGER.info("Table unlocked successfully after table status updation" + indexCarbonTable
+        LOGGER.info("Table unlocked successfully after table status update" + indexCarbonTable
             .getDatabaseName() + "." + indexCarbonTable.getTableName());
       } else {
         LOGGER.error(
             "Unable to unlock Table lock for table" + indexCarbonTable.getDatabaseName() + "."
-                + indexCarbonTable.getTableName() + " during table status updation");
+                + indexCarbonTable.getTableName() + " during table status update");
       }
     }
-    return tableStatusUpdationStatus;
+    return tableStatusUpdateStatus;
   }
 
   /**
@@ -330,15 +330,15 @@ public class CarbonInternalLoaderUtil {
         getListOfValidSlices(siTableLoadMetadataDetails);
     Collections.sort(mainTableSegmentsList);
     Collections.sort(indexList);
-    // In the case when number of SI segments are more than the maintable segments do nothing
-    // and proceed to process the segments. Return False in case if maintable segments are more
+    // In the case when number of SI segments are more than the main table segments do nothing
+    // and proceed to process the segments. Return False in case if main table segments are more
     // than SI Segments
     if (indexList.size() < mainTableSegmentsList.size()) {
       return false;
     }
     // There can be cases when the number of segments in the main table are less than the index
     // table. In this case mapping all the segments in main table to SI table.
-    // Return False if a segment in maintable is not in indextable
+    // Return False if a segment in main table is not in index table
     HashSet<String> indexTableSet = new HashSet<String>();
     for (int i = 0; i < indexList.size(); i++) {
       indexTableSet.add(indexList.get(i));
@@ -355,8 +355,8 @@ public class CarbonInternalLoaderUtil {
    * Method to check if main table has in progress load and same segment not present in SI
    */
   public static boolean checkInProgLoadInMainTableAndSI(CarbonTable carbonTable,
-                                                      LoadMetadataDetails[] mainTableLoadMetadataDetails,
-                                                      LoadMetadataDetails[] siTableLoadMetadataDetails) {
+      LoadMetadataDetails[] mainTableLoadMetadataDetails,
+      LoadMetadataDetails[] siTableLoadMetadataDetails) {
     List<String> allSiSlices = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
     for (LoadMetadataDetails oneLoad : siTableLoadMetadataDetails) {
       allSiSlices.add(oneLoad.getLoadName());
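The log-message fixes above all sit around one pattern: acquire the table status lock with retries, rewrite the status file, and always unlock in a finally block. A minimal stand-alone Scala sketch of that flow follows, with hypothetical names rather than the CarbonData lock API:

trait StatusLock {
  def lockWithRetries(): Boolean
  def unlock(): Boolean
}

def updateTableStatus(lock: StatusLock, writeStatus: () => Unit): Boolean = {
  var updated = false
  try {
    if (lock.lockWithRetries()) {
      writeStatus() // e.g. rewrite the table status file with the new load details
      updated = true
    } else {
      println("Not able to acquire the lock for table status update")
    }
  } finally {
    // the unlock is attempted whether or not the write succeeded
    if (lock.unlock()) {
      println("Table unlocked successfully after table status update")
    } else {
      println("Unable to unlock table lock during table status update")
    }
  }
  updated
}
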
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/RowComparatorWithOutKettle.java b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/RowComparator.java
similarity index 85%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/RowComparatorWithOutKettle.java
rename to integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/RowComparator.java
index 8077113..61b569e 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/RowComparatorWithOutKettle.java
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/RowComparator.java
@@ -24,24 +24,24 @@ import org.apache.carbondata.core.util.DataTypeUtil;
 import org.apache.carbondata.core.util.comparator.SerializableComparator;
 
 /**
- * This class is for comparing the two mdkeys in no kettle flow.
+ * This class is for comparing the two MDKeys in no kettle flow.
  */
-public class RowComparatorWithOutKettle implements Comparator<Object[]> {
+public class RowComparator implements Comparator<Object[]> {
 
   /**
    * noDictionaryColMaping mapping of dictionary dimensions and no dictionary dimensions.
    */
-  private boolean[] noDictionaryColMaping;
+  private boolean[] noDictionaryColMapping;
 
   private DataType[] noDicDataTypes;
 
-  public RowComparatorWithOutKettle(boolean[] noDictionaryColMaping, DataType[] noDicDataTypes) {
-    this.noDictionaryColMaping = noDictionaryColMaping;
+  public RowComparator(boolean[] noDictionaryColMapping, DataType[] noDicDataTypes) {
+    this.noDictionaryColMapping = noDictionaryColMapping;
     this.noDicDataTypes = noDicDataTypes;
   }
 
   /**
-   * Below method will be used to compare two mdkeys
+   * Below method will be used to compare two MDKeys
    */
   public int compare(Object[] rowA, Object[] rowB) {
     int diff = 0;
@@ -52,7 +52,7 @@ public class RowComparatorWithOutKettle implements Comparator<Object[]> {
     int[] rightMdkArray = (int[]) rowB[0];
     Object[] leftNonDictArray = (Object[]) rowA[1];
     Object[] rightNonDictArray = (Object[]) rowB[1];
-    for (boolean isNoDictionary : noDictionaryColMaping) {
+    for (boolean isNoDictionary : noDictionaryColMapping) {
       if (isNoDictionary) {
         if (DataTypeUtil.isPrimitiveColumn(noDicDataTypes[dataTypeIdx])) {
           // use data types based comparator for the no dictionary measure columns
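The renamed comparator walks a boolean mapping to decide, column by column, whether to compare the dictionary MDKey array at index 0 or the no-dictionary values at index 1. A simplified stand-alone Scala sketch of that idea is below; it assumes the no-dictionary values are plain Comparables, unlike the datatype-aware comparison in the real class:

import java.util.Comparator

class SketchRowComparator(noDictionaryColMapping: Array[Boolean])
  extends Comparator[Array[AnyRef]] {

  override def compare(rowA: Array[AnyRef], rowB: Array[AnyRef]): Int = {
    val leftMdk = rowA(0).asInstanceOf[Array[Int]]
    val rightMdk = rowB(0).asInstanceOf[Array[Int]]
    val leftNoDict = rowA(1).asInstanceOf[Array[AnyRef]]
    val rightNoDict = rowB(1).asInstanceOf[Array[AnyRef]]
    var dictIdx = 0
    var noDictIdx = 0
    for (isNoDictionary <- noDictionaryColMapping) {
      val diff =
        if (isNoDictionary) {
          // simplified: assumes comparable no-dictionary values
          val d = leftNoDict(noDictIdx).asInstanceOf[Comparable[AnyRef]]
            .compareTo(rightNoDict(noDictIdx))
          noDictIdx += 1
          d
        } else {
          val d = leftMdk(dictIdx) - rightMdk(dictIdx)
          dictIdx += 1
          d
        }
      if (diff != 0) return diff
    }
    0
  }
}
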
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonSITransformationRule.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonSITransformationRule.scala
index 2918bf9..c802060 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonSITransformationRule.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonSITransformationRule.scala
@@ -95,7 +95,7 @@ class CarbonSITransformationRule(sparkSession: SparkSession)
    */
   private def isProjectionNeeded(plan: LogicalPlan): Boolean = {
     var needProjection = false
-    if (SparkUtil.isSparkVersionXandAbove("2.3")) {
+    if (SparkUtil.isSparkVersionXAndAbove("2.3")) {
       plan collect {
         case create: CreateHiveTableAsSelectCommand =>
           needProjection = true
@@ -115,7 +115,7 @@ class CarbonSITransformationRule(sparkSession: SparkSession)
 
   private def isCreateTableAsSelect(plan: LogicalPlan): Boolean = {
     var isCreateTableAsSelectFlow = false
-    if (SparkUtil.isSparkVersionXandAbove("2.3")) {
+    if (SparkUtil.isSparkVersionXAndAbove("2.3")) {
       plan collect {
         case CreateHiveTableAsSelectCommand(_, _, _, _) =>
           isCreateTableAsSelectFlow = true
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonSecondaryIndexOptimizer.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonSecondaryIndexOptimizer.scala
index b8e0990..b4c9d67 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonSecondaryIndexOptimizer.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonSecondaryIndexOptimizer.scala
@@ -73,7 +73,7 @@ class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
    * with i1 as (select positionReference from index1 where dt='20261201'
    * group by positionReference),
    * with i2 as (select positionReference from index2 where age='10'),
-   * with indexJion as (select positionReference from i1 join i2 on
+   * with indexJoin as (select positionReference from i1 join i2 on
    * i1.positionReference = i2.positionReference limit 10),
    * with index as (select positionReference from indexJoin group by positionReference)
    * select * from a join index on a.positionId = index.positionReference limit 10
@@ -86,7 +86,7 @@ class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
     var originalFilterAttributes: Set[String] = Set.empty
     var filterAttributes: Set[String] = Set.empty
     var matchingIndexTables: Seq[String] = Seq.empty
-    // all filter attributes are retrived
+    // all filter attributes are retrieved
     filter.condition collect {
       case attr: AttributeReference =>
         originalFilterAttributes = originalFilterAttributes. +(attr.name.toLowerCase)
@@ -144,7 +144,7 @@ class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
         // if the user has requested for the positionID column, in that case we are
         // adding this table property. This is used later to check whether to
         // remove the positionId column from the projection list.
-        // This property will be reflected across sessions as it is directly added to tblproperties.
+        // This property will be reflected across sessions as it is directly added to tblProperties.
         // So concurrent query run with getPositionID() UDF will have issue.
         // But getPositionID() UDF is restricted to testing purpose.
         indexableRelation.carbonTable.getTableInfo.getFactTable.getTableProperties
@@ -326,8 +326,8 @@ class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
   }
 
   /**
-   * This method will combine 2 dataframes by applying union or join based on the nodeType and
-   * create a new dataframe
+   * This method will combine 2 DataFrames by applying union or join based on the nodeType and
+   * create a new DataFrame
    *
    */
   private def applyUnionOrJoinOnDataFrames(nodeType: NodeType,
@@ -436,7 +436,7 @@ class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
   }
 
   /**
-   * This method will check whether the condition is valid for SI pushdown. If yes then return the
+   * This method will check whether the condition is valid for SI push down. If yes then return the
    * tableName which contains this condition
    *
    * @param condition
@@ -448,12 +448,12 @@ class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
       indexTableColumnsToTableMapping: mutable.Map[String, Set[String]],
       pushDownRequired: Boolean): Option[String] = {
     // In case of Like Filter in OR, both the conditions should not be transformed
-    // Incase of like filter in And, only like filter should be removed and
+    // In case of like filter in And, only like filter should be removed and
     // other filter should be transformed with index table
 
-    // Incase NI condition with and, eg., NI(col1 = 'a') && col1 = 'b',
+    // In case NI condition with and, eg., NI(col1 = 'a') && col1 = 'b',
     // only col1 = 'b' should be pushed to index table.
-    // Incase NI condition with or, eg., NI(col1 = 'a') || col1 = 'b',
+    // In case NI condition with or, eg., NI(col1 = 'a') || col1 = 'b',
     // both the condition should not be pushed to index table.
 
     var tableName: Option[String] = None
@@ -659,7 +659,7 @@ class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
   }
 
   /**
-   * This method is used to determn whether limit has to be pushed down to secondary index or not.
+   * This method is used to determine whether limit has to be pushed down to secondary index or not.
    *
    * @param relation
    * @return false if carbon table is not an index table and update status file exists because
@@ -676,7 +676,7 @@ class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
 
   def transformFilterToJoin(plan: LogicalPlan, needProjection: Boolean): LogicalPlan = {
     val isRowDeletedInTableMap = scala.collection.mutable.Map.empty[String, Boolean]
-    // if the join pushdown is enabled, then no need to add projection list to the logical plan as
+    // if the join push down is enabled, then no need to add projection list to the logical plan as
     // we can directly map the join output with the required projections
     // if it is false then the join will not be pushed down to carbon and
     // there it is required to add projection list to map the output from the join
@@ -730,7 +730,7 @@ class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
       // When limit is provided in query, this limit literal can be pushed down to index table
       // if all the filter columns have index table, then limit can be pushed down before grouping
       // last index table, as number of records returned after join where unique and it will
-      // definitely return atleast 1 record.
+      // definitely return at least 1 record.
       case limit@Limit(literal: Literal,
       filter@Filter(condition, logicalRelation@MatchIndexableRelation(indexableRelation)))
         if !condition.isInstanceOf[IsNotNull] &&
@@ -827,12 +827,9 @@ class CarbonSecondaryIndexOptimizer(sparkSession: SparkSession) {
 }
 
 object MatchIndexableRelation {
-
-  type ReturnType = (CarbonDatasourceHadoopRelation)
-
-  def unapply(plan: LogicalPlan): Option[ReturnType] = {
+  def unapply(plan: LogicalPlan): Option[CarbonDatasourceHadoopRelation] = {
     plan match {
-      case l: LogicalRelation if (l.relation.isInstanceOf[CarbonDatasourceHadoopRelation]) =>
+      case l: LogicalRelation if l.relation.isInstanceOf[CarbonDatasourceHadoopRelation] =>
         Some(l.relation.asInstanceOf[CarbonDatasourceHadoopRelation])
       case _ => None
     }
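The last hunk above replaces a one-element ReturnType alias with a plain Option in the extractor object. A tiny self-contained illustration of that extractor (unapply) pattern, using hypothetical plan types rather than Spark's LogicalPlan:

sealed trait Plan
case class Relation(name: String) extends Plan
case class Filter(child: Plan) extends Plan

object MatchRelation {
  def unapply(plan: Plan): Option[Relation] = plan match {
    case r: Relation => Some(r)
    case _           => None
  }
}

// pattern-match a plan and bind the relation if the extractor matches
def describe(plan: Plan): String = plan match {
  case MatchRelation(r) => s"relation ${r.name}"
  case _                => "no directly matchable relation"
}

// describe(Relation("t")) yields "relation t";
// describe(Filter(Relation("t"))) yields "no directly matchable relation"
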
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/query/SecondaryIndexQueryResultProcessor.java b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/query/SecondaryIndexQueryResultProcessor.java
index e6e08bf..cc5af16 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/query/SecondaryIndexQueryResultProcessor.java
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/query/SecondaryIndexQueryResultProcessor.java
@@ -51,7 +51,7 @@ import org.apache.carbondata.processing.util.CarbonDataProcessorUtil;
 
 import org.apache.log4j.Logger;
 import org.apache.spark.sql.secondaryindex.exception.SecondaryIndexException;
-import org.apache.spark.sql.secondaryindex.load.RowComparatorWithOutKettle;
+import org.apache.spark.sql.secondaryindex.load.RowComparator;
 import org.apache.spark.sql.secondaryindex.util.SecondaryIndexUtil;
 
 /**
@@ -74,7 +74,7 @@ public class SecondaryIndexQueryResultProcessor {
    */
   private SortDataRows sortDataRows;
   /**
-   * segment proeprties which contains required information for a segment
+   * segment properties which contains required information for a segment
    */
   private SegmentProperties segmentProperties;
   /**
@@ -297,7 +297,7 @@ public class SecondaryIndexQueryResultProcessor {
       Object[] previousRow = null;
       // comparator for grouping the similar data, means every record
       // should be unique in index table
-      RowComparatorWithOutKettle comparator = new RowComparatorWithOutKettle(noDictionaryColMapping,
+      RowComparator comparator = new RowComparator(noDictionaryColMapping,
           SecondaryIndexUtil.getNoDictDataTypes(indexTable));
       intermediateFileMerger.finish();
       sortDataRows = null;
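The comparator here is used to keep the sorted index rows unique: consecutive rows that compare equal are collapsed to one. A small generic Scala sketch of that deduplication step over already-sorted rows, not tied to the CarbonData row layout:

import java.util.Comparator

def dropConsecutiveDuplicates[T](sortedRows: Seq[T], cmp: Comparator[T]): Seq[T] =
  sortedRows.foldLeft(List.empty[T]) { (kept, row) =>
    kept match {
      // drop the row if it equals the previously kept one
      case prev :: _ if cmp.compare(prev, row) == 0 => kept
      case _                                        => row :: kept
    }
  }.reverse
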
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/rdd/CarbonSIRebuildRDD.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/rdd/CarbonSIRebuildRDD.scala
index f00dac5..ed793a7 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/rdd/CarbonSIRebuildRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/rdd/CarbonSIRebuildRDD.scala
@@ -202,7 +202,7 @@ class CarbonSIRebuildRDD[K, V](
         try {
           // As the tableBlockInfoList is sorted take the ColCardinality from the last
           // Block of the sorted list as it will have the last updated cardinality.
-          // Blocks are sorted by order of updation using TableBlockInfo.compare method so
+          // Blocks are sorted by order of the update using TableBlockInfo.compare method so
           // the last block after the sort will be the latest one.
           dataFileFooter = CarbonUtil
             .readMetadataFile(tableBlockInfoList.get(tableBlockInfoList.size() - 1))
@@ -286,7 +286,7 @@ class CarbonSIRebuildRDD[K, V](
             segmentProperties,
             tempStoreLoc,
             carbonLoadModelCopy,
-            carbonMergerMapping.campactionType,
+            carbonMergerMapping.compactionType,
             null)
 
         } else {
@@ -296,7 +296,7 @@ class CarbonSIRebuildRDD[K, V](
             carbonLoadModelCopy,
             indexTable,
             segmentProperties,
-            carbonMergerMapping.campactionType,
+            carbonMergerMapping.compactionType,
             indexTableName,
             null)
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/rdd/SecondaryIndexCreator.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/rdd/SecondaryIndexCreator.scala
index e897051..cc0bdd9 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/rdd/SecondaryIndexCreator.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/rdd/SecondaryIndexCreator.scala
@@ -67,7 +67,7 @@ object SecondaryIndexCreator {
     LOGGER
       .info(s"Configured thread pool size for distributing segments in secondary index creation " +
             s"is $threadPoolSize")
-    // create executor service to parallely run the segments
+    // create executor service to run the segments in parallel
     val executorService = java.util.concurrent.Executors.newFixedThreadPool(threadPoolSize)
     if (null == indexCarbonTable) {
       // avoid more lookupRelation to table
@@ -200,10 +200,10 @@ object SecondaryIndexCreator {
 
       if (hasFailedSegments) {
         // if the call is from compaction, we need to fail the main table compaction also, and if
-        // the load is called from SIloadEventListener, which is for corresponding main table
+        // the load is called from SILoadEventListener, which is for corresponding main table
         // segment, then if SI load fails, we need to fail main table load also, so throw exception,
         // if load is called from SI creation or SILoadEventListenerForFailedSegments, no need to
-        // fail, just make the segement as marked for delete, so that next load to main table will
+        // fail, just make the segment as marked for delete, so that next load to main table will
         // take care
         if (isCompactionCall || !isLoadToFailedSISegments) {
           throw new Exception("Secondary index creation failed")
@@ -281,7 +281,7 @@ object SecondaryIndexCreator {
 
       // update the status of all the segments to marked for delete if data load fails, so that
       // next load which is triggered for SI table in post event of main table data load clears
-      // all the segments of marked for delete and retriggers the load to same segments again in
+      // all the segments of marked for delete and re-triggers the load to same segments again in
       // that event
       if (failedSISegments.nonEmpty && !isCompactionCall) {
         tableStatusUpdateForFailure = FileInternalUtil.updateTableStatus(
@@ -392,7 +392,7 @@ object SecondaryIndexCreator {
   }
 
   /**
-   * This method will get the configuration for thread pool size which will decide the numbe rof
+   * This method will get the configuration for thread pool size which will decide the number of
    * segments to run in parallel for secondary index creation
    *
    */
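The comments fixed above describe running one secondary-index load task per segment on a fixed-size thread pool and then collecting the failed segments. A rough sketch of that pattern with java.util.concurrent; the segment names and the work function are placeholders, not the CarbonData API:

import java.util.concurrent.{Callable, Executors, TimeUnit}

def loadSegmentsInParallel(segments: Seq[String], threadPoolSize: Int)
    (work: String => Boolean): Seq[String] = {
  val executor = Executors.newFixedThreadPool(threadPoolSize)
  try {
    val futures = segments.map { segment =>
      executor.submit(new Callable[(String, Boolean)] {
        override def call(): (String, Boolean) = (segment, work(segment))
      })
    }
    // collect the segments whose load reported failure, so the caller can
    // mark them for delete or fail the whole operation
    futures.map(_.get()).collect { case (segment, false) => segment }
  } finally {
    executor.shutdown()
    executor.awaitTermination(1, TimeUnit.MINUTES)
  }
}
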
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/util/SecondaryIndexUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/util/SecondaryIndexUtil.scala
index 034cd1c..2c3260a 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/util/SecondaryIndexUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/util/SecondaryIndexUtil.scala
@@ -364,7 +364,7 @@ object SecondaryIndexUtil {
     val taskBlockInfo = new TaskBlockInfo
     tableBlockInfoList.asScala.foreach { info =>
       val taskNo = CarbonTablePath.DataFileUtil.getTaskNo(info.getFilePath)
-      groupCorrespodingInfoBasedOnTask(info, taskBlockInfo, taskNo)
+      groupCorrespondingInfoBasedOnTask(info, taskBlockInfo, taskNo)
     }
     taskBlockInfo
   }
@@ -373,7 +373,7 @@ object SecondaryIndexUtil {
    * Grouping the taskNumber and list of TableBlockInfo.
    *
    */
-  private def groupCorrespodingInfoBasedOnTask(info: TableBlockInfo,
+  private def groupCorrespondingInfoBasedOnTask(info: TableBlockInfo,
       taskBlockMapping: TaskBlockInfo,
       taskNo: String): Unit = {
     // get the corresponding list from task mapping.
@@ -562,7 +562,7 @@ object SecondaryIndexUtil {
       CarbonDataMergerUtil.getMergedLoadName(sortedSegments)
     } else {
       throw new UnsupportedOperationException(
-        "Compaction requires atleast 2 segments to be merged.But the input list size is " +
+        "Compaction requires at least 2 segments to be merged.But the input list size is " +
         list.size())
     }
   }
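groupCorrespondingInfoBasedOnTask above builds a task-number-to-blocks mapping. Stripped of the CarbonData types (the BlockInfo case class here is a hypothetical stand-in with the task number already extracted), the grouping amounts to:

case class BlockInfo(taskNo: String, filePath: String)

def groupByTask(blocks: Seq[BlockInfo]): Map[String, Seq[BlockInfo]] =
  blocks.groupBy(_.taskNo)

// groupByTask(Seq(BlockInfo("0", "part-0-0"), BlockInfo("0", "part-0-1"),
//                 BlockInfo("1", "part-1-0"))) yields two task groups
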
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/test/util/QueryTest.scala b/integration/spark/src/main/scala/org/apache/spark/sql/test/util/QueryTest.scala
index c08370b..65a2b5c 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/test/util/QueryTest.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/test/util/QueryTest.scala
@@ -126,7 +126,7 @@ class QueryTest extends PlanTest {
   val resourcesPath = TestQueryExecutor.resourcesPath
   val target = TestQueryExecutor.target
   val integrationPath = TestQueryExecutor.integrationPath
-  val dblocation = TestQueryExecutor.location
+  val dbLocation = TestQueryExecutor.location
   val defaultParallelism = sqlContext.sparkContext.defaultParallelism
   val targetTestClass = System.getProperty("user.dir") + "/target/test-classes"
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/util/SparkSQLUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/util/SparkSQLUtil.scala
index 2101090..55d3fdf 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/util/SparkSQLUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/util/SparkSQLUtil.scala
@@ -57,7 +57,7 @@ object SparkSQLUtil {
     logicalPlanObj.stats
   }
 
-  def invokeQueryPlannormalizeExprId(r: NamedExpression, input: AttributeSeq)
+  def invokeQueryPlanNormalizeExprId(r: NamedExpression, input: AttributeSeq)
       : NamedExpression = {
     QueryPlan.normalizeExprId(r, input)
   }
@@ -86,7 +86,7 @@ object SparkSQLUtil {
     EliminateView
   }
 
-  def getPullupCorrelatedPredicatesObj(): Rule[LogicalPlan] = {
+  def getPullUpCorrelatedPredicatesObj(): Rule[LogicalPlan] = {
     PullupCorrelatedPredicates
   }
 
@@ -162,10 +162,10 @@ object SparkSQLUtil {
       carbonTable: CarbonTable): DataFrame = {
     /**
      * [[org.apache.spark.sql.catalyst.expressions.objects.ValidateExternalType]] validates the
-     * datatype of column data and corresponding datatype in schema provided to create dataframe.
+     * datatype of column data and corresponding datatype in schema provided to create DataFrame.
      * Since carbonScanRDD gives Long data for timestamp column and corresponding column datatype in
      * schema is Timestamp, this validation fails if we use createDataFrame API which takes rdd as
-     * input. Hence, using below API which creates dataframe from qualified tablename.
+     * input. Hence, using below API which creates DataFrame from qualified table name.
      */
     sparkSession.sqlContext.table(carbonTable.getDatabaseName + "." + carbonTable.getTableName)
   }
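The reworded comment explains why the DataFrame is obtained from the qualified table name rather than built with createDataFrame over an RDD. As a usage sketch (database and table names are placeholders):

import org.apache.spark.sql.{DataFrame, SparkSession}

def tableAsDataFrame(spark: SparkSession, db: String, table: String): DataFrame =
  spark.sqlContext.table(s"$db.$table")

Letting Spark resolve the table keeps the schema typing (for example timestamp columns) in Spark's hands instead of validating raw scan output against it.
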
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/util/SparkTypeConverter.scala b/integration/spark/src/main/scala/org/apache/spark/sql/util/SparkTypeConverter.scala
index 100ad17..0e628bb 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/util/SparkTypeConverter.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/util/SparkTypeConverter.scala
@@ -140,11 +140,11 @@ object SparkTypeConverter {
     }.mkString(",")
   }
 
-  def addDecimalScaleAndPrecision(dimval: CarbonColumn, dataType: String): String = {
+  def addDecimalScaleAndPrecision(dimension: CarbonColumn, dataType: String): String = {
     var dType = dataType
-    if (CarbonDataTypes.isDecimal(dimval.getDataType)) {
+    if (CarbonDataTypes.isDecimal(dimension.getDataType)) {
       dType +=
-      "(" + dimval.getColumnSchema.getPrecision + "," + dimval.getColumnSchema.getScale + ")"
+      "(" + dimension.getColumnSchema.getPrecision + "," + dimension.getColumnSchema.getScale + ")"
     }
     dType
   }
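The renamed addDecimalScaleAndPrecision only appends "(precision,scale)" when the column is a decimal. The same string-building step, with a hypothetical simplified column type in place of CarbonColumn:

case class ColumnSketch(isDecimal: Boolean, precision: Int, scale: Int)

def withDecimalScaleAndPrecision(column: ColumnSketch, dataType: String): String =
  if (column.isDecimal) s"$dataType(${column.precision},${column.scale})" else dataType

// withDecimalScaleAndPrecision(ColumnSketch(true, 10, 2), "decimal") returns "decimal(10,2)"
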
diff --git a/integration/spark/src/main/scala/org/apache/spark/util/AlterTableUtil.scala b/integration/spark/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
index 3a1ac1e..8d12a7c 100644
--- a/integration/spark/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
@@ -362,7 +362,7 @@ object AlterTableUtil {
   /**
    * This method modifies the table properties if column rename happened
    * @param tableProperties tableProperties of the table
-   * @param oldColumnName old COlumnname before rename
+   * @param oldColumnName old Column name before rename
    * @param newColumnName new column name to rename
    */
   def modifyTablePropertiesAfterColumnRename(
@@ -494,7 +494,7 @@ object AlterTableUtil {
           }
         }
         // check if duplicate columns are present in both local dictionary include and exclude
-        CarbonScalaUtil.validateDuplicateLocalDictIncludeExcludeColmns(tblPropertiesMap)
+        CarbonScalaUtil.validateDuplicateColumnsForLocalDict(tblPropertiesMap)
       } else {
         // This removes the comment parameter from thriftTable
         // since thriftTable also holds comment as its property.
@@ -528,13 +528,13 @@ object AlterTableUtil {
           }
         }
         // check if duplicate columns are present in both local dictionary include and exclude
-        CarbonScalaUtil.validateDuplicateLocalDictIncludeExcludeColmns(tblPropertiesMap)
+        CarbonScalaUtil.validateDuplicateColumnsForLocalDict(tblPropertiesMap)
       }
-      val (tableIdentifier, schemParts) = updateSchemaInfo(
+      val (tableIdentifier, schemaParts) = updateSchemaInfo(
         carbonTable = carbonTable,
         schemaEvolutionEntry,
         thriftTable = thriftTable)(sparkSession)
-      CarbonSessionCatalogUtil.alterTable(tableIdentifier, schemParts, None, sparkSession)
+      CarbonSessionCatalogUtil.alterTable(tableIdentifier, schemaParts, None, sparkSession)
       CarbonSessionCatalogUtil.alterTableProperties(
         sparkSession, tableIdentifier, lowerCasePropertiesMap.toMap, propKeys)
       sparkSession.catalog.refreshTable(tableIdentifier.quotedString)
@@ -577,7 +577,7 @@ object AlterTableUtil {
   }
 
   /**
-   * this method validates the local dictioanry properties for alter set
+   * this method validates the local dictionary properties for alter set
    *
    * @param lowerCasePropertiesMap
    * @param tblPropertiesMap
@@ -734,7 +734,7 @@ object AlterTableUtil {
     val newCompactionLevelThreshold =
       propertiesMap.get(CarbonCommonConstants.TABLE_COMPACTION_LEVEL_THRESHOLD)
     if (newCompactionLevelThreshold.isDefined) {
-      // check compactionlevelthreshold is in the specified range and in the format of number
+      // check compaction level threshold is in the specified range and in the format of number
       if (CarbonProperties.getInstance().getIntArray(newCompactionLevelThreshold.get).length == 0) {
         throw new InvalidConfigurationException(
           s"Cannot set COMPACTION_LEVEL_THRESHOLD as ${newCompactionLevelThreshold.get}")
@@ -1009,10 +1009,10 @@ object AlterTableUtil {
     // varchar dataType column
     if (property._1.equalsIgnoreCase(CarbonCommonConstants.LOCAL_DICTIONARY_INCLUDE)) {
       // Validate whether any of the child columns of complex dataType column is a string column
-      localDictColumns.foreach { dictColm =>
+      localDictColumns.foreach { dictColumn =>
         for (elem <- allColumns.indices) {
           var column = allColumns(elem)
-          if (column.getColumnName.equalsIgnoreCase(dictColm) && column.getNumberOfChild > 0 &&
+          if (column.getColumnName.equalsIgnoreCase(dictColumn) && column.getNumberOfChild > 0 &&
               !validateChildColumns(allColumns, column.getNumberOfChild, elem. +(1))) {
             val errMsg =
               "None of the child columns specified in the complex dataType column(s) in " +
diff --git a/integration/spark/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala b/integration/spark/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala
index 250469a..507dbd5 100644
--- a/integration/spark/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala
@@ -98,20 +98,20 @@ object CarbonReflectionUtils {
 
 
   def getOverWriteOption[T: TypeTag : reflect.ClassTag](name: String, obj: T): Boolean = {
-    var overwriteboolean: Boolean = false
+    var isOverwriteBoolean: Boolean = false
     val im = rm.reflect(obj)
     for (m <- typeOf[T].members.filter(!_.isMethod)) {
       if (m.toString.contains("overwrite")) {
         val typ = m.typeSignature
         if (typ.toString.contains("Boolean")) {
           // Spark2.2
-          overwriteboolean = im.reflectField(m.asTerm).get.asInstanceOf[Boolean]
+          isOverwriteBoolean = im.reflectField(m.asTerm).get.asInstanceOf[Boolean]
         } else {
-          overwriteboolean = getOverWrite("enabled", im.reflectField(m.asTerm).get)
+          isOverwriteBoolean = getOverWrite("enabled", im.reflectField(m.asTerm).get)
         }
       }
     }
-    overwriteboolean
+    isOverwriteBoolean
   }
 
   private def getOverWrite[T: TypeTag : reflect.ClassTag](name: String, obj: T): Boolean = {
@@ -189,7 +189,7 @@ object CarbonReflectionUtils {
       relation.catalogTable.map(_.identifier))
   }
 
-  def invokewriteAndReadMethod(dataSourceObj: DataSource,
+  def invokeWriteAndReadMethod(dataSourceObj: DataSource,
       dataFrame: DataFrame,
       data: LogicalPlan,
       session: SparkSession,
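The renamed getOverWriteOption helper leans on Scala runtime reflection to read fields such as overwrite off Spark's command case classes. A generic sketch of reading a Boolean overwrite field from any case class; the Spark-version branching of the real method is omitted, and the field name is assumed to be exactly "overwrite":

import scala.reflect.runtime.{universe => ru}

def readOverwriteFlag[T: ru.TypeTag : scala.reflect.ClassTag](obj: T): Boolean = {
  val mirror = ru.runtimeMirror(obj.getClass.getClassLoader)
  val im = mirror.reflect(obj)
  ru.typeOf[T].members
    .filter(m => !m.isMethod && m.name.toString.trim == "overwrite")
    .collectFirst {
      // keep only a Boolean-typed field and read its current value
      case m if im.reflectField(m.asTerm).get.isInstanceOf[Boolean] =>
        im.reflectField(m.asTerm).get.asInstanceOf[Boolean]
    }
    .getOrElse(false)
}

// readOverwriteFlag(SomeCommand(overwrite = true)) would yield true, assuming
// SomeCommand is a case class with a Boolean `overwrite` field.
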
diff --git a/integration/spark/src/main/scala/org/apache/spark/util/SparkUtil.scala b/integration/spark/src/main/scala/org/apache/spark/util/SparkUtil.scala
index 1111287..e92cf5e 100644
--- a/integration/spark/src/main/scala/org/apache/spark/util/SparkUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/util/SparkUtil.scala
@@ -40,7 +40,7 @@ object SparkUtil {
    * Version passed should be of format x.y  e.g 2.2 ,2.3 , SPARK_VERSION
    * will be of format x.y.z e.g 2.3.0,2.2.1
    */
-  def isSparkVersionXandAbove(xVersion: String, isEqualComparision: Boolean = false): Boolean = {
+  def isSparkVersionXAndAbove(xVersion: String, isEqualComparision: Boolean = false): Boolean = {
     val tmpArray = SPARK_VERSION.split("\\.")
     // convert to float
     val sparkVersion = if (tmpArray.length >= 2) {
@@ -57,13 +57,13 @@ object SparkUtil {
   }
 
   def isSparkVersionEqualTo(xVersion: String): Boolean = {
-    isSparkVersionXandAbove(xVersion, true)
+    isSparkVersionXAndAbove(xVersion, true)
   }
 
   def setNullExecutionId(sparkSession: SparkSession): Unit = {
     // "spark.sql.execution.id is already set" exception will be
     // thrown if not set to null in spark2.2 and below versions
-    if (!SparkUtil.isSparkVersionXandAbove("2.3")) {
+    if (!SparkUtil.isSparkVersionXAndAbove("2.3")) {
       sparkSession.sparkContext.setLocalProperty(EXECUTION_ID_KEY, null)
     }
   }
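isSparkVersionXAndAbove and isSparkVersionEqualTo compare the major.minor part of SPARK_VERSION with a threshold such as "2.3". The comparison itself, with the version passed in explicitly instead of read from SPARK_VERSION, reduces to roughly this (equalOnly = true corresponds to the equality check):

def isVersionAtLeast(fullVersion: String, xVersion: String,
    equalOnly: Boolean = false): Boolean = {
  // keep only "major.minor" and convert it to a float for comparison
  def majorMinor(v: String): Float = {
    val parts = v.split("\\.")
    if (parts.length >= 2) (parts(0) + "." + parts(1)).toFloat else parts(0).toFloat
  }
  if (equalOnly) majorMinor(fullVersion) == xVersion.toFloat
  else majorMinor(fullVersion) >= xVersion.toFloat
}

// isVersionAtLeast("2.3.0", "2.3") is true; isVersionAtLeast("2.2.1", "2.3") is false
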
diff --git a/integration/spark/src/main/scala/org/apache/spark/util/TableLoader.scala b/integration/spark/src/main/scala/org/apache/spark/util/TableLoader.scala
index ef9a931..6b354fd 100644
--- a/integration/spark/src/main/scala/org/apache/spark/util/TableLoader.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/util/TableLoader.scala
@@ -42,13 +42,13 @@ object TableLoader {
     val path = new Path(propertiesFile)
     val fs = path.getFileSystem(FileFactory.getConfiguration)
     props.load(fs.open(path))
-    val elments = props.entrySet().iterator()
+    val elements = props.entrySet().iterator()
     val map = new mutable.HashMap[String, String]()
     System.out.println("properties file:")
-    while (elments.hasNext) {
-      val elment = elments.next()
-      System.out.println(s"${elment.getKey}=${elment.getValue}")
-      map.put(elment.getKey.asInstanceOf[String], elment.getValue.asInstanceOf[String])
+    while (elements.hasNext) {
+      val element = elements.next()
+      System.out.println(s"${element.getKey}=${element.getValue}")
+      map.put(element.getKey.asInstanceOf[String], element.getValue.asInstanceOf[String])
     }
 
     immutable.Map(map.toSeq: _*)
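The renamed elements loop above reads a java.util.Properties file into an immutable Scala map. The same step over a local file, as a sketch; the real TableLoader opens the path through the Hadoop FileSystem instead of a FileInputStream:

import java.io.FileInputStream
import java.util.Properties
import scala.collection.JavaConverters._
import scala.collection.immutable

def loadProperties(propertiesFile: String): immutable.Map[String, String] = {
  val props = new Properties()
  val in = new FileInputStream(propertiesFile)
  try props.load(in) finally in.close()
  // convert each entry to a (key, value) pair of strings
  props.entrySet().asScala.map { entry =>
    entry.getKey.asInstanceOf[String] -> entry.getValue.asInstanceOf[String]
  }.toMap
}
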
diff --git a/integration/spark/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapter.scala b/integration/spark/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapter.scala
index 34705f9..79318fa 100644
--- a/integration/spark/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapter.scala
+++ b/integration/spark/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapter.scala
@@ -118,7 +118,7 @@ object CarbonToSparkAdapter {
 
   /**
    * As a part of SPARK-24085 Hive tables supports scala subquery for
-   * parition tables,so Carbon also needs to supports
+   * the partitioned tables, so Carbon also needs to support it
    * @param partitionSet
    * @param filterPredicates
    * @return
diff --git a/integration/spark/src/main/spark2.3/org/apache/spark/sql/hive/CarbonSessionStateBuilder.scala b/integration/spark/src/main/spark2.3/org/apache/spark/sql/hive/CarbonSessionStateBuilder.scala
index 86faa3d..832596e 100644
--- a/integration/spark/src/main/spark2.3/org/apache/spark/sql/hive/CarbonSessionStateBuilder.scala
+++ b/integration/spark/src/main/spark2.3/org/apache/spark/sql/hive/CarbonSessionStateBuilder.scala
@@ -37,7 +37,7 @@ import org.apache.spark.sql.{CarbonEnv, SparkSession}
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema
 
 /**
- * This class will have carbon catalog and refresh the relation from cache if the carbontable in
+ * This class will have carbon catalog and refresh the relation from cache if the carbon table in
  * carbon catalog is not same as cached carbon relation's carbon table
  *
  * @param externalCatalog
@@ -145,7 +145,7 @@ class CarbonHiveSessionCatalog(
   }
 
   /**
-   * Update the storageformat with new location information
+   * Update the storage format with new location information
    */
   override def updateStorageLocation(
       path: Path,
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala b/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala
index ae0eea8..4965537 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala
@@ -1642,7 +1642,7 @@ class TestBinaryDataType extends QueryTest with BeforeAndAfterAll {
             assert(e2.getMessage.contains(message_2_3))
             assert(e3.getMessage.contains(message_2_3))
         }
-        else if (SparkUtil.isSparkVersionXandAbove("2.4")) {
+        else if (SparkUtil.isSparkVersionXAndAbove("2.4")) {
             assert(e1.getMessage.contains(message_2_4))
             assert(e2.getMessage.contains(message_2_4))
             assert(e3.getMessage.contains(message_2_4))
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateHiveTableWithCarbonDS.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateHiveTableWithCarbonDS.scala
index 2d2e9b6..709a2e9 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateHiveTableWithCarbonDS.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateHiveTableWithCarbonDS.scala
@@ -48,7 +48,7 @@ class TestCreateHiveTableWithCarbonDS extends QueryTest with BeforeAndAfterAll {
   }
 
   private def verifyTable = {
-    if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       val table = CarbonSessionCatalogUtil
         .getClient(sqlContext.sparkSession).getTable("default", "source")
       assertResult(table.schema.fields.length)(3)
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableForBinary.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableForBinary.scala
index da897a1..b2b48ef 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableForBinary.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableForBinary.scala
@@ -73,7 +73,7 @@ class TestNonTransactionalCarbonTableForBinary extends QueryTest with BeforeAndA
     test("test read image carbon with external table, generate by sdk, CTAS") {
         sql("DROP TABLE IF EXISTS binaryCarbon")
         sql("DROP TABLE IF EXISTS binaryCarbon3")
-        if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+        if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
             sql(s"CREATE EXTERNAL TABLE binaryCarbon STORED AS carbondata LOCATION '$writerPath'")
             sql(s"CREATE TABLE binaryCarbon3 STORED AS carbondata AS SELECT * FROM binaryCarbon")
 
@@ -113,7 +113,7 @@ class TestNonTransactionalCarbonTableForBinary extends QueryTest with BeforeAndA
         sql("DROP TABLE IF EXISTS binaryCarbon2")
         sql("DROP TABLE IF EXISTS binaryCarbon3")
         sql("DROP TABLE IF EXISTS binaryCarbon4")
-        if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+        if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
             sql(s"CREATE TABLE binaryCarbon USING CARBON LOCATION '$writerPath'")
             sql(
                 s"""
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
index 716ff02..f31b503 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
@@ -36,7 +36,7 @@ import org.apache.carbondata.core.metadata.datatype.DataTypes
 import org.apache.carbondata.core.metadata.{CarbonMetadata, SegmentFileStore}
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.core.util.path.CarbonTablePath
-import org.apache.carbondata.spark.load.PrimtiveOrdering
+import org.apache.carbondata.spark.load.PrimitiveOrdering
 
 class TestRangeColumnDataLoad extends QueryTest with BeforeAndAfterEach with BeforeAndAfterAll {
   var filePath: String = s"$resourcesPath/globalsort"
@@ -727,7 +727,7 @@ class TestRangeColumnDataLoad extends QueryTest with BeforeAndAfterEach with Bef
   test("DataSkewRangePartitioner.combineDataSkew") {
     val partitioner =
       new DataSkewRangePartitioner(1, null,
-        false)(new PrimtiveOrdering(DataTypes.STRING),
+        false)(new PrimitiveOrdering(DataTypes.STRING),
         classTag[Object])
 
     testCombineDataSkew(
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dblocation/DBLocationCarbonTableTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dblocation/DBLocationCarbonTableTestCase.scala
index f5c753e..4b799a0 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dblocation/DBLocationCarbonTableTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dblocation/DBLocationCarbonTableTestCase.scala
@@ -48,7 +48,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
 
   //TODO fix this test case
   test("Update operation on carbon table with insert into") {
-    sql(s"create database carbon2 location '$dblocation'")
+    sql(s"create database carbon2 location '$dbLocation'")
     sql("use carbon2")
     sql("""create table carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
@@ -64,14 +64,14 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("create and drop database test") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("drop database if exists carbon cascade")
   }
 
   test("create two databases at same table") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     try {
-      sql(s"create database carbon1 location '$dblocation'")
+      sql(s"create database carbon1 location '$dbLocation'")
     } catch {
       case e: AnalysisException =>
         assert(true)
@@ -79,7 +79,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("create table and load data") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/dblocation/test.csv' INTO table carbon.carbontable""")
@@ -87,7 +87,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("create table and insert data") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
@@ -96,7 +96,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("create table and 2 times data load") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
@@ -107,7 +107,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
 
 
   test("Update operation on carbon table") {
-    sql(s"create database carbon1 location '$dblocation'")
+    sql(s"create database carbon1 location '$dbLocation'")
     sql("use carbon1")
     sql(
       """
@@ -126,7 +126,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Delete operation on carbon table") {
-    sql(s"create database carbon1 location '$dblocation'")
+    sql(s"create database carbon1 location '$dbLocation'")
     sql("use carbon1")
     sql("""create table carbon1.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
@@ -141,7 +141,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Alter table add column test") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
@@ -156,7 +156,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Alter table change column datatype test") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
@@ -170,7 +170,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Alter table change dataType with sort column after adding measure column test"){
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql(
       """create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string)
@@ -189,7 +189,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Alter table change dataType with sort column after adding date datatype with default value test"){
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql(
       """create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string)
@@ -208,7 +208,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Alter table change dataType with sort column after adding dimension column with default value test"){
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql(
       """create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string)
@@ -227,7 +227,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Alter table change dataType with sort column after rename test"){
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql(
       """create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string)
@@ -248,7 +248,7 @@ class DBLocationCarbonTableTestCase extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Alter table drop column test") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
index ec89570..7c9f966 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
@@ -629,7 +629,7 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_UPDATE_PERSIST_ENABLE, "false")
     sql("drop database if exists carbon1 cascade")
-    sql(s"create database carbon1 location '$dblocation'")
+    sql(s"create database carbon1 location '$dbLocation'")
     sql("use carbon1")
     sql("""CREATE TABLE carbontable(id int, name string, city string, age int)
          STORED AS carbondata""")
@@ -760,13 +760,13 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     sql("delete from t where c3 = 2").show()
     sql("update t set(c4) = ('yyy') where c3 = 3").show()
     checkAnswer(sql("select count(*) from t where c4 = 'yyy'"), Seq(Row(1)))
-    val f = new File(dblocation + CarbonCommonConstants.FILE_SEPARATOR +
+    val f = new File(dbLocation + CarbonCommonConstants.FILE_SEPARATOR +
                      CarbonCommonConstants.FILE_SEPARATOR + "t" +
                      CarbonCommonConstants.FILE_SEPARATOR + "Fact" +
                      CarbonCommonConstants.FILE_SEPARATOR + "Part0")
     if (!FileFactory.isFileExist(
       CarbonTablePath.getSegmentFilesLocation(
-        dblocation + CarbonCommonConstants.FILE_SEPARATOR +
+        dbLocation + CarbonCommonConstants.FILE_SEPARATOR +
         CarbonCommonConstants.FILE_SEPARATOR + "t"))) {
       assert(f.list().length == 2)
     }
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala
index 2229716..690c724 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala
@@ -515,7 +515,7 @@ test("Creation of partition table should fail if the colname in table schema and
       sql("alter table onlyPart drop columns(name)")
     }
     assert(ex1.getMessage.contains("alter table drop column is failed, cannot have the table with all columns as partition column"))
-    if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       val ex2 = intercept[MalformedCarbonCommandException] {
         sql("alter table onlyPart change age age bigint")
       }
diff --git a/integration/spark/src/test/scala/org/apache/spark/carbondata/register/TestRegisterCarbonTable.scala b/integration/spark/src/test/scala/org/apache/spark/carbondata/register/TestRegisterCarbonTable.scala
index f0da789..1008601 100644
--- a/integration/spark/src/test/scala/org/apache/spark/carbondata/register/TestRegisterCarbonTable.scala
+++ b/integration/spark/src/test/scala/org/apache/spark/carbondata/register/TestRegisterCarbonTable.scala
@@ -62,14 +62,14 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterEach {
   }
 
   test("register tables test") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
-    backUpData(dblocation, Some("carbon"), "carbontable")
+    backUpData(dbLocation, Some("carbon"), "carbontable")
     sql("drop table carbontable")
     if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
-      restoreData(dblocation, "carbontable")
+      restoreData(dbLocation, "carbontable")
       sql("refresh table carbontable")
       checkAnswer(sql("select count(*) from carbontable"), Row(1))
       checkAnswer(sql("select c1 from carbontable"), Seq(Row("a")))
@@ -77,14 +77,14 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterEach {
   }
 
   test("register table test") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
-    backUpData(dblocation, Some("carbon"), "carbontable")
+    backUpData(dbLocation, Some("carbon"), "carbontable")
     sql("drop table carbontable")
     if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
-      restoreData(dblocation, "carbontable")
+      restoreData(dbLocation, "carbontable")
       sql("refresh table carbontable")
       checkAnswer(sql("select count(*) from carbontable"), Row(1))
       checkAnswer(sql("select c1 from carbontable"), Seq(Row("a")))
@@ -92,16 +92,16 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Update operation on carbon table should pass after registration or refresh") {
-    sql(s"create database carbon1 location '$dblocation'")
+    sql(s"create database carbon1 location '$dbLocation'")
     sql("use carbon1")
     sql("drop table if exists carbontable")
     sql("""create table carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
     sql("insert into carbontable select 'b',1,'bb','bbb'")
-    backUpData(dblocation, Some("carbon1"), "carbontable")
+    backUpData(dbLocation, Some("carbon1"), "carbontable")
     sql("drop table carbontable")
     if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
-      restoreData(dblocation, "carbontable")
+      restoreData(dbLocation, "carbontable")
       sql("refresh table carbontable")
       // update operation
       sql("""update carbon1.carbontable d  set (d.c2) = (d.c2 + 1) where d.c1 = 'a'""").show()
@@ -114,7 +114,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Update operation on carbon table") {
-    sql(s"create database carbon1 location '$dblocation'")
+    sql(s"create database carbon1 location '$dbLocation'")
     sql("use carbon1")
     sql(
       """
@@ -123,10 +123,10 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterEach {
       """)
     val testData = s"$resourcesPath/sample.csv"
     sql(s"LOAD DATA LOCAL INPATH '$testData' into table automerge")
-    backUpData(dblocation, Some("carbon1"), "automerge")
+    backUpData(dbLocation, Some("carbon1"), "automerge")
     sql("drop table automerge")
     if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
-      restoreData(dblocation, "automerge")
+      restoreData(dbLocation, "automerge")
       sql("refresh table automerge")
       // update operation
       sql("""update carbon1.automerge d  set (d.id) = (d.id + 1) where d.id > 2""").show()
@@ -138,15 +138,15 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Delete operation on carbon table") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
     sql("insert into carbontable select 'b',1,'bb','bbb'")
-    backUpData(dblocation, Some("carbon"), "carbontable")
+    backUpData(dbLocation, Some("carbon"), "carbontable")
     sql("drop table carbontable")
     if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
-      restoreData(dblocation, "carbontable")
+      restoreData(dbLocation, "carbontable")
       sql("refresh table carbontable")
       // delete operation
       sql("""delete from carbontable where c3 = 'aa'""").show
@@ -159,15 +159,15 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Alter table add column test") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
     sql("insert into carbontable select 'b',1,'bb','bbb'")
-    backUpData(dblocation, Some("carbon"), "carbontable")
+    backUpData(dbLocation, Some("carbon"), "carbontable")
     sql("drop table carbontable")
     if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
-      restoreData(dblocation, "carbontable")
+      restoreData(dbLocation, "carbontable")
       sql("refresh table carbontable")
       sql("Alter table carbontable add columns(c4 string) " +
           "TBLPROPERTIES('DEFAULT.VALUE.c4'='def')")
@@ -180,15 +180,15 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Alter table change column datatype test") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
     sql("insert into carbontable select 'b',1,'bb','bbb'")
-    backUpData(dblocation, Some("carbon"), "carbontable")
+    backUpData(dbLocation, Some("carbon"), "carbontable")
     sql("drop table carbontable")
     if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
-      restoreData(dblocation, "carbontable")
+      restoreData(dbLocation, "carbontable")
       sql("refresh table carbontable")
       sql("Alter table carbontable change c2 c2 long")
       checkAnswer(
@@ -200,15 +200,15 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterEach {
   }
 
   test("Alter table drop column test") {
-    sql(s"create database carbon location '$dblocation'")
+    sql(s"create database carbon location '$dbLocation'")
     sql("use carbon")
     sql("""create table carbon.carbontable (c1 string,c2 int,c3 string,c5 string) STORED AS carbondata""")
     sql("insert into carbontable select 'a',1,'aa','aaa'")
     sql("insert into carbontable select 'b',1,'bb','bbb'")
-    backUpData(dblocation, Some("carbon"), "carbontable")
+    backUpData(dbLocation, Some("carbon"), "carbontable")
     sql("drop table carbontable")
     if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
-      restoreData(dblocation, "carbontable")
+      restoreData(dbLocation, "carbontable")
       sql("refresh table carbontable")
       sql("Alter table carbontable drop columns(c2)")
       checkAnswer(
diff --git a/integration/spark/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AlterTableColumnRenameTestCase.scala b/integration/spark/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AlterTableColumnRenameTestCase.scala
index 5593439..29e4b43 100644
--- a/integration/spark/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AlterTableColumnRenameTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AlterTableColumnRenameTestCase.scala
@@ -297,7 +297,7 @@ class AlterTableColumnRenameTestCase extends QueryTest with BeforeAndAfterAll {
     sql("alter table rename change deptno classno bigint")
     if (SparkUtil.isSparkVersionEqualTo("2.1")) {
       checkExistence(sql("describe formatted rename"), false, "This column has comment ")
-    } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       checkExistence(sql("describe formatted rename"), true, "This column has comment ")
     }
   }
diff --git a/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceBinaryTest.scala b/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceBinaryTest.scala
index 9efa18c..2612407 100644
--- a/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceBinaryTest.scala
+++ b/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceBinaryTest.scala
@@ -208,7 +208,7 @@ class SparkCarbonDataSourceBinaryTest extends QueryTest with BeforeAndAfterAll {
   }
 
   test("Don't support insert into partition table") {
-    if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       sql("DROP TABLE IF EXISTS binaryCarbon")
       sql("DROP TABLE IF EXISTS binaryCarbon2")
       sql("DROP TABLE IF EXISTS binaryCarbon3")
diff --git a/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala b/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala
index 70939fa..42548c6 100644
--- a/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala
+++ b/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala
@@ -107,7 +107,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
     FileUtils.deleteDirectory(new File(writerOutputPath))
     val num = 10000
     buildTestData(num, List("stringField", "intField"))
-    if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       sql("DROP TABLE IF EXISTS carbontable_varchar")
       sql("DROP TABLE IF EXISTS carbontable_varchar2")
       sql(s"CREATE TABLE carbontable_varchar USING CARBON LOCATION '$writerOutputPath'")
@@ -196,7 +196,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
       case e: Exception =>
         if (SparkUtil.isSparkVersionEqualTo("2.1")) {
           assert(e.getMessage.contains("Operation not allowed: ALTER TABLE ADD COLUMNS"))
-        } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+        } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
           assert(e.getMessage
             .contains("ALTER ADD COLUMNS does not support datasource table with type carbon."))
         }
@@ -339,7 +339,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
       case e: Exception =>
         if (SparkUtil.isSparkVersionEqualTo("2.1")) {
           assert(e.getMessage.contains("Operation not allowed: ALTER TABLE change"))
-        } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+        } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
           assert(e.getMessage
             .contains("ALTER TABLE CHANGE COLUMN is not supported for changing column"))
         }
@@ -373,7 +373,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
       case e: Exception =>
         if (SparkUtil.isSparkVersionEqualTo("2.1")) {
           assert(e.getMessage.contains("ALTER TABLE test_parquet ADD COLUMNS"))
-        } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+        } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
           e.printStackTrace()
           assert(false)
         }
@@ -458,7 +458,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
       case e: Exception =>
         if (SparkUtil.isSparkVersionEqualTo("2.1")) {
           assert(e.getMessage.contains("Operation not allowed: ALTER TABLE CHANGE"))
-        } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+        } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
           e.printStackTrace()
           assert(false)
         }
@@ -485,7 +485,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
       case e: Exception =>
         if (SparkUtil.isSparkVersionEqualTo("2.1")) {
           assert(e.getMessage.contains("Operation not allowed: ALTER TABLE CHANGE"))
-        } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+        } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
           assert(e.getMessage
             .contains("ALTER TABLE CHANGE COLUMN is not supported for changing column"))
         }
@@ -502,7 +502,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
       case e: Exception =>
         if (SparkUtil.isSparkVersionEqualTo("2.1")) {
           assert(e.getMessage.contains("Operation not allowed: ALTER TABLE CHANGE"))
-        } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+        } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
           assert(e.getMessage
             .contains("ALTER TABLE CHANGE COLUMN is not supported for changing column"))
         }
@@ -1582,7 +1582,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
         sql("create table complextable (stringfield string, structfield struct<bytefield: " +
             "byte, floatfield: float>) " +
             s"using carbon options(path '$path')")
-      } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+      } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
         sql("create table complextable (stringfield string, structfield struct<bytefield: " +
             "byte, floatfield: float>) " +
             s"using carbon location '$path'")
@@ -1657,7 +1657,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
             s"string," +
             s"salary long, bytefield byte) using parquet options(path " +
             s"'$path2')")
-      } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+      } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
         sql(s"create table sort_table (age int, height double, name string, address string," +
             s" salary long, bytefield byte) using carbon location '$path'")
         FileFactory
@@ -1718,7 +1718,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
             s"array<byte>, floatarray array<float>) using carbon " +
             s"options( path " +
             s"'$path')")
-      } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+      } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
         sql(s"create table complextable (stringfield string, bytearray " +
             s"array<byte>, floatarray array<float>) using carbon " +
             s"location " +
@@ -1755,7 +1755,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
       sql(s"create table par_table(male boolean, age int, height double, name string, address " +
           s"string," +
           s"salary long, floatField float, bytefield byte) using parquet options(path '$path')")
-    } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       sql(s"create table par_table(male boolean, age int, height double, name string, address " +
           s"string," +
           s"salary long, floatField float, bytefield byte) using parquet location '$path'")
@@ -1933,7 +1933,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
           new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL))
       }
       sql(s"create table carbon_external using carbon options(path '$writerPath')")
-    } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       sql(s"create table carbon_external using carbon location '$writerPath'")
     }
     assert(sql("select * from carbon_external").count() == rowCount)
@@ -1973,7 +1973,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
         }
         sql(s"create table multi_page (a string, b float, c byte) using carbon options(path " +
             s"'$path')")
-      } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+      } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
         sql(s"create table multi_page (a string, b float, c byte) using carbon location " +
             s"'$path'")
       }
@@ -2132,7 +2132,7 @@ class SparkCarbonDataSourceTest extends QueryTest with BeforeAndAfterAll {
 
   test("Don't support load for datasource") {
     sql("DROP TABLE IF EXISTS binaryCarbon")
-    if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       sql(
         s"""
            | CREATE TABLE binaryCarbon(
diff --git a/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/TestCreateTableUsingSparkCarbonFileFormat.scala b/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/TestCreateTableUsingSparkCarbonFileFormat.scala
index 1a39385..fb98a41 100644
--- a/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/TestCreateTableUsingSparkCarbonFileFormat.scala
+++ b/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/TestCreateTableUsingSparkCarbonFileFormat.scala
@@ -118,7 +118,7 @@ class TestCreateTableUsingSparkCarbonFileFormat extends QueryTest with BeforeAnd
     if (SparkUtil.isSparkVersionEqualTo("2.1")) {
       //data source file format
       sql(s"""CREATE TABLE sdkOutputTable USING carbon OPTIONS (PATH '$writerPath') """)
-    } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       //data source file format
       sql(
         s"""CREATE TABLE sdkOutputTable USING carbon LOCATION
@@ -166,7 +166,7 @@ class TestCreateTableUsingSparkCarbonFileFormat extends QueryTest with BeforeAnd
     if (SparkUtil.isSparkVersionEqualTo("2.1")) {
       //data source file format
       sql(s"""CREATE TABLE sdkOutputTable USING carbon OPTIONS (PATH '$writerPath') """)
-    } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       //data source file format
       sql(
         s"""CREATE TABLE sdkOutputTable USING carbon LOCATION
@@ -197,7 +197,7 @@ class TestCreateTableUsingSparkCarbonFileFormat extends QueryTest with BeforeAnd
       if (SparkUtil.isSparkVersionEqualTo("2.1")) {
         //data source file format
         sql(s"""CREATE TABLE sdkOutputTable USING carbon OPTIONS (PATH '$writerPath') """)
-      } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+      } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
         //data source file format
         sql(
           s"""CREATE TABLE sdkOutputTable USING carbon LOCATION
@@ -227,7 +227,7 @@ class TestCreateTableUsingSparkCarbonFileFormat extends QueryTest with BeforeAnd
       if (SparkUtil.isSparkVersionEqualTo("2.1")) {
         //data source file format
         sql(s"""CREATE TABLE sdkOutputTable USING carbon OPTIONS (PATH '$writerPath') """)
-      } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+      } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
         //data source file format
         sql(
           s"""CREATE TABLE sdkOutputTable USING carbon LOCATION
@@ -257,7 +257,7 @@ class TestCreateTableUsingSparkCarbonFileFormat extends QueryTest with BeforeAnd
     if (SparkUtil.isSparkVersionEqualTo("2.1")) {
       //data source file format
       sql(s"""CREATE TABLE sdkOutputTable USING carbon OPTIONS (PATH '$writerPath') """)
-    } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       //data source file format
       sql(
         s"""CREATE TABLE sdkOutputTable USING carbon LOCATION
@@ -306,7 +306,7 @@ class TestCreateTableUsingSparkCarbonFileFormat extends QueryTest with BeforeAnd
     sql("DROP TABLE IF EXISTS test_json")
     if (SparkUtil.isSparkVersionEqualTo("2.1")) {
       sql(s"""CREATE TABLE test_json USING carbon OPTIONS (PATH '$path') """)
-    } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       sql(
         s"""CREATE TABLE test_json USING carbon LOCATION
            |'$path' """.stripMargin)
@@ -326,7 +326,7 @@ class TestCreateTableUsingSparkCarbonFileFormat extends QueryTest with BeforeAnd
     if (SparkUtil.isSparkVersionEqualTo("2.1")) {
       //data source file format
       sql(s"""CREATE TABLE sdkOutputTable USING carbon OPTIONS (PATH '$writerPath') """)
-    } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+    } else if (SparkUtil.isSparkVersionXAndAbove("2.2")) {
       //data source file format
       sql(
         s"""CREATE TABLE sdkOutputTable USING carbon LOCATION
diff --git a/integration/spark/src/test/scala/org/apache/spark/util/SparkUtilTest.scala b/integration/spark/src/test/scala/org/apache/spark/util/SparkUtilTest.scala
index e02681a..39a283b 100644
--- a/integration/spark/src/test/scala/org/apache/spark/util/SparkUtilTest.scala
+++ b/integration/spark/src/test/scala/org/apache/spark/util/SparkUtilTest.scala
@@ -24,18 +24,18 @@ class SparkUtilTest extends FunSuite{
 
   test("Test Spark Version API with X and Above") {
     if (SPARK_VERSION.startsWith("2.1")) {
-      assert(SparkUtil.isSparkVersionXandAbove("2.1"))
-      assert(!SparkUtil.isSparkVersionXandAbove("2.2"))
-      assert(!SparkUtil.isSparkVersionXandAbove("2.3"))
+      assert(SparkUtil.isSparkVersionXAndAbove("2.1"))
+      assert(!SparkUtil.isSparkVersionXAndAbove("2.2"))
+      assert(!SparkUtil.isSparkVersionXAndAbove("2.3"))
     } else if (SPARK_VERSION.startsWith("2.2")) {
-      assert(SparkUtil.isSparkVersionXandAbove("2.1"))
-      assert(SparkUtil.isSparkVersionXandAbove("2.2"))
-      assert(!SparkUtil.isSparkVersionXandAbove("2.3"))
+      assert(SparkUtil.isSparkVersionXAndAbove("2.1"))
+      assert(SparkUtil.isSparkVersionXAndAbove("2.2"))
+      assert(!SparkUtil.isSparkVersionXAndAbove("2.3"))
     } else {
-      assert(SparkUtil.isSparkVersionXandAbove("2.1"))
-      assert(SparkUtil.isSparkVersionXandAbove("2.2"))
-      assert(SparkUtil.isSparkVersionXandAbove("2.3") ||
-             SparkUtil.isSparkVersionXandAbove("2.4"))
+      assert(SparkUtil.isSparkVersionXAndAbove("2.1"))
+      assert(SparkUtil.isSparkVersionXAndAbove("2.2"))
+      assert(SparkUtil.isSparkVersionXAndAbove("2.3") ||
+             SparkUtil.isSparkVersionXAndAbove("2.4"))
     }
   }
 
@@ -52,7 +52,7 @@ class SparkUtilTest extends FunSuite{
       assert(!SparkUtil.isSparkVersionEqualTo("2.1"))
       assert(!SparkUtil.isSparkVersionEqualTo("2.2"))
       assert(SparkUtil.isSparkVersionEqualTo("2.3") ||
-             SparkUtil.isSparkVersionXandAbove("2.4"))
+             SparkUtil.isSparkVersionXAndAbove("2.4"))
     }
   }
 }
\ No newline at end of file
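
For context on the second rename in this patch (isSparkVersionXandAbove -> isSparkVersionXAndAbove): the helper exercised by SparkUtilTest compares the running Spark version against a target prefix such as "2.2". The sketch below is illustrative only and is not the actual org.apache.spark.util.SparkUtil implementation; the object name, the comparison logic, and the handling of version suffixes are all assumptions made for this example.

    // Illustrative sketch only -- not the real SparkUtil code.
    // Shows one way a "version X and above" check could be written; names/logic are assumptions.
    object VersionCheckSketch {

      // Split "2.4.0-SNAPSHOT" into Seq(2, 4, 0), dropping any non-numeric suffix.
      private def parts(v: String): Seq[Int] =
        v.split("\\.").toSeq
          .map(_.takeWhile(_.isDigit))
          .filter(_.nonEmpty)
          .map(_.toInt)

      // True when `current` >= `target`, comparing numeric components left to right;
      // missing components are treated as 0 (so "2.2.0" counts as "2.2 and above").
      def isVersionXAndAbove(current: String, target: String): Boolean = {
        val (c, t) = (parts(current), parts(target))
        val n = math.max(c.length, t.length)
        val firstDiff = c.padTo(n, 0).zip(t.padTo(n, 0))
          .map { case (a, b) => a.compare(b) }
          .find(_ != 0)
          .getOrElse(0)
        firstDiff >= 0
      }

      def main(args: Array[String]): Unit = {
        assert(isVersionXAndAbove("2.3.4", "2.2"))   // running 2.3.4, target 2.2
        assert(!isVersionXAndAbove("2.1.1", "2.2"))  // running 2.1.1, target 2.2
        assert(isVersionXAndAbove("2.2.0", "2.2"))   // equal versions count as "and above"
        println("version check sketch OK")
      }
    }

With comparison logic of this shape, the renamed assertions in SparkUtilTest (for example, isSparkVersionXAndAbove("2.2") holding on Spark 2.3 but not on 2.1) follow directly.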

