hive-issues mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From "ASF GitHub Bot (Jira)" <j...@apache.org>
Subject [jira] [Work logged] (HIVE-23539) Optimize data copy during repl load operation for HDFS based staging location
Date Tue, 16 Jun 2020 13:54:00 GMT

     [ https://issues.apache.org/jira/browse/HIVE-23539?focusedWorklogId=446527&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-446527
]

ASF GitHub Bot logged work on HIVE-23539:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 16/Jun/20 13:53
            Start Date: 16/Jun/20 13:53
    Worklog Time Spent: 10m 
      Work Description: aasha commented on a change in pull request #1084:
URL: https://github.com/apache/hive/pull/1084#discussion_r440855768



##########
File path: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
##########
@@ -1539,15 +1539,14 @@ public void testCheckPointingWithSourceTableDataInserted() throws
Throwable {
             .run("insert into t2 values (24)")
             .run("insert into t1 values (4)")
             .dump(primaryDbName, dumpClause);
-
+    assertEquals(modifiedTimeTable1CopyFile, fs.listStatus(tablet1Path)[0].getModificationTime());

Review comment:
       Why is this changed?

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
##########
@@ -243,29 +246,34 @@ private void addPartition(boolean hasMorePartitions, AlterTableAddPartitionDesc
               : LoadFileType.OVERWRITE_EXISTING);
       stagingDir = PathUtils.getExternalTmpPath(replicaWarehousePartitionLocation, context.pathInfo);
     }
-
-    Task<?> copyTask = ReplCopyTask.getLoadCopyTask(
-        event.replicationSpec(),
-        new Path(event.dataPath() + Path.SEPARATOR + getPartitionName(sourceWarehousePartitionLocation)),
-        stagingDir,
-        context.hiveConf, false, false
-    );
-
+    Path partDataSrc = new Path(event.dataPath() + File.separator + getPartitionName(sourceWarehousePartitionLocation));
+    Path moveSource = performOnlyMove ? partDataSrc : stagingDir;
     Task<?> movePartitionTask = null;
     if (loadFileType != LoadFileType.IGNORE) {
       // no need to create move task, if file is moved directly to target location.
-      movePartitionTask = movePartitionTask(table, partSpec, stagingDir, loadFileType);
+      movePartitionTask = movePartitionTask(table, partSpec, moveSource, loadFileType);
     }
-
-    if (ptnRootTask == null) {
-      ptnRootTask = copyTask;
+    if (performOnlyMove) {
+      if (ptnRootTask == null) {
+        ptnRootTask = addPartTask;

Review comment:
       How was addPartTask added before your change?

##########
File path: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java
##########
@@ -179,6 +179,63 @@ public void externalTableReplicationWithLocalStaging() throws Throwable
{
             .verifyResult("800");
   }
 
+  @Test
+  public void testHdfsMoveOptimizationOnTargetStaging() throws Throwable {

Review comment:
       Check for empty staging as the data is moved.

##########
File path: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
##########
@@ -1596,6 +1595,10 @@ public void testCheckPointingWithNewTablesAdded() throws Throwable
{
             .run("insert into t3 values (3)")
             .dump(primaryDbName, dumpClause);
 
+    assertEquals(modifiedTimeTable1, fs.getFileStatus(tablet1Path).getModificationTime());

Review comment:
       Why is this changed?

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
##########
@@ -320,7 +334,7 @@ private void addPartition(boolean hasMorePartitions, AlterTableAddPartitionDesc
   private String getPartitionName(Path partitionMetadataFullPath) {
     //Get partition name by removing the metadata base path.
     //Needed for getting the data path
-    return partitionMetadataFullPath.toString().substring(event.metadataPath().toString().length());
+    return partitionMetadataFullPath.toString().substring(event.metadataPath().toString().length()
+ 1);

Review comment:
       Can we split at '/' instead? The current substring approach may be error-prone.

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
##########
@@ -686,9 +700,25 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String
dbName,
         loadTableWork.setInheritTableSpecs(false);
         moveWork.setLoadTableWork(loadTableWork);
       }
+      Task<?> loadPartTask = TaskFactory.get(moveWork, x.getConf());
+      if (performOnlyMove) {
+        if (addPartTask != null) {
+          addPartTask.addDependentTask(loadPartTask);
+        }
+        x.getTasks().add(loadPartTask);
+        return addPartTask == null ? loadPartTask : addPartTask;
+      }
+
+      Task<?> copyTask = null;
+      if (replicationSpec.isInReplicationScope()) {

Review comment:
       This check is already done before. Can be simplified

##########
File path: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java
##########
@@ -179,6 +179,63 @@ public void externalTableReplicationWithLocalStaging() throws Throwable
{
             .verifyResult("800");
   }
 
+  @Test

Review comment:
       Negative test for different staging when the data is not moved.

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
##########
@@ -447,16 +448,16 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String
dbName,
     assert table != null;
     assert table.getParameters() != null;
     Path dataPath = new Path(fromURI.toString(), EximUtil.DATA_PATH_NAME);
-    Path destPath = null, loadPath = null;
+    Path destPath = null;
     LoadFileType lft;
     boolean isAutoPurge = false;
     boolean needRecycle = false;
     boolean copyToMigratedTxnTable = replicationSpec.isMigratingToTxnTable();
-
+    boolean performOnlyMove = replicationSpec.isInReplicationScope() && Utils.onSameHDFSFileSystem(dataPath,
tgtPath);

Review comment:
       Can be added as a separate method. 

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
##########
@@ -498,37 +498,43 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String
dbName,
       );
     }
 
-    Task<?> copyTask = null;
-    if (replicationSpec.isInReplicationScope()) {
-      copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, destPath, x.getConf(),
-              isAutoPurge, needRecycle, copyToMigratedTxnTable, false);
-    } else {
-      copyTask = TaskFactory.get(new CopyWork(dataPath, destPath, false));
-    }
-
     MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), null, null, false);
-
-
+    /**
+     * If the Repl staging directory ('hive.repl.rootdir') is on the target cluster itself
and the FS scheme is hdfs,
+     * data is moved directly from Repl staging data dir of the table to the table's location
on target warehouse.
+     */
+    Path moveDataSrc = performOnlyMove ? dataPath : destPath;
     if (replicationSpec.isInReplicationScope() && AcidUtils.isTransactionalTable(table))
{

Review comment:
       The replicationSpec.isInReplicationScope() check is already done here.

##########
File path: ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
##########
@@ -686,9 +687,25 @@ private static ImportTableDesc getBaseCreateTableDescFromTable(String
dbName,
         loadTableWork.setInheritTableSpecs(false);
         moveWork.setLoadTableWork(loadTableWork);
       }
+      Task<?> loadPartTask = TaskFactory.get(moveWork, x.getConf());
+      if (performOnlyMove) {
+        if (addPartTask != null) {
+          addPartTask.addDependentTask(loadPartTask);
+        }
+        x.getTasks().add(loadPartTask);
+        return addPartTask == null ? loadPartTask : addPartTask;

Review comment:
       There are already multiple return statements in the same method. 




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 446527)
    Time Spent: 1h 40m  (was: 1.5h)

> Optimize data copy during repl load operation for HDFS based staging location
> -----------------------------------------------------------------------------
>
>                 Key: HIVE-23539
>                 URL: https://issues.apache.org/jira/browse/HIVE-23539
>             Project: Hive
>          Issue Type: Improvement
>            Reporter: Pravin Sinha
>            Assignee: Pravin Sinha
>            Priority: Major
>              Labels: pull-request-available
>         Attachments: HIVE-23539.01.patch
>
>          Time Spent: 1h 40m
>  Remaining Estimate: 0h
>




--
This message was sent by Atlassian Jira
(v8.3.4#803005)

Mime
View raw message