kudu-commits mailing list archives

From wdberke...@apache.org
Subject [kudu] branch master updated: [backup] Factor out kudu-backup-tools module
Date Wed, 15 May 2019 23:20:28 GMT
This is an automated email from the ASF dual-hosted git repository.

wdberkeley pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git


The following commit(s) were added to refs/heads/master by this push:
     new 64365d3  [backup] Factor out kudu-backup-tools module
64365d3 is described below

commit 64365d3fe116b8217f93731b1443cb85d67c8918
Author: Grant Henke <granthenke@apache.org>
AuthorDate: Mon May 13 09:40:25 2019 -0500

    [backup] Factor out kudu-backup-tools module
    
    This patch refactors the kudu-backup module to break out
    a new kudu-backup-tools module. A future patch will use it
    to create a more lightweight CLI tool that shades in all
    of its dependencies.
    
    While refactoring, I also broke out SessionIO into
    BackupIO and BackupUtils.
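    
    The net effect at call sites: where code previously constructed
    a SessionIO from a SparkSession, it now passes only the pieces
    it needs. A minimal sketch (session, options, table, and
    incremental stand in for values already in scope at each call
    site):
    
        // Before: file I/O and the schema helpers were coupled to Spark.
        // val io = new SessionIO(session, options)
        // val schema = io.dataSchema(table.getSchema, incremental)
    
        // After: BackupIO takes a Hadoop Configuration and a root path;
        // the Spark-specific schema helper moved to BackupUtils.
        val io = new BackupIO(session.sparkContext.hadoopConfiguration,
          options.rootPath)
        val schema = BackupUtils.dataSchema(table.getSchema, incremental)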
    
    Change-Id: I6ef2c21fbc31b11b20f0588b6de3cd4998b67443
    Reviewed-on: http://gerrit.cloudera.org:8080/13320
    Tested-by: Kudu Jenkins
    Reviewed-by: Will Berkeley <wdberkeley@gmail.com>
    Reviewed-by: Mike Percy <mpercy@apache.org>
---
 java/gradle/shadow.gradle                          | 20 +++++---
 .../build.gradle                                   | 25 +++++-----
 .../src/main/protobuf/backup.proto                 |  0
 .../scala/org/apache/kudu/backup/BackupGraph.scala |  0
 .../scala/org/apache/kudu/backup/BackupIO.scala}   | 43 ++----------------
 .../org/apache/kudu/backup/TableMetadata.scala     | 12 +++--
 .../src/test/resources/log4j.properties            | 23 ++++++++++
 .../org/apache/kudu/backup/TestBackupGraph.scala   | 40 ++++++++++------
 java/kudu-backup/build.gradle                      | 20 ++------
 .../scala/org/apache/kudu/backup/BackupUtils.scala | 53 ++++++++++++++++++++++
 .../scala/org/apache/kudu/backup/KuduBackup.scala  |  7 +--
 .../scala/org/apache/kudu/backup/KuduRestore.scala |  4 +-
 .../scala/org/apache/kudu/backup/Options.scala     | 12 +----
 .../org/apache/kudu/backup/TestKuduBackup.scala    |  4 +-
 java/settings.gradle                               |  1 +
 15 files changed, 156 insertions(+), 108 deletions(-)

diff --git a/java/gradle/shadow.gradle b/java/gradle/shadow.gradle
index f89b98d..63e7c11 100644
--- a/java/gradle/shadow.gradle
+++ b/java/gradle/shadow.gradle
@@ -41,12 +41,20 @@ configurations.archives.artifacts.removeAll {
   it instanceof ArchivePublishArtifact && it.archiveTask == jar
 }
 
-shadowJar {
-  dependencies {
-    // Our shaded jars always try to pull in the slf4j api from
-    // kudu-client, though we never want it included. Excluding it
-    // here prevents the need to always list it.
-    exclude dependency(libs.slf4jApi)
+// Define an overridable property to indicate tool jars that should
+// include all of their dependencies.
+// We use this below to ensure slf4j is included.
+shadow.ext {
+  isToolJar = false
+}
+if (!shadow.isToolJar) {
+  shadowJar {
+    dependencies {
+      // Our shaded library jars always try to pull in the slf4j api from
+      // kudu-client, though we never want it included. Excluding it
+      // here prevents the need to always list it.
+      exclude dependency(libs.slf4jApi)
+    }
   }
 }
 
diff --git a/java/kudu-backup/build.gradle b/java/kudu-backup-tools/build.gradle
similarity index 71%
copy from java/kudu-backup/build.gradle
copy to java/kudu-backup-tools/build.gradle
index 53b59af..850060c 100644
--- a/java/kudu-backup/build.gradle
+++ b/java/kudu-backup-tools/build.gradle
@@ -19,29 +19,29 @@ apply plugin: "scala"
 apply from: "$rootDir/gradle/protobuf.gradle"
 apply from: "$rootDir/gradle/shadow.gradle"
 
+// Mark this as a tool jar so shadow doesn't exclude any dependencies.
+shadow {
+  isToolJar = true
+}
+
 dependencies {
   compile project(path: ":kudu-client", configuration: "shadow")
-  compile project(path: ":kudu-spark", configuration: "shadow")
   compile libs.protobufJava
   compile (libs.protobufJavaUtil) {
     // Make sure wrong Guava version is not pulled in.
     exclude group: "com.google.guava", module: "guava"
   }
+  compile libs.hadoopCommon
   compile (libs.scopt)  {
     // Make sure wrong Scala version is not pulled in.
     exclude group: "org.scala-lang", module: "scala-library"
   }
-  // TODO(KUDU-2500): Spark uses reflection which requires the annotations at runtime.
-  compile libs.yetusAnnotations
+  compile libs.scalaLibrary
+  compile libs.slf4jApi
 
-  provided libs.scalaLibrary
-  provided libs.sparkAvro
-  provided libs.sparkCore
-  provided libs.sparkSql
-  provided libs.slf4jApi
+  optional libs.yetusAnnotations
 
   testCompile project(path: ":kudu-test-utils", configuration: "shadow")
-  testCompile project(path: ":kudu-spark", configuration: "test")
   testCompile libs.junit
   testCompile libs.log4j
   testCompile libs.scalatest
@@ -57,13 +57,10 @@ sourceSets {
   }
 }
 
-// Adjust the artifact name to match the maven build.
-archivesBaseName = "kudu-backup${versions.sparkBase}_${versions.scalaBase}"
-
-// kudu-backup has no public Javadoc.
+// kudu-backup-tools has no public Javadoc.
 javadoc {
   enabled = false
 }
 
-// Skip publishing kudu-backup until it's ready to be supported long-term.
+// Skip publishing kudu-backup-tools until it's ready to be supported long-term.
 uploadArchives.enabled = false
\ No newline at end of file
diff --git a/java/kudu-backup/src/main/protobuf/backup.proto b/java/kudu-backup-tools/src/main/protobuf/backup.proto
similarity index 100%
rename from java/kudu-backup/src/main/protobuf/backup.proto
rename to java/kudu-backup-tools/src/main/protobuf/backup.proto
diff --git a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/BackupGraph.scala b/java/kudu-backup-tools/src/main/scala/org/apache/kudu/backup/BackupGraph.scala
similarity index 100%
rename from java/kudu-backup/src/main/scala/org/apache/kudu/backup/BackupGraph.scala
rename to java/kudu-backup-tools/src/main/scala/org/apache/kudu/backup/BackupGraph.scala
diff --git a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/SessionIO.scala b/java/kudu-backup-tools/src/main/scala/org/apache/kudu/backup/BackupIO.scala
similarity index 84%
rename from java/kudu-backup/src/main/scala/org/apache/kudu/backup/SessionIO.scala
rename to java/kudu-backup-tools/src/main/scala/org/apache/kudu/backup/BackupIO.scala
index 82578ad..43a359d 100644
--- a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/SessionIO.scala
+++ b/java/kudu-backup-tools/src/main/scala/org/apache/kudu/backup/BackupIO.scala
@@ -26,15 +26,9 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.FileSystem
 import org.apache.hadoop.fs.LocatedFileStatus
 import org.apache.hadoop.fs.Path
-import org.apache.kudu.Schema
 import org.apache.kudu.backup.Backup.TableMetadataPB
-import org.apache.kudu.backup.SessionIO._
+import org.apache.kudu.backup.BackupIO._
 import org.apache.kudu.client.KuduTable
-import org.apache.kudu.spark.kudu.SparkUtil
-import org.apache.spark.sql.SparkSession
-import org.apache.spark.sql.types.ByteType
-import org.apache.spark.sql.types.StructField
-import org.apache.spark.sql.types.StructType
 import org.apache.yetus.audience.InterfaceAudience
 import org.apache.yetus.audience.InterfaceStability
 import org.slf4j.Logger
@@ -65,46 +59,19 @@ import scala.collection.mutable
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-class SessionIO(val session: SparkSession, options: CommonOptions) {
+class BackupIO(val conf: Configuration, rootPathStr: String) {
   val log: Logger = LoggerFactory.getLogger(getClass)
 
-  val conf: Configuration = session.sparkContext.hadoopConfiguration
-  val rootPath: Path = new Path(options.rootPath)
+  val rootPath: Path = new Path(rootPathStr)
   val fs: FileSystem = rootPath.getFileSystem(conf)
 
   /**
-   * Returns the Spark schema for backup data based on the Kudu Schema.
-   * Additionally handles adding the RowAction column for incremental backup/restore.
-   */
-  def dataSchema(schema: Schema, includeRowAction: Boolean = true): StructType = {
-    var fields = SparkUtil.sparkSchema(schema).fields
-    if (includeRowAction) {
-      val changeTypeField = generateRowActionColumn(schema)
-      fields = fields ++ Seq(changeTypeField)
-    }
-    StructType(fields)
-  }
-
-  /**
-   * Generates a RowAction column and handles column name collisions.
-   * The column name can vary because it's accessed positionally.
-   */
-  private def generateRowActionColumn(schema: Schema): StructField = {
-    var columnName = "backup_row_action"
-    // If the column already exists and we need to pick an alternate column name.
-    while (schema.hasColumn(columnName)) {
-      columnName += "_"
-    }
-    StructField(columnName, ByteType)
-  }
-
-  /**
    * Return the path to the table directory.
    */
   def tablePath(table: KuduTable): Path = {
     val tableName = URLEncoder.encode(table.getName, "UTF-8")
     val dirName = s"${table.getTableId}-$tableName"
-    new Path(options.rootPath, dirName)
+    new Path(rootPath, dirName)
   }
 
   /**
@@ -269,7 +236,7 @@ class SessionIO(val session: SparkSession, options: CommonOptions) {
   }
 }
 
-object SessionIO {
+object BackupIO {
   // The name of the metadata file within a backup directory.
   val MetadataFileName = ".kudu-metadata.json"
 }
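
With the SparkSession dependency gone, BackupIO can be constructed
anywhere a Hadoop Configuration is available, which is what will let
the planned CLI tool reuse it. A minimal sketch (the master address,
table name, and root path are illustrative, not part of this patch):

    import org.apache.hadoop.conf.Configuration
    import org.apache.kudu.client.KuduClient

    val client = new KuduClient.KuduClientBuilder("master:7051").build()
    val table = client.openTable("my_table")
    val io = new BackupIO(new Configuration(), "hdfs:///backups")
    // Resolves to hdfs:///backups/<tableId>-my_table, with the table
    // name URL-encoded.
    val tableDir = io.tablePath(table)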
diff --git a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/TableMetadata.scala b/java/kudu-backup-tools/src/main/scala/org/apache/kudu/backup/TableMetadata.scala
similarity index 98%
rename from java/kudu-backup/src/main/scala/org/apache/kudu/backup/TableMetadata.scala
rename to java/kudu-backup-tools/src/main/scala/org/apache/kudu/backup/TableMetadata.scala
index cfd3933..6fc49d3 100644
--- a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/TableMetadata.scala
+++ b/java/kudu-backup-tools/src/main/scala/org/apache/kudu/backup/TableMetadata.scala
@@ -49,7 +49,11 @@ object TableMetadata {
   val MetadataFileName = ".kudu-metadata.json"
   val MetadataVersion = 1
 
-  def getTableMetadata(table: KuduTable, options: BackupOptions): TableMetadataPB = {
+  def getTableMetadata(
+      table: KuduTable,
+      fromMs: Long,
+      toMs: Long,
+      format: String): TableMetadataPB = {
     val columnIds = new util.HashMap[String, Integer]()
     val columns = table.getSchema.getColumns.asScala.map { col =>
       columnIds.put(col.getName, table.getSchema.getColumnId(col.getName))
@@ -87,9 +91,9 @@ object TableMetadata {
     TableMetadataPB
       .newBuilder()
       .setVersion(MetadataVersion)
-      .setFromMs(options.fromMs)
-      .setToMs(options.toMs)
-      .setDataFormat(options.format)
+      .setFromMs(fromMs)
+      .setToMs(toMs)
+      .setDataFormat(format)
       .setTableName(table.getName)
       .setTableId(table.getTableId)
       .addAllColumns(columns.asJava)
diff --git a/java/kudu-backup-tools/src/test/resources/log4j.properties b/java/kudu-backup-tools/src/test/resources/log4j.properties
new file mode 100644
index 0000000..129752c
--- /dev/null
+++ b/java/kudu-backup-tools/src/test/resources/log4j.properties
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+log4j.rootLogger = INFO, out
+log4j.appender.out = org.apache.log4j.ConsoleAppender
+log4j.appender.out.layout = org.apache.log4j.PatternLayout
+log4j.appender.out.layout.ConversionPattern = %d{HH:mm:ss.SSS} [%p - %t] (%F:%L) %m%n
+
+log4j.logger.org.apache.kudu = DEBUG
diff --git a/java/kudu-backup/src/test/scala/org/apache/kudu/backup/TestBackupGraph.scala b/java/kudu-backup-tools/src/test/scala/org/apache/kudu/backup/TestBackupGraph.scala
similarity index 82%
rename from java/kudu-backup/src/test/scala/org/apache/kudu/backup/TestBackupGraph.scala
rename to java/kudu-backup-tools/src/test/scala/org/apache/kudu/backup/TestBackupGraph.scala
index 480d2eb..314a063 100644
--- a/java/kudu-backup/src/test/scala/org/apache/kudu/backup/TestBackupGraph.scala
+++ b/java/kudu-backup-tools/src/test/scala/org/apache/kudu/backup/TestBackupGraph.scala
@@ -16,19 +16,40 @@
 // under the License.
 package org.apache.kudu.backup
 
+import com.google.common.collect.ImmutableList
+import org.apache.kudu.client.CreateTableOptions
 import org.apache.kudu.client.KuduTable
-import org.apache.kudu.spark.kudu._
+import org.apache.kudu.test.ClientTestUtil.getBasicSchema
+import org.apache.kudu.test.KuduTestHarness
 import org.junit.Assert._
+import org.junit.Before
+import org.junit.Rule
 import org.junit.Test
 import org.slf4j.Logger
 import org.slf4j.LoggerFactory
 
-class TestBackupGraph extends KuduTestSuite {
+import scala.annotation.meta.getter
+
+class TestBackupGraph {
   val log: Logger = LoggerFactory.getLogger(getClass)
 
+  var tableName: String = "TestBackupGraph"
+  var table: KuduTable = _
+
+  @(Rule @getter)
+  val harness = new KuduTestHarness
+
+  @Before
+  def setUp(): Unit = {
+    // Create the test table.
+    val builder = new CreateTableOptions().setNumReplicas(3)
+    builder.setRangePartitionColumns(ImmutableList.of("key"))
+    table = harness.getClient.createTable(tableName, getBasicSchema, builder)
+  }
+
   @Test
   def testSimpleBackupGraph() {
-    val graph = new BackupGraph(table.getName)
+    val graph = new BackupGraph(table.getTableId)
     val full = createBackupVertex(table, 0, 1)
     graph.addBackup(full)
 
@@ -52,7 +73,7 @@ class TestBackupGraph extends KuduTestSuite {
 
   @Test
   def testForkingBackupGraph() {
-    val graph = new BackupGraph(table.getName)
+    val graph = new BackupGraph(table.getTableId)
     val full = createBackupVertex(table, 0, 1)
     graph.addBackup(full)
     // Duplicate fromMs of 1 creates a fork in the graph.
@@ -81,7 +102,7 @@ class TestBackupGraph extends KuduTestSuite {
 
   @Test
   def testMultiFullBackupGraph() {
-    val graph = new BackupGraph(table.getName)
+    val graph = new BackupGraph(table.getTableId)
     val full1 = createBackupVertex(table, 0, 1)
     graph.addBackup(full1)
     val inc1 = createBackupVertex(table, 1, 2)
@@ -131,14 +152,7 @@ class TestBackupGraph extends KuduTestSuite {
   }
 
   private def createBackupVertex(table: KuduTable, fromMs: Long, toMs: Long): BackupNode = {
-    val options = new BackupOptions(
-      tables = Seq(table.getName),
-      rootPath = "foo/path",
-      "fooAddresses",
-      fromMs = fromMs,
-      toMs = toMs
-    )
-    val metadata = TableMetadata.getTableMetadata(table, options)
+    val metadata = TableMetadata.getTableMetadata(table, fromMs, toMs, "parquet")
     BackupNode(null, metadata)
   }
 }
diff --git a/java/kudu-backup/build.gradle b/java/kudu-backup/build.gradle
index 53b59af..514c29d 100644
--- a/java/kudu-backup/build.gradle
+++ b/java/kudu-backup/build.gradle
@@ -16,17 +16,16 @@
 // under the License.
 
 apply plugin: "scala"
-apply from: "$rootDir/gradle/protobuf.gradle"
 apply from: "$rootDir/gradle/shadow.gradle"
 
 dependencies {
+  // Note: We don't use the shaded version, so we can control the dependencies.
+  compile(project(path: ":kudu-backup-tools")) {
+    // Ensure we use the hadoop-client provided by Spark to avoid any compatibility issues.
+    exclude group: "org.apache.hadoop", module: "hadoop-common"
+  }
   compile project(path: ":kudu-client", configuration: "shadow")
   compile project(path: ":kudu-spark", configuration: "shadow")
-  compile libs.protobufJava
-  compile (libs.protobufJavaUtil) {
-    // Make sure wrong Guava version is not pulled in.
-    exclude group: "com.google.guava", module: "guava"
-  }
   compile (libs.scopt)  {
     // Make sure wrong Scala version is not pulled in.
     exclude group: "org.scala-lang", module: "scala-library"
@@ -48,15 +47,6 @@ dependencies {
   testCompile libs.slf4jLog4j12
 }
 
-// Add protobuf files to the proto source set.
-sourceSets {
-  main {
-    proto {
-      srcDir "src/main/protobuf"
-    }
-  }
-}
-
 // Adjust the artifact name to match the maven build.
 archivesBaseName = "kudu-backup${versions.sparkBase}_${versions.scalaBase}"
 
diff --git a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/BackupUtils.scala b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/BackupUtils.scala
new file mode 100644
index 0000000..4b1def6
--- /dev/null
+++ b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/BackupUtils.scala
@@ -0,0 +1,53 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.kudu.backup
+
+import org.apache.kudu.Schema
+import org.apache.kudu.spark.kudu.SparkUtil
+import org.apache.spark.sql.types.ByteType
+import org.apache.spark.sql.types.StructField
+import org.apache.spark.sql.types.StructType
+
+object BackupUtils {
+
+  /**
+   * Returns the Spark schema for backup data based on the Kudu Schema.
+   * Additionally handles adding the RowAction column for incremental backup/restore.
+   */
+  def dataSchema(schema: Schema, includeRowAction: Boolean = true): StructType = {
+    var fields = SparkUtil.sparkSchema(schema).fields
+    if (includeRowAction) {
+      val changeTypeField = generateRowActionColumn(schema)
+      fields = fields ++ Seq(changeTypeField)
+    }
+    StructType(fields)
+  }
+
+  /**
+   * Generates a RowAction column and handles column name collisions.
+   * The column name can vary because it's accessed positionally.
+   */
+  private def generateRowActionColumn(schema: Schema): StructField = {
+    var columnName = "backup_row_action"
+    // If the column already exists, we need to pick an alternate column name.
+    while (schema.hasColumn(columnName)) {
+      columnName += "_"
+    }
+    StructField(columnName, ByteType)
+  }
+
+}
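
Because dataSchema is now a pure function of the Kudu schema, it can be
exercised without a SparkSession. A minimal sketch (kuduSchema stands in
for any org.apache.kudu.Schema):

    // Full backups keep the table's columns as-is; incremental ones get
    // one extra ByteType row-action column appended at the end. The new
    // column is named "backup_row_action", with underscores appended
    // until it avoids any collision with an existing column.
    val fullSchema = BackupUtils.dataSchema(kuduSchema, includeRowAction = false)
    val incrSchema = BackupUtils.dataSchema(kuduSchema)
    assert(incrSchema.fields.length == fullSchema.fields.length + 1)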
diff --git a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduBackup.scala b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduBackup.scala
index 2b0d84b..180e7c8 100644
--- a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduBackup.scala
+++ b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduBackup.scala
@@ -41,7 +41,7 @@ object KuduBackup {
         options.kuduMasterAddresses,
         session.sparkContext
       )
-    val io = new SessionIO(session, options)
+    val io = new BackupIO(session.sparkContext.hadoopConfiguration, options.rootPath)
 
     // Read the required backup metadata.
     val backupGraphs =
@@ -97,7 +97,7 @@ object KuduBackup {
       val rdd = new KuduBackupRDD(table, tableOptions, incremental, context, session.sparkContext)
       val df =
         session.sqlContext
-          .createDataFrame(rdd, io.dataSchema(table.getSchema, incremental))
+          .createDataFrame(rdd, BackupUtils.dataSchema(table.getSchema, incremental))
 
       // Write the data to the backup path.
       // The backup path contains the timestampMs and should not already exist.
@@ -108,7 +108,8 @@ object KuduBackup {
 
       // Generate and output the new metadata for this table.
       // The existence of metadata indicates this backup was successful.
-      val tableMetadata = TableMetadata.getTableMetadata(table, tableOptions)
+      val tableMetadata = TableMetadata
+        .getTableMetadata(table, tableOptions.fromMs, tableOptions.toMs, tableOptions.format)
       io.writeTableMetadata(tableMetadata, metadataPath)
     }
   }
diff --git a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduRestore.scala b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduRestore.scala
index 7f1e515..1a5886f 100644
--- a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduRestore.scala
+++ b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduRestore.scala
@@ -47,7 +47,7 @@ object KuduRestore {
         options.kuduMasterAddresses,
         session.sparkContext
       )
-    val io = new SessionIO(session, options)
+    val io = new BackupIO(session.sparkContext.hadoopConfiguration, options.rootPath)
 
     // Read the required backup metadata.
     val backupGraphs = io.readBackupGraphsByTableName(options.tables, options.timestampMs)
@@ -80,7 +80,7 @@ object KuduRestore {
             createTableRangePartitionByRangePartition(restoreName, lastMetadata, context)
           }
         }
-        val backupSchema = io.dataSchema(TableMetadata.getKuduSchema(metadata))
+        val backupSchema = BackupUtils.dataSchema(TableMetadata.getKuduSchema(metadata))
         val rowActionCol = backupSchema.fields.last.name
         val table = context.syncClient.openTable(restoreName)
 
diff --git a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/Options.scala b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/Options.scala
index df9eaee..8bcbef4 100644
--- a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/Options.scala
+++ b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/Options.scala
@@ -25,14 +25,6 @@ import scopt.OptionParser
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-trait CommonOptions {
-  val tables: Seq[String]
-  val rootPath: String
-  val kuduMasterAddresses: String
-}
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
 case class BackupOptions(
     tables: Seq[String],
     rootPath: String,
@@ -46,7 +38,6 @@ case class BackupOptions(
     scanLeaderOnly: Boolean = BackupOptions.DefaultScanLeaderOnly,
     scanPrefetching: Boolean = BackupOptions.DefaultScanPrefetching,
     keepAlivePeriodMs: Long = BackupOptions.DefaultKeepAlivePeriodMs)
-    extends CommonOptions
 
 object BackupOptions {
   val DefaultForceFull: Boolean = false
@@ -166,8 +157,7 @@ case class RestoreOptions(
     kuduMasterAddresses: String = InetAddress.getLocalHost.getCanonicalHostName,
     tableSuffix: String = "",
     createTables: Boolean = RestoreOptions.DefaultCreateTables,
-    timestampMs: Long = System.currentTimeMillis()
-) extends CommonOptions
+    timestampMs: Long = System.currentTimeMillis())
 
 object RestoreOptions {
   val DefaultCreateTables: Boolean = true
diff --git a/java/kudu-backup/src/test/scala/org/apache/kudu/backup/TestKuduBackup.scala b/java/kudu-backup/src/test/scala/org/apache/kudu/backup/TestKuduBackup.scala
index 5c769ce..8a066ac 100644
--- a/java/kudu-backup/src/test/scala/org/apache/kudu/backup/TestKuduBackup.scala
+++ b/java/kudu-backup/src/test/scala/org/apache/kudu/backup/TestKuduBackup.scala
@@ -572,7 +572,7 @@ class TestKuduBackup extends KuduTestSuite {
       options: BackupOptions,
       expectedRowCount: Long,
       expectIncremental: Boolean): Unit = {
-    val io = new SessionIO(ss, options)
+    val io = new BackupIO(ss.sparkContext.hadoopConfiguration, options.rootPath)
     val tableName = options.tables.head
     val table = harness.getClient.openTable(tableName)
     val backupPath = io.backupPath(table, options.toMs)
@@ -587,7 +587,7 @@ class TestKuduBackup extends KuduTestSuite {
     }
 
     // Verify the output data.
-    val schema = io.dataSchema(table.getSchema, expectIncremental)
+    val schema = BackupUtils.dataSchema(table.getSchema, expectIncremental)
     val df = ss.sqlContext.read
       .format(metadata.getDataFormat)
       .schema(schema)
diff --git a/java/settings.gradle b/java/settings.gradle
index 96ca03f..145c3c8 100644
--- a/java/settings.gradle
+++ b/java/settings.gradle
@@ -20,6 +20,7 @@
 
 rootProject.name = "kudu-parent"
 include "kudu-backup"
+include "kudu-backup-tools"
 include "kudu-client"
 include "kudu-client-tools"
 include "kudu-flume-sink"

