kudu-commits mailing list archives

From ale...@apache.org
Subject [kudu] branch master updated: [master_sentry-itest] one more scenario for authz cache
Date Tue, 14 May 2019 01:17:38 GMT
This is an automated email from the ASF dual-hosted git repository.

alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git


The following commit(s) were added to refs/heads/master by this push:
     new 88db198  [master_sentry-itest] one more scenario for authz cache
88db198 is described below

commit 88db1982ab0790363ad2e016a6fc83d67e6b088f
Author: Alexey Serbin <alexey@apache.org>
AuthorDate: Fri Apr 26 11:06:09 2019 -0700

    [master_sentry-itest] one more scenario for authz cache
    
    This patch adds an extra scenario to verify the functionality of the
    Kudu authz system in the case of HMS+Sentry integration.  The newly
    added scenario creates tables in Kudu when Sentry is temporarily shut
    down and the master authz cache is enabled.
    
    Change-Id: I3d3c04e4137afff407e4db8ee39a4495d9add3dc
    Reviewed-on: http://gerrit.cloudera.org:8080/13291
    Tested-by: Alexey Serbin <aserbin@cloudera.com>
    Reviewed-by: Hao Hao <hao.hao@cloudera.com>
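
For quick reference, here is a condensed sketch of the flow exercised by the
new scenario; it is not the full test, and it only restates calls that appear
in the master_sentry-itest.cc diff below (StopSentry(), StartSentry(),
ResetCache(), CreateKuduTable() and kDatabaseName come from the patch and the
existing test fixtures):

  ASSERT_OK(StopSentry());

  // With privileges already cached, CreateTable() still succeeds even though
  // HMS cannot push the corresponding notification to Sentry.
  ASSERT_OK(CreateKuduTable(kDatabaseName, "t10"));

  // With a client-side timeout shorter than the HMS --> Sentry retry window,
  // the call times out while the table creation finishes in the background.
  Status s = CreateKuduTable(kDatabaseName, "t11", MonoDelta::FromSeconds(1));
  ASSERT_TRUE(s.IsTimedOut()) << s.ToString();

  // Once the cache is reset, authz has to go to Sentry, which is still down.
  ASSERT_OK(ResetCache());
  ASSERT_TRUE(CreateKuduTable(kDatabaseName, "t2").IsNetworkError());

  // After Sentry is back, table creation succeeds again.
  ASSERT_OK(StartSentry());
  ASSERT_OK(CreateKuduTable(kDatabaseName, "t2"));
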
---
 src/kudu/client/master_proxy_rpc.cc               |   4 +-
 src/kudu/hms/mini_hms.cc                          |  35 ++++++-
 src/kudu/hms/mini_hms.h                           |  12 ++-
 src/kudu/integration-tests/hms_itest-base.cc      |  22 +++--
 src/kudu/integration-tests/hms_itest-base.h       |   6 +-
 src/kudu/integration-tests/master_sentry-itest.cc | 110 ++++++++++++++++++++++
 6 files changed, 172 insertions(+), 17 deletions(-)

diff --git a/src/kudu/client/master_proxy_rpc.cc b/src/kudu/client/master_proxy_rpc.cc
index 326df12..b873f1d 100644
--- a/src/kudu/client/master_proxy_rpc.cc
+++ b/src/kudu/client/master_proxy_rpc.cc
@@ -202,8 +202,8 @@ bool AsyncLeaderMasterRpc<ReqClass, RespClass>::RetryOrReconnectIfNecessary(
   // negotiation.
 
   // Authorization errors during negotiation generally indicate failure to
-  // authenticate. If that failure was due to an invalid token, try to get a
-  // new one by reconnecting with the master.
+  // authenticate. If that failure was due to an invalid authn token,
+  // try to get a new one by establishing a new connection to the master.
   const ErrorStatusPB* err = retrier().controller().error_response();
   if (s.IsNotAuthorized()) {
     if (err && err->has_code() &&
diff --git a/src/kudu/hms/mini_hms.cc b/src/kudu/hms/mini_hms.cc
index e730f80..7ccb4a9 100644
--- a/src/kudu/hms/mini_hms.cc
+++ b/src/kudu/hms/mini_hms.cc
@@ -71,12 +71,16 @@ void MiniHms::EnableKerberos(string krb5_conf,
 }
 
 void MiniHms::EnableSentry(const HostPort& sentry_address,
-                           string sentry_service_principal) {
+                           string sentry_service_principal,
+                           int sentry_client_rpc_retry_num,
+                           int sentry_client_rpc_retry_interval_ms) {
   CHECK(!hms_process_);
   DCHECK(!sentry_service_principal.empty());
   VLOG(1) << Substitute("Enabling Sentry, at $0, for HMS", sentry_address.ToString());
   sentry_address_ = sentry_address.ToString();
   sentry_service_principal_ = std::move(sentry_service_principal);
+  sentry_client_rpc_retry_num_ = sentry_client_rpc_retry_num;
+  sentry_client_rpc_retry_interval_ms_ = sentry_client_rpc_retry_interval_ms;
 }
 
 void MiniHms::SetDataRoot(string data_root) {
@@ -366,6 +370,14 @@ Status MiniHms::CreateHiveSite() const {
     // - sentry.metastore.service.users
     //     Set of service users whose access will be excluded from
     //     Sentry authorization checks.
+    //
+    // - sentry.service.client.rpc.retry-total
+    //     Maximum number of attempts that the Sentry RPC client makes
+    //     while retrying a remote call to Sentry.
+    //
+    // - sentry.service.client.rpc.retry.interval.msec
+    //     Time interval between the Sentry client's attempts to retry
+    //     a remote call to Sentry.
     static const string kSentryFileTemplate = R"(
 <configuration>
   <property>
@@ -387,12 +399,25 @@ Status MiniHms::CreateHiveSite() const {
     <name>sentry.metastore.service.users</name>
     <value>kudu</value>
   </property>
+
+  <property>
+    <name>sentry.service.client.rpc.retry-total</name>
+    <value>$3</value>
+  </property>
+
+  <property>
+    <name>sentry.service.client.rpc.retry.interval.msec</name>
+    <value>$4</value>
+  </property>
 </configuration>
   )";
-    string sentry_file_contents = Substitute(kSentryFileTemplate,
-                                             sentry_address_,
-                                             sentry_service_principal_,
-                                             "server1");
+    auto sentry_file_contents = Substitute(
+        kSentryFileTemplate,
+        sentry_address_,
+        sentry_service_principal_,
+        "server1",
+        sentry_client_rpc_retry_num_,
+        sentry_client_rpc_retry_interval_ms_);
     RETURN_NOT_OK(WriteStringToFile(Env::Default(),
                                     sentry_file_contents,
                                     JoinPathSegments(data_root_, "hive-sentry-site.xml")));
diff --git a/src/kudu/hms/mini_hms.h b/src/kudu/hms/mini_hms.h
index 0adcb21..7fb1c22 100644
--- a/src/kudu/hms/mini_hms.h
+++ b/src/kudu/hms/mini_hms.h
@@ -48,8 +48,16 @@ class MiniHms {
 
   // Configures the mini HMS to enable the Sentry plugin, passing the
   // Sentry service's principal to be used in a Kerberos environment.
+  //
+  // Parameters 'sentry_client_rpc_retry_num' and
+  // 'sentry_client_rpc_retry_interval_ms' are used to override the default
+  // settings of the Sentry client used by HMS plugins. The default values for
+  // these two parameters are chosen to allow for a shorter HMS --> Sentry RPC
+  // timeout (i.e. shorter than with the default Sentry v2.{0,1} client's
+  // settings).
   void EnableSentry(const HostPort& sentry_address,
-                    std::string sentry_service_principal);
+                    std::string sentry_service_principal,
+                    int sentry_client_rpc_retry_num = 3,
+                    int sentry_client_rpc_retry_interval_ms = 500);
 
   // Configures the mini HMS to store its data in the provided path. If not set,
   // it uses a test-only temporary directory.
@@ -114,6 +122,8 @@ class MiniHms {
   // Sentry configuration
   std::string sentry_address_;
   std::string sentry_service_principal_;
+  int sentry_client_rpc_retry_num_;
+  int sentry_client_rpc_retry_interval_ms_;
 };
 
 } // namespace hms
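
The EnableSentry() overload above gains two retry-tuning parameters,
defaulting to 3 attempts at a 500 ms interval. Below is a minimal sketch of
how a test harness might call it to keep the HMS --> Sentry retry window
short; the helper function and the principal string are hypothetical and
shown only for illustration:

  #include "kudu/hms/mini_hms.h"
  #include "kudu/util/net/net_util.h"

  // Hypothetical helper: configure the mini HMS so its Sentry client gives up
  // quickly, keeping the HMS --> Sentry retry window shorter than the
  // client-side CreateTable timeout used in the new test scenario.
  void EnableSentryWithShortRetries(kudu::hms::MiniHms* hms,
                                    const kudu::HostPort& sentry_address) {
    hms->EnableSentry(sentry_address,
                      "sentry/_HOST@EXAMPLE.COM",  // placeholder principal
                      /*sentry_client_rpc_retry_num=*/2,
                      /*sentry_client_rpc_retry_interval_ms=*/100);
  }
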
diff --git a/src/kudu/integration-tests/hms_itest-base.cc b/src/kudu/integration-tests/hms_itest-base.cc
index 0369792..64a635b 100644
--- a/src/kudu/integration-tests/hms_itest-base.cc
+++ b/src/kudu/integration-tests/hms_itest-base.cc
@@ -35,6 +35,7 @@
 #include "kudu/hms/mini_hms.h"
 #include "kudu/mini-cluster/external_mini_cluster.h"
 #include "kudu/util/decimal_util.h"
+#include "kudu/util/monotime.h"
 #include "kudu/util/net/net_util.h"
 #include "kudu/util/status.h"
 #include "kudu/util/test_macros.h"
@@ -73,9 +74,9 @@ Status HmsITestBase::CreateDatabase(const string& database_name) {
 }
 
 Status HmsITestBase::CreateKuduTable(const string& database_name,
-                                     const string& table_name) {
+                                     const string& table_name,
+                                     MonoDelta timeout) {
   // Get coverage of all column types.
-  KuduSchema schema;
   KuduSchemaBuilder b;
   b.AddColumn("key")->Type(KuduColumnSchema::INT32)->NotNull()->PrimaryKey();
   b.AddColumn("int8_val")->Type(KuduColumnSchema::INT8);
@@ -94,14 +95,19 @@ Status HmsITestBase::CreateKuduTable(const string& database_name,
                               ->Precision(kMaxDecimal64Precision);
   b.AddColumn("decimal128_val")->Type(KuduColumnSchema::DECIMAL)
                                ->Precision(kMaxDecimal128Precision);
-
+  KuduSchema schema;
   RETURN_NOT_OK(b.Build(&schema));
   unique_ptr<KuduTableCreator> table_creator(client_->NewTableCreator());
-  return table_creator->table_name(Substitute("$0.$1", database_name, table_name))
-                       .schema(&schema)
-                       .num_replicas(1)
-                       .set_range_partition_columns({ "key" })
-                       .Create();
+  if (timeout.Initialized()) {
+    // If specified, set the timeout for the operation.
+    table_creator->timeout(timeout);
+  }
+  return table_creator->table_name(Substitute("$0.$1",
+                                              database_name, table_name))
+      .schema(&schema)
+      .num_replicas(1)
+      .set_range_partition_columns({ "key" })
+      .Create();
 }
 
 Status HmsITestBase::RenameHmsTable(const string& database_name,
diff --git a/src/kudu/integration-tests/hms_itest-base.h b/src/kudu/integration-tests/hms_itest-base.h
index b348031..50c814c 100644
--- a/src/kudu/integration-tests/hms_itest-base.h
+++ b/src/kudu/integration-tests/hms_itest-base.h
@@ -22,12 +22,15 @@
 
 #include <boost/optional/optional.hpp>
 
+#include "kudu/gutil/port.h"
 #include "kudu/hms/hms_client.h"
 #include "kudu/integration-tests/external_mini_cluster-itest-base.h"
 #include "kudu/util/status.h"
 
 namespace kudu {
 
+class MonoDelta;
+
 class HmsITestBase : public ExternalMiniClusterITestBase {
  public:
   Status StartHms() WARN_UNUSED_RESULT;
@@ -38,7 +41,8 @@ class HmsITestBase : public ExternalMiniClusterITestBase {
 
   // Creates a table in Kudu.
   Status CreateKuduTable(const std::string& database_name,
-                         const std::string& table_name) WARN_UNUSED_RESULT;
+                         const std::string& table_name,
+                         MonoDelta timeout = {}) WARN_UNUSED_RESULT;
 
   // Renames a table entry in the HMS catalog.
   Status RenameHmsTable(const std::string& database_name,
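
With the new optional 'timeout' parameter on CreateKuduTable(), callers can
bound the CreateTable RPC on a per-call basis; a default-constructed MonoDelta
leaves the table creator's default timeout in effect. A brief usage sketch,
assuming a test body in a fixture derived from HmsITestBase (the database and
table names are placeholders):

  // Default behavior: no per-call timeout override.
  ASSERT_OK(CreateKuduTable("db", "table_default_timeout"));

  // Bound the CreateTable RPC to one second; with Sentry down and the
  // HMS --> Sentry retry window longer than that, the call is expected to
  // time out on the client side while table creation continues in the
  // background on the HMS/master side.
  Status s = CreateKuduTable("db", "table_short_timeout",
                             MonoDelta::FromSeconds(1));
  ASSERT_TRUE(s.IsTimedOut()) << s.ToString();
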
diff --git a/src/kudu/integration-tests/master_sentry-itest.cc b/src/kudu/integration-tests/master_sentry-itest.cc
index 02ea86a..c42e91a 100644
--- a/src/kudu/integration-tests/master_sentry-itest.cc
+++ b/src/kudu/integration-tests/master_sentry-itest.cc
@@ -873,6 +873,116 @@ TEST_F(SentryAuthzProviderCacheITest, ResetAuthzCacheConcurrentAlterTable)
{
   }
 }
 
+// This test scenario documents an artifact of the Kudu+HMS+Sentry integration
+// when the authz cache is enabled (the cache is enabled by default).
+// In essence, information on the ownership of a table created during a short
+// period of Sentry's unavailability will never appear in Sentry. That might be
+// misleading because CreateTable() reports success to the client. The created
+// table indeed exists and is otherwise fully functional, but the corresponding
+// owner privilege record is absent in Sentry.
+TEST_F(SentryAuthzProviderCacheITest, CreateTables) {
+  constexpr const char* const kGhostTables[] = { "t10", "t11" };
+
+  // Grant CREATE TABLE and METADATA privileges on the database.
+  ASSERT_OK(GrantCreateTablePrivilege({ kDatabaseName }));
+  ASSERT_OK(AlterRoleGrantPrivilege(
+      sentry_client_.get(), kDevRole,
+      GetDatabasePrivilege(kDatabaseName, "METADATA")));
+
+  // Make sure it's possible to create a table in the database. This also
+  // populates the privileges cache with information on the privileges
+  // granted on the database.
+  ASSERT_OK(CreateKuduTable(kDatabaseName, "t0"));
+
+  // An attempt to open a not-yet-existing table fetches information on the
+  // privileges granted on the table, populating the privileges cache.
+  for (const auto& t : kGhostTables) {
+    shared_ptr<KuduTable> kudu_table;
+    const auto s = client_->OpenTable(Substitute("$0.$1", kDatabaseName, t),
+                                      &kudu_table);
+    ASSERT_TRUE(s.IsNotFound()) << s.ToString();
+  }
+
+  ASSERT_OK(StopSentry());
+
+  // CreateTable() with an operation timeout longer than the HMS --> Sentry
+  // communication timeout completes successfully. After failing to push
+  // the information on the newly created table to Sentry (per the logic
+  // implemented in the SentrySyncHMSNotificationsPostEventListener plugin),
+  // HMS sends a success response to the Kudu master, and Kudu successfully
+  // completes the rest of the steps.
+  ASSERT_OK(CreateKuduTable(kDatabaseName, kGhostTables[0]));
+
+  // In this case, the timeout for the CreateTable RPC is set to be shorter
+  // than the HMS --> Sentry communication timeout (see the corresponding
+  // parameters of the MiniHms::EnableSentry() method). CreateTable()
+  // successfully passes the authz phase since the information on privileges
+  // is cached and no Sentry RPC calls are attempted. However, since Sentry is
+  // down, CreateTable() takes a long time on the HMS side and the client's
+  // request times out, while the creation of the table continues in the
+  // background.
+  {
+    const auto s = CreateKuduTable(kDatabaseName, kGhostTables[1],
+                                   MonoDelta::FromSeconds(1));
+    ASSERT_TRUE(s.IsTimedOut()) << s.ToString();
+  }
+
+  // Before starting Sentry, make sure the abandoned request to create the
+  // latter table succeeded even though CreateKuduTable() reported a timeout.
+  // This verifies that HMS eventually stopped trying to push the notification
+  // on the table's creation to Sentry via the metastore plugin
+  // SentrySyncHMSNotificationsPostEventListener.
+  ASSERT_EVENTUALLY([&]{
+    bool exists;
+    ASSERT_OK(client_->TableExists(
+        Substitute("$0.$1", kDatabaseName, kGhostTables[1]), &exists));
+    ASSERT_TRUE(exists);
+  });
+
+  ASSERT_OK(ResetCache());
+
+  // After resetting the cache, it should not be possible to create another
+  // table: the authz provider needs to fetch information on privileges
+  // directly from Sentry, but Sentry is still down.
+  {
+    const auto s = CreateKuduTable(kDatabaseName, "t2");
+    ASSERT_TRUE(s.IsNetworkError()) << s.ToString();
+  }
+
+  ASSERT_OK(StartSentry());
+
+  // Try to create the table after bringing Sentry back up: it should succeed.
+  ASSERT_OK(CreateKuduTable(kDatabaseName, "t2"));
+
+  // Once the table has been created, it should be possible to perform DDL
+  // operations on it since the user is the owner of the table.
+  {
+    unique_ptr<KuduTableAlterer> table_alterer(
+        client_->NewTableAlterer(Substitute("$0.$1", kDatabaseName, "t2")));
+    table_alterer->AddColumn("new_int8_columun")->Type(KuduColumnSchema::INT8);
+    ASSERT_OK(table_alterer->Alter());
+  }
+
+  // Try to run DDL against the tables created during Sentry's downtime. These
+  // should not be authorized since Sentry didn't receive information on the
+  // ownership of those tables from HMS during their creation, and there is no
+  // catch-up for those events after Sentry is started again.
+  for (const auto& t : kGhostTables) {
+    unique_ptr<KuduTableAlterer> table_alterer(
+        client_->NewTableAlterer(Substitute("$0.$1", kDatabaseName, t)));
+    table_alterer->AddColumn("new_int8_columun")->Type(KuduColumnSchema::INT8);
+    auto s = table_alterer->Alter();
+    ASSERT_TRUE(s.IsNotAuthorized()) << s.ToString();
+
+    // After granting the ALTER TABLE privilege, the alteration should be
+    // successful. One caveat: it's necessary to reset the cache since the
+    // cache will not pick up the new privilege record until the current
+    // entry expires.
+    ASSERT_OK(GrantAlterTablePrivilege({ kDatabaseName, t }));
+    ASSERT_OK(ResetCache());
+    ASSERT_OK(table_alterer->Alter());
+  }
+}
+
 // Basic test to verify access control and functionality of
 // the ResetAuthzCache(); integration with Sentry is not enabled.
 class AuthzCacheControlTest : public ExternalMiniClusterITestBase {

