sentry-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From kal...@apache.org
Subject [2/2] sentry git commit: SENTRY-2233: Add e2e tests for testing HDFS sync for owner privileges. (Kalyan Kumar Kalvagadda reviewed by Lina li)
Date Fri, 24 Aug 2018 17:17:05 GMT
SENTRY-2233: Add e2e tests for testing HDFS sync for owner privileges. (Kalyan Kumar Kalvagadda reviewed by Lina li)


Project: http://git-wip-us.apache.org/repos/asf/sentry/repo
Commit: http://git-wip-us.apache.org/repos/asf/sentry/commit/85cf7f29
Tree: http://git-wip-us.apache.org/repos/asf/sentry/tree/85cf7f29
Diff: http://git-wip-us.apache.org/repos/asf/sentry/diff/85cf7f29

Branch: refs/heads/master
Commit: 85cf7f2964f5c4be7b9f21ea4ffec46ec2ca3fdb
Parents: 50e1d23
Author: Kalyan Kumar Kalvagadda <kkalyan@cloudera.com>
Authored: Fri Aug 24 12:15:31 2018 -0500
Committer: Kalyan Kumar Kalvagadda <kkalyan@cloudera.com>
Committed: Fri Aug 24 12:15:31 2018 -0500

----------------------------------------------------------------------
 .../TestHmsNotificationProcessing.java          |   4 +-
 ...msNotificationProcessingWithOutHdfsSync.java |  10 +-
 ...tificationProcessingWithOutSyncOnCreate.java |   4 +-
 ...NotificationProcessingWithOutSyncOnDrop.java |   4 +-
 .../e2e/dbprovider/TestOwnerPrivileges.java     | 302 ++++++++++++-------
 .../TestOwnerPrivilegesWithGrantOption.java     |  46 +--
 .../e2e/hdfs/TestHDFSIntegrationAdvanced.java   |  72 ++---
 .../tests/e2e/hdfs/TestHDFSIntegrationBase.java |  76 +++--
 .../e2e/hdfs/TestHDFSIntegrationEnd2End.java    | 176 +++++------
 .../hdfs/TestHDFSIntegrationTogglingConf.java   |  28 +-
 10 files changed, 430 insertions(+), 292 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/sentry/blob/85cf7f29/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessing.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessing.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessing.java
index e730dd5..d7cc8a1 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessing.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessing.java
@@ -79,8 +79,8 @@ public class TestHmsNotificationProcessing  extends TestHmsNotificationProcessin
             + " TO ROLE select_tbl1");
 
     // Make sure that an ACL is added for that
-    verifyOnAllSubDirs("/user/hive/warehouse/db_1.db", FsAction.READ_EXECUTE, "hbase", true);
-    verifyOnAllSubDirs("/user/hive/warehouse/db_1.db/tb_1", FsAction.READ_EXECUTE, "hbase", true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db_1.db", FsAction.READ_EXECUTE, "hbase", true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db_1.db/tb_1", FsAction.READ_EXECUTE, "hbase", true);
 
     //Drop the object
     statement.execute("DROP DATABASE " + DB1 + " CASCADE");

http://git-wip-us.apache.org/repos/asf/sentry/blob/85cf7f29/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutHdfsSync.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutHdfsSync.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutHdfsSync.java
index 9535dee..59f24ab 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutHdfsSync.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutHdfsSync.java
@@ -88,8 +88,8 @@ public class TestHmsNotificationProcessingWithOutHdfsSync extends TestHmsNotific
             + " TO ROLE select_tbl1");
 
     // Make sure that an ACL is added for that
-    verifyOnAllSubDirs("/user/hive/warehouse/db_1.db", FsAction.READ_EXECUTE, "hbase", false);
-    verifyOnAllSubDirs("/user/hive/warehouse/db_1.db/tb_1", FsAction.READ_EXECUTE, "hbase", false);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db_1.db", FsAction.READ_EXECUTE, "hbase", false);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db_1.db/tb_1", FsAction.READ_EXECUTE, "hbase", false);
 
     //Drop the object
     statement.execute("DROP DATABASE " + DB1 + " CASCADE");
@@ -140,8 +140,8 @@ public class TestHmsNotificationProcessingWithOutHdfsSync extends TestHmsNotific
             + " TO ROLE select_tbl1");
 
     // Make sure that an ACL is added for that
-    verifyOnAllSubDirs("/user/hive/warehouse/db_1.db", FsAction.READ_EXECUTE, "hbase", false);
-    verifyOnAllSubDirs("/user/hive/warehouse/db_1.db/tb_1", FsAction.READ_EXECUTE, "hbase", false);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db_1.db", FsAction.READ_EXECUTE, "hbase", false);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db_1.db/tb_1", FsAction.READ_EXECUTE, "hbase", false);
 
     //alter the object
     String temp = "alter table " + DB1 + "." + tableName1 + " rename to " + DB1 + "." + tableName2;
@@ -149,6 +149,6 @@ public class TestHmsNotificationProcessingWithOutHdfsSync extends TestHmsNotific
 
     Thread.sleep(WAIT_FOR_NOTIFICATION_PROCESSING);
     // Make sure that an ACL is updated got the new table name
-    verifyOnAllSubDirs("/user/hive/warehouse/db_1.db/" + tableName2, FsAction.READ_EXECUTE, "hbase", false);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db_1.db/" + tableName2, FsAction.READ_EXECUTE, "hbase", false);
   }
 }

http://git-wip-us.apache.org/repos/asf/sentry/blob/85cf7f29/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutSyncOnCreate.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutSyncOnCreate.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutSyncOnCreate.java
index 0339c7b..b89ed93 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutSyncOnCreate.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutSyncOnCreate.java
@@ -82,8 +82,8 @@ public class TestHmsNotificationProcessingWithOutSyncOnCreate extends TestHmsNot
 
 
     // Make sure that an ACL is added for that
-    verifyOnAllSubDirs("/user/hive/warehouse/db_1.db", FsAction.READ_EXECUTE, "hbase", true);
-    verifyOnAllSubDirs("/user/hive/warehouse/db_1.db/tb_1", FsAction.READ_EXECUTE, "hbase", true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db_1.db", FsAction.READ_EXECUTE, "hbase", true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db_1.db/tb_1", FsAction.READ_EXECUTE, "hbase", true);
 
     //Drop the object
     statement.execute("DROP DATABASE " + DB1 + " CASCADE");

http://git-wip-us.apache.org/repos/asf/sentry/blob/85cf7f29/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutSyncOnDrop.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutSyncOnDrop.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutSyncOnDrop.java
index f70b6ab..8c00d5f 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutSyncOnDrop.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestHmsNotificationProcessingWithOutSyncOnDrop.java
@@ -79,8 +79,8 @@ public class TestHmsNotificationProcessingWithOutSyncOnDrop extends TestHmsNotif
     verifyPrivilegesCount(statement, 2);
 
     // Make sure that an ACL is added for that
-    verifyOnAllSubDirs("/user/hive/warehouse/db_1.db", FsAction.READ_EXECUTE, "hbase", true);
-    verifyOnAllSubDirs("/user/hive/warehouse/db_1.db/tb_1", FsAction.READ_EXECUTE, "hbase", true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db_1.db", FsAction.READ_EXECUTE, "hbase", true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db_1.db/tb_1", FsAction.READ_EXECUTE, "hbase", true);
 
     //Drop the object
     statement.execute("DROP DATABASE " + DB1 + " CASCADE");

http://git-wip-us.apache.org/repos/asf/sentry/blob/85cf7f29/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestOwnerPrivileges.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestOwnerPrivileges.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestOwnerPrivileges.java
index c085a0c..55a79ee 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestOwnerPrivileges.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestOwnerPrivileges.java
@@ -23,6 +23,10 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 
 import com.google.common.collect.Sets;
+import com.google.common.collect.Lists;
+import com.google.common.base.Strings;
+
+import java.io.FileNotFoundException;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.Statement;
@@ -30,7 +34,7 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.parquet.Strings;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.sentry.tests.e2e.hdfs.TestHDFSIntegrationBase;
 import org.apache.sentry.tests.e2e.hive.StaticUserGroup;
 import org.apache.sentry.service.common.ServiceConstants.SentryPrincipalType;
@@ -40,7 +44,6 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
-import com.google.common.collect.Lists;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -61,7 +64,7 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
 
   private final static String renameTag = "_new";
   protected Connection connection;
-  protected Statement statement;
+  protected Statement statementAdmin;
 
   @BeforeClass
   public static void setup() throws Exception {
@@ -75,29 +78,30 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     super.setUpTempDir();
     admin = "hive";
     connection = hiveServer2.createConnection(admin, admin);
-    statement = connection.createStatement();
-    statement.execute("create role admin_role");
-    statement.execute("grant role admin_role to group hive");
-    statement.execute("grant all on server server1 to role admin_role");
+    statementAdmin = connection.createStatement();
+    statementAdmin.execute("create role admin_role");
+    statementAdmin.execute("grant role admin_role to group hive");
+    statementAdmin.execute("grant all on server server1 to role admin_role");
   }
 
   /**
   * Verify that the user who creates a database has owner privilege on this database
+   * and also makes sure that HDFS ACL rules are updated.
    *
    * @throws Exception
    */
   @Test
-  public void testCreateDatabase() throws Exception {
+  public void testCreateDatabase() throws Throwable {
     dbNames = new String[]{DB1};
     roles = new String[]{"admin_role", "create_db1"};
 
     // create required roles
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
 
     // setup privileges for USER1
-    statement.execute("GRANT CREATE ON SERVER server1" + " TO ROLE create_db1");
+    statementAdmin.execute("GRANT CREATE ON SERVER server1" + " TO ROLE create_db1");
 
     // USER1 creates test DB
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -108,6 +112,9 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     verifyTableOwnerPrivilegeExistForPrincipal(statementUSER1_1, SentryPrincipalType.USER, Lists.newArrayList(USER1_1),
         DB1, "", 1);
 
+    // Verify that HDFS ACL are added.
+    verifyHdfsAcl(Lists.newArrayList(USER1_1), null, DB1, null, null, true);
+
     // verify that user has all privilege on this database, i.e., "OWNER" means "ALL"
     // for authorization
     statementUSER1_1.execute("CREATE TABLE " + DB1 + "." + tableName1
@@ -118,7 +125,7 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     statementUSER1_1.execute("DROP TABLE " + DB1 + "." + tableName1 + renameTag);
     statementUSER1_1.execute("DROP DATABASE " + DB1 + " CASCADE");
 
-    statement.close();
+    statementAdmin.close();
     connection.close();
 
     statementUSER1_1.close();
@@ -126,22 +133,23 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
   }
 
   /**
-   * Verify that the user who does not creases database has no owner privilege on this database
+   * Verify that the user who did not create the database has no owner privilege on this database and
+   * also makes sure that there are no HDFS ACLs.
    *
    * @throws Exception
    */
   @Test
-  public void testCreateDatabaseNegative() throws Exception {
+  public void testCreateDatabaseNegative() throws Throwable {
     dbNames = new String[]{DB1};
     roles = new String[]{"admin_role", "create_db1"};
 
     // create required roles
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
 
     // setup privileges for USER1
-    statement.execute("GRANT CREATE ON SERVER server1" + " TO ROLE create_db1");
+    statementAdmin.execute("GRANT CREATE ON SERVER server1" + " TO ROLE create_db1");
 
     // USER1 creates test DB
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -162,8 +170,10 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
       LOGGER.info("Expected Exception when dropping database " + ex.getMessage());
     }
 
+    // Verify that HDFS ACL are not set.
+    verifyHdfsAcl(Lists.newArrayList(USER1_2), null, DB1, null, null, false);
 
-    statement.close();
+    statementAdmin.close();
     connection.close();
 
     statementUSER1_1.close();
@@ -179,44 +189,47 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
    * @throws Exception
    */
   @Test
-  public void testCreateDatabaseAdmin() throws Exception {
+  public void testCreateDatabaseAdmin() throws Throwable {
     dbNames = new String[]{DB1};
     roles = new String[]{"admin_role", "create_db1"};
 
     // create required roles
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
 
     // admin user creates test DB
-    statement.execute("CREATE DATABASE " + DB1);
+    statementAdmin.execute("CREATE DATABASE " + DB1);
 
-    // verify no privileges created for new database
-    verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.USER, Lists.newArrayList(admin),
+    // verify privileges created for new database
+    verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.USER, Lists.newArrayList(admin),
         DB1, "", 1);
 
-    statement.close();
+    // Verify that HDFS ACL are set.
+    verifyHdfsAcl(Lists.newArrayList(admin), null, DB1, null, null, true);
+
+    statementAdmin.close();
     connection.close();
   }
 
   /**
   * Verify that after dropping a database, the user who creates the database has no owner privilege
-   * on this dropped database
+   * on this dropped database and makes sure that HDFS ACLs are updated accordingly.
    *
    * @throws Exception
    */
   @Test
-  public void testDropDatabase() throws Exception {
+  public void testDropDatabase() throws Throwable {
     dbNames = new String[]{DB1};
     roles = new String[]{"admin_role", "create_db1"};
 
     // create required roles
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
 
     // setup privileges for USER1
-    statement.execute("GRANT CREATE ON SERVER server1" + " TO ROLE create_db1");
+    statementAdmin.execute("GRANT CREATE ON SERVER server1" + " TO ROLE create_db1");
 
     // USER1 creates test DB and then drop it
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -228,7 +241,10 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     verifyTableOwnerPrivilegeExistForPrincipal(statementUSER1_1, SentryPrincipalType.USER, Lists.newArrayList(USER1_1),
         DB1, "", 0);
 
-    statement.close();
+    // Verify that HDFS ACL are not set.
+    verifyHdfsAcl(Lists.newArrayList(USER1_1), null, DB1, null, null, false);
+
+    statementAdmin.close();
     connection.close();
 
     statementUSER1_1.close();
@@ -242,20 +258,20 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
    */
   @Ignore("Enable the test once HIVE-18031 is in the hiver version integrated with Sentry")
   @Test
-  public void testAuthorizeAlterDatabaseSetOwner() throws Exception {
+  public void testAuthorizeAlterDatabaseSetOwner() throws Throwable {
     String ownerRole = "owner_role";
     String allWithGrantRole = "allWithGrant_role";
     dbNames = new String[]{DB1};
     roles = new String[]{"admin_role", "create_on_server", ownerRole};
 
     // create required roles, and assign them to USERGROUP1
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
     // create test DB
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
 
     // setup privileges for USER1
-    statement.execute("GRANT CREATE ON SERVER " + SERVER_NAME + " TO ROLE create_on_server");
+    statementAdmin.execute("GRANT CREATE ON SERVER " + SERVER_NAME + " TO ROLE create_on_server");
 
     // USER1_1 create database
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -274,7 +290,7 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
 
     // admin issues alter database set owner
     try {
-      statement.execute("ALTER DATABASE " + DB1 + " SET OWNER ROLE " + ownerRole);
+      statementAdmin.execute("ALTER DATABASE " + DB1 + " SET OWNER ROLE " + ownerRole);
       Assert.fail("Expect altering database set owner to fail for admin");
     } catch (Exception ex) {
       // admin does not have all with grant option, so cannot issue this command
@@ -285,9 +301,9 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
 
     try {
       // create role that has all with grant on the table
-      statement.execute("create role " + allWithGrantRole);
-      statement.execute("grant role " + allWithGrantRole + " to group " + USERGROUP2);
-      statement.execute("GRANT ALL ON DATABASE " + DB1 + " to role " +
+      statementAdmin.execute("create role " + allWithGrantRole);
+      statementAdmin.execute("grant role " + allWithGrantRole + " to group " + USERGROUP2);
+      statementAdmin.execute("GRANT ALL ON DATABASE " + DB1 + " to role " +
           allWithGrantRole + " with grant option");
 
       // cannot issue command on a different database
@@ -305,27 +321,37 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
 
       // verify privileges is transferred to role owner_role, which is associated with USERGROUP1,
       // therefore to USER1_1
-      verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.ROLE,
+      verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.ROLE,
           Lists.newArrayList(ownerRole),
           DB1, "", 1);
 
+      // Verify that HDFS ACL are not set.
+      verifyHdfsAcl(Lists.newArrayList(USER1_1), null, DB1, null, null, false);
+
+      // Verify that HDFS ACL are set.
+      verifyHdfsAcl(null, Lists.newArrayList(USERGROUP2), DB1, null, null, true);
+
       // alter database set owner to user USER1_1 and verify privileges is transferred to USER USER1_1
       statementUSER2_1
           .execute("ALTER DATABASE " + DB1 + " SET OWNER USER " + USER1_1);
-      verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.USER,
+      verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.USER,
           Lists.newArrayList(USER1_1), DB1, "", 1);
 
       // alter database set owner to user USER2_1, who already has explicit all with grant
       statementUSER2_1
           .execute("ALTER DATABASE " + DB1 + " SET OWNER USER " + USER2_1);
-      verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.USER,
+      verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.USER,
           Lists.newArrayList(USER2_1),
           DB1, "", 1);
 
+      // Verify that HDFS ACL are set.
+      verifyHdfsAcl(Lists.newArrayList(USER2_1), null, DB1, tableName1, null, true);
+
+
     } finally {
-      statement.execute("drop role " + allWithGrantRole);
+      statementAdmin.execute("drop role " + allWithGrantRole);
 
-      statement.close();
+      statementAdmin.close();
       connection.close();
 
       statementUSER1_1.close();
@@ -338,25 +364,26 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
 
 
   /**
-   * Verify that the user who creases table has owner privilege on this table
+   * Verify that the user who creates a table has owner privilege on this table
+   * and makes sure that HDFS ACLs are updated accordingly.
    *
    * @throws Exception
    */
   @Test
-  public void testCreateTable() throws Exception {
+  public void testCreateTable() throws Throwable {
     dbNames = new String[]{DB1};
     roles = new String[]{"admin_role", "create_db1"};
 
     // create required roles
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
     // create test DB
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
-    statement.execute("CREATE DATABASE " + DB1);
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("CREATE DATABASE " + DB1);
 
     // setup privileges for USER1
-    statement.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
-    statement.execute("USE " + DB1);
+    statementAdmin.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
+    statementAdmin.execute("USE " + DB1);
 
     // USER1 create table
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -369,6 +396,9 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     verifyTableOwnerPrivilegeExistForPrincipal(statementUSER1_1, SentryPrincipalType.USER, Lists.newArrayList(USER1_1),
         DB1, tableName1, 1);
 
+    // Verify that HDFS ACL are added.
+    verifyHdfsAcl(Lists.newArrayList(USER1_1), null, DB1, tableName1, null, true);
+
     // verify that user has all privilege on this table, i.e., "OWNER" means "ALL"
     // for authorization
     statementUSER1_1.execute("INSERT INTO TABLE " + DB1 + "." + tableName1 + " VALUES (35)");
@@ -380,7 +410,8 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     Thread.sleep(WAIT_BEFORE_TESTVERIFY);
     statementUSER1_1.execute("DROP TABLE " + DB1 + "." + tableName1 + renameTag);
 
-    statement.close();
+
+    statementAdmin.close();
     connection.close();
 
     statementUSER1_1.close();
@@ -389,25 +420,25 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
 
   /**
   * Verify that the user who creates a table has owner privilege on this table, but cannot
-   * access tables created by others
+   * access tables created by others and makes sure that HDFS ACLs are updated accordingly.
    *
    * @throws Exception
    */
   @Test
-  public void testCreateTableNegative() throws Exception {
+  public void testCreateTableNegative() throws Throwable {
     dbNames = new String[]{DB1};
     roles = new String[]{"admin_role", "create_db1"};
 
     // create required roles
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
     // create test DB
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
-    statement.execute("CREATE DATABASE " + DB1);
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("CREATE DATABASE " + DB1);
 
     // setup privileges for USER1 and USER2
-    statement.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
-    statement.execute("USE " + DB1);
+    statementAdmin.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
+    statementAdmin.execute("USE " + DB1);
 
     // USER1 create table
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -443,8 +474,10 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     } catch  (Exception ex) {
       LOGGER.info("Expected Exception when dropping table: " + ex.getMessage());
     }
+    // Verify that HDFS ACL are not set.
+    verifyHdfsAcl(Lists.newArrayList(USER1_2), null, DB1, tableName1, null, false);
 
-    statement.close();
+    statementAdmin.close();
     connection.close();
 
     statementUSER1_1.close();
@@ -460,47 +493,51 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
    * @throws Exception
    */
   @Test
-  public void testCreateTableAdmin() throws Exception {
+  public void testCreateTableAdmin() throws Throwable {
     dbNames = new String[]{DB1};
     roles = new String[]{"admin_role", "create_db1"};
 
     // create required roles
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
 
     // admin creates test DB and then drop it
-    statement.execute("CREATE DATABASE " + DB1);
-    statement.execute("CREATE TABLE " + DB1 + "." + tableName1
+    statementAdmin.execute("CREATE DATABASE " + DB1);
+    statementAdmin.execute("CREATE TABLE " + DB1 + "." + tableName1
         + " (under_col int comment 'the under column')");
 
-    // verify no owner privileges created for new table
-    verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.USER, Lists.newArrayList(admin),
+    // verify owner privileges created for new table
+    verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.USER, Lists.newArrayList(admin),
         DB1, tableName1, 1);
 
-    statement.close();
+    // Verify that HDFS ACL are set.
+    verifyHdfsAcl(Lists.newArrayList(admin), null, DB1, tableName1, null, true);
+
+    statementAdmin.close();
     connection.close();
   }
 
   /**
   * Verify that the user who creates a table and then drops it has no owner privilege on this table
+   * and makes sure that HDFS ACLs are updated accordingly.
    *
    * @throws Exception
    */
   @Test
-  public void testDropTable() throws Exception {
+  public void testDropTable() throws Throwable {
     dbNames = new String[]{DB1};
     roles = new String[]{"admin_role", "create_db1"};
 
     // create required roles
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
     // create test DB
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
-    statement.execute("CREATE DATABASE " + DB1);
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("CREATE DATABASE " + DB1);
 
     // setup privileges for USER1
-    statement.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
+    statementAdmin.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
 
     // USER1 create table
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -513,7 +550,7 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     verifyTableOwnerPrivilegeExistForPrincipal(statementUSER1_1, SentryPrincipalType.USER, Lists.newArrayList(USER1_1),
         DB1, tableName1, 0);
 
-    statement.close();
+    statementAdmin.close();
     connection.close();
   }
 
@@ -524,20 +561,20 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
    */
   @Ignore("Enable the test once HIVE-18762 is in the hiver version integrated with Sentry")
   @Test
-  public void testAlterTable() throws Exception {
+  public void testAlterTable() throws Throwable {
     dbNames = new String[]{DB1};
     roles = new String[]{"admin_role", "create_db1", "owner_role"};
 
     // create required roles
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
     // create test DB
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
-    statement.execute("CREATE DATABASE " + DB1);
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("CREATE DATABASE " + DB1);
 
     // setup privileges for USER1
-    statement.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
-    statement.execute("USE " + DB1);
+    statementAdmin.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
+    statementAdmin.execute("USE " + DB1);
 
     // USER1 create table
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -550,6 +587,9 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     verifyTableOwnerPrivilegeExistForPrincipal(statementUSER1_1, SentryPrincipalType.USER, Lists.newArrayList(USER1_1),
         DB1, tableName1, 1);
 
+    // Verify that HDFS ACL are set.
+    verifyHdfsAcl(Lists.newArrayList(USER1_1), null, DB1, tableName1, null, true);
+
     // verify that user has all privilege on this table, i.e., "OWNER" means "ALL"
     // for authorization
     statementUSER1_1.execute("INSERT INTO TABLE " + DB1 + "." + tableName1 + " VALUES (35)");
@@ -558,6 +598,13 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     statementUSER1_1.execute("ALTER TABLE " + DB1 + "." + tableName1 + " SET OWNER ROLE " +
         "owner_role");
 
+    // Verify that HDFS ACL are not set.
+    verifyHdfsAcl(Lists.newArrayList(USER1_1), null, DB1, tableName1, null, false);
+
+    // Verify that HDFS ACL are set.
+    verifyHdfsAcl(null, Lists.newArrayList(USERGROUP1), DB1, tableName1, null, true);
+
+
     // alter table rename is not blocked for notification processing in upstream due to
     // hive bug HIVE-18783, which is fixed in Hive 2.4.0 and 3.0
     Thread.sleep(WAIT_BEFORE_TESTVERIFY);
@@ -583,7 +630,9 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     verifyTableOwnerPrivilegeExistForPrincipal(statementUSER1_1, SentryPrincipalType.USER, Lists.newArrayList(USER1_1),
         DB1, tableName1, 1);
 
-    statement.close();
+    statementAdmin.close();
+
+    statementAdmin.close();
     connection.close();
 
     statementUSER1_1.close();
@@ -604,15 +653,15 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     roles = new String[]{"admin_role", "create_db1", ownerRole};
 
     // create required roles, and assign them to USERGROUP1
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
     // create test DB
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
-    statement.execute("CREATE DATABASE " + DB1);
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("CREATE DATABASE " + DB1);
 
     // setup privileges for USER1
-    statement.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
-    statement.execute("USE " + DB1);
+    statementAdmin.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
+    statementAdmin.execute("USE " + DB1);
 
     // USER1_1 create table
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -644,7 +693,7 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
 
     // admin issues alter table set owner
     try {
-      statement.execute("ALTER TABLE " + DB1 + "." + tableName1 + " SET OWNER ROLE " + ownerRole);
+      statementAdmin.execute("ALTER TABLE " + DB1 + "." + tableName1 + " SET OWNER ROLE " + ownerRole);
       Assert.fail("Expect altering table set owner to fail for admin");
     } catch (Exception ex) {
       // admin does not have grant option, so cannot issue this command
@@ -655,9 +704,9 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
 
     try {
       // create role that has all with grant on the table
-      statement.execute("create role " + allWithGrantRole);
-      statement.execute("grant role " + allWithGrantRole + " to group " + USERGROUP2);
-      statement.execute("grant all on table " + DB1 + "." + tableName1 + " to role " +
+      statementAdmin.execute("create role " + allWithGrantRole);
+      statementAdmin.execute("grant role " + allWithGrantRole + " to group " + USERGROUP2);
+      statementAdmin.execute("grant all on table " + DB1 + "." + tableName1 + " to role " +
           allWithGrantRole + " with grant option");
 
       // cannot issue command on a different table
@@ -675,27 +724,27 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
 
       // verify privileges is transferred to role owner_role, which is associated with USERGROUP1,
       // therefore to USER1_1
-      verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.ROLE,
+      verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.ROLE,
           Lists.newArrayList(ownerRole),
           DB1, tableName1, 1);
 
       // alter table set owner to user USER1_1 and verify privileges is transferred to USER USER1_1
       statementUSER2_1
           .execute("ALTER TABLE " + DB1 + "." + tableName1 + " SET OWNER USER " + USER1_1);
-      verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.USER,
+      verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.USER,
           Lists.newArrayList(USER1_1), DB1, tableName1, 1);
 
       // alter table set owner to user USER2_1, who already has explicit all with grant
       statementUSER2_1
           .execute("ALTER TABLE " + DB1 + "." + tableName1 + " SET OWNER USER " + USER2_1);
-      verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.USER,
+      verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.USER,
           Lists.newArrayList(USER2_1),
           DB1, tableName1, 1);
 
     } finally {
-      statement.execute("drop role " + allWithGrantRole);
+      statementAdmin.execute("drop role " + allWithGrantRole);
 
-      statement.close();
+      statementAdmin.close();
       connection.close();
 
       statementUSER1_1.close();
@@ -717,15 +766,15 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     roles = new String[]{"admin_role", "create_db1"};
 
     // create required roles
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
     // create test DB
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
-    statement.execute("CREATE DATABASE " + DB1);
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("CREATE DATABASE " + DB1);
 
     // setup privileges for USER1
-    statement.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
-    statement.execute("USE " + DB1);
+    statementAdmin.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
+    statementAdmin.execute("USE " + DB1);
 
     // USER1 create table
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -734,7 +783,7 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
         + " (under_col int comment 'the under column')");
 
     // verify owner privileges created for new table
-    verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.USER, Lists.newArrayList(USER1_1),
+    verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.USER, Lists.newArrayList(USER1_1),
         DB1, tableName1, 1);
 
     // Changing the owner to an admin user
@@ -743,10 +792,10 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
 
     // verify no owner privileges to the new owner as the owner is admin user
 
-    verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.USER, Lists.newArrayList(admin),
+    verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.USER, Lists.newArrayList(admin),
         DB1, tableName1, 1);
 
-    statement.close();
+    statementAdmin.close();
     connection.close();
   }
 
@@ -756,8 +805,8 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
     userRoles.remove("admin_role");
 
     for (String roleName : userRoles) {
-      statement.execute("CREATE ROLE " + roleName);
-      statement.execute("GRANT ROLE " + roleName + " to GROUP " + USERGROUP1);
+      statementAdmin.execute("CREATE ROLE " + roleName);
+      statementAdmin.execute("GRANT ROLE " + roleName + " to GROUP " + USERGROUP1);
     }
   }
 
@@ -808,4 +857,47 @@ public class TestOwnerPrivileges extends TestHDFSIntegrationBase {
       resultSet.close();
     }
   }
+
+  /**
+   * Verifies HDFS ACLs for users and groups.
+   * ACLs could result from explicit privilege grants or from implicit owner privileges.
+   *
+   * @param users list of users for which the ACL entries should be verified
+   * @param groups list of groups for which the ACL entries should be verified
+   * @param dbName Database name
+   * @param tableName  Table Name
+   * @param location Location of the database/table
+   * @param areAclExpected whether ACL entries are expected
+   * @throws Throwable If verification fails.
+   */
+   protected void verifyHdfsAcl(List<String> users, List<String> groups,
+      String dbName, String tableName, String location, boolean areAclExpected) throws Throwable {
+     String locationToVerify = location;
+     try {
+       if (Strings.isNullOrEmpty(locationToVerify)) {
+         if (tableName == null) {
+           locationToVerify = hiveWarehouseLocation + "/" + dbName + ".db";
+         } else {
+           locationToVerify = hiveWarehouseLocation + "/" + dbName + ".db" + "/" + tableName;
+         }
+       }
+
+       if (users != null && !users.isEmpty()) {
+         for (String user : users) {
+           verifyUserPermOnAllSubDirs(locationToVerify, FsAction.ALL, user, areAclExpected);
+         }
+       }
+
+       if (groups != null && !groups.isEmpty()) {
+         for (String group : groups) {
+           verifyGroupPermOnAllSubDirs(locationToVerify, FsAction.ALL, group, areAclExpected);
+         }
+       }
+     } catch (FileNotFoundException e) {
+       // If ACLs are not expected, this exception is consumed.
+       if(areAclExpected) {
+         throw e;
+       }
+     }
+   }
 }

http://git-wip-us.apache.org/repos/asf/sentry/blob/85cf7f29/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestOwnerPrivilegesWithGrantOption.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestOwnerPrivilegesWithGrantOption.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestOwnerPrivilegesWithGrantOption.java
index c2ccb24..04ff27d 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestOwnerPrivilegesWithGrantOption.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/dbprovider/TestOwnerPrivilegesWithGrantOption.java
@@ -44,21 +44,23 @@ public class TestOwnerPrivilegesWithGrantOption extends TestOwnerPrivileges {
    */
   @Ignore("Enable the test once HIVE-18762 is in the hiver version integrated with Sentry")
   @Test
-  public void testAuthorizeAlterTableSetOwnerByOwner() throws Exception {
+  public void testAuthorizeAlterTableSetOwnerByOwner() throws Throwable {
     String ownerRole = "owner_role";
     dbNames = new String[]{DB1};
     roles = new String[]{"admin_role", "create_db1", ownerRole};
 
     // create required roles, and assign them to USERGROUP1
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
     // create test DB
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
-    statement.execute("CREATE DATABASE " + DB1);
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("CREATE DATABASE " + DB1);
 
     // setup privileges for USER1
-    statement.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
-    statement.execute("USE " + DB1);
+    statementAdmin.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE create_db1");
+    statementAdmin.execute("USE " + DB1);
+
+    statementAdmin.execute("GRANT ROLE " + ownerRole + " TO GROUP " + USERGROUP2);
 
     // USER1_1 create table
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -76,21 +78,31 @@ public class TestOwnerPrivilegesWithGrantOption extends TestOwnerPrivileges {
           .execute("ALTER TABLE " + DB1 + "." + tableName1 + " SET OWNER USER " + USER2_1);
 
       // verify privileges is transferred to USER2_1
-      verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.USER,
+      verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.USER,
           Lists.newArrayList(USER2_1),
           DB1, tableName1, 1);
 
+      // Verify that HDFS ACLs are not set.
+      verifyHdfsAcl(Lists.newArrayList(USER1_1), null, DB1, tableName1, null, false);
+
+      // Verify that HDFS ACLs are set.
+      verifyHdfsAcl(null, Lists.newArrayList(USERGROUP2), DB1, tableName1, null, true);
+
+
       // alter table set owner for role
       statementUSER2_1
           .execute("ALTER TABLE " + DB1 + "." + tableName1 + " SET OWNER ROLE " + ownerRole);
 
       // verify privileges is transferred to ownerRole
-      verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.ROLE,
+      verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.ROLE,
           Lists.newArrayList(ownerRole),
           DB1, tableName1, 1);
 
+      // Verify that HDFS ACLs are not set.
+      verifyHdfsAcl(null, Lists.newArrayList(USERGROUP2), DB1, tableName1, null, false);
+
     } finally {
-      statement.close();
+      statementAdmin.close();
       connection.close();
 
       statementUSER1_1.close();
@@ -109,18 +121,18 @@ public class TestOwnerPrivilegesWithGrantOption extends TestOwnerPrivileges {
     roles = new String[]{"admin_role", ownerRole};
 
     // create required roles, and assign them to USERGROUP1
-    setupUserRoles(roles, statement);
+    setupUserRoles(roles, statementAdmin);
 
     // create test DB
-    statement.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
-    statement.execute("CREATE DATABASE " + DB1);
+    statementAdmin.execute("DROP DATABASE IF EXISTS " + DB1 + " CASCADE");
+    statementAdmin.execute("CREATE DATABASE " + DB1);
 
-    statement.execute("CREATE ROLE " + newOwnerRole);
-    statement.execute("GRANT ROLE " + newOwnerRole + " to GROUP " + USERGROUP2);
+    statementAdmin.execute("CREATE ROLE " + newOwnerRole);
+    statementAdmin.execute("GRANT ROLE " + newOwnerRole + " to GROUP " + USERGROUP2);
 
     // setup privileges for USER1
-    statement.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE " + ownerRole);
-    statement.execute("USE " + DB1);
+    statementAdmin.execute("GRANT CREATE ON DATABASE " + DB1 + " TO ROLE " + ownerRole);
+    statementAdmin.execute("USE " + DB1);
 
     // USER1_1 create table
     Connection connectionUSER1_1 = hiveServer2.createConnection(USER1_1, USER1_1);
@@ -129,7 +141,7 @@ public class TestOwnerPrivilegesWithGrantOption extends TestOwnerPrivileges {
             + " (under_col int comment 'the under column')");
 
    // Verify that the user who created the table has owner privilege on the table created.
-    verifyTableOwnerPrivilegeExistForPrincipal(statement, SentryPrincipalType.USER,
+    verifyTableOwnerPrivilegeExistForPrincipal(statementAdmin, SentryPrincipalType.USER,
             Lists.newArrayList(USER1_1),
             DB1, tableName1, 1);
 

http://git-wip-us.apache.org/repos/asf/sentry/blob/85cf7f29/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegrationAdvanced.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegrationAdvanced.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegrationAdvanced.java
index c8fc019..7110885 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegrationAdvanced.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegrationAdvanced.java
@@ -74,9 +74,9 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
     stmt.execute("grant select on table t1 to role tab_role");
     stmt.execute("grant role tab_role to group flume");
 
-    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/t1", FsAction.READ_EXECUTE, "flume", true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db1.db/t1", FsAction.READ_EXECUTE, "flume", true);
     stmt.execute("INSERT INTO TABLE t1 VALUES (1)");
-    verifyOnAllSubDirs("/user/hive/warehouse/db1.db/t1", FsAction.READ_EXECUTE, "flume", true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/db1.db/t1", FsAction.READ_EXECUTE, "flume", true);
 
   }
 
@@ -123,26 +123,26 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
     miniDFS.getFileSystem().mkdirs(new Path("/tmp/external/tab1_loc"));
     stmt.execute("use " + dbName);
     stmt.execute("create external table tab1(a int) location 'file:///tmp/external/tab1_loc'");
-    verifyOnAllSubDirs("/tmp/external/tab1_loc", null, StaticUserGroup.USERGROUP1, false);
+    verifyGroupPermOnAllSubDirs("/tmp/external/tab1_loc", null, StaticUserGroup.USERGROUP1, false);
 
     //External partitioned table on local file system
     miniDFS.getFileSystem().mkdirs(new Path("/tmp/external/tab2_loc/i=1"));
     stmt.execute("create external table tab2 (s string) partitioned by (i int) location 'file:///tmp/external/tab2_loc'");
-    verifyOnAllSubDirs("/tmp/external/tab2_loc", null, StaticUserGroup.USERGROUP1, false);
+    verifyGroupPermOnAllSubDirs("/tmp/external/tab2_loc", null, StaticUserGroup.USERGROUP1, false);
     //Partition on local file system
     stmt.execute("alter table tab2 add partition (i=1)");
     stmt.execute("alter table tab2 partition (i=1) set location 'file:///tmp/external/tab2_loc/i=1'");
 
-    verifyOnAllSubDirs("/tmp/external/tab2_loc/i=1", null, StaticUserGroup.USERGROUP1, false);
+    verifyGroupPermOnAllSubDirs("/tmp/external/tab2_loc/i=1", null, StaticUserGroup.USERGROUP1, false);
 
     //HDFS to local file system, also make sure does not specifying scheme still works
     stmt.execute("create external table tab3(a int) location '/tmp/external/tab3_loc'");
     // SENTRY-546
     // SENTRY-1471 - fixing the validation logic revealed that FsAction.ALL is the right value.
-    verifyOnAllSubDirs("/tmp/external/tab3_loc", FsAction.ALL, StaticUserGroup.USERGROUP1, true);
-    // verifyOnAllSubDirs("/tmp/external/tab3_loc", null, StaticUserGroup.USERGROUP1, true);
+    verifyGroupPermOnAllSubDirs("/tmp/external/tab3_loc", FsAction.ALL, StaticUserGroup.USERGROUP1, true);
+    // verifyGroupPermOnAllSubDirs("/tmp/external/tab3_loc", null, StaticUserGroup.USERGROUP1, true);
     stmt.execute("alter table tab3 set location 'file:///tmp/external/tab3_loc'");
-    verifyOnAllSubDirs("/tmp/external/tab3_loc", null, StaticUserGroup.USERGROUP1, false);
+    verifyGroupPermOnAllSubDirs("/tmp/external/tab3_loc", null, StaticUserGroup.USERGROUP1, false);
 
     //Local file system to HDFS
     stmt.execute("create table tab4(a int) location 'file:///tmp/external/tab4_loc'");
@@ -150,8 +150,8 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
     miniDFS.getFileSystem().mkdirs(new Path("/tmp/external/tab4_loc"));
     // SENTRY-546
     // SENTRY-1471 - fixing the validation logic revealed that FsAction.ALL is the right value.
-    verifyOnAllSubDirs("/tmp/external/tab4_loc", FsAction.ALL, StaticUserGroup.USERGROUP1, true);
-    // verifyOnAllSubDirs("/tmp/external/tab4_loc", null, StaticUserGroup.USERGROUP1, true);
+    verifyGroupPermOnAllSubDirs("/tmp/external/tab4_loc", FsAction.ALL, StaticUserGroup.USERGROUP1, true);
+    // verifyGroupPermOnAllSubDirs("/tmp/external/tab4_loc", null, StaticUserGroup.USERGROUP1, true);
     stmt.close();
     conn.close();
   }
@@ -197,7 +197,7 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
 
     // When the table creation failed, the path will not be managed by sentry. And the
     // permission of the path will not be hive:hive.
-    verifyOnAllSubDirs("/tmp/external/p1", null, StaticUserGroup.HIVE, true);
+    verifyGroupPermOnAllSubDirs("/tmp/external/p1", null, StaticUserGroup.HIVE, true);
 
     stmt.close();
     conn.close();
@@ -243,7 +243,7 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
 
     // When the table creation failed, the path will not be managed by sentry. And the
     // permission of the path will not be hive:hive.
-    verifyOnAllSubDirs("/tmp/external/p1", null, StaticUserGroup.HIVE, true);
+    verifyGroupPermOnAllSubDirs("/tmp/external/p1", null, StaticUserGroup.HIVE, true);
 
     stmt.close();
     conn.close();
@@ -292,7 +292,7 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
 
     // When the table dropping failed, the path will still be managed by sentry. And the
     // permission of the path still should be hive:hive.
-    verifyOnAllSubDirs(tmpHDFSPartitionStr, FsAction.ALL, StaticUserGroup.HIVE, true);
+    verifyGroupPermOnAllSubDirs(tmpHDFSPartitionStr, FsAction.ALL, StaticUserGroup.HIVE, true);
 
     stmt.close();
     conn.close();
@@ -339,7 +339,7 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
 
     // When the partition dropping failed, the path for the partition will still
     // be managed by sentry. And the permission of the path still should be hive:hive.
-    verifyOnAllSubDirs(tmpHDFSPartitionStr, FsAction.ALL, StaticUserGroup.HIVE, true);
+    verifyGroupPermOnAllSubDirs(tmpHDFSPartitionStr, FsAction.ALL, StaticUserGroup.HIVE, true);
 
     stmt.close();
     conn.close();
@@ -600,7 +600,7 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
     Thread.sleep(WAIT_BEFORE_TESTVERIFY);//Wait till sentry cache is updated in Namenode
 
     // Verify that user_group1 has insert(write_execute) permission on '/tmp/external/p1'.
-    verifyOnAllSubDirs("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
+    verifyGroupPermOnAllSubDirs("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
 
     // Create external table tab2 and partition on location '/tmp/external'.
     // Create tab2_role, and grant it with select permission on table tab2 to user_group2.
@@ -612,8 +612,8 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
     Thread.sleep(WAIT_BEFORE_TESTVERIFY);//Wait till sentry cache is updated in Namenode
 
     // Verify that user_group2 have select(read_execute) permission on both paths.
-    verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab2", FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true);
-    verifyOnPath(tmpHDFSDirStr, FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab2", FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true);
+    verifyGroupPermOnPath(tmpHDFSDirStr, FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true);
 
     // Create table tab3 and partition on the same location '/tmp/external' as tab2.
     // Create tab3_role, and grant it with insert permission on table tab3 to user_group3.
@@ -626,29 +626,29 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
 
     // When two partitions of different tables pointing to the same location with different grants,
     // ACLs should have union (no duplicates) of both rules.
-    verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab3", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
-    verifyOnPath(tmpHDFSDirStr, FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true);
-    verifyOnPath(tmpHDFSDirStr, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab3", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
+    verifyGroupPermOnPath(tmpHDFSDirStr, FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true);
+    verifyGroupPermOnPath(tmpHDFSDirStr, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
 
     // When alter the table name (tab2 to be tabx), ACLs should remain the same.
     stmt.execute("alter table tab2 rename to tabx");
     Thread.sleep(WAIT_BEFORE_TESTVERIFY);//Wait till sentry cache is updated in Namenode
-    verifyOnPath(tmpHDFSDirStr, FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true);
-    verifyOnPath(tmpHDFSDirStr, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
+    verifyGroupPermOnPath(tmpHDFSDirStr, FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP2, true);
+    verifyGroupPermOnPath(tmpHDFSDirStr, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
 
     // When drop a partition that shares the same location with other partition belonging to
     // other table, should still have the other table permissions.
     stmt.execute("ALTER TABLE tabx DROP PARTITION (month = 1)");
     Thread.sleep(WAIT_BEFORE_TESTVERIFY);//Wait till sentry cache is updated in Namenode
-    verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab3", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
-    verifyOnPath(tmpHDFSDirStr, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab3", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
+    verifyGroupPermOnPath(tmpHDFSDirStr, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
 
     // When drop a table that has a partition shares the same location with other partition
     // belonging to other table, should still have the other table permissions.
     stmt.execute("DROP TABLE IF EXISTS tabx");
     Thread.sleep(WAIT_BEFORE_TESTVERIFY);//Wait till sentry cache is updated in Namenode
-    verifyOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab3", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
-    verifyOnPath(tmpHDFSDirStr, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/" + dbName + ".db/tab3", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
+    verifyGroupPermOnPath(tmpHDFSDirStr, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP3, true);
 
     stmt.close();
     conn.close();
@@ -688,12 +688,12 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
     Thread.sleep(WAIT_BEFORE_TESTVERIFY);//Wait till sentry cache is updated in Namenode
 
     // Verify that user_group1 has insert(write_execute) permission on '/tmp/external/p1'.
-    verifyOnAllSubDirs("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
+    verifyGroupPermOnAllSubDirs("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
 
     // When two partitions of the same table pointing to the same location,
     // ACLS should not be repeated. Exception will be thrown if there are duplicates.
     stmt.execute("alter table tab1 add partition (month = 2) location '/tmp/external/p1'");
-    verifyOnPath("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
+    verifyGroupPermOnPath("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
 
     stmt.close();
     conn.close();
@@ -730,7 +730,7 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
     Thread.sleep(WAIT_BEFORE_TESTVERIFY);//Wait till sentry cache is updated in Namenode
 
     // Verify that user_group1 has insert(write_execute) permission on '/tmp/external/p1'.
-    verifyOnAllSubDirs("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
+    verifyGroupPermOnAllSubDirs("/tmp/external/p1", FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
 
     // Create table tab2 on the same location '/tmp/external/p1' as table tab1.
     // Create tab2_role, and grant it with select permission on table tab2 to user_group1.
@@ -741,12 +741,12 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
 
     // When two tables pointing to the same location, ACLS should have union (no duplicates)
     // of both rules.
-    verifyOnPath("/tmp/external/p1", FsAction.ALL, StaticUserGroup.USERGROUP1, true);
+    verifyGroupPermOnPath("/tmp/external/p1", FsAction.ALL, StaticUserGroup.USERGROUP1, true);
 
     // When drop table tab1, ACLs of tab2 still remain.
     stmt.execute("DROP TABLE IF EXISTS tab1");
     Thread.sleep(WAIT_BEFORE_TESTVERIFY);//Wait till sentry cache is updated in Namenode
-    verifyOnPath("/tmp/external/p1", FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP1, true);
+    verifyGroupPermOnPath("/tmp/external/p1", FsAction.READ_EXECUTE, StaticUserGroup.USERGROUP1, true);
 
     stmt.close();
     conn.close();
@@ -805,7 +805,7 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
      //      /tmp/external                   (location without scheme)
      // Assert.assertEquals("/tmp/external", hmsClient.getTable(dbName, tblName).getSd().getLocation());
 
-     verifyOnPath("/tmp/external", FsAction.ALL, StaticUserGroup.HIVE, true);
+     verifyGroupPermOnPath("/tmp/external", FsAction.ALL, StaticUserGroup.HIVE, true);
 
      stmt.close();
      conn.close();
@@ -850,11 +850,11 @@ public class TestHDFSIntegrationAdvanced extends TestHDFSIntegrationBase {
 
       // Verify that the permissions are preserved.
       String newTblPath = Paths.get("/user/hive/warehouse", dbName + ".db", newTblName).toString();
-      verifyOnAllSubDirs(newTblPath, FsAction.ALL, StaticUserGroup.HIVE, true);
-      verifyOnAllSubDirs(newTblPath, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
+      verifyGroupPermOnAllSubDirs(newTblPath, FsAction.ALL, StaticUserGroup.HIVE, true);
+      verifyGroupPermOnAllSubDirs(newTblPath, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
       String newPatPath = new File(newTblPath, patName).toString();
-      verifyOnPath(newPatPath, FsAction.ALL, StaticUserGroup.ADMINGROUP, true);
-      verifyOnPath(newPatPath, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
+      verifyGroupPermOnPath(newPatPath, FsAction.ALL, StaticUserGroup.ADMINGROUP, true);
+      verifyGroupPermOnPath(newPatPath, FsAction.WRITE_EXECUTE, StaticUserGroup.USERGROUP1, true);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/sentry/blob/85cf7f29/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegrationBase.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegrationBase.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegrationBase.java
index f0cf960..3d7fbe3 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegrationBase.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegrationBase.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.parquet.Strings;
 import org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl;
 import org.apache.sentry.binding.hive.authz.SentryHiveAuthorizerFactory;
 import org.apache.sentry.binding.hive.conf.HiveAuthzConf;
@@ -150,6 +151,7 @@ public abstract class TestHDFSIntegrationBase {
   protected static final int NUM_RETRIES = 10;
   protected static final int RETRY_WAIT = 1000; //ms
   protected static final String EXTERNAL_SENTRY_SERVICE = "sentry.e2etest.external.sentry";
+  protected static final String hiveWarehouseLocation = "/user/hive/warehouse";
 
   protected static MiniDFSCluster miniDFS;
   protected static InternalHiveServer hiveServer2;
@@ -225,31 +227,50 @@ public abstract class TestHDFSIntegrationBase {
     return port;
   }
 
-  protected void verifyOnAllSubDirs(String path, FsAction fsAction, String group, boolean groupShouldExist) throws Throwable {
-    verifyOnAllSubDirs(path, fsAction, group, groupShouldExist, true);
+  /**
+   * Asserts that {@code group} has (or, when {@code groupShouldExist} is false, does not
+   * have) a group ACL entry equal to {@code fsAction} on {@code path} and, recursively,
+   * on every sub-directory and file under it. Retries are handled by the helper.
+   */
+  protected void verifyGroupPermOnAllSubDirs(String path, FsAction fsAction, String group, boolean groupShouldExist) throws Throwable {
+    verifyOnAllSubDirs(path, fsAction, null, group, groupShouldExist, true);
   }
 
-  protected void verifyOnPath(String path, FsAction fsAction, String group, boolean groupShouldExist) throws Throwable {
+  /**
+   * Polls until {@code group}'s ACL entry on {@code path} equals {@code fsAction}
+   * (or is absent when {@code groupShouldExist} is false), retrying for up to
+   * NUM_RETRIES * RETRY_WAIT ms before asserting failure. Non-recursive: only
+   * {@code path} itself is checked (recurse flag is false).
+   */
+  protected void verifyGroupPermOnPath(String path, FsAction fsAction, String group, boolean groupShouldExist) throws Throwable {
     long elapsed_Time = 0, start_time = System.nanoTime();
     final long TOTAL_SYNC_TIME = NUM_RETRIES * RETRY_WAIT; //ms
     while (elapsed_Time <= TOTAL_SYNC_TIME) {
       try {
-        verifyOnAllSubDirs(path, fsAction, group, groupShouldExist, false);
+        verifyOnAllSubDirs(path, fsAction, null, group, groupShouldExist, false);
         break;
       } catch (Exception ex) {
-        LOGGER.warn("verifyOnAllSubDirs fails: elapsed time = " + elapsed_Time + " ms.");
+        LOGGER.warn("verifyGroupPermOnAllSubDirs fails: elapsed time = " + elapsed_Time + " ms.");
       }
       elapsed_Time = (System.nanoTime() - start_time) / 1000000L; //ms
     }
     Assert.assertTrue(elapsed_Time <= TOTAL_SYNC_TIME);
   }
 
-  protected void verifyOnAllSubDirs(String path, FsAction fsAction, String group, boolean groupShouldExist, boolean recurse) throws Throwable {
-    verifyOnAllSubDirs(new Path(path), fsAction, group, groupShouldExist, recurse, NUM_RETRIES);
+  /**
+   * Asserts that {@code user} has (or, when {@code userShouldExist} is false, does not
+   * have) a user ACL entry equal to {@code fsAction} on {@code path} and, recursively,
+   * on every sub-directory and file under it.
+   */
+  protected void verifyUserPermOnAllSubDirs(String path, FsAction fsAction, String user, boolean userShouldExist) throws Throwable {
+    verifyOnAllSubDirs(path, fsAction, user, null, userShouldExist, true);
   }
 
-  protected void verifyOnAllSubDirs(Path p, FsAction fsAction, String group, boolean groupShouldExist, boolean recurse, int retry) throws Throwable {
-    verifyOnAllSubDirsHelper(p, fsAction, group, groupShouldExist, recurse, retry);
+  /**
+   * Polls until {@code user}'s ACL entry on {@code path} equals {@code fsAction}
+   * (or is absent when {@code userShouldExist} is false), retrying for up to
+   * NUM_RETRIES * RETRY_WAIT ms before asserting failure. Non-recursive: only
+   * {@code path} itself is checked (recurse flag is false).
+   */
+  protected void verifyUserPermOnPath(String path, FsAction fsAction, String user, boolean userShouldExist) throws Throwable {
+    long elapsed_Time = 0, start_time = System.nanoTime();
+    final long TOTAL_SYNC_TIME = NUM_RETRIES * RETRY_WAIT; //ms
+    while (elapsed_Time <= TOTAL_SYNC_TIME) {
+      try {
+        verifyOnAllSubDirs(path, fsAction, user, null, userShouldExist, false);
+        break;
+      } catch (Exception ex) {
+        // Log message names this method; the previous text was copy-pasted from the group variant.
+        LOGGER.warn("verifyUserPermOnPath fails: elapsed time = " + elapsed_Time + " ms.");
+      }
+      elapsed_Time = (System.nanoTime() - start_time) / 1000000L; //ms
+    }
+    Assert.assertTrue(elapsed_Time <= TOTAL_SYNC_TIME);
+  }
+
+  /**
+   * Convenience overload: converts {@code path} to a {@link Path} and verifies the
+   * given user and/or group ACL expectation, retrying up to NUM_RETRIES times.
+   * Either {@code user} or {@code group} may be null to skip that check.
+   */
+  protected void verifyOnAllSubDirs(String path, FsAction fsAction, String user, String group, boolean groupShouldExist, boolean recurse) throws Throwable {
+    verifyOnAllSubDirs(new Path(path), fsAction, user, group, groupShouldExist, recurse, NUM_RETRIES);
+  }
+
+  /**
+   * Delegates to the validation helper; see verifyOnAllSubDirsHelper for the
+   * retry and recursion semantics. Either {@code user} or {@code group} may be
+   * null to skip that check.
+   */
+  protected void verifyOnAllSubDirs(Path p, FsAction fsAction, String user, String group, boolean groupShouldExist, boolean recurse, int retry) throws Throwable {
+    verifyOnAllSubDirsHelper(p, fsAction, user, group, groupShouldExist, recurse, retry);
   }
 
   /* SENTRY-1471 - fixing the validation logic.
@@ -257,24 +278,37 @@ public abstract class TestHDFSIntegrationBase {
    * b) Throw an exception instead of returning false, to pass valuable debugging info up the stack
    *    - expected vs. found permissions.
    */
-  private void verifyOnAllSubDirsHelper(Path p, FsAction fsAction, String group,
-                                           boolean groupShouldExist, boolean recurse, int retry) throws Throwable {
+  private void verifyOnAllSubDirsHelper(Path p, FsAction fsAction, String user, String group,
+                                           boolean shouldExist, boolean recurse, int retry) throws Throwable {
     FileStatus fStatus = null;
     // validate parent dir's acls
     try {
       fStatus = miniDFS.getFileSystem().getFileStatus(p);
-      if (groupShouldExist) {
-        Assert.assertEquals("Error at verifying Path action : " + p + " ;", fsAction, getAcls(p).get(group));
+      if (shouldExist) {
+        if(!Strings.isNullOrEmpty(group)) {
+          Assert.assertEquals("Error at verifying Path action : " + p + " ;", fsAction,
+                  getAcls(AclEntryType.GROUP, p).get(group));
+        }
+        if(!Strings.isNullOrEmpty(user)) {
+          Assert.assertEquals("Error at verifying Path action : " + p + " ;", fsAction,
+                  getAcls(AclEntryType.USER,p).get(user));
+        }
       } else {
-        assertFalse("Error at verifying Path : " + p + " ," +
-            " group : " + group + " ;", getAcls(p).containsKey(group));
+        if(!Strings.isNullOrEmpty(group)) {
+          assertFalse("Error at verifying Path : " + p + " ," +
+                  " group : " + group + " ;", getAcls(AclEntryType.GROUP, p).containsKey(group));
+        }
+        if(!Strings.isNullOrEmpty(user)) {
+          assertFalse("Error at verifying Path : " + p + " ," +
+                  " user : " + user + " ;", getAcls(AclEntryType.USER, p).containsKey(user));
+        }
       }
       LOGGER.info("Successfully found acls for path = " + p.getName());
     } catch (Throwable th) {
       if (retry > 0) {
         LOGGER.info("Retry: " + retry);
         Thread.sleep(RETRY_WAIT);
-        verifyOnAllSubDirsHelper(p, fsAction, group, groupShouldExist, recurse, retry - 1);
+        verifyOnAllSubDirsHelper(p, fsAction, user, group, shouldExist, recurse, retry - 1);
       } else {
         throw th;
       }
@@ -283,16 +317,16 @@ public abstract class TestHDFSIntegrationBase {
     if (recurse && fStatus.isDirectory()) {
       FileStatus[] children = miniDFS.getFileSystem().listStatus(p);
       for (FileStatus fs : children) {
-        verifyOnAllSubDirsHelper(fs.getPath(), fsAction, group, groupShouldExist, recurse, NUM_RETRIES);
+        verifyOnAllSubDirsHelper(fs.getPath(), fsAction, user, group, shouldExist, recurse, NUM_RETRIES);
       }
     }
   }
 
-  protected Map<String, FsAction> getAcls(Path path) throws Exception {
+  protected Map<String, FsAction> getAcls(AclEntryType type, Path path) throws Exception {
     AclStatus aclStatus = miniDFS.getFileSystem().getAclStatus(path);
     Map<String, FsAction> acls = new HashMap<String, FsAction>();
     for (AclEntry ent : aclStatus.getEntries()) {
-      if (ent.getType().equals(AclEntryType.GROUP)) {
+      if (ent.getType().equals(type)) {
 
         // In case of duplicate acl exist, exception should be thrown.
         if (acls.containsKey(ent.getName())) {
@@ -407,7 +441,7 @@ public abstract class TestHDFSIntegrationBase {
 
     stmt.execute("grant select on table p1 to role p1_admin");
 
-    verifyOnAllSubDirs("/user/hive/warehouse/p1", FsAction.READ_EXECUTE, "hbase", true);
+    verifyGroupPermOnAllSubDirs("/user/hive/warehouse/p1", FsAction.READ_EXECUTE, "hbase", true);
     // hbase user should now be allowed to read...
     hbaseUgi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
@@ -593,7 +627,7 @@ public abstract class TestHDFSIntegrationBase {
         hiveConf.set("fs.defaultFS", fsURI);
         hiveConf.set("fs.default.name", fsURI);
         hiveConf.set("hive.metastore.execute.setugi", "true");
-        hiveConf.set("hive.metastore.warehouse.dir", "hdfs:///user/hive/warehouse");
+        hiveConf.set("hive.metastore.warehouse.dir", "hdfs://" + hiveWarehouseLocation);
         hiveConf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:;databaseName=" + baseDir.getAbsolutePath() + "/metastore_db;create=true");
         hiveConf.set("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver");
         hiveConf.set("javax.jdo.option.ConnectionUserName", "hive");


Mime
View raw message