sentry-commits mailing list archives

From lsk...@apache.org
Subject incubator-sentry git commit: SENTRY-544: Do not add non-HDFS path updates in the Hive metastore Sentry plugin for HDFS sync (Sravya Tirukkovalur via Lenni Kuff)
Date Fri, 05 Dec 2014 22:40:35 GMT
Repository: incubator-sentry
Updated Branches:
  refs/heads/master 58efe72a1 -> eb53de39e


SENTRY-544: Do not add non-HDFS path updates in the Hive metastore Sentry plugin for HDFS sync
(Sravya Tirukkovalur via Lenni Kuff)


Project: http://git-wip-us.apache.org/repos/asf/incubator-sentry/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-sentry/commit/eb53de39
Tree: http://git-wip-us.apache.org/repos/asf/incubator-sentry/tree/eb53de39
Diff: http://git-wip-us.apache.org/repos/asf/incubator-sentry/diff/eb53de39

Branch: refs/heads/master
Commit: eb53de39e4d700f124e70526dc0ac63a696001d0
Parents: 58efe72
Author: Lenni Kuff <lskuff@cloudera.com>
Authored: Fri Dec 5 14:34:32 2014 -0800
Committer: Lenni Kuff <lskuff@cloudera.com>
Committed: Fri Dec 5 14:34:32 2014 -0800

----------------------------------------------------------------------
 .../org/apache/sentry/hdfs/PathsUpdate.java     |  20 +-
 .../sentry/hdfs/TestUpdateableAuthzPaths.java   |  10 +-
 .../org/apache/sentry/hdfs/MetastorePlugin.java |  42 +++-
 .../tests/e2e/hdfs/TestHDFSIntegration.java     | 209 ++++++++++++++++---
 .../sentry/tests/e2e/hive/TestOperations.java   |   2 +
 5 files changed, 239 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/eb53de39/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java
index 60f8629..2652520 100644
--- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/PathsUpdate.java
@@ -22,6 +22,7 @@ import java.net.URISyntaxException;
 import java.util.LinkedList;
 import java.util.List;
 
+import com.google.common.base.Preconditions;
 import org.apache.sentry.hdfs.service.thrift.TPathChanges;
 import org.apache.sentry.hdfs.service.thrift.TPathsUpdate;
 
@@ -74,12 +75,23 @@ public class PathsUpdate implements Updateable.Update {
     return tPathsUpdate;
   }
 
-  
 
-  public static List<String> cleanPath(String path) {
+  /**
+   *
+   * @param path : Needs to be an HDFS location with scheme
+   * @return The path in the form of a list containing the path tree, with scheme/authority stripped off.
+   * Returns null if it is a non-HDFS path.
+   */
+  public static List<String> parsePath(String path) {
     try {
-      return Lists.newArrayList(new URI(path).getPath().split("^/")[1]
-          .split("/"));
+      URI uri = new URI(path);
+      Preconditions.checkNotNull(uri.getScheme());
+      if(uri.getScheme().equalsIgnoreCase("hdfs")) {
+        return Lists.newArrayList(uri.getPath().split("^/")[1]
+            .split("/"));
+      } else {
+        return null;
+      }
     } catch (URISyntaxException e) {
       throw new RuntimeException("Incomprehensible path [" + path + "]");
     }

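For readers skimming the patch, here is a minimal standalone sketch of the parsePath() contract introduced above. The class name ParsePathSketch and its main() are illustrative inventions, and java.util.Objects.requireNonNull stands in for the Guava Preconditions.checkNotNull used in the commit: an HDFS URI is split into path components, while any other scheme returns null so callers can skip the location.

import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

public class ParsePathSketch {
  public static List<String> parsePath(String path) {
    try {
      URI uri = new URI(path);
      // Mirrors the Preconditions.checkNotNull in the patch: a scheme is required.
      Objects.requireNonNull(uri.getScheme(), "missing scheme in " + path);
      if (uri.getScheme().equalsIgnoreCase("hdfs")) {
        // Drop the leading "/" and split the remainder into path components.
        return Arrays.asList(uri.getPath().split("^/")[1].split("/"));
      }
      return null; // non-HDFS locations are ignored by callers
    } catch (URISyntaxException e) {
      throw new RuntimeException("Incomprehensible path [" + path + "]");
    }
  }

  public static void main(String[] args) {
    System.out.println(parsePath("hdfs:///db1/tbl12/part121"));     // [db1, tbl12, part121]
    System.out.println(parsePath("file:///tmp/external/tab1_loc")); // null
  }
}
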
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/eb53de39/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java
index 51a939d..4b8a058 100644
--- a/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java
+++ b/sentry-hdfs/sentry-hdfs-common/src/test/java/org/apache/sentry/hdfs/TestUpdateableAuthzPaths.java
@@ -77,13 +77,13 @@ public class TestUpdateableAuthzPaths {
     // Create table
     PathsUpdate update = new PathsUpdate(2, false);
     TPathChanges pathChange = update.newPathChange("db1.tbl12");
-    pathChange.addToAddPaths(PathsUpdate.cleanPath("file:///db1/tbl12"));
+    pathChange.addToAddPaths(PathsUpdate.parsePath("hdfs:///db1/tbl12"));
     authzPaths.updatePartial(Lists.newArrayList(update), lock);
     
     // Add partition
     update = new PathsUpdate(3, false);
     pathChange = update.newPathChange("db1.tbl12");
-    pathChange.addToAddPaths(PathsUpdate.cleanPath("file:///db1/tbl12/part121"));
+    pathChange.addToAddPaths(PathsUpdate.parsePath("hdfs:///db1/tbl12/part121"));
     authzPaths.updatePartial(Lists.newArrayList(update), lock);
 
     // Ensure no change in existing Paths
@@ -98,8 +98,8 @@ public class TestUpdateableAuthzPaths {
 
     // Rename table
     update = new PathsUpdate(4, false);
-    update.newPathChange("db1.xtbl11").addToAddPaths(PathsUpdate.cleanPath("file:///db1/xtbl11"));
-    update.newPathChange("db1.tbl11").addToDelPaths(PathsUpdate.cleanPath("file:///db1/tbl11"));
+    update.newPathChange("db1.xtbl11").addToAddPaths(PathsUpdate.parsePath("hdfs:///db1/xtbl11"));
+    update.newPathChange("db1.tbl11").addToDelPaths(PathsUpdate.parsePath("hdfs:///db1/tbl11"));
     authzPaths.updatePartial(Lists.newArrayList(update), lock);
 
     // Verify name change
@@ -128,7 +128,7 @@ public class TestUpdateableAuthzPaths {
     // Drop partition
     PathsUpdate update = new PathsUpdate(2, false);
     TPathChanges pathChange = update.newPathChange("db1.tbl11");
-    pathChange.addToDelPaths(PathsUpdate.cleanPath("file:///db1/tbl11/part111"));
+    pathChange.addToDelPaths(PathsUpdate.parsePath("hdfs:///db1/tbl11/part111"));
     authzPaths.updatePartial(Lists.newArrayList(update), lock);
 
     // Verify Paths deleted

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/eb53de39/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java
index b8b8572..82ab27d 100644
--- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/MetastorePlugin.java
@@ -142,8 +142,10 @@ public class MetastorePlugin extends SentryMetastoreListenerPlugin {
     List<String> allDbStr = hmsHandler.get_all_databases();
     for (String dbName : allDbStr) {
       Database db = hmsHandler.get_database(dbName);
-      tempUpdate.newPathChange(db.getName()).addToAddPaths(
-          PathsUpdate.cleanPath(db.getLocationUri()));
+      List<String> dbPath = PathsUpdate.parsePath(db.getLocationUri());
+      if(dbPath != null) {
+        tempUpdate.newPathChange(db.getName()).addToAddPaths(dbPath);
+      }
       List<String> allTblStr = hmsHandler.get_all_tables(db.getName());
       for (String tblName : allTblStr) {
         Table tbl = hmsHandler.get_table(db.getName(), tblName);
@@ -151,12 +153,16 @@ public class MetastorePlugin extends SentryMetastoreListenerPlugin {
             .getDbName() + "." + tbl.getTableName());
         List<Partition> tblParts =
             hmsHandler.get_partitions(db.getName(), tbl.getTableName(), (short) -1);
-        tblPathChange.addToAddPaths(PathsUpdate.cleanPath(tbl.getSd()
-            .getLocation() == null ? db.getLocationUri() : tbl
-            .getSd().getLocation()));
+        List<String> tb1Path = PathsUpdate.parsePath(tbl.getSd().getLocation() == null ?
+            db.getLocationUri() : tbl.getSd().getLocation());
+        if(tb1Path != null) {
+          tblPathChange.addToAddPaths(tb1Path);
+        }
         for (Partition part : tblParts) {
-          tblPathChange.addToAddPaths(PathsUpdate.cleanPath(part.getSd()
-              .getLocation()));
+          List<String> partPath = PathsUpdate.parsePath(part.getSd().getLocation());
+          if(partPath != null) {
+            tblPathChange.addToAddPaths(partPath);
+          }
         }
       }
     }
@@ -167,12 +173,16 @@ public class MetastorePlugin extends SentryMetastoreListenerPlugin {
 
   @Override
   public void addPath(String authzObj, String path) {
+    List<String> pathTree = PathsUpdate.parsePath(path);
+    if(pathTree == null) {
+      return;
+    }
     LOGGER.debug("#### HMS Path Update ["
         + "OP : addPath, "
         + "authzObj : " + authzObj + ", "
         + "path : " + path + "]");
     PathsUpdate update = createHMSUpdate();
-    update.newPathChange(authzObj).addToAddPaths(PathsUpdate.cleanPath(path));
+    update.newPathChange(authzObj).addToAddPaths(pathTree);
     notifySentryAndApplyLocal(update);
   }
 
@@ -199,12 +209,16 @@ public class MetastorePlugin extends SentryMetastoreListenerPlugin {
     if ("*".equals(path)) {
       removeAllPaths(authzObj, null);
     } else {
+      List<String> pathTree = PathsUpdate.parsePath(path);
+      if(pathTree == null) {
+        return;
+      }
       LOGGER.debug("#### HMS Path Update ["
           + "OP : removePath, "
           + "authzObj : " + authzObj + ", "
           + "path : " + path + "]");
       PathsUpdate update = createHMSUpdate();
-      update.newPathChange(authzObj).addToDelPaths(PathsUpdate.cleanPath(path));
+      update.newPathChange(authzObj).addToDelPaths(pathTree);
       notifySentryAndApplyLocal(update);
     }
   }
@@ -219,8 +233,14 @@ public class MetastorePlugin extends SentryMetastoreListenerPlugin {
         + "newPath : " + oldPath + ","
         + "newName : " + newName + ","
         + "newPath : " + newPath + "]");
-    update.newPathChange(newName).addToAddPaths(PathsUpdate.cleanPath(newPath));
-    update.newPathChange(oldName).addToDelPaths(PathsUpdate.cleanPath(oldPath));
+    List<String> newPathTree = PathsUpdate.parsePath(newPath);
+    if( newPathTree != null ) {
+      update.newPathChange(newName).addToAddPaths(newPathTree);
+    }
+    List<String> oldPathTree = PathsUpdate.parsePath(oldPath);
+    if( oldPathTree != null ) {
+      update.newPathChange(oldName).addToDelPaths(oldPathTree);
+    }
     notifySentryAndApplyLocal(update);
   }
 

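To make the effect of the new null checks concrete, here is a hypothetical walkthrough (locations invented for illustration) of the renameAuthzObject guard when a table's data moves from HDFS to the local filesystem: the old HDFS location still yields a path tree, so a del-path is recorded, but the new file:// location parses to null and no add-path is recorded. It reuses the ParsePathSketch stand-in from above in place of PathsUpdate.parsePath().

public class RenameGuardSketch {
  public static void main(String[] args) {
    // Old location on HDFS: parses to a path tree, so a del-path would be recorded.
    System.out.println(ParsePathSketch.parsePath(
        "hdfs:///user/hive/warehouse/db1.db/tab4")); // [user, hive, warehouse, db1.db, tab4]
    // New location on the local filesystem: null, so the add-path is skipped.
    System.out.println(ParsePathSketch.parsePath(
        "file:///tmp/external/tab4_loc"));           // null
  }
}
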
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/eb53de39/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java
index a33cc15..cd30b2a 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java
@@ -38,6 +38,7 @@ import java.util.StringTokenizer;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import com.google.common.base.Preconditions;
 import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
@@ -88,7 +89,9 @@ import org.apache.sentry.tests.e2e.hive.hiveserver.InternalHiveServer;
 import org.apache.sentry.tests.e2e.hive.hiveserver.InternalMetastoreServer;
 import org.fest.reflect.core.Reflection;
 import org.junit.After;
-import org.junit.Before;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -135,18 +138,25 @@ public class TestHDFSIntegration {
   private static final int NUM_RETRIES = 10;
   private static final int RETRY_WAIT = 1000;
 
-  private MiniDFSCluster miniDFS;
+  private static MiniDFSCluster miniDFS;
   private MiniMRClientCluster miniMR;
-  private InternalHiveServer hiveServer2;
-  private InternalMetastoreServer metastore;
-  private SentryService sentryService;
-  private String fsURI;
-  private int hmsPort;
-  private int sentryPort = -1;
-  private File baseDir;
-  private File policyFileLocation;
-  private UserGroupInformation adminUgi;
-  private UserGroupInformation hiveUgi;
+  private static InternalHiveServer hiveServer2;
+  private static InternalMetastoreServer metastore;
+  private static SentryService sentryService;
+  private static String fsURI;
+  private static int hmsPort;
+  private static int sentryPort = -1;
+  private static File baseDir;
+  private static File policyFileLocation;
+  private static UserGroupInformation adminUgi;
+  private static UserGroupInformation hiveUgi;
+
+  // Variables used for cleanup after each test.
+  // Each test must set these values.
+  private Path tmpHDFSDir;
+  private String[] dbNames;
+  private String[] roles;
+  private String admin;
 
   protected static File assertCreateDir(File dir) {
     if(!dir.isDirectory()) {
@@ -162,7 +172,7 @@ public class TestHDFSIntegration {
     return port;
   }
 
-  private void waitOnSentryService() throws Exception {
+  private static void waitOnSentryService() throws Exception {
     sentryService.start();
     final long start = System.currentTimeMillis();
     while (!sentryService.isRunning()) {
@@ -173,8 +183,8 @@ public class TestHDFSIntegration {
     }
   }
 
-  @Before
-  public void setup() throws Exception {
+  @BeforeClass
+  public static void setup() throws Exception {
     Class.forName("org.apache.hive.jdbc.HiveDriver");
     baseDir = Files.createTempDir();
     policyFileLocation = new File(baseDir, HiveServerFactory.AUTHZ_PROVIDER_FILENAME);
@@ -199,11 +209,11 @@ public class TestHDFSIntegration {
 
   }
 
-  private void startHiveAndMetastore() throws IOException, InterruptedException {
+  private static void startHiveAndMetastore() throws IOException, InterruptedException {
     startHiveAndMetastore(NUM_RETRIES);
   }
 
-  private void startHiveAndMetastore(final int retries) throws IOException, InterruptedException {
+  private static void startHiveAndMetastore(final int retries) throws IOException, InterruptedException {
     hiveUgi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
       public Void run() throws Exception {
@@ -230,6 +240,7 @@ public class TestHDFSIntegration {
         hiveConf.set("fs.defaultFS", fsURI);
         hiveConf.set("fs.default.name", fsURI);
         hiveConf.set("hive.metastore.execute.setugi", "true");
+        hiveConf.set("hive.metastore.warehouse.dir", "hdfs:///user/hive/warehouse");
         hiveConf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:;databaseName=" + baseDir.getAbsolutePath()
+ "/metastore_db;create=true");
         hiveConf.set("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver");
         hiveConf.set("javax.jdo.option.ConnectionUserName", "hive");
@@ -244,6 +255,7 @@ public class TestHDFSIntegration {
         hiveConf.set("hive.metastore.event.listeners", "org.apache.sentry.binding.metastore.SentryMetastorePostEventListener");
         hiveConf.set("hive.security.authorization.task.factory", "org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl");
         hiveConf.set("hive.server2.session.hook", "org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook");
+        hiveConf.set("sentry.metastore.service.users", "hive");// queries made by hive user
(beeline) skip meta store check
 
         HiveAuthzConf authzConf = new HiveAuthzConf(Resources.getResource("sentry-site.xml"));
         authzConf.addResource(hiveConf);
@@ -289,7 +301,7 @@ public class TestHDFSIntegration {
     });
   }
 
-  private void startHiveServer2(final int retries, HiveConf hiveConf)
+  private static void startHiveServer2(final int retries, HiveConf hiveConf)
       throws IOException, InterruptedException, SQLException {
     Connection conn = null;
     Thread th = null;
@@ -327,7 +339,7 @@ public class TestHDFSIntegration {
     }
   }
 
-  private void startDFSandYARN() throws IOException,
+  private static void startDFSandYARN() throws IOException,
       InterruptedException {
     adminUgi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
@@ -399,7 +411,7 @@ public class TestHDFSIntegration {
     });
   }
 
-  private void startSentry() throws IOException,
+  private static void startSentry() throws IOException,
       InterruptedException {
     hiveUgi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
@@ -452,7 +464,41 @@ public class TestHDFSIntegration {
   }
 
   @After
-  public void cleanUp() throws Exception {
+  public void cleanAfterTest() throws Exception {
+    //Clean up database
+    Connection conn;
+    Statement stmt;
+    Preconditions.checkArgument(admin != null && dbNames != null && roles != null && tmpHDFSDir != null,
+        "Test case did not set some of these values required for clean up: admin, dbNames, roles, tmpHDFSDir");
+
+    conn = hiveServer2.createConnection(admin, admin);
+    stmt = conn.createStatement();
+    for( String dbName: dbNames) {
+      stmt.execute("drop database if exists " + dbName + " cascade");
+    }
+    stmt.close();
+    conn.close();
+
+    //Clean up roles
+    conn = hiveServer2.createConnection("hive", "hive");
+    stmt = conn.createStatement();
+    for( String role:roles) {
+      stmt.execute("drop role " + role);
+    }
+    stmt.close();
+    conn.close();
+
+    //Clean up hdfs directories
+    miniDFS.getFileSystem().delete(tmpHDFSDir, true);
+
+    tmpHDFSDir = null;
+    dbNames = null;
+    roles = null;
+    admin = null;
+  }
+
+  @AfterClass
+  public static void cleanUp() throws Exception {
     try {
       if (miniDFS != null) {
         miniDFS.shutdown();
@@ -472,9 +518,15 @@ public class TestHDFSIntegration {
 
   @Test
   public void testEnd2End() throws Throwable {
-
-    Connection conn = hiveServer2.createConnection("hive", "hive");
-    Statement stmt = conn.createStatement();
+    tmpHDFSDir = new Path("/tmp/external");
+    dbNames = new String[]{"db1"};
+    roles = new String[]{"admin_role"};
+    admin = "hive";
+
+    Connection conn;
+    Statement stmt;
+    conn = hiveServer2.createConnection("hive", "hive");
+    stmt = conn.createStatement();
     stmt.execute("create role admin_role");
     stmt.execute("grant role admin_role to group hive");
     stmt.execute("grant all on server server1 to role admin_role");
@@ -687,6 +739,115 @@ public class TestHDFSIntegration {
     conn.close();
   }
 
+  /**
+   * Make sure non-HDFS paths are not added to the object-location map.
+   * @throws Throwable
+   */
+  @Test
+  public void testNonHDFSLocations() throws Throwable {
+    String dbName = "db2";
+
+    tmpHDFSDir = new Path("/tmp/external");
+    dbNames = new String[]{dbName};
+    roles = new String[]{"admin_role", "user_role"};
+    admin = StaticUserGroup.ADMIN1;
+
+    Connection conn;
+    Statement stmt;
+
+    conn = hiveServer2.createConnection("hive", "hive");
+    stmt = conn.createStatement();
+    stmt.execute("create role admin_role");
+    stmt.execute("grant all on server server1 to role admin_role");
+    stmt.execute("grant all on uri 'file:///tmp/external' to role admin_role");
+    stmt.execute("grant all on uri 'hdfs:///tmp/external' to role admin_role");
+    stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP);
+
+    conn = hiveServer2.createConnection(admin, admin);
+    stmt = conn.createStatement();
+    stmt.execute("create database " + dbName);
+    stmt.close();
+    conn.close();
+
+    conn = hiveServer2.createConnection("hive", "hive");
+    stmt = conn.createStatement();
+    stmt.execute("create role user_role");
+    stmt.execute("grant all on database " + dbName + " to role user_role");
+    stmt.execute("grant role user_role to group " + StaticUserGroup.USERGROUP1);
+    stmt.close();
+    conn.close();
+
+    conn = hiveServer2.createConnection(admin, admin);
+    stmt = conn.createStatement();
+
+    miniDFS.getFileSystem().mkdirs(tmpHDFSDir);
+    miniDFS.getFileSystem().setOwner(tmpHDFSDir, "hive", "hive");
+
+    //External table on local file system
+    miniDFS.getFileSystem().mkdirs(new Path("/tmp/external/tab1_loc"));
+    stmt.execute("use " + dbName);
+    stmt.execute("create external table tab1(a int) location 'file:///tmp/external/tab1_loc'");
+    verifyOnAllSubDirs("/tmp/external/tab1_loc", null, StaticUserGroup.USERGROUP1, false);
+
+    //External partitioned table on local file system
+    miniDFS.getFileSystem().mkdirs(new Path("/tmp/external/tab2_loc/i=1"));
+    stmt.execute("create external table tab2 (s string) partitioned by (i int) location 'file:///tmp/external/tab2_loc'");
+    verifyOnAllSubDirs("/tmp/external/tab2_loc", null, StaticUserGroup.USERGROUP1, false);
+    //Partition on local file system
+    stmt.execute("alter table tab2 add partition (i=1)");
+    stmt.execute("alter table tab2 partition (i=1) set location 'file:///tmp/external/tab2_loc/i=1'");
+    verifyOnAllSubDirs("/tmp/external/tab2_loc/i=1", null, StaticUserGroup.USERGROUP1, false);
+
+    // HDFS to local file system; also make sure that not specifying a scheme still works
+    stmt.execute("create external table tab3(a int) location '/tmp/external/tab3_loc'");
+    // SENTRY-546
+    // verifyOnAllSubDirs("/tmp/external/tab3_loc", FsAction.ALL, StaticUserGroup.USERGROUP1,
true);
+    verifyOnAllSubDirs("/tmp/external/tab3_loc", null, StaticUserGroup.USERGROUP1, true);
+    stmt.execute("alter table tab3 set location 'file:///tmp/external/tab3_loc'");
+    verifyOnAllSubDirs("/tmp/external/tab3_loc", null, StaticUserGroup.USERGROUP1, false);
+
+    //Local file system to HDFS
+    stmt.execute("create table tab4(a int) location 'file:///tmp/external/tab4_loc'");
+    stmt.execute("alter table tab4 set location 'hdfs:///tmp/external/tab4_loc'");
+    miniDFS.getFileSystem().mkdirs(new Path("/tmp/external/tab4_loc"));
+    // SENTRY-546
+    // verifyOnAllSubDirs("/tmp/external/tab4_loc", FsAction.ALL, StaticUserGroup.USERGROUP1,
true);
+    verifyOnAllSubDirs("/tmp/external/tab4_loc", null, StaticUserGroup.USERGROUP1, true);
+    stmt.close();
+    conn.close();
+  }
+
+  @Ignore("SENTRY-546")
+  @Test
+  public void testExternalTable() throws Throwable {
+    String dbName = "db2";
+
+    tmpHDFSDir = new Path("/tmp/external");
+    dbNames = new String[]{dbName};
+    roles = new String[]{"admin_role"};
+    admin = StaticUserGroup.ADMIN1;
+
+    Connection conn;
+    Statement stmt;
+
+    conn = hiveServer2.createConnection("hive", "hive");
+    stmt = conn.createStatement();
+    stmt.execute("create role admin_role");
+    stmt.execute("grant all on server server1 to role admin_role");
+    stmt.execute("grant all on uri 'hdfs:///tmp/external' to role admin_role");
+    stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP);
+
+    conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1);
+    stmt = conn.createStatement();
+    stmt.execute("create database " + dbName);
+    stmt.execute("create external table tab1(a int) location '/tmp/external/tab1_loc'");
+    verifyOnAllSubDirs("/tmp/external/tab1_loc", FsAction.ALL, StaticUserGroup.ADMINGROUP,
true);
+
+    stmt.close();
+    conn.close();
+
+  }
+
   private void verifyQuery(Statement stmt, String table, int n) throws Throwable {
     verifyQuery(stmt, table, n, NUM_RETRIES);
   }

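The test-harness changes above follow one pattern worth calling out: the mini clusters now start once per class instead of once per test, and each test declares what it created so a shared @After hook can tear it down. A distilled sketch follows; LifecycleSketch and its field names are placeholders, not the committed test, and IllegalStateException stands in for the Guava Preconditions check used in the commit.

import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class LifecycleSketch {
  private static Object sharedClusters;  // stand-in for miniDFS, HiveServer2, Sentry, ...

  // Per-test cleanup state; every test must set these before it finishes.
  private String[] dbNames;
  private String[] roles;
  private String admin;

  @BeforeClass
  public static void setup() {
    sharedClusters = new Object();       // expensive startup happens once per class
  }

  @Test
  public void someTest() {
    dbNames = new String[]{"db1"};
    roles = new String[]{"admin_role"};
    admin = "hive";
    // ... test body ...
  }

  @After
  public void cleanAfterTest() {
    if (admin == null || dbNames == null || roles == null) {
      throw new IllegalStateException("Test did not set the values required for cleanup");
    }
    // drop databases, roles, and temp dirs created by the test here
    dbNames = null;
    roles = null;
    admin = null;
  }

  @AfterClass
  public static void cleanUp() {
    sharedClusters = null;               // shared shutdown happens once per class
  }
}
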
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/eb53de39/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestOperations.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestOperations.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestOperations.java
index bff372b..6437d23 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestOperations.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/TestOperations.java
@@ -1003,6 +1003,8 @@ public class TestOperations extends AbstractTestWithStaticConfiguration {
     Statement statement = context.createStatement(connection);
     assertSemanticException(statement, "create external table " + DB1 + ".tb1(a int) stored as " +
         "textfile location 'file:" + externalTblDir.getAbsolutePath() + "'");
+    //Create external table on HDFS
+    assertSemanticException(statement, "create external table " + DB1 + ".tb2(a int) location '/user/hive/warehouse/blah'");
     statement.close();
     connection.close();
 

