From commits-return-3803-apmail-sentry-commits-archive=sentry.apache.org@sentry.incubator.apache.org Thu Oct 16 20:22:58 2014
From: sravya@apache.org
To: commits@sentry.incubator.apache.org
Date: Thu, 16 Oct 2014 20:22:29 -0000
Subject: [2/2] git commit: SENTRY-432: Synchronization of HDFS permissions with Sentry permissions: Refactoring and e2e tests (Arun Suresh via Sravya Tirukkovalur)

SENTRY-432: Synchronization of HDFS permissions with Sentry permissions: Refactoring and e2e tests (Arun Suresh via Sravya Tirukkovalur)

Project: http://git-wip-us.apache.org/repos/asf/incubator-sentry/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-sentry/commit/c059d3d7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-sentry/tree/c059d3d7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-sentry/diff/c059d3d7

Branch: refs/heads/sentry-hdfs-plugin
Commit: c059d3d76b9dde540594be68212396f8908e23fb
Parents: 78787d6
Author: Sravya Tirukkovalur
Authored: Thu Oct 16 13:21:03 2014 -0700
Committer: Sravya Tirukkovalur
Committed: Thu Oct 16 13:22:02 2014 -0700

----------------------------------------------------------------------
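[Editor's note] The change is easier to follow with the end state in mind: once the plugin chain in this patch is wired up, a Sentry grant on a Hive object surfaces as an HDFS ACL entry on the object's warehouse directory. Below is a minimal sketch of that check, assuming a hypothetical table directory /user/hive/warehouse/tbl1 and a NameNode running the SentryAuthorizationProvider configured by this patch; the ACL calls are the stock Hadoop FileSystem API that the new TestHDFSIntegration also imports.

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.AclStatus;

    public class AclSyncCheck {
      public static void main(String[] args) throws Exception {
        // fs.defaultFS must point at the (hypothetical) NameNode that runs
        // the SentryAuthorizationProvider added by this patch.
        FileSystem fs = FileSystem.get(new Configuration());

        // After e.g. "GRANT SELECT ON TABLE tbl1 TO ROLE analysts" through
        // HiveServer2, the synchronized permissions should be visible as
        // ACL entries on the table's warehouse directory.
        AclStatus acl = fs.getAclStatus(new Path("/user/hive/warehouse/tbl1"));
        List<AclEntry> entries = acl.getEntries();
        for (AclEntry entry : entries) {
          // Expect a group entry derived from the role's groups,
          // e.g. group:analysts:r-x (illustrative).
          System.out.println(entry);
        }
      }
    }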
 pom.xml                                          |   6 +
 sentry-dist/pom.xml                              |  13 +-
 sentry-dist/src/main/assembly/bin.xml            |  12 +-
 sentry-dist/src/main/assembly/sentry-hdfs.xml    |  47 +++
 sentry-hdfs/bin/pom.xml                          |  38 --
 sentry-hdfs/bin/sentry-hdfs-common/.gitignore    |   1 -
 sentry-hdfs/bin/sentry-hdfs-common/pom.xml       | 148 -------
 .../main/resources/sentry_hdfs_service.thrift    |  87 ----
 .../src/test/resources/hdfs-sentry.xml           |  22 -
 .../bin/sentry-hdfs-namenode-plugin/pom.xml      |  74 ----
 .../src/test/resources/hdfs-sentry.xml           |  33 --
 sentry-hdfs/bin/sentry-hdfs-service/pom.xml      | 108 -----
 sentry-hdfs/pom.xml                              |   2 +-
 .../sentry/hdfs/ExtendedMetastoreClient.java     | 104 -----
 .../sentry/hdfs/SentryHDFSServiceClient.java     | 212 ++++++++++
 .../apache/sentry/hdfs/ServiceConstants.java     |  66 +++
 sentry-hdfs/sentry-hdfs-dist/pom.xml             |  72 ++++
 .../src/main/assembly/all-jar.xml                |  18 +
 .../sentry/hdfs/ExtendedMetastoreClient.java     | 104 +++++
 .../sentry/hdfs/SentryHDFSServiceClient.java     | 210 ----------
 .../sentry/hdfs/SentryHDFSServiceProcessor.java  |   2 -
 .../hdfs/SentryHDFSServiceProcessorFactory.java  |   2 +-
 .../org/apache/sentry/hdfs/SentryPlugin.java     |   5 +-
 sentry-tests/sentry-tests-hive/pom.xml           |  15 +
 .../tests/e2e/hdfs/TestHDFSIntegration.java      | 400 +++++++++++++++++++
 .../sentry/tests/e2e/hive/StaticUserGroup.java   |   2 +
 26 files changed, 959 insertions(+), 844 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 615de75..952d702 100644
--- a/pom.xml
+++ b/pom.xml
@@ -360,6 +360,11 @@ limitations under the License.
       <dependency>
         <groupId>org.apache.sentry</groupId>
+        <artifactId>sentry-hdfs-dist</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.sentry</groupId>
         <artifactId>sentry-provider-cache</artifactId>
         <version>${project.version}</version>
       </dependency>
@@ -541,6 +546,7 @@ limitations under the License.
             <exclude>**/.metadata/</exclude>
             <exclude>**/target/</exclude>
+            <exclude>**/assembly/</exclude>
             <exclude>maven-repo/</exclude>
             <exclude>test-output/</exclude>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-dist/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-dist/pom.xml b/sentry-dist/pom.xml
index c4aa7a2..c720cf0 100644
--- a/sentry-dist/pom.xml
+++ b/sentry-dist/pom.xml
@@ -64,18 +64,6 @@ limitations under the License.
       <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-hdfs-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-hdfs-service</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-hdfs-namenode-plugin</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
       <artifactId>sentry-service-client</artifactId>
@@ -109,6 +97,7 @@ limitations under the License.
            <descriptor>src/main/assembly/src.xml</descriptor>
            <descriptor>src/main/assembly/bin.xml</descriptor>
+           <descriptor>src/main/assembly/sentry-hdfs.xml</descriptor>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-dist/src/main/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/sentry-dist/src/main/assembly/bin.xml b/sentry-dist/src/main/assembly/bin.xml
index 258e63c..6b95a3c 100644
--- a/sentry-dist/src/main/assembly/bin.xml
+++ b/sentry-dist/src/main/assembly/bin.xml
@@ -57,7 +57,6 @@
         <include>com.jolbox:bonecp</include>
         <include>org.apache.hive:hive-beeline</include>
         <include>org.apache.derby:derby</include>
-        <include>org.apache.derby:derby</include>
@@ -80,6 +79,7 @@
         <exclude>sentry-provider/**</exclude>
         <exclude>sentry-policy/**</exclude>
         <exclude>sentry-tests/**</exclude>
+        <exclude>sentry-hdfs/**</exclude>
@@ -95,6 +95,16 @@
       <outputDirectory>/</outputDirectory>
     </fileSet>
+    <fileSet>
+      <directory>${project.parent.basedir}/sentry-hdfs/sentry-hdfs-dist/target</directory>
+      <includes>
+        <include>sentry-hdfs-*.jar</include>
+      </includes>
+      <excludes>
+        <exclude>sentry-hdfs-dist-*.jar</exclude>
+      </excludes>
+      <outputDirectory>lib</outputDirectory>
+    </fileSet>
     <fileSet>
       <directory>${project.parent.basedir}/sentry-provider/sentry-provider-db/src/main/resources</directory>
         <include>**/*</include>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-dist/src/main/assembly/sentry-hdfs.xml
----------------------------------------------------------------------
diff --git a/sentry-dist/src/main/assembly/sentry-hdfs.xml b/sentry-dist/src/main/assembly/sentry-hdfs.xml
new file mode 100644
index 0000000..8d85d8f
--- /dev/null
+++ b/sentry-dist/src/main/assembly/sentry-hdfs.xml
@@ -0,0 +1,47 @@
+
+
+
+  <id>hdfs</id>
+
+  <formats>
+    <format>tar.gz</format>
+  </formats>
+
+  <baseDirectory>sentry-hdfs-${project.version}</baseDirectory>
+
+  <fileSets>
+    <fileSet>
+      <directory>${project.parent.basedir}/sentry-hdfs/sentry-hdfs-dist/target</directory>
+      <includes>
+        <include>sentry-hdfs-*.jar</include>
+      </includes>
+      <excludes>
+        <exclude>sentry-hdfs-dist-*.jar</exclude>
+      </excludes>
+      <outputDirectory>/</outputDirectory>
+    </fileSet>
+  </fileSets>
+

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/pom.xml b/sentry-hdfs/bin/pom.xml
deleted file mode 100644
index 4c4691f..0000000
--- a/sentry-hdfs/bin/pom.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
-
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.apache.sentry</groupId>
-    <artifactId>sentry</artifactId>
-    <version>1.5.0-incubating-SNAPSHOT</version>
-  </parent>
-
-  <artifactId>sentry-hdfs</artifactId>
-  <name>Sentry HDFS</name>
-  <packaging>pom</packaging>
-
-  <modules>
-    <module>sentry-hdfs-common</module>
-    <module>sentry-hdfs-service</module>
-    <module>sentry-hdfs-namenode-plugin</module>
-  </modules>
-

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-common/.gitignore
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/sentry-hdfs-common/.gitignore b/sentry-hdfs/bin/sentry-hdfs-common/.gitignore
deleted file mode 100644
index 1a28cd6..0000000
--- a/sentry-hdfs/bin/sentry-hdfs-common/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-sentry-hdfs/src/test/java/org/apache/sentry/hdfs/DummyAdapter.java

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-common/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/sentry-hdfs-common/pom.xml b/sentry-hdfs/bin/sentry-hdfs-common/pom.xml
deleted file mode 100644
index 511bc53..0000000
--- a/sentry-hdfs/bin/sentry-hdfs-common/pom.xml
+++ /dev/null
@@ -1,148 +0,0 @@
-
-
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.sentry</groupId>
-    <artifactId>sentry-hdfs</artifactId>
-    <version>1.5.0-incubating-SNAPSHOT</version>
-  </parent>
-
-  <artifactId>sentry-hdfs-common</artifactId>
-  <name>Sentry HDFS Common</name>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-minicluster</artifactId>
-      <version>2.5.0</version>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-metastore</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>2.5.0</version>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <sourceDirectory>${basedir}/src/main/java</sourceDirectory>
-    <testSourceDirectory>${basedir}/src/test/java</testSourceDirectory>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-source</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>src/gen/thrift/gen-javabean</source>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <profiles>
-    <profile>
-      <id>thriftif</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>generate-thrift-sources</id>
-                <phase>generate-sources</phase>
-                <configuration>
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                </configuration>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-enforcer-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>enforce-property</id>
-                <goals>
-                  <goal>enforce</goal>
-                </goals>
-                <configuration>
-                  <rules>
-                    <requireProperty>
-                      <property>thrift.home</property>
-                    </requireProperty>
-                  </rules>
-                  <fail>true</fail>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift b/sentry-hdfs/bin/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
deleted file mode 100644
index 9212b64..0000000
--- a/sentry-hdfs/bin/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/local/bin/thrift -java
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#
-# Thrift Service that the MetaStore is built on
-#
-
-include "share/fb303/if/fb303.thrift"
-
-namespace java org.apache.sentry.hdfs.service.thrift
-namespace php sentry.hdfs.thrift
-namespace cpp Apache.Sentry.HDFS.Thrift
-
-struct TPathChanges {
-1: required string authzObj;
-2: required list<list<string>> addPaths;
-3: required list<list<string>> delPaths;
-}
-
-struct TPathEntry {
-1: required byte type;
-2: required string pathElement;
-3: optional string authzObj;
-4: required set<i32> children;
-}
-
-struct TPathsDump {
-1: required i32 rootId;
-2: required map<i32, TPathEntry> nodeMap;
-}
-
-struct TPathsUpdate {
-1: required bool hasFullImage;
-2: optional TPathsDump pathsDump;
-3: required i64 seqNum;
-4: required list<TPathChanges> pathChanges;
-}
-
-struct TPrivilegeChanges {
-1: required string authzObj;
-2: required map<string, string> addPrivileges;
-3: required map<string, string> delPrivileges;
-}
-
-struct TRoleChanges {
-1: required string role;
-2: required list<string> addGroups;
-3: required list<string> delGroups;
-}
-
-struct TPermissionsUpdate {
-1: required bool hasfullImage;
-2: required i64 seqNum;
-3: required map<string, TPrivilegeChanges> privilegeChanges;
-4: required map<string, TRoleChanges> roleChanges;
-}
-
-struct TAuthzUpdateResponse {
-1: optional list<TPathsUpdate> authzPathUpdate,
-2: optional list<TPermissionsUpdate> authzPermUpdate,
-}
-
-service SentryHDFSService
-{
-  # HMS Path cache
-  void handle_hms_notification(1:TPathsUpdate pathsUpdate);
-
-  TAuthzUpdateResponse get_all_authz_updates_from(1:i64 permSeqNum, 2:i64 pathSeqNum);
-  map<string, list<string>> get_all_related_paths(1:string path, 2:bool exactMatch);
-}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml
b/sentry-hdfs/bin/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml deleted file mode 100644 index c23a431..0000000 --- a/sentry-hdfs/bin/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - - http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/pom.xml ---------------------------------------------------------------------- diff --git a/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/pom.xml b/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/pom.xml deleted file mode 100644 index de1aabd..0000000 --- a/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/pom.xml +++ /dev/null @@ -1,74 +0,0 @@ - - - - 4.0.0 - - org.apache.sentry - sentry-hdfs - 1.5.0-incubating-SNAPSHOT - - - sentry-hdfs-namenode-plugin - Sentry HDFS Namenode Plugin - - - - - org.apache.sentry - sentry-hdfs-common - 1.5.0-incubating-SNAPSHOT - - - org.apache.sentry - sentry-service-client - 1.5.0-incubating-SNAPSHOT - - - org.apache.sentry - sentry-hdfs-service - 1.5.0-incubating-SNAPSHOT - - - - junit - junit - test - - - com.google.guava - guava - - - org.apache.hadoop - hadoop-common - provided - - - org.apache.hadoop - hadoop-hdfs - provided - - - org.apache.hadoop - hadoop-minicluster - test - - - - http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml ---------------------------------------------------------------------- diff --git a/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml b/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml deleted file mode 100644 index 511bfdd..0000000 --- a/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - sentry.hdfs-plugin.path-prefixes - /user/hive/dw - - - sentry.hdfs-plugin.sentry-uri - thrift://localhost:1234 - - - sentry.hdfs-plugin.stale-threshold.ms - -1 - - http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-service/pom.xml ---------------------------------------------------------------------- diff --git a/sentry-hdfs/bin/sentry-hdfs-service/pom.xml b/sentry-hdfs/bin/sentry-hdfs-service/pom.xml deleted file mode 100644 index 74c4f20..0000000 --- a/sentry-hdfs/bin/sentry-hdfs-service/pom.xml +++ /dev/null @@ -1,108 +0,0 @@ - - - - 4.0.0 - - org.apache.sentry - sentry-hdfs - 1.5.0-incubating-SNAPSHOT - - - sentry-hdfs-service - Sentry HDFS service - - - - org.apache.hadoop - hadoop-common - provided - - - junit - junit - test - - - log4j - log4j - - - org.apache.shiro - shiro-core - - - com.google.guava - guava - - - org.slf4j - slf4j-api - - - org.slf4j - slf4j-log4j12 - - - org.apache.sentry - sentry-hdfs-common - - - org.apache.sentry - sentry-provider-db - - - org.apache.sentry - sentry-service-client - - - org.apache.hive - hive-exec - ${hive.version} - - - org.apache.hive - hive-shims - ${hive.version} - - - org.apache.thrift - libfb303 - - - org.apache.thrift - libthrift - - - ant-contrib - ant-contrib - - - org.apache.hadoop - hadoop-minikdc - test - - - org.apache.hive - hive-metastore - ${hive.version} - - - - - http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/pom.xml ---------------------------------------------------------------------- diff --git a/sentry-hdfs/pom.xml b/sentry-hdfs/pom.xml index 4c4691f..1455235 100644 --- a/sentry-hdfs/pom.xml +++ b/sentry-hdfs/pom.xml @@ -28,11 
+28,11 @@ limitations under the License. sentry-hdfs Sentry HDFS pom - sentry-hdfs-common sentry-hdfs-service sentry-hdfs-namenode-plugin + sentry-hdfs-dist http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java ---------------------------------------------------------------------- diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java deleted file mode 100644 index c0358f4..0000000 --- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.sentry.hdfs; - -import java.util.ArrayList; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Table; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class ExtendedMetastoreClient implements MetastoreClient { - - private static Logger LOG = LoggerFactory.getLogger(ExtendedMetastoreClient.class); - - private HiveMetaStoreClient client; - private final HiveConf hiveConf; - public ExtendedMetastoreClient(HiveConf hiveConf) { - this.hiveConf = hiveConf; - } - - @Override - public List getAllDatabases() { - List retList = new ArrayList(); - HiveMetaStoreClient client = getClient(); - if (client != null) { - try { - for (String dbName : client.getAllDatabases()) { - retList.add(client.getDatabase(dbName)); - } - } catch (Exception e) { - LOG.error("Could not get All Databases !!", e); - } - } - return retList; - } - - @Override - public List getAllTablesOfDatabase(Database db) { - List
<Table> getAllTablesOfDatabase(Database db) {
-    List<Table> retList = new ArrayList<Table>
(); - HiveMetaStoreClient client = getClient(); - if (client != null) { - try { - for (String tblName : client.getAllTables(db.getName())) { - retList.add(client.getTable(db.getName(), tblName)); - } - } catch (Exception e) { - LOG.error(String.format( - "Could not get Tables for '%s' !!", db.getName()), e); - } - } - return retList; - } - - @Override - public List listAllPartitions(Database db, Table tbl) { - HiveMetaStoreClient client = getClient(); - if (client != null) { - try { - return client.listPartitions(db.getName(), tbl.getTableName(), Short.MAX_VALUE); - } catch (Exception e) { - LOG.error(String.format( - "Could not get partitions for '%s'.'%s' !!", db.getName(), - tbl.getTableName()), e); - } - } - return new LinkedList(); - } - - private HiveMetaStoreClient getClient() { - if (client == null) { - try { - client = new HiveMetaStoreClient(hiveConf); - return client; - } catch (MetaException e) { - client = null; - LOG.error("Could not create metastore client !!", e); - return null; - } - } else { - return client; - } - } -} http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java ---------------------------------------------------------------------- diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java new file mode 100644 index 0000000..fa31a19 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java @@ -0,0 +1,212 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.sentry.hdfs;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import javax.security.auth.callback.CallbackHandler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.sentry.hdfs.service.thrift.SentryHDFSService;
+import org.apache.sentry.hdfs.service.thrift.SentryHDFSService.Client;
+import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse;
+import org.apache.sentry.hdfs.service.thrift.TPathsUpdate;
+import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate;
+import org.apache.sentry.hdfs.ServiceConstants.ClientConfig;
+import org.apache.sentry.hdfs.ServiceConstants.ServerConfig;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TMultiplexedProtocol;
+import org.apache.thrift.transport.TSaslClientTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+public class SentryHDFSServiceClient {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceClient.class);
+
+  public static final String SENTRY_HDFS_SERVICE_NAME = "SentryHDFSService";
+
+  public static class SentryAuthzUpdate {
+
+    private final List<PermissionsUpdate> permUpdates;
+    private final List<PathsUpdate> pathUpdates;
+
+    public SentryAuthzUpdate(List<PermissionsUpdate> permUpdates, List<PathsUpdate> pathUpdates) {
+      this.permUpdates = permUpdates;
+      this.pathUpdates = pathUpdates;
+    }
+
+    public List<PermissionsUpdate> getPermUpdates() {
+      return permUpdates;
+    }
+
+    public List<PathsUpdate> getPathUpdates() {
+      return pathUpdates;
+    }
+  }
+
+  /**
+   * This transport wraps the Sasl transports to set up the right UGI context for open().
+   */
+  public static class UgiSaslClientTransport extends TSaslClientTransport {
+    protected UserGroupInformation ugi = null;
+
+    public UgiSaslClientTransport(String mechanism, String authorizationId,
+        String protocol, String serverName, Map<String, String> props,
+        CallbackHandler cbh, TTransport transport, boolean wrapUgi)
+        throws IOException {
+      super(mechanism, authorizationId, protocol, serverName, props, cbh,
+          transport);
+      if (wrapUgi) {
+        ugi = UserGroupInformation.getLoginUser();
+      }
+    }
+
+    // Open the SASL transport using the current UserGroupInformation.
+    // This is needed to get the current login context stored.
+    @Override
+    public void open() throws TTransportException {
+      if (ugi == null) {
+        baseOpen();
+      } else {
+        try {
+          ugi.doAs(new PrivilegedExceptionAction<Void>() {
+            public Void run() throws TTransportException {
+              baseOpen();
+              return null;
+            }
+          });
+        } catch (IOException e) {
+          throw new TTransportException("Failed to open SASL transport", e);
+        } catch (InterruptedException e) {
+          throw new TTransportException(
+              "Interrupted while opening underlying transport", e);
+        }
+      }
+    }
+
+    private void baseOpen() throws TTransportException {
+      super.open();
+    }
+  }
+
+  private final Configuration conf;
+  private final InetSocketAddress serverAddress;
+  private final int connectionTimeout;
+  private boolean kerberos;
+  private TTransport transport;
+
+  private String[] serverPrincipalParts;
+  private Client client;
+
+  public SentryHDFSServiceClient(Configuration conf) throws IOException {
+    this.conf = conf;
+    Preconditions.checkNotNull(this.conf, "Configuration object cannot be null");
+    this.serverAddress = NetUtils.createSocketAddr(Preconditions.checkNotNull(
+        conf.get(ClientConfig.SERVER_RPC_ADDRESS), "Config key "
+            + ClientConfig.SERVER_RPC_ADDRESS + " is required"), conf.getInt(
+        ClientConfig.SERVER_RPC_PORT, ClientConfig.SERVER_RPC_PORT_DEFAULT));
+    this.connectionTimeout = conf.getInt(ClientConfig.SERVER_RPC_CONN_TIMEOUT,
+        ClientConfig.SERVER_RPC_CONN_TIMEOUT_DEFAULT);
+    kerberos = ClientConfig.SECURITY_MODE_KERBEROS.equalsIgnoreCase(
+        conf.get(ClientConfig.SECURITY_MODE, ClientConfig.SECURITY_MODE_KERBEROS).trim());
+    transport = new TSocket(serverAddress.getHostName(),
+        serverAddress.getPort(), connectionTimeout);
+    if (kerberos) {
+      String serverPrincipal = Preconditions.checkNotNull(
+          conf.get(ClientConfig.PRINCIPAL), ClientConfig.PRINCIPAL + " is required");
+
+      // Resolve server host in the same way as we are doing on server side
+      serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, serverAddress.getAddress());
+      LOGGER.info("Using server kerberos principal: " + serverPrincipal);
+
+      serverPrincipalParts = SaslRpcServer.splitKerberosName(serverPrincipal);
+      Preconditions.checkArgument(serverPrincipalParts.length == 3,
+          "Kerberos principal should have 3 parts: " + serverPrincipal);
+      boolean wrapUgi = "true".equalsIgnoreCase(conf
+          .get(ClientConfig.SECURITY_USE_UGI_TRANSPORT, "true"));
+      transport = new UgiSaslClientTransport(AuthMethod.KERBEROS.getMechanismName(),
+          null, serverPrincipalParts[0], serverPrincipalParts[1],
+          ClientConfig.SASL_PROPERTIES, null, transport, wrapUgi);
+    } else {
+      serverPrincipalParts = null;
+    }
+    try {
+      transport.open();
+    } catch (TTransportException e) {
+      throw new IOException("Transport exception while opening transport: " + e.getMessage(), e);
+    }
+    LOGGER.info("Successfully opened transport: " + transport + " to " + serverAddress);
+    TMultiplexedProtocol protocol = new TMultiplexedProtocol(
+        new TCompactProtocol(transport),
+        SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME);
+    client = new SentryHDFSService.Client(protocol);
+    LOGGER.info("Successfully created client");
+  }
+
+  public synchronized void notifyHMSUpdate(PathsUpdate update)
+      throws IOException {
+    try {
+      client.handle_hms_notification(update.getThriftObject());
+    } catch (Exception e) {
+      throw new IOException("Thrift Exception occurred !!", e);
+    }
+  }
+
+  public synchronized SentryAuthzUpdate getAllUpdatesFrom(long permSeqNum, long pathSeqNum)
+      throws IOException {
+    SentryAuthzUpdate retVal = new SentryAuthzUpdate(new LinkedList<PermissionsUpdate>(),
+        new LinkedList<PathsUpdate>());
+    try {
+      TAuthzUpdateResponse sentryUpdates = client.get_all_authz_updates_from(permSeqNum, pathSeqNum);
+      if (sentryUpdates.getAuthzPathUpdate() != null) {
+        for (TPathsUpdate pathsUpdate : sentryUpdates.getAuthzPathUpdate()) {
+          retVal.getPathUpdates().add(new PathsUpdate(pathsUpdate));
+        }
+      }
+      if (sentryUpdates.getAuthzPermUpdate() != null) {
+        for (TPermissionsUpdate permsUpdate : sentryUpdates.getAuthzPermUpdate()) {
+          retVal.getPermUpdates().add(new PermissionsUpdate(permsUpdate));
+        }
+      }
+    } catch (Exception e) {
+      throw new IOException("Thrift Exception occurred !!", e);
+    }
+    return retVal;
+  }
+
+  public void close() {
+    if (transport != null) {
+      transport.close();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
new file mode 100644
index 0000000..397a534
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.security.sasl.Sasl;
+
+import com.google.common.collect.ImmutableMap;
+
+public class ServiceConstants {
+
+  private static final ImmutableMap<String, String> SASL_PROPERTIES;
+
+  static {
+    Map<String, String> saslProps = new HashMap<String, String>();
+    saslProps.put(Sasl.SERVER_AUTH, "true");
+    saslProps.put(Sasl.QOP, "auth-conf");
+    SASL_PROPERTIES = ImmutableMap.copyOf(saslProps);
+  }
+
+  public static class ServerConfig {
+    public static final ImmutableMap<String, String> SASL_PROPERTIES = ServiceConstants.SASL_PROPERTIES;
+    /**
+     * This configuration parameter is only meant to be used for testing purposes.
+ */ + public static final String SENTRY_HDFS_INTEGRATION_PATH_PREFIXES = "sentry.hdfs.integration.path.prefixes"; + public static final String[] SENTRY_HDFS_INTEGRATION_PATH_PREFIXES_DEFAULT = + new String[]{"/user/hive/warehouse"}; + + } + public static class ClientConfig { + public static final ImmutableMap SASL_PROPERTIES = ServiceConstants.SASL_PROPERTIES; + + public static final String SECURITY_MODE = "sentry.hdfs.service.security.mode"; + public static final String SECURITY_MODE_KERBEROS = "kerberos"; + public static final String SECURITY_MODE_NONE = "none"; + public static final String SECURITY_USE_UGI_TRANSPORT = "sentry.hdfs.service.security.use.ugi"; + public static final String PRINCIPAL = "sentry.hdfs.service.server.principal"; + + public static final String SERVER_RPC_PORT = "sentry.hdfs.service.client.server.rpc-port"; + public static final int SERVER_RPC_PORT_DEFAULT = 8038; + + public static final String SERVER_RPC_ADDRESS = "sentry.hdfs.service.client.server.rpc-address"; + + public static final String SERVER_RPC_CONN_TIMEOUT = "sentry.hdfs.service.client.server.rpc-connection-timeout"; + public static final int SERVER_RPC_CONN_TIMEOUT_DEFAULT = 200000; + } + +} http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-dist/pom.xml ---------------------------------------------------------------------- diff --git a/sentry-hdfs/sentry-hdfs-dist/pom.xml b/sentry-hdfs/sentry-hdfs-dist/pom.xml new file mode 100644 index 0000000..91b8248 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-dist/pom.xml @@ -0,0 +1,72 @@ + + + + + 4.0.0 + + + org.apache.sentry + sentry-hdfs + 1.5.0-incubating-SNAPSHOT + + + sentry-hdfs-dist + Sentry HDFS Dist + + + + org.apache.sentry + sentry-hdfs-common + + + org.apache.sentry + sentry-hdfs-service + + + org.apache.sentry + sentry-hdfs-namenode-plugin + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + 2.4.1 + + + assemble + package + + single + + false + + sentry-hdfs + + src/main/assembly/all-jar.xml + + + + + + + + + http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-dist/src/main/assembly/all-jar.xml ---------------------------------------------------------------------- diff --git a/sentry-hdfs/sentry-hdfs-dist/src/main/assembly/all-jar.xml b/sentry-hdfs/sentry-hdfs-dist/src/main/assembly/all-jar.xml new file mode 100644 index 0000000..8db709b --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-dist/src/main/assembly/all-jar.xml @@ -0,0 +1,18 @@ + + ${project.version} + + jar + + + false + + + + true + false + + + http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java ---------------------------------------------------------------------- diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java new file mode 100644 index 0000000..c0358f4 --- /dev/null +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.hdfs; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Table; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ExtendedMetastoreClient implements MetastoreClient { + + private static Logger LOG = LoggerFactory.getLogger(ExtendedMetastoreClient.class); + + private HiveMetaStoreClient client; + private final HiveConf hiveConf; + public ExtendedMetastoreClient(HiveConf hiveConf) { + this.hiveConf = hiveConf; + } + + @Override + public List getAllDatabases() { + List retList = new ArrayList(); + HiveMetaStoreClient client = getClient(); + if (client != null) { + try { + for (String dbName : client.getAllDatabases()) { + retList.add(client.getDatabase(dbName)); + } + } catch (Exception e) { + LOG.error("Could not get All Databases !!", e); + } + } + return retList; + } + + @Override + public List
<Table> getAllTablesOfDatabase(Database db) {
+    List<Table> retList = new ArrayList<Table>
(); + HiveMetaStoreClient client = getClient(); + if (client != null) { + try { + for (String tblName : client.getAllTables(db.getName())) { + retList.add(client.getTable(db.getName(), tblName)); + } + } catch (Exception e) { + LOG.error(String.format( + "Could not get Tables for '%s' !!", db.getName()), e); + } + } + return retList; + } + + @Override + public List listAllPartitions(Database db, Table tbl) { + HiveMetaStoreClient client = getClient(); + if (client != null) { + try { + return client.listPartitions(db.getName(), tbl.getTableName(), Short.MAX_VALUE); + } catch (Exception e) { + LOG.error(String.format( + "Could not get partitions for '%s'.'%s' !!", db.getName(), + tbl.getTableName()), e); + } + } + return new LinkedList(); + } + + private HiveMetaStoreClient getClient() { + if (client == null) { + try { + client = new HiveMetaStoreClient(hiveConf); + return client; + } catch (MetaException e) { + client = null; + LOG.error("Could not create metastore client !!", e); + return null; + } + } else { + return client; + } + } +} http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java ---------------------------------------------------------------------- diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java deleted file mode 100644 index 2b1b554..0000000 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java +++ /dev/null @@ -1,210 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.sentry.hdfs; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -import javax.security.auth.callback.CallbackHandler; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.SaslRpcServer; -import org.apache.hadoop.security.SaslRpcServer.AuthMethod; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.sentry.hdfs.service.thrift.SentryHDFSService; -import org.apache.sentry.hdfs.service.thrift.SentryHDFSService.Client; -import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse; -import org.apache.sentry.hdfs.service.thrift.TPathsUpdate; -import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate; -import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig; -import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; -import org.apache.thrift.protocol.TCompactProtocol; -import org.apache.thrift.protocol.TMultiplexedProtocol; -import org.apache.thrift.transport.TSaslClientTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.base.Preconditions; - -public class SentryHDFSServiceClient { - - private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceClient.class); - - public static class SentryAuthzUpdate { - - private final List permUpdates; - private final List pathUpdates; - - public SentryAuthzUpdate(List permUpdates, List pathUpdates) { - this.permUpdates = permUpdates; - this.pathUpdates = pathUpdates; - } - - public List getPermUpdates() { - return permUpdates; - } - - public List getPathUpdates() { - return pathUpdates; - } - } - - /** - * This transport wraps the Sasl transports to set up the right UGI context for open(). 
- */ - public static class UgiSaslClientTransport extends TSaslClientTransport { - protected UserGroupInformation ugi = null; - - public UgiSaslClientTransport(String mechanism, String authorizationId, - String protocol, String serverName, Map props, - CallbackHandler cbh, TTransport transport, boolean wrapUgi) - throws IOException { - super(mechanism, authorizationId, protocol, serverName, props, cbh, - transport); - if (wrapUgi) { - ugi = UserGroupInformation.getLoginUser(); - } - } - - // open the SASL transport with using the current UserGroupInformation - // This is needed to get the current login context stored - @Override - public void open() throws TTransportException { - if (ugi == null) { - baseOpen(); - } else { - try { - ugi.doAs(new PrivilegedExceptionAction() { - public Void run() throws TTransportException { - baseOpen(); - return null; - } - }); - } catch (IOException e) { - throw new TTransportException("Failed to open SASL transport", e); - } catch (InterruptedException e) { - throw new TTransportException( - "Interrupted while opening underlying transport", e); - } - } - } - - private void baseOpen() throws TTransportException { - super.open(); - } - } - - private final Configuration conf; - private final InetSocketAddress serverAddress; - private final int connectionTimeout; - private boolean kerberos; - private TTransport transport; - - private String[] serverPrincipalParts; - private Client client; - - public SentryHDFSServiceClient(Configuration conf) throws IOException { - this.conf = conf; - Preconditions.checkNotNull(this.conf, "Configuration object cannot be null"); - this.serverAddress = NetUtils.createSocketAddr(Preconditions.checkNotNull( - conf.get(ClientConfig.SERVER_RPC_ADDRESS), "Config key " - + ClientConfig.SERVER_RPC_ADDRESS + " is required"), conf.getInt( - ClientConfig.SERVER_RPC_PORT, ClientConfig.SERVER_RPC_PORT_DEFAULT)); - this.connectionTimeout = conf.getInt(ClientConfig.SERVER_RPC_CONN_TIMEOUT, - ClientConfig.SERVER_RPC_CONN_TIMEOUT_DEFAULT); - kerberos = ServerConfig.SECURITY_MODE_KERBEROS.equalsIgnoreCase( - conf.get(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_KERBEROS).trim()); - transport = new TSocket(serverAddress.getHostName(), - serverAddress.getPort(), connectionTimeout); - if (kerberos) { - String serverPrincipal = Preconditions.checkNotNull( - conf.get(ServerConfig.PRINCIPAL), ServerConfig.PRINCIPAL + " is required"); - - // Resolve server host in the same way as we are doing on server side - serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, serverAddress.getAddress()); - LOGGER.info("Using server kerberos principal: " + serverPrincipal); - - serverPrincipalParts = SaslRpcServer.splitKerberosName(serverPrincipal); - Preconditions.checkArgument(serverPrincipalParts.length == 3, - "Kerberos principal should have 3 parts: " + serverPrincipal); - boolean wrapUgi = "true".equalsIgnoreCase(conf - .get(ServerConfig.SECURITY_USE_UGI_TRANSPORT, "true")); - transport = new UgiSaslClientTransport(AuthMethod.KERBEROS.getMechanismName(), - null, serverPrincipalParts[0], serverPrincipalParts[1], - ClientConfig.SASL_PROPERTIES, null, transport, wrapUgi); - } else { - serverPrincipalParts = null; - } - try { - transport.open(); - } catch (TTransportException e) { - throw new IOException("Transport exception while opening transport: " + e.getMessage(), e); - } - LOGGER.info("Successfully opened transport: " + transport + " to " + serverAddress); - TMultiplexedProtocol protocol = new TMultiplexedProtocol( - new 
TCompactProtocol(transport), - SentryHDFSServiceProcessor.SENTRY_HDFS_SERVICE_NAME); - client = new SentryHDFSService.Client(protocol); - LOGGER.info("Successfully created client"); - } - - public synchronized void notifyHMSUpdate(PathsUpdate update) - throws IOException { - try { - client.handle_hms_notification(update.getThriftObject()); - } catch (Exception e) { - throw new IOException("Thrift Exception occurred !!", e); - } - } - - public synchronized SentryAuthzUpdate getAllUpdatesFrom(long permSeqNum, long pathSeqNum) - throws IOException { - SentryAuthzUpdate retVal = new SentryAuthzUpdate(new LinkedList(), new LinkedList()); - try { - TAuthzUpdateResponse sentryUpdates = client.get_all_authz_updates_from(permSeqNum, pathSeqNum); - if (sentryUpdates.getAuthzPathUpdate() != null) { - for (TPathsUpdate pathsUpdate : sentryUpdates.getAuthzPathUpdate()) { - retVal.getPathUpdates().add(new PathsUpdate(pathsUpdate)); - } - } - if (sentryUpdates.getAuthzPermUpdate() != null) { - for (TPermissionsUpdate permsUpdate : sentryUpdates.getAuthzPermUpdate()) { - retVal.getPermUpdates().add(new PermissionsUpdate(permsUpdate)); - } - } - } catch (Exception e) { - throw new IOException("Thrift Exception occurred !!", e); - } - return retVal; - } - - public void close() { - if (transport != null) { - transport.close(); - } - } -} http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java ---------------------------------------------------------------------- diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java index 1198619..ab07494 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java @@ -34,8 +34,6 @@ public class SentryHDFSServiceProcessor implements SentryHDFSService.Iface { private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceProcessor.class); - public static final String SENTRY_HDFS_SERVICE_NAME = "SentryHDFSService"; - @Override public TAuthzUpdateResponse get_all_authz_updates_from(long permSeqNum, long pathSeqNum) throws TException { http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java ---------------------------------------------------------------------- diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java index 81168b2..c45c294 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java @@ -98,7 +98,7 @@ public class SentryHDFSServiceProcessorFactory extends ProcessorFactory{ new SentryHDFSServiceProcessor(); TProcessor processor = new ProcessorWrapper(sentryServiceHandler); multiplexedProcessor.registerProcessor( - SentryHDFSServiceProcessor.SENTRY_HDFS_SERVICE_NAME, processor); + SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME, processor); return true; } } 
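[Editor's note] The hunks above move SentryHDFSServiceClient into sentry-hdfs-common and hang the service-name constant off the client rather than the processor. A minimal usage sketch of the relocated client, assuming a Sentry service reachable on localhost at the default port and kerberos disabled; the class, methods, and config keys are the ones added by this commit, while the host, port, and sequence numbers are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.sentry.hdfs.SentryHDFSServiceClient;
    import org.apache.sentry.hdfs.SentryHDFSServiceClient.SentryAuthzUpdate;
    import org.apache.sentry.hdfs.ServiceConstants.ClientConfig;

    public class HdfsSyncPoller {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(false);
        // Keys defined in ServiceConstants.ClientConfig (added by this commit).
        conf.set(ClientConfig.SERVER_RPC_ADDRESS, "localhost");
        conf.setInt(ClientConfig.SERVER_RPC_PORT, 8038);
        conf.set(ClientConfig.SECURITY_MODE, ClientConfig.SECURITY_MODE_NONE);

        SentryHDFSServiceClient client = new SentryHDFSServiceClient(conf);
        try {
          // Illustrative sequence numbers: a real poller would pass the last
          // permission/path sequence numbers it has already processed.
          SentryAuthzUpdate updates = client.getAllUpdatesFrom(0, 0);
          System.out.println("perm updates: " + updates.getPermUpdates().size()
              + ", path updates: " + updates.getPathUpdates().size());
        } finally {
          client.close();
        }
      }
    }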
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java ---------------------------------------------------------------------- diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java index 262e893..5bb6d45 100644 --- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java +++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java @@ -44,7 +44,7 @@ import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleRevokePrivil import org.apache.sentry.provider.db.service.thrift.TDropSentryRoleRequest; import org.apache.sentry.provider.db.service.thrift.TSentryGroup; import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege; -import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; +import org.apache.sentry.hdfs.ServiceConstants.ServerConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -99,7 +99,8 @@ public class SentryPlugin implements SentryPolicyStorePlugin { HiveConf hiveConf = new HiveConf(conf, Configuration.class); final MetastoreClient hmsClient = new ExtendedMetastoreClient(hiveConf); final String[] pathPrefixes = conf - .getStrings(ServerConfig.SENTRY_HDFS_INTEGRATION_PATH_PREFIXES, new String[]{"/"}); + .getStrings(ServerConfig.SENTRY_HDFS_INTEGRATION_PATH_PREFIXES, + ServerConfig.SENTRY_HDFS_INTEGRATION_PATH_PREFIXES_DEFAULT); pathsUpdater = new UpdateForwarder(new UpdateableAuthzPaths( pathPrefixes), createHMSImageRetriever(pathPrefixes, hmsClient), 100); PermImageRetriever permImageRetriever = new PermImageRetriever(sentryStore); http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-tests/sentry-tests-hive/pom.xml ---------------------------------------------------------------------- diff --git a/sentry-tests/sentry-tests-hive/pom.xml b/sentry-tests/sentry-tests-hive/pom.xml index 769afb5..fde850f 100644 --- a/sentry-tests/sentry-tests-hive/pom.xml +++ b/sentry-tests/sentry-tests-hive/pom.xml @@ -227,6 +227,21 @@ limitations under the License. org.apache.sentry + sentry-hdfs-common + test + + + org.apache.sentry + sentry-hdfs-service + test + + + org.apache.sentry + sentry-hdfs-namenode-plugin + test + + + org.apache.sentry sentry-policy-db test http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java ---------------------------------------------------------------------- diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java new file mode 100644 index 0000000..41f8af8 --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java @@ -0,0 +1,400 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.sentry.tests.e2e.hdfs; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.net.ServerSocket; +import java.net.URL; +import java.security.PrivilegedExceptionAction; +import java.sql.Connection; +import java.sql.Statement; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.security.GroupMappingServiceProvider; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl; +import org.apache.sentry.binding.hive.conf.HiveAuthzConf; +import org.apache.sentry.hdfs.SentryAuthorizationProvider; +import org.apache.sentry.provider.db.SimpleDBProviderBackend; +import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider; +import org.apache.sentry.provider.file.PolicyFile; +import org.apache.sentry.service.thrift.SentryService; +import org.apache.sentry.service.thrift.SentryServiceFactory; +import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig; +import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig; +import org.apache.sentry.tests.e2e.hive.StaticUserGroup; +import org.apache.sentry.tests.e2e.hive.fs.MiniDFS; +import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory; +import org.apache.sentry.tests.e2e.hive.hiveserver.InternalHiveServer; +import org.apache.sentry.tests.e2e.hive.hiveserver.InternalMetastoreServer; +import org.fest.reflect.core.Reflection; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.io.Files; +import com.google.common.io.Resources; + +public class TestHDFSIntegration { + + // mock user group mapping that maps user to same group + public static class PseudoGroupMappingService implements + GroupMappingServiceProvider { + + @Override + public List getGroups(String user) { + return Lists.newArrayList(user, System.getProperty("user.name")); + } + + @Override + public void cacheGroupsRefresh() throws IOException { + // no-op + } + + @Override + public void cacheGroupsAdd(List groups) throws IOException { + // no-op + } + } + + private MiniDFSCluster miniDFS; + private InternalHiveServer hiveServer2; + private InternalMetastoreServer metastore; + private String 
fsURI; + private int hmsPort; + private int sentryPort; + private File baseDir; + private UserGroupInformation admin; + + protected static File assertCreateDir(File dir) { + if(!dir.isDirectory()) { + Assert.assertTrue("Failed creating " + dir, dir.mkdirs()); + } + return dir; + } + + private static int findPort() throws IOException { + ServerSocket socket = new ServerSocket(0); + int port = socket.getLocalPort(); + socket.close(); + return port; + } + + private static void startSentryService(SentryService sentryServer) throws Exception { + sentryServer.start(); + final long start = System.currentTimeMillis(); + while (!sentryServer.isRunning()) { + Thread.sleep(1000); + if (System.currentTimeMillis() - start > 60000L) { + throw new TimeoutException("Server did not start after 60 seconds"); + } + } + } + + @Before + public void setup() throws Exception { + Class.forName("org.apache.hive.jdbc.HiveDriver"); + baseDir = Files.createTempDir(); + final File policyFileLocation = new File(baseDir, HiveServerFactory.AUTHZ_PROVIDER_FILENAME); + PolicyFile policyFile = PolicyFile.setAdminOnServer1("hive") + .setUserGroupMapping(StaticUserGroup.getStaticMapping()); + policyFile.write(policyFileLocation); + + admin = UserGroupInformation.createUserForTesting( + System.getProperty("user.name"), new String[] { "supergroup" }); + + UserGroupInformation hiveUgi = UserGroupInformation.createUserForTesting( + "hive", new String[] { "hive" }); + + hiveUgi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + Configuration sentryConf = new Configuration(false); + Map properties = Maps.newHashMap(); + properties.put(HiveServerFactory.AUTHZ_PROVIDER_BACKEND, + SimpleDBProviderBackend.class.getName()); + properties.put(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY.varname, + SentryHiveAuthorizationTaskFactoryImpl.class.getName()); + properties + .put(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS.varname, "2"); + properties.put("hive.metastore.uris", "thrift://localhost:" + hmsPort); + properties.put(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE); + properties.put("sentry.hive.testing.mode", "true"); + properties.put(ServerConfig.ADMIN_GROUPS, "hive,admin"); + properties.put(ServerConfig.RPC_ADDRESS, "localhost"); + properties.put(ServerConfig.RPC_PORT, String.valueOf(0)); + properties.put(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false"); + + properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING); + properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFileLocation.getPath()); + properties.put(ServerConfig.SENTRY_STORE_JDBC_URL, + "jdbc:derby:;databaseName=" + baseDir.getPath() + + "/sentrystore_db;create=true"); + properties.put("sentry.service.processor.factories", + "org.apache.sentry.provider.db.service.thrift.SentryPolicyStoreProcessorFactory,org.apache.sentry.hdfs.SentryHDFSServiceProcessorFactory"); + properties.put("sentry.policy.store.plugins", "org.apache.sentry.hdfs.SentryPlugin"); + properties.put(ServerConfig.RPC_MIN_THREADS, "3"); + for (Map.Entry entry : properties.entrySet()) { + sentryConf.set(entry.getKey(), entry.getValue()); + } + SentryService sentryServer = new SentryServiceFactory().create(sentryConf); + properties.put(ClientConfig.SERVER_RPC_ADDRESS, sentryServer.getAddress() + .getHostName()); + sentryConf.set(ClientConfig.SERVER_RPC_ADDRESS, sentryServer.getAddress() + .getHostName()); + properties.put(ClientConfig.SERVER_RPC_PORT, + 
+
+    admin.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
+        Configuration conf = new HdfsConfiguration();
+        conf.set(DFSConfigKeys.DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
+            SentryAuthorizationProvider.class.getName());
+        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+
+        File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
+        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
+        conf.set("hadoop.security.group.mapping",
+            MiniDFS.PseudoGroupMappingService.class.getName());
+        Configuration.addDefaultResource("test.xml");
+
+        conf.set("sentry.authorization-provider.hdfs-path-prefixes", "/user/hive/warehouse");
+        conf.set("sentry.hdfs.service.security.mode", "none");
+        conf.set("sentry.hdfs.service.client.server.rpc-address", "localhost");
+        conf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
+        EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+        miniDFS = new MiniDFSCluster.Builder(conf).build();
+        Path tmpPath = new Path("/tmp");
+        Path hivePath = new Path("/user/hive");
+        Path warehousePath = new Path(hivePath, "warehouse");
+        miniDFS.getFileSystem().mkdirs(warehousePath);
+        boolean directory = miniDFS.getFileSystem().isDirectory(warehousePath);
+        System.out.println("\n\n Is dir :" + directory + "\n\n");
+        System.out.println("\n\n DefaultFS :" + miniDFS.getFileSystem().getUri() + "\n\n");
+        fsURI = miniDFS.getFileSystem().getUri().toString();
+        miniDFS.getFileSystem().mkdirs(tmpPath);
+        miniDFS.getFileSystem().setPermission(tmpPath, FsPermission.valueOf("drwxrwxrwx"));
+        miniDFS.getFileSystem().setOwner(hivePath, "hive", "hive");
+        miniDFS.getFileSystem().setOwner(warehousePath, "hive", "hive");
+        System.out.println("\n\n Owner :"
+            + miniDFS.getFileSystem().getFileStatus(warehousePath).getOwner()
+            + ", "
+            + miniDFS.getFileSystem().getFileStatus(warehousePath).getGroup()
+            + "\n\n");
+        System.out.println("\n\n Owner tmp :"
+            + miniDFS.getFileSystem().getFileStatus(tmpPath).getOwner() + ", "
+            + miniDFS.getFileSystem().getFileStatus(tmpPath).getGroup() + ", "
+            + miniDFS.getFileSystem().getFileStatus(tmpPath).getPermission() + ", "
+            + "\n\n");
+        return null;
+      }
+    });
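+
+    // With HDFS up, start the metastore and HiveServer2 as user "hive",
+    // backed by an embedded Derby database. The MetastorePlugin and the
+    // Sentry metastore listeners configured below are what feed metastore
+    // changes into the Sentry service.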
+
+    hiveUgi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        HiveConf hiveConf = new HiveConf();
+        hiveConf.set("sentry.metastore.plugins", "org.apache.sentry.hdfs.MetastorePlugin");
+        hiveConf.set("sentry.service.client.server.rpc-address", "localhost");
+        hiveConf.set("sentry.hdfs.service.client.server.rpc-address", "localhost");
+        hiveConf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
+        hiveConf.set("sentry.service.client.server.rpc-port", String.valueOf(sentryPort));
+        hiveConf.set("sentry.service.security.mode", "none");
+        hiveConf.set("sentry.hdfs.service.security.mode", "none");
+        hiveConf.set("sentry.hive.provider.backend", "org.apache.sentry.provider.db.SimpleDBProviderBackend");
+        hiveConf.set("sentry.provider", LocalGroupResourceAuthorizationProvider.class.getName());
+        hiveConf.set("sentry.hive.provider", LocalGroupResourceAuthorizationProvider.class.getName());
+        hiveConf.set("sentry.hive.provider.resource", policyFileLocation.getPath());
+        hiveConf.set("sentry.hive.testing.mode", "true");
+        hiveConf.set("sentry.hive.server", "server1");
+
+        hiveConf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING);
+        hiveConf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFileLocation.getPath());
+        hiveConf.set("fs.defaultFS", fsURI);
+        hiveConf.set("fs.default.name", fsURI);
+        hiveConf.set("hive.metastore.execute.setugi", "true");
+        hiveConf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:;databaseName=" + baseDir.getAbsolutePath() + "/metastore_db;create=true");
+        hiveConf.set("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver");
+        hiveConf.set("javax.jdo.option.ConnectionUserName", "hive");
+        hiveConf.set("javax.jdo.option.ConnectionPassword", "hive");
+        hiveConf.set("datanucleus.autoCreateSchema", "true");
+        hiveConf.set("datanucleus.fixedDatastore", "false");
+        hiveConf.set("datanucleus.autoStartMechanism", "SchemaTable");
+        hmsPort = findPort();
+        System.out.println("\n\n HMS port : " + hmsPort + "\n\n");
+        hiveConf.set("hive.metastore.uris", "thrift://localhost:" + hmsPort);
+        hiveConf.set("hive.metastore.pre.event.listeners", "org.apache.sentry.binding.metastore.MetastoreAuthzBinding");
+        hiveConf.set("hive.metastore.event.listeners", "org.apache.sentry.binding.metastore.SentryMetastorePostEventListener");
+        hiveConf.set("hive.security.authorization.task.factory", "org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl");
+        hiveConf.set("hive.server2.session.hook", "org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook");
+
+        HiveAuthzConf authzConf = new HiveAuthzConf(Resources.getResource("sentry-site.xml"));
+        authzConf.addResource(hiveConf);
+        File confDir = assertCreateDir(new File(baseDir, "etc"));
+        File accessSite = new File(confDir, HiveAuthzConf.AUTHZ_SITE_FILE);
+        OutputStream out = new FileOutputStream(accessSite);
+        authzConf.set("fs.defaultFS", fsURI);
+        authzConf.writeXml(out);
+        out.close();
+
+//        hiveConf.set("hive.sentry.conf.url", "file://" + accessSite.getCanonicalPath());
+        hiveConf.set("hive.sentry.conf.url", accessSite.getPath());
+        System.out.println("Sentry client file : " + accessSite.getPath());
+
+        File hiveSite = new File(confDir, "hive-site.xml");
+        hiveConf.set("hive.server2.enable.doAs", "false");
+        hiveConf.set(HiveAuthzConf.HIVE_SENTRY_CONF_URL, accessSite.toURI().toURL()
+            .toExternalForm());
+        out = new FileOutputStream(hiveSite);
+        hiveConf.writeXml(out);
+        out.close();
+
+        Reflection.staticField("hiveSiteURL")
+            .ofType(URL.class)
+            .in(HiveConf.class)
+            .set(hiveSite.toURI().toURL());
+
+        metastore = new InternalMetastoreServer(hiveConf);
+        metastore.start();
+
+        hiveServer2 = new InternalHiveServer(hiveConf);
+        hiveServer2.start();
+
+        return null;
+      }
+    });
+  }
+
+  @After
+  public void cleanUp() throws Exception {
+    try {
+      if (miniDFS != null) {
+        miniDFS.shutdown();
+      }
+    } finally {
+      try {
+        if (hiveServer2 != null) {
+          hiveServer2.shutdown();
+        }
+      } finally {
+        if (metastore != null) {
+          metastore.shutdown();
+        }
+      }
+    }
+  }
+
+//  public Connection createConnection(String username) throws Exception {
+//    String password = username;
+//    Connection connection = hiveServer2.createConnection(username, password);
+//    assertNotNull("Connection is null", connection);
+//    assertFalse("Connection should not be closed", connection.isClosed());
+//    Statement statement = connection.createStatement();
+//    statement.close();
+//    return connection;
+//  }
+//
+//  public Statement createStatement(Connection connection)
+//      throws Exception {
+//    Statement statement = connection.createStatement();
+//    assertNotNull("Statement is null", statement);
+//    return statement;
+//  }
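+
+  // End-to-end check: granting SELECT on table p1 to a role in group
+  // "hbase" should surface as a group ACL entry on the table's warehouse
+  // directory, and revoking it should remove the entry again. The
+  // one-second sleeps give the grant/revoke time to propagate from the
+  // Sentry service to the NameNode.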
+  @Test
+  public void testSimple() throws Exception {
+    Connection conn = hiveServer2.createConnection("hive", "hive");
+    Statement stmt = conn.createStatement();
+    stmt.execute("create role admin_role");
+    stmt.execute("grant role admin_role to group hive");
+    stmt.execute("grant all on server server1 to role admin_role");
+    stmt.execute("create table p1 (s string) partitioned by (month int, day int)");
+    stmt.execute("alter table p1 add partition (month=1, day=1)");
+    stmt.execute("alter table p1 add partition (month=1, day=2)");
+    stmt.execute("alter table p1 add partition (month=2, day=1)");
+    stmt.execute("alter table p1 add partition (month=2, day=2)");
+    AclStatus aclStatus = miniDFS.getFileSystem().getAclStatus(new Path("/user/hive/warehouse/p1"));
+    Set<String> groups = new HashSet<String>();
+    for (AclEntry ent : aclStatus.getEntries()) {
+      if (ent.getType().equals(AclEntryType.GROUP)) {
+        groups.add(ent.getName());
+      }
+    }
+    System.out.println("Final acls [" + aclStatus + "]");
+    Assert.assertFalse(groups.contains("hbase"));
+
+    stmt.execute("create role p1_admin");
+    stmt.execute("grant role p1_admin to group hbase");
+    stmt.execute("grant select on table p1 to role p1_admin");
+    Thread.sleep(1000);
+    aclStatus = miniDFS.getFileSystem().getAclStatus(new Path("/user/hive/warehouse/p1"));
+    groups = new HashSet<String>();
+    for (AclEntry ent : aclStatus.getEntries()) {
+      if (ent.getType().equals(AclEntryType.GROUP)) {
+        groups.add(ent.getName());
+      }
+    }
+    Assert.assertTrue(groups.contains("hbase"));
+
+    stmt.execute("revoke select on table p1 from role p1_admin");
+    Thread.sleep(1000);
+    aclStatus = miniDFS.getFileSystem().getAclStatus(new Path("/user/hive/warehouse/p1"));
+    groups = new HashSet<String>();
+    for (AclEntry ent : aclStatus.getEntries()) {
+      if (ent.getType().equals(AclEntryType.GROUP)) {
+        groups.add(ent.getName());
+      }
+    }
+    Assert.assertFalse(groups.contains("hbase"));
+  }
+}