sentry-commits mailing list archives

From sra...@apache.org
Subject [2/2] git commit: SENTRY-432: Synchronization of HDFS permissions with Sentry permissions: Refactoring and e2e tests (Arun Suresh via Sravya Tirukkovalur)
Date Thu, 16 Oct 2014 20:22:29 GMT
SENTRY-432: Synchronization of HDFS permissions with Sentry permissions: Refactoring and e2e tests (Arun Suresh via Sravya Tirukkovalur)


Project: http://git-wip-us.apache.org/repos/asf/incubator-sentry/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-sentry/commit/c059d3d7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-sentry/tree/c059d3d7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-sentry/diff/c059d3d7

Branch: refs/heads/sentry-hdfs-plugin
Commit: c059d3d76b9dde540594be68212396f8908e23fb
Parents: 78787d6
Author: Sravya Tirukkovalur <sravya@clouera.com>
Authored: Thu Oct 16 13:21:03 2014 -0700
Committer: Sravya Tirukkovalur <sravya@clouera.com>
Committed: Thu Oct 16 13:22:02 2014 -0700

----------------------------------------------------------------------
 pom.xml                                         |   6 +
 sentry-dist/pom.xml                             |  13 +-
 sentry-dist/src/main/assembly/bin.xml           |  12 +-
 sentry-dist/src/main/assembly/sentry-hdfs.xml   |  47 +++
 sentry-hdfs/bin/pom.xml                         |  38 --
 sentry-hdfs/bin/sentry-hdfs-common/.gitignore   |   1 -
 sentry-hdfs/bin/sentry-hdfs-common/pom.xml      | 148 -------
 .../main/resources/sentry_hdfs_service.thrift   |  87 ----
 .../src/test/resources/hdfs-sentry.xml          |  22 -
 .../bin/sentry-hdfs-namenode-plugin/pom.xml     |  74 ----
 .../src/test/resources/hdfs-sentry.xml          |  33 --
 sentry-hdfs/bin/sentry-hdfs-service/pom.xml     | 108 -----
 sentry-hdfs/pom.xml                             |   2 +-
 .../sentry/hdfs/ExtendedMetastoreClient.java    | 104 -----
 .../sentry/hdfs/SentryHDFSServiceClient.java    | 212 ++++++++++
 .../apache/sentry/hdfs/ServiceConstants.java    |  66 +++
 sentry-hdfs/sentry-hdfs-dist/pom.xml            |  72 ++++
 .../src/main/assembly/all-jar.xml               |  18 +
 .../sentry/hdfs/ExtendedMetastoreClient.java    | 104 +++++
 .../sentry/hdfs/SentryHDFSServiceClient.java    | 210 ----------
 .../sentry/hdfs/SentryHDFSServiceProcessor.java |   2 -
 .../hdfs/SentryHDFSServiceProcessorFactory.java |   2 +-
 .../org/apache/sentry/hdfs/SentryPlugin.java    |   5 +-
 sentry-tests/sentry-tests-hive/pom.xml          |  15 +
 .../tests/e2e/hdfs/TestHDFSIntegration.java     | 400 +++++++++++++++++++
 .../sentry/tests/e2e/hive/StaticUserGroup.java  |   2 +
 26 files changed, 959 insertions(+), 844 deletions(-)
----------------------------------------------------------------------
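
In short: this change deletes the stray sentry-hdfs/bin/ copies of the module poms and resources, moves SentryHDFSServiceClient from sentry-hdfs-service into sentry-hdfs-common (giving it its own ServiceConstants), moves ExtendedMetastoreClient the opposite way into sentry-hdfs-service, adds a sentry-hdfs-dist module that repacks the three HDFS plugin jars into a single jar, wires that artifact into the binary distribution and a new sentry-hdfs tarball, and adds the TestHDFSIntegration end-to-end test.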


http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 615de75..952d702 100644
--- a/pom.xml
+++ b/pom.xml
@@ -360,6 +360,11 @@ limitations under the License.
       </dependency>
       <dependency>
         <groupId>org.apache.sentry</groupId>
+        <artifactId>sentry-hdfs-dist</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.sentry</groupId>
         <artifactId>sentry-provider-cache</artifactId>
         <version>${project.version}</version>
       </dependency>
@@ -541,6 +546,7 @@ limitations under the License.
                   <exclude>**/.metadata/</exclude>
                   <!-- Maven working directory -->
                   <exclude>**/target/</exclude>
+                  <exclude>**/assembly/</exclude>
                   <!-- Pre commit testing generated files -->
                   <exclude>maven-repo/</exclude>
                   <exclude>test-output/</exclude>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-dist/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-dist/pom.xml b/sentry-dist/pom.xml
index c4aa7a2..c720cf0 100644
--- a/sentry-dist/pom.xml
+++ b/sentry-dist/pom.xml
@@ -64,18 +64,6 @@ limitations under the License.
     </dependency>
     <dependency>
       <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-hdfs-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-hdfs-service</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-hdfs-namenode-plugin</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
       <artifactId>sentry-service-client</artifactId>
     </dependency>
     <dependency>
@@ -109,6 +97,7 @@ limitations under the License.
               <descriptors>
                 <descriptor>src/main/assembly/src.xml</descriptor>
                 <descriptor>src/main/assembly/bin.xml</descriptor>
+                <descriptor>src/main/assembly/sentry-hdfs.xml</descriptor>
               </descriptors>
             </configuration>
           </execution>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-dist/src/main/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/sentry-dist/src/main/assembly/bin.xml b/sentry-dist/src/main/assembly/bin.xml
index 258e63c..6b95a3c 100644
--- a/sentry-dist/src/main/assembly/bin.xml
+++ b/sentry-dist/src/main/assembly/bin.xml
@@ -57,7 +57,6 @@
         <include>com.jolbox:bonecp</include>
         <include>org.apache.hive:hive-beeline</include>
         <include>org.apache.derby:derby</include>
-        <include>org.apache.derby:derby</include>
       </includes>
     </dependencySet>
   </dependencySets>
@@ -80,6 +79,7 @@
         <exclude>sentry-provider/**</exclude>
         <exclude>sentry-policy/**</exclude>
         <exclude>sentry-tests/**</exclude>
+        <exclude>sentry-hdfs/**</exclude>
       </excludes>
 
       <includes>
@@ -95,6 +95,16 @@
       <outputDirectory>/</outputDirectory>
     </fileSet>
     <fileSet>
+      <directory>${project.parent.basedir}/sentry-hdfs/sentry-hdfs-dist/target</directory>
+      <includes>
+        <include>sentry-hdfs-*.jar</include>
+      </includes>
+      <excludes>
+        <exclude>sentry-hdfs-dist-*.jar</exclude>
+      </excludes>
+      <outputDirectory>lib</outputDirectory>
+    </fileSet>
+    <fileSet>
       <directory>${project.parent.basedir}/sentry-provider/sentry-provider-db/src/main/resources</directory>
       <includes>
         <include>**/*</include>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-dist/src/main/assembly/sentry-hdfs.xml
----------------------------------------------------------------------
diff --git a/sentry-dist/src/main/assembly/sentry-hdfs.xml b/sentry-dist/src/main/assembly/sentry-hdfs.xml
new file mode 100644
index 0000000..8d85d8f
--- /dev/null
+++ b/sentry-dist/src/main/assembly/sentry-hdfs.xml
@@ -0,0 +1,47 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<assembly
+  xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
+
+  <id>hdfs</id>
+
+  <formats>
+    <format>tar.gz</format>
+  </formats>
+
+  <baseDirectory>sentry-hdfs-${project.version}</baseDirectory>
+
+  <fileSets>
+    <fileSet>
+      <directory>${project.parent.basedir}/sentry-hdfs/sentry-hdfs-dist/target</directory>
+      <includes>
+        <include>sentry-hdfs-*.jar</include>
+      </includes>
+      <excludes>
+        <exclude>sentry-hdfs-dist-*.jar</exclude>
+      </excludes>
+      <outputDirectory>/</outputDirectory>
+    </fileSet>
+  </fileSets>
+
+</assembly>
+
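
Given the hdfs assembly id and tar.gz format above, sentry-dist should now also emit a *-hdfs.tar.gz that unpacks to sentry-hdfs-${project.version}/ and carries the combined jar built by the new sentry-hdfs-dist module (see below); the exclude filters out the thin sentry-hdfs-dist-*.jar module jar that sits in the same target directory.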

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/pom.xml b/sentry-hdfs/bin/pom.xml
deleted file mode 100644
index 4c4691f..0000000
--- a/sentry-hdfs/bin/pom.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.apache.sentry</groupId>
-    <artifactId>sentry</artifactId>
-    <version>1.5.0-incubating-SNAPSHOT</version>
-  </parent>
-
-  <artifactId>sentry-hdfs</artifactId>
-  <name>Sentry HDFS</name>
-  <packaging>pom</packaging>
-
-  <modules>
-    <module>sentry-hdfs-common</module>
-    <module>sentry-hdfs-service</module>
-    <module>sentry-hdfs-namenode-plugin</module>
-  </modules>
-
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-common/.gitignore
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/sentry-hdfs-common/.gitignore b/sentry-hdfs/bin/sentry-hdfs-common/.gitignore
deleted file mode 100644
index 1a28cd6..0000000
--- a/sentry-hdfs/bin/sentry-hdfs-common/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-sentry-hdfs/src/test/java/org/apache/sentry/hdfs/DummyAdapter.java

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-common/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/sentry-hdfs-common/pom.xml b/sentry-hdfs/bin/sentry-hdfs-common/pom.xml
deleted file mode 100644
index 511bc53..0000000
--- a/sentry-hdfs/bin/sentry-hdfs-common/pom.xml
+++ /dev/null
@@ -1,148 +0,0 @@
-<?xml version="1.0"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.sentry</groupId>
-    <artifactId>sentry-hdfs</artifactId>
-    <version>1.5.0-incubating-SNAPSHOT</version>
-  </parent>
-
-  <artifactId>sentry-hdfs-common</artifactId>
-  <name>Sentry HDFS Common</name>
-
-  <dependencies>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-minicluster</artifactId>
-      <version>2.5.0</version>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-metastore</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>2.5.0</version>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-  <build>
-    <sourceDirectory>${basedir}/src/main/java</sourceDirectory>
-    <testSourceDirectory>${basedir}/src/test/java</testSourceDirectory>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-source</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>src/gen/thrift/gen-javabean</source>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-  <profiles>
-    <profile>
-      <id>thriftif</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>generate-thrift-sources</id>
-                <phase>generate-sources</phase>
-                <configuration>
-                  <target>
-                    <taskdef name="for" classname="net.sf.antcontrib.logic.ForTask"
-                      classpathref="maven.plugin.classpath" />
-                    <property name="thrift.args" value="-I ${thrift.home} --gen java:beans,hashcode"/>
-                    <property name="thrift.gen.dir" value="${basedir}/src/gen/thrift"/>
-                    <delete dir="${thrift.gen.dir}"/>
-                    <mkdir dir="${thrift.gen.dir}"/>
-                    <for param="thrift.file">
-                      <path>
-                        <fileset dir="${basedir}/src/main/resources/" includes="**/*.thrift" />
-                      </path>
-                      <sequential>
-                        <echo message="Generating Thrift code for @{thrift.file}"/>
-                        <exec executable="${thrift.home}/bin/thrift"  failonerror="true" dir=".">
-                          <arg line="${thrift.args} -I ${basedir}/src/main/resources/ -o ${thrift.gen.dir} @{thrift.file} " />
-                        </exec>
-                      </sequential>
-                    </for>
-                  </target>
-                </configuration>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-enforcer-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>enforce-property</id>
-                <goals>
-                  <goal>enforce</goal>
-                </goals>
-                <configuration>
-                  <rules>
-                    <requireProperty>
-                      <property>thrift.home</property>
-                    </requireProperty>
-                  </rules>
-                  <fail>true</fail>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-
-</project>
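
For reference, the thriftif profile above (removed here along with the rest of the stray bin/ copy) regenerates the Java bindings under src/gen/thrift from the .thrift sources; judging by the enforcer rule, it is driven by something like mvn generate-sources -Pthriftif -Dthrift.home=/path/to/thrift, and fails fast when thrift.home is not supplied.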

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift b/sentry-hdfs/bin/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
deleted file mode 100644
index 9212b64..0000000
--- a/sentry-hdfs/bin/sentry-hdfs-common/src/main/resources/sentry_hdfs_service.thrift
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/local/bin/thrift -java
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#
-# Thrift Service that the MetaStore is built on
-#
-
-include "share/fb303/if/fb303.thrift"
-
-namespace java org.apache.sentry.hdfs.service.thrift
-namespace php sentry.hdfs.thrift
-namespace cpp Apache.Sentry.HDFS.Thrift
-
-struct TPathChanges {
-1: required string authzObj;
-2: required list<list<string>> addPaths;
-3: required list<list<string>> delPaths;
-}
-
-struct TPathEntry {
-1: required byte type;
-2: required string pathElement;
-3: optional string authzObj;
-4: required set<i32> children;
-}
-
-struct TPathsDump {
-1: required i32 rootId;
-2: required map<i32,TPathEntry> nodeMap;
-}
-
-struct TPathsUpdate {
-1: required bool hasFullImage;
-2: optional TPathsDump pathsDump;
-3: required i64 seqNum;
-4: required list<TPathChanges> pathChanges;
-}
-
-struct TPrivilegeChanges {
-1: required string authzObj;
-2: required map<string, string> addPrivileges;
-3: required map<string, string> delPrivileges;
-}
-
-struct TRoleChanges {
-1: required string role;
-2: required list<string> addGroups;
-3: required list<string> delGroups;
-}
-
-struct TPermissionsUpdate {
-1: required bool hasfullImage;
-2: required i64 seqNum;
-3: required map<string, TPrivilegeChanges> privilegeChanges;
-4: required map<string, TRoleChanges> roleChanges; 
-}
-
-struct TAuthzUpdateResponse {
-1: optional list<TPathsUpdate> authzPathUpdate,
-2: optional list<TPermissionsUpdate> authzPermUpdate,
-}
-
-service SentryHDFSService
-{
-  # HMS Path cache
-  void handle_hms_notification(1:TPathsUpdate pathsUpdate);
-
-  TAuthzUpdateResponse get_all_authz_updates_from(1:i64 permSeqNum, 2:i64 pathSeqNum);
-  map<string, list<string>> get_all_related_paths(1:string path, 2:bool exactMatch);
-}
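
For orientation, a minimal sketch of driving the generated Java client for this service directly over Thrift, assuming an unsecured (non-SASL) connection; the host is a placeholder, while the compact/multiplexed framing, service name, default port and timeout mirror what the relocated SentryHDFSServiceClient later in this commit sets up:

    import org.apache.sentry.hdfs.service.thrift.SentryHDFSService;
    import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.protocol.TMultiplexedProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class RawHdfsServiceClientSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder host; 8038 and 200000ms are the ClientConfig defaults.
        TTransport transport = new TSocket("sentry-host", 8038, 200000);
        transport.open();
        // The service is registered under a multiplexed name on a compact protocol.
        TMultiplexedProtocol protocol = new TMultiplexedProtocol(
            new TCompactProtocol(transport), "SentryHDFSService");
        SentryHDFSService.Client client = new SentryHDFSService.Client(protocol);
        // Fetch all path and permission updates after the given sequence numbers.
        TAuthzUpdateResponse updates = client.get_all_authz_updates_from(0L, 0L);
        System.out.println("path updates: " + updates.getAuthzPathUpdate());
        System.out.println("perm updates: " + updates.getAuthzPermUpdate());
        transport.close();
      }
    }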

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml b/sentry-hdfs/bin/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml
deleted file mode 100644
index c23a431..0000000
--- a/sentry-hdfs/bin/sentry-hdfs-common/src/test/resources/hdfs-sentry.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-  <!-- dummy file that gets rewritten by testcases in target test classpath -->
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/pom.xml b/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/pom.xml
deleted file mode 100644
index de1aabd..0000000
--- a/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/pom.xml
+++ /dev/null
@@ -1,74 +0,0 @@
-<?xml version="1.0"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.sentry</groupId>
-    <artifactId>sentry-hdfs</artifactId>
-    <version>1.5.0-incubating-SNAPSHOT</version>
-  </parent>
-
-  <artifactId>sentry-hdfs-namenode-plugin</artifactId>
-  <name>Sentry HDFS Namenode Plugin</name>
-
-  <dependencies>
-
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-hdfs-common</artifactId>
-      <version>1.5.0-incubating-SNAPSHOT</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-service-client</artifactId>
-      <version>1.5.0-incubating-SNAPSHOT</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-hdfs-service</artifactId>
-      <version>1.5.0-incubating-SNAPSHOT</version>
-    </dependency>
-
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-minicluster</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml b/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml
deleted file mode 100644
index 511bfdd..0000000
--- a/sentry-hdfs/bin/sentry-hdfs-namenode-plugin/src/test/resources/hdfs-sentry.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>sentry.hdfs-plugin.path-prefixes</name>
-    <value>/user/hive/dw</value>
-  </property>
-  <property>
-    <name>sentry.hdfs-plugin.sentry-uri</name>
-    <value>thrift://localhost:1234</value>
-  </property>
-  <property>
-    <name>sentry.hdfs-plugin.stale-threshold.ms</name>
-    <value>-1</value>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/bin/sentry-hdfs-service/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/bin/sentry-hdfs-service/pom.xml b/sentry-hdfs/bin/sentry-hdfs-service/pom.xml
deleted file mode 100644
index 74c4f20..0000000
--- a/sentry-hdfs/bin/sentry-hdfs-service/pom.xml
+++ /dev/null
@@ -1,108 +0,0 @@
-<?xml version="1.0"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.sentry</groupId>
-    <artifactId>sentry-hdfs</artifactId>
-    <version>1.5.0-incubating-SNAPSHOT</version>
-  </parent>
-
-  <artifactId>sentry-hdfs-service</artifactId>
-  <name>Sentry HDFS service</name>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.shiro</groupId>
-      <artifactId>shiro-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-hdfs-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-provider-db</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.sentry</groupId>
-      <artifactId>sentry-service-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-exec</artifactId>
-      <version>${hive.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-shims</artifactId>
-      <version>${hive.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.thrift</groupId>
-      <artifactId>libfb303</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.thrift</groupId>
-      <artifactId>libthrift</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>ant-contrib</groupId>
-      <artifactId>ant-contrib</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-minikdc</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-metastore</artifactId>
-      <version>${hive.version}</version>
-    </dependency>
-  </dependencies>
-
-
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/pom.xml b/sentry-hdfs/pom.xml
index 4c4691f..1455235 100644
--- a/sentry-hdfs/pom.xml
+++ b/sentry-hdfs/pom.xml
@@ -28,11 +28,11 @@ limitations under the License.
   <artifactId>sentry-hdfs</artifactId>
   <name>Sentry HDFS</name>
   <packaging>pom</packaging>
-
   <modules>
     <module>sentry-hdfs-common</module>
     <module>sentry-hdfs-service</module>
     <module>sentry-hdfs-namenode-plugin</module>
+    <module>sentry-hdfs-dist</module>
   </modules>
 
 </project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java
deleted file mode 100644
index c0358f4..0000000
--- a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.sentry.hdfs;
-
-import java.util.ArrayList;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ExtendedMetastoreClient implements MetastoreClient {
-  
-  private static Logger LOG = LoggerFactory.getLogger(ExtendedMetastoreClient.class);
-
-  private HiveMetaStoreClient client;
-  private final HiveConf hiveConf;
-  public ExtendedMetastoreClient(HiveConf hiveConf) {
-    this.hiveConf = hiveConf;
-  }
-
-  @Override
-  public List<Database> getAllDatabases() {
-    List<Database> retList = new ArrayList<Database>();
-    HiveMetaStoreClient client = getClient();
-    if (client != null) {
-      try {
-        for (String dbName : client.getAllDatabases()) {
-          retList.add(client.getDatabase(dbName));
-        }
-      } catch (Exception e) {
-        LOG.error("Could not get All Databases !!", e);
-      }
-    }
-    return retList;
-  }
-
-  @Override
-  public List<Table> getAllTablesOfDatabase(Database db) {
-    List<Table> retList = new ArrayList<Table>();
-    HiveMetaStoreClient client = getClient();
-    if (client != null) {
-      try {
-        for (String tblName : client.getAllTables(db.getName())) {
-          retList.add(client.getTable(db.getName(), tblName));
-        }
-      } catch (Exception e) {
-        LOG.error(String.format(
-            "Could not get Tables for '%s' !!", db.getName()), e);
-      }
-    }
-    return retList;
-  }
-
-  @Override
-  public List<Partition> listAllPartitions(Database db, Table tbl) {
-    HiveMetaStoreClient client = getClient();
-    if (client != null) {
-      try {
-        return client.listPartitions(db.getName(), tbl.getTableName(), Short.MAX_VALUE);
-      } catch (Exception e) {
-        LOG.error(String.format(
-            "Could not get partitions for '%s'.'%s' !!", db.getName(),
-            tbl.getTableName()), e);
-      }
-    }
-    return new LinkedList<Partition>();
-  }
-
-  private HiveMetaStoreClient getClient() {
-    if (client == null) {
-      try {
-        client = new HiveMetaStoreClient(hiveConf);
-        return client;
-      } catch (MetaException e) {
-        client = null;
-        LOG.error("Could not create metastore client !!", e);
-        return null;
-      }
-    } else {
-      return client;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
new file mode 100644
index 0000000..fa31a19
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
@@ -0,0 +1,212 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import javax.security.auth.callback.CallbackHandler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.sentry.hdfs.service.thrift.SentryHDFSService;
+import org.apache.sentry.hdfs.service.thrift.SentryHDFSService.Client;
+import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse;
+import org.apache.sentry.hdfs.service.thrift.TPathsUpdate;
+import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate;
+import org.apache.sentry.hdfs.ServiceConstants.ClientConfig;
+import org.apache.sentry.hdfs.ServiceConstants.ServerConfig;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TMultiplexedProtocol;
+import org.apache.thrift.transport.TSaslClientTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+public class SentryHDFSServiceClient {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceClient.class);
+
+  public static final String SENTRY_HDFS_SERVICE_NAME = "SentryHDFSService";
+
+  public static class SentryAuthzUpdate {
+
+    private final List<PermissionsUpdate> permUpdates;
+    private final List<PathsUpdate> pathUpdates;
+
+    public SentryAuthzUpdate(List<PermissionsUpdate> permUpdates, List<PathsUpdate> pathUpdates) {
+      this.permUpdates = permUpdates;
+      this.pathUpdates = pathUpdates;
+    }
+
+    public List<PermissionsUpdate> getPermUpdates() {
+      return permUpdates;
+    }
+
+    public List<PathsUpdate> getPathUpdates() {
+      return pathUpdates;
+    }
+  }
+  
+  /**
+   * This transport wraps the Sasl transports to set up the right UGI context for open().
+   */
+  public static class UgiSaslClientTransport extends TSaslClientTransport {
+    protected UserGroupInformation ugi = null;
+
+    public UgiSaslClientTransport(String mechanism, String authorizationId,
+        String protocol, String serverName, Map<String, String> props,
+        CallbackHandler cbh, TTransport transport, boolean wrapUgi)
+        throws IOException {
+      super(mechanism, authorizationId, protocol, serverName, props, cbh,
+          transport);
+      if (wrapUgi) {
+        ugi = UserGroupInformation.getLoginUser();
+      }
+    }
+
+    // open the SASL transport with using the current UserGroupInformation
+    // This is needed to get the current login context stored
+    @Override
+    public void open() throws TTransportException {
+      if (ugi == null) {
+        baseOpen();
+      } else {
+        try {
+          ugi.doAs(new PrivilegedExceptionAction<Void>() {
+            public Void run() throws TTransportException {
+              baseOpen();
+              return null;
+            }
+          });
+        } catch (IOException e) {
+          throw new TTransportException("Failed to open SASL transport", e);
+        } catch (InterruptedException e) {
+          throw new TTransportException(
+              "Interrupted while opening underlying transport", e);
+        }
+      }
+    }
+
+    private void baseOpen() throws TTransportException {
+      super.open();
+    }
+  }
+
+  private final Configuration conf;
+  private final InetSocketAddress serverAddress;
+  private final int connectionTimeout;
+  private boolean kerberos;
+  private TTransport transport;
+
+  private String[] serverPrincipalParts;
+  private Client client;
+  
+  public SentryHDFSServiceClient(Configuration conf) throws IOException {
+    this.conf = conf;
+    Preconditions.checkNotNull(this.conf, "Configuration object cannot be null");
+    this.serverAddress = NetUtils.createSocketAddr(Preconditions.checkNotNull(
+                           conf.get(ClientConfig.SERVER_RPC_ADDRESS), "Config key "
+                           + ClientConfig.SERVER_RPC_ADDRESS + " is required"), conf.getInt(
+                           ClientConfig.SERVER_RPC_PORT, ClientConfig.SERVER_RPC_PORT_DEFAULT));
+    this.connectionTimeout = conf.getInt(ClientConfig.SERVER_RPC_CONN_TIMEOUT,
+                                         ClientConfig.SERVER_RPC_CONN_TIMEOUT_DEFAULT);
+    kerberos = ClientConfig.SECURITY_MODE_KERBEROS.equalsIgnoreCase(
+        conf.get(ClientConfig.SECURITY_MODE, ClientConfig.SECURITY_MODE_KERBEROS).trim());
+    transport = new TSocket(serverAddress.getHostName(),
+        serverAddress.getPort(), connectionTimeout);
+    if (kerberos) {
+      String serverPrincipal = Preconditions.checkNotNull(
+          conf.get(ClientConfig.PRINCIPAL), ClientConfig.PRINCIPAL + " is required");
+
+      // Resolve server host in the same way as we are doing on server side
+      serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, serverAddress.getAddress());
+      LOGGER.info("Using server kerberos principal: " + serverPrincipal);
+
+      serverPrincipalParts = SaslRpcServer.splitKerberosName(serverPrincipal);
+      Preconditions.checkArgument(serverPrincipalParts.length == 3,
+           "Kerberos principal should have 3 parts: " + serverPrincipal);
+      boolean wrapUgi = "true".equalsIgnoreCase(conf
+          .get(ClientConfig.SECURITY_USE_UGI_TRANSPORT, "true"));
+      transport = new UgiSaslClientTransport(AuthMethod.KERBEROS.getMechanismName(),
+          null, serverPrincipalParts[0], serverPrincipalParts[1],
+          ClientConfig.SASL_PROPERTIES, null, transport, wrapUgi);
+    } else {
+      serverPrincipalParts = null;
+    }
+    try {
+      transport.open();
+    } catch (TTransportException e) {
+      throw new IOException("Transport exception while opening transport: " + e.getMessage(), e);
+    }
+    LOGGER.info("Successfully opened transport: " + transport + " to " + serverAddress);
+    TMultiplexedProtocol protocol = new TMultiplexedProtocol(
+      new TCompactProtocol(transport),
+      SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME);
+    client = new SentryHDFSService.Client(protocol);
+    LOGGER.info("Successfully created client");
+  }
+
+  public synchronized void notifyHMSUpdate(PathsUpdate update)
+      throws IOException {
+    try {
+      client.handle_hms_notification(update.getThriftObject());
+    } catch (Exception e) {
+      throw new IOException("Thrift Exception occurred !!", e);
+    }
+  }
+
+  public synchronized SentryAuthzUpdate getAllUpdatesFrom(long permSeqNum, long pathSeqNum)
+      throws IOException {
+    SentryAuthzUpdate retVal = new SentryAuthzUpdate(new LinkedList<PermissionsUpdate>(), new LinkedList<PathsUpdate>());
+    try {
+      TAuthzUpdateResponse sentryUpdates = client.get_all_authz_updates_from(permSeqNum, pathSeqNum);
+      if (sentryUpdates.getAuthzPathUpdate() != null) {
+        for (TPathsUpdate pathsUpdate : sentryUpdates.getAuthzPathUpdate()) {
+          retVal.getPathUpdates().add(new PathsUpdate(pathsUpdate));
+        }
+      }
+      if (sentryUpdates.getAuthzPermUpdate() != null) {
+        for (TPermissionsUpdate permsUpdate : sentryUpdates.getAuthzPermUpdate()) {
+          retVal.getPermUpdates().add(new PermissionsUpdate(permsUpdate));
+        }
+      }
+    } catch (Exception e) {
+      throw new IOException("Thrift Exception occurred !!", e);
+    }
+    return retVal;
+  }
+
+  public void close() {
+    if (transport != null) {
+      transport.close();
+    }
+  }
+}
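
A minimal usage sketch of the relocated client, assuming an unsecured test setup (security mode none) and a placeholder address:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.sentry.hdfs.SentryHDFSServiceClient;
    import org.apache.sentry.hdfs.SentryHDFSServiceClient.SentryAuthzUpdate;
    import org.apache.sentry.hdfs.ServiceConstants.ClientConfig;

    public class HdfsServiceClientUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(ClientConfig.SERVER_RPC_ADDRESS, "localhost"); // placeholder host
        conf.setInt(ClientConfig.SERVER_RPC_PORT, 8038);        // matches SERVER_RPC_PORT_DEFAULT
        conf.set(ClientConfig.SECURITY_MODE, ClientConfig.SECURITY_MODE_NONE); // skip kerberos/SASL
        // The constructor opens the transport eagerly, so connection failures surface here.
        SentryHDFSServiceClient client = new SentryHDFSServiceClient(conf);
        try {
          SentryAuthzUpdate updates = client.getAllUpdatesFrom(0L, 0L);
          System.out.println(updates.getPermUpdates().size() + " perm updates, "
              + updates.getPathUpdates().size() + " path updates");
        } finally {
          client.close();
        }
      }
    }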

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
new file mode 100644
index 0000000..397a534
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-common/src/main/java/org/apache/sentry/hdfs/ServiceConstants.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.security.sasl.Sasl;
+
+import com.google.common.collect.ImmutableMap;
+
+public class ServiceConstants {
+
+  private static final ImmutableMap<String, String> SASL_PROPERTIES;
+
+  static {
+    Map<String, String> saslProps = new HashMap<String, String>();
+    saslProps.put(Sasl.SERVER_AUTH, "true");
+    saslProps.put(Sasl.QOP, "auth-conf");
+    SASL_PROPERTIES = ImmutableMap.copyOf(saslProps);
+  }
+
+  public static class ServerConfig {
+    public static final ImmutableMap<String, String> SASL_PROPERTIES = ServiceConstants.SASL_PROPERTIES;
+    /**
+     * This configuration parameter is only meant to be used for testing purposes.
+     */
+    public static final String SENTRY_HDFS_INTEGRATION_PATH_PREFIXES = "sentry.hdfs.integration.path.prefixes";
+    public static final String[] SENTRY_HDFS_INTEGRATION_PATH_PREFIXES_DEFAULT =
+        new String[]{"/user/hive/warehouse"};
+
+  }
+  public static class ClientConfig {
+    public static final ImmutableMap<String, String> SASL_PROPERTIES = ServiceConstants.SASL_PROPERTIES;
+
+    public static final String SECURITY_MODE = "sentry.hdfs.service.security.mode";
+    public static final String SECURITY_MODE_KERBEROS = "kerberos";
+    public static final String SECURITY_MODE_NONE = "none";
+    public static final String SECURITY_USE_UGI_TRANSPORT = "sentry.hdfs.service.security.use.ugi";
+    public static final String PRINCIPAL = "sentry.hdfs.service.server.principal";
+
+    public static final String SERVER_RPC_PORT = "sentry.hdfs.service.client.server.rpc-port";
+    public static final int SERVER_RPC_PORT_DEFAULT = 8038;
+
+    public static final String SERVER_RPC_ADDRESS = "sentry.hdfs.service.client.server.rpc-address";
+
+    public static final String SERVER_RPC_CONN_TIMEOUT = "sentry.hdfs.service.client.server.rpc-connection-timeout";
+    public static final int SERVER_RPC_CONN_TIMEOUT_DEFAULT = 200000;
+  }
+
+}
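
These keys centralize the plugin's client wiring: sentry.hdfs.service.client.server.rpc-address and rpc-port (default 8038) locate the Sentry service, security.mode selects kerberos (the default) or none, and the SASL properties request server authentication with auth-conf quality of protection. The old copy of the client imported these constants from sentry-service-client's org.apache.sentry.service.thrift.ServiceConstants; this class makes sentry-hdfs-common self-contained.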

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-dist/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-dist/pom.xml b/sentry-hdfs/sentry-hdfs-dist/pom.xml
new file mode 100644
index 0000000..91b8248
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-dist/pom.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.sentry</groupId>
+    <artifactId>sentry-hdfs</artifactId>
+    <version>1.5.0-incubating-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>sentry-hdfs-dist</artifactId>
+  <name>Sentry HDFS Dist</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-service</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-namenode-plugin</artifactId>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+      <groupId>org.apache.maven.plugins</groupId>
+      <artifactId>maven-assembly-plugin</artifactId>
+      <version>2.4.1</version>
+      <executions>
+        <execution>
+          <id>assemble</id>
+          <phase>package</phase>
+          <goals>
+            <goal>single</goal>
+          </goals>
+          <inherited>false</inherited>
+          <configuration>
+            <finalName>sentry-hdfs</finalName>
+            <descriptors>
+              <descriptor>src/main/assembly/all-jar.xml</descriptor>
+            </descriptors>
+          </configuration>
+        </execution>
+      </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-dist/src/main/assembly/all-jar.xml
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-dist/src/main/assembly/all-jar.xml b/sentry-hdfs/sentry-hdfs-dist/src/main/assembly/all-jar.xml
new file mode 100644
index 0000000..8db709b
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-dist/src/main/assembly/all-jar.xml
@@ -0,0 +1,18 @@
+<assembly
+ xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  <id>${project.version}</id>
+  <formats>
+    <format>jar</format> <!-- the result is a jar file -->
+  </formats>
+
+  <includeBaseDirectory>false</includeBaseDirectory> <!-- strip the module prefixes -->
+
+  <dependencySets>
+    <dependencySet>
+      <unpack>true</unpack> <!-- unpack , then repack the jars -->
+      <useTransitiveDependencies>false</useTransitiveDependencies> <!-- do not pull in any transitive dependencies -->
+    </dependencySet>
+  </dependencySets>
+</assembly>
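
Since the sentry-hdfs-dist pom sets finalName to sentry-hdfs and this descriptor's id is ${project.version}, the repacked artifact should land as target/sentry-hdfs-<version>.jar: one jar holding the unpacked classes of sentry-hdfs-common, sentry-hdfs-service and sentry-hdfs-namenode-plugin, with transitive dependencies deliberately left out. That is the sentry-hdfs-*.jar the sentry-dist descriptors above pick up while excluding the thin sentry-hdfs-dist-*.jar module jar.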

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java
new file mode 100644
index 0000000..c0358f4
--- /dev/null
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/ExtendedMetastoreClient.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.hdfs;
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ExtendedMetastoreClient implements MetastoreClient {
+  
+  private static Logger LOG = LoggerFactory.getLogger(ExtendedMetastoreClient.class);
+
+  private HiveMetaStoreClient client;
+  private final HiveConf hiveConf;
+  public ExtendedMetastoreClient(HiveConf hiveConf) {
+    this.hiveConf = hiveConf;
+  }
+
+  @Override
+  public List<Database> getAllDatabases() {
+    List<Database> retList = new ArrayList<Database>();
+    HiveMetaStoreClient client = getClient();
+    if (client != null) {
+      try {
+        for (String dbName : client.getAllDatabases()) {
+          retList.add(client.getDatabase(dbName));
+        }
+      } catch (Exception e) {
+        LOG.error("Could not get All Databases !!", e);
+      }
+    }
+    return retList;
+  }
+
+  @Override
+  public List<Table> getAllTablesOfDatabase(Database db) {
+    List<Table> retList = new ArrayList<Table>();
+    HiveMetaStoreClient client = getClient();
+    if (client != null) {
+      try {
+        for (String tblName : client.getAllTables(db.getName())) {
+          retList.add(client.getTable(db.getName(), tblName));
+        }
+      } catch (Exception e) {
+        LOG.error(String.format(
+            "Could not get Tables for '%s' !!", db.getName()), e);
+      }
+    }
+    return retList;
+  }
+
+  @Override
+  public List<Partition> listAllPartitions(Database db, Table tbl) {
+    HiveMetaStoreClient client = getClient();
+    if (client != null) {
+      try {
+        return client.listPartitions(db.getName(), tbl.getTableName(), Short.MAX_VALUE);
+      } catch (Exception e) {
+        LOG.error(String.format(
+            "Could not get partitions for '%s'.'%s' !!", db.getName(),
+            tbl.getTableName()), e);
+      }
+    }
+    return new LinkedList<Partition>();
+  }
+
+  private HiveMetaStoreClient getClient() {
+    if (client == null) {
+      try {
+        client = new HiveMetaStoreClient(hiveConf);
+        return client;
+      } catch (MetaException e) {
+        client = null;
+        LOG.error("Could not create metastore client !!", e);
+        return null;
+      }
+    } else {
+      return client;
+    }
+  }
+}
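
A small sketch of exercising the relocated metastore wrapper, assuming a HiveConf that already points at a running Hive metastore:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.sentry.hdfs.ExtendedMetastoreClient;

    public class MetastoreWalkSketch {
      public static void main(String[] args) {
        HiveConf hiveConf = new HiveConf(); // assumes hive.metastore.uris is set in the loaded config
        ExtendedMetastoreClient client = new ExtendedMetastoreClient(hiveConf);
        // Walk every database -> table -> partition; errors are logged and yield empty lists.
        for (Database db : client.getAllDatabases()) {
          for (Table tbl : client.getAllTablesOfDatabase(db)) {
            int partitions = client.listAllPartitions(db, tbl).size();
            System.out.println(db.getName() + "." + tbl.getTableName()
                + ": " + partitions + " partitions");
          }
        }
      }
    }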

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
deleted file mode 100644
index 2b1b554..0000000
--- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceClient.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.sentry.hdfs;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import javax.security.auth.callback.CallbackHandler;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SaslRpcServer;
-import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.sentry.hdfs.service.thrift.SentryHDFSService;
-import org.apache.sentry.hdfs.service.thrift.SentryHDFSService.Client;
-import org.apache.sentry.hdfs.service.thrift.TAuthzUpdateResponse;
-import org.apache.sentry.hdfs.service.thrift.TPathsUpdate;
-import org.apache.sentry.hdfs.service.thrift.TPermissionsUpdate;
-import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig;
-import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TMultiplexedProtocol;
-import org.apache.thrift.transport.TSaslClientTransport;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransport;
-import org.apache.thrift.transport.TTransportException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-public class SentryHDFSServiceClient {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceClient.class);
-  
-  public static class SentryAuthzUpdate {
-
-    private final List<PermissionsUpdate> permUpdates;
-    private final List<PathsUpdate> pathUpdates;
-
-    public SentryAuthzUpdate(List<PermissionsUpdate> permUpdates, List<PathsUpdate> pathUpdates) {
-      this.permUpdates = permUpdates;
-      this.pathUpdates = pathUpdates;
-    }
-
-    public List<PermissionsUpdate> getPermUpdates() {
-      return permUpdates;
-    }
-
-    public List<PathsUpdate> getPathUpdates() {
-      return pathUpdates;
-    }
-  }
-  
-  /**
-   * This transport wraps the Sasl transports to set up the right UGI context for open().
-   */
-  public static class UgiSaslClientTransport extends TSaslClientTransport {
-    protected UserGroupInformation ugi = null;
-
-    public UgiSaslClientTransport(String mechanism, String authorizationId,
-        String protocol, String serverName, Map<String, String> props,
-        CallbackHandler cbh, TTransport transport, boolean wrapUgi)
-        throws IOException {
-      super(mechanism, authorizationId, protocol, serverName, props, cbh,
-          transport);
-      if (wrapUgi) {
-        ugi = UserGroupInformation.getLoginUser();
-      }
-    }
-
-    // open the SASL transport with using the current UserGroupInformation
-    // This is needed to get the current login context stored
-    @Override
-    public void open() throws TTransportException {
-      if (ugi == null) {
-        baseOpen();
-      } else {
-        try {
-          ugi.doAs(new PrivilegedExceptionAction<Void>() {
-            public Void run() throws TTransportException {
-              baseOpen();
-              return null;
-            }
-          });
-        } catch (IOException e) {
-          throw new TTransportException("Failed to open SASL transport", e);
-        } catch (InterruptedException e) {
-          throw new TTransportException(
-              "Interrupted while opening underlying transport", e);
-        }
-      }
-    }
-
-    private void baseOpen() throws TTransportException {
-      super.open();
-    }
-  }
-
-  private final Configuration conf;
-  private final InetSocketAddress serverAddress;
-  private final int connectionTimeout;
-  private boolean kerberos;
-  private TTransport transport;
-
-  private String[] serverPrincipalParts;
-  private Client client;
-  
-  public SentryHDFSServiceClient(Configuration conf) throws IOException {
-    this.conf = conf;
-    Preconditions.checkNotNull(this.conf, "Configuration object cannot be null");
-    this.serverAddress = NetUtils.createSocketAddr(Preconditions.checkNotNull(
-                           conf.get(ClientConfig.SERVER_RPC_ADDRESS), "Config key "
-                           + ClientConfig.SERVER_RPC_ADDRESS + " is required"), conf.getInt(
-                           ClientConfig.SERVER_RPC_PORT, ClientConfig.SERVER_RPC_PORT_DEFAULT));
-    this.connectionTimeout = conf.getInt(ClientConfig.SERVER_RPC_CONN_TIMEOUT,
-                                         ClientConfig.SERVER_RPC_CONN_TIMEOUT_DEFAULT);
-    kerberos = ServerConfig.SECURITY_MODE_KERBEROS.equalsIgnoreCase(
-        conf.get(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_KERBEROS).trim());
-    transport = new TSocket(serverAddress.getHostName(),
-        serverAddress.getPort(), connectionTimeout);
-    if (kerberos) {
-      String serverPrincipal = Preconditions.checkNotNull(
-          conf.get(ServerConfig.PRINCIPAL), ServerConfig.PRINCIPAL + " is required");
-
-      // Resolve server host in the same way as we are doing on server side
-      serverPrincipal = SecurityUtil.getServerPrincipal(serverPrincipal, serverAddress.getAddress());
-      LOGGER.info("Using server kerberos principal: " + serverPrincipal);
-
-      serverPrincipalParts = SaslRpcServer.splitKerberosName(serverPrincipal);
-      Preconditions.checkArgument(serverPrincipalParts.length == 3,
-           "Kerberos principal should have 3 parts: " + serverPrincipal);
-      boolean wrapUgi = "true".equalsIgnoreCase(conf
-          .get(ServerConfig.SECURITY_USE_UGI_TRANSPORT, "true"));
-      transport = new UgiSaslClientTransport(AuthMethod.KERBEROS.getMechanismName(),
-          null, serverPrincipalParts[0], serverPrincipalParts[1],
-          ClientConfig.SASL_PROPERTIES, null, transport, wrapUgi);
-    } else {
-      serverPrincipalParts = null;
-    }
-    try {
-      transport.open();
-    } catch (TTransportException e) {
-      throw new IOException("Transport exception while opening transport: " + e.getMessage(), e);
-    }
-    LOGGER.info("Successfully opened transport: " + transport + " to " + serverAddress);
-    TMultiplexedProtocol protocol = new TMultiplexedProtocol(
-      new TCompactProtocol(transport),
-      SentryHDFSServiceProcessor.SENTRY_HDFS_SERVICE_NAME);
-    client = new SentryHDFSService.Client(protocol);
-    LOGGER.info("Successfully created client");
-  }
-
-  public synchronized void notifyHMSUpdate(PathsUpdate update)
-      throws IOException {
-    try {
-      client.handle_hms_notification(update.getThriftObject());
-    } catch (Exception e) {
-      throw new IOException("Thrift Exception occurred !!", e);
-    }
-  }
-
-  public synchronized SentryAuthzUpdate getAllUpdatesFrom(long permSeqNum, long pathSeqNum)
-      throws IOException {
-    SentryAuthzUpdate retVal = new SentryAuthzUpdate(new LinkedList<PermissionsUpdate>(), new LinkedList<PathsUpdate>());
-    try {
-      TAuthzUpdateResponse sentryUpdates = client.get_all_authz_updates_from(permSeqNum, pathSeqNum);
-      if (sentryUpdates.getAuthzPathUpdate() != null) {
-        for (TPathsUpdate pathsUpdate : sentryUpdates.getAuthzPathUpdate()) {
-          retVal.getPathUpdates().add(new PathsUpdate(pathsUpdate));
-        }
-      }
-      if (sentryUpdates.getAuthzPermUpdate() != null) {
-        for (TPermissionsUpdate permsUpdate : sentryUpdates.getAuthzPermUpdate()) {
-          retVal.getPermUpdates().add(new PermissionsUpdate(permsUpdate));
-        }
-      }
-    } catch (Exception e) {
-      throw new IOException("Thrift Exception occurred !!", e);
-    }
-    return retVal;
-  }
-
-  public void close() {
-    if (transport != null) {
-      transport.close();
-    }
-  }
-}

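A hedged sketch of how a caller might drive the client API deleted above (the service-side factory below still compiles against this class, so the API survives in another module). The config key literals and port here are illustrative stand-ins for whatever ClientConfig.SERVER_RPC_ADDRESS and SERVER_RPC_PORT resolve to, not values taken from this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.sentry.hdfs.SentryHDFSServiceClient;
import org.apache.sentry.hdfs.SentryHDFSServiceClient.SentryAuthzUpdate;

public class UpdatePoller {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(false);
    // Illustrative key names and port; the client resolves the real keys
    // through ClientConfig in its constructor.
    conf.set("sentry.service.client.server.rpc-address", "localhost");
    conf.setInt("sentry.service.client.server.rpc-port", 8038);

    SentryHDFSServiceClient client = new SentryHDFSServiceClient(conf);
    try {
      // Ask the Sentry service for all updates newer than our sequence numbers.
      SentryAuthzUpdate delta = client.getAllUpdatesFrom(0L, 0L);
      System.out.println(delta.getPermUpdates().size() + " perm updates, "
          + delta.getPathUpdates().size() + " path updates");
    } finally {
      client.close();
    }
  }
}
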
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java
index 1198619..ab07494 100644
--- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessor.java
@@ -34,8 +34,6 @@ public class SentryHDFSServiceProcessor implements SentryHDFSService.Iface {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(SentryHDFSServiceProcessor.class);
 
-  public static final String SENTRY_HDFS_SERVICE_NAME = "SentryHDFSService";
-
   @Override
   public TAuthzUpdateResponse get_all_authz_updates_from(long permSeqNum, long pathSeqNum)
       throws TException {

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java
index 81168b2..c45c294 100644
--- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryHDFSServiceProcessorFactory.java
@@ -98,7 +98,7 @@ public class SentryHDFSServiceProcessorFactory extends ProcessorFactory{
         new SentryHDFSServiceProcessor();
     TProcessor processor = new ProcessorWrapper(sentryServiceHandler);
     multiplexedProcessor.registerProcessor(
-        SentryHDFSServiceProcessor.SENTRY_HDFS_SERVICE_NAME, processor);
+        SentryHDFSServiceClient.SENTRY_HDFS_SERVICE_NAME, processor);
     return true;
   }
 }

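Both ends of a Thrift multiplexed connection must agree on the exact service-name string, which is why the patch consolidates SENTRY_HDFS_SERVICE_NAME in one class and points the factory at it. A minimal sketch of that pairing, with placeholder handler wiring (the literal matches the "SentryHDFSService" value removed from the processor above):

import org.apache.thrift.TMultiplexedProcessor;
import org.apache.thrift.TProcessor;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TMultiplexedProtocol;
import org.apache.thrift.transport.TSocket;

public class MultiplexSketch {
  static final String SERVICE_NAME = "SentryHDFSService"; // single shared constant

  static void serverSide(TMultiplexedProcessor mux, TProcessor handler) {
    // Server: route calls addressed to SERVICE_NAME to this handler.
    mux.registerProcessor(SERVICE_NAME, handler);
  }

  static TMultiplexedProtocol clientSide(TSocket transport) {
    // Client: stamp every outgoing call with the same SERVICE_NAME.
    return new TMultiplexedProtocol(new TCompactProtocol(transport), SERVICE_NAME);
  }
}

If the two strings drift apart, the server rejects calls for an unknown service, so keeping a single constant removes a whole class of wiring bugs.
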
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java
----------------------------------------------------------------------
diff --git a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java
index 262e893..5bb6d45 100644
--- a/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java
+++ b/sentry-hdfs/sentry-hdfs-service/src/main/java/org/apache/sentry/hdfs/SentryPlugin.java
@@ -44,7 +44,7 @@ import org.apache.sentry.provider.db.service.thrift.TAlterSentryRoleRevokePrivil
 import org.apache.sentry.provider.db.service.thrift.TDropSentryRoleRequest;
 import org.apache.sentry.provider.db.service.thrift.TSentryGroup;
 import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege;
-import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
+import org.apache.sentry.hdfs.ServiceConstants.ServerConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -99,7 +99,8 @@ public class SentryPlugin implements SentryPolicyStorePlugin {
     HiveConf hiveConf = new HiveConf(conf, Configuration.class);
     final MetastoreClient hmsClient = new ExtendedMetastoreClient(hiveConf);
     final String[] pathPrefixes = conf
-        .getStrings(ServerConfig.SENTRY_HDFS_INTEGRATION_PATH_PREFIXES, new String[]{"/"});
+        .getStrings(ServerConfig.SENTRY_HDFS_INTEGRATION_PATH_PREFIXES,
+            ServerConfig.SENTRY_HDFS_INTEGRATION_PATH_PREFIXES_DEFAULT);
     pathsUpdater = new UpdateForwarder<PathsUpdate>(new UpdateableAuthzPaths(
         pathPrefixes), createHMSImageRetriever(pathPrefixes, hmsClient), 100);
     PermImageRetriever permImageRetriever = new PermImageRetriever(sentryStore);

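The hunk above replaces a hardcoded {"/"} fallback with a named default constant. For reference, Configuration.getStrings splits a comma-separated value and falls back to the supplied defaults when the key is unset; a small sketch where the key literal is a hypothetical stand-in for the SENTRY_HDFS_INTEGRATION_PATH_PREFIXES constant:

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;

public class PathPrefixSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Hypothetical key name, for illustration only.
    conf.set("sentry.hdfs.integration.path.prefixes",
        "/user/hive/warehouse, /data");
    String[] prefixes = conf.getStrings(
        "sentry.hdfs.integration.path.prefixes", "/");
    // getStrings trims whitespace around each comma-separated token.
    System.out.println(Arrays.toString(prefixes)); // [/user/hive/warehouse, /data]
  }
}
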
http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-tests/sentry-tests-hive/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/pom.xml b/sentry-tests/sentry-tests-hive/pom.xml
index 769afb5..fde850f 100644
--- a/sentry-tests/sentry-tests-hive/pom.xml
+++ b/sentry-tests/sentry-tests-hive/pom.xml
@@ -227,6 +227,21 @@ limitations under the License.
     </dependency>
     <dependency>
       <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-common</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-service</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-hdfs-namenode-plugin</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.sentry</groupId>
       <artifactId>sentry-policy-db</artifactId>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/incubator-sentry/blob/c059d3d7/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java
new file mode 100644
index 0000000..41f8af8
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestHDFSIntegration.java
@@ -0,0 +1,400 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.tests.e2e.hdfs;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.ServerSocket;
+import java.net.URL;
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.Statement;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.security.GroupMappingServiceProvider;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl;
+import org.apache.sentry.binding.hive.conf.HiveAuthzConf;
+import org.apache.sentry.hdfs.SentryAuthorizationProvider;
+import org.apache.sentry.provider.db.SimpleDBProviderBackend;
+import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider;
+import org.apache.sentry.provider.file.PolicyFile;
+import org.apache.sentry.service.thrift.SentryService;
+import org.apache.sentry.service.thrift.SentryServiceFactory;
+import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig;
+import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
+import org.apache.sentry.tests.e2e.hive.StaticUserGroup;
+import org.apache.sentry.tests.e2e.hive.fs.MiniDFS;
+import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory;
+import org.apache.sentry.tests.e2e.hive.hiveserver.InternalHiveServer;
+import org.apache.sentry.tests.e2e.hive.hiveserver.InternalMetastoreServer;
+import org.fest.reflect.core.Reflection;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.io.Files;
+import com.google.common.io.Resources;
+
+public class TestHDFSIntegration {
+
+  // Mock group mapping service that maps each user to a same-named group
+  // (plus the test runner's user name)
+  public static class PseudoGroupMappingService implements
+      GroupMappingServiceProvider {
+
+    @Override
+    public List<String> getGroups(String user) {
+      return Lists.newArrayList(user, System.getProperty("user.name"));
+    }
+
+    @Override
+    public void cacheGroupsRefresh() throws IOException {
+      // no-op
+    }
+
+    @Override
+    public void cacheGroupsAdd(List<String> groups) throws IOException {
+      // no-op
+    }
+  }
+
+  private MiniDFSCluster miniDFS;
+  private InternalHiveServer hiveServer2;
+  private InternalMetastoreServer metastore;
+  private String fsURI;
+  private int hmsPort;
+  private int sentryPort;
+  private File baseDir;
+  private UserGroupInformation admin;
+
+  protected static File assertCreateDir(File dir) {
+    if (!dir.isDirectory()) {
+      Assert.assertTrue("Failed creating " + dir, dir.mkdirs());
+    }
+    return dir;
+  }
+
+  private static int findPort() throws IOException {
+    ServerSocket socket = new ServerSocket(0);
+    int port = socket.getLocalPort();
+    socket.close();
+    return port;
+  }
+
+  private static void startSentryService(SentryService sentryServer) throws Exception {
+    sentryServer.start();
+    final long start = System.currentTimeMillis();
+    while (!sentryServer.isRunning()) {
+      Thread.sleep(1000);
+      if (System.currentTimeMillis() - start > 60000L) {
+        throw new TimeoutException("Server did not start after 60 seconds");
+      }
+    }
+  }
+
+  @Before
+  public void setup() throws Exception {
+    Class.forName("org.apache.hive.jdbc.HiveDriver");
+    baseDir = Files.createTempDir();
+    final File policyFileLocation = new File(baseDir, HiveServerFactory.AUTHZ_PROVIDER_FILENAME);
+    PolicyFile policyFile = PolicyFile.setAdminOnServer1("hive")
+        .setUserGroupMapping(StaticUserGroup.getStaticMapping());
+    policyFile.write(policyFileLocation);
+
+    admin = UserGroupInformation.createUserForTesting(
+        System.getProperty("user.name"), new String[] { "supergroup" });
+
+    UserGroupInformation hiveUgi = UserGroupInformation.createUserForTesting(
+        "hive", new String[] { "hive" });
+
+    hiveUgi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        Configuration sentryConf = new Configuration(false);
+        Map<String, String> properties = Maps.newHashMap();
+        properties.put(HiveServerFactory.AUTHZ_PROVIDER_BACKEND,
+            SimpleDBProviderBackend.class.getName());
+        properties.put(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY.varname,
+            SentryHiveAuthorizationTaskFactoryImpl.class.getName());
+        properties
+            .put(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS.varname, "2");
+        properties.put("hive.metastore.uris", "thrift://localhost:" + hmsPort);
+        properties.put(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_NONE);
+        properties.put("sentry.hive.testing.mode", "true");
+        properties.put(ServerConfig.ADMIN_GROUPS, "hive,admin");
+        properties.put(ServerConfig.RPC_ADDRESS, "localhost");
+        properties.put(ServerConfig.RPC_PORT, String.valueOf(0));
+        properties.put(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, "false");
+
+        properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING);
+        properties.put(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFileLocation.getPath());
+        properties.put(ServerConfig.SENTRY_STORE_JDBC_URL,
+            "jdbc:derby:;databaseName=" + baseDir.getPath()
+                + "/sentrystore_db;create=true");
+        properties.put("sentry.service.processor.factories",
+            "org.apache.sentry.provider.db.service.thrift.SentryPolicyStoreProcessorFactory,org.apache.sentry.hdfs.SentryHDFSServiceProcessorFactory");
+        properties.put("sentry.policy.store.plugins", "org.apache.sentry.hdfs.SentryPlugin");
+        properties.put(ServerConfig.RPC_MIN_THREADS, "3");
+        for (Map.Entry<String, String> entry : properties.entrySet()) {
+          sentryConf.set(entry.getKey(), entry.getValue());
+        }
+        SentryService sentryServer = new SentryServiceFactory().create(sentryConf);
+        properties.put(ClientConfig.SERVER_RPC_ADDRESS, sentryServer.getAddress()
+            .getHostName());
+        sentryConf.set(ClientConfig.SERVER_RPC_ADDRESS, sentryServer.getAddress()
+            .getHostName());
+        properties.put(ClientConfig.SERVER_RPC_PORT,
+            String.valueOf(sentryServer.getAddress().getPort()));
+        sentryConf.set(ClientConfig.SERVER_RPC_PORT,
+            String.valueOf(sentryServer.getAddress().getPort()));
+        startSentryService(sentryServer);
+        sentryPort = sentryServer.getAddress().getPort();
+        System.out.println("\n\n Sentry port : " + sentryPort + "\n\n");
+        return null;
+      }
+    });
+
+    admin.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
+        Configuration conf = new HdfsConfiguration();
+        conf.set(DFSConfigKeys.DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
+            SentryAuthorizationProvider.class.getName());
+        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+        
+        File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
+        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
+        conf.set("hadoop.security.group.mapping",
+            MiniDFS.PseudoGroupMappingService.class.getName());
+        Configuration.addDefaultResource("test.xml");
+
+        conf.set("sentry.authorization-provider.hdfs-path-prefixes", "/user/hive/warehouse");
+        conf.set("sentry.hdfs.service.security.mode", "none");
+        conf.set("sentry.hdfs.service.client.server.rpc-address", "localhost");
+        conf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
+        EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+        miniDFS = new MiniDFSCluster.Builder(conf).build();
+        Path tmpPath = new Path("/tmp");
+        Path hivePath = new Path("/user/hive");
+        Path warehousePath = new Path(hivePath, "warehouse");
+        miniDFS.getFileSystem().mkdirs(warehousePath);
+        boolean directory = miniDFS.getFileSystem().isDirectory(warehousePath);
+        System.out.println("\n\n Is dir :" + directory + "\n\n");
+        System.out.println("\n\n DefaultFS :" + miniDFS.getFileSystem().getUri() + "\n\n");
+        fsURI = miniDFS.getFileSystem().getUri().toString();
+        miniDFS.getFileSystem().mkdirs(tmpPath);
+        miniDFS.getFileSystem().setPermission(tmpPath, FsPermission.valueOf("drwxrwxrwx"));
+        miniDFS.getFileSystem().setOwner(hivePath, "hive", "hive");
+        miniDFS.getFileSystem().setOwner(warehousePath, "hive", "hive");
+        System.out.println("\n\n Owner :"
+            + miniDFS.getFileSystem().getFileStatus(warehousePath).getOwner()
+            + ", "
+            + miniDFS.getFileSystem().getFileStatus(warehousePath).getGroup()
+            + "\n\n");
+        System.out.println("\n\n Owner tmp :"
+            + miniDFS.getFileSystem().getFileStatus(tmpPath).getOwner() + ", "
+            + miniDFS.getFileSystem().getFileStatus(tmpPath).getGroup() + ", "
+            + miniDFS.getFileSystem().getFileStatus(tmpPath).getPermission() + ", "
+            + "\n\n");
+        return null;
+      }
+    });
+
+
+    hiveUgi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        HiveConf hiveConf = new HiveConf();
+        hiveConf.set("sentry.metastore.plugins", "org.apache.sentry.hdfs.MetastorePlugin");
+        hiveConf.set("sentry.service.client.server.rpc-address", "localhost");
+        hiveConf.set("sentry.hdfs.service.client.server.rpc-address", "localhost");
+        hiveConf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
+        hiveConf.set("sentry.service.client.server.rpc-port", String.valueOf(sentryPort));
+        hiveConf.set("sentry.service.security.mode", "none");
+        hiveConf.set("sentry.hdfs.service.security.mode", "none");
+        hiveConf.set("sentry.hive.provider.backend", "org.apache.sentry.provider.db.SimpleDBProviderBackend");
+        hiveConf.set("sentry.provider", LocalGroupResourceAuthorizationProvider.class.getName());
+        hiveConf.set("sentry.hive.provider", LocalGroupResourceAuthorizationProvider.class.getName());
+        hiveConf.set("sentry.hive.provider.resource", policyFileLocation.getPath());
+        hiveConf.set("sentry.hive.testing.mode", "true");
+        hiveConf.set("sentry.hive.server", "server1");
+        
+
+        hiveConf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING, ServerConfig.SENTRY_STORE_LOCAL_GROUP_MAPPING);
+        hiveConf.set(ServerConfig.SENTRY_STORE_GROUP_MAPPING_RESOURCE, policyFileLocation.getPath());
+        hiveConf.set("fs.defaultFS", fsURI);
+        hiveConf.set("fs.default.name", fsURI);
+        hiveConf.set("hive.metastore.execute.setugi", "true");
+        hiveConf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:;databaseName=" + baseDir.getAbsolutePath() + "/metastore_db;create=true");
+        hiveConf.set("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver");
+        hiveConf.set("javax.jdo.option.ConnectionUserName", "hive");
+        hiveConf.set("javax.jdo.option.ConnectionPassword", "hive");
+        hiveConf.set("datanucleus.autoCreateSchema", "true");
+        hiveConf.set("datanucleus.fixedDatastore", "false");
+        hiveConf.set("datanucleus.autoStartMechanism", "SchemaTable");
+        hmsPort = findPort();
+        System.out.println("\n\n HMS port : " + hmsPort + "\n\n");
+        hiveConf.set("hive.metastore.uris", "thrift://localhost:" + hmsPort);
+        hiveConf.set("hive.metastore.pre.event.listeners", "org.apache.sentry.binding.metastore.MetastoreAuthzBinding");
+        hiveConf.set("hive.metastore.event.listeners", "org.apache.sentry.binding.metastore.SentryMetastorePostEventListener");
+        hiveConf.set("hive.security.authorization.task.factory", "org.apache.sentry.binding.hive.SentryHiveAuthorizationTaskFactoryImpl");
+        hiveConf.set("hive.server2.session.hook", "org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook");
+
+        HiveAuthzConf authzConf = new HiveAuthzConf(Resources.getResource("sentry-site.xml"));
+        authzConf.addResource(hiveConf);
+        File confDir = assertCreateDir(new File(baseDir, "etc"));
+        File accessSite = new File(confDir, HiveAuthzConf.AUTHZ_SITE_FILE);
+        OutputStream out = new FileOutputStream(accessSite);
+        authzConf.set("fs.defaultFS", fsURI);
+        authzConf.writeXml(out);
+        out.close();
+
+//        hiveConf.set("hive.sentry.conf.url", "file://" + accessSite.getCanonicalPath());
+        hiveConf.set("hive.sentry.conf.url", accessSite.getPath());
+        System.out.println("Sentry client file : " + accessSite.getPath());
+
+        File hiveSite = new File(confDir, "hive-site.xml");
+        hiveConf.set("hive.server2.enable.doAs", "false");
+        hiveConf.set(HiveAuthzConf.HIVE_SENTRY_CONF_URL, accessSite.toURI().toURL()
+            .toExternalForm());
+        out = new FileOutputStream(hiveSite);
+        hiveConf.writeXml(out);
+        out.close();
+
+        Reflection.staticField("hiveSiteURL")
+        .ofType(URL.class)
+        .in(HiveConf.class)
+        .set(hiveSite.toURI().toURL());
+
+        metastore = new InternalMetastoreServer(hiveConf);
+        metastore.start();
+
+        hiveServer2 = new InternalHiveServer(hiveConf);
+        hiveServer2.start();
+
+        return null;
+      }
+    });
+
+  }
+
+  @After
+  public void cleanUp() throws Exception {
+    try {
+      if (miniDFS != null) {
+        miniDFS.shutdown();
+      }
+    } finally {
+      try {
+        if (hiveServer2 != null) {
+          hiveServer2.shutdown();
+        }
+      } finally {
+        if (metastore != null) {
+          metastore.shutdown();
+        }
+      }
+    }
+  }
+
+//  public Connection createConnection(String username) throws Exception {
+//    String password = username;
+//    Connection connection =  hiveServer2.createConnection(username, password);
+//    assertNotNull("Connection is null", connection);
+//    assertFalse("Connection should not be closed", connection.isClosed());
+//    Statement statement  = connection.createStatement();
+//    statement.close();
+//    return connection;
+//  }
+//
+//  public Statement createStatement(Connection connection)
+//  throws Exception {
+//    Statement statement  = connection.createStatement();
+//    assertNotNull("Statement is null", statement);
+//    return statement;
+//  }
+
+  @Test
+  public void testSimple() throws Exception {
+    Connection conn = hiveServer2.createConnection("hive", "hive");
+    Statement stmt = conn.createStatement();
+    stmt.execute("create role admin_role");
+    stmt.execute("grant role admin_role to group hive");
+    stmt.execute("grant all on server server1 to role admin_role");
+    stmt.execute("create table p1 (s string) partitioned by (month int, day int)");
+    stmt.execute("alter table p1 add partition (month=1, day=1)");
+    stmt.execute("alter table p1 add partition (month=1, day=2)");
+    stmt.execute("alter table p1 add partition (month=2, day=1)");
+    stmt.execute("alter table p1 add partition (month=2, day=2)");
+    AclStatus aclStatus = miniDFS.getFileSystem().getAclStatus(new Path("/user/hive/warehouse/p1"));
+    Set<String> groups = new HashSet<String>(); 
+    for (AclEntry ent : aclStatus.getEntries()) {
+      if (ent.getType().equals(AclEntryType.GROUP)) {
+        groups.add(ent.getName());
+      }
+    }
+    System.out.println("Final acls [" + aclStatus + "]");
+    Assert.assertFalse(groups.contains("hbase"));
+
+    stmt.execute("create role p1_admin");
+    stmt.execute("grant role p1_admin to group hbase");
+    stmt.execute("grant select on table p1 to role p1_admin");
+    Thread.sleep(1000);
+    aclStatus = miniDFS.getFileSystem().getAclStatus(new Path("/user/hive/warehouse/p1"));
+    groups = new HashSet<String>();
+    for (AclEntry ent : aclStatus.getEntries()) {
+      if (ent.getType().equals(AclEntryType.GROUP)) {
+        groups.add(ent.getName());
+      }
+    }
+    Assert.assertTrue(groups.contains("hbase"));
+
+    stmt.execute("revoke select on table p1 from role p1_admin");
+    Thread.sleep(1000);
+    aclStatus = miniDFS.getFileSystem().getAclStatus(new Path("/user/hive/warehouse/p1"));
+    groups = new HashSet<String>();
+    for (AclEntry ent : aclStatus.getEntries()) {
+      if (ent.getType().equals(AclEntryType.GROUP)) {
+        groups.add(ent.getName());
+      }
+    }
+    Assert.assertFalse(groups.contains("hbase"));
+  }
+}

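The fixed Thread.sleep(1000) calls in the test above race against asynchronous ACL propagation and can flake on slow machines. A hedged sketch of a polling alternative (the helper name and timeout are ours, not part of this patch), using only imports the test already has:

  // Hypothetical helper: poll the path until the group ACL reaches the
  // expected state, instead of sleeping for a fixed interval.
  private void waitForGroupAcl(Path path, String group, boolean expected,
      long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (true) {
      Set<String> groups = new HashSet<String>();
      for (AclEntry ent : miniDFS.getFileSystem().getAclStatus(path).getEntries()) {
        if (ent.getType().equals(AclEntryType.GROUP)) {
          groups.add(ent.getName());
        }
      }
      if (groups.contains(group) == expected) {
        return; // ACL reached the expected state
      }
      if (System.currentTimeMillis() > deadline) {
        Assert.fail("Timed out waiting for group '" + group + "' ACL to be " + expected);
      }
      Thread.sleep(100);
    }
  }
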
