directory-commits mailing list archives

From zengl...@apache.org
Subject directory-kerby git commit: Update HAS documents.
Date Thu, 02 Aug 2018 02:34:36 GMT
Repository: directory-kerby
Updated Branches:
  refs/heads/trunk 8729345c9 -> 8ca9e8f68


Update HAS documents.


Project: http://git-wip-us.apache.org/repos/asf/directory-kerby/repo
Commit: http://git-wip-us.apache.org/repos/asf/directory-kerby/commit/8ca9e8f6
Tree: http://git-wip-us.apache.org/repos/asf/directory-kerby/tree/8ca9e8f6
Diff: http://git-wip-us.apache.org/repos/asf/directory-kerby/diff/8ca9e8f6

Branch: refs/heads/trunk
Commit: 8ca9e8f687090a53770055dceac26025e35f997d
Parents: 8729345
Author: zenglinx <frank.zeng@intel.com>
Authored: Thu Aug 2 10:34:19 2018 +0800
Committer: zenglinx <frank.zeng@intel.com>
Committed: Thu Aug 2 10:34:19 2018 +0800

----------------------------------------------------------------------
 has-project/docs/deploy-https.md                | 312 ++++++++++---------
 has-project/docs/has-start.md                   | 126 +++++++-
 has-project/supports/hadoop/README.md           |  42 ++-
 .../remote/cmd/CreatePrincipalsRemoteCmd.java   |   4 +-
 .../remote/cmd/EnableConfigureRemoteCmd.java    |   2 +-
 5 files changed, 326 insertions(+), 160 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/directory-kerby/blob/8ca9e8f6/has-project/docs/deploy-https.md
----------------------------------------------------------------------
diff --git a/has-project/docs/deploy-https.md b/has-project/docs/deploy-https.md
index bd6c3c5..d221e5b 100644
--- a/has-project/docs/deploy-https.md
+++ b/has-project/docs/deploy-https.md
@@ -1,153 +1,159 @@
-Deploy HTTPS
-===============
-
-## 1. Create a keystore file for each host
-
-> keystore: the keystore file that stores the certificate.
-> validity: the valid time of the certificate in days.
-```
-keytool -alias {hostname} -keystore {keystore} -validity {validity} -genkey
-```
-
-> The keytool will ask for more details such as the keystore password, keypassword and CN(hostname).
-
-## 2. Export the certificate public key to a certificate file for each host
-```
-keytool -export -alias {hostname} -keystore {keystore} -rfc -file {cert-file}
-```
-
-## 3. Create a common truststore file (trustAll)
-The truststore file contains the public key from all certificates. If you assume a 2-node cluster with node1 and node2,
-login to node1 and import the truststore file for node1.
-```
-keytool -import -alias {hostname} -keystore {trustAll} -file {cert-file}
-```
-
-## 4. Update the common truststore file
-* Move {trustAll} from node1 to node2 ({trustAll} already has the certificate entry of node1), and repeat Step 3.
-
-* Move the updated {trustAll} from node2 to node1. Repeat these steps for each node in the cluster.
-When you finish, the {trustAll} file will have the certificates from all nodes.
-
-> Note these work could be done on the same node, just notice the hostname.
-
-## 5. Copy {trustAll} from node1 to all of the other nodes
-
-## 6. Validate the common truststore file
-```
-keytool -list -v -keystore {trustAll}
-```
-
-## 7. Edit the Configuration files
-> Deploy {keystore} and {trustAll} files and config /<conf-dir>/ssl-server.conf for HAS server
-```
-ssl.server.keystore.location = {path to keystore}
-ssl.server.keystore.password = {keystore password set in step 1}
-ssl.server.keystore.keypassword = {keypassword set in step 1}
-ssl.server.truststore.reload.interval = 1000
-ssl.server.truststore.location = {path to trustAll}
-ssl.server.truststore.password = {trustAll password set in step 2}
-```
-
-> Config /etc/has/<https_host>/ssl-client.conf for HAS client, the <https_host>
-is the has server address, the same as the value configured in has-client.conf
-```
-ssl.client.truststore.location = {path to trustAll}
-ssl.client.truststore.password = {trustAll password}
-```
-
-> Config $HADOOP_HOME/etc/hadoop/ssl-server.xml for Hadoop
-```
-<configuration>
-
-<property>
-  <name>ssl.server.truststore.location</name>
-  <value>path to trustAll</value>
-</property>
-
-<property>
-  <name>ssl.server.truststore.password</name>
-  <value>trustAll password</value>
-</property>
-
-<property>
-  <name>ssl.server.truststore.type</name>
-  <value>jks</value>
-</property>
-
-<property>
-  <name>ssl.server.truststore.reload.interval</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>ssl.server.keystore.location</name>
-  <value>path to keystore</value>
-</property>
-
-<property>
-  <name>ssl.server.keystore.password</name>
-  <value>keystore password</value>
-</property>
-
-<property>
-  <name>ssl.server.keystore.keypassword</name>
-  <value>keystore keypassword</value>
-</property>
-
-<property>
-  <name>ssl.server.keystore.type</name>
-  <value>jks</value>
-</property>
-
-</configuration>
-```
-
-> Config $HADOOP_HOME/etc/hadoop/ssl-client.xml for Hadoop
-```
-<configuration>
-
-<property>
-  <name>ssl.client.truststore.location</name>
-  <value>patch to trustAll</value>
-</property>
-
-<property>
-  <name>ssl.client.truststore.password</name>
-  <value>trustAll password</value>
-</property>
-
-<property>
-  <name>ssl.client.truststore.type</name>
-  <value>jks</value>
-</property>
-
-<property>
-  <name>ssl.client.truststore.reload.interval</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>ssl.client.keystore.location</name>
-  <value>path to keystore</value>
-</property>
-
-<property>
-  <name>ssl.client.keystore.password</name>
-  <value>keystore password</value>
-</property>
-
-<property>
-  <name>ssl.client.keystore.keypassword</name>
-  <value>keystore keypassword</value>
-</property>
-
-<property>
-  <name>ssl.client.keystore.type</name>
-  <value>jks</value>
-</property>
-
-</configuration>
-```
-
-> To make the nodes in the cluster communicate bidirectionally, deploy all the configuration files.
+Deploy HTTPS
+===============
+
+## 1. Create a keystore file for each host
+
+> keystore: the keystore file that stores the certificate.
+> validity: the valid time of the certificate in days.
+```
+keytool -alias {hostname} -keystore {keystore} -validity {validity} -genkey
+```
+
+> The keytool will ask for more details such as the keystore password, keypassword and CN (hostname).
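+
+For example, with a hypothetical host node1.example.com, a keystore file node1.jks, and a one-year validity:
+```
+keytool -alias node1.example.com -keystore node1.jks -validity 365 -genkey
+```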
+
+## 2. Export the certificate public key to a certificate file for each host
+```
+keytool -export -alias {hostname} -keystore {keystore} -rfc -file {cert-file}
+```
+
+## 3. Create a common truststore file (trustAll)
+The truststore file contains the public keys from all certificates. Assuming a 2-node cluster with node1 and node2,
+log in to node1 and import node1's certificate into the truststore file.
+```
+keytool -import -alias {hostname} -keystore {trustAll} -file {cert-file}
+```
+
+## 4. Update the common truststore file
+* Move {trustAll} from node1 to node2 ({trustAll} already has the certificate entry of node1), and repeat Step 3.
+
+* Move the updated {trustAll} from node2 to node1. Repeat these steps for each node in the cluster.
+When you finish, the {trustAll} file will have the certificates from all nodes.
+
+> Note: this work can be done on the same node; just pay attention to the hostname used in each command.
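+
+As a sketch for the hypothetical two-node cluster, assuming each node's keystore was created as in step 1:
+```
+// Export each node's certificate (step 2)
+keytool -export -alias node1.example.com -keystore node1.jks -rfc -file node1.cert
+keytool -export -alias node2.example.com -keystore node2.jks -rfc -file node2.cert
+// Import both certificates into the common truststore (steps 3 and 4)
+keytool -import -alias node1.example.com -keystore trustAll.jks -file node1.cert
+keytool -import -alias node2.example.com -keystore trustAll.jks -file node2.cert
+```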
+
+## 5. Copy {trustAll} from node1 to all of the other nodes
+
+## 6. Validate the common truststore file
+```
+keytool -list -v -keystore {trustAll}
+```
+
+## 7. Edit the Configuration files
+> Deploy the {keystore} and {trustAll} files, and configure `/<conf-dir>/ssl-server.conf` (e.g. `/etc/has/ssl-server.conf`) for the HAS server.
+```
+ssl.server.keystore.location = {path to keystore}
+ssl.server.keystore.password = {keystore password set in step 1}
+ssl.server.keystore.keypassword = {keypassword set in step 1}
+ssl.server.truststore.reload.interval = 1000
+ssl.server.truststore.location = {path to trustAll}
+ssl.server.truststore.password = {trustAll password set in step 2}
+```
+
+If `ssl-server.conf` and `ssl-client.conf` do not specify the paths of {keystore} and {trustAll},
+those files should be placed in `/etc/has`, the default location where HAS looks for them.
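+
+For example, assuming the hypothetical file names node1.jks and trustAll.jks from the examples above:
+```
+cp node1.jks trustAll.jks /etc/has/
+```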
+
+> Config `/etc/has/<https_host>/ssl-client.conf` for the HAS client, where `<https_host>` is the HAS server address, the same as the value configured in has-client.conf.
+```
+ssl.client.truststore.location = {path to trustAll}
+ssl.client.truststore.password = {trustAll password}
+```
+
+Notice: `ssl-client.conf` must also be present in `/etc/has/` to avoid HTTPS validation problems.
+
+> Config $HADOOP_HOME/etc/hadoop/ssl-server.xml for Hadoop:
+```
+<configuration>
+
+<property>
+  <name>ssl.server.truststore.location</name>
+  <value>path to trustAll</value>
+</property>
+
+<property>
+  <name>ssl.server.truststore.password</name>
+  <value>trustAll password</value>
+</property>
+
+<property>
+  <name>ssl.server.truststore.type</name>
+  <value>jks</value>
+</property>
+
+<property>
+  <name>ssl.server.truststore.reload.interval</name>
+  <value>10000</value>
+</property>
+
+<property>
+  <name>ssl.server.keystore.location</name>
+  <value>path to keystore</value>
+</property>
+
+<property>
+  <name>ssl.server.keystore.password</name>
+  <value>keystore password</value>
+</property>
+
+<property>
+  <name>ssl.server.keystore.keypassword</name>
+  <value>keystore keypassword</value>
+</property>
+
+<property>
+  <name>ssl.server.keystore.type</name>
+  <value>jks</value>
+</property>
+
+</configuration>
+```
+
+> Config $HADOOP_HOME/etc/hadoop/ssl-client.xml for Hadoop
+```
+<configuration>
+
+<property>
+  <name>ssl.client.truststore.location</name>
+  <value>path to trustAll</value>
+</property>
+
+<property>
+  <name>ssl.client.truststore.password</name>
+  <value>trustAll password</value>
+</property>
+
+<property>
+  <name>ssl.client.truststore.type</name>
+  <value>jks</value>
+</property>
+
+<property>
+  <name>ssl.client.truststore.reload.interval</name>
+  <value>10000</value>
+</property>
+
+<property>
+  <name>ssl.client.keystore.location</name>
+  <value>path to keystore</value>
+</property>
+
+<property>
+  <name>ssl.client.keystore.password</name>
+  <value>keystore password</value>
+</property>
+
+<property>
+  <name>ssl.client.keystore.keypassword</name>
+  <value>keystore keypassword</value>
+</property>
+
+<property>
+  <name>ssl.client.keystore.type</name>
+  <value>jks</value>
+</property>
+
+</configuration>
+```
+
+> To make the nodes in the cluster communicate bidirectionally, deploy the above configuration files to each node.
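+
+For example, assuming the same $HADOOP_HOME layout on a hypothetical node2:
+```
+scp $HADOOP_HOME/etc/hadoop/ssl-server.xml $HADOOP_HOME/etc/hadoop/ssl-client.xml node2:$HADOOP_HOME/etc/hadoop/
+```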

http://git-wip-us.apache.org/repos/asf/directory-kerby/blob/8ca9e8f6/has-project/docs/has-start.md
----------------------------------------------------------------------
diff --git a/has-project/docs/has-start.md b/has-project/docs/has-start.md
index a67aaab..7a3aec8 100644
--- a/has-project/docs/has-start.md
+++ b/has-project/docs/has-start.md
@@ -26,7 +26,7 @@ An example of has-server.conf:
   https_host = localhost
   https_port = 8092
   filter_auth_type = kerberos
-  
+
 [PLUGIN]
   auth_type = MySQL
 ```
@@ -47,8 +47,40 @@ sh bin/start-has.sh
 
 Root privileges are required if the https_port or KDC port number is in the range 0 to 1023.
 
-### Configure HAS KDC:
+### Configure HAS plugin:
+```
+cd kerby-dist/has-dist
+// Remove the https proxy if it exists
+unset https_proxy
+// Start HAS init tool
+sh bin/has-init.sh <conf_dir>
+// Also: sh bin/has-init.sh, if the HAS_CONF_DIR environment variable has been set.
+// Plugin_name example: MySQL
+HasInitTool: set_plugin <plugin_name>
+HasInitTool: exit
+```
 
+### Configure HAS backend:
+```
+cd kerby-dist/has-dist
+// Start HAS init tool
+sh bin/has-init.sh <conf_dir>
+// An example of a JSON backend:
+HasInitTool: config_kdcBackend json /tmp/has/jsonbackend
+// An example of a MySQL backend:
+HasInitTool: config_kdcBackend mysql jdbc:mysql://127.0.0.1:3306/mysqlbackend root passwd
+HasInitTool: exit
+```
+
+### Configure HAS KDC:
+```
+cd kerby-dist/has-dist
+// Start HAS init tool
+sh bin/has-init.sh <conf_dir>
+// An example of configuring the HAS KDC:
+HasInitTool: config_kdc localhost 88 HADOOP.COM
+HasInitTool: exit
+```
 Please make sure the following configuration files exist in the conf directory:
 has-server.conf backend.conf kdc.conf
 
@@ -82,7 +114,95 @@ cd kerby-dist/has-dist
 sh bin/start-has.sh <conf_dir> <work_dir>
 
 cd kerby-dist/has-dist
-sh bin/kdcinit.sh <conf_dir>
+sh bin/has-init.sh <conf_dir>
 HasInitTool: start
 HasInitTool: exit
 ```
+
+### Get and deploy krb5.conf:
+```
+cd kerby-dist/has-dist
+// Start HAS init tool:
+sh bin/has-init.sh <conf_dir>
+// Get krb5.conf, and put it in /etc:
+HasInitTool: getkrb5 -p /etc
+HasInitTool: exit
+```
+
+### Get and deploy has-client.conf:
+```
+cd kerby-dist/has-dist
+// Start HAS init tool
+sh bin/has-init.sh <conf_dir>
+// Get has-client.conf, and put it in /etc/has:
+HasInitTool: gethas -p /etc/has
+HasInitTool: exit
+```
+
+## 3. Prepare for Hadoop
+There are two ways to create and deploy the keytabs needed by Hadoop.
+
+### a. Create and deploy keytabs manually
+#### Create service principals:
+```
+cd kerby-dist/has-dist
+echo { \
+    HOSTS: [ \
+       {"name":"<host>","hostRoles":"<role>,..., <role>"\}, \
+       ...
+       {"name":"<host>","hostRoles":"<role>,...,<role>"\} \
+    ] \
+\} > hosts.txt
+// Start local hadmin tool
+sh bin/admin-local.sh <conf_dir> -k <keytab>
+// Also: sh bin/admin-local.sh -k <keytab>, if the HAS_CONF_DIR environment variable has been set.
+// You can also use the remote admin tool; the admin.keytab file needs to be placed in /etc/has
+sh bin/admin-remote.sh <conf_dir>
+// Also: sh bin/admin-remote.sh, if HAS_CONF_DIR environment variable has been set.
+HadminLocalTool.local: creprincs hosts.txt
+HadminLocalTool.local: exit
+```
+The admin.keytab file is created by the `has-init` tool. In the local and remote hadmin tools, you can type "?" for help.
+
+#### Get hostRoles list:
+```
+cd kerby-dist/has-dist
+// Start local or remote hadmin tool
+sh bin/admin-local.sh(bin/admin-remote.sh) <conf_dir> -k <keytab>
+HadminLocalTool.local: hostroles
+HadminLocalTool.local: exit
+```
+
+#### Export service keytabs:
+```
+cd kerby-dist/has-dist
+// Start local or remote hadmin tool
+sh bin/admin-local.sh(bin/admin-remote.sh) <conf_dir> -k <keytab>
+// An example of exporting the keytabs for localhost (the hostname):
+HadminLocalTool.local: expkeytabs localhost
+HadminLocalTool.local: exit
+```
+
+### b. One step to create service principals, export keytabs and deploy keytabs:
+```
+cd kerby-dist/has-dist
+echo { \
+    HOSTS: [ \
+       {"name":"<host>","hostRoles":"<role>,..., <role>"\}, \
+       ...
+       {"name":"<host>","hostRoles":"<role>,...,<role>"\} \
+    ] \
+\} > hosts.txt
+
+// Start local hadmin tool
+sh bin/admin-local.sh <conf_dir> -k <keytab>
+
+// deploy_keytabs [HostRoles-File] [SSH-Port] [Where-to-Deploy] [UserName] [Password]
+// Where-to-Deploy: The place to store the keytabs
+// UserName: The host user name
+// Password: The host password
+// All the hosts must share the same user name and password
+HadminLocalTool.local: deploy_keytabs hosts.txt 22 /etc/has/ username password
+HadminLocalTool.local: exit
+```
+Note: The admin.keytab file is created by `has-init`. In the local hadmin tool, you can type "?" for help.

http://git-wip-us.apache.org/repos/asf/directory-kerby/blob/8ca9e8f6/has-project/supports/hadoop/README.md
----------------------------------------------------------------------
diff --git a/has-project/supports/hadoop/README.md b/has-project/supports/hadoop/README.md
index 4e79388..b485818 100644
--- a/has-project/supports/hadoop/README.md
+++ b/has-project/supports/hadoop/README.md
@@ -240,6 +240,10 @@ Please look at [How to deploy https](https://github.com/apache/directory-kerby/b
 ## 4. Configure container-executor
 
 ### Create and configure container-executor.cfg
+`container-executor.cfg` is located in the path specified in the `mvn` build command. With the example build command used in this document,
+`mvn package -Pdist,native -Dtar -DskipTests -Dmaven.javadoc.skip=true -Dcontainer-executor.conf.dir=/etc/hadoop/conf`,
+the path is `/etc/hadoop/conf/`.
+Note that `container-executor.cfg` should be deployed to each node of the cluster.
 
 Example of container-executor.cfg:
 ```
@@ -266,8 +270,44 @@ chmod 6050 container-executor
 // Test whether configuration is correct
 container-executor --checksetup
 ```
+> Note that `container-executor` is in `$HADOOP_HOME/bin`.
 
-## 5. Setting up cross-realm for DistCp
+## 5. Configure the Hadoop Client
+There are two ways to enable HAS authentication for the Hadoop client.
+
+### a. Use HAS Plugin (MySQL plugin as example)
+
+#### Make sure the plugin type is MySQL
+Check the `auth_type` value in `has-server.conf` and `has-client.conf`.
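+
+For example, the `[PLUGIN]` section of has-server.conf (shown in has-start.md) should read:
+```
+[PLUGIN]
+  auth_type = MySQL
+```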
+
+#### Create the `has_user` table in the MySQL database
+On the HAS server, create a table named `has_user`.
+This table contains the columns `user_name` and `pass_word`, both of type `VARCHAR`.
+
+All user authentication information is stored in this table.
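+
+A minimal sketch of creating the table, reusing the database and user names from the environment variables below (the column lengths are an assumption):
+```
+mysql -u MySQLUserNameForHas -p NameOfHasDatabase \
+  -e "CREATE TABLE has_user (user_name VARCHAR(128) NOT NULL, pass_word VARCHAR(128) NOT NULL);"
+```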
+
+#### Configure environment variables on the HAS server
+Add the following environment variables on the HAS server:
+```
+mysqlUrl=jdbc:mysql://127.0.0.1:3306/NameOfHasDatabase
+mysqlUser=MySQLUserNameForHas
+mysqlPasswd=PasswordForDBUser
+```
+
+#### Configure environment variables on the Hadoop client
+Add the following environment variables on the Hadoop client:
+```
+userName=HAS Client Name in has_user table
+pass_word=HAS Client Password in has_user table
+```
+
+### b. Use legacy credential cache
+Use the `kinit` command to get a credential cache.
+```
+kinit -k -t path/to/any/keytab/file <principal_of_the_specified_keytab>
+```
+
+## 6. Setting up cross-realm for DistCp
 
 ### Setup cross realm trust between realms
 Please look at [How to setup cross-realm](https://github.com/apache/directory-kerby/blob/trunk/has-project/docs/cross-realm.md).

http://git-wip-us.apache.org/repos/asf/directory-kerby/blob/8ca9e8f6/kerby-tool/has-tool/src/main/java/org/apache/kerby/kerberos/tool/admin/remote/cmd/CreatePrincipalsRemoteCmd.java
----------------------------------------------------------------------
diff --git a/kerby-tool/has-tool/src/main/java/org/apache/kerby/kerberos/tool/admin/remote/cmd/CreatePrincipalsRemoteCmd.java b/kerby-tool/has-tool/src/main/java/org/apache/kerby/kerberos/tool/admin/remote/cmd/CreatePrincipalsRemoteCmd.java
index 0c98556..68f6b04 100644
--- a/kerby-tool/has-tool/src/main/java/org/apache/kerby/kerberos/tool/admin/remote/cmd/CreatePrincipalsRemoteCmd.java
+++ b/kerby-tool/has-tool/src/main/java/org/apache/kerby/kerberos/tool/admin/remote/cmd/CreatePrincipalsRemoteCmd.java
@@ -30,12 +30,12 @@ import java.io.IOException;
 import java.util.List;
 
 public class CreatePrincipalsRemoteCmd extends AdminRemoteCmd {
-    private static final String USAGE = "\nUsage: add_principals [hostRoles-file]\n"
+    private static final String USAGE = "\nUsage: create_principals [hostRoles-file]\n"
             + "\t'hostRoles-file' is a file with a hostRoles json string like:\n"
             + "\t\t{HOSTS: [ {\"name\":\"host1\",\"hostRoles\":\"HDFS\"}, "
             + "{\"name\":\"host2\",\"hostRoles\":\"HDFS,HBASE\"} ] }\n"
             + "\tExample:\n"
-            + "\t\tadd_principals hostroles.txt\n";
+            + "\t\tcreate_principals hostroles.txt\n";
 
     public CreatePrincipalsRemoteCmd(HasAuthAdminClient authHadmin) {
         super(authHadmin);

http://git-wip-us.apache.org/repos/asf/directory-kerby/blob/8ca9e8f6/kerby-tool/has-tool/src/main/java/org/apache/kerby/kerberos/tool/admin/remote/cmd/EnableConfigureRemoteCmd.java
----------------------------------------------------------------------
diff --git a/kerby-tool/has-tool/src/main/java/org/apache/kerby/kerberos/tool/admin/remote/cmd/EnableConfigureRemoteCmd.java b/kerby-tool/has-tool/src/main/java/org/apache/kerby/kerberos/tool/admin/remote/cmd/EnableConfigureRemoteCmd.java
index 8577d82..09157b8 100644
--- a/kerby-tool/has-tool/src/main/java/org/apache/kerby/kerberos/tool/admin/remote/cmd/EnableConfigureRemoteCmd.java
+++ b/kerby-tool/has-tool/src/main/java/org/apache/kerby/kerberos/tool/admin/remote/cmd/EnableConfigureRemoteCmd.java
@@ -40,6 +40,6 @@ public class EnableConfigureRemoteCmd extends AdminRemoteCmd {
 
         HasAuthAdminClient client = getAuthAdminClient();
         client.setEnableOfConf("true");
-        System.out.println("Disable configure HAS.");
+        System.out.println("Enable configure HAS.");
     }
 }

