knox-commits mailing list archives

From kmin...@apache.org
Subject svn commit: r1530346 [3/3] - in /incubator/knox: site/ site/books/knox-incubating-0-3-0/ trunk/books/0.3.0/ trunk/books/static/
Date Tue, 08 Oct 2013 16:45:25 GMT
Modified: incubator/knox/site/index.html
URL: http://svn.apache.org/viewvc/incubator/knox/site/index.html?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/site/index.html (original)
+++ incubator/knox/site/index.html Tue Oct  8 16:45:25 2013
@@ -1,5 +1,5 @@
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 7, 2013 -->
+<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 8, 2013 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
@@ -10,7 +10,7 @@
       @import url("./css/site.css");
     </style>
     <link rel="stylesheet" href="./css/print.css" type="text/css" media="print" />
-    <meta name="Date-Revision-yyyymmdd" content="20131007" />
+    <meta name="Date-Revision-yyyymmdd" content="20131008" />
     <meta http-equiv="Content-Language" content="en" />
                                                     
 <script type="text/javascript">var _gaq = _gaq || [];
@@ -57,7 +57,7 @@
                         <a href="https://cwiki.apache.org/confluence/display/KNOX/Index" class="externalLink" title="Wiki">Wiki</a>
               
                     
-                &nbsp;| <span id="publishDate">Last Published: 2013-10-07</span>
+                &nbsp;| <span id="publishDate">Last Published: 2013-10-08</span>
               &nbsp;| <span id="projectVersion">Version: 0.0.0-SNAPSHOT</span>
             </div>
       <div class="clear">

Modified: incubator/knox/site/issue-tracking.html
URL: http://svn.apache.org/viewvc/incubator/knox/site/issue-tracking.html?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/site/issue-tracking.html (original)
+++ incubator/knox/site/issue-tracking.html Tue Oct  8 16:45:25 2013
@@ -1,5 +1,5 @@
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 7, 2013 -->
+<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 8, 2013 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
@@ -10,7 +10,7 @@
       @import url("./css/site.css");
     </style>
     <link rel="stylesheet" href="./css/print.css" type="text/css" media="print" />
-    <meta name="Date-Revision-yyyymmdd" content="20131007" />
+    <meta name="Date-Revision-yyyymmdd" content="20131008" />
     <meta http-equiv="Content-Language" content="en" />
                                                     
 <script type="text/javascript">var _gaq = _gaq || [];
@@ -57,7 +57,7 @@
                         <a href="https://cwiki.apache.org/confluence/display/KNOX/Index" class="externalLink" title="Wiki">Wiki</a>
               
                     
-                &nbsp;| <span id="publishDate">Last Published: 2013-10-07</span>
+                &nbsp;| <span id="publishDate">Last Published: 2013-10-08</span>
               &nbsp;| <span id="projectVersion">Version: 0.0.0-SNAPSHOT</span>
             </div>
       <div class="clear">

Modified: incubator/knox/site/license.html
URL: http://svn.apache.org/viewvc/incubator/knox/site/license.html?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/site/license.html (original)
+++ incubator/knox/site/license.html Tue Oct  8 16:45:25 2013
@@ -1,5 +1,5 @@
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 7, 2013 -->
+<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 8, 2013 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
@@ -10,7 +10,7 @@
       @import url("./css/site.css");
     </style>
     <link rel="stylesheet" href="./css/print.css" type="text/css" media="print" />
-    <meta name="Date-Revision-yyyymmdd" content="20131007" />
+    <meta name="Date-Revision-yyyymmdd" content="20131008" />
     <meta http-equiv="Content-Language" content="en" />
                                                     
 <script type="text/javascript">var _gaq = _gaq || [];
@@ -57,7 +57,7 @@
                         <a href="https://cwiki.apache.org/confluence/display/KNOX/Index" class="externalLink" title="Wiki">Wiki</a>
               
                     
-                &nbsp;| <span id="publishDate">Last Published: 2013-10-07</span>
+                &nbsp;| <span id="publishDate">Last Published: 2013-10-08</span>
               &nbsp;| <span id="projectVersion">Version: 0.0.0-SNAPSHOT</span>
             </div>
       <div class="clear">

Modified: incubator/knox/site/mail-lists.html
URL: http://svn.apache.org/viewvc/incubator/knox/site/mail-lists.html?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/site/mail-lists.html (original)
+++ incubator/knox/site/mail-lists.html Tue Oct  8 16:45:25 2013
@@ -1,5 +1,5 @@
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 7, 2013 -->
+<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 8, 2013 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
@@ -10,7 +10,7 @@
       @import url("./css/site.css");
     </style>
     <link rel="stylesheet" href="./css/print.css" type="text/css" media="print" />
-    <meta name="Date-Revision-yyyymmdd" content="20131007" />
+    <meta name="Date-Revision-yyyymmdd" content="20131008" />
     <meta http-equiv="Content-Language" content="en" />
                                                     
 <script type="text/javascript">var _gaq = _gaq || [];
@@ -57,7 +57,7 @@
                         <a href="https://cwiki.apache.org/confluence/display/KNOX/Index" class="externalLink" title="Wiki">Wiki</a>
               
                     
-                &nbsp;| <span id="publishDate">Last Published: 2013-10-07</span>
+                &nbsp;| <span id="publishDate">Last Published: 2013-10-08</span>
               &nbsp;| <span id="projectVersion">Version: 0.0.0-SNAPSHOT</span>
             </div>
       <div class="clear">

Modified: incubator/knox/site/project-info.html
URL: http://svn.apache.org/viewvc/incubator/knox/site/project-info.html?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/site/project-info.html (original)
+++ incubator/knox/site/project-info.html Tue Oct  8 16:45:25 2013
@@ -1,5 +1,5 @@
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 7, 2013 -->
+<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 8, 2013 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
@@ -10,7 +10,7 @@
       @import url("./css/site.css");
     </style>
     <link rel="stylesheet" href="./css/print.css" type="text/css" media="print" />
-    <meta name="Date-Revision-yyyymmdd" content="20131007" />
+    <meta name="Date-Revision-yyyymmdd" content="20131008" />
     <meta http-equiv="Content-Language" content="en" />
                                                     
 <script type="text/javascript">var _gaq = _gaq || [];
@@ -57,7 +57,7 @@
                         <a href="https://cwiki.apache.org/confluence/display/KNOX/Index" class="externalLink" title="Wiki">Wiki</a>
               
                     
-                &nbsp;| <span id="publishDate">Last Published: 2013-10-07</span>
+                &nbsp;| <span id="publishDate">Last Published: 2013-10-08</span>
               &nbsp;| <span id="projectVersion">Version: 0.0.0-SNAPSHOT</span>
             </div>
       <div class="clear">

Modified: incubator/knox/site/team-list.html
URL: http://svn.apache.org/viewvc/incubator/knox/site/team-list.html?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/site/team-list.html (original)
+++ incubator/knox/site/team-list.html Tue Oct  8 16:45:25 2013
@@ -1,5 +1,5 @@
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 7, 2013 -->
+<!-- Generated by Apache Maven Doxia Site Renderer 1.3 at Oct 8, 2013 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
@@ -10,7 +10,7 @@
       @import url("./css/site.css");
     </style>
     <link rel="stylesheet" href="./css/print.css" type="text/css" media="print" />
-    <meta name="Date-Revision-yyyymmdd" content="20131007" />
+    <meta name="Date-Revision-yyyymmdd" content="20131008" />
     <meta http-equiv="Content-Language" content="en" />
                                                     
 <script type="text/javascript">var _gaq = _gaq || [];
@@ -57,7 +57,7 @@
                         <a href="https://cwiki.apache.org/confluence/display/KNOX/Index" class="externalLink" title="Wiki">Wiki</a>
               
                     
-                &nbsp;| <span id="publishDate">Last Published: 2013-10-07</span>
+                &nbsp;| <span id="publishDate">Last Published: 2013-10-08</span>
               &nbsp;| <span id="projectVersion">Version: 0.0.0-SNAPSHOT</span>
             </div>
       <div class="clear">

Modified: incubator/knox/trunk/books/0.3.0/book_client-details.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/book_client-details.md?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/book_client-details.md (original)
+++ incubator/knox/trunk/books/0.3.0/book_client-details.md Tue Oct  8 16:45:25 2013
@@ -59,7 +59,7 @@ The GATEWAY_HOME directory is the direct
 * A few examples require the use of commands from a standard Groovy installation.  These examples are optional but to try them you will need Groovy [installed](http://groovy.codehaus.org/Installing+Groovy).
 
 
-### Assumptions ###
+### Basics ###
 
 The DSL requires a shell to interpret the Groovy script.
 The shell can either be used interactively or to execute a script file.
@@ -81,7 +81,7 @@ Using `^C` to exit can sometimes leaves 
 
 The shell can also be used to execute a script by passing a single filename argument.
 
-    java -jar bin/shell.jar samples/ExamplePutFile.groovy
+    java -jar bin/shell.jar samples/ExampleWebHdfsPutGetFile.groovy
 
 
 ### Examples ###
@@ -90,8 +90,8 @@ Once the shell can be launched the DSL c
 Below is a very simple example of an interactive shell session to upload a file to HDFS.
 
     java -jar bin/shell.jar
-    knox:000> hadoop = Hadoop.login( "https://localhost:8443/gateway/sandbox", "guest", "guest-password" )
-    knox:000> Hdfs.put( hadoop ).file( "README" ).to( "/tmp/example/README" ).now()
+    knox:000> session = Hadoop.login( "https://localhost:8443/gateway/sandbox", "guest", "guest-password" )
+    knox:000> Hdfs.put( session ).file( "README" ).to( "/tmp/example/README" ).now()
 
 The `knox:000>` in the example above is the prompt from the embedded Groovy console.
 If your output doesn't look like this you may need to set the verbosity and show-last-result preferences as described above in the Usage section.
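
For reference, a minimal sketch of adjusting those preferences from the embedded shell (the `set` command syntax is assumed from standard groovysh and may vary slightly between Groovy versions):

    knox:000> set verbosity QUIET
    knox:000> set show-last-result false
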
@@ -99,19 +99,19 @@ If you output doesn't look like this you
 If you receive an error `HTTP/1.1 403 Forbidden` it may be because that file already exists.
 Try deleting it with the following command and then try again.
 
-    knox:000> Hdfs.rm(hadoop).file("/tmp/example/README").now()
+    knox:000> Hdfs.rm(session).file("/tmp/example/README").now()
 
 Without using some other tool to browse HDFS it is hard to tell that this command did anything.
 Execute this to get a bit more feedback.
 
-    knox:000> println "Status=" + Hdfs.put( hadoop ).file( "README" ).to( "/tmp/example/README2" ).now().statusCode
+    knox:000> println "Status=" + Hdfs.put( session ).file( "README" ).to( "/tmp/example/README2" ).now().statusCode
     Status=201
 
 Notice that a different filename is used for the destination.
 Without this an error would have resulted.
 Of course the DSL also provides a command to list the contents of a directory.
 
-    knox:000> println Hdfs.ls( hadoop ).dir( "/tmp/example" ).now().string
+    knox:000> println Hdfs.ls( session ).dir( "/tmp/example" ).now().string
     {"FileStatuses":{"FileStatus":[{"accessTime":1363711366977,"blockSize":134217728,"group":"hdfs","length":19395,"modificationTime":1363711366977,"owner":"guest","pathSuffix":"README","permission":"644","replication":1,"type":"FILE"},{"accessTime":1363711375617,"blockSize":134217728,"group":"hdfs","length":19395,"modificationTime":1363711375617,"owner":"guest","pathSuffix":"README2","permission":"644","replication":1,"type":"FILE"}]}}
 
 It is a design decision of the DSL to not provide type safe classes for various request and response payloads.
@@ -121,7 +121,7 @@ See the Groovy section below for a varie
 The example below shows the use of JsonSlurper and GPath to extract content from a JSON response.
 
     knox:000> import groovy.json.JsonSlurper
-    knox:000> text = Hdfs.ls( hadoop ).dir( "/tmp/example" ).now().string
+    knox:000> text = Hdfs.ls( session ).dir( "/tmp/example" ).now().string
     knox:000> json = (new JsonSlurper()).parseText( text )
     knox:000> println json.FileStatuses.FileStatus.pathSuffix
     [README, README2]
@@ -129,20 +129,20 @@ The example below shows the use of JsonS
 *In the future, "built-in" methods to slurp JSON and XML may be added to make this a bit easier.*
 *This would allow for this type of single line interaction.*
 
-    println Hdfs.ls(hadoop).dir("/tmp").now().json().FileStatuses.FileStatus.pathSuffix
+    println Hdfs.ls(session).dir("/tmp").now().json().FileStatuses.FileStatus.pathSuffix
 
 A shell session should always be ended by shutting down the session.
 The examples above do not touch on it but the DSL supports the simple execution of commands asynchronously.
 The shutdown command attempts to ensure that all asynchronous commands have completed before exiting the shell.
 
-    knox:000> hadoop.shutdown()
+    knox:000> session.shutdown()
     knox:000> exit
 
 All of the commands above could have been combined into a script file and executed as a single line.
 
-    java -jar bin/shell.jar samples/ExamplePutFile.groovy
+    java -jar bin/shell.jar samples/ExampleWebHdfsPutGet.groovy
 
-This script file is available in the distribution but for convenience, this is the content.
+This would be the content of that script.
 
     import org.apache.hadoop.gateway.shell.Hadoop
     import org.apache.hadoop.gateway.shell.hdfs.Hdfs
@@ -153,13 +153,13 @@ This script file is available in the dis
     password = "guest-password"
     dataFile = "README"
     
-    hadoop = Hadoop.login( gateway, username, password )
-    Hdfs.rm( hadoop ).file( "/tmp/example" ).recursive().now()
-    Hdfs.put( hadoop ).file( dataFile ).to( "/tmp/example/README" ).now()
-    text = Hdfs.ls( hadoop ).dir( "/tmp/example" ).now().string
+    session = Hadoop.login( gateway, username, password )
+    Hdfs.rm( session ).file( "/tmp/example" ).recursive().now()
+    Hdfs.put( session ).file( dataFile ).to( "/tmp/example/README" ).now()
+    text = Hdfs.ls( session ).dir( "/tmp/example" ).now().string
     json = (new JsonSlurper()).parseText( text )
     println json.FileStatuses.FileStatus.pathSuffix
-    hadoop.shutdown()
+    session.shutdown()
     exit
 
 Notice the `Hdfs.rm` command.  This is included simply to ensure that the script can be rerun.
@@ -172,19 +172,19 @@ The DSL supports the ability to invoke c
 The object returned from the later() method is a java.util.concurrent.Future parametrized with the response type of the command.
 This is an example of how to asynchronously put a file to HDFS.
 
-    future = Hdfs.put(hadoop).file("README").to("tmp/example/README").later()
+    future = Hdfs.put(session).file("README").to("tmp/example/README").later()
     println future.get().statusCode
 
 The future.get() method will block until the asynchronous command is complete.
 To illustrate the usefulness of this, however, multiple concurrent commands are required.
 
-    readmeFuture = Hdfs.put(hadoop).file("README").to("tmp/example/README").later()
-    licenseFuture = Hdfs.put(hadoop).file("LICENSE").to("tmp/example/LICENSE").later()
-    hadoop.waitFor( readmeFuture, licenseFuture )
+    readmeFuture = Hdfs.put(session).file("README").to("tmp/example/README").later()
+    licenseFuture = Hdfs.put(session).file("LICENSE").to("tmp/example/LICENSE").later()
+    session.waitFor( readmeFuture, licenseFuture )
     println readmeFuture.get().statusCode
     println licenseFuture.get().statusCode
 
-The hadoop.waitFor() method will wait for one or more asynchronous commands to complete.
+The session.waitFor() method will wait for one or more asynchronous commands to complete.
 
 
 ### Closures ###
@@ -196,17 +196,17 @@ Closures are blocks of code that are pas
 In Groovy these are contained within {} immediately after a method.
 These blocks of code are executed once the asynchronous command is complete.
 
-    Hdfs.put(hadoop).file("README").to("tmp/example/README").later(){ println it.statusCode }
+    Hdfs.put(session).file("README").to("tmp/example/README").later(){ println it.statusCode }
 
 In this example the put() command is executed on a separate thread and once complete the `println it.statusCode` block is executed on that thread.
 The it variable is automatically populated by Groovy and is a reference to the result that is returned from the future or now() method.
 The future example above can be rewritten to illustrate the use of closures.
 
-    readmeFuture = Hdfs.put(hadoop).file("README").to("tmp/example/README").later() { println it.statusCode }
-    licenseFuture = Hdfs.put(hadoop).file("LICENSE").to("tmp/example/LICENSE").later() { println it.statusCode }
-    hadoop.waitFor( readmeFuture, licenseFuture )
+    readmeFuture = Hdfs.put(session).file("README").to("tmp/example/README").later() { println it.statusCode }
+    licenseFuture = Hdfs.put(session).file("LICENSE").to("tmp/example/LICENSE").later() { println it.statusCode }
+    session.waitFor( readmeFuture, licenseFuture )
 
-Again, the hadoop.waitFor() method will wait for one or more asynchronous commands to complete.
+Again, the session.waitFor() method will wait for one or more asynchronous commands to complete.
 
 
 ### Constructs ###
@@ -214,7 +214,7 @@ Again, the hadoop.waitFor() method will 
 In order to use the DSL there are three primary constructs that need to be understood.
 
 
-### Hadoop ###
+#### Session ####
 
 This construct encapsulates the client side session state that will be shared between all command invocations.
 In particular it will simplify the management of any tokens that need to be presented with each command invocation.
@@ -224,31 +224,31 @@ The syntax associated with this is expec
 Rather it is expected that some form of access token will be used to initialize the session.
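
As a minimal sketch drawn from the earlier examples, the lifecycle of a session looks like this (the Sandbox URL and guest credentials are the same assumptions used throughout this section):

    session = Hadoop.login( "https://localhost:8443/gateway/sandbox", "guest", "guest-password" )
    // ... issue commands such as Hdfs.ls( session ) ...
    session.shutdown()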
 
 
-### Services ###
+#### Services ####
 
 Services are the primary extension point for adding new suites of commands.
-The built in examples are: Hdfs, Job and Workflow.
-The desire for extensibility is the reason for the slightly awkward Hdfs.ls(hadoop) syntax.
-Certainly something more like `hadoop.hdfs().ls()` would have been preferred but this would prevent adding new commands easily.
+The current built-in examples are: Hdfs, Job and Workflow.
+The desire for extensibility is the reason for the slightly awkward Hdfs.ls(session) syntax.
+Certainly something more like `session.hdfs().ls()` would have been preferred but this would prevent adding new commands easily.
 At a minimum it would result in extension commands with a different syntax from the "built-in" commands.
 
 The service objects essentially function as a factory for a suite of commands.
 
 
-### Commands ###
+#### Commands ####
 
 Commands provide the behavior of the DSL.
 They typically follow a Fluent interface style in order to allow for single line commands.
 There are really three parts to each command: Request, Invocation, and Response.
 
 
-### Request ###
+#### Request ####
 
 The request is populated by all of the methods between the "verb" method and the "invoke" method.
-For example in `Hdfs.rm(hadoop).ls(dir).now()` the request is populated between the "verb" method `rm()` and the "invoke" method `now()`.
+For example in `Hdfs.rm(session).file(dir).now()` the request is populated between the "verb" method `rm()` and the "invoke" method `now()`.
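
To make this concrete, here is the earlier put example annotated with the role each method plays (a sketch using the same Sandbox assumptions):

    // verb: put()  |  request: file(), to()  |  invoke: now()
    Hdfs.put( session ).file( "README" ).to( "/tmp/example/README" ).now()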
 
 
-### Invocation ###
+#### Invocation ####
 
 The invocation method controls how the request is invoked.
 Currently both synchronous and asynchronous invocations are supported.
@@ -258,7 +258,7 @@ In addition later() invocation method ca
 See the Futures and Closures sections above for additional detail and examples.
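
For example, the same listing can be invoked either way (a sketch reusing the session from the earlier examples):

    println Hdfs.ls( session ).dir( "/tmp/example" ).now().string  // synchronous, blocks for the response
    future = Hdfs.ls( session ).dir( "/tmp/example" ).later()      // asynchronous, returns a Future
    println future.get().statusCode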
 
 
-### Response ###
+#### Response ####
 
 The response contains the results of the invocation of the request.
 In most cases the response is a thin wrapper over the HTTP response.
@@ -276,12 +276,12 @@ In fact many commands will share a singl
 Thanks to Groovy these methods can be accessed as attributes.
 In some of the examples above, the statusCode attribute was retrieved.
 
-    println Hdfs.put(hadoop).rm(dir).now().statusCode
+    println Hdfs.put(session).rm(dir).now().statusCode
 
 Groovy will invoke the getStatusCode method to retrieve the statusCode attribute.
 
 The three methods getStream(), getBytes() and getString() deserve special attention.
-Care must be taken that the HTTP body is read only once.
+Care must be taken that the HTTP body is fully read once and only once.
 Therefore one of these methods (and only one) must be called once and only once.
 Calling one of these more than once will cause an error.
 Failing to call one of these methods once will result in lingering open HTTP connections.
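
A small sketch of the intended pattern, based on the behavior described above (the `string` attribute consumes the body; `close()` may then be called):

    response = Hdfs.ls( session ).dir( "/tmp/example" ).now()
    text = response.string  // reads the HTTP body; call exactly one of string/bytes/stream
    response.close()        // release the underlying HTTP connection
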
@@ -294,160 +294,12 @@ The close() method may be called after r
 
 ### Services ###
 
-There are three basic DSL services and commands bundled with the shell.
-
-
-#### HDFS ####
-
-Provides basic HDFS commands.
-*Using these DSL commands requires that WebHDFS be running in the Hadoop cluster.*
-
-#### Jobs (Templeton/WebHCat) ####
-
-Provides basic job submission and status commands.
-*Using these DSL commands requires that Templeton/WebHCat be running in the Hadoop cluster.*
-
-
-#### Workflow (Oozie) ####
-
-Provides basic workflow submission and status commands.
-*Using these DSL commands requires that Oozie be running in the Hadoop cluster.*
-
-
-### HDFS Commands (WebHDFS) ###
-
-#### ls() - List the contents of a HDFS directory.
-
-* Request
-    * dir (String) - The HDFS directory to list.
-* Response
-    * BasicResponse
-* Example
-    * `Hdfs.ls(hadoop).ls().dir("/").now()`
-
-#### rm() - Remove a HDFS file or directory.
-
-* Request
-    * file (String) - The HDFS file or directory to remove.
-    * recursive (Boolean) - If the file is a directory also remove any contained files and directories. Optional: default=false
-* Response
-    * EmptyResponse - Implicit close().
-* Example
-    * `Hdfs.rm(hadoop).file("/tmp/example").recursive().now()`
-
-#### put() - Copy a file from the local file system to HDFS.
-
-* Request
-    * text (String) - The text to copy to the remote file.
-    * file (String) - The name of a local file to copy to the remote file.
-    * to (String) - The name of the remote file create.
-* Response
-    * EmptyResponse - Implicit close().
-* Example
-    * `Hdfs.put(hadoop).file("localFile").to("/tmp/example/remoteFile").now()`
-
-#### get() - Copy a file from HDFS to the local file system.
-
-* Request
-    * file (String) - The name of the local file to create from the remote file.  If this isn't specified the file content must be read from the response.
-    * from (String) - The name of the remote file to copy.
-* Response
-    * BasicResponse
-* Example
-    * `Hdfs.get(hadoop).file("localFile").from("/tmp/example/remoteFile").now()`
-
-#### mkdir() - Create a directory in HDFS.
-
-* Request
-    * dir (String) - The name of the remote directory to create.
-    * perm (String) - The permissions to create the remote directory with.  Optional: default="777"
-* Response
-    * EmptyResponse - Implicit close().
-* Example
-    * `Hdfs.mkdir(hadoop).dir("/tmp/example").perm("777").now()`
-
-
-### Job Commands (WebHCat/Templeton)
-
-#### submitJava() - Submit a Java MapReduce job.
-
-* Request
-    * jar (String) - The remote file name of the JAR containing the app to execute.
-    * app (String) - The app name to execute.  This is wordcount for example not the class name.
-    * input (String) - The remote directory name to use as input for the job.
-    * output (String) - The remote directory name to store output from the job.
-* Response
-    * jobId : String - The job ID of the submitted job.  Consumes body.
-* Example
-    * `Job.submitJava(hadoop).jar(remoteJarName).app(appName).input(remoteInputDir).output(remoteOutputDir).now().jobId`
-
-#### submitPig() - Submit a Pig job.
-
-* Request
-    * file (String) - The remote file name of the pig script.
-    * arg (String) - An argument to pass to the script.
-    * statusDir (String) - The remote directory to store status output.
-* Response
-    * jobId : String - The job ID of the submitted job.  Consumes body.
-* Example
-    * `Job.submitPig(hadoop).file(remotePigFileName).arg("-v").statusDir(remoteStatusDir).now()`
-
-#### submitHive() - Submit a Hive job.
-
-* Request
-    * file (String) - The remote file name of the hive script.
-    * arg (String) - An argument to pass to the script.
-    * statusDir (String) - The remote directory to store status output.
-* Response
-    * jobId : String - The job ID of the submitted job.  Consumes body.
-* Example
-    * `Job.submitHive(hadoop).file(remoteHiveFileName).arg("-v").statusDir(remoteStatusDir).now()`
-
-#### queryQueue() - Return a list of all job IDs registered to the user.
-
-* Request
-    * No request parameters.
-* Response
-    * BasicResponse
-* Example
-    * `Job.queryQueue(hadoop).now().string`
-
-#### queryStatus() - Check the status of a job and get related job information given its job ID.
-
-* Request
-    * jobId (String) - The job ID to check. This is the ID received when the job was created.
-* Response
-    * BasicResponse
-* Example
-    * `Job.queryStatus(hadoop).jobId(jobId).now().string`
-
-
-### Workflow Commands (Oozie) ###
-
-#### submit() - Submit a workflow job.
-
-* Request
-    * text (String) - XML formatted workflow configuration string.
-    * file (String) - A filename containing XML formatted workflow configuration.
-    * action (String) - The initial action to take on the job.  Optional: Default is "start".
-* Response
-    * BasicResponse
-* Example
-    * `Workflow.submit(hadoop).file(localFile).action("start").now()`
-
-#### status() - Query the status of a workflow job.
-
-* Request
-    * jobId (String) - The job ID to check. This is the ID received when the job was created.
-* Response
-    * BasicResponse
-* Example
-    * `Workflow.status(hadoop).jobId(jobId).now().string`
+The built-in supported client DSL for each Hadoop service can be found in the #[Service Details] section.
 
 
 ### Extension ###
 
-Extensibility is a key design goal of the KnoxShell and DSL.
+Extensibility is a key design goal of the KnoxShell and client DSL.
 There are two ways to provide extended functionality for use with the shell.
 The first is to simply create Groovy scripts that use the DSL to perform a useful task.
 The second is to add new services and commands.
@@ -472,24 +324,26 @@ These happen to be Groovy source files b
 The easiest way to add these to the shell is to compile them directly into the `ext` directory.
 *Note: This command depends upon having the Groovy compiler installed and available on the execution path.*
 
-    groovy -d ext -cp bin/shell.jar samples/SampleService.groovy samples/SampleSimpleCommand.groovy samples/SampleComplexCommand.groovy
+    groovy -d ext -cp bin/shell.jar samples/SampleService.groovy \
+        samples/SampleSimpleCommand.groovy samples/SampleComplexCommand.groovy
 
 These source files are available in the samples directory of the distribution but are included here for convenience.
 
+
 #### Sample Service (Groovy)
 
     import org.apache.hadoop.gateway.shell.Hadoop
 
     class SampleService {
 
-        static String PATH = "/namenode/api/v1"
+        static String PATH = "/webhdfs/v1"
 
-        static SimpleCommand simple( Hadoop hadoop ) {
-            return new SimpleCommand( hadoop )
+        static SimpleCommand simple( Hadoop session ) {
+            return new SimpleCommand( session )
         }
 
-        static ComplexCommand.Request complex( Hadoop hadoop ) {
-            return new ComplexCommand.Request( hadoop )
+        static ComplexCommand.Request complex( Hadoop session ) {
+            return new ComplexCommand.Request( session )
         }
 
     }
@@ -506,8 +360,8 @@ These source files are available in the 
 
     class SimpleCommand extends AbstractRequest<BasicResponse> {
 
-        SimpleCommand( Hadoop hadoop ) {
-            super( hadoop )
+        SimpleCommand( Hadoop session ) {
+            super( session )
         }
 
         private String param
@@ -531,6 +385,7 @@ These source files are available in the 
 
     }
 
+
 #### Sample Complex Command (Groovy)
 
     import com.jayway.jsonpath.JsonPath
@@ -547,8 +402,8 @@ These source files are available in the 
 
         static class Request extends AbstractRequest<Response> {
 
-            Request( Hadoop hadoop ) {
-                super( hadoop )
+            Request( Hadoop session ) {
+                super( session )
             }
 
             private String param;
@@ -586,6 +441,7 @@ These source files are available in the 
 
     }
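
A hypothetical interactive use of these samples once compiled into `ext` follows; the `param()` setter name is assumed from the `param` field shown above and is not part of this excerpt.

    java -jar bin/shell.jar
    knox:000> session = Hadoop.login( "https://localhost:8443/gateway/sandbox", "guest", "guest-password" )
    knox:000> println SampleService.simple( session ).param( "some-value" ).now().string  // param() assumed
    knox:000> session.shutdown()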
 
+
 ### Groovy
 
 The shell included in the distribution is basically an unmodified packaging of the Groovy shell.
@@ -594,13 +450,13 @@ In fact the JARs required to execute the
 Therefore these commands are functionally equivalent if you have Groovy [installed][15].
 See below for a description of the JARs required by the DSL from the `lib` and `dep` directories.
 
-    java -jar bin/shell.jar samples/ExamplePutFile.groovy
-    groovy -classpath {JARs required by the DSL from lib and dep} samples/ExamplePutFile.groovy
+    java -jar bin/shell.jar samples/ExampleWebHdfsPutGet.groovy
+    groovy -classpath {JARs required by the DSL from lib and dep} samples/ExampleWebHdfsPutGet.groovy
 
 The interactive shell isn't exactly equivalent.
-However the only difference is that the shell.jar automatically executes some additional imports that are useful for the KnoxShell DSL.
+However the only difference is that the shell.jar automatically executes some additional imports that are useful for the KnoxShell client DSL.
 So these two sets of commands should be functionally equivalent.
-*However there is currently a class loading issue that prevents the groovysh command from working propertly.*
+*However there is currently a class loading issue that prevents the groovysh command from working properly.*
 
     java -jar bin/shell.jar
 
@@ -621,7 +477,7 @@ Alternatively, you can use the Groovy Co
     import org.apache.hadoop.gateway.shell.workflow.Workflow
     import java.util.concurrent.TimeUnit
 
-The list of JARs currently required by the DSL is
+The JARs currently required by the client DSL are
 
     lib/gateway-shell-${gateway-version}.jar
     dep/httpclient-4.2.3.jar
@@ -631,17 +487,17 @@ The list of JARs currently required by t
 
 So on Linux/MacOS you would need this command
 
-    groovy -cp lib/gateway-shell-0.2.0-SNAPSHOT.jar:dep/httpclient-4.2.3.jar:dep/httpcore-4.2.2.jar:dep/commons-lang3-3.1.jar:dep/commons-codec-1.7.jar samples/ExamplePutFile.groovy
+    groovy -cp lib/gateway-shell-0.2.0-SNAPSHOT.jar:dep/httpclient-4.2.3.jar:dep/httpcore-4.2.2.jar:dep/commons-lang3-3.1.jar:dep/commons-codec-1.7.jar samples/ExampleWebHdfsPutGet.groovy
 
 and on Windows you would need this command
 
-    groovy -cp lib/gateway-shell-0.2.0-SNAPSHOT.jar;dep/httpclient-4.2.3.jar;dep/httpcore-4.2.2.jar;dep/commons-lang3-3.1.jar;dep/commons-codec-1.7.jar samples/ExamplePutFile.groovy
+    groovy -cp lib/gateway-shell-0.2.0-SNAPSHOT.jar;dep/httpclient-4.2.3.jar;dep/httpcore-4.2.2.jar;dep/commons-lang3-3.1.jar;dep/commons-codec-1.7.jar samples/ExampleWebHdfsPutGet.groovy
 
 The exact list of required JARs is likely to change from release to release so it is recommended that you utilize the wrapper `bin/shell.jar`.
 
 In addition, because the DSL can be used via standard Groovy, the Groovy integrations in many popular IDEs (e.g. IntelliJ, Eclipse) can also be used.
 This makes it particularly nice to develop and execute scripts to interact with Hadoop.
-The code-completion feature in particular provides immense value.
+The code-completion features in modern IDEs in particular provide immense value.
 All that is required is to add the shell-0.2.0.jar to the project's class path.
 
 There are a variety of Groovy tools that make it very easy to work with the standard interchange formats (i.e. JSON and XML).
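
For instance, XML responses can be handled much like the JsonSlurper example earlier; a minimal sketch (the inline XML here is illustrative rather than an actual service response):

    xml = new XmlSlurper().parseText( "<FileStatuses><FileStatus/><FileStatus/></FileStatuses>" )
    println xml.FileStatus.size()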

Modified: incubator/knox/trunk/books/0.3.0/book_service-details.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/book_service-details.md?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/book_service-details.md (original)
+++ incubator/knox/trunk/books/0.3.0/book_service-details.md Tue Oct  8 16:45:25 2013
@@ -24,9 +24,17 @@ You may notice that there are some minor
 In general this is necessary in order to achieve the goal of not leaking internal Hadoop cluster details to the client.
 
 Keep in mind that the gateway uses a plugin model for supporting Hadoop services.
-Check back with a the [Apache Knox][site] site for the latest news on plugin availability.
+Check back with the [Apache Knox][site] site for the latest news on plugin availability.
 You can also create your own custom plugin to extend the capabilities of the gateway.
 
+These are the current Hadoop services with built-in support.
+
+* #[WebHDFS]
+* #[WebHCat]
+* #[Oozie]
+* #[HBase]
+* #[Hive]
+
 ### Assumptions
 
 This document assumes a few things about your environment in order to simplify the examples.

Modified: incubator/knox/trunk/books/0.3.0/config.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/config.md?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/config.md (original)
+++ incubator/knox/trunk/books/0.3.0/config.md Tue Oct  8 16:45:25 2013
@@ -160,8 +160,14 @@ The Hostmap configuration required to al
                 <role>hostmap</role>
                 <name>static</name>
                 <enabled>true</enabled>
-                <param><name>ec2-23-22-31-165.compute-1.amazonaws.com</name><value>ip-10-118-99-172.ec2.internal</value></param>
-                <param><name>ec2-23-23-25-10.compute-1.amazonaws.com</name><value>ip-10-39-107-209.ec2.internal</value></param>
+                <param>
+                    <name>ec2-23-22-31-165.compute-1.amazonaws.com</name>
+                    <value>ip-10-118-99-172.ec2.internal</value>
+                </param>
+                <param>
+                    <name>ec2-23-23-25-10.compute-1.amazonaws.com</name>
+                    <value>ip-10-39-107-209.ec2.internal</value>
+                </param>
             </provider>
             ...
         </gateway>
@@ -289,11 +295,12 @@ In order to provide your own certificate
 # ----NEEDS TESTING
 One way to accomplish this is to start with a PKCS12 store for your key pair and then convert it to a Java keystore or JKS.
 
-	openssl pkcs12 -export -in cert.pem -inkey key.pem > server.p12
-	
+    openssl pkcs12 -export -in cert.pem -inkey key.pem > server.p12
+
 The above example uses openssl to create a PKCS12 encoded store from your provided certificate and private key.
 
-	keytool -importkeystore -srckeystore {server.p12} -destkeystore gateway.jks -srcstoretype pkcs12
+    keytool -importkeystore -srckeystore {server.p12} -destkeystore gateway.jks -srcstoretype pkcs12
+
 This example converts the PKCS12 store into a Java keystore (JKS). It should prompt you for the keystore and key passwords for the destination keystore. You must use the master-secret for both.
 
 While using this approach there are a couple of important things to be aware of:
@@ -307,21 +314,23 @@ NOTE: The password for the keystore as w
 # ----END NEEDS TESTING
 
 ##### Generating a self-signed cert for use in testing or development environments #####
-	
-	keytool -genkey -keyalg RSA -alias gateway-identity -keystore gateway.jks -storepass {master-secret} -validity 360 -keysize 2048 
+
+    keytool -genkey -keyalg RSA -alias gateway-identity -keystore gateway.jks \
+        -storepass {master-secret} -validity 360 -keysize 2048
 
 Keytool will prompt you for a number of elements that will comprise the distinguished name (DN) within your certificate.
 
-<b>NOTE:</b> When it prompts you for your First and Last name be sure to type in the hostname of the machine that your gateway instance will be running on. This is used by clients during hostname verification to ensure that the presented certificate matches the hostname that was used in the URL for the connection - so they need to match.
-	
-<b>NOTE:</b> When it prompts for the key password just press enter to ensure that it is the same as the keystore password. Which as was described earlier must match the master secret for the gateway instance.
+*NOTE:* When it prompts you for your First and Last name be sure to type in the hostname of the machine that your gateway instance will be running on. This is used by clients during hostname verification to ensure that the presented certificate matches the hostname that was used in the URL for the connection - so they need to match.
+
+*NOTE:* When it prompts for the key password just press enter to ensure that it is the same as the keystore password, which, as described earlier, must match the master secret for the gateway instance.
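
To verify what was generated, the standard keytool listing command can be used (a sketch; the keystore name, alias and `{master-secret}` placeholder follow the example above):

    keytool -list -v -keystore gateway.jks -storepass {master-secret} -alias gateway-identity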
 
 ##### Credential Store #####
 Whenever you provide your own keystore with either a self-signed cert or a real certificate signed by a trusted authority, you will need to create an empty credential store. This is necessary for the current release in order for the system to utilize the same password for the keystore and the key.
 
 The credential stores in Knox use the JCEKS keystore type as it allows for the storage of general secrets in addition to certificates.
 
-	keytool -genkey -alias {anything} -keystore __gateway-credentials.jceks -storepass {master-secret} -validity 360 -keysize 1024 -storetype JCEKS
+    keytool -genkey -alias {anything} -keystore __gateway-credentials.jceks \
+        -storepass {master-secret} -validity 360 -keysize 1024 -storetype JCEKS
 
 Follow the prompts again for the DN for the cert of the credential store. This certificate isn't really used for anything at the moment but is required to create the credential store.
 

Modified: incubator/knox/trunk/books/0.3.0/config_authn.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/config_authn.md?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/config_authn.md (original)
+++ incubator/knox/trunk/books/0.3.0/config_authn.md Tue Oct  8 16:45:25 2013
@@ -123,18 +123,19 @@ The definition would look like the follo
 
     ...
     <provider>
-                <role>authentication</role>
-                <name>ShiroProvider</name>
-                <enabled>true</enabled>
-                <param>
-                    <!-- 
-                    session timeout in minutes,  this is really idle timeout,
-                    defaults to 30mins, if the property value is not defined,, 
-                    current client authentication would expire if client idles contiuosly for more than this value
-                    -->
-                    <name>sessionTimeout</name>
-                    <value>30</value>
-                </param>
+        <role>authentication</role>
+        <name>ShiroProvider</name>
+        <enabled>true</enabled>
+        <param>
+            <!--
+            Session timeout in minutes. This is really idle timeout.
+            Defaults to 30 minutes, if the property value is not defined.
+            Current client authentication will expire if client idles
+            continuously for more than this value.
+            -->
+            <name>sessionTimeout</name>
+            <value>30</value>
+        </param>
     </provider>
     ...
 

Modified: incubator/knox/trunk/books/0.3.0/service_hbase.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/service_hbase.md?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/service_hbase.md (original)
+++ incubator/knox/trunk/books/0.3.0/service_hbase.md Tue Oct  8 16:45:25 2013
@@ -114,7 +114,9 @@ For more details about client DSL usage 
 * Response
     * EmptyResponse
 * Example
-    * ```HBase.session(session).table(tableName).create()
+
+
+    HBase.session(session).table(tableName).create()
        .attribute("tb_attr1", "value1")
        .attribute("tb_attr2", "value2")
        .family("family1")
@@ -125,7 +127,7 @@ For more details about client DSL usage 
        .family("family3")
        .endFamilyDef()
        .attribute("tb_attr3", "value5")
-       .now()```
+       .now()
 
 ##### table(String tableName).update() - Update Table Schema.
 
@@ -136,7 +138,9 @@ For more details about client DSL usage 
 * Response
     * EmptyResponse
 * Example
-    * ```HBase.session(session).table(tableName).update()
+
+
+    HBase.session(session).table(tableName).update()
          .family("family1")
              .attribute("fm_attr1", "new_value3")
          .endFamilyDef()
@@ -170,14 +174,18 @@ For more details about client DSL usage 
 * Response
     * EmptyResponse
 * Example
-    * ```HBase.session(session).table(tableName).row("row_id_1").store()
+
+
+    HBase.session(session).table(tableName).row("row_id_1").store()
          .column("family1", "col1", "col_value1")
          .column("family1", "col2", "col_value2", 1234567890l)
          .column("family2", null, "fam_value1")
-         .now()```
-    * ```HBase.session(session).table(tableName).row("row_id_2").store()
+         .now()
+
+
+    HBase.session(session).table(tableName).row("row_id_2").store()
          .column("family1", "row2_col1", "row2_col_value1")
-         .now()```
+         .now()
 
 ##### table(String tableName).row(String rowId).query() - Cell or Row Query.
 
@@ -191,16 +199,22 @@ For more details about client DSL usage 
 * Response
     * BasicResponse
 * Example
-    * ```HBase.session(session).table(tableName).row("row_id_1")
+
+
+    HBase.session(session).table(tableName).row("row_id_1")
          .query()
-         .now().string```
-    * `HBase.session(session).table(tableName).row().query().now().string`
-    * ```HBase.session(session).table(tableName).row().query()
+         .now().string
+
+
+    HBase.session(session).table(tableName).row().query().now().string
+
+
+    HBase.session(session).table(tableName).row().query()
          .column("family1", "row2_col1")
          .column("family2")
          .times(0, Long.MAX_VALUE)
          .numVersions(1)
-         .now().string```
+         .now().string
 
 ##### table(String tableName).row(String rowId).delete() - Row, Column, or Cell Delete.
 
@@ -210,11 +224,15 @@ For more details about client DSL usage 
 * Response
     * EmptyResponse
 * Example
-    * ```HBase.session(session).table(tableName).row("row_id_1")
+
+
+    HBase.session(session).table(tableName).row("row_id_1")
          .delete()
          .column("family1", "col1")
         .now()
-    * ```HBase.session(session).table(tableName).row("row_id_1")
+
+
+    HBase.session(session).table(tableName).row("row_id_1")
          .delete()
          .column("family2")
          .time(Long.MAX_VALUE)
@@ -236,7 +254,9 @@ For more details about client DSL usage 
 * Response
     * scannerId : String - the scanner ID of the created scanner. Consumes body.
 * Example
-    * ```HBase.session(session).table(tableName).scanner().create()
+
+
+    HBase.session(session).table(tableName).scanner().create()
          .column("family1", "col2")
          .column("family2")
          .startRow("row_id_1")

Modified: incubator/knox/trunk/books/0.3.0/service_oozie.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/service_oozie.md?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/service_oozie.md (original)
+++ incubator/knox/trunk/books/0.3.0/service_oozie.md Tue Oct  8 16:45:25 2013
@@ -17,17 +17,71 @@
 
 ### Oozie ###
 
-TODO
+
+Oozie is a Hadoop component that allows complex job workflows to be submitted and managed.
+Please refer to the latest [Oozie documentation](http://oozie.apache.org/docs/4.0.0/) for details.
+
+In order to make Oozie accessible via the gateway there are several important Hadoop configuration settings.
+These all relate to the network endpoints exposed by various Hadoop services.
+
+The HTTP endpoint at which Oozie is running can be found via the oozie.base.url property in the oozie-site.xml file.
+In a Sandbox installation this can typically be found in /etc/oozie/conf/oozie-site.xml.
+
+    <property>
+        <name>oozie.base.url</name>
+        <value>http://sandbox.hortonworks.com:11000/oozie</value>
+    </property>
+
+The RPC address at which the Resource Manager exposes the JOBTRACKER endpoint can be found via the yarn.resourcemanager.address property in the yarn-site.xml file.
+In a Sandbox installation this can typically be found in /etc/hadoop/conf/yarn-site.xml.
+
+    <property>
+        <name>yarn.resourcemanager.address</name>
+        <value>sandbox.hortonworks.com:8050</value>
+    </property>
+
+The RPC address at which the Name Node exposes its RPC endpoint can be found via the dfs.namenode.rpc-address property in the hdfs-site.xml file.
+In a Sandbox installation this can typically be found in /etc/hadoop/conf/hdfs-site.xml.
+
+    <property>
+        <name>dfs.namenode.rpc-address</name>
+        <value>sandbox.hortonworks.com:8020</value>
+    </property>
+
+The information above must be provided to the gateway via a topology descriptor file.
+These topology descriptor files are placed in `{GATEWAY_HOME}/deployments`.
+An example that is set up for the default configuration of the Sandbox is {GATEWAY_HOME}/deployments/sandbox.xml.
+These values will need to be changed for non-default Sandbox or other Hadoop cluster configurations.
+
+    <service>
+        <role>NAMENODE</role>
+        <url>hdfs://localhost:8020</url>
+    </service>
+    <service>
+        <role>JOBTRACKER</role>
+        <url>rpc://localhost:8050</url>
+    </service>
+    <service>
+        <role>OOZIE</role>
+        <url>http://localhost:11000/oozie</url>
+    </service>
 
 #### Oozie URL Mapping ####
 
-TODO
+For Oozie URLs, the mapping of Knox Gateway accessible URLs to direct Oozie URLs is simple.
+
+| ------- | --------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/oozie` |
+| Cluster | `http://{oozie-host}:{oozie-port}/oozie`                                    |
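
For example, following the mapping above, Oozie's standard admin status API could be reached through the gateway like this (a sketch assuming the Sandbox topology and guest credentials used elsewhere in this guide):

    curl -i -k -u guest:guest-password -X GET \
        'https://localhost:8443/gateway/sandbox/oozie/v1/admin/status'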
+
 
-#### Oozie Examples ####
+#### Oozie Request Changes ####
 
-TODO
+TODO - In some cases the Oozie requests need to be slightly different when made through the gateway.
+These changes are required in order to protect the client from knowing the internal structure of the Hadoop cluster.
 
-#### Example #2: WebHDFS & Oozie via KnoxShell DSL
+
+#### Oozie Example via Client DSL ####
 
 This example will also submit the familiar WordCount Java MapReduce job to the Hadoop cluster via the gateway using the KnoxShell DSL.
 However in this case the job will be submitted via an Oozie workflow.
@@ -35,210 +89,95 @@ There are several ways to do this depend
 
 You can use the "embedded" Groovy interpreter provided with the distribution.
 
-    java -jar bin/shell.jar samples/ExampleSubmitWorkflow.groovy
+    java -jar bin/shell.jar samples/ExampleOozieWorkflow.groovy
 
 You can manually type the KnoxShell DSL script into the "embedded" Groovy interpreter provided with the distribution.
 
     java -jar bin/shell.jar
 
-Each line from the file below will need to be typed or copied into the interactive shell.
-
-##### samples/ExampleSubmitWorkflow.groovy #####
-
-    import com.jayway.jsonpath.JsonPath
-    import org.apache.hadoop.gateway.shell.Hadoop
-    import org.apache.hadoop.gateway.shell.hdfs.Hdfs
-    import org.apache.hadoop.gateway.shell.workflow.Workflow
-
-    import static java.util.concurrent.TimeUnit.SECONDS
-
-    gateway = "https://localhost:8443/gateway/sandbox"
-    jobTracker = "sandbox:50300";
-    nameNode = "sandbox:8020";
-    username = "guest"
-    password = "guest-password"
-    inputFile = "LICENSE"
-    jarFile = "samples/hadoop-examples.jar"
-
-    definition = """\
-    <workflow-app xmlns="uri:oozie:workflow:0.2" name="wordcount-workflow">
-        <start to="root-node"/>
-        <action name="root-node">
-            <java>
-                <job-tracker>$jobTracker</job-tracker>
-                <name-node>hdfs://$nameNode</name-node>
-                <main-class>org.apache.hadoop.examples.WordCount</main-class>
-                <arg>/tmp/test/input</arg>
-                <arg>/tmp/test/output</arg>
-            </java>
-            <ok to="end"/>
-            <error to="fail"/>
-        </action>
-        <kill name="fail">
-            <message>Java failed</message>
-        </kill>
-        <end name="end"/>
-    </workflow-app>
-    """
-
-    configuration = """\
-    <configuration>
-        <property>
-            <name>user.name</name>
-            <value>$username</value>
-        </property>
-        <property>
-            <name>oozie.wf.application.path</name>
-            <value>hdfs://$nameNode/tmp/test</value>
-        </property>
-    </configuration>
-    """
-
-    hadoop = Hadoop.login( gateway, username, password )
-
-    println "Delete /tmp/test " + Hdfs.rm(hadoop).file( "/tmp/test" ).recursive().now().statusCode
-    println "Mkdir /tmp/test " + Hdfs.mkdir(hadoop).dir( "/tmp/test").now().statusCode
-    putWorkflow = Hdfs.put(hadoop).text( definition ).to( "/tmp/test/workflow.xml" ).later() {
-        println "Put /tmp/test/workflow.xml " + it.statusCode }
-    putData = Hdfs.put(hadoop).file( inputFile ).to( "/tmp/test/input/FILE" ).later() {
-        println "Put /tmp/test/input/FILE " + it.statusCode }
-    putJar = Hdfs.put(hadoop).file( jarFile ).to( "/tmp/test/lib/hadoop-examples.jar" ).later() {
-        println "Put /tmp/test/lib/hadoop-examples.jar " + it.statusCode }
-    hadoop.waitFor( putWorkflow, putData, putJar )
-
-    jobId = Workflow.submit(hadoop).text( configuration ).now().jobId
-    println "Submitted job " + jobId
-
-    status = "UNKNOWN";
-    count = 0;
-    while( status != "SUCCEEDED" && count++ < 60 ) {
-      sleep( 1000 )
-      json = Workflow.status(hadoop).jobId( jobId ).now().string
-      status = JsonPath.read( json, "${SDS}.status" )
-    }
-    println "Job status " + status;
+Each line from the file `samples/ExampleOozieWorkflow.groovy` will need to be typed or copied into the interactive shell.
 
-    println "Shutdown " + hadoop.shutdown( 10, SECONDS )
+#### Oozie Example via cURL
 
-    exit
-
-#### Example #3: WebHDFS & Templeton/WebHCat via cURL
+The example below illustrates the sequence of curl commands that could be used to run a "word count" map reduce job via an Oozie workflow.
 
-The example below illustrates the sequence of curl commands that could be used to run a "word count" map reduce job.
 It utilizes the hadoop-examples.jar from a Hadoop install for running a simple word count job.
 A copy of that jar has been included in the samples directory for convenience.
-Take care to follow the instructions below for steps 4/5 and 6/7 where the Location header returned by the call to the NameNode is copied for use with the call to the DataNode that follows it.
-These replacement values are identified with { } markup.
-
-    # 0. Optionally cleanup the test directory in case a previous example was run without cleaning up.
-    curl -i -k -u guest:guest-password -X DELETE \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&recursive=true'
-
-    # 1. Create a test input directory /tmp/test/input
-    curl -i -k -u guest:guest-password -X PUT \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input?op=MKDIRS'
-
-    # 2. Create a test output directory /tmp/test/input
-    curl -i -k -u guest:guest-password -X PUT \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/output?op=MKDIRS'
-
-    # 3. Create the inode for hadoop-examples.jar in /tmp/test
-    curl -i -k -u guest:guest-password -X PUT \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/hadoop-examples.jar?op=CREATE'
-
-    # 4. Upload hadoop-examples.jar to /tmp/test.  Use a hadoop-examples.jar from a Hadoop install.
-    curl -i -k -u guest:guest-password -T samples/hadoop-examples.jar -X PUT '{Value Location header from command above}'
 
-    # 5. Create the inode for a sample file README in /tmp/test/input
-    curl -i -k -u guest:guest-password -X PUT \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input/README?op=CREATE'
-
-    # 6. Upload readme.txt to /tmp/test/input.  Use the readme.txt in {GATEWAY_HOME}.
-    curl -i -k -u guest:guest-password -T README -X PUT '{Value of Location header from command above}'
-
-    # 7. Submit the word count job via WebHCat/Templeton.
-    # Take note of the Job ID in the JSON response as this will be used in the next step.
-    curl -v -i -k -u guest:guest-password -X POST \
-        -d jar=/tmp/test/hadoop-examples.jar -d class=wordcount \
-        -d arg=/tmp/test/input -d arg=/tmp/test/output \
-        'https://localhost:8443/gateway/sample/templeton/api/v1/mapreduce/jar'
-
-    # 8. Look at the status of the job
-    curl -i -k -u guest:guest-password -X GET \
-        'https://localhost:8443/gateway/sample/templeton/api/v1/queue/{Job ID returned in JSON body from previous step}'
-
-    # 9. Look at the status of the job queue
-    curl -i -k -u guest:guest-password -X GET \
-        'https://localhost:8443/gateway/sample/templeton/api/v1/queue'
-
-    # 10. List the contents of the output directory /tmp/test/output
-    curl -i -k -u guest:guest-password -X GET \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/output?op=LISTSTATUS'
-
-    # 11. Optionally cleanup the test directory
-    curl -i -k -u guest:guest-password -X DELETE \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&recursive=true'
-
-#### Example #4: WebHDFS & Oozie via cURL
+In addition, a workflow definition file and a workflow configuration file are required.
+These have not been included but are available for download.
+Download [workflow-definition.xml](workflow-definition.xml) and [workflow-configuration.xml](workflow-configuration.xml) and store them in the {GATEWAY_HOME} directory.
+Review the contents of workflow-configuration.xml to ensure that it matches your environment.
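+
+For example, in a Sandbox style environment the nameNode and jobTracker properties in workflow-configuration.xml might be set to values like the ones below.
+The exact host name and ports are assumptions here; they must match your cluster's NameNode and JobTracker/ResourceManager RPC endpoints, not the gateway.
+
+    <property>
+        <name>nameNode</name>
+        <value>hdfs://sandbox.hortonworks.com:8020</value>
+    </property>
+    <property>
+        <name>jobTracker</name>
+        <value>sandbox.hortonworks.com:8050</value>
+    </property>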
 
-The example below illustrates the sequence of curl commands that could be used to run a "word count" map reduce job via an Oozie workflow.
-It utilizes the hadoop-examples.jar from a Hadoop install for running a simple word count job.
-A copy of that jar has been included in the samples directory for convenience.
 Take care to follow the instructions below where replacement values are required.
 These replacement values are identified with { } markup.
 
     # 0. Optionally cleanup the test directory in case a previous example was run without cleaning up.
     curl -i -k -u guest:guest-password -X DELETE \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&recursive=true'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
 
-    # 1. Create the inode for workflow definition file in /tmp/test
+    # 1. Create the inode for workflow definition file in /user/guest/example
     curl -i -k -u guest:guest-password -X PUT \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/workflow.xml?op=CREATE'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/workflow.xml?op=CREATE'
 
-    # 2. Upload the workflow definition file.  This file can be found in {GATEWAY_HOME}/templates
+    # 2. Upload the workflow definition file.  This is the workflow-definition.xml downloaded into {GATEWAY_HOME} above.
-    curl -i -k -u guest:guest-password -T templates/workflow-definition.xml -X PUT \
+    curl -i -k -u guest:guest-password -T workflow-definition.xml -X PUT \
         '{Value Location header from command above}'
 
-    # 3. Create the inode for hadoop-examples.jar in /tmp/test/lib
+    # 3. Create the inode for hadoop-examples.jar in /user/guest/example/lib
     curl -i -k -u guest:guest-password -X PUT \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/lib/hadoop-examples.jar?op=CREATE'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/lib/hadoop-examples.jar?op=CREATE'
 
-    # 4. Upload hadoop-examples.jar to /tmp/test/lib.  Use a hadoop-examples.jar from a Hadoop install.
+    # 4. Upload hadoop-examples.jar to /user/guest/example/lib.  Use a hadoop-examples.jar from a Hadoop install.
     curl -i -k -u guest:guest-password -T samples/hadoop-examples.jar -X PUT \
         '{Value Location header from command above}'
 
-    # 5. Create the inode for a sample input file readme.txt in /tmp/test/input.
+    # 5. Create the inode for a sample input file README in /user/guest/example/input.
     curl -i -k -u guest:guest-password -X PUT \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/input/README?op=CREATE'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/input/README?op=CREATE'
 
-    # 6. Upload readme.txt to /tmp/test/input.  Use the readme.txt in {GATEWAY_HOME}.
+    # 6. Upload README to /user/guest/example/input.
     # The sample below uses this README file found in {GATEWAY_HOME}.
     curl -i -k -u guest:guest-password -T README -X PUT \
         '{Value of Location header from command above}'
 
-    # 7. Create the job configuration file by replacing the {NameNode host:port} and {JobTracker host:port}
-    # in the command below to values that match your Hadoop configuration.
-    # NOTE: The hostnames must be resolvable by the Oozie daemon.  The ports are the RPC ports not the HTTP ports.
-    # For example {NameNode host:port} might be sandbox:8020 and {JobTracker host:port} sandbox:50300
-    # The source workflow-configuration.xml file can be found in {GATEWAY_HOME}/templates
-    # Alternatively, this file can copied and edited manually for environments without the sed utility.
-    sed -e s/REPLACE.NAMENODE.RPCHOSTPORT/{NameNode host:port}/ \
-        -e s/REPLACE.JOBTRACKER.RPCHOSTPORT/{JobTracker host:port}/ \
-        <templates/workflow-configuration.xml >workflow-configuration.xml
-
-    # 8. Submit the job via Oozie
+    # 7. Submit the job via Oozie
     # Take note of the Job ID in the JSON response as this will be used in the next step.
-    curl -i -k -u guest:guest-password -T workflow-configuration.xml -H Content-Type:application/xml -X POST \
-        'https://localhost:8443/gateway/sample/oozie/api/v1/jobs?action=start'
+    curl -i -k -u guest:guest-password -H Content-Type:application/xml -T workflow-configuration.xml \
+        -X POST 'https://localhost:8443/gateway/sandbox/oozie/v1/jobs?action=start'
 
-    # 9. Query the job status via Oozie.
+    # 8. Query the job status via Oozie.
     curl -i -k -u guest:guest-password -X GET \
-        'https://localhost:8443/gateway/sample/oozie/api/v1/job/{Job ID returned in JSON body from previous step}'
+        'https://localhost:8443/gateway/sandbox/oozie/v1/job/{Job ID from JSON body}'
 
-    # 10. List the contents of the output directory /tmp/test/output
+    # 9. List the contents of the output directory /user/guest/example/output
     curl -i -k -u guest:guest-password -X GET \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test/output?op=LISTSTATUS'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/output?op=LISTSTATUS'
 
-    # 11. Optionally cleanup the test directory
+    # 10. Optionally cleanup the test directory
     curl -i -k -u guest:guest-password -X DELETE \
-        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp/test?op=DELETE&recursive=true'
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
+
+### Oozie Client DSL ###
+
+#### submit() - Submit a workflow job.
+
+* Request
+    * text (String) - XML formatted workflow configuration string.
+    * file (String) - The name of a local file containing XML formatted workflow configuration.
+    * action (String) - The initial action to take on the job.  Optional: Default is "start".
+* Response
+    * BasicResponse
+* Example
+    * `Workflow.submit(session).file(localFile).action("start").now()`
+
+#### status() - Query the status of a workflow job.
+
+* Request
+    * jobId (String) - The job ID to check. This is the ID received when the job was created.
+* Response
+    * BasicResponse
+* Example
+    * `Workflow.status(session).jobId(jobId).now().string`
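+
+Putting these together, a complete submit and status check might look like the sketch below.
+The package name used to import the Workflow class and the `id` field used to pull the job ID out of the JSON response are assumptions here; `samples/ExampleOozieWorkflow.groovy` remains the authoritative example.
+
+    import com.jayway.jsonpath.JsonPath
+    import org.apache.hadoop.gateway.shell.Hadoop
+    import org.apache.hadoop.gateway.shell.workflow.Workflow
+
+    gateway = "https://localhost:8443/gateway/sandbox"
+    session = Hadoop.login( gateway, "guest", "guest-password" )
+
+    // Submit the workflow configuration prepared earlier; the response body is JSON.
+    json = Workflow.submit(session).file( "workflow-configuration.xml" ).action( "start" ).now().string
+
+    // Extract the job ID from the JSON response and query the job status.
+    jobId = JsonPath.read( json, '$.id' )
+    println Workflow.status(session).jobId( jobId ).now().string
+
+    session.shutdown()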
+
+

Modified: incubator/knox/trunk/books/0.3.0/service_webhcat.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/service_webhcat.md?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/service_webhcat.md (original)
+++ incubator/knox/trunk/books/0.3.0/service_webhcat.md Tue Oct  8 16:45:25 2013
@@ -17,74 +17,136 @@
 
 ### WebHCat ###
 
-TODO
+WebHCat is a service related to, but separate from, Hive.
+As such it is installed and configured independently.
+The [WebHCat wiki pages](https://cwiki.apache.org/confluence/display/Hive/WebHCat) describe this process.
+In the Sandbox the configuration file for WebHCat is located at /etc/hadoop/hcatalog/webhcat-site.xml.
+Note the property shown below as it relates to configuration required by the gateway.
+
+    <property>
+        <name>templeton.port</name>
+        <value>50111</value>
+    </property>
+
+Also important is the configuration of the JOBTRACKER RPC endpoint.
+For Hadoop 2 this endpoint is configured in the yarn-site.xml file.
+In the Sandbox this file can be found at /etc/hadoop/conf/yarn-site.xml.
+The property yarn.resourcemanager.address within that file is relevant for the gateway's configuration.
+
+    <property>
+        <name>yarn.resourcemanager.address</name>
+        <value>sandbox.hortonworks.com:8050</value>
+    </property>
+
+See #[WebHDFS] for details about locating the Hadoop configuration for the NAMENODE endpoint.
+
+The gateway by default includes a sample topology descriptor file `{GATEWAY_HOME}/deployments/sandbox.xml`.
+The values in this sample are configured to work with an installed Sandbox VM.
+
+    <service>
+        <role>NAMENODE</role>
+        <url>hdfs://localhost:8020</url>
+    </service>
+    <service>
+        <role>JOBTRACKER</role>
+        <url>rpc://localhost:8050</url>
+    </service>
+    <service>
+        <role>WEBHCAT</role>
+        <url>http://localhost:50111/templeton</url>
+    </service>
+
+The URLs provided for the NAMENODE and JOBTRACKER roles do not result in endpoints being exposed by the gateway.
+This information is only required so that other URLs can be rewritten to reference the appropriate RPC addresses for the Hadoop services.
+This prevents clients from needing to be aware of the internal cluster details.
+Note that for Hadoop 2 the JOBTRACKER RPC endpoint is provided by the Resource Manager component.
+
+By default the gateway is configured to use the HTTP endpoint for WebHCat in the Sandbox.
+This could alternatively be configured to use the HTTPS endpoint by providing the correct address.
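+
+For example, a topology descriptor pointing at an HTTPS WebHCat endpoint might contain a service element like the sketch below; the host and port placeholders are assumptions, not Sandbox values.
+
+    <service>
+        <role>WEBHCAT</role>
+        <url>https://{webhcat-host}:{webhcat-https-port}/templeton</url>
+    </service>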
 
 #### WebHCat URL Mapping ####
 
-TODO
+For WebHCat URLs, the mapping of Knox Gateway accessible URLs to direct WebHCat URLs is simple.
 
-#### WebHCat Examples ####
+| ------- | ------------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/templeton` |
+| Cluster | `http://{webhcat-host}:{webhcat-port}/templeton`                                |
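+
+For example, the WebHCat status resource for the Sandbox topology could be reached through the gateway with a command like the one below; the `v1/status` path is the standard WebHCat REST path and is assumed here rather than taken from the gateway configuration.
+
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/templeton/v1/status'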
 
-TODO
 
-#### Example #1: WebHDFS & Templeton/WebHCat via KnoxShell DSL
+#### WebHCat Example ####
 
 This example will submit the familiar WordCount Java MapReduce job to the Hadoop cluster via the gateway using the KnoxShell DSL.
 There are several ways to do this depending upon your preference.
 
 You can use the "embedded" Groovy interpreter provided with the distribution.
 
-    java -jar bin/shell.jar samples/ExampleSubmitJob.groovy
+    java -jar bin/shell.jar samples/ExampleWebHCatJob.groovy
 
 You can manually type in the KnoxShell DSL script into the "embedded" Groovy interpreter provided with the distribution.
 
     java -jar bin/shell.jar
 
-Each line from the file below will need to be typed or copied into the interactive shell.
+Each line from the file `samples/ExampleWebHCatJob.groovy` would then need to be typed or copied into the interactive shell.
 
-##### samples/ExampleSubmitJob
 
-    import com.jayway.jsonpath.JsonPath
-    import org.apache.hadoop.gateway.shell.Hadoop
-    import org.apache.hadoop.gateway.shell.hdfs.Hdfs
-    import org.apache.hadoop.gateway.shell.job.Job
-
-    import static java.util.concurrent.TimeUnit.SECONDS
-
-    gateway = "https://localhost:8443/gateway/sandbox"
-    username = "guest"
-    password = "guest-password"
-    dataFile = "LICENSE"
-    jarFile = "samples/hadoop-examples.jar"
-
-    hadoop = Hadoop.login( gateway, username, password )
-
-    println "Delete /tmp/test " + Hdfs.rm(hadoop).file( "/tmp/test" ).recursive().now().statusCode
-    println "Create /tmp/test " + Hdfs.mkdir(hadoop).dir( "/tmp/test").now().statusCode
-
-    putData = Hdfs.put(hadoop).file( dataFile ).to( "/tmp/test/input/FILE" ).later() {
-        println "Put /tmp/test/input/FILE " + it.statusCode }
-    putJar = Hdfs.put(hadoop).file( jarFile ).to( "/tmp/test/hadoop-examples.jar" ).later() {
-         println "Put /tmp/test/hadoop-examples.jar " + it.statusCode }
-    hadoop.waitFor( putData, putJar )
-
-    jobId = Job.submitJava(hadoop) \
-        .jar( "/tmp/test/hadoop-examples.jar" ) \
-        .app( "wordcount" ) \
-        .input( "/tmp/test/input" ) \
-        .output( "/tmp/test/output" ) \
-        .now().jobId
-    println "Submitted job " + jobId
-
-    done = false
-    count = 0
-    while( !done && count++ < 60 ) {
-        sleep( 1000 )
-        json = Job.queryStatus(hadoop).jobId(jobId).now().string
-        done = JsonPath.read( json, "${SDS}.status.jobComplete" )
-    }
-    println "Done " + done
+#### WebHCat Client DSL ####
 
-    println "Shutdown " + hadoop.shutdown( 10, SECONDS )
+##### submitJava() - Submit a Java MapReduce job.
 
-    exit
+* Request
+    * jar (String) - The remote file name of the JAR containing the app to execute.
+    * app (String) - The app name to execute (e.g. wordcount), not the class name.
+    * input (String) - The remote directory name to use as input for the job.
+    * output (String) - The remote directory name to store output from the job.
+* Response
+    * jobId : String - The job ID of the submitted job.  Consumes body.
+* Example
+
+
+    Job.submitJava(session)
+        .jar(remoteJarName)
+        .app(appName)
+        .input(remoteInputDir)
+        .output(remoteOutputDir)
+        .now()
+        .jobId
+
+##### submitPig() - Submit a Pig job.
+
+* Request
+    * file (String) - The remote file name of the pig script.
+    * arg (String) - An argument to pass to the script.
+    * statusDir (String) - The remote directory to store status output.
+* Response
+    * jobId : String - The job ID of the submitted job.  Consumes body.
+* Example
+    * `Job.submitPig(session).file(remotePigFileName).arg("-v").statusDir(remoteStatusDir).now()`
+
+##### submitHive() - Submit a Hive job.
+
+* Request
+    * file (String) - The remote file name of the hive script.
+    * arg (String) - An argument to pass to the script.
+    * statusDir (String) - The remote directory to store status output.
+* Response
+    * jobId : String - The job ID of the submitted job.  Consumes body.
+* Example
+    * `Job.submitHive(session).file(remoteHiveFileName).arg("-v").statusDir(remoteStatusDir).now()`
+
+##### queryQueue() - Return a list of all job IDs registered to the user.
+
+* Request
+    * No request parameters.
+* Response
+    * BasicResponse
+* Example
+    * `Job.queryQueue(session).now().string`
+
+##### queryStatus() - Check the status of a job and get related job information given its job ID.
+
+* Request
+    * jobId (String) - The job ID to check. This is the ID received when the job was created.
+* Response
+    * BasicResponse
+* Example
+    * `Job.queryStatus(session).jobId(jobId).now().string`
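+
+Putting submitJava() and queryStatus() together, a simple polling loop might look like the sketch below.
+It mirrors the structure of the bundled sample script; the remote HDFS paths are illustrative and the session is created with Hadoop.login as in the other examples.
+
+    import com.jayway.jsonpath.JsonPath
+    import org.apache.hadoop.gateway.shell.Hadoop
+    import org.apache.hadoop.gateway.shell.job.Job
+
+    session = Hadoop.login( "https://localhost:8443/gateway/sandbox", "guest", "guest-password" )
+
+    // Submit the WordCount job using a jar previously uploaded to HDFS.
+    jobId = Job.submitJava(session) \
+        .jar( "/user/guest/example/hadoop-examples.jar" ) \
+        .app( "wordcount" ) \
+        .input( "/user/guest/example/input" ) \
+        .output( "/user/guest/example/output" ) \
+        .now().jobId
+    println "Submitted job " + jobId
+
+    // Poll the job status until WebHCat reports it complete or a minute has passed.
+    done = false
+    count = 0
+    while( !done && count++ < 60 ) {
+        sleep( 1000 )
+        json = Job.queryStatus(session).jobId(jobId).now().string
+        done = JsonPath.read( json, '$.status.jobComplete' )
+    }
+    println "Job complete: " + done
+
+    session.shutdown()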

Modified: incubator/knox/trunk/books/0.3.0/service_webhdfs.md
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/0.3.0/service_webhdfs.md?rev=1530346&r1=1530345&r2=1530346&view=diff
==============================================================================
--- incubator/knox/trunk/books/0.3.0/service_webhdfs.md (original)
+++ incubator/knox/trunk/books/0.3.0/service_webhdfs.md Tue Oct  8 16:45:25 2013
@@ -93,8 +93,8 @@ The examples below upload a file, downlo
 
 You can use the Groovy example scripts and interpreter provided with the distribution.
 
-    java -jar bin/shell.jar samples/ExampleWebHfsPutGet.groovy
-    java -jar bin/shell.jar samples/ExampleWebHfsLs.groovy
+    java -jar bin/shell.jar samples/ExampleWebHdfsPutGet.groovy
+    java -jar bin/shell.jar samples/ExampleWebHdfsLs.groovy
 
 You can manually type the client DSL script into the KnoxShell interactive Groovy interpreter provided with the distribution.
 The command below starts the KnoxShell in interactive mode.
@@ -104,38 +104,38 @@ The command below starts the KnoxShell i
 Each line below could be typed or copied into the interactive shell and executed.
 This is provided as an example to illustrate the use of the client DSL.
 
-    # Import the client DSL and a useful utilities for working with JSON.
+    // Import the client DSL and some useful utilities for working with JSON.
     import org.apache.hadoop.gateway.shell.Hadoop
     import org.apache.hadoop.gateway.shell.hdfs.Hdfs
     import groovy.json.JsonSlurper
 
-    # Setup some basic config.
+    // Setup some basic config.
     gateway = "https://localhost:8443/gateway/sandbox"
     username = "guest"
     password = "guest-password"
 
-    # Start the session.
+    // Start the session.
     session = Hadoop.login( gateway, username, password )
 
-    # Cleanup anything leftover from a previous run.
+    // Cleanup anything leftover from a previous run.
     Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()
 
-    # Upload the README to HDFS.
-    Hdfs.put( session ).file( README ).to( "/user/guest/example/README" ).now()
+    // Upload the README to HDFS.
+    Hdfs.put( session ).file( "README" ).to( "/user/guest/example/README" ).now()
 
-    # Download the README from HDFS.
+    // Download the README from HDFS.
     text = Hdfs.get( session ).from( "/user/guest/example/README" ).now().string
     println text
 
-    # List the contents of the directory.
+    // List the contents of the directory.
     text = Hdfs.ls( session ).dir( "/user/guest/example" ).now().string
     json = (new JsonSlurper()).parseText( text )
     println json.FileStatuses.FileStatus.pathSuffix
 
-    # Cleanup the directory.
+    // Cleanup the directory.
     Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()
 
-    # Clean the session.
+    // Clean the session.
     session.shutdown()
 
 
@@ -181,18 +181,19 @@ Use can use cURL to directly invoke the 
 
 ##### WebHDFS client DSL
 
-###### get - Get a file from HDFS (OPEN).
+###### get() - Get a file from HDFS (OPEN).
 
 * Request
     * from( String name ) - The full name of the file in HDFS.
     * file( String name ) - The name of a local file to create with the content.
+    If this isn't specified the file content must be read from the response.
 * Response
     * BasicResponse
     * If file parameter specified content will be streamed to file.
 * Example
     * `Hdfs.get( session ).from( "/user/guest/example/README" ).now().string`
 
-###### ls - Query the contents of a directory (LISTSTATUS)
+###### ls() - Query the contents of a directory (LISTSTATUS)
 
 * Request
     * dir( String name ) - The full name of the directory in HDFS.
@@ -201,34 +202,34 @@ Use can use cURL to directly invoke the 
 * Example
     * `Hdfs.ls( session ).dir( "/user/guest/example" ).now().string`
 
-###### mkdir - Create a directory in HDFS (MKDIRS)
+###### mkdir() - Create a directory in HDFS (MKDIRS)
 
 * Request
     * dir( String name ) - The full name of the directory to create in HDFS.
-    * perm( String perm ) - The permissions for the directory (e.g. 644).
+    * perm( String perm ) - The permissions for the directory (e.g. 644).  Optional: default="777"
 * Response
-    * BasicResponse
+    * EmptyResponse - Implicit close().
 * Example
     * `Hdfs.mkdir( session ).dir( "/user/guest/example" ).now()`
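+    * `Hdfs.mkdir( session ).dir( "/user/guest/example" ).perm( "755" ).now()` - a sketch with an explicit permission, assuming perm() chains like the other request parameters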
 
-###### put - Write a file into HDFS (CREATE)
+###### put() - Write a file into HDFS (CREATE)
 
 * Request
     * text( String text ) - Text to upload to HDFS.  Takes precedence over file if both present.
     * file( String name ) - The name of a local file to upload to HDFS.
     * to( String name ) - The fully qualified name to create in HDFS.
 * Response
-    * BasicResponse
+    * EmptyResponse - Implicit close().
 * Example
     * `Hdfs.put( session ).file( "README" ).to( "/user/guest/example/README" ).now()`
 
-###### rm - Delete a file or directory (DELETE)
+###### rm() - Delete a file or directory (DELETE)
 
 * Request
     * file( String name ) - The fully qualified file or directory name in HDFS.
-    * recursive( Boolean recursive ) - Delete directory and all of its contents if True.
+    * recursive( Boolean recursive ) - Delete directory and all of its contents if True.  Optional: default=False
 * Response
-    * BasicResponse
+    * BasicResponse - Implicit close().
 * Example
     * `Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()`
 

Added: incubator/knox/trunk/books/static/workflow-configuration.xml
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/static/workflow-configuration.xml?rev=1530346&view=auto
==============================================================================
--- incubator/knox/trunk/books/static/workflow-configuration.xml (added)
+++ incubator/knox/trunk/books/static/workflow-configuration.xml Tue Oct  8 16:45:25 2013
@@ -0,0 +1,43 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+    <property>
+        <name>user.name</name>
+        <value>default</value>
+    </property>
+    <property>
+        <name>nameNode</name>
+        <value>default</value>
+    </property>
+    <property>
+        <name>jobTracker</name>
+        <value>default</value>
+    </property>
+    <property>
+        <name>inputDir</name>
+        <value>/user/guest/example/input</value>
+    </property>
+    <property>
+        <name>outputDir</name>
+        <value>/user/guest/example/output</value>
+    </property>
+    <property>
+        <name>oozie.wf.application.path</name>
+        <value>/user/guest/example</value>
+    </property>
+</configuration>

Added: incubator/knox/trunk/books/static/workflow-definition.xml
URL: http://svn.apache.org/viewvc/incubator/knox/trunk/books/static/workflow-definition.xml?rev=1530346&view=auto
==============================================================================
--- incubator/knox/trunk/books/static/workflow-definition.xml (added)
+++ incubator/knox/trunk/books/static/workflow-definition.xml Tue Oct  8 16:45:25 2013
@@ -0,0 +1,35 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<workflow-app xmlns="uri:oozie:workflow:0.2" name="wordcount-workflow">
+    <start to="root-node"/>
+    <action name="root-node">
+        <java>
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <main-class>org.apache.hadoop.examples.WordCount</main-class>
+            <arg>${inputDir}</arg>
+            <arg>${outputDir}</arg>
+        </java>
+        <ok to="end"/>
+        <error to="fail"/>
+    </action>
+    <kill name="fail">
+        <message>Java failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
+    </kill>
+    <end name="end"/>
+</workflow-app>


