knox-commits mailing list archives

From su...@apache.org
Subject svn commit: r1681785 [8/8] - in /knox: site/ site/books/knox-0-7-0/ trunk/ trunk/books/0.7.0/ trunk/books/0.7.0/dev-guide/
Date Tue, 26 May 2015 16:07:09 GMT
Added: knox/trunk/books/0.7.0/service_hbase.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_hbase.md?rev=1681785&view=auto
==============================================================================
--- knox/trunk/books/0.7.0/service_hbase.md (added)
+++ knox/trunk/books/0.7.0/service_hbase.md Tue May 26 16:07:07 2015
@@ -0,0 +1,651 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### HBase ###
+
+The HBase REST API is provided by the Stargate service for HBase.
+See the HBase Stargate Setup section below for getting started with Stargate and Knox in the Hortonworks Sandbox environment.
+
+#### HBase URL Mapping ####
+
+| ------- | ----------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/hbase` |
+| Cluster | `http://{stargate-host}:60080/`                                         |
+
+#### HBase Examples ####
+
+The examples below illustrate a set of basic operations with an HBase instance using the Stargate REST API.
+Use the following link for more details about the HBase/Stargate API: http://wiki.apache.org/hadoop/Hbase/Stargate.
+
+Note: Some HBase examples may not work due to enabled [Access Control](https://hbase.apache.org/book/hbase.accesscontrol.configuration.html). The user may not be granted permission to perform the operations in the samples. To check whether Access Control is configured in the HBase instance, verify hbase-site.xml for the presence of `org.apache.hadoop.hbase.security.access.AccessController` in the `hbase.coprocessor.master.classes` and `hbase.coprocessor.region.classes` properties.
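+
+For reference, an hbase-site.xml with Access Control enabled typically contains entries similar to the following (the class lists shown are illustrative and may include additional coprocessors in your installation):
+
+    <property>
+        <name>hbase.coprocessor.master.classes</name>
+        <value>org.apache.hadoop.hbase.security.access.AccessController</value>
+    </property>
+    <property>
+        <name>hbase.coprocessor.region.classes</name>
+        <value>org.apache.hadoop.hbase.security.access.AccessController</value>
+    </property>
+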
+To grant the Read, Write, Create permissions to the `guest` user, execute the following command:
+
+    echo grant 'guest', 'RWC' | hbase shell
+
+If you are using a cluster secured with Kerberos, you will need to have used `kinit` to authenticate to the KDC.
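+For example (the principal shown is illustrative; use one appropriate for your environment):
+
+    kinit guest@EXAMPLE.COM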
+
+#### HBase Stargate Setup ####
+
+#### Launch Stargate ####
+
+The command below launches the Stargate daemon on port 60080.
+
+    sudo {HBASE_BIN}/hbase-daemon.sh start rest -p 60080
+
+Where {HBASE_BIN} is /usr/hdp/current/hbase-master/bin/ in the case of an HDP install.
+
+Port 60080 is used because it was specified in the sample Hadoop cluster deployment `{GATEWAY_HOME}/conf/topologies/sandbox.xml`.
+
+#### Configure Sandbox port mapping for VirtualBox ####
+
+1. Select the VM
+2. Select menu Machine>Settings...
+3. Select tab Network
+4. Select Adapter 1
+5. Press Port Forwarding button
+6. Press Plus button to insert new rule: Name=Stargate, Host Port=60080, Guest Port=60080
+7. Press OK to close the rule window
+8. Press OK in the Network window to save the changes
+
+Port 60080 is used because it was specified in the sample Hadoop cluster deployment `{GATEWAY_HOME}/conf/topologies/sandbox.xml`.
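+
+Alternatively, the same forwarding rule can be added from the host command line with VBoxManage while the VM is powered off (the VM name "Sandbox" is illustrative; substitute the name shown in the VirtualBox manager):
+
+    VBoxManage modifyvm "Sandbox" --natpf1 "Stargate,tcp,,60080,,60080"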
+
+#### HBase Restart ####
+
+If it becomes necessary to restart HBase, you can log into the hosts running HBase and use these steps.
+
+    sudo {HBASE_BIN}/hbase-daemon.sh stop rest
+    sudo -u hbase {HBASE_BIN}/hbase-daemon.sh stop regionserver
+    sudo -u hbase {HBASE_BIN}/hbase-daemon.sh stop master
+    sudo -u hbase {HBASE_BIN}/hbase-daemon.sh stop zookeeper
+
+    sudo -u hbase {HBASE_BIN}/hbase-daemon.sh start regionserver
+    sudo -u hbase {HBASE_BIN}/hbase-daemon.sh start master
+    sudo -u hbase {HBASE_BIN}/hbase-daemon.sh start zookeeper
+    sudo {HBASE_BIN}/hbase-daemon.sh start rest -p 60080
+
+Where {HBASE_BIN} is /usr/hdp/current/hbase-master/bin/ in the case of an HDP install.
+ 
+#### HBase/Stargate client DSL ####
+
+For more details about client DSL usage please follow this [page](https://cwiki.apache.org/confluence/display/KNOX/Client+Usage).
+
+After launching the shell, execute the following command to be able to use the snippets below.
+`import org.apache.hadoop.gateway.shell.hbase.HBase;`
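+
+The snippets below also assume that a `session` has already been established, as is done in the complete example later in this section (the gateway URL and credentials shown are the Sandbox defaults):
+
+    import org.apache.hadoop.gateway.shell.Hadoop
+    session = Hadoop.login("https://localhost:8443/gateway/sandbox", "guest", "guest-password")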
+ 
+#### systemVersion() - Query Software Version.
+
+* Request
+    * No request parameters.
+* Response
+    * BasicResponse
+* Example
+    * `HBase.session(session).systemVersion().now().string`
+
+#### clusterVersion() - Query Storage Cluster Version.
+
+* Request
+    * No request parameters.
+* Response
+    * BasicResponse
+* Example
+    * `HBase.session(session).clusterVersion().now().string`
+
+#### status() - Query Storage Cluster Status.
+
+* Request
+    * No request parameters.
+* Response
+    * BasicResponse
+* Example
+    * `HBase.session(session).status().now().string`
+
+#### table().list() - Query Table List.
+
+* Request
+    * No request parameters.
+* Response
+    * BasicResponse
+* Example
+  * `HBase.session(session).table().list().now().string`
+
+#### table(String tableName).schema() - Query Table Schema.
+
+* Request
+    * No request parameters.
+* Response
+    * BasicResponse
+* Example
+    * `HBase.session(session).table(tableName).schema().now().string`
+
+#### table(String tableName).create() - Create Table Schema.
+
+* Request
+    * attribute(String name, Object value) - the table's attribute.
+    * family(String name) - starts family definition. Has sub requests:
+        * attribute(String name, Object value) - the family's attribute.
+        * endFamilyDef() - finishes family definition.
+* Response
+    * EmptyResponse
+* Example
+
+
+    HBase.session(session).table(tableName).create()
+       .attribute("tb_attr1", "value1")
+       .attribute("tb_attr2", "value2")
+       .family("family1")
+           .attribute("fm_attr1", "value3")
+           .attribute("fm_attr2", "value4")
+       .endFamilyDef()
+       .family("family2")
+       .family("family3")
+       .endFamilyDef()
+       .attribute("tb_attr3", "value5")
+       .now()
+
+#### table(String tableName).update() - Update Table Schema.
+
+* Request
+    * family(String name) - starts family definition. Has sub requests:
+        * attribute(String name, Object value) - the family's attribute.
+        * endFamilyDef() - finishes family definition.
+* Response
+    * EmptyResponse
+* Example
+
+
+    HBase.session(session).table(tableName).update()
+         .family("family1")
+             .attribute("fm_attr1", "new_value3")
+         .endFamilyDef()
+         .family("family4")
+             .attribute("fm_attr3", "value6")
+         .endFamilyDef()
+         .now()
+
+#### table(String tableName).regions() - Query Table Metadata.
+
+* Request
+    * No request parameters.
+* Response
+    * BasicResponse
+* Example
+    * `HBase.session(session).table(tableName).regions().now().string`
+
+#### table(String tableName).delete() - Delete Table.
+
+* Request
+    * No request parameters.
+* Response
+    * EmptyResponse
+* Example
+    * `HBase.session(session).table(tableName).delete().now()`
+
+#### table(String tableName).row(String rowId).store() - Cell Store.
+
+* Request
+    * column(String family, String qualifier, Object value, Long time) - the data to store; "qualifier" may be "null"; "time" is optional.
+* Response
+    * EmptyResponse
+* Example
+
+
+    HBase.session(session).table(tableName).row("row_id_1").store()
+         .column("family1", "col1", "col_value1")
+         .column("family1", "col2", "col_value2", 1234567890l)
+         .column("family2", null, "fam_value1")
+         .now()
+
+
+    HBase.session(session).table(tableName).row("row_id_2").store()
+         .column("family1", "row2_col1", "row2_col_value1")
+         .now()
+
+#### table(String tableName).row(String rowId).query() - Cell or Row Query.
+
+* rowId is optional. Querying with null or empty rowId will select all rows.
+* Request
+    * column(String family, String qualifier) - the column to select; "qualifier" is optional.
+    * startTime(Long) - the lower bound for filtration by time.
+    * endTime(Long) - the upper bound for filtration by time.
+    * times(Long startTime, Long endTime) - the lower and upper bounds for filtration by time.
+    * numVersions(Long) - the maximum number of versions to return.
+* Response
+    * BasicResponse
+* Example
+
+
+    HBase.session(session).table(tableName).row("row_id_1")
+         .query()
+         .now().string
+
+
+    HBase.session(session).table(tableName).row().query().now().string
+
+
+    HBase.session(session).table(tableName).row().query()
+         .column("family1", "row2_col1")
+         .column("family2")
+         .times(0, Long.MAX_VALUE)
+         .numVersions(1)
+         .now().string
+
+#### table(String tableName).row(String rowId).delete() - Row, Column, or Cell Delete.
+
+* Request
+    * column(String family, String qualifier) - the column to delete; "qualifier" is optional.
+    * time(Long) - the upper bound for time filtration.
+* Response
+    * EmptyResponse
+* Example
+
+
+    HBase.session(session).table(tableName).row("row_id_1")
+         .delete()
+         .column("family1", "col1")
+         .now()
+
+
+    HBase.session(session).table(tableName).row("row_id_1")
+         .delete()
+         .column("family2")
+         .time(Long.MAX_VALUE)
+         .now()
+
+#### table(String tableName).scanner().create() - Scanner Creation.
+
+* Request
+    * startRow(String) - the lower bound for filtration by row id.
+    * endRow(String) - the upper bound for filtration by row id.
+    * rows(String startRow, String endRow) - the lower and upper bounds for filtration by row id.
+    * column(String family, String qualifier) - the column to select; "qualifier" is optional.
+    * batch(Integer) - the batch size.
+    * startTime(Long) - the lower bound for filtration by time.
+    * endTime(Long) - the upper bound for filtration by time.
+    * times(Long startTime, Long endTime) - the lower and upper bounds for filtration by time.
+    * filter(String) - the filter XML definition.
+    * maxVersions(Integer) - the maximum number of versions to return.
+* Response
+    * scannerId : String - the scanner ID of the created scanner. Consumes body.
+* Example
+
+
+    HBase.session(session).table(tableName).scanner().create()
+         .column("family1", "col2")
+         .column("family2")
+         .startRow("row_id_1")
+         .endRow("row_id_2")
+         .batch(1)
+         .startTime(0)
+         .endTime(Long.MAX_VALUE)
+         .filter("")
+         .maxVersions(100)
+         .now()
+
+#### table(String tableName).scanner(String scannerId).getNext() - Scanner Get Next.
+
+* Request
+    * No request parameters.
+* Response
+    * BasicResponse
+* Example
+    * `HBase.session(session).table(tableName).scanner(scannerId).getNext().now().string`
+
+#### table(String tableName).scanner(String scannerId).delete() - Scanner Deletion.
+
+* Request
+    * No request parameters.
+* Response
+    * EmptyResponse
+* Example
+    * `HBase.session(session).table(tableName).scanner(scannerId).delete().now()`
+
+### HBase/Stargate via Client DSL ###
+
+This example illustrates the sequence of all basic HBase operations:
+
+1. get system version
+2. get cluster version
+3. get cluster status
+4. create the table
+5. get list of tables
+6. get table schema
+7. update table schema
+8. insert single row into table
+9. query row by id
+10. query all rows
+11. delete cell from row
+12. delete entire column family from row
+13. get table regions
+14. create scanner
+15. fetch values using scanner
+16. drop scanner
+17. drop the table
+
+There are several ways to do this depending upon your preference.
+
+You can use the Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar samples/ExampleHBase.groovy
+
+You can manually type in the KnoxShell DSL script into the interactive Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar
+
+Each line from the file below will need to be typed or copied into the interactive shell.
+
+    /**
+     * Licensed to the Apache Software Foundation (ASF) under one
+     * or more contributor license agreements.  See the NOTICE file
+     * distributed with this work for additional information
+     * regarding copyright ownership.  The ASF licenses this file
+     * to you under the Apache License, Version 2.0 (the
+     * "License"); you may not use this file except in compliance
+     * with the License.  You may obtain a copy of the License at
+     *
+     *     http://www.apache.org/licenses/LICENSE-2.0
+     *
+     * Unless required by applicable law or agreed to in writing, software
+     * distributed under the License is distributed on an "AS IS" BASIS,
+     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     * See the License for the specific language governing permissions and
+     * limitations under the License.
+     */
+    package org.apache.hadoop.gateway.shell.hbase
+
+    import org.apache.hadoop.gateway.shell.Hadoop
+
+    import static java.util.concurrent.TimeUnit.SECONDS
+
+    gateway = "https://localhost:8443/gateway/sandbox"
+    username = "guest"
+    password = "guest-password"
+    tableName = "test_table"
+
+    session = Hadoop.login(gateway, username, password)
+
+    println "System version : " + HBase.session(session).systemVersion().now().string
+
+    println "Cluster version : " + HBase.session(session).clusterVersion().now().string
+
+    println "Status : " + HBase.session(session).status().now().string
+
+    println "Creating table '" + tableName + "'..."
+
+    HBase.session(session).table(tableName).create()  \
+        .attribute("tb_attr1", "value1")  \
+        .attribute("tb_attr2", "value2")  \
+        .family("family1")  \
+            .attribute("fm_attr1", "value3")  \
+            .attribute("fm_attr2", "value4")  \
+        .endFamilyDef()  \
+        .family("family2")  \
+        .family("family3")  \
+        .endFamilyDef()  \
+        .attribute("tb_attr3", "value5")  \
+        .now()
+
+    println "Done"
+
+    println "Table List : " + HBase.session(session).table().list().now().string
+
+    println "Schema for table '" + tableName + "' : " + HBase.session(session)  \
+        .table(tableName)  \
+        .schema()  \
+        .now().string
+
+    println "Updating schema of table '" + tableName + "'..."
+
+    HBase.session(session).table(tableName).update()  \
+        .family("family1")  \
+            .attribute("fm_attr1", "new_value3")  \
+        .endFamilyDef()  \
+        .family("family4")  \
+            .attribute("fm_attr3", "value6")  \
+        .endFamilyDef()  \
+        .now()
+
+    println "Done"
+
+    println "Schema for table '" + tableName + "' : " + HBase.session(session)  \
+        .table(tableName)  \
+        .schema()  \
+        .now().string
+
+    println "Inserting data into table..."
+
+    HBase.session(session).table(tableName).row("row_id_1").store()  \
+        .column("family1", "col1", "col_value1")  \
+        .column("family1", "col2", "col_value2", 1234567890l)  \
+        .column("family2", null, "fam_value1")  \
+        .now()
+
+    HBase.session(session).table(tableName).row("row_id_2").store()  \
+        .column("family1", "row2_col1", "row2_col_value1")  \
+        .now()
+
+    println "Done"
+
+    println "Querying row by id..."
+
+    println HBase.session(session).table(tableName).row("row_id_1")  \
+        .query()  \
+        .now().string
+
+    println "Querying all rows..."
+
+    println HBase.session(session).table(tableName).row().query().now().string
+
+    println "Querying row by id with extended settings..."
+
+    println HBase.session(session).table(tableName).row().query()  \
+        .column("family1", "row2_col1")  \
+        .column("family2")  \
+        .times(0, Long.MAX_VALUE)  \
+        .numVersions(1)  \
+        .now().string
+
+    println "Deleting cell..."
+
+    HBase.session(session).table(tableName).row("row_id_1")  \
+        .delete()  \
+        .column("family1", "col1")  \
+        .now()
+
+    println "Rows after delete:"
+
+    println HBase.session(session).table(tableName).row().query().now().string
+
+    println "Extended cell delete"
+
+    HBase.session(session).table(tableName).row("row_id_1")  \
+        .delete()  \
+        .column("family2")  \
+        .time(Long.MAX_VALUE)  \
+        .now()
+
+    println "Rows after delete:"
+
+    println HBase.session(session).table(tableName).row().query().now().string
+
+    println "Table regions : " + HBase.session(session).table(tableName)  \
+        .regions()  \
+        .now().string
+
+    println "Creating scanner..."
+
+    scannerId = HBase.session(session).table(tableName).scanner().create()  \
+        .column("family1", "col2")  \
+        .column("family2")  \
+        .startRow("row_id_1")  \
+        .endRow("row_id_2")  \
+        .batch(1)  \
+        .startTime(0)  \
+        .endTime(Long.MAX_VALUE)  \
+        .filter("")  \
+        .maxVersions(100)  \
+        .now().scannerId
+
+    println "Scanner id=" + scannerId
+
+    println "Scanner get next..."
+
+    println HBase.session(session).table(tableName).scanner(scannerId)  \
+        .getNext()  \
+        .now().string
+
+    println "Dropping scanner with id=" + scannerId
+
+    HBase.session(session).table(tableName).scanner(scannerId).delete().now()
+
+    println "Done"
+
+    println "Dropping table '" + tableName + "'..."
+
+    HBase.session(session).table(tableName).delete().now()
+
+    println "Done"
+
+    session.shutdown(10, SECONDS)
+
+### HBase/Stargate via cURL
+
+#### Get software version
+
+Set Accept Header to "text/plain", "text/xml", "application/json" or "application/x-protobuf"
+
+    %  curl -ik -u guest:guest-password\
+     -H "Accept:  application/json"\
+     -X GET 'https://localhost:8443/gateway/sandbox/hbase/version'
+
+#### Get version information regarding the HBase cluster backing the Stargate instance
+
+Set Accept Header to "text/plain", "text/xml" or "application/x-protobuf"
+
+    %  curl -ik -u guest:guest-password\
+     -H "Accept: text/xml"\
+     -X GET 'https://localhost:8443/gateway/sandbox/hbase/version/cluster'
+
+#### Get detailed status on the HBase cluster backing the Stargate instance.
+
+Set Accept Header to "text/plain", "text/xml", "application/json" or "application/x-protobuf"
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: text/xml"\
+     -X GET 'https://localhost:8443/gateway/sandbox/hbase/status/cluster'
+
+#### Get the list of available tables.
+
+Set Accept Header to "text/plain", "text/xml", "application/json" or "application/x-protobuf"
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: text/xml"\
+     -X GET 'https://localhost:8443/gateway/sandbox/hbase'
+
+#### Create table with two column families using xml input
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: text/xml"   -H "Content-Type: text/xml"\
+     -d '<?xml version="1.0" encoding="UTF-8"?><TableSchema name="table1"><ColumnSchema name="family1"/><ColumnSchema name="family2"/></TableSchema>'\
+     -X PUT 'https://localhost:8443/gateway/sandbox/hbase/table1/schema'
+
+#### Create table with two column families using JSON input
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: application/json"  -H "Content-Type: application/json"\
+     -d '{"name":"table2","ColumnSchema":[{"name":"family3"},{"name":"family4"}]}'\
+     -X PUT 'https://localhost:8443/gateway/sandbox/hbase/table2/schema'
+
+#### Get table metadata
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: text/xml"\
+     -X GET 'https://localhost:8443/gateway/sandbox/hbase/table1/regions'
+
+#### Insert single row into table
+
+    curl -ik -u guest:guest-password\
+     -H "Content-Type: text/xml"\
+     -H "Accept: text/xml"\
+     -d '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><Row key="cm93MQ=="><Cell column="ZmFtaWx5MTpjb2wx" >dGVzdA==</Cell></Row></CellSet>'\
+     -X POST 'https://localhost:8443/gateway/sandbox/hbase/table1/row1'
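+
+The row key, column name, and cell value in the request body are Base64 encoded. The encoded values used above can be reproduced with, for example:
+
+    echo -n "row1" | base64          # cm93MQ==
+    echo -n "family1:col1" | base64  # ZmFtaWx5MTpjb2wx
+    echo -n "test" | base64          # dGVzdA==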
+
+#### Insert multiple rows into table
+
+    curl -ik -u guest:guest-password\
+     -H "Content-Type: text/xml"\
+     -H "Accept: text/xml"\
+     -d '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><CellSet><Row key="cm93MA=="><Cell column=" ZmFtaWx5Mzpjb2x1bW4x" >dGVzdA==</Cell></Row><Row key="cm93MQ=="><Cell column=" ZmFtaWx5NDpjb2x1bW4x" >dGVzdA==</Cell></Row></CellSet>'\
+     -X POST 'https://localhost:8443/gateway/sandbox/hbase/table2/false-row-key'
+
+#### Get all data from table
+
+Set Accept Header to "text/plain", "text/xml", "application/json" or "application/x-protobuf"
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: text/xml"\
+     -X GET 'https://localhost:8443/gateway/sandbox/hbase/table1/*'
+
+#### Execute cell or row query
+
+Set Accept Header to "text/plain", "text/xml", "application/json" or "application/x-protobuf"
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: text/xml"\
+     -X GET 'https://localhost:8443/gateway/sandbox/hbase/table1/row1/family1:col1'
+
+#### Delete entire row from table
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: text/xml"\
+     -X DELETE 'https://localhost:8443/gateway/sandbox/hbase/table2/row0'
+
+#### Delete column family from row
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: text/xml"\
+     -X DELETE 'https://localhost:8443/gateway/sandbox/hbase/table2/row0/family3'
+
+#### Delete specific column from row
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: text/xml"\
+     -X DELETE 'https://localhost:8443/gateway/sandbox/hbase/table2/row0/family3:column1'
+
+#### Create scanner
+
+The scanner URL will be returned in the Location response header.
+
+    curl -ik -u guest:guest-password\
+     -H "Content-Type: text/xml"\
+     -d '<Scanner batch="1"/>'\
+     -X PUT 'https://localhost:8443/gateway/sandbox/hbase/table1/scanner'
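+
+The last path segment of that Location header is the scanner ID used in the calls below, for example (the ID value is illustrative):
+
+    Location: https://localhost:8443/gateway/sandbox/hbase/table1/scanner/13705290446328cff5ed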
+
+#### Get the values of the next cells found by the scanner
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: application/json"\
+     -X GET 'https://localhost:8443/gateway/sandbox/hbase/table1/scanner/13705290446328cff5ed'
+
+#### Delete scanner
+
+    curl -ik -u guest:guest-password\
+     -H "Accept: text/xml"\
+     -X DELETE 'https://localhost:8443/gateway/sandbox/hbase/table1/scanner/13705290446328cff5ed'
+
+#### Delete table
+
+    curl -ik -u guest:guest-password\
+     -X DELETE 'https://localhost:8443/gateway/sandbox/hbase/table1/schema'

Added: knox/trunk/books/0.7.0/service_hive.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_hive.md?rev=1681785&view=auto
==============================================================================
--- knox/trunk/books/0.7.0/service_hive.md (added)
+++ knox/trunk/books/0.7.0/service_hive.md Tue May 26 16:07:07 2015
@@ -0,0 +1,268 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### Hive ###
+
+The [Hive wiki pages](https://cwiki.apache.org/confluence/display/Hive/Home) describe Hive installation and configuration processes.
+In the Sandbox, the configuration file for Hive is located at /etc/hive/hive-site.xml.
+Hive Server has to be started in HTTP mode.
+Note the properties shown below as they are related to configuration required by the gateway.
+
+    <property>
+        <name>hive.server2.thrift.http.port</name>
+        <value>10001</value>
+        <description>Port number when in HTTP mode.</description>
+    </property>
+
+    <property>
+        <name>hive.server2.thrift.http.path</name>
+        <value>cliservice</value>
+        <description>Path component of URL endpoint when in HTTP mode.</description>
+    </property>
+
+    <property>
+        <name>hive.server2.transport.mode</name>
+        <value>http</value>
+        <description>Server transport mode. "binary" or "http".</description>
+    </property>
+
+    <property>
+        <name>hive.server2.allow.user.substitution</name>
+        <value>true</value>
+    </property>
+
+The gateway by default includes a sample topology descriptor file `{GATEWAY_HOME}/deployments/sandbox.xml`.
+The value in this sample is configured to work with an installed Sandbox VM.
+
+    <service>
+        <role>HIVE</role>
+        <url>http://localhost:10001/cliservice</url>
+    </service>
+
+By default the gateway is configured to use the HTTP transport mode for Hive in the Sandbox.
+
+#### Hive JDBC URL Mapping ####
+
+| ------- | ------------------------------------------------------------------------------- |
+| Gateway | jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password}?hive.server2.transport.mode=http;hive.server2.thrift.http.path={gateway-path}/{cluster-name}/hive|
+| Cluster |`http://{hive-host}:{hive-port}/{hive-path}`|
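+
+For example, with the Sandbox topology and the gateway keystore used by the JDBC samples later in this section, the gateway URL resolves to something like:
+
+    jdbc:hive2://localhost:8443/;ssl=true;sslTrustStore=/usr/lib/knox/data/security/keystores/gateway.jks;trustStorePassword=knoxsecret?hive.server2.transport.mode=http;hive.server2.thrift.http.path=gateway/sandbox/hive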
+
+#### Hive Examples ####
+
+This guide provides detailed examples of how to do some basic interactions with Hive via the Apache Knox Gateway.
+
+##### Hive Setup #####
+
+1. Make sure you are running the correct version of Hive to ensure JDBC/Thrift/HTTP support.
+2. Make sure Hive Server is running on the correct port.
+3. Make sure Hive Server is running in HTTP mode.
+4. Client side (JDBC):
+     1. Hive JDBC in HTTP mode depends on the following minimal set of libraries to run successfully (they must be on the classpath; see the classpath sketch after this list):
+         * hive-jdbc-0.14.0-standalone.jar;
+         * commons-logging-1.1.3.jar;
+     2. The connection URL has to be the following: jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password}?hive.server2.transport.mode=http;hive.server2.thrift.http.path={gateway-path}/{cluster-name}/hive
+     3. Look at https://cwiki.apache.org/confluence/display/Hive/GettingStarted#GettingStarted-DDLOperations for examples.
+       Hint: For testing it would be better to execute `set hive.security.authorization.enabled=false` as the first statement.
+       Hint: Good examples of Hive DDL/DML can be found here http://gettingstarted.hadooponazure.com/hw/hive.html
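+
+As a minimal classpath sketch (assuming the `HiveJDBCSample` class from the Java example below has been compiled into the current directory and the jar versions match your Hive install):
+
+    java -cp .:hive-jdbc-0.14.0-standalone.jar:commons-logging-1.1.3.jar HiveJDBCSample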
+
+##### Customization #####
+
+This example may need to be tailored to the execution environment.
+In particular host name, host port, user name, user password and context path may need to be changed to match your environment.
+In particular there is one example file in the distribution that may need to be customized.
+Take a moment to review this file.
+All of the values that may need to be customized can be found together at the top of the file.
+
+* samples/hive/java/jdbc/sandbox/HiveJDBCSample.java
+
+##### Client JDBC Example #####
+
+This sample demonstrates creating a new table, loading data into it from the file system local to the Hive server, and querying data from that table.
+
+###### Java ######
+
+    import java.sql.Connection;
+    import java.sql.DriverManager;
+    import java.sql.ResultSet;
+    import java.sql.SQLException;
+    import java.sql.Statement;
+
+    import java.util.logging.Level;
+    import java.util.logging.Logger;
+
+    public class HiveJDBCSample {
+
+      public static void main( String[] args ) {
+        Connection connection = null;
+        Statement statement = null;
+        ResultSet resultSet = null;
+
+        try {
+          String user = "guest";
+          String password = user + "-password";
+          String gatewayHost = "localhost";
+          int gatewayPort = 8443;
+          String trustStore = "/usr/lib/knox/data/security/keystores/gateway.jks";
+          String trustStorePassword = "knoxsecret";
+          String contextPath = "gateway/sandbox/hive";
+          String connectionString = String.format( "jdbc:hive2://%s:%d/;ssl=true;sslTrustStore=%s;trustStorePassword=%s?hive.server2.transport.mode=http;hive.server2.thrift.http.path=/%s", gatewayHost, gatewayPort, trustStore, trustStorePassword, contextPath );
+
+          // load Hive JDBC Driver
+          Class.forName( "org.apache.hive.jdbc.HiveDriver" );
+
+          // configure JDBC connection
+          connection = DriverManager.getConnection( connectionString, user, password );
+
+          statement = connection.createStatement();
+
+          // disable Hive authorization - it could be omitted if Hive authorization
+          // was configured properly
+          statement.execute( "set hive.security.authorization.enabled=false" );
+
+          // create sample table
+          statement.execute( "CREATE TABLE logs(column1 string, column2 string, column3 string, column4 string, column5 string, column6 string, column7 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '" );
+
+          // load data into Hive from file /tmp/log.txt which is placed on the local file system
+          statement.execute( "LOAD DATA LOCAL INPATH '/tmp/log.txt' OVERWRITE INTO TABLE logs" );
+
+          resultSet = statement.executeQuery( "SELECT * FROM logs" );
+
+          while ( resultSet.next() ) {
+            System.out.println( resultSet.getString( 1 ) + " --- " + resultSet.getString( 2 ) + " --- " + resultSet.getString( 3 ) + " --- " + resultSet.getString( 4 ) );
+          }
+        } catch ( ClassNotFoundException ex ) {
+          Logger.getLogger( HiveJDBCSample.class.getName() ).log( Level.SEVERE, null, ex );
+        } catch ( SQLException ex ) {
+          Logger.getLogger( HiveJDBCSample.class.getName() ).log( Level.SEVERE, null, ex );
+        } finally {
+          if ( resultSet != null ) {
+            try {
+              resultSet.close();
+            } catch ( SQLException ex ) {
+              Logger.getLogger( HiveJDBCSample.class.getName() ).log( Level.SEVERE, null, ex );
+            }
+          }
+          if ( statement != null ) {
+            try {
+              statement.close();
+            } catch ( SQLException ex ) {
+              Logger.getLogger( HiveJDBCSample.class.getName() ).log( Level.SEVERE, null, ex );
+            }
+          }
+          if ( connection != null ) {
+            try {
+              connection.close();
+            } catch ( SQLException ex ) {
+              Logger.getLogger( HiveJDBCSample.class.getName() ).log( Level.SEVERE, null, ex );
+            }
+          }
+        }
+      }
+    }
+
+###### Groovy ######
+
+Make sure that the {GATEWAY_HOME}/ext directory contains the following libraries for successful execution:
+
+- hive-jdbc-0.14.0-standalone.jar;
+- commons-logging-1.1.3.jar;
+
+There are several ways to execute this sample depending upon your preference.
+
+You can use the Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar samples/hive/groovy/jdbc/sandbox/HiveJDBCSample.groovy
+
+You can manually type in the KnoxShell DSL script into the interactive Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar
+
+Each line from the file below will need to be typed or copied into the interactive shell.
+
+    import java.sql.DriverManager
+
+    user = "guest";
+    password = user + "-password";
+    gatewayHost = "localhost";
+    gatewayPort = 8443;
+    trustStore = "/usr/lib/knox/data/security/keystores/gateway.jks";
+    trustStorePassword = "knoxsecret";
+    contextPath = "gateway/sandbox/hive";
+    connectionString = String.format( "jdbc:hive2://%s:%d/;ssl=true;sslTrustStore=%s;trustStorePassword=%s?hive.server2.transport.mode=http;hive.server2.thrift.http.path=/%s", gatewayHost, gatewayPort, trustStore, trustStorePassword, contextPath );
+
+    // Load Hive JDBC Driver
+    Class.forName( "org.apache.hive.jdbc.HiveDriver" );
+
+    // Configure JDBC connection
+    connection = DriverManager.getConnection( connectionString, user, password );
+
+    statement = connection.createStatement();
+
+    // Disable Hive authorization - This can be omitted if Hive authorization is configured properly
+    statement.execute( "set hive.security.authorization.enabled=false" );
+
+    // Create sample table
+    statement.execute( "CREATE TABLE logs(column1 string, column2 string, column3 string, column4 string, column5 string, column6 string, column7 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '" );
+
+    // Load data into Hive from file /tmp/log.txt which is placed on the local file system
+    statement.execute( "LOAD DATA LOCAL INPATH '/tmp/log.txt' OVERWRITE INTO TABLE logs" );
+
+    resultSet = statement.executeQuery( "SELECT * FROM logs" );
+
+    while ( resultSet.next() ) {
+      System.out.println( resultSet.getString( 1 ) + " --- " + resultSet.getString( 2 ) );
+    }
+
+    resultSet.close();
+    statement.close();
+    connection.close();
+
+The examples use 'log.txt' with the following content:
+
+    2012-02-03 18:35:34 SampleClass6 [INFO] everything normal for id 577725851
+    2012-02-03 18:35:34 SampleClass4 [FATAL] system problem at id 1991281254
+    2012-02-03 18:35:34 SampleClass3 [DEBUG] detail for id 1304807656
+    2012-02-03 18:35:34 SampleClass3 [WARN] missing id 423340895
+    2012-02-03 18:35:34 SampleClass5 [TRACE] verbose detail for id 2082654978
+    2012-02-03 18:35:34 SampleClass0 [ERROR] incorrect id  1886438513
+    2012-02-03 18:35:34 SampleClass9 [TRACE] verbose detail for id 438634209
+    2012-02-03 18:35:34 SampleClass8 [DEBUG] detail for id 2074121310
+    2012-02-03 18:35:34 SampleClass0 [TRACE] verbose detail for id 1505582508
+    2012-02-03 18:35:34 SampleClass0 [TRACE] verbose detail for id 1903854437
+    2012-02-03 18:35:34 SampleClass7 [DEBUG] detail for id 915853141
+    2012-02-03 18:35:34 SampleClass3 [TRACE] verbose detail for id 303132401
+    2012-02-03 18:35:34 SampleClass6 [TRACE] verbose detail for id 151914369
+    2012-02-03 18:35:34 SampleClass2 [DEBUG] detail for id 146527742
+    ...
+
+Expected output:
+
+    2012-02-03 --- 18:35:34 --- SampleClass6 --- [INFO]
+    2012-02-03 --- 18:35:34 --- SampleClass4 --- [FATAL]
+    2012-02-03 --- 18:35:34 --- SampleClass3 --- [DEBUG]
+    2012-02-03 --- 18:35:34 --- SampleClass3 --- [WARN]
+    2012-02-03 --- 18:35:34 --- SampleClass5 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass0 --- [ERROR]
+    2012-02-03 --- 18:35:34 --- SampleClass9 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass8 --- [DEBUG]
+    2012-02-03 --- 18:35:34 --- SampleClass0 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass0 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass7 --- [DEBUG]
+    2012-02-03 --- 18:35:34 --- SampleClass3 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass6 --- [TRACE]
+    2012-02-03 --- 18:35:34 --- SampleClass2 --- [DEBUG]
+    ...

Added: knox/trunk/books/0.7.0/service_oozie.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_oozie.md?rev=1681785&view=auto
==============================================================================
--- knox/trunk/books/0.7.0/service_oozie.md (added)
+++ knox/trunk/books/0.7.0/service_oozie.md Tue May 26 16:07:07 2015
@@ -0,0 +1,193 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### Oozie ###
+
+
+Oozie is a Hadoop component that allows complex job workflows to be submitted and managed.
+Please refer to the latest [Oozie documentation](http://oozie.apache.org/docs/4.0.0/) for details.
+
+In order to make Oozie accessible via the gateway there are several important Hadoop configuration settings.
+These all relate to the network endpoint exposed by various Hadoop services.
+
+The HTTP endpoint at which Oozie is running can be found via the oozie.base.url property in the oozie-site.xml file.
+In a Sandbox installation this can typically be found in /etc/oozie/conf/oozie-site.xml.
+
+    <property>
+        <name>oozie.base.url</name>
+        <value>http://sandbox.hortonworks.com:11000/oozie</value>
+    </property>
+
+The RPC address at which the Resource Manager exposes the JOBTRACKER endpoint can be found via the yarn.resourcemanager.address property in the yarn-site.xml file.
+In a Sandbox installation this can typically be found in /etc/hadoop/conf/yarn-site.xml.
+
+    <property>
+        <name>yarn.resourcemanager.address</name>
+        <value>sandbox.hortonworks.com:8050</value>
+    </property>
+
+The address at which the Name Node exposes its RPC endpoint can be found via the dfs.namenode.rpc-address property in the hdfs-site.xml file.
+In a Sandbox installation this can typically be found in /etc/hadoop/conf/hdfs-site.xml.
+
+    <property>
+        <name>dfs.namenode.rpc-address</name>
+        <value>sandbox.hortonworks.com:8020</value>
+    </property>
+
+If HDFS has been configured to be in High Availability mode (HA), then instead of the RPC address mentioned above for the Name Node, look up
+and use the logical name of the service found via dfs.nameservices in hdfs-site.xml. For example,
+
+    <property>
+        <name>dfs.nameservices</name>
+        <value>ha-service</value>
+    </property>
+
+Please note that only one of the URLs, either the RPC endpoint or the HA service name, should be used as the NAMENODE hdfs URL in the gateway topology file.
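+
+With the HA configuration above, the NAMENODE entry in the topology file would reference the logical service name rather than a host and port, for example:
+
+    <service>
+        <role>NAMENODE</role>
+        <url>hdfs://ha-service</url>
+    </service>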
+
+The information above must be provided to the gateway via a topology descriptor file.
+These topology descriptor files are placed in `{GATEWAY_HOME}/deployments`.
+An example that is set up for the default configuration of the Sandbox is `{GATEWAY_HOME}/deployments/sandbox.xml`.
+These values will need to be changed for a non-default Sandbox or other Hadoop cluster configurations.
+
+    <service>
+        <role>NAMENODE</role>
+        <url>hdfs://localhost:8020</url>
+    </service>
+    <service>
+        <role>JOBTRACKER</role>
+        <url>rpc://localhost:8050</url>
+    </service>
+    <service>
+        <role>OOZIE</role>
+        <url>http://localhost:11000/oozie</url>
+    </service>
+
+#### Oozie URL Mapping ####
+
+For Oozie URLs, the mapping of Knox Gateway accessible URLs to direct Oozie URLs is simple.
+
+| ------- | --------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/oozie` |
+| Cluster | `http://{oozie-host}:{oozie-port}/oozie}`                                   |
+
+
+#### Oozie Request Changes ####
+
+TODO - In some cases the Oozie requests need to be slightly different when made through the gateway.
+These changes are required in order to protect the client from knowing the internal structure of the Hadoop cluster.
+
+
+#### Oozie Example via Client DSL ####
+
+This example will also submit the familiar WordCount Java MapReduce job to the Hadoop cluster via the gateway using the KnoxShell DSL.
+However in this case the job will be submitted via an Oozie workflow.
+There are several ways to do this depending upon your preference.
+
+You can use the "embedded" Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar samples/ExampleOozieWorkflow.groovy
+
+You can manually type in the KnoxShell DSL script into the "embedded" Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar
+
+Each line from the file `samples/ExampleOozieWorkflow.groovy` will need to be typed or copied into the interactive shell.
+
+#### Oozie Example via cURL
+
+The example below illustrates the sequence of curl commands that could be used to run a "word count" map reduce job via an Oozie workflow.
+
+It utilizes the hadoop-examples.jar from a Hadoop install for running a simple word count job.
+A copy of that jar has been included in the samples directory for convenience.
+
+In addition a workflow definition and configuration file is required.
+These have not been included but are available for download.
+Download [workflow-definition.xml](workflow-definition.xml) and [workflow-configuration.xml](workflow-configuration.xml) and store them in the {GATEWAY_HOME} directory.
+Review the contents of workflow-configuration.xml to ensure that it matches your environment.
+
+Take care to follow the instructions below where replacement values are required.
+These replacement values are identified with { } markup.
+
+    # 0. Optionally cleanup the test directory in case a previous example was run without cleaning up.
+    curl -i -k -u guest:guest-password -X DELETE \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
+
+    # 1. Create the inode for workflow definition file in /user/guest/example
+    curl -i -k -u guest:guest-password -X PUT \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/workflow.xml?op=CREATE'
+
+    # 2. Upload the workflow definition file.  This file can be found in {GATEWAY_HOME}/templates
+    curl -i -k -u guest:guest-password -T workflow-definition.xml -X PUT \
+        '{Value of Location header from command above}'
+
+    # 3. Create the inode for hadoop-examples.jar in /user/guest/example/lib
+    curl -i -k -u guest:guest-password -X PUT \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/lib/hadoop-examples.jar?op=CREATE'
+
+    # 4. Upload hadoop-examples.jar to /user/guest/example/lib.  Use a hadoop-examples.jar from a Hadoop install.
+    curl -i -k -u guest:guest-password -T samples/hadoop-examples.jar -X PUT \
+        '{Value of Location header from command above}'
+
+    # 5. Create the inode for a sample input file readme.txt in /user/guest/example/input.
+    curl -i -k -u guest:guest-password -X PUT \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/input/README?op=CREATE'
+
+    # 6. Upload readme.txt to /user/guest/example/input.  Use the readme.txt in {GATEWAY_HOME}.
+    # The sample below uses this README file found in {GATEWAY_HOME}.
+    curl -i -k -u guest:guest-password -T README -X PUT \
+        '{Value of Location header from command above}'
+
+    # 7. Submit the job via Oozie
+    # Take note of the Job ID in the JSON response as this will be used in the next step.
+    curl -i -k -u guest:guest-password -H Content-Type:application/xml -T workflow-configuration.xml \
+        -X POST 'https://localhost:8443/gateway/sandbox/oozie/v1/jobs?action=start'
+
+    # 8. Query the job status via Oozie.
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/oozie/v1/job/{Job ID from JSON body}'
+
+    # 9. List the contents of the output directory /user/guest/example/output
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/output?op=LISTSTATUS'
+
+    # 10. Optionally cleanup the test directory
+    curl -i -k -u guest:guest-password -X DELETE \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
+
+### Oozie Client DSL ###
+
+#### submit() - Submit a workflow job.
+
+* Request
+    * text (String) - XML formatted workflow configuration string.
+    * file (String) - A filename containing XML formatted workflow configuration.
+    * action (String) - The initial action to take on the job.  Optional: Default is "start".
+* Response
+    * BasicResponse
+* Example
+    * `Workflow.submit(session).file(localFile).action("start").now()`
+
+#### status() - Query the status of a workflow job.
+
+* Request
+    * jobId (String) - The job ID to check. This is the ID received when the job was created.
+* Response
+    * BasicResponse
+* Example
+    * `Workflow.status(session).jobId(jobId).now().string`
+
+

Added: knox/trunk/books/0.7.0/service_storm.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_storm.md?rev=1681785&view=auto
==============================================================================
--- knox/trunk/books/0.7.0/service_storm.md (added)
+++ knox/trunk/books/0.7.0/service_storm.md Tue May 26 16:07:07 2015
@@ -0,0 +1,114 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### Storm ###
+
+Storm is a distributed realtime computation system. Storm exposes REST APIs for UI functionality that can be used for
+retrieving metrics data and configuration information as well as management operations such as starting or stopping topologies.
+
+The docs for this can be found here: https://github.com/apache/storm/blob/master/STORM-UI-REST-API.md
+
+To enable this functionality, a topology file needs to have the following configuration:
+
+
+    <service>
+            <role>STORM</role>
+            <url>http://<hostname>:<port></url>
+    </service>
+
+The default UI daemon port is 8744. If it is configured to some other port, that configuration can be
+found in storm.yaml as the value for the property 'ui.port'.
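+
+For example, with the default UI port the service entry would look like the following (the host name is illustrative):
+
+    <service>
+            <role>STORM</role>
+            <url>http://sandbox.hortonworks.com:8744</url>
+    </service>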
+
+In addition to the storm service configuration above, a STORM-LOGVIEWER service must be configured if the
+log files are to be retrieved through Knox. The value of the port for the logviewer can be found by the property
+'logviewer.port' also in the file storm.yaml.
+
+    <service>
+            <role>STORM-LOGVIEWER</role>
+            <url>http://<hostname>:<port></url>
+    </service>
+
+
+#### Storm URL Mapping ####
+
+For Storm URLs, the mapping of Knox Gateway accessible URLs to direct Storm URLs is the following.
+
+| ------- | ------------------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/storm` |
+| Cluster | `http://{storm-host}:{storm-port}`                                      |
+
+For the log viewer the mapping is as follows
+
+| ------- | ------------------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/storm/logviewer` |
+| Cluster | `http://{storm-logviewer-host}:{storm-logviewer-port}`                                      |
+
+
+#### Storm Examples
+
+Some of the various calls that can be made and examples using curl are listed below.
+
+    # 0. Getting cluster configuration
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/cluster/configuration'
+    
+    # 1. Getting cluster summary information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/cluster/summary'
+
+    # 2. Getting supervisor summary information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/supervisor/summary'
+    
+    # 3. Getting topologies summary information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/topology/summary'
+    
+    # 4. Getting specific topology information. Substitute {id} with the topology id.
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/topology/{id}'
+
+    # 5. To get component level information. Substitute {id} with the topology id and {component} with the component id e.g. 'spout'
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/storm/api/v1/topology/{id}/component/{component}'
+
+
+The following POST operations all require an 'x-csrf-token' header along with other information that can be stored in a cookie file,
+in particular the 'ring-session' and 'JSESSIONID' values.
+
+    # 6. To activate a topology. Substitute {id} with the topology id and {token-value} with the x-csrf-token value.
+
+    curl -ik -b ~/cookiejar.txt -c ~/cookiejar.txt -u guest:guest-password -H 'x-csrf-token:{token-value}' -X POST \
+     http://localhost:8744/api/v1/topology/{id}/activate
+
+    # 7. To de-activate a topology. Substitute {id} with the topology id and {token-value} with the x-csrf-token value.
+
+    curl -ik -b ~/cookiejar.txt -c ~/cookiejar.txt -u guest:guest-password -H 'x-csrf-token:{token-value}' -X POST \
+     http://localhost:8744/api/v1/topology/{id}/deactivate
+
+    # 8. To rebalance a topology. Substitute {id} with the topology id and {token-value} with the x-csrf-token value.
+
+    curl -ik -b ~/cookiejar.txt -c ~/cookiejar.txt -u guest:guest-password -H 'x-csrf-token:{token-value}' -X POST \
+     http://localhost:8744/api/v1/topology/{id}/rebalance/0
+
+    # 9. To kill a topology. Substitute {id} with the topology id and {token-value} with the x-csrf-token value.
+
+    curl -ik -b ~/cookiejar.txt -c ~/cookiejar.txt -u guest:guest-password -H 'x-csrf-token:{token-value}' -X POST \
+     http://localhost:8744/api/v1/topology/{id}/kill/0
+

Added: knox/trunk/books/0.7.0/service_webhcat.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_webhcat.md?rev=1681785&view=auto
==============================================================================
--- knox/trunk/books/0.7.0/service_webhcat.md (added)
+++ knox/trunk/books/0.7.0/service_webhcat.md Tue May 26 16:07:07 2015
@@ -0,0 +1,152 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### WebHCat ###
+
+WebHCat is a related but separate service from Hive.
+As such it is installed and configured independently.
+The [WebHCat wiki pages](https://cwiki.apache.org/confluence/display/Hive/WebHCat) describe this process.
+In the Sandbox, the configuration file for WebHCat is located at /etc/hadoop/hcatalog/webhcat-site.xml.
+Note the properties shown below as they are related to configuration required by the gateway.
+
+    <property>
+        <name>templeton.port</name>
+        <value>50111</value>
+    </property>
+
+Also important is the configuration of the JOBTRACKER RPC endpoint.
+For Hadoop 2 this can be found in the yarn-site.xml file.
+In Sandbox this file can be found at /etc/hadoop/conf/yarn-site.xml.
+The property yarn.resourcemanager.address within that file is relevant for the gateway's configuration.
+
+    <property>
+        <name>yarn.resourcemanager.address</name>
+        <value>sandbox.hortonworks.com:8050</value>
+    </property>
+
+See #[WebHDFS] for details about locating the Hadoop configuration for the NAMENODE endpoint.
+
+The gateway by default includes a sample topology descriptor file `{GATEWAY_HOME}/deployments/sandbox.xml`.
+The values in this sample are configured to work with an installed Sandbox VM.
+
+    <service>
+        <role>NAMENODE</role>
+        <url>hdfs://localhost:8020</url>
+    </service>
+    <service>
+        <role>JOBTRACKER</role>
+        <url>rpc://localhost:8050</url>
+    </service>
+    <service>
+        <role>WEBHCAT</role>
+        <url>http://localhost:50111/templeton</url>
+    </service>
+
+The URLs provided for the NAMENODE and JOBTRACKER roles do not result in an endpoint being exposed by the gateway.
+This information is only required so that other URLs can be rewritten that reference the appropriate RPC address for Hadoop services.
+This prevents clients from needing to be aware of the internal cluster details.
+Note that for Hadoop 2 the JOBTRACKER RPC endpoint is provided by the Resource Manager component.
+
+By default the gateway is configured to use the HTTP endpoint for WebHCat in the Sandbox.
+This could alternatively be configured to use the HTTPS endpoint by providing the correct address.
+
+#### WebHCat URL Mapping ####
+
+For WebHCat URLs, the mapping of Knox Gateway accessible URLs to direct WebHCat URLs is simple.
+
+| ------- | ------------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/templeton` |
+| Cluster | `http://{webhcat-host}:{webhcat-port}/templeton`                               |
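+
+For example, WebHCat's status resource can be checked through the gateway with a call like the following (host and credentials are the Sandbox defaults):
+
+    curl -ik -u guest:guest-password\
+     -X GET 'https://localhost:8443/gateway/sandbox/templeton/v1/status'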
+
+
+#### WebHCat Example ####
+
+This example will submit the familiar WordCount Java MapReduce job to the Hadoop cluster via the gateway using the KnoxShell DSL.
+There are several ways to do this depending upon your preference.
+
+You can use the "embedded" Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar samples/ExampleWebHCatJob.groovy
+
+You can manually type in the KnoxShell DSL script into the "embedded" Groovy interpreter provided with the distribution.
+
+    java -jar bin/shell.jar
+
+Each line from the file `samples/ExampleWebHCatJob.groovy` would then need to be typed or copied into the interactive shell.
+
+
+#### WebHCat Client DSL ####
+
+##### submitJava() - Submit a Java MapReduce job.
+
+* Request
+    * jar (String) - The remote file name of the JAR containing the app to execute.
+    * app (String) - The app name to execute.  This is 'wordcount' for example, not the class name.
+    * input (String) - The remote directory name to use as input for the job.
+    * output (String) - The remote directory name to store output from the job.
+* Response
+    * jobId : String - The job ID of the submitted job.  Consumes body.
+* Example
+
+
+    Job.submitJava(session)
+        .jar(remoteJarName)
+        .app(appName)
+        .input(remoteInputDir)
+        .output(remoteOutputDir)
+        .now()
+        .jobId
+
+##### submitPig() - Submit a Pig job.
+
+* Request
+    * file (String) - The remote file name of the pig script.
+    * arg (String) - An argument to pass to the script.
+    * statusDir (String) - The remote directory to store status output.
+* Response
+    * jobId : String - The job ID of the submitted job.  Consumes body.
+* Example
+    * `Job.submitPig(session).file(remotePigFileName).arg("-v").statusDir(remoteStatusDir).now()`
+
+##### submitHive() - Submit a Hive job.
+
+* Request
+    * file (String) - The remote file name of the hive script.
+    * arg (String) - An argument to pass to the script.
+    * statusDir (String) - The remote directory to store status output.
+* Response
+    * jobId : String - The job ID of the submitted job.  Consumes body.
+* Example
+    * `Job.submitHive(session).file(remoteHiveFileName).arg("-v").statusDir(remoteStatusDir).now()`
+
+##### queryQueue() - Return a list of all job IDs registered to the user.
+
+* Request
+    * No request parameters.
+* Response
+    * BasicResponse
+* Example
+    * `Job.queryQueue(session).now().string`
+
+##### queryStatus() - Check the status of a job and get related job information given its job ID.
+
+* Request
+    * jobId (String) - The job ID to check. This is the ID received when the job was created.
+* Response
+    * BasicResponse
+* Example
+    * `Job.queryStatus(session).jobId(jobId).now().string`
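+
+As an illustrative sketch (not a copy of `samples/ExampleWebHCatJob.groovy`), the calls above can be combined to submit the WordCount job and then poll `queryStatus()` until WebHCat reports completion. The remote paths, the app name and the `status.jobComplete` field read from the parsed status JSON are assumptions; adjust them for your cluster.
+
+    import org.apache.hadoop.gateway.shell.Hadoop
+    import org.apache.hadoop.gateway.shell.job.Job
+    import groovy.json.JsonSlurper
+
+    gateway = "https://localhost:8443/gateway/sandbox"
+    session = Hadoop.login( gateway, "guest", "guest-password" )
+
+    // Hypothetical remote locations; the JAR and input data must already be in HDFS.
+    remoteJarName = "/user/guest/example/hadoop-examples.jar"
+    appName = "wordcount"
+    remoteInputDir = "/user/guest/example/input"
+    remoteOutputDir = "/user/guest/example/output"
+
+    // Submit the Java MapReduce job and capture its job ID.
+    jobId = Job.submitJava( session ) \
+        .jar( remoteJarName ) \
+        .app( appName ) \
+        .input( remoteInputDir ) \
+        .output( remoteOutputDir ) \
+        .now().jobId
+    println "Submitted job " + jobId
+
+    // Poll the job status until the jobComplete flag is set (give up after 60 attempts).
+    done = false
+    count = 0
+    while( !done && count++ < 60 ) {
+        sleep( 1000 )
+        json = (new JsonSlurper()).parseText( Job.queryStatus( session ).jobId( jobId ).now().string )
+        done = ( json.status.jobComplete == true )
+    }
+    println "Job " + jobId + " complete"
+
+    session.shutdown()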

Added: knox/trunk/books/0.7.0/service_webhdfs.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_webhdfs.md?rev=1681785&view=auto
==============================================================================
--- knox/trunk/books/0.7.0/service_webhdfs.md (added)
+++ knox/trunk/books/0.7.0/service_webhdfs.md Tue May 26 16:07:07 2015
@@ -0,0 +1,294 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### WebHDFS ###
+
+REST API access to HDFS in a Hadoop cluster is provided by WebHDFS.
+The [WebHDFS REST API](http://hadoop.apache.org/docs/stable/webhdfs.html) documentation is available online.
+WebHDFS must be enabled in the hdfs-site.xml configuration file.
+In sandbox this configuration file is located at /etc/hadoop/conf/hdfs-site.xml.
+Note the properties shown below as they are related to configuration required by the gateway.
+Some of these represent the default values and may not actually be present in hdfs-site.xml.
+
+    <property>
+        <name>dfs.webhdfs.enabled</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>dfs.namenode.rpc-address</name>
+        <value>sandbox.hortonworks.com:8020</value>
+    </property>
+    <property>
+        <name>dfs.namenode.http-address</name>
+        <value>sandbox.hortonworks.com:50070</value>
+    </property>
+    <property>
+        <name>dfs.namenode.https-address</name>
+        <value>sandbox.hortonworks.com:50470</value>
+    </property>
+
+The values above need to be reflected in each topology descriptor file deployed to the gateway.
+The gateway by default includes a sample topology descriptor file `{GATEWAY_HOME}/deployments/sandbox.xml`.
+The values in this sample are configured to work with an installed Sandbox VM.
+
+    <service>
+        <role>NAMENODE</role>
+        <url>hdfs://localhost:8020</url>
+    </service>
+    <service>
+        <role>WEBHDFS</role>
+        <url>http://localhost:50070/webhdfs</url>
+    </service>
+
+The URL provided for the role NAMENODE does not result in an endpoint being exposed by the gateway.
+This information is only required so that other URLs that reference the Name Node's RPC address can be rewritten.
+This prevents clients from needing to be aware of the internal cluster details.
+
+By default the gateway is configured to use the HTTP endpoint for WebHDFS in the Sandbox.
+This could alternatively be configured to use the HTTPS endpoint by providing the correct address.
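+
+For example, assuming the Sandbox NameNode HTTPS address shown above (port 50470), the service entry might look like the following; the exact address depends on the cluster.
+
+    <service>
+        <role>WEBHDFS</role>
+        <url>https://localhost:50470/webhdfs</url>
+    </service>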
+
+#### WebHDFS URL Mapping ####
+
+For Name Node URLs, the mapping of Knox Gateway accessible WebHDFS URLs to direct WebHDFS URLs is simple.
+
+| ------- | ----------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs` |
+| Cluster | `http://{webhdfs-host}:50070/webhdfs`                                         |
+
+However, there is a subtle difference to URLs that are returned by WebHDFS in the Location header of many requests.
+Direct WebHDFS requests may return Location headers that contain the address of a particular Data Node.
+The gateway will rewrite these URLs to ensure subsequent requests come back through the gateway and internal cluster details are protected.
+
+A WebHDFS request to the Name Node to retrieve a file will return a URL of the form below in the Location header.
+
+    http://{datanode-host}:{datanode-port}/webhdfs/v1/{path}?...
+
+Note that this URL contains the network location of a Data Node.
+The gateway will rewrite this URL to look like the URL below.
+
+    https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs/data/v1/{path}?_={encrypted-query-parameters}
+
+The `{encrypted-query-parameters}` will contain the `{datanode-host}` and `{datanode-port}` information.
+This information, along with the original query parameters, is encrypted so that the internal Hadoop details are protected.
+
+#### WebHDFS Examples ####
+
+The examples below upload a file, download the file and list the contents of the directory.
+
+##### WebHDFS via client DSL
+
+You can use the Groovy example scripts and interpreter provided with the distribution.
+
+    java -jar bin/shell.jar samples/ExampleWebHdfsPutGet.groovy
+    java -jar bin/shell.jar samples/ExampleWebHdfsLs.groovy
+
+You can manually type the client DSL script into the KnoxShell interactive Groovy interpreter provided with the distribution.
+The command below starts the KnoxShell in interactive mode.
+
+    java -jar bin/shell.jar
+
+Each line below could be typed or copied into the interactive shell and executed.
+This is provided as an example to illustrate the use of the client DSL.
+
+    // Import the client DSL and a few useful utilities for working with JSON.
+    import org.apache.hadoop.gateway.shell.Hadoop
+    import org.apache.hadoop.gateway.shell.hdfs.Hdfs
+    import groovy.json.JsonSlurper
+
+    // Setup some basic config.
+    gateway = "https://localhost:8443/gateway/sandbox"
+    username = "guest"
+    password = "guest-password"
+
+    // Start the session.
+    session = Hadoop.login( gateway, username, password )
+
+    // Cleanup anything leftover from a previous run.
+    Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()
+
+    // Upload the README to HDFS.
+    Hdfs.put( session ).file( "README" ).to( "/user/guest/example/README" ).now()
+
+    // Download the README from HDFS.
+    text = Hdfs.get( session ).from( "/user/guest/example/README" ).now().string
+    println text
+
+    // List the contents of the directory.
+    text = Hdfs.ls( session ).dir( "/user/guest/example" ).now().string
+    json = (new JsonSlurper()).parseText( text )
+    println json.FileStatuses.FileStatus.pathSuffix
+
+    // Cleanup the directory.
+    Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()
+
+    // Clean the session.
+    session.shutdown()
+
+
+##### WebHDFS via cURL
+
+You can use cURL to directly invoke the REST APIs via the gateway.
+
+###### Optionally cleanup the sample directory in case a previous example was run without cleaning up.
+
+    curl -i -k -u guest:guest-password -X DELETE \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
+
+###### Register the name for a sample file README in /user/guest/example.
+
+    curl -i -k -u guest:guest-password -X PUT \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/README?op=CREATE'
+
+###### Upload README to /user/guest/example.  Use the README in {GATEWAY_HOME}.
+
+    curl -i -k -u guest:guest-password -T README -X PUT \
+        '{Value of Location header from command above}'
+
+###### List the contents of the directory /user/guest/example.
+
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=LISTSTATUS'
+
+###### Request the content of the README file in /user/guest/example.
+
+    curl -i -k -u guest:guest-password -X GET \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example/README?op=OPEN'
+
+###### Read the content of the file.
+
+    curl -i -k -u guest:guest-password -X GET \
+        '{Value of Location header from command above}'
+
+###### Optionally cleanup the example directory.
+
+    curl -i -k -u guest:guest-password -X DELETE \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/user/guest/example?op=DELETE&recursive=true'
+
+
+##### WebHDFS client DSL
+
+###### get() - Get a file from HDFS (OPEN).
+
+* Request
+    * from( String name ) - The full name of the file in HDFS.
+    * file( String name ) - The name of a local file to create with the content.
+    If this isn't specified, the file content must be read from the response.
+* Response
+    * BasicResponse
+    * If file parameter specified content will be streamed to file.
+* Example
+    * `Hdfs.get( session ).from( "/user/guest/example/README" ).now().string`
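+
+Based on the optional `file` parameter above, the content can instead be streamed to a local file; the local file name below is illustrative.
+
+    Hdfs.get( session ).from( "/user/guest/example/README" ).file( "README.local" ).now()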
+
+###### ls() - Query the contents of a directory (LISTSTATUS)
+
+* Request
+    * dir( String name ) - The full name of the directory in HDFS.
+* Response
+    * BasicResponse
+* Example
+    * `Hdfs.ls( session ).dir( "/user/guest/example" ).now().string`
+
+###### mkdir() - Create a directory in HDFS (MKDIRS)
+
+* Request
+    * dir( String name ) - The full name of the directory to create in HDFS.
+    * perm( String perm ) - The permissions for the directory (e.g. 644).  Optional: default="777"
+* Response
+    * EmptyResponse - Implicit close().
+* Example
+    * `Hdfs.mkdir( session ).dir( "/user/guest/example" ).now()`
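+
+A variant using the optional `perm` parameter (the permission value below is illustrative):
+
+    Hdfs.mkdir( session ).dir( "/user/guest/example" ).perm( "755" ).now()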
+
+###### put() - Write a file into HDFS (CREATE)
+
+* Request
+    * text( String text ) - Text to upload to HDFS.  Takes precedence over file if both are present.
+    * file( String name ) - The name of a local file to upload to HDFS.
+    * to( String name ) - The fully qualified name to create in HDFS.
+* Response
+    * EmptyResponse - Implicit close().
+* Example
+    * `Hdfs.put( session ).file( README ).to( "/user/guest/example/README" ).now()`
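+
+A variant using the `text` parameter instead of a local file (the content and target name below are illustrative):
+
+    Hdfs.put( session ).text( "Hello from Knox" ).to( "/user/guest/example/hello.txt" ).now()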
+
+###### rm() - Delete a file or directory (DELETE)
+
+* Request
+    * file( String name ) - The fully qualified file or directory name in HDFS.
+    * recursive( Boolean recursive ) - Delete directory and all of its contents if True.  Optional: default=False
+* Response
+    * BasicResponse - Implicit close().
+* Example
+    * `Hdfs.rm( session ).file( "/user/guest/example" ).recursive().now()`
+
+
+### WebHDFS HA ###
+
+Knox provides basic failover and retry functionality for REST API calls made to WebHDFS when HDFS HA has been 
+configured and enabled.
+
+To enable HA functionality for WebHDFS in Knox the following configuration has to be added to the topology file.
+
+    <provider>
+       <role>ha</role>
+       <name>HaProvider</name>
+       <enabled>true</enabled>
+       <param>
+           <name>WEBHDFS</name>
+           <value>maxFailoverAttempts=3;failoverSleep=1000;maxRetryAttempts=300;retrySleep=1000;enabled=true</value>
+       </param>
+    </provider>
+    
+The role and name of the provider above must be as shown. The name in the 'param' section must match the role name of
+the service being configured for HA, and the value in the 'param' section is the configuration for that particular
+service in HA mode. In this case the name is 'WEBHDFS'.
+
+The various configuration parameters are described below:
+     
+* maxFailoverAttempts - 
+This is the maximum number of times a failover will be attempted. The failover strategy at this time is very simplistic:
+the next URL in the list of URLs provided for the service is used, and the one that failed is put at the bottom of the
+list. If the list is exhausted and the maximum number of attempts has not been reached, the first URL that failed will
+be tried again (the list starts again from the original top entry).
+
+* failoverSleep - 
+The amount of time in milliseconds that the process will wait before attempting to fail over.
+
+* maxRetryAttempts - 
+This is the maximum number of times that a retry request will be attempted. Unlike failover, the retry is done on the
+same URL that failed. This handles a special case in HDFS where the node is in safe mode; the expectation is that the
+node will come out of safe mode, so a retry is preferable to a failover.
+
+* retrySleep - 
+The amount of time in milliseconds that the process will wait before a retry is issued.
+
+* enabled - 
+Flag to turn the particular service on or off for HA.
+
+For the service configuration itself, the URLs of the additional standby nodes should be added to the list. The active
+URL (at the time of configuration) should ideally be added to the top of the list.
+
+
+    <service>
+        <role>WEBHDFS</role>
+        <url>http://{host1}:50070/webhdfs</url>
+        <url>http://{host2}:50070/webhdfs</url>
+    </service>
+    
+
+
+
+
+

Added: knox/trunk/books/0.7.0/service_yarn.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_yarn.md?rev=1681785&view=auto
==============================================================================
--- knox/trunk/books/0.7.0/service_yarn.md (added)
+++ knox/trunk/books/0.7.0/service_yarn.md Tue May 26 16:07:07 2015
@@ -0,0 +1,125 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### Yarn ###
+
+Knox provides gateway functionality for the REST APIs of the ResourceManager. The ResourceManager REST APIs allow the
+user to get information about the cluster: status of the cluster, metrics on the cluster, scheduler information,
+information about nodes in the cluster, and information about applications on the cluster. Also, as of Hadoop version
+2.5.0, the user can submit a new application as well as kill it (or get its state) using the 'Writable' APIs.
+
+The documentation for these APIs can be found here:
+
+http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html
+
+To enable this functionality, a topology file needs to have the following configuration:
+
+
+    <service>
+        <role>RESOURCEMANAGER</role>
+        <url>http://{yarn-host}:{yarn-port}/ws</url>
+    </service>
+
+The default Resource Manager HTTP port is 8088. If it is configured to use some other port, that configuration can be
+found in yarn-site.xml under the property 'yarn.resourcemanager.webapp.address'.
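+
+For reference, the corresponding yarn-site.xml entry looks like the following; the host name below is illustrative.
+
+    <property>
+        <name>yarn.resourcemanager.webapp.address</name>
+        <value>sandbox.hortonworks.com:8088</value>
+    </property>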
+
+#### Yarn URL Mapping ####
+
+For Yarn URLs, the mapping of Knox Gateway accessible URLs to direct Yarn URLs is the following.
+
+| ------- | ------------------------------------------------------------------------------------- |
+| Gateway | `https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/resourcemanager` |
+| Cluster | `http://{yarn-host}:{yarn-port}/ws`                                       |
+
+
+#### Yarn Examples via cURL
+
+Some of the calls that can be made are listed below, with examples using curl.
+
+    # 0. Getting cluster info
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster'
+    
+    # 1. Getting cluster metrics
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/metrics'
+    
+    To get the same information in an xml format
+    
+    curl -ikv -u guest:guest-password -H Accept:application/xml -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/metrics'
+    
+    # 2. Getting scheduler information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/scheduler'
+    
+    # 3. Getting all the applications listed and their information
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps'
+    
+    # 4. Getting applications statistics
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/appstatistics'
+    
+    Also query params can be used as below to filter the results
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/appstatistics?states=accepted,running,finished&applicationTypes=mapreduce'
+    
+    # 5. To get a specific application (please note, replace the application id with a real value)
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}'
+    
+    # 6. To get the attempts made for a particular application
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}/appattempts'
+    
+    # 7. To get information about the various nodes
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/nodes'
+    
+    Also to get a specific node, use an id obtained in the response from above (the node id is scrambled) and issue the following
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/nodes/{node_id}'
+    
+    # 8. To create a new Application
+    
+    curl -ikv -u guest:guest-password -X POST 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/new-application'
+    
+    An application id is returned from the request above and this can be used to submit an application.
+    
+    # 9. To submit an application, put together a request containing the application id received in the
+    # above response (please refer to the Yarn REST API documentation). Here the request is saved in a
+    # file called request.json.
+    
+    curl -ikv -u guest:guest-password -T request.json -H Content-Type:application/json -X POST 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps'
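+    
+    # A minimal request.json sketch; the field values below are illustrative and the full schema is in the
+    # ResourceManager REST API documentation linked above.
+    
+    {
+      "application-id" : "{application_id}",
+      "application-name" : "example-app",
+      "am-container-spec" : { "commands" : { "command" : "sleep 60" } },
+      "resource" : { "memory" : 1024, "vCores" : 1 },
+      "application-type" : "YARN"
+    }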
+    
+    # 10. To get application state
+    
+    curl -ikv -u guest:guest-password -X GET 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}/state'
+    
+    # 11. To kill an application that is running, issue the command below with the application id of the
+    # application that is to be killed. The contents of the state-killed.json file are:
+    
+    {
+      "state":"KILLED"
+    }
+    
+    curl -ikv -u guest:guest-password -H Content-Type:application/json -X PUT -T state-killed.json 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/apps/{application_id}/state'
+

Added: knox/trunk/books/0.7.0/x-forwarded-headers.md
URL: http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/x-forwarded-headers.md?rev=1681785&view=auto
==============================================================================
--- knox/trunk/books/0.7.0/x-forwarded-headers.md (added)
+++ knox/trunk/books/0.7.0/x-forwarded-headers.md Tue May 26 16:07:07 2015
@@ -0,0 +1,76 @@
+<!---
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--->
+
+### X-Forwarded-* Headers Support ###
+Out-of-the-box Knox provides support for some X-Forwarded-* headers through the use of a Servlet Filter. Specifically the
+headers handled/populated by Knox are:
+
+- X-Forwarded-For
+- X-Forwarded-Proto
+- X-Forwarded-Port
+- X-Forwarded-Host
+- X-Forwarded-Server
+- X-Forwarded-Context
+
+This functionality can be turned off by a configuration setting in the file gateway-site.xml and redeploying the
+necessary topology/topologies.
+
+The setting (under the 'configuration' tag) is:
+
+    <property>
+        <name>gateway.xforwarded.enabled</name>
+        <value>false</value>
+    </property>
+
+If this setting is absent, X-Forwarded-* header support is on by default; in other words,
+'gateway.xforwarded.enabled' defaults to 'true'.
+
+
+#### Header population ####
+
+The following are the various rules for population of these headers:
+
+##### X-Forwarded-For #####
+
+This header represents a list of client IP addresses. If the header is already present, Knox appends the client's IP
+address, as Knox sees it, to the end of the comma-separated list.
+
+##### X-Forwarded-Proto #####
+
+The protocol used in the client request. If this header is passed into Knox its value is maintained, otherwise Knox will
+populate the header with the value 'https' if the request is a secure one or 'http' otherwise.
+
+##### X-Forwarded-Port #####
+
+The port used in the client request. If this header is passed into Knox its value is maintained, otherwise Knox will
+populate the header with the port on which the request came into Knox.
+
+##### X-Forwarded-Host #####
+
+Represents the original host requested by the client in the Host HTTP request header. The value passed into Knox is maintained
+by Knox. If no value is present, Knox populates the header with the value of the HTTP Host header.
+
+##### X-Forwarded-Server #####
+
+The hostname of the server Knox is running on.
+
+##### X-Forwarded-Context #####
+
+This header value contains the context path of the request to Knox.
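+
+As an illustration, a service behind the gateway might therefore receive headers along the lines of the following; all values are hypothetical and depend on the client, the gateway host and the topology.
+
+    X-Forwarded-For: 192.168.56.1
+    X-Forwarded-Proto: https
+    X-Forwarded-Port: 8443
+    X-Forwarded-Host: localhost:8443
+    X-Forwarded-Server: knox.example.com
+    X-Forwarded-Context: /gateway/sandbox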
+
+
+

Modified: knox/trunk/build.xml
URL: http://svn.apache.org/viewvc/knox/trunk/build.xml?rev=1681785&r1=1681784&r2=1681785&view=diff
==============================================================================
--- knox/trunk/build.xml (original)
+++ knox/trunk/build.xml Tue May 26 16:07:07 2015
@@ -33,6 +33,7 @@
     <property name="book-0-5-0-dir" value="${book-target}/${gateway-artifact}-0-5-0"/>
     <property name="book-0-5-0-file" value="${book-0-5-0-dir}/${gateway-artifact}-0-5-0.html"/>
     <property name="book-0-6-0-dir" value="${book-target}/${gateway-artifact}-0-6-0"/>
+    <property name="book-0-7-0-dir" value="${book-target}/${gateway-artifact}-0-7-0"/>
 
     <property name="svn.release.path" value="https://dist.apache.org/repos/dist/release/incubator/${gateway-project}" />
     <property name="svn.staging.path" value="https://dist.apache.org/repos/dist/dev/incubator/${gateway-project}" />
@@ -81,7 +82,7 @@
     </target>
 
     <target name="books" depends="markbook,_books"/>
-    <target name="_books" depends="_book-0-3-0,_book-0-4-0,_book-0-5-0,_book-0-6-0"/>
+    <target name="_books" depends="_book-0-3-0,_book-0-4-0,_book-0-5-0,_book-0-6-0,_book-0-7-0"/>
     <target name="_book-0-3-0" depends="init">
         <delete dir="${book-target}/${gateway-artifact}-0-3-0" includes="**/*.html,**/*.css,**/*.png"/>
         <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
@@ -134,6 +135,20 @@
             <fileset dir="books/static"/>
         </copy>
     </target>
+    <target name="_book-0-7-0" depends="init">
+        <delete dir="${book-target}/${gateway-artifact}-0-7-0" includes="**/*.html,**/*.css,**/*.png"/>
+        <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
+            <arg value="-i"/><arg value="books/0.7.0/book.md"/>
+            <arg value="-o"/><arg value="${book-0-7-0-dir}/user-guide.html"/>
+        </java>
+        <java jar="markbook/target/markbook.jar" fork="true" failonerror="true">
+            <arg value="-i"/><arg value="books/0.7.0/dev-guide/book.md"/>
+            <arg value="-o"/><arg value="${book-0-7-0-dir}/dev-guide.html"/>
+        </java>
+        <copy todir="${book-target}/${gateway-artifact}-0-7-0">
+            <fileset dir="books/static"/>
+        </copy>
+    </target>
 
     <target name="markbook" depends="init" description="Build and package markbook tool.">
         <exec executable="${mvn.cmd}">


