knox-commits mailing list archives

From kmin...@apache.org
Subject [2/2] git commit: Removed site source from project; it can now be found in Subversion at https://svn.apache.org/repos/asf/incubator/knox. See the wiki for the site maintenance process: https://cwiki.apache.org/confluence/display/KNOX/Site+Maintenance
Date Wed, 27 Mar 2013 16:05:56 GMT
Removed site source from project; it can now be found in Subversion at https://svn.apache.org/repos/asf/incubator/knox.  See the wiki for the site maintenance process: https://cwiki.apache.org/confluence/display/KNOX/Site+Maintenance


Project: http://git-wip-us.apache.org/repos/asf/incubator-knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-knox/commit/b83a1df3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-knox/tree/b83a1df3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-knox/diff/b83a1df3

Branch: refs/heads/master
Commit: b83a1df394c5f258d9ad8314e8c66a92538793ab
Parents: b15a65a
Author: Kevin Minder <kevin.minder@hortonworks.com>
Authored: Wed Mar 27 12:05:51 2013 -0400
Committer: Kevin Minder <kevin.minder@hortonworks.com>
Committed: Wed Mar 27 12:05:51 2013 -0400

----------------------------------------------------------------------
 gateway-site/pom.xml                               |  240 ------
 gateway-site/src/site/markdown/build-process.md    |   42 -
 gateway-site/src/site/markdown/client.md.vm        |  627 ---------------
 .../src/site/markdown/contribute-process.md        |   41 -
 gateway-site/src/site/markdown/examples.md.vm      |  375 ---------
 .../src/site/markdown/getting-started.md.vm        |  364 ---------
 gateway-site/src/site/markdown/index.md            |   36 -
 gateway-site/src/site/markdown/news.md             |   34 -
 gateway-site/src/site/markdown/privacy-policy.md   |   43 -
 gateway-site/src/site/markdown/release-0-2-0.md    |   35 -
 gateway-site/src/site/markdown/release-process.md  |   45 -
 gateway-site/src/site/markdown/roadmap-0-3-0.md    |   33 -
 gateway-site/src/site/markdown/sandbox.md          |   62 --
 gateway-site/src/site/markdown/site-process.md     |   42 -
 gateway-site/src/site/markdown/template.md         |   35 -
 .../resources/images/apache-incubator-logo.png     |  Bin 8626 -> 0 bytes
 .../src/site/resources/images/apache-logo.gif      |  Bin 5866 -> 0 bytes
 gateway-site/src/site/site.xml                     |  144 ----
 pom.xml                                            |    1 -
 19 files changed, 0 insertions(+), 2199 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-knox/blob/b83a1df3/gateway-site/pom.xml
----------------------------------------------------------------------
diff --git a/gateway-site/pom.xml b/gateway-site/pom.xml
deleted file mode 100644
index 18e9934..0000000
--- a/gateway-site/pom.xml
+++ /dev/null
@@ -1,240 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>gateway</artifactId>
-        <version>0.2.0-SNAPSHOT</version>
-    </parent>
-    <artifactId>gateway-site</artifactId>
-
-    <name>Apache Knox Gateway</name>
-    <description>Knox is a gateway for Hadoop clusters.</description>
-    <url>http://incubator.apache.org/knox</url>
-
-    <properties>
-        <HHH>###</HHH>
-        <HHHH>####</HHHH>
-        <HHHHH>#####</HHHHH>
-    </properties>
-
-    <licenses>
-        <license>
-            <name>The Apache Software License, Version 2.0</name>
-            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
-            <distribution>repo</distribution>
-        </license>
-    </licenses>
-
-    <organization>
-        <name>Apache Software Foundation</name>
-        <url>http://www.apache.org</url>
-    </organization>
-
-    <scm>
-        <connection>scm:svn:http://svn.apache.org/repos/asf/incubator/knox</connection>
-        <developerConnection>scm:svn:https://svn.apache.org/repos/asf/incubator/knox</developerConnection>
-        <tag>HEAD</tag>
-        <url>http://svn.apache.org/repos/asf/incubator/knox</url>
-    </scm>
-
-    <issueManagement>
-        <system>Jira</system>
-        <url>http://issues.apache.org/jira/browse/KNOX</url>
-    </issueManagement>
-
-    <mailingLists>
-        <mailingList>
-            <name>User list</name>
-            <subscribe>mailto:user-subscribe@knox.incubator.apache.org</subscribe>
-            <unsubscribe>mailto:user-unsubscribe@knox.incubator.apache.org</unsubscribe>
-            <post>mailto:user@knox.incubator.apache.org</post>
-            <archive>http://mail-archives.apache.org/mod_mbox/knox-user/</archive>
-        </mailingList>
-        <mailingList>
-            <name>Development list</name>
-            <subscribe>mailto:dev-subscribe@knox.incubator.apache.org</subscribe>
-            <unsubscribe>mailto:dev-unsubscribe@knox.incubator.apache.org</unsubscribe>
-            <post>mailto:dev@knox.incubator.apache.org</post>
-            <archive>http://mail-archives.apache.org/mod_mbox/knox-dev/</archive>
-        </mailingList>
-        <mailingList>
-            <name>Commit list</name>
-            <subscribe>mailto:commit-subscribe@knox.incubator.apache.org</subscribe>
-            <unsubscribe>mailto:commit-unsubscribe@knox.incubator.apache.org</unsubscribe>
-            <post>mailto:commit@knox.incubator.apache.org</post>
-            <archive>http://mail-archives.apache.org/mod_mbox/knox-commit/</archive>
-        </mailingList>
-    </mailingLists>
-
-    <developers>
-        <developer>
-            <id>jspeidel</id>
-            <name>John Speidel</name>
-            <email>jspeidel@apache.org</email>
-            <timezone>-5</timezone>
-            <roles>
-                <role>PMC</role>
-            </roles>
-            <organization>
-                Hortonworks
-            </organization>
-        </developer>
-        <developer>
-            <id>kminder</id>
-            <name>Kevin Minder</name>
-            <email>kminder@apache.org</email>
-            <timezone>-5</timezone>
-            <roles>
-                <role>PMC</role>
-            </roles>
-            <organization>
-                Hortonworks
-            </organization>
-        </developer>
-        <developer>
-            <id>lmccay</id>
-            <name>Larry McCay</name>
-            <email>lmccay@apache.org</email>
-            <timezone>-5</timezone>
-            <roles>
-                <role>PMC</role>
-            </roles>
-            <organization>
-                Hortonworks
-            </organization>
-        </developer>
-        <developer>
-            <id>smohanty</id>
-            <name>Sumit Mohanty</name>
-            <email>smohanty@apache.org</email>
-            <timezone>-8</timezone>
-            <roles>
-                <role>PMC</role>
-            </roles>
-            <organization>
-                Hortonworks
-            </organization>
-        </developer>
-        <developer>
-            <id>tbeerbower</id>
-            <name>Thomas Beerbower</name>
-            <email>tbeerbower@apache.org</email>
-            <timezone>-5</timezone>
-            <roles>
-                <role>PMC</role>
-            </roles>
-            <organization>
-                Hortonworks
-            </organization>
-        </developer>
-        <developer>
-            <id>venkatesh</id>
-            <name>Venkatesh Seetharam</name>
-            <email>venkatesh@apache.org</email>
-            <timezone>-8</timezone>
-            <roles>
-                <role>PMC</role>
-            </roles>
-            <organization>
-                Hortonworks
-            </organization>
-        </developer>
-    </developers>
-
-    <build>
-        <resources>
-            <resource>
-                <directory>src/main/resources</directory>
-                <filtering>true</filtering>
-            </resource>
-        </resources>
-        <pluginManagement>
-            <plugins>
-                <plugin>
-                    <groupId>org.apache.maven.plugins</groupId>
-                    <artifactId>maven-resources-plugin</artifactId>
-                    <version>2.4.3</version>
-                    <configuration>
-                        <encoding>UTF-8</encoding>
-                    </configuration>
-                </plugin>
-                <plugin>
-                    <groupId>org.apache.maven.plugins</groupId>
-                    <artifactId>maven-site-plugin</artifactId>
-                    <version>3.0</version>
-                    <dependencies>
-                        <dependency>
-                            <groupId>org.apache.maven.doxia</groupId>
-                            <artifactId>doxia-module-markdown</artifactId>
-                            <version>1.3</version>
-                        </dependency>
-                    </dependencies>
-                    <configuration>
-                        <inputEncoding>UTF-8</inputEncoding>
-                        <outputEncoding>UTF-8</outputEncoding>
-                        <!--
-                        <outputDirectory>./target</outputDirectory>
-                        -->
-                    </configuration>
-                </plugin>
-            </plugins>
-        </pluginManagement>
-    </build>
-
-    <reporting>
-        <excludeDefaults>true</excludeDefaults>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-project-info-reports-plugin</artifactId>
-                <version>2.4</version>
-                <reportSets>
-                    <reportSet>
-                        <reports>
-                            <report>project-team</report>
-                            <report>mailing-list</report>
-                            <report>issue-tracking</report>
-                            <report>license</report>
-                            <!-- for now
-                                            <report>modules</report>
-                                            <report>dependencies</report>
-                                            <report>cim</report>
-                                            <report>scm</report>
-                            -->
-                        </reports>
-                    </reportSet>
-                </reportSets>
-            </plugin>
-        </plugins>
-    </reporting>
-
-    <distributionManagement>
-        <site>
-            <id>apache-website</id>
-            <name>Apache website</name>
-            <url>scpexe://people.apache.org/www/incubator.apache.org/knox</url>
-        </site>
-    </distributionManagement>
-
-</project>

http://git-wip-us.apache.org/repos/asf/incubator-knox/blob/b83a1df3/gateway-site/src/site/markdown/build-process.md
----------------------------------------------------------------------
diff --git a/gateway-site/src/site/markdown/build-process.md b/gateway-site/src/site/markdown/build-process.md
deleted file mode 100644
index c7272dc..0000000
--- a/gateway-site/src/site/markdown/build-process.md
+++ /dev/null
@@ -1,42 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Build Process
--------------
-The following process can be used to build the project without an existing repository clone.
-
-    git clone https://git-wip-us.apache.org/repos/asf/incubator-knox.git knox
-    mvn clean install
-
-If the repository has already been cloned the following process should be used.
-
-    git pull
-    mvn clean install
-
-
-Disclaimer
-----------
-The Apache Knox Gateway is an effort undergoing incubation at the
-Apache Software Foundation (ASF), sponsored by the Apache Incubator PMC.
-
-Incubation is required of all newly accepted projects until a further review
-indicates that the infrastructure, communications, and decision making process
-have stabilized in a manner consistent with other successful ASF projects.
-
-While incubation status is not necessarily a reflection of the completeness
-or stability of the code, it does indicate that the project has yet to be
-fully endorsed by the ASF.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-knox/blob/b83a1df3/gateway-site/src/site/markdown/client.md.vm
----------------------------------------------------------------------
diff --git a/gateway-site/src/site/markdown/client.md.vm b/gateway-site/src/site/markdown/client.md.vm
deleted file mode 100644
index 2ee6f3e..0000000
--- a/gateway-site/src/site/markdown/client.md.vm
+++ /dev/null
@@ -1,627 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Introduction
-------------
-Hadoop requires a client that can be used to interact remotely with the services provided by a Hadoop cluster.
-This will also be true when using the Apache Knox Gateway to provide perimeter security and centralized access for these services.
-The two primary existing clients for Hadoop are the CLI (i.e. Command Line Interface, hadoop) and HUE (i.e. Hadoop User Environment).
-For several reasons, however, neither of these clients can *currently* be used to access Hadoop services via the Apache Knox Gateway.
-
-This led to thinking about a very simple client that could help people use and evaluate the gateway.
-The list below outlines the general requirements for such a client.
-
-1. Promote the evaluation and adoption of the Apache Knox Gateway
-2. Simple to deploy and use on data worker desktops to access remote Hadoop clusters
-3. Simple to extend with new commands both by other Hadoop projects and by the end user
-4. Support the notion of an SSO session for multiple Hadoop interactions
-5. Support the multiple authentication and federation token capabilities of the Apache Knox Gateway
-6. Promote the use of REST APIs as the dominant remote client mechanism for Hadoop services
-7. Promote the sense of Hadoop as a single unified product
-8. Align with the Apache Knox Gateway's overall goals for security
-
-The result is a very simple DSL ([Domain Specific Language][1]) of sorts that is used via [Groovy][2] scripts.
-Here is an example of a command that copies a file from the local file system to HDFS.
-*Note: The variables session, localFile and remoteFile are assumed to be defined.*
-
-    Hdfs.put( session ).file( localFile ).to( remoteFile ).now()
-
-***This work is very early in development but is also very useful in its current state.***
-***We are very interested in receiving feedback about how to improve this feature and the DSL in particular.***
-
-A note of thanks to [REST-assured][3] which provides a [Fluent interface][4] style DSL for testing REST services.
-It served as the initial inspiration for the creation of this DSL.
-
-
-Assumptions
------------
-This document assumes a few things about your environment in order to simplify the examples.
-
-1. The JVM is executable as simply java.
-2. The Apache Knox Gateway is installed and functional.
-3. The example commands are executed within the context of the GATEWAY_HOME current directory.
-The GATEWAY_HOME directory is the directory within the Apache Knox Gateway installation that contains the README file and the bin, conf and deployments directories.
-4. A few examples require the use of commands from a standard Groovy installation.  These examples are optional but to try them you will need Groovy [installed][15].
-
-
-Usage
------
-The DSL requires a shell to interpret the Groovy script.
-The shell can either be used interactively or to execute a script file.
-To simplify use, the distribution contains an embedded version of the Groovy shell.
-
-The shell can be run interactively.
-
-    java -jar bin/shell.jar
-
-The shell can also be used to execute a script by passing a single filename argument.
-
-    java -jar bin/shell.jar sample/SmokeTestJob.groovy
-
-When running interactively it may be helpful to reduce some of the output generated by the shell console.
-Use the following commands in the interactive shell to reduce that output.
-This only needs to be done once as these preferences are persisted.
-
-    set verbosity QUIET
-    set show-last-result false
-
-Also when running interactively use the `exit` command to terminate the shell.
-Using `^C` to exit can sometimes leave the parent shell in a problematic state.
-
-
-Examples
---------
-Once the shell has been launched, the DSL can be used to interact with the gateway and Hadoop.
-Below is a very simple example of an interactive shell session to upload a file to HDFS.
-
-    java -jar bin/shell.jar
-    knox:000> hadoop = Hadoop.login( "https://localhost:8443/gateway/sample", "hdfs", "hdfs-password" )
-    knox:000> Hdfs.put( hadoop ).file( "README" ).to( "/tmp/example/README" ).now()
-
-The `knox:000>` in the example above is the prompt from the embedded Groovy console.
-If your output doesn't look like this you may need to set the verbosity and show-last-result preferences as described above in the Usage section.
-
-Without using some other tool to browse HDFS it is impossible to tell that this command did anything.
-Execute this to get a bit more feedback.
-
-    knox:000> println "Status=" + Hdfs.put( hadoop ).file( "README" ).to( "/tmp/example/README2" ).now().statusCode
-    Status=201
-
-Notice that a different filename is used for the destination.
-Without this an error would have resulted.
-Of course the DSL also provides a command to list the contents of a directory.
-
-    knox:000> println Hdfs.ls( hadoop ).dir( "/tmp/example" ).now().string
-    {"FileStatuses":{"FileStatus":[{"accessTime":1363711366977,"blockSize":134217728,"group":"hdfs","length":19395,"modificationTime":1363711366977,"owner":"hdfs","pathSuffix":"README","permission":"644","replication":1,"type":"FILE"},{"accessTime":1363711375617,"blockSize":134217728,"group":"hdfs","length":19395,"modificationTime":1363711375617,"owner":"hdfs","pathSuffix":"README2","permission":"644","replication":1,"type":"FILE"}]}}
-
-It is a design decision of the DSL to not provide type safe classes for various request and response payloads.
-Doing so would provide an undesirable coupling between the DSL and the service implementation.
-It also would make adding new commands much more difficult.
-See the Groovy section below for a variety of capabilities and tools for working with JSON and XML to make this easy.
-The example below shows the use of JsonSlurper and GPath to extract content from a JSON response.
-
-    knox:000> import groovy.json.JsonSlurper
-    knox:000> text = Hdfs.ls( hadoop ).dir( "/tmp/example" ).now().string
-    knox:000> json = (new JsonSlurper()).parseText( text )
-    knox:000> println json.FileStatuses.FileStatus.pathSuffix
-    [README, README2]
-
-*In the future, "built-in" methods to slurp JSON and XML may be added to make this a bit easier.*
-*This would allow for this type of single line interaction.*
-`println Hdfs.ls(hadoop).dir("/tmp").now().json().FileStatuses.FileStatus.pathSuffix`
-
-A shell session should always be ended by shutting down the session.
-The examples above do not touch on it but the DSL supports the simple execution of commands asynchronously.
-The shutdown command attempts to ensure that all asynchronous commands have completed before exiting the shell.
-
-    knox:000> hadoop.shutdown()
-    knox:000> exit
-
-All of the commands above could have been combined into a script file and executed as a single line.
-
-    java -jar bin/shell.jar samples/Example.groovy
-
-This script file is available in the distribution but, for convenience, its content is included here.
-
-    import org.apache.hadoop.gateway.shell.Hadoop
-    import org.apache.hadoop.gateway.shell.hdfs.Hdfs
-    import groovy.json.JsonSlurper
-
-    gateway = "https://localhost:8443/gateway/sample"
-    username = "mapred"
-    password = "mapred-password"
-    dataFile = "README"
-
-    hadoop = Hadoop.login( gateway, username, password )
-    Hdfs.rm( hadoop ).file( "/tmp/example" ).recursive().now()
-    Hdfs.put( hadoop ).file( dataFile ).to( "/tmp/example/README" ).now()
-    text = Hdfs.ls( hadoop ).dir( "/tmp/example" ).now().string
-    json = (new JsonSlurper()).parseText( text )
-    println json.FileStatuses.FileStatus.pathSuffix
-    hadoop.shutdown()
-
-Notice the Hdfs.rm command.  This is included simply to ensure that the script can be rerun.
-Without this an error would result the second time it is run.
-
-
-Constructs
-----------
-There are three primary constructs that need to be understood in order to use the DSL.
-
-${HHH} Hadoop
-This construct encapsulates the client side session state that will be shared between all command invocations.
-In particular it will simplify the management of any tokens that need to be presented with each command invocation.
-It also manages a thread pool that is used by all asynchronous commands, which is why it is important to call one of the shutdown methods.
-
-The syntax associated with this is expected to change; in particular, we expect that credentials will not need to be provided to the gateway.
-Rather it is expected that some form of access token will be used to initialize the session.
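-
-For example (a sketch only; the timed shutdown variant shown here is taken from the sample scripts in the distribution):
-
-    import org.apache.hadoop.gateway.shell.Hadoop
-    import static java.util.concurrent.TimeUnit.SECONDS
-
-    hadoop = Hadoop.login( "https://localhost:8443/gateway/sample", "hdfs", "hdfs-password" )
-    // ... issue commands using the session ...
-    hadoop.shutdown( 10, SECONDS )   // wait up to 10 seconds for asynchronous commands to complete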
-
-${HHH} Services
-Services are the primary extension point for adding new suites of commands.
-The built in examples are: Hdfs, Job and Workflow.
-The desire for extensibility is the reason for the slightly awkward Hdfs.ls(hadoop) syntax.
-Certainly something more like hadoop.hdfs().ls() would have been preferred but this would prevent adding new commands easily.
-At a minimum it would result in extension commands with a different syntax from the "built-in" commands.
-
-The service objects essentially function as a factory for a suite of commands.
-
-${HHH} Commands
-Commands provide the behavior of the DSL.
-They typically follow a Fluent interface style in order to allow for single line commands.
-There are really three parts to each command: Request, Invocation, Response
-
-${HHHH} Request
-The request is populated by all of the methods between the "verb" method and the "invoke" method.
-For example, in Hdfs.rm(hadoop).file(dir).now() the request is populated between the "verb" method rm() and the "invoke" method now().
-
-${HHHH} Invocation
-The invocation method controls how the request is invoked.
-Currently, synchronous and asynchronous invocation are supported.
-The now() method executes the request and returns the result immediately.
-The later() method submits the request to be executed later and returns a future from which the result can be retrieved.
-In addition, the later() invocation method can optionally be provided a closure to execute when the request is complete.
-See the Futures and Closures sections below for additional detail and examples.
-
-${HHHH} Response
-The response contains the results of the invocation of the request.
-In most cases the response is a thin wrapper over the HTTP response.
-In fact many commands will share a single BasicResponse type that only provides a few simple methods.
-
-    public int getStatusCode()
-    public long getContentLength()
-    public String getContentType()
-    public String getContentEncoding()
-    public InputStream getStream()
-    public String getString()
-    public byte[] getBytes()
-    public void close()
-
-Thanks to Groovy these methods can be accessed as attributes.
-In some of the examples above, for instance, the statusCode attribute was retrieved.
-
-    println Hdfs.rm(hadoop).file(dir).now().statusCode
-
-Groovy will invoke the getStatusCode method to retrieve the statusCode attribute.
-
-The three methods getStream(), getBytes() and getString() deserve special attention.
-Care must be taken that the HTTP body is read only once.
-Therefore one of these methods (and only one) must be called once and only once.
-Calling one of these more than once will cause an error.
-Failing to call one of these methods once will result in lingering open HTTP connections.
-The close() method may be used if the caller is not interested in reading the result body.
-Most commands that do not expect a response body will call close implicitly.
-If the body is retrieved via getBytes() or getString(), the close() method need not be called.
-When using getStream(), care must be taken to consume the entire body otherwise lingering open HTTP connections will result.
-The close() method may be called after reading the body partially to discard the remainder of the body.
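-
-For example, a minimal sketch (assuming the /tmp/example/README file created in the earlier examples) that reads only part of the body and then discards the remainder:
-
-    // Peek at the first line of the remote file, then discard the rest of the body.
-    response = Hdfs.get( hadoop ).from( "/tmp/example/README" ).now()
-    reader = new BufferedReader( new InputStreamReader( response.stream ) )
-    println reader.readLine()
-    response.close()   // discards the remainder of the body and releases the connection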
-
-
-Services
---------
-There are three basic DSL services and commands bundled with the shell.
-
-${HHH} HDFS
-Provides basic HDFS commands.
-***Using these DSL commands requires that WebHDFS be running in the Hadoop cluster.***
-
-${HHH} Jobs (Templeton/WebHCat)
-Provides basic job submission and status commands.
-***Using these DSL commands requires that Templeton/WebHCat be running in the Hadoop cluster.***
-
-${HHH} Workflow (Oozie)
-Provides basic workflow submission and status commands.
-***Using these DSL commands requires that Oozie be running in the Hadoop cluster.***
-
-
-HDFS Commands (WebHDFS)
------------------------
-${HHH} ls() - List the contents of an HDFS directory.
-* Request
-    * dir (String) - The HDFS directory to list.
-* Response
-    * BasicResponse
-* Example
-    * `Hdfs.ls(hadoop).dir("/").now()`
-
-${HHH} rm() - Remove an HDFS file or directory.
-* Request
-    * file (String) - The HDFS file or directory to remove.
-    * recursive (Boolean) - If the file is a directory also remove any contained files and directories. Optional: default=false
-* Response
-    * EmptyResponse - Implicit close().
-* Example
-    * `Hdfs.rm(hadoop).file("/tmp/example").recursive().now()`
-
-${HHH} put() - Copy a file from the local file system to HDFS.
-* Request
-    * text (String) - The text to copy to the remote file.
-    * file (String) - The name of a local file to copy to the remote file.
-    * to (String) - The name of the remote file to create.
-* Response
-    * EmptyResponse - Implicit close().
-* Example
-    * `Hdfs.put(hadoop).file("localFile").to("/tmp/example/remoteFile").now()`
-
-${HHH} get() - Copy a file from HDFS to the local file system.
-* Request
-    * file (String) - The name of the local file to create from the remote file.  If this isn't specified the file content must be read from the response.
-    * from (String) - The name of the remote file to copy.
-* Response
-    * BasicResponse
-* Example
-    * `Hdfs.get(hadoop).file("localFile").from("/tmp/example/remoteFile").now()`
-
-${HHH} mkdir() - Create a directory in HDFS.
-* Request
-    * dir (String) - The name of the remote directory to create.
-    * perm (String) - The permissions to create the remote directory with.  Optional: default="777"
-* Response
-    * EmptyResponse - Implicit close().
-* Example
-    * `Hdfs.mkdir(hadoop).dir("/tmp/example").perm("777").now()`
-
-
-Job Commands (WebHCat/Templeton)
---------------------------------
-${HHH} submitJava() - Submit a Java MapReduce job.
-* Request
-    * jar (String) - The remote file name of the JAR containing the app to execute.
-    * app (String) - The app name to execute.  This is, for example, wordcount, not the class name.
-    * input (String) - The remote directory name to use as input for the job.
-    * output (String) - The remote directory name to store output from the job.
-* Response
-    * jobId : String - The job ID of the submitted job.  Consumes body.
-* Example
-    * `Job.submitJava(hadoop).jar(remoteJarName).app(appName).input(remoteInputDir).output(remoteOutputDir).now().jobId`
-
-${HHH} submitPig() - Submit a Pig job.
-* Request
-    * file (String) - The remote file name of the pig script.
-    * arg (String) - An argument to pass to the script.
-    * statusDir (String) - The remote directory to store status output.
-* Response
-    * jobId : String - The job ID of the submitted job.  Consumes body.
-* Example
-    * `Job.submitPig(hadoop).file(remotePigFileName).arg("-v").statusDir(remoteStatusDir).now()`
-
-${HHH} submitHive() - Submit a Hive job.
-* Request
-    * file (String) - The remote file name of the hive script.
-    * arg (String) - An argument to pass to the script.
-    * statusDir (String) - The remote directory to store status output.
-* Response
-    * jobId : String - The job ID of the submitted job.  Consumes body.
-* Example
-    * `Job.submitHive(hadoop).file(remoteHiveFileName).arg("-v").statusDir(remoteStatusDir).now()`
-
-${HHH} queryQueue() - Return a list of all job IDs registered to the user.
-* Request
-    * No request parameters.
-* Response
-    * BasicResponse
-* Example
-    * `Job.queryQueue(hadoop).now().string`
-
-${HHH} queryStatus() - Check the status of a job and get related job information given its job ID.
-* Request
-    * jobId (String) - The job ID to check. This is the ID received when the job was created.
-* Response
-    * BasicResponse
-* Example
-    * `Job.queryStatus(hadoop).jobId(jobId).now().string`
-
-
-Workflow Commands (Oozie)
--------------------------
-${HHH} submit() - Submit a workflow job.
-* Request
-    * text (String) - XML formatted workflow configuration string.
-    * file (String) - A filename containing XML formatted workflow configuration.
-    * action (String) - The initial action to take on the job.  Optional: Default is "start".
-* Response
-    * BasicResponse
-* Example
-    * `Workflow.submit(hadoop).file(localFile).action("start").now()`
-
-${HHH} status() - Query the status of a workflow job.
-* Request
-    * jobId (String) - The job ID to check. This is the ID received when the job was created.
-* Response
-    * BasicResponse
-* Example
-    * `Workflow.status(hadoop).jobId(jobId).now().string`
-
-
-Futures
--------
-The DSL supports the ability to invoke commands asynchronously via the later() invocation method.
-The object returned from the later() method is a java.util.concurrent.Future parametrized with the response type of the command.
-This is an example of how to asynchronously put a file to HDFS.
-
-    future = Hdfs.put(hadoop).file("README").to("tmp/example/README").later()
-    println future.get().statusCode
-
-The future.get() method will block until the asynchronous command is complete.
-To illustrate the usefulness of this, however, multiple concurrent commands are required.
-
-    readmeFuture = Hdfs.put(hadoop).file("README").to("tmp/example/README").later()
-    licenseFuture = Hdfs.put(hadoop).file("LICENSE").to("tmp/example/LICENSE").later()
-    hadoop.waitFor( readmeFuture, licenseFuture )
-    println readmeFuture.get().statusCode
-    println licenseFuture.get().statusCode
-
-The hadoop.waitFor() method will wait for one or more asynchronous commands to complete.
-
-
-Closures
---------
-Futures alone only provide asynchronous invocation of the command.
-What if some processing should also occur asynchronously once the command is complete?
-Support for this is provided by closures.
-Closures are blocks of code that are passed into the later() invocation method.
-In Groovy these are contained within {} immediately after a method.
-These blocks of code are executed once the asynchronous command is complete.
-
-    Hdfs.put(hadoop).file("README").to("tmp/example/README").later(){ println it.statusCode }
-
-In this example the put() command is executed on a separate thread and once complete the { println it.statusCode } block is executed on that thread.
-The it variable is automatically populated by Groovy and is a reference to the result that is returned from the future or now() method.
-The future example above can be rewritten to illustrate the use of closures.
-
-    readmeFuture = Hdfs.put(hadoop).file("README").to("tmp/example/README").later() { println it.statusCode }
-    licenseFuture = Hdfs.put(hadoop).file("LICENSE").to("tmp/example/LICENSE").later() { println it.statusCode }
-    hadoop.waitFor( readmeFuture, licenseFuture )
-
-Again, the hadoop.waitFor() method will wait for one or more asynchronous commands to complete.
-
-Extension
----------
-Extensibility is a key design goal of the KnoxShell and DSL.
-There are two ways to provide extended functionality for use with the shell.
-The first is to simply create Groovy scripts that use the DSL to perform a useful task.
-The second is to add new services and commands.
-In order to add new services and commands, new classes must be written in either Groovy or Java and added to the classpath of the shell.
-Fortunately there is a very simple way to add classes and JARs to the shell classpath.
-The first time the shell is executed it will create a configuration file in the same directory as the JAR with the same base name and a `.cfg` extension.
-
-    bin/shell-${gateway-version}.jar
-    bin/shell-${gateway-version}.cfg
-
-That file contains both the main class for the shell as well as a definition of the classpath.
-Currently that file will by default contain the following.
-
-    main.class=org.apache.hadoop.gateway.shell.Shell
-    class.path=../lib; ../lib/*.jar; ../ext; ../ext/*.jar
-
-Therefore, to extend the shell, copy any new service and command classes to the `ext` directory or, if they are packaged within a JAR, copy the JAR to the `ext` directory.
-The `lib` directory is reserved for JARs that may be delivered with the product.
-
-Below are samples for the service and command classes that would need to be written to add new commands to the shell.
-These happen to be Groovy source files but could with very minor changes be Java files.
-The easiest way to add these to the shell is to compile them directly into the `ext` directory.
-*Note: This command depends upon having the Groovy compiler installed and available on the execution path.*
-
-    groovyc -d ext -cp bin/shell-${gateway-version}.jar samples/SampleService.groovy samples/SampleSimpleCommand.groovy samples/SampleComplexCommand.groovy
-
-These source files are available in the samples directory of the distribution but are included here for convenience.
-
-${HHH} Sample Service (Groovy)
-
-    import org.apache.hadoop.gateway.shell.Hadoop
-
-    class SampleService {
-
-      static String PATH = "/namenode/api/v1"
-
-      static SimpleCommand simple( Hadoop hadoop ) {
-        return new SimpleCommand( hadoop )
-      }
-
-      static ComplexCommand.Request complex( Hadoop hadoop ) {
-        return new ComplexCommand.Request( hadoop )
-      }
-
-    }
-
-${HHH} Sample Simple Command (Groovy)
-
-    import org.apache.hadoop.gateway.shell.AbstractRequest
-    import org.apache.hadoop.gateway.shell.BasicResponse
-    import org.apache.hadoop.gateway.shell.Hadoop
-    import org.apache.http.client.methods.HttpGet
-    import org.apache.http.client.utils.URIBuilder
-
-    import java.util.concurrent.Callable
-
-    class SimpleCommand extends AbstractRequest<BasicResponse> {
-
-      SimpleCommand( Hadoop hadoop ) {
-        super( hadoop )
-      }
-
-      private String param
-      SimpleCommand param( String param ) {
-        this.param = param
-        return this
-      }
-
-      @Override
-      protected Callable<BasicResponse> callable() {
-        return new Callable<BasicResponse>() {
-          @Override
-          BasicResponse call() {
-            URIBuilder uri = uri( SampleService.PATH, param )
-            addQueryParam( uri, "op", "LISTSTATUS" )
-            HttpGet get = new HttpGet( uri.build() )
-            return new BasicResponse( execute( get ) )
-          }
-        }
-      }
-
-    }
-
-${HHH} Sample Complex Command (Groovy)
-
-    import com.jayway.jsonpath.JsonPath
-    import org.apache.hadoop.gateway.shell.AbstractRequest
-    import org.apache.hadoop.gateway.shell.BasicResponse
-    import org.apache.hadoop.gateway.shell.Hadoop
-    import org.apache.http.HttpResponse
-    import org.apache.http.client.methods.HttpGet
-    import org.apache.http.client.utils.URIBuilder
-
-    import java.util.concurrent.Callable
-
-    class ComplexCommand {
-
-      static class Request extends AbstractRequest<Response> {
-
-        Request( Hadoop hadoop ) {
-          super( hadoop )
-        }
-
-        private String param;
-        Request param( String param ) {
-          this.param = param;
-          return this;
-        }
-
-        @Override
-        protected Callable<Response> callable() {
-          return new Callable<Response>() {
-            @Override
-            Response call() {
-              URIBuilder uri = uri( SampleService.PATH, param )
-              addQueryParam( uri, "op", "LISTSTATUS" )
-              HttpGet get = new HttpGet( uri.build() )
-              return new Response( execute( get ) )
-            }
-          }
-        }
-
-      }
-
-      static class Response extends BasicResponse {
-
-        Response(HttpResponse response) {
-          super(response)
-        }
-
-        public List<String> getNames() {
-          return JsonPath.read( string, "\$.FileStatuses.FileStatus[*].pathSuffix" )
-        }
-
-      }
-
-    }
-
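-Once compiled into the `ext` directory, the new commands could then be used from the shell much like the built-in ones. A sketch (assuming now() and Groovy attribute access behave as they do for the bundled commands):
-
-    knox:000> println SampleService.simple( hadoop ).param( "/tmp" ).now().string
-    knox:000> println SampleService.complex( hadoop ).param( "/tmp" ).now().names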
-
-Groovy
-------
-The shell included in the distribution is basically an unmodified packaging of the Groovy shell.
-Therefore these commands are functionally equivalent if you have Groovy [installed][15].
-
-    java -jar bin/shell.jar sample/SmokeTestJob.groovy
-    groovy -cp bin/shell.jar sample/SmokeTestJob.groovy
-
-The interactive shell isn't exactly equivalent.
-However the only difference is that the shell-${gateway-version}.jar automatically executes some additional imports that are useful for the KnoxShell DSL.
-So these two sets of commands should be functionally equivalent.
-***However there is currently a class loading issue that prevents the groovysh command from working properly.***
-
-    java -jar bin/shell.jar
-
-    groovysh -cp bin/shell-${gateway-version}.jar
-    import org.apache.hadoop.gateway.shell.Hadoop
-    import org.apache.hadoop.gateway.shell.hdfs.Hdfs
-    import org.apache.hadoop.gateway.shell.job.Job
-    import org.apache.hadoop.gateway.shell.workflow.Workflow
-    import java.util.concurrent.TimeUnit
-
-Alternatively, you can use the Groovy Console which does not appear to have the same class loading issue.
-
-    groovyConsole -cp bin/shell.jar
-
-    import org.apache.hadoop.gateway.shell.Hadoop
-    import org.apache.hadoop.gateway.shell.hdfs.Hdfs
-    import org.apache.hadoop.gateway.shell.job.Job
-    import org.apache.hadoop.gateway.shell.workflow.Workflow
-    import java.util.concurrent.TimeUnit
-
-In addition, because the DSL can be used via standard Groovy, the Groovy integrations in many popular IDEs (e.g. IntelliJ, Eclipse) can also be used.
-This makes it particularly nice to develop and execute scripts to interact with Hadoop.
-The code-completion feature in particular provides immense value.
-All that is required is to add the shell-0.2.0.jar to the project's classpath.
-
-There are a variety of Groovy tools that make it very easy to work with the standard interchange formats (i.e. JSON and XML).
-In Groovy the creation of XML or JSON is typically done via a "builder" and parsing done via a "slurper".
-In addition, once JSON or XML has been "slurped", GPath, an XPath-like feature built into Groovy, can be used to access the data (see the short sketch after the list below).
-* XML
-  * Markup Builder [Overview][5], [API][6]
-  * XML Slurper [Overview][7], [API][8]
-  * XPath [Overview][9], [API][10]
-* JSON
-  * JSON Builder [API][11]
-  * JSON Slurper [API][12]
-  * JSON Path [API][14]
-* GPath [Overview][13]
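-
-The short sketch below (standalone Groovy, independent of the gateway) shows the builder/slurper/GPath combination for both JSON and XML:
-
-    import groovy.json.JsonBuilder
-    import groovy.json.JsonSlurper
-
-    // Build a small JSON document, parse it back and navigate it with GPath.
-    text = new JsonBuilder( [ FileStatuses: [ FileStatus: [ [ pathSuffix: "README" ] ] ] ] ).toString()
-    json = (new JsonSlurper()).parseText( text )
-    println json.FileStatuses.FileStatus.pathSuffix
-
-    // Parse a small XML fragment with XmlSlurper and navigate it the same way.
-    xml = (new XmlSlurper()).parseText( "<dir><file name='README'/></dir>" )
-    println xml.file.@name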
-
-
-Disclaimer
-----------
-The Apache Knox Gateway is an effort undergoing incubation at the
-Apache Software Foundation (ASF), sponsored by the Apache Incubator PMC.
-
-Incubation is required of all newly accepted projects until a further review
-indicates that the infrastructure, communications, and decision making process
-have stabilized in a manner consistent with other successful ASF projects.
-
-While incubation status is not necessarily a reflection of the completeness
-or stability of the code, it does indicate that the project has yet to be
-fully endorsed by the ASF.
-
-[1]: http://en.wikipedia.org/wiki/Domain-specific_language
-[2]: http://groovy.codehaus.org/
-[3]: https://code.google.com/p/rest-assured/
-[4]: http://en.wikipedia.org/wiki/Fluent_interface
-[5]: http://groovy.codehaus.org/Creating+XML+using+Groovy's+MarkupBuilder
-[6]: http://groovy.codehaus.org/api/groovy/xml/MarkupBuilder.html
-[7]: http://groovy.codehaus.org/Reading+XML+using+Groovy's+XmlSlurper
-[8]: http://groovy.codehaus.org/api/groovy/util/XmlSlurper.html
-[9]: http://groovy.codehaus.org/GPath
-[10]: http://docs.oracle.com/javase/1.5.0/docs/api/javax/xml/xpath/XPath.html
-[11]: http://groovy.codehaus.org/gapi/groovy/json/JsonBuilder.html
-[12]: http://groovy.codehaus.org/gapi/groovy/json/JsonSlurper.html
-[13]: http://groovy.codehaus.org/GPath
-[14]: https://code.google.com/p/json-path/
-[15]: http://groovy.codehaus.org/Installing+Groovy
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-knox/blob/b83a1df3/gateway-site/src/site/markdown/contribute-process.md
----------------------------------------------------------------------
diff --git a/gateway-site/src/site/markdown/contribute-process.md b/gateway-site/src/site/markdown/contribute-process.md
deleted file mode 100644
index 47afeeb..0000000
--- a/gateway-site/src/site/markdown/contribute-process.md
+++ /dev/null
@@ -1,41 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Development & Contribute Process
-================================
-
-Non-Member
-----------
-TODO: Here we will describe how to develop and contribute to the project as a non-member.
-
-Member
-------
-TODO: Here we will describe how to develop and contribute to the project as a member.
-
-
-Disclaimer
-----------
-The Apache Knox Gateway is an effort undergoing incubation at the
-Apache Software Foundation (ASF), sponsored by the Apache Incubator PMC.
-
-Incubation is required of all newly accepted projects until a further review
-indicates that the infrastructure, communications, and decision making process
-have stabilized in a manner consistent with other successful ASF projects.
-
-While incubation status is not necessarily a reflection of the completeness
-or stability of the code, it does indicate that the project has yet to be
-fully endorsed by the ASF.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-knox/blob/b83a1df3/gateway-site/src/site/markdown/examples.md.vm
----------------------------------------------------------------------
diff --git a/gateway-site/src/site/markdown/examples.md.vm b/gateway-site/src/site/markdown/examples.md.vm
deleted file mode 100644
index 8408c25..0000000
--- a/gateway-site/src/site/markdown/examples.md.vm
+++ /dev/null
@@ -1,375 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-------------------------------------------------------------------------------
-Apache Knox Gateway - Usage Examples
-------------------------------------------------------------------------------
-This guide provides detailed examples for how to do some basic interactions
-with Hadoop via the Apache Knox Gateway.
-
-The first two examples submit a Java MapReduce job and workflow using the
-KnoxShell DSL.
-
-* Example #1: WebHDFS & Templeton/WebHCat via KnoxShell DSL
-* Example #2: WebHDFS & Oozie via KnoxShell DSL
-
-The second two examples submit the same job and workflow but do so using only
-the [cURL](http://curl.haxx.se/) command line HTTP client.
-
-* Example #3: WebHDFS & Templeton/WebHCat via cURL
-* Example #4: WebHDFS & Oozie via cURL
-
-------------------------------------------------------------------------------
-Assumptions
-------------------------------------------------------------------------------
-This document assumes a few things about your environment in order to
-simplify the examples.
-
-1. The JVM is executable as simply java.
-2. The Apache Knox Gateway is installed and functional.
-3. The example commands are executed within the context of the GATEWAY_HOME
-   current directory. The GATEWAY_HOME directory is the directory within the
-   Apache Knox Gateway installation that contains the README file and the bin,
-   conf and deployments directories.
-4. A few examples optionally require the use of commands from a standard
-   Groovy installation.  These examples are optional but to try them you will
-   need Groovy [installed][gii].
-
-[gii]: http://groovy.codehaus.org/Installing+Groovy
-
-------------------------------------------------------------------------------
-Customization
-------------------------------------------------------------------------------
-These examples may need to be tailored to the execution environment.  In
-particular, hostnames and ports may need to be changed to match your
-environment.  There are two example files in the distribution
-that may need to be customized.  Take a moment to review these files.
-All of the values that may need to be customized can be found together at the
-top of each file.
-
-* samples/ExampleSubmitJob.groovy
-* samples/ExampleSubmitWorkflow.groovy
-
-If you are using the Sandbox VM for your Hadoop cluster you may want to
-review [these configuration tips][sb].
-
-[sb]: sandbox.html
-
-------------------------------------------------------------------------------
-Example #1: WebHDFS & Templeton/WebHCat via KnoxShell DSL
-------------------------------------------------------------------------------
-This example will submit the familiar WordCount Java MapReduce job to the
-Hadoop cluster via the gateway using the KnoxShell DSL.  There are several
-ways to do this depending upon your preference.
-
-You can use the "embedded" Groovy interpreter provided with the distribution.
-
-    java -jar bin/shell.jar samples/ExampleSubmitJob.groovy
-
-You can load the KnoxShell DSL script into the standard Groovy Console.
-
-    groovyConsole -cp bin/shell-${gateway-version}.jar samples/ExampleSubmitJob.groovy
-
-You can manually type in the KnoxShell DSL script into the "embedded" Groovy
-interpreter provided with the distribution.
-
-    java -jar bin/shell.jar
-
-Each line from the file below will need to be typed or copied into the
-interactive shell.
-
-***samples/ExampleSubmitJob***
-
-    import com.jayway.jsonpath.JsonPath
-    import org.apache.hadoop.gateway.shell.Hadoop
-    import org.apache.hadoop.gateway.shell.hdfs.Hdfs
-    import org.apache.hadoop.gateway.shell.job.Job
-
-    import static java.util.concurrent.TimeUnit.SECONDS
-
-    gateway = "https://localhost:8443/gateway/sample"
-    username = "mapred"
-    password = "mapred-password"
-    dataFile = "LICENSE"
-    jarFile = "samples/hadoop-examples.jar"
-
-    hadoop = Hadoop.login( gateway, username, password )
-
-    println "Delete /tmp/test " + Hdfs.rm(hadoop).file( "/tmp/test" ).recursive().now().statusCode
-    println "Create /tmp/test " + Hdfs.mkdir(hadoop).dir( "/tmp/test").now().statusCode
-
-    putData = Hdfs.put(hadoop).file( dataFile ).to( "/tmp/test/input/FILE" ).later() {
-      println "Put /tmp/test/input/FILE " + it.statusCode }
-    putJar = Hdfs.put(hadoop).file( jarFile ).to( "/tmp/test/hadoop-examples.jar" ).later() {
-      println "Put /tmp/test/hadoop-examples.jar " + it.statusCode }
-    hadoop.waitFor( putData, putJar )
-
-    jobId = Job.submitJava(hadoop) \
-      .jar( "/tmp/test/hadoop-examples.jar" ) \
-      .app( "wordcount" ) \
-      .input( "/tmp/test/input" ) \
-      .output( "/tmp/test/output" ) \
-      .now().jobId
-    println "Submitted job " + jobId
-
-    done = false
-    count = 0
-    while( !done && count++ < 60 ) {
-      sleep( 1000 )
-      json = Job.queryStatus(hadoop).jobId(jobId).now().string
-      done = JsonPath.read( json, "\$.status.jobComplete" )
-    }
-    println "Done " + done
-
-    println "Shutdown " + hadoop.shutdown( 10, SECONDS )
-
-------------------------------------------------------------------------------
-Example #2: WebHDFS & Oozie via KnoxShell DSL
-------------------------------------------------------------------------------
-This example will also submit the familiar WordCount Java MapReduce job to the
-Hadoop cluster via the gateway using the KnoxShell DSL.  However in this case
-the job will be submitted via an Oozie workflow.  There are several ways to do
-this depending upon your preference.
-
-You can use the "embedded" Groovy interpreter provided with the distribution.
-
-    java -jar bin/shell.jar samples/ExampleSubmitWorkflow.groovy
-
-You can load the KnoxShell DSL script into the standard Groovy Console.
-
-    groovyConsole -cp bin/shell-${gateway-version}.jar samples/ExampleSubmitWorkflow.groovy
-
-You can manually type in the KnoxShell DSL script into the "embedded" Groovy
-interpreter provided with the distribution.
-
-    java -jar bin/shell.jar
-
-Each line from the file below will need to be typed or copied into the
-interactive shell.
-
-***samples/ExampleSubmitWorkflow.groovy***
-
-    import com.jayway.jsonpath.JsonPath
-    import org.apache.hadoop.gateway.shell.Hadoop
-    import org.apache.hadoop.gateway.shell.hdfs.Hdfs
-    import org.apache.hadoop.gateway.shell.workflow.Workflow
-
-    import static java.util.concurrent.TimeUnit.SECONDS
-
-    gateway = "https://localhost:8443/gateway/sample"
-    jobTracker = "sandbox:50300";
-    nameNode = "sandbox:8020";
-    username = "mapred"
-    password = "mapred-password"
-    inputFile = "LICENSE"
-    jarFile = "samples/hadoop-examples.jar"
-
-    definition = """\
-    <workflow-app xmlns="uri:oozie:workflow:0.2" name="wordcount-workflow">
-        <start to="root-node"/>
-        <action name="root-node">
-            <java>
-                <job-tracker>$jobTracker</job-tracker>
-                <name-node>hdfs://$nameNode</name-node>
-                <main-class>org.apache.hadoop.examples.WordCount</main-class>
-                <arg>/tmp/test/input</arg>
-                <arg>/tmp/test/output</arg>
-            </java>
-            <ok to="end"/>
-            <error to="fail"/>
-        </action>
-        <kill name="fail">
-            <message>Java failed</message>
-        </kill>
-        <end name="end"/>
-    </workflow-app>
-    """
-
-    configuration = """\
-    <configuration>
-        <property>
-            <name>user.name</name>
-            <value>$username</value>
-        </property>
-        <property>
-            <name>oozie.wf.application.path</name>
-            <value>hdfs://$nameNode/tmp/test</value>
-        </property>
-    </configuration>
-    """
-
-    hadoop = Hadoop.login( gateway, username, password )
-
-    println "Delete /tmp/test " + Hdfs.rm(hadoop).file( "/tmp/test" ).recursive().now().statusCode
-    println "Mkdir /tmp/test " + Hdfs.mkdir(hadoop).dir( "/tmp/test").now().statusCode
-    putWorkflow = Hdfs.put(hadoop).text( definition ).to( "/tmp/test/workflow.xml" ).later() {
-      println "Put /tmp/test/workflow.xml " + it.statusCode }
-    putData = Hdfs.put(hadoop).file( inputFile ).to( "/tmp/test/input/FILE" ).later() {
-      println "Put /tmp/test/input/FILE " + it.statusCode }
-    putJar = Hdfs.put(hadoop).file( jarFile ).to( "/tmp/test/lib/hadoop-examples.jar" ).later() {
-      println "Put /tmp/test/lib/hadoop-examples.jar " + it.statusCode }
-    hadoop.waitFor( putWorkflow, putData, putJar )
-
-    jobId = Workflow.submit(hadoop).text( configuration ).now().jobId
-    println "Submitted job " + jobId
-
-    status = "UNKNOWN";
-    count = 0;
-    while( status != "SUCCEEDED" && count++ < 60 ) {
-      sleep( 1000 )
-      json = Workflow.status(hadoop).jobId( jobId ).now().string
-      status = JsonPath.read( json, "\$.status" )
-    }
-    println "Job status " + status;
-
-    println "Shutdown " + hadoop.shutdown( 10, SECONDS )
-
-------------------------------------------------------------------------------
-Example #3: WebHDFS & Templeton/WebHCat via cURL
-------------------------------------------------------------------------------
-The example below illustrates the sequence of curl commands that could be used
-to run a "word count" map reduce job.  It utilizes the hadoop-examples.jar
-from a Hadoop install for running a simple word count job.  Take care to
-follow the instructions below for steps 4/5 and 6/7 where the Location header
-returned by the call to the NameNode is copied for use with the call to the
-DataNode that follows it.
-
-    # 0. Optionally cleanup the test directory in case a previous example was run without cleaning up.
-    curl -i -k -u mapred:mapred-password -X DELETE \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&recursive=true'
-
-    # 1. Create a test input directory /tmp/test/input
-    curl -i -k -u mapred:mapred-password -X PUT \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input?op=MKDIRS'
-
-    # 2. Create a test output directory /tmp/test/output
-    curl -i -k -u mapred:mapred-password -X PUT \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/output?op=MKDIRS'
-
-    # 3. Create the inode for hadoop-examples.jar in /tmp/test
-    curl -i -k -u mapred:mapred-password -X PUT \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/hadoop-examples.jar?op=CREATE'
-
-    # 4. Upload hadoop-examples.jar to /tmp/test.  Use a hadoop-examples.jar from a Hadoop install.
-    curl -i -k -u mapred:mapred-password -T hadoop-examples.jar -X PUT '{Value of Location header from command above}'
-
-    # 5. Create the inode for a sample file README in /tmp/test/input
-    curl -i -k -u mapred:mapred-password -X PUT \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input/README?op=CREATE'
-
-    # 6. Upload the README file to /tmp/test/input.  Use the README found in {GATEWAY_HOME}.
-    curl -i -k -u mapred:mapred-password -T README -X PUT '{Value of Location header from command above}'
-
-    # 7. Submit the word count job via WebHCat/Templeton.
-    # Take note of the Job ID in the JSON response as this will be used in the next step.
-    curl -v -i -k -u mapred:mapred-password -X POST \
-      -d jar=/tmp/test/hadoop-examples.jar -d class=wordcount \
-      -d arg=/tmp/test/input -d arg=/tmp/test/output \
-      'https://localhost:8443/gateway/sample/templeton/api/v1/mapreduce/jar'
-
-    # 8. Look at the status of the job
-    curl -i -k -u mapred:mapred-password -X GET \
-      'https://localhost:8443/gateway/sample/templeton/api/v1/queue/{Job ID returned in JSON body from previous step}'
-
-    # 9. Look at the status of the job queue
-    curl -i -k -u mapred:mapred-password -X GET \
-      'https://localhost:8443/gateway/sample/templeton/api/v1/queue'
-
-    # 10. List the contents of the output directory /tmp/test/output
-    curl -i -k -u mapred:mapred-password -X GET \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/output?op=LISTSTATUS'
-
-    # 11. Optionally cleanup the test directory
-    curl -i -k -u mapred:mapred-password -X DELETE \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&recursive=true'
-
-------------------------------------------------------------------------------
-Example #4: WebHDFS & Oozie via cURL
-------------------------------------------------------------------------------
-The example below illustrates the sequence of curl commands that could be used
-to run a "word count" map reduce job via an Oozie workflow.  It utilizes the
-hadoop-examples.jar from a Hadoop install for running a simple word count job.
-Take care to follow the instructions below where replacement values are
-required.  These replacement values are identified with { } markup.
-
-    # 0. Optionally cleanup the test directory in case a previous example was run without cleaning up.
-    curl -i -k -u mapred:mapred-password -X DELETE \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&recursive=true'
-
-    # 1. Create the inode for workflow definition file in /tmp/test
-    curl -i -k -u mapred:mapred-password -X PUT \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/workflow.xml?op=CREATE'
-
-    # 2. Upload the workflow definition file.  This file can be found in {GATEWAY_HOME}/templates
-    curl -i -k -u mapred:mapred-password -T templates/workflow-definition.xml -X PUT \
-      '{Value of Location header from command above}'
-
-    # 3. Create the inode for hadoop-examples.jar in /tmp/test/lib
-    curl -i -k -u mapred:mapred-password -X PUT \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/lib/hadoop-examples.jar?op=CREATE'
-
-    # 4. Upload hadoop-examples.jar to /tmp/test/lib.  Use a hadoop-examples.jar from a Hadoop install.
-    curl -i -k -u mapred:mapred-password -T hadoop-examples.jar -X PUT \
-      '{Value of Location header from command above}'
-
-    # 5. Create the inode for a sample input file README in /tmp/test/input.
-    curl -i -k -u mapred:mapred-password -X PUT \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/input/README?op=CREATE'
-
-    # 6. Upload the README file found in {GATEWAY_HOME} to /tmp/test/input.
-    curl -i -k -u mapred:mapred-password -T README -X PUT \
-      '{Value of Location header from command above}'
-
-    # 7. Create the job configuration file by replacing the {NameNode host:port} and {JobTracker host:port}
-    # in the command below to values that match your Hadoop configuration.
-    # NOTE: The hostnames must be resolvable by the Oozie daemon.  The ports are the RPC ports not the HTTP ports.
-    # For example {NameNode host:port} might be sandbox:8020 and {JobTracker host:port} sandbox:50300
-    # The source workflow-configuration.xml file can be found in {GATEWAY_HOME}/templates
-    # Alternatively, this file can copied and edited manually for environments without the sed utility.
-    sed -e s/REPLACE.NAMENODE.RPCHOSTPORT/{NameNode host:port}/ \
-      -e s/REPLACE.JOBTRACKER.RPCHOSTPORT/{JobTracker host:port}/ \
-      <templates/workflow-configuration.xml >workflow-configuration.xml
-
-    # 8. Submit the job via Oozie
-    # Take note of the Job ID in the JSON response as this will be used in the next step.
-    curl -i -k -u mapred:mapred-password -T workflow-configuration.xml -H Content-Type:application/xml -X POST \
-      'https://localhost:8443/gateway/sample/oozie/api/v1/jobs?action=start'
-
-    # 9. Query the job status via Oozie.
-    curl -i -k -u mapred:mapred-password -X GET \
-      'https://localhost:8443/gateway/sample/oozie/api/v1/job/{Job ID returned in JSON body from previous step}'
-
-    # 10. List the contents of the output directory /tmp/test/output
-    curl -i -k -u mapred:mapred-password -X GET \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test/output?op=LISTSTATUS'
-
-    # 11. Optionally cleanup the test directory
-    curl -i -k -u mapred:mapred-password -X DELETE \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp/test?op=DELETE&recursive=true'
-
-------------------------------------------------------------------------------
-Disclaimer
-------------------------------------------------------------------------------
-The Apache Knox Gateway is an effort undergoing incubation at the
-Apache Software Foundation (ASF), sponsored by the Apache Incubator PMC.
-
-Incubation is required of all newly accepted projects until a further review
-indicates that the infrastructure, communications, and decision making process
-have stabilized in a manner consistent with other successful ASF projects.
-
-While incubation status is not necessarily a reflection of the completeness
-or stability of the code, it does indicate that the project has yet to be
-fully endorsed by the ASF.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-knox/blob/b83a1df3/gateway-site/src/site/markdown/getting-started.md.vm
----------------------------------------------------------------------
diff --git a/gateway-site/src/site/markdown/getting-started.md.vm b/gateway-site/src/site/markdown/getting-started.md.vm
deleted file mode 100644
index d00e838..0000000
--- a/gateway-site/src/site/markdown/getting-started.md.vm
+++ /dev/null
@@ -1,364 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-------------------------------------------------------------------------------
-Apache Knox Gateway - Getting Started
-------------------------------------------------------------------------------
-This guide describes the steps required to install, deploy and validate the
-Apache Knox Gateway.
-
-------------------------------------------------------------------------------
-Requirements
-------------------------------------------------------------------------------
-The following prerequisites must be installed to successfully complete the
-steps described in this guide.
-
-${HHH} Java
-Java 1.6 or later
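-
-If in doubt, you can confirm the version of Java on your path with the
-standard version check shown below; any report of 1.6 or later is
-sufficient.
-
-    java -version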
-
-${HHH} Hadoop
-A local installation of a Hadoop Cluster is required at this time.
-Hadoop EC2 cluster and/or Sandbox installations are currently difficult
-to access remotely via the Gateway because the Hadoop services run with
-internal IP addresses.  For the Gateway to work in these cases it
-currently needs to be deployed on the EC2 cluster or Sandbox itself.
-
-The instructions that follow assume that the Gateway is *not* collocated
-with the Hadoop clusters themselves and (most importantly) that the
-hostnames and IP addresses of the cluster services are accessible by the
-gateway wherever it happens to be running.
-
-Ensure that the Hadoop cluster has WebHDFS, WebHCat (i.e. Templeton)
-and Oozie configured, deployed and running.
-
-This release of the Apache Knox Gateway has been tested against the
-[Hortonworks Sandbox 1.2][hsb] with [these changes][sb].
-
-[hsb]: http://hortonworks.com/products/hortonworks-sandbox/
-[sb]: sandbox.html
-
-------------------------------------------------------------------------------
-Installation
-------------------------------------------------------------------------------
-${HHH} 1. Extract the distribution ZIP
-
-Download and extract the gateway-${gateway-version}.zip file into the
-installation directory that will contain your `{GATEWAY_HOME}`
-
-    jar xf gateway-${gateway-version}.zip
-
-This will create a directory `gateway-${gateway-version}` in your current
-directory.
-
-${HHH} 2. Enter the `{GATEWAY_HOME}` directory
-
-    cd gateway-${gateway-version}
-
-The fully qualified name of this directory will be referenced as
-`{GATEWAY_HOME}` throughout the remainder of this document.
-
-${HHH} 3. Start the demo LDAP server (ApacheDS)
-
-First, understand that the LDAP server provided here is for demonstration
-purposes.  You may configure the LDAP specifics within the topology
-descriptor for the cluster as described in step 5 below, in order to
-customize what LDAP instance to use.  The assumption is that most users
-will leverage the demo LDAP server while evaluating this release and should
-therefore continue with the instructions here in step 3.
-
-Edit `{GATEWAY_HOME}/conf/users.ldif` if required and add your users and
-groups to the file.  A number of normal Hadoop users
-(e.g. hdfs, mapred, hcat, hive) have already been included.  Note that
-the passwords in this file are "fictitious" and have nothing to do with
-the actual accounts on the Hadoop cluster you are using.  There is also
-a copy of this file in the templates directory that you can use to start
-over if necessary.
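-
-For illustration only, a user entry in users.ldif generally takes the
-shape sketched below; the base DN, object classes and values shown here
-are placeholders, so copy the structure of the entries already present in
-the shipped users.ldif rather than these exact values.
-
-    # Hypothetical entry - mirror the existing entries in users.ldif.
-    dn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org
-    objectclass: top
-    objectclass: person
-    objectclass: organizationalPerson
-    objectclass: inetOrgPerson
-    cn: Guest User
-    sn: Guest
-    uid: guest
-    userPassword: guest-password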
-
-Start the LDAP server, pointing it at the conf directory where it will
-find the users.ldif file.
-
-    java -jar bin/ldap.jar conf &
-
-There are a number of log messages of the form `Created null.` that can
-safely be ignored.  Take note of the port on which it was started as this
-needs to match later configuration.  This will create a directory named
-'org.apache.hadoop.gateway.security.EmbeddedApacheDirectoryServer' that
-can safely be ignored.
-
-${HHH} 4. Start the Gateway server
-
-    java -jar bin/server.jar
-
-Take note of the port identified in the logging output as you will need this
-for accessing the gateway.
-
-The server will prompt you for the master secret (password). This secret
-is used to protect the artifacts used by the gateway server for things
-like SSL and credential/password aliasing. This secret will have to be
-entered at startup unless you choose to persist it. Remember this secret
-and keep it safe; it represents the keys to the kingdom. See the
-Persisting the Master Secret section for more information.
-
-${HHH} 5. Configure the Gateway with the topology of your Hadoop cluster
-Edit the file `{GATEWAY_HOME}/deployments/sample.xml`
-
-Change the host and port in the urls of the `<service>` elements for
-NAMENODE, TEMPLETON and OOZIE services to match your Hadoop cluster
-deployment.
-
-The default configuration contains the URL of an LDAP server.  Out of the
-box that file is configured to access the demo ApacheDS based LDAP server,
-which listens on port 33389 by default.  Optionally, you can change the
-LDAP URL used for authentication.  This is set via the
-main.ldapRealm.contextFactory.url property in the
-`<gateway><provider><authentication>` section, as sketched below.
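-
-The fragment below is purely illustrative of the two kinds of edits
-described above; treat the sample.xml shipped with your release as the
-authoritative reference for exact element names and nesting, and the host
-and port values shown here as placeholders.
-
-    <!-- Illustrative sketch only; mirror the structure in sample.xml. -->
-    <gateway>
-        <provider>
-            <authentication>
-                <param>
-                    <name>main.ldapRealm.contextFactory.url</name>
-                    <value>ldap://localhost:33389</value>
-                </param>
-            </authentication>
-        </provider>
-    </gateway>
-    <service>
-        <role>NAMENODE</role>
-        <url>http://{namenode-host}:50070/webhdfs/v1</url>
-    </service>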
-
-Save the file.  The directory {GATEWAY_HOME}/deployments is monitored
-by the Gateway server and reacts to the discovery of a new or changed
-cluster topology descriptor by provisioning the endpoints and required
-filter chains to serve the needs of each cluster as described by the
-topology file.  Note that the name of the file excluding the extension
-is also used as the path for that cluster in the URL.  So for example
-the sample.xml file will result in Gateway URLs of the form
-`http://{gateway-host}:{gateway-port}/gateway/sample/namenode/api/v1`
-
-${HHH} 6. Test the installation and configuration of your Gateway
-Invoke the LISTSTATUS operation on HDFS represented by your configured
-NAMENODE by using your web browser or curl:
-
-    curl -i -k -u hdfs:hdfs-password -X GET \
-    'https://localhost:8443/gateway/sample/namenode/api/v1/?op=LISTSTATUS'
-
-The above command should produce output along the lines of the example
-below.  The exact information returned depends on the content within
-HDFS in your Hadoop cluster.
-
-    HTTP/1.1 200 OK
-    Content-Type: application/json
-    Content-Length: 760
-    Server: Jetty(6.1.26)
-
-    {"FileStatuses":{"FileStatus":[
-    {"accessTime":0,"blockSize":0,"group":"hdfs","length":0,"modificationTime":1350595859762,"owner":"hdfs","pathSuffix":"apps","permission":"755","replication":0,"type":"DIRECTORY"},
-    {"accessTime":0,"blockSize":0,"group":"mapred","length":0,"modificationTime":1350595874024,"owner":"mapred","pathSuffix":"mapred","permission":"755","replication":0,"type":"DIRECTORY"},
-    {"accessTime":0,"blockSize":0,"group":"hdfs","length":0,"modificationTime":1350596040075,"owner":"hdfs","pathSuffix":"tmp","permission":"777","replication":0,"type":"DIRECTORY"},
-    {"accessTime":0,"blockSize":0,"group":"hdfs","length":0,"modificationTime":1350595857178,"owner":"hdfs","pathSuffix":"user","permission":"755","replication":0,"type":"DIRECTORY"}
-    ]}}
-
-For additional information on WebHDFS, Templeton/WebHCat and Oozie
-REST APIs, see the following URLs respectively:
-
-* WebHDFS - http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html
-* Templeton/WebHCat - http://people.apache.org/~thejas/templeton_doc_v1/
-* Oozie - http://oozie.apache.org/docs/3.3.1/WebServicesAPI.html
-
-------------------------------------------------------------------------------
-Examples
-------------------------------------------------------------------------------
-More examples can be found [here](/examples.html).
-
-------------------------------------------------------------------------------
-Persisting the Master Secret
-------------------------------------------------------------------------------
-The master secret is required to start the server. This secret is used to
-access secured artifacts by the gateway instance. Keystore, trust stores and
-credential stores are all protected with the master secret.
-
-You may persist the master secret by supplying the *-persist-master* switch at
-startup. This will result in a warning indicating that persisting the secret
-is less secure than providing it at startup. We do make some provisions in
-order to protect the persisted password.
-
-It is encrypted with 128 bit AES encryption and, where possible, the file
-permissions are set to only be accessible by the user that the gateway is
-running as.
-
-After persisting the secret, ensure that the file at config/security/master
-has the appropriate permissions set for your environment. This is probably
-the most important layer of defense for the master secret. Do not assume
-that the encryption is sufficient protection.
-
-A specific user should be created to run the gateway; this will help
-protect the persisted master file.
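-
-As a concrete sketch of the above, the commands below persist the master
-secret and then tighten the permissions on the resulting file; the
-permission bits shown are only an example of what "appropriate for your
-environment" might mean.
-
-    # Persist the master secret (a warning about reduced security is expected).
-    java -jar bin/server.jar -persist-master
-
-    # Restrict the persisted secret to the user that runs the gateway.
-    chmod 600 config/security/master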
-
-------------------------------------------------------------------------------
-Management of Security Artifacts
-------------------------------------------------------------------------------
-There are a number of artifacts that are used by the gateway in ensuring the
-security of wire level communications, access to protected resources and the
-encryption of sensitive data. These artifacts can be managed from outside of
-the gateway instances or generated and populated by the gateway instance
-itself.
-
-The following describes how this is coordinated for both standalone
-(development, demo, etc.) gateway instances and for instances that are
-part of a cluster of gateways.
-
-Upon start of the gateway server we:
-
-1. Look for an identity store at conf/security/keystores/gateway.jks. The
-identity store contains the certificate and private key used to represent the
-identity of the server for SSL connections and signature creation.
-
-    * If there is no identity store we create one and generate a self-signed
-      certificate for use in standalone/demo mode. The certificate is stored
-      with an alias of gateway-identity.
-    * If an identity store is found then we ensure that it can be loaded
-      using the provided master secret and that there is an alias called
-      gateway-identity.
-
-2. Look for a credential store at
-   `conf/security/keystores/__gateway-credentials.jceks`. This credential
-   store is used to store secrets/passwords that are used by the gateway.
-   For instance, this is where the pass-phrase for accessing the
-   gateway-identity certificate is kept.
-
-    * If there is no credential store found then we create one and populate it
-      with a generated pass-phrase for the alias `gateway-identity-passphrase`.
-      This is coordinated with the population of the self-signed cert into the
-      identity-store.
-    * If a credential store is found then we ensure that it can be loaded using
-      the provided master secret and that the expected aliases have been
-      populated with secrets.
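-
-If you want to verify these stores yourself, the standard JDK keytool can
-list their contents; the sketch below assumes the default locations and
-aliases described above and will prompt for a store password.
-
-    # Inspect the identity store (should contain the gateway-identity alias).
-    keytool -list -keystore conf/security/keystores/gateway.jks
-
-    # Inspect the gateway credential store (JCEKS format).
-    keytool -list -storetype JCEKS \
-      -keystore conf/security/keystores/__gateway-credentials.jceks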
-
-Upon deployment of a Hadoop cluster topology within the gateway we:
-
-1. Look for a credential store for the topology. For instance, we have a
-   sample topology that gets deployed out of the box.  We look for
-   `conf/security/keystores/sample-credentials.jceks`. This topology specific
-   credential store is used for storing secrets/passwords that are used for
-   encrypting sensitive data with topology specific keys.
-
-    * If no credential store is found for the topology being deployed then
-      one is created for it. Population of the aliases is delegated to the
-      configured providers within the system that will require the use of a
-      secret for a particular task. They may programmatically set the value
-      of the secret or choose to have the value for the specified alias
-      generated through the AliasService.
-    * If a credential store is found then we ensure that it can be loaded
-      with the provided master secret and the configured providers have the
-      opportunity to ensure that the aliases are populated and if not to
-      populate them.
-
-By following the algorithm described above, these artifacts can be managed
-in a number of ways.
-
-1. Using a single gateway instance as a master instance the artifacts can be
-   generated or placed into the expected location and then replicated across
-   all of the slave instances before startup.
-2. Using an NFS mount as a central location for the artifacts would provide
-   a single source of truth without the need to replicate them over the
-   network. Of course, NFS mounts have their own challenges.
-
-Summary of Secrets to be Managed:
-
-1. Master secret - the same for all gateway instances in a cluster of gateways
-2. All security related artifacts are protected with the master secret
-3. Secrets used by the gateway itself are stored within the gateway credential
-   store and are the same across all gateway instances in the cluster of
-   gateways
-4. Secrets used by providers within cluster topologies are stored in topology
-   specific credential stores and are the same for the same topology across
-   the cluster of gateway instances. However, they are specific to the
-   topology - so secrets for one hadoop cluster are different from those of
-   another. This allows for fail-over from one gateway instance to another
-   even when encryption is being used while not allowing the compromise of one
-   encryption key to expose the data for all clusters.
-
-NOTE: the SSL certificate will need special consideration depending on the
-type of certificate. Wildcard certs may be shareable across all gateway
-instances in a cluster. When certs are dedicated to specific machines the
-gateway identity store cannot simply be replicated blindly, as hostname
-verification problems will ensue. Obviously, trust stores will need to be
-taken into account as well.
-
-------------------------------------------------------------------------------
-Mapping Gateway URLs to Hadoop cluster URLs
-------------------------------------------------------------------------------
-The Gateway functions much like a reverse proxy.  As such it maintains a
-mapping of URLs that are exposed externally by the Gateway to URLs that are
-provided by the Hadoop cluster.  Examples of mappings for the NameNode and
-Templeton are shown below.  These mappings are generated from the combination
-of the Gateway configuration file (i.e. {GATEWAY_HOME}/gateway-site.xml)
-and the cluster topology descriptors
-(e.g. {GATEWAY_HOME}/deployments/{cluster-name}.xml).
-
-* HDFS (NameNode)
-    * Gateway: `http://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/namenode/api/v1`
-    * Cluster: `http://{namenode-host}:50070/webhdfs/v1`
-* WebHCat (Templeton)
-    * Gateway: `http://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/templeton/api/v1`
-    * Cluster: `http://{templeton-host}:50111/templeton/v1`
-* Oozie
-    * Gateway: `http://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/oozie/api/v1`
-    * Cluster: `http://{oozie-host}:11000/oozie/v1`
-
-The values for `{gateway-host}`, `{gateway-port}`, `{gateway-path}` are
-provided via the Gateway configuration file
-(i.e. `{GATEWAY_HOME}/gateway-site.xml`).
-
-The value for `{cluster-name}` is derived from the name of the cluster
-topology descriptor (e.g. `{GATEWAY_HOME}/deployments/{cluster-name}.xml`).
-
-The values for `{namenode-host}`, `{templeton-host}` and `{oozie-host}` are
-provided via the cluster topology descriptor
-(e.g. `{GATEWAY_HOME}/deployments/{cluster-name}.xml`).
-
-Note: The ports 50070, 50111 and 11000 are the defaults for NameNode,
-      Templeton and Oozie respectively. Their values can also be provided via
-      the cluster topology descriptor if your Hadoop cluster uses different
-      ports.
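-
-As a concrete illustration of the mapping, using the values from earlier
-in this guide (gateway path `gateway`, cluster name `sample`, NameNode on
-`sandbox`), the two requests below address the same WebHDFS resource; the
-direct request is only possible from a host that can reach the cluster's
-internal addresses.
-
-    # Via the gateway, authenticated against the gateway's LDAP provider.
-    curl -i -k -u hdfs:hdfs-password -X GET \
-      'https://localhost:8443/gateway/sample/namenode/api/v1/tmp?op=LISTSTATUS'
-
-    # Directly against the Hadoop cluster's WebHDFS endpoint.
-    curl -i -X GET 'http://sandbox:50070/webhdfs/v1/tmp?op=LISTSTATUS'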
-
-------------------------------------------------------------------------------
-Enabling logging
-------------------------------------------------------------------------------
-If necessary you can enable additional logging by editing the
-`log4j.properties` file in the `conf` directory.  Changing the rootLogger
-value from `ERROR` to `DEBUG` will generate a large amount of debug logging.
-A number of useful, more fine-grained loggers are also provided in the file.
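-
-For example, the change amounts to editing a single line in
-`conf/log4j.properties` along the lines shown below; the appender name is
-a placeholder for whatever appender list your copy of the file already
-declares.
-
-    # Before (appender name is a placeholder)
-    log4j.rootLogger=ERROR, app
-    # After
-    log4j.rootLogger=DEBUG, app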
-
-------------------------------------------------------------------------------
-Filing bugs
-------------------------------------------------------------------------------
-Currently we do not have Jira set up for Knox.  Therefore if you find an
-issue please send an email to the Knox user list
-(user AT knox.incubator.apache.org) with a subject prefix of [BUG]
-describing the issue.  Please include the output of the command
-
-    java -jar bin/server.jar -version
-
-in the Environment section of the email.  Also include the version of
-Hadoop being used.
-
-Once we have Jira set up, the email archive will be reviewed and Jira
-issues created for each bug.
-
-------------------------------------------------------------------------------
-Disclaimer
-------------------------------------------------------------------------------
-The Apache Knox Gateway is an effort undergoing incubation at the
-Apache Software Foundation (ASF), sponsored by the Apache Incubator PMC.
-
-Incubation is required of all newly accepted projects until a further review
-indicates that the infrastructure, communications, and decision making process
-have stabilized in a manner consistent with other successful ASF projects.
-
-While incubation status is not necessarily a reflection of the completeness
-or stability of the code, it does indicate that the project has yet to be
-fully endorsed by the ASF.
-

http://git-wip-us.apache.org/repos/asf/incubator-knox/blob/b83a1df3/gateway-site/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/gateway-site/src/site/markdown/index.md b/gateway-site/src/site/markdown/index.md
deleted file mode 100644
index c907b41..0000000
--- a/gateway-site/src/site/markdown/index.md
+++ /dev/null
@@ -1,36 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Introduction
-------------
-The charter for the Apache Knox Gateway project is to simplify and normalize
-the deployment and implementation of secure Hadoop clusters as well as be
-a central access point for the service specific REST APIs exposed from
-within the Hadoop clusters.
-
-Disclaimer
-----------
-The Apache Knox Gateway is an effort undergoing incubation at the
-Apache Software Foundation (ASF), sponsored by the Apache Incubator PMC.
-
-Incubation is required of all newly accepted projects until a further review
-indicates that the infrastructure, communications, and decision making process
-have stabilized in a manner consistent with other successful ASF projects.
-
-While incubation status is not necessarily a reflection of the completeness
-or stability of the code, it does indicate that the project has yet to be
-fully endorsed by the ASF.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-knox/blob/b83a1df3/gateway-site/src/site/markdown/news.md
----------------------------------------------------------------------
diff --git a/gateway-site/src/site/markdown/news.md b/gateway-site/src/site/markdown/news.md
deleted file mode 100644
index 73d171e..0000000
--- a/gateway-site/src/site/markdown/news.md
+++ /dev/null
@@ -1,34 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-2013-03-05
-----------
-Started creating the site.
-
-Disclaimer
-----------
-The Apache Knox Gateway is an effort undergoing incubation at the
-Apache Software Foundation (ASF), sponsored by the Apache Incubator PMC.
-
-Incubation is required of all newly accepted projects until a further review
-indicates that the infrastructure, communications, and decision making process
-have stabilized in a manner consistent with other successful ASF projects.
-
-While incubation status is not necessarily a reflection of the completeness
-or stability of the code, it does indicate that the project has yet to be
-fully endorsed by the ASF.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-knox/blob/b83a1df3/gateway-site/src/site/markdown/privacy-policy.md
----------------------------------------------------------------------
diff --git a/gateway-site/src/site/markdown/privacy-policy.md b/gateway-site/src/site/markdown/privacy-policy.md
deleted file mode 100644
index 4b97833..0000000
--- a/gateway-site/src/site/markdown/privacy-policy.md
+++ /dev/null
@@ -1,43 +0,0 @@
-<!---
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-Privacy Policy
---------------
-
-Information about your use of this website is collected using server access logs and a tracking cookie. The
-collected information consists of the following:
-
-1. The IP address from which you access the website;
-2. The type of browser and operating system you use to access our site;
-3. The date and time you access our site;
-4. The pages you visit; and
-5. The addresses of pages from where you followed a link to our site.
-
-Part of this information is gathered using a tracking cookie set by the
-[Google Analytics][1] service and handled by Google as described in their
-[privacy policy][2]. See your browser documentation for instructions on how to
-disable the cookie if you prefer not to share this data with Google.
-
-We use the gathered information to help us make our site more useful to visitors and to better understand how and
-when our site is used. We do not track or collect personally identifiable information or associate gathered data
-with any personally identifying information from other sources.
-
-By using this website, you consent to the collection of this data in the manner and for the purpose described above.
-
-[1]: http://www.google.com/analytics/
-[2]: http://www.google.com/privacy.html
\ No newline at end of file

