knox-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From m...@apache.org
Subject [10/25] knox git commit: KNOX-1039 - Added admin APIs for managing shared provider configurations and descriptors (Phil Zampino via Sandeep More)
Date Thu, 02 Nov 2017 18:48:14 GMT
KNOX-1039 - Added admin APIs for managing shared provider configurations and descriptors (Phil Zampino via Sandeep More)


Project: http://git-wip-us.apache.org/repos/asf/knox/repo
Commit: http://git-wip-us.apache.org/repos/asf/knox/commit/9ad9bcdb
Tree: http://git-wip-us.apache.org/repos/asf/knox/tree/9ad9bcdb
Diff: http://git-wip-us.apache.org/repos/asf/knox/diff/9ad9bcdb

Branch: refs/heads/KNOX-998-Package_Restructuring
Commit: 9ad9bcdbbdb82acdabd05fe1500da9a6f8d22634
Parents: 41952dd
Author: Sandeep More <more@apache.org>
Authored: Thu Oct 26 13:20:35 2017 -0400
Committer: Sandeep More <more@apache.org>
Committed: Thu Oct 26 13:20:35 2017 -0400

----------------------------------------------------------------------
 .../ambari/AmbariServiceDiscovery.java          |   3 +-
 .../apache/hadoop/gateway/GatewayMessages.java  |  34 +-
 .../gateway/config/impl/GatewayConfigImpl.java  |   3 +-
 .../topology/impl/DefaultTopologyService.java   | 221 +++++--
 .../topology/DefaultTopologyServiceTest.java    | 402 +++++++++++--
 .../topology/file/provider-config-one.xml       |  74 +++
 .../topology/file/simple-descriptor-five.json   |  14 +
 .../topology/file/simple-descriptor-six.json    |  18 +
 .../service/admin/HrefListingMarshaller.java    |  75 +++
 .../service/admin/TopologiesResource.java       | 379 +++++++++++-
 .../hadoop/gateway/i18n/GatewaySpiMessages.java |  10 +-
 .../services/topology/TopologyService.java      |  33 +-
 .../gateway/GatewayAdminTopologyFuncTest.java   | 586 +++++++++++++++++++
 13 files changed, 1736 insertions(+), 116 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
----------------------------------------------------------------------
diff --git a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
index 37f68ae..b7f9f53 100644
--- a/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
+++ b/gateway-discovery-ambari/src/main/java/org/apache/hadoop/gateway/topology/discovery/ambari/AmbariServiceDiscovery.java
@@ -247,10 +247,11 @@ class AmbariServiceDiscovery implements ServiceDiscovery {
             }
 
             if (aliasService != null) {
-                // If not password alias is configured, then try the default alias
+                // If no password alias is configured, then try the default alias
                 if (passwordAlias == null) {
                     passwordAlias = DEFAULT_PWD_ALIAS;
                 }
+
                 try {
                     char[] pwd = aliasService.getPasswordFromAliasForGateway(passwordAlias);
                     if (pwd != null) {

http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-server/src/main/java/org/apache/hadoop/gateway/GatewayMessages.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/hadoop/gateway/GatewayMessages.java b/gateway-server/src/main/java/org/apache/hadoop/gateway/GatewayMessages.java
index 6f73c1e..4cb4c40 100644
--- a/gateway-server/src/main/java/org/apache/hadoop/gateway/GatewayMessages.java
+++ b/gateway-server/src/main/java/org/apache/hadoop/gateway/GatewayMessages.java
@@ -514,8 +514,40 @@ public interface GatewayMessages {
   void topologyPortMappingCannotFindTopology(final String topology, final int port);
 
 
+  @Message( level = MessageLevel.INFO, text = "Monitoring simple descriptors in directory: {0}" )
+  void monitoringDescriptorChangesInDirectory(String descriptorsDir);
+
+
+  @Message( level = MessageLevel.INFO, text = "Monitoring shared provider configurations in directory: {0}" )
+  void monitoringProviderConfigChangesInDirectory(String sharedProviderDir);
+
+  @Message( level = MessageLevel.INFO, text = "Prevented deletion of shared provider configuration because there are referencing descriptors: {0}" )
+  void preventedDeletionOfSharedProviderConfiguration(String providerConfigurationPath);
+
+  @Message( level = MessageLevel.INFO, text = "Generated topology {0} because the associated descriptor {1} changed." )
+  void generatedTopologyForDescriptorChange(String topologyName, String descriptorName);
+
   @Message( level = MessageLevel.ERROR, text = "An error occurred while processing {0} : {1}" )
   void simpleDescriptorHandlingError(final String simpleDesc,
-                                     @StackTrace( level = MessageLevel.DEBUG ) Exception e );
+                                     @StackTrace(level = MessageLevel.DEBUG) Exception e);
+
+  @Message(level = MessageLevel.DEBUG, text = "Successfully wrote configuration: {0}")
+  void wroteConfigurationFile(final String filePath);
+
+  @Message(level = MessageLevel.ERROR, text = "Failed to write configuration: {0}")
+  void failedToWriteConfigurationFile(final String filePath,
+                                      @StackTrace(level = MessageLevel.DEBUG) Exception e );
+
+  @Message( level = MessageLevel.INFO, text = "Deleting topology {0} because the associated descriptor {1} was deleted." )
+  void deletingTopologyForDescriptorDeletion(String topologyName, String descriptorName);
+
+  @Message( level = MessageLevel.INFO, text = "Deleting descriptor {0} because the associated topology {1} was deleted." )
+  void deletingDescriptorForTopologyDeletion(String descriptorName, String topologyName);
+
+  @Message( level = MessageLevel.DEBUG, text = "Added descriptor {0} reference to provider configuration {1}." )
+  void addedProviderConfigurationReference(String descriptorName, String providerConfigurationName);
+
+  @Message( level = MessageLevel.DEBUG, text = "Removed descriptor {0} reference to provider configuration {1}." )
+  void removedProviderConfigurationReference(String descriptorName, String providerConfigurationName);
 
 }

http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-server/src/main/java/org/apache/hadoop/gateway/config/impl/GatewayConfigImpl.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/hadoop/gateway/config/impl/GatewayConfigImpl.java b/gateway-server/src/main/java/org/apache/hadoop/gateway/config/impl/GatewayConfigImpl.java
index 0956a4a..4202a18 100644
--- a/gateway-server/src/main/java/org/apache/hadoop/gateway/config/impl/GatewayConfigImpl.java
+++ b/gateway-server/src/main/java/org/apache/hadoop/gateway/config/impl/GatewayConfigImpl.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.gateway.config.impl;
 
+import org.apache.commons.io.FilenameUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -250,7 +251,7 @@ public class GatewayConfigImpl extends Configuration implements GatewayConfig {
   @Override
   public String getGatewayConfDir() {
     String value = getVar( GATEWAY_CONF_HOME_VAR, getGatewayHomeDir() + File.separator + "conf"  );
-    return value;
+    return FilenameUtils.normalize(value);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-server/src/main/java/org/apache/hadoop/gateway/services/topology/impl/DefaultTopologyService.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/main/java/org/apache/hadoop/gateway/services/topology/impl/DefaultTopologyService.java b/gateway-server/src/main/java/org/apache/hadoop/gateway/services/topology/impl/DefaultTopologyService.java
index 13e1a3d..39e8029 100644
--- a/gateway-server/src/main/java/org/apache/hadoop/gateway/services/topology/impl/DefaultTopologyService.java
+++ b/gateway-server/src/main/java/org/apache/hadoop/gateway/services/topology/impl/DefaultTopologyService.java
@@ -62,6 +62,7 @@ import java.io.FileFilter;
 import java.io.IOException;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -91,8 +92,11 @@ public class DefaultTopologyService
   private static DigesterLoader digesterLoader = newLoader(new KnoxFormatXmlTopologyRules(), new AmbariFormatXmlTopologyRules());
   private List<FileAlterationMonitor> monitors = new ArrayList<>();
   private File topologiesDirectory;
+  private File sharedProvidersDirectory;
   private File descriptorsDirectory;
 
+  private DescriptorsMonitor descriptorsMonitor;
+
   private Set<TopologyListener> listeners;
   private volatile Map<File, Topology> topologies;
   private AliasService aliasService;
@@ -211,8 +215,7 @@ public class DefaultTopologyService
   }
 
   private File calculateAbsoluteTopologiesDir(GatewayConfig config) {
-    String normalizedTopologyDir = FilenameUtils.normalize(config.getGatewayTopologyDir());
-    File topoDir = new File(normalizedTopologyDir);
+    File topoDir = new File(config.getGatewayTopologyDir());
     topoDir = topoDir.getAbsoluteFile();
     return topoDir;
   }
@@ -220,15 +223,10 @@ public class DefaultTopologyService
   private File calculateAbsoluteConfigDir(GatewayConfig config) {
     File configDir = null;
 
-    String path = FilenameUtils.normalize(config.getGatewayConfDir());
-    if (path != null) {
-      configDir = new File(config.getGatewayConfDir());
-    } else {
-      configDir = (new File(config.getGatewayTopologyDir())).getParentFile();
-    }
-    configDir = configDir.getAbsoluteFile();
+    String path = config.getGatewayConfDir();
+    configDir = (path != null) ? new File(path) : (new File(config.getGatewayTopologyDir())).getParentFile();
 
-    return configDir;
+    return configDir.getAbsoluteFile();
   }
 
   private void  initListener(FileAlterationMonitor  monitor,
@@ -250,31 +248,34 @@ public class DefaultTopologyService
   private Map<File, Topology> loadTopologies(File directory) {
     Map<File, Topology> map = new HashMap<>();
     if (directory.isDirectory() && directory.canRead()) {
-      for (File file : directory.listFiles(this)) {
-        try {
-          Topology loadTopology = loadTopology(file);
-          if (null != loadTopology) {
-            map.put(file, loadTopology);
-          } else {
+      File[] existingTopologies = directory.listFiles(this);
+      if (existingTopologies != null) {
+        for (File file : existingTopologies) {
+          try {
+            Topology loadTopology = loadTopology(file);
+            if (null != loadTopology) {
+              map.put(file, loadTopology);
+            } else {
+              auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
+                      ActionOutcome.FAILURE);
+              log.failedToLoadTopology(file.getAbsolutePath());
+            }
+          } catch (IOException e) {
+            // Maybe it makes sense to throw exception
             auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-              ActionOutcome.FAILURE);
-            log.failedToLoadTopology(file.getAbsolutePath());
+                    ActionOutcome.FAILURE);
+            log.failedToLoadTopology(file.getAbsolutePath(), e);
+          } catch (SAXException e) {
+            // Maybe it makes sense to throw exception
+            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
+                    ActionOutcome.FAILURE);
+            log.failedToLoadTopology(file.getAbsolutePath(), e);
+          } catch (Exception e) {
+            // Maybe it makes sense to throw exception
+            auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
+                    ActionOutcome.FAILURE);
+            log.failedToLoadTopology(file.getAbsolutePath(), e);
           }
-        } catch (IOException e) {
-          // Maybe it makes sense to throw exception
-          auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-            ActionOutcome.FAILURE);
-          log.failedToLoadTopology(file.getAbsolutePath(), e);
-        } catch (SAXException e) {
-          // Maybe it makes sense to throw exception
-          auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-            ActionOutcome.FAILURE);
-          log.failedToLoadTopology(file.getAbsolutePath(), e);
-        } catch (Exception e) {
-          // Maybe it makes sense to throw exception
-          auditor.audit(Action.LOAD, file.getAbsolutePath(), ResourceType.TOPOLOGY,
-            ActionOutcome.FAILURE);
-          log.failedToLoadTopology(file.getAbsolutePath(), e);
         }
       }
     }
@@ -356,8 +357,7 @@ public class DefaultTopologyService
     File topoDir = topologiesDirectory;
 
     if(topoDir.isDirectory() && topoDir.canRead()) {
-      File[] results = topoDir.listFiles();
-      for (File f : results) {
+      for (File f : listFiles(topoDir)) {
         String fName = FilenameUtils.getBaseName(f.getName());
         if(fName.equals(t.getName())) {
           f.delete();
@@ -381,9 +381,9 @@ public class DefaultTopologyService
   public Map<String, List<String>> getServiceTestURLs(Topology t, GatewayConfig config) {
     File tFile = null;
     Map<String, List<String>> urls = new HashMap<>();
-    if(topologiesDirectory.isDirectory() && topologiesDirectory.canRead()) {
-      for(File f : topologiesDirectory.listFiles()){
-        if(FilenameUtils.removeExtension(f.getName()).equals(t.getName())){
+    if (topologiesDirectory.isDirectory() && topologiesDirectory.canRead()) {
+      for (File f : listFiles(topologiesDirectory)) {
+        if (FilenameUtils.removeExtension(f.getName()).equals(t.getName())) {
           tFile = f;
         }
       }
@@ -405,6 +405,63 @@ public class DefaultTopologyService
   }
 
   @Override
+  public boolean deployProviderConfiguration(String name, String content) {
+    return writeConfig(sharedProvidersDirectory, name, content);
+  }
+
+  @Override
+  public Collection<File> getProviderConfigurations() {
+    List<File> providerConfigs = new ArrayList<>();
+    for (File providerConfig : listFiles(sharedProvidersDirectory)) {
+      if (SharedProviderConfigMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(providerConfig.getName()))) {
+        providerConfigs.add(providerConfig);
+      }
+    }
+    return providerConfigs;
+  }
+
+  @Override
+  public boolean deleteProviderConfiguration(String name) {
+    boolean result = false;
+
+    File providerConfig = getExistingFile(sharedProvidersDirectory, name);
+    if (providerConfig != null) {
+      List<String> references = descriptorsMonitor.getReferencingDescriptors(providerConfig.getAbsolutePath());
+      if (references.isEmpty()) {
+        result = providerConfig.delete();
+      } else {
+        log.preventedDeletionOfSharedProviderConfiguration(providerConfig.getAbsolutePath());
+      }
+    } else {
+      result = true; // If it already does NOT exist, then the delete effectively succeeded
+    }
+
+    return result;
+  }
+
+  @Override
+  public boolean deployDescriptor(String name, String content) {
+    return writeConfig(descriptorsDirectory, name, content);
+  }
+
+  @Override
+  public Collection<File> getDescriptors() {
+    List<File> descriptors = new ArrayList<>();
+    for (File descriptor : listFiles(descriptorsDirectory)) {
+      if (DescriptorsMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(descriptor.getName()))) {
+        descriptors.add(descriptor);
+      }
+    }
+    return descriptors;
+  }
+
+  @Override
+  public boolean deleteDescriptor(String name) {
+    File descriptor = getExistingFile(descriptorsDirectory, name);
+    return (descriptor == null) || descriptor.delete();
+  }
+
+  @Override
   public void addTopologyChangeListener(TopologyListener listener) {
     listeners.add(listener);
   }
@@ -448,6 +505,7 @@ public class DefaultTopologyService
       File simpleDesc =
               new File(descriptorsDirectory, FilenameUtils.getBaseName(file.getName()) + "." + ext);
       if (simpleDesc.exists()) {
+        log.deletingDescriptorForTopologyDeletion(simpleDesc.getName(), file.getName());
         simpleDesc.delete();
       }
     }
@@ -481,20 +539,22 @@ public class DefaultTopologyService
 
       File configDirectory = calculateAbsoluteConfigDir(config);
       descriptorsDirectory = new File(configDirectory, "descriptors");
-      File sharedProvidersDirectory = new File(configDirectory, "shared-providers");
+      sharedProvidersDirectory = new File(configDirectory, "shared-providers");
 
       // Add support for conf/topologies
       initListener(topologiesDirectory, this, this);
 
       // Add support for conf/descriptors
-      DescriptorsMonitor dm = new DescriptorsMonitor(topologiesDirectory, aliasService);
+      descriptorsMonitor = new DescriptorsMonitor(topologiesDirectory, aliasService);
       initListener(descriptorsDirectory,
-                   dm,
-                   dm);
+                   descriptorsMonitor,
+                   descriptorsMonitor);
+      log.monitoringDescriptorChangesInDirectory(descriptorsDirectory.getAbsolutePath());
 
       // Add support for conf/shared-providers
-      SharedProviderConfigMonitor spm = new SharedProviderConfigMonitor(dm, descriptorsDirectory);
+      SharedProviderConfigMonitor spm = new SharedProviderConfigMonitor(descriptorsMonitor, descriptorsDirectory);
       initListener(sharedProvidersDirectory, spm, spm);
+      log.monitoringProviderConfigChangesInDirectory(sharedProvidersDirectory.getAbsolutePath());
 
       // For all the descriptors currently in the descriptors dir at start-up time, trigger topology generation.
       // This happens prior to the start-up loading of the topologies.
@@ -502,7 +562,7 @@ public class DefaultTopologyService
       if (descriptorFilenames != null) {
           for (String descriptorFilename : descriptorFilenames) {
               if (DescriptorsMonitor.isDescriptorFile(descriptorFilename)) {
-                  dm.onFileChange(new File(descriptorsDirectory, descriptorFilename));
+                  descriptorsMonitor.onFileChange(new File(descriptorsDirectory, descriptorFilename));
               }
           }
       }
@@ -514,6 +574,70 @@ public class DefaultTopologyService
 
 
   /**
+   * Utility method for listing the files in the specified directory.
+   * This method is "nicer" than the File#listFiles() because it will not return null.
+   *
+   * @param directory The directory whose files should be returned.
+   *
+   * @return A List of the Files on the directory.
+   */
+  private static List<File> listFiles(File directory) {
+    List<File> result = null;
+    File[] files = directory.listFiles();
+    if (files != null) {
+      result = Arrays.asList(files);
+    } else {
+      result = Collections.emptyList();
+    }
+    return result;
+  }
+
+  /**
+   * Search for a file in the specified directory whose base name (filename without extension) matches the
+   * specified basename.
+   *
+   * @param directory The directory in which to search.
+   * @param basename  The basename of interest.
+   *
+   * @return The matching File
+   */
+  private static File getExistingFile(File directory, String basename) {
+    File match = null;
+    for (File file : listFiles(directory)) {
+      if (FilenameUtils.getBaseName(file.getName()).equals(basename)) {
+        match = file;
+        break;
+      }
+    }
+    return match;
+  }
+
+  /**
+   * Write the specified content to a file.
+   *
+   * @param dest    The destination directory.
+   * @param name    The name of the file.
+   * @param content The contents of the file.
+   *
+   * @return true, if the write succeeds; otherwise, false.
+   */
+  private static boolean writeConfig(File dest, String name, String content) {
+    boolean result = false;
+
+    File destFile = new File(dest, name);
+    try {
+      FileUtils.writeStringToFile(destFile, content);
+      log.wroteConfigurationFile(destFile.getAbsolutePath());
+      result = true;
+    } catch (IOException e) {
+      log.failedToWriteConfigurationFile(destFile.getAbsolutePath(), e);
+    }
+
+    return result;
+  }
+
+
+  /**
    * Change handler for simple descriptors
    */
   public static class DescriptorsMonitor extends FileAlterationListenerAdaptor
@@ -543,7 +667,7 @@ public class DefaultTopologyService
     }
 
     List<String> getReferencingDescriptors(String providerConfigPath) {
-      List<String> result = providerConfigReferences.get(providerConfigPath);
+      List<String> result = providerConfigReferences.get(FilenameUtils.normalize(providerConfigPath));
       if (result == null) {
         result = Collections.emptyList();
       }
@@ -562,6 +686,7 @@ public class DefaultTopologyService
         File topologyFile =
                 new File(topologiesDir, FilenameUtils.getBaseName(file.getName()) + "." + ext);
         if (topologyFile.exists()) {
+          log.deletingTopologyForDescriptorDeletion(topologyFile.getName(), file.getName());
           topologyFile.delete();
         }
       }
@@ -574,8 +699,10 @@ public class DefaultTopologyService
           break;
         }
       }
+
       if (reference != null) {
         providerConfigReferences.get(reference).remove(normalizedFilePath);
+        log.removedProviderConfigurationReference(normalizedFilePath, reference);
       }
     }
 
@@ -584,6 +711,7 @@ public class DefaultTopologyService
       try {
         // When a simple descriptor has been created or modified, generate the new topology descriptor
         Map<String, File> result = SimpleDescriptorHandler.handle(file, topologiesDir, aliasService);
+        log.generatedTopologyForDescriptorChange(result.get("topology").getName(), file.getName());
 
         // Add the provider config reference relationship for handling updates to the provider config
         String providerConfig = FilenameUtils.normalize(result.get("reference").getAbsolutePath());
@@ -602,6 +730,7 @@ public class DefaultTopologyService
 
           // Add the current reference relationship
           refs.add(descriptorName);
+          log.addedProviderConfigurationReference(descriptorName, providerConfig);
         }
       } catch (Exception e) {
         log.simpleDescriptorHandlingError(file.getName(), e);
@@ -662,7 +791,7 @@ public class DefaultTopologyService
     private List<File> getReferencingDescriptors(File sharedProviderConfig) {
       List<File> references = new ArrayList<>();
 
-      for (File descriptor : descriptorsDir.listFiles()) {
+      for (File descriptor : listFiles(descriptorsDir)) {
         if (DescriptorsMonitor.SUPPORTED_EXTENSIONS.contains(FilenameUtils.getExtension(descriptor.getName()))) {
           for (String reference : descriptorsMonitor.getReferencingDescriptors(FilenameUtils.normalize(sharedProviderConfig.getAbsolutePath()))) {
             references.add(new File(reference));

http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-server/src/test/java/org/apache/hadoop/gateway/services/topology/DefaultTopologyServiceTest.java
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/java/org/apache/hadoop/gateway/services/topology/DefaultTopologyServiceTest.java b/gateway-server/src/test/java/org/apache/hadoop/gateway/services/topology/DefaultTopologyServiceTest.java
index 498d750..2357ad6 100644
--- a/gateway-server/src/test/java/org/apache/hadoop/gateway/services/topology/DefaultTopologyServiceTest.java
+++ b/gateway-server/src/test/java/org/apache/hadoop/gateway/services/topology/DefaultTopologyServiceTest.java
@@ -18,16 +18,15 @@
 package org.apache.hadoop.gateway.services.topology;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.FilenameUtils;
 import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.monitor.FileAlterationListener;
 import org.apache.commons.io.monitor.FileAlterationMonitor;
 import org.apache.commons.io.monitor.FileAlterationObserver;
 import org.apache.hadoop.gateway.config.GatewayConfig;
 import org.apache.hadoop.gateway.services.security.AliasService;
 import org.apache.hadoop.gateway.services.topology.impl.DefaultTopologyService;
 import org.apache.hadoop.gateway.topology.*;
-import org.apache.hadoop.gateway.topology.discovery.ServiceDiscovery;
-import org.apache.hadoop.gateway.topology.discovery.ServiceDiscoveryConfig;
-import org.apache.hadoop.gateway.topology.discovery.ServiceDiscoveryFactory;
 import org.apache.hadoop.test.TestUtils;
 import org.easymock.EasyMock;
 import org.junit.After;
@@ -38,13 +37,24 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.isA;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.core.IsNull.notNullValue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 
@@ -86,12 +96,6 @@ public class DefaultTopologyServiceTest {
     File dir = createDir();
     File topologyDir = new File(dir, "topologies");
 
-    File descriptorsDir = new File(dir, "descriptors");
-    descriptorsDir.mkdirs();
-
-    File sharedProvidersDir = new File(dir, "shared-providers");
-    sharedProvidersDir.mkdirs();
-
     long time = topologyDir.lastModified();
     try {
       createFile(topologyDir, "one.xml", "org/apache/hadoop/gateway/topology/file/topology-one.xml", time);
@@ -104,7 +108,7 @@ public class DefaultTopologyServiceTest {
 
       GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
       EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
-      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
+      EasyMock.expect(config.getGatewayConfDir()).andReturn(topologyDir.getParentFile().getAbsolutePath()).anyTimes();
       EasyMock.replay(config);
 
       provider.init(config, c);
@@ -167,59 +171,371 @@ public class DefaultTopologyServiceTest {
       assertThat(topology.getName(), is("one"));
       assertThat(topology.getTimestamp(), is(time));
 
+    } finally {
+      FileUtils.deleteQuietly(dir);
+    }
+  }
+
+  /**
+   * KNOX-1014
+   *
+   * Test the lifecycle relationship between simple descriptors and topology files.
+   *
+   * N.B. This test depends on the DummyServiceDiscovery extension being configured:
+   *        org.apache.hadoop.gateway.topology.discovery.test.extension.DummyServiceDiscovery
+   */
+  @Test
+  public void testSimpleDescriptorsTopologyGeneration() throws Exception {
+
+    File dir = createDir();
+    File topologyDir = new File(dir, "topologies");
+    topologyDir.mkdirs();
+
+    File descriptorsDir = new File(dir, "descriptors");
+    descriptorsDir.mkdirs();
+
+    File sharedProvidersDir = new File(dir, "shared-providers");
+    sharedProvidersDir.mkdirs();
+
+    try {
+      TestTopologyListener topoListener = new TestTopologyListener();
+      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
+
+      TopologyService provider = new DefaultTopologyService();
+      Map<String, String> c = new HashMap<>();
+
+      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
+      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
+      EasyMock.replay(config);
+
+      provider.init(config, c);
+      provider.addTopologyChangeListener(topoListener);
+      provider.reloadTopologies();
+
+
       // Add a simple descriptor to the descriptors dir to verify topology generation and loading (KNOX-1006)
-      // N.B. This part of the test depends on the DummyServiceDiscovery extension being configured:
-      //         org.apache.hadoop.gateway.topology.discovery.test.extension.DummyServiceDiscovery
       AliasService aliasService = EasyMock.createNiceMock(AliasService.class);
       EasyMock.expect(aliasService.getPasswordFromAliasForGateway(anyObject(String.class))).andReturn(null).anyTimes();
       EasyMock.replay(aliasService);
       DefaultTopologyService.DescriptorsMonitor dm =
-                                          new DefaultTopologyService.DescriptorsMonitor(topologyDir, aliasService);
+              new DefaultTopologyService.DescriptorsMonitor(topologyDir, aliasService);
+
+      // Listener to simulate the topologies directory monitor, to notice when a topology has been deleted
+      provider.addTopologyChangeListener(new TestTopologyDeleteListener((DefaultTopologyService)provider));
 
       // Write out the referenced provider config first
       File provCfgFile = createFile(sharedProvidersDir,
                                     "ambari-cluster-policy.xml",
                                     "org/apache/hadoop/gateway/topology/file/ambari-cluster-policy.xml",
-                                    1L);
+                                    System.currentTimeMillis());
       try {
         // Create the simple descriptor in the descriptors dir
-        File simpleDesc =
-                createFile(descriptorsDir,
-                           "four.json",
-                           "org/apache/hadoop/gateway/topology/file/simple-topology-four.json",
-                           1L);
+        File simpleDesc = createFile(descriptorsDir,
+                                     "four.json",
+                                     "org/apache/hadoop/gateway/topology/file/simple-topology-four.json",
+                                     System.currentTimeMillis());
 
         // Trigger the topology generation by noticing the simple descriptor
         dm.onFileChange(simpleDesc);
 
         // Load the generated topology
         provider.reloadTopologies();
+        Collection<Topology> topologies = provider.getTopologies();
+        assertThat(topologies.size(), is(1));
+        Iterator<Topology> iterator = topologies.iterator();
+        Topology topology = iterator.next();
+        assertThat("four", is(topology.getName()));
+        int serviceCount = topology.getServices().size();
+        assertEquals("Expected the same number of services as are declared in the simple dscriptor.", 10, serviceCount);
+
+        // Overwrite the simple descriptor with a different set of services, and check that the changes are
+        // propagated to the associated topology
+        simpleDesc = createFile(descriptorsDir,
+                                "four.json",
+                                "org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json",
+                                System.currentTimeMillis());
+        dm.onFileChange(simpleDesc);
+        provider.reloadTopologies();
+        topologies = provider.getTopologies();
+        topology = topologies.iterator().next();
+        assertNotEquals(serviceCount, topology.getServices().size());
+        assertEquals(6, topology.getServices().size());
+
+        // Delete the simple descriptor, and make sure that the associated topology file is deleted
+        simpleDesc.delete();
+        dm.onFileDelete(simpleDesc);
+        provider.reloadTopologies();
+        topologies = provider.getTopologies();
+        assertTrue(topologies.isEmpty());
+
+        // Delete a topology file, and make sure that the associated simple descriptor is deleted
+        // First, create another simple descriptor and generate its associated topology, so that there is
+        // a topology file available to be deleted
+        simpleDesc = createFile(descriptorsDir,
+                                "deleteme.json",
+                                "org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json",
+                                System.currentTimeMillis());
+        dm.onFileChange(simpleDesc);
+        provider.reloadTopologies();
         topologies = provider.getTopologies();
-        assertThat(topologies.size(), is(2));
-        names = new HashSet<>(Arrays.asList("one", "four"));
-        iterator = topologies.iterator();
-        topology = iterator.next();
-        assertThat(names, hasItem(topology.getName()));
-        names.remove(topology.getName());
-        topology = iterator.next();
-        assertThat(names, hasItem(topology.getName()));
-        names.remove(topology.getName());
-        assertThat(names.size(), is(0));
+        assertFalse(topologies.isEmpty());
+        topology = topologies.iterator().next();
+        assertEquals("deleteme", topology.getName());
+        File topologyFile = new File(topologyDir, topology.getName() + ".xml");
+        assertTrue(topologyFile.exists());
+        topologyFile.delete();
+        provider.reloadTopologies();
+        assertFalse("Simple descriptor should have been deleted because the associated topology was.",
+                    simpleDesc.exists());
+
       } finally {
         provCfgFile.delete();
-
       }
     } finally {
       FileUtils.deleteQuietly(dir);
     }
   }
 
+  /**
+   * KNOX-1014
+   *
+   * Test the lifecycle relationship between provider configuration files, simple descriptors, and topology files.
+   *
+   * N.B. This test depends on the DummyServiceDiscovery extension being configured:
+   *        org.apache.hadoop.gateway.topology.discovery.test.extension.DummyServiceDiscovery
+   */
+  @Test
+  public void testTopologiesUpdateFromProviderConfigChange() throws Exception {
+    File dir = createDir();
+    File topologyDir = new File(dir, "topologies");
+    topologyDir.mkdirs();
+
+    File descriptorsDir = new File(dir, "descriptors");
+    descriptorsDir.mkdirs();
+
+    File sharedProvidersDir = new File(dir, "shared-providers");
+    sharedProvidersDir.mkdirs();
+
+    try {
+      TestTopologyListener topoListener = new TestTopologyListener();
+      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
+
+      TopologyService ts = new DefaultTopologyService();
+      Map<String, String> c = new HashMap<>();
+
+      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
+      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
+      EasyMock.replay(config);
+
+      ts.init(config, c);
+      ts.addTopologyChangeListener(topoListener);
+      ts.reloadTopologies();
+
+      java.lang.reflect.Field dmField = ts.getClass().getDeclaredField("descriptorsMonitor");
+      dmField.setAccessible(true);
+      DefaultTopologyService.DescriptorsMonitor dm = (DefaultTopologyService.DescriptorsMonitor) dmField.get(ts);
+
+      // Write out the referenced provider configs first
+      createFile(sharedProvidersDir,
+                 "provider-config-one.xml",
+                 "org/apache/hadoop/gateway/topology/file/provider-config-one.xml",
+                 System.currentTimeMillis());
+
+      // Create the simple descriptor, which depends on provider-config-one.xml
+      File simpleDesc = createFile(descriptorsDir,
+                                   "six.json",
+                                   "org/apache/hadoop/gateway/topology/file/simple-descriptor-six.json",
+                                   System.currentTimeMillis());
+
+      // "Notice" the simple descriptor change, and generate a topology based on it
+      dm.onFileChange(simpleDesc);
+
+      // Load the generated topology
+      ts.reloadTopologies();
+      Collection<Topology> topologies = ts.getTopologies();
+      assertThat(topologies.size(), is(1));
+      Iterator<Topology> iterator = topologies.iterator();
+      Topology topology = iterator.next();
+      assertFalse("The Shiro provider is disabled in provider-config-one.xml",
+                  topology.getProvider("authentication", "ShiroProvider").isEnabled());
+
+      // Overwrite the referenced provider configuration with a different ShiroProvider config, and check that the
+      // changes are propagated to the associated topology
+      File providerConfig = createFile(sharedProvidersDir,
+                                       "provider-config-one.xml",
+                                       "org/apache/hadoop/gateway/topology/file/ambari-cluster-policy.xml",
+                                       System.currentTimeMillis());
+
+      // "Notice" the simple descriptor change as a result of the referenced config change
+      dm.onFileChange(simpleDesc);
+
+      // Load the generated topology
+      ts.reloadTopologies();
+      topologies = ts.getTopologies();
+      assertFalse(topologies.isEmpty());
+      topology = topologies.iterator().next();
+      assertTrue("The Shiro provider is enabled in ambari-cluster-policy.xml",
+              topology.getProvider("authentication", "ShiroProvider").isEnabled());
+
+      // Delete the provider configuration, and make sure that the associated topology file is unaffected.
+      // The topology file should not be affected because the simple descriptor handling will fail to resolve the
+      // referenced provider configuration.
+      providerConfig.delete();     // Delete the file
+      dm.onFileChange(simpleDesc); // The provider config deletion will trigger a descriptor change notification
+      ts.reloadTopologies();
+      topologies = ts.getTopologies();
+      assertFalse(topologies.isEmpty());
+      assertTrue("The Shiro provider is enabled in ambari-cluster-policy.xml",
+              topology.getProvider("authentication", "ShiroProvider").isEnabled());
+
+    } finally {
+      FileUtils.deleteQuietly(dir);
+    }
+  }
+
+  /**
+   * KNOX-1039
+   */
+  @Test
+  public void testConfigurationCRUDAPI() throws Exception {
+    File dir = createDir();
+    File topologyDir = new File(dir, "topologies");
+    topologyDir.mkdirs();
+
+    File descriptorsDir = new File(dir, "descriptors");
+    descriptorsDir.mkdirs();
+
+    File sharedProvidersDir = new File(dir, "shared-providers");
+    sharedProvidersDir.mkdirs();
+
+    try {
+      TestTopologyListener topoListener = new TestTopologyListener();
+      FileAlterationMonitor monitor = new FileAlterationMonitor(Long.MAX_VALUE);
+
+      TopologyService ts = new DefaultTopologyService();
+      Map<String, String> c = new HashMap<>();
+
+      GatewayConfig config = EasyMock.createNiceMock(GatewayConfig.class);
+      EasyMock.expect(config.getGatewayTopologyDir()).andReturn(topologyDir.getAbsolutePath()).anyTimes();
+      EasyMock.expect(config.getGatewayConfDir()).andReturn(descriptorsDir.getParentFile().getAbsolutePath()).anyTimes();
+      EasyMock.replay(config);
+
+      ts.init(config, c);
+      ts.addTopologyChangeListener(topoListener);
+      ts.reloadTopologies();
+
+      java.lang.reflect.Field dmField = ts.getClass().getDeclaredField("descriptorsMonitor");
+      dmField.setAccessible(true);
+      DefaultTopologyService.DescriptorsMonitor dm = (DefaultTopologyService.DescriptorsMonitor) dmField.get(ts);
+
+      final String simpleDescName  = "six.json";
+      final String provConfOne     = "provider-config-one.xml";
+      final String provConfTwo     = "ambari-cluster-policy.xml";
+
+      // "Deploy" the referenced provider configs first
+      boolean isDeployed =
+        ts.deployProviderConfiguration(provConfOne,
+                FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/hadoop/gateway/topology/file/provider-config-one.xml").toURI())));
+      assertTrue(isDeployed);
+      File provConfOneFile = new File(sharedProvidersDir, provConfOne);
+      assertTrue(provConfOneFile.exists());
+
+      isDeployed =
+        ts.deployProviderConfiguration(provConfTwo,
+                FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/hadoop/gateway/topology/file/ambari-cluster-policy.xml").toURI())));
+      assertTrue(isDeployed);
+      File provConfTwoFile = new File(sharedProvidersDir, provConfTwo);
+      assertTrue(provConfTwoFile.exists());
+
+      // Validate the provider configurations known by the topology service
+      Collection<File> providerConfigurations = ts.getProviderConfigurations();
+      assertNotNull(providerConfigurations);
+      assertEquals(2, providerConfigurations.size());
+      assertTrue(providerConfigurations.contains(provConfOneFile));
+      assertTrue(providerConfigurations.contains(provConfTwoFile));
+
+      // "Deploy" the simple descriptor, which depends on provConfOne
+      isDeployed =
+        ts.deployDescriptor(simpleDescName,
+            FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/hadoop/gateway/topology/file/simple-descriptor-six.json").toURI())));
+      assertTrue(isDeployed);
+      File simpleDesc = new File(descriptorsDir, simpleDescName);
+      assertTrue(simpleDesc.exists());
+
+      // Validate the simple descriptors known by the topology service
+      Collection<File> descriptors = ts.getDescriptors();
+      assertNotNull(descriptors);
+      assertEquals(1, descriptors.size());
+      assertTrue(descriptors.contains(simpleDesc));
+
+      // "Notice" the simple descriptor, so the provider configuration dependency relationship is recorded
+      dm.onFileChange(simpleDesc);
+
+      // Attempt to delete the referenced provConfOne
+      assertFalse("Should not be able to delete a provider configuration that is referenced by one or more descriptors",
+                  ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfOne)));
+
+      // Overwrite the simple descriptor with content that changes the provider config reference to provConfTwo
+      isDeployed =
+        ts.deployDescriptor(simpleDescName,
+              FileUtils.readFileToString(new File(ClassLoader.getSystemResource("org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json").toURI())));
+      assertTrue(isDeployed);
+      assertTrue(simpleDesc.exists());
+      ts.getProviderConfigurations();
+
+      // "Notice" the simple descriptor, so the provider configuration dependency relationship is updated
+      dm.onFileChange(simpleDesc);
+
+      // Attempt to delete the referenced provConfOne
+      assertTrue("Should be able to delete the provider configuration, now that it's not referenced by any descriptors",
+                 ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfOne)));
+
+      // Re-validate the provider configurations known by the topology service
+      providerConfigurations = ts.getProviderConfigurations();
+      assertNotNull(providerConfigurations);
+      assertEquals(1, providerConfigurations.size());
+      assertFalse(providerConfigurations.contains(provConfOneFile));
+      assertTrue(providerConfigurations.contains(provConfTwoFile));
+
+      // Attempt to delete the referenced provConfTwo
+      assertFalse("Should not be able to delete a provider configuration that is referenced by one or more descriptors",
+                  ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfTwo)));
+
+      // Delete the referencing simple descriptor
+      assertTrue(ts.deleteDescriptor(FilenameUtils.getBaseName(simpleDescName)));
+      assertFalse(simpleDesc.exists());
+
+      // Re-validate the simple descriptors known by the topology service
+      descriptors = ts.getDescriptors();
+      assertNotNull(descriptors);
+      assertTrue(descriptors.isEmpty());
+
+      // "Notice" the simple descriptor, so the provider configuration dependency relationship is updated
+      dm.onFileDelete(simpleDesc);
+
+      // Attempt to delete the referenced provConfTwo
+      assertTrue("Should be able to delete the provider configuration, now that it's not referenced by any descriptors",
+                 ts.deleteProviderConfiguration(FilenameUtils.getBaseName(provConfTwo)));
+
+      // Re-validate the provider configurations known by the topology service
+      providerConfigurations = ts.getProviderConfigurations();
+      assertNotNull(providerConfigurations);
+      assertTrue(providerConfigurations.isEmpty());
+
+    } finally {
+      FileUtils.deleteQuietly(dir);
+    }
+  }
+
   private void kickMonitor(FileAlterationMonitor monitor) {
     for (FileAlterationObserver observer : monitor.getObservers()) {
       observer.checkAndNotify();
     }
   }
 
+
   @Test
   public void testProviderParamsOrderIsPreserved() {
 
@@ -252,7 +568,7 @@ public class DefaultTopologyServiceTest {
 
   private class TestTopologyListener implements TopologyListener {
 
-    public ArrayList<List<TopologyEvent>> events = new ArrayList<List<TopologyEvent>>();
+    ArrayList<List<TopologyEvent>> events = new ArrayList<List<TopologyEvent>>();
 
     @Override
     public void handleTopologyEvent(List<TopologyEvent> events) {
@@ -261,4 +577,24 @@ public class DefaultTopologyServiceTest {
 
   }
 
+
+  private class TestTopologyDeleteListener implements TopologyListener {
+
+    FileAlterationListener delegate;
+
+    TestTopologyDeleteListener(FileAlterationListener delegate) {
+      this.delegate = delegate;
+    }
+
+    @Override
+    public void handleTopologyEvent(List<TopologyEvent> events) {
+      for (TopologyEvent event : events) {
+        if (event.getType().equals(TopologyEvent.Type.DELETED)) {
+          delegate.onFileDelete(new File(event.getTopology().getUri()));
+        }
+      }
+    }
+
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/provider-config-one.xml
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/provider-config-one.xml b/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/provider-config-one.xml
new file mode 100644
index 0000000..95465a4
--- /dev/null
+++ b/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/provider-config-one.xml
@@ -0,0 +1,74 @@
+<gateway>
+    <provider>
+        <role>authentication</role>
+        <name>ShiroProvider</name>
+        <enabled>false</enabled>
+        <param>
+            <!--
+            session timeout in minutes; this is really an idle timeout.
+            It defaults to 30 minutes if the property value is not defined.
+            The current client authentication will expire if the client idles continuously for more than this value.
+            -->
+            <name>sessionTimeout</name>
+            <value>30</value>
+        </param>
+        <param>
+            <name>main.ldapRealm</name>
+            <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>
+        </param>
+        <param>
+            <name>main.ldapContextFactory</name>
+            <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapContextFactory</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.contextFactory</name>
+            <value>$ldapContextFactory</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.userDnTemplate</name>
+            <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.contextFactory.url</name>
+            <value>ldap://localhost:33389</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.contextFactory.authenticationMechanism</name>
+            <value>simple</value>
+        </param>
+        <param>
+            <name>urls./**</name>
+            <value>authcBasic</value>
+        </param>
+    </provider>
+
+    <provider>
+        <role>identity-assertion</role>
+        <name>Default</name>
+        <enabled>true</enabled>
+    </provider>
+
+    <!--
+    Defines rules for mapping host names internal to a Hadoop cluster to externally accessible host names.
+    For example, a hadoop service running in AWS may return a response that includes URLs containing
+    some AWS internal host name.  If the client needs to make a subsequent request to the host identified
+    in those URLs they need to be mapped to external host names that the client Knox can use to connect.
+
+    If the external hostname and internal host names are the same, turn off this provider by setting the value of
+    the enabled parameter to false.
+
+    The name parameter specifies the external host names in a comma separated list.
+    The value parameter specifies corresponding internal host names in a comma separated list.
+
+    Note that when you are using Sandbox, the external hostname needs to be localhost, as seen in the out-
+    of-the-box sandbox.xml.  This is because Sandbox uses port mapping to allow clients to connect to the
+    Hadoop services using localhost.  In real clusters, external host names would almost never be localhost.
+    -->
+    <provider>
+        <role>hostmap</role>
+        <name>static</name>
+        <enabled>true</enabled>
+        <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>
+    </provider>
+
+</gateway>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json b/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json
new file mode 100644
index 0000000..52cec35
--- /dev/null
+++ b/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-five.json
@@ -0,0 +1,14 @@
+{
+  "discovery-type":"DUMMY",
+  "discovery-address":"http://c6401.ambari.apache.org:8080",
+  "provider-config-ref":"../shared-providers/ambari-cluster-policy.xml",
+  "cluster":"dummy",
+  "services":[
+    {"name":"NAMENODE"},
+    {"name":"JOBTRACKER"},
+    {"name":"WEBHDFS"},
+    {"name":"OOZIE"},
+    {"name":"HIVE"},
+    {"name":"RESOURCEMANAGER"}
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-six.json
----------------------------------------------------------------------
diff --git a/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-six.json b/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-six.json
new file mode 100644
index 0000000..e78f193
--- /dev/null
+++ b/gateway-server/src/test/resources/org/apache/hadoop/gateway/topology/file/simple-descriptor-six.json
@@ -0,0 +1,18 @@
+{
+  "discovery-type":"DUMMY",
+  "discovery-address":"http://c6401.ambari.apache.org:8080",
+  "provider-config-ref":"../shared-providers/provider-config-one.xml",
+  "cluster":"dummy",
+  "services":[
+    {"name":"NAMENODE"},
+    {"name":"JOBTRACKER"},
+    {"name":"WEBHDFS"},
+    {"name":"WEBHCAT"},
+    {"name":"OOZIE"},
+    {"name":"WEBHBASE"},
+    {"name":"HIVE"},
+    {"name":"RESOURCEMANAGER"},
+    {"name":"AMBARI", "urls":["http://c6401.ambari.apache.org:8080"]},
+    {"name":"AMBARIUI", "urls":["http://c6401.ambari.apache.org:8080"]}
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/HrefListingMarshaller.java
----------------------------------------------------------------------
diff --git a/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/HrefListingMarshaller.java b/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/HrefListingMarshaller.java
new file mode 100644
index 0000000..c251213
--- /dev/null
+++ b/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/HrefListingMarshaller.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.gateway.service.admin;
+
+import org.eclipse.persistence.jaxb.JAXBContextProperties;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import java.util.HashMap;
+import java.util.Map;
+
+@Provider
+@Produces({MediaType.APPLICATION_JSON})
+public class HrefListingMarshaller implements MessageBodyWriter<TopologiesResource.HrefListing> {
+
+    @Override
+    public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
+        return (TopologiesResource.HrefListing.class == type);
+    }
+
+    @Override
+    public long getSize(TopologiesResource.HrefListing instance,
+                        Class<?> type,
+                        Type genericType,
+                        Annotation[] annotations,
+                        MediaType mediaType) {
+        return -1;
+    }
+
+    @Override
+    public void writeTo(TopologiesResource.HrefListing instance,
+                        Class<?> type,
+                        Type genericType,
+                        Annotation[] annotations,
+                        MediaType mediaType,
+                        MultivaluedMap<String, Object> httpHeaders,
+                        OutputStream entityStream) throws IOException, WebApplicationException {
+        try {
+            Map<String, Object> properties = new HashMap<>(1);
+            properties.put( JAXBContextProperties.MEDIA_TYPE, mediaType.toString());
+            JAXBContext context = JAXBContext.newInstance(new Class[]{TopologiesResource.HrefListing.class}, properties);
+            Marshaller m = context.createMarshaller();
+            m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
+            m.marshal(instance, entityStream);
+        } catch (JAXBException e) {
+            throw new IOException(e);
+        }
+    }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/TopologiesResource.java
----------------------------------------------------------------------
diff --git a/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/TopologiesResource.java b/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/TopologiesResource.java
index 1504eca..28573bf 100644
--- a/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/TopologiesResource.java
+++ b/gateway-service-admin/src/main/java/org/apache/hadoop/gateway/service/admin/TopologiesResource.java
@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.gateway.service.admin;
 
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.FilenameUtils;
+import org.apache.hadoop.gateway.i18n.GatewaySpiMessages;
+import org.apache.hadoop.gateway.i18n.messages.MessagesFactory;
 import org.apache.hadoop.gateway.service.admin.beans.BeanConverter;
 import org.apache.hadoop.gateway.service.admin.beans.Topology;
 import org.apache.hadoop.gateway.services.GatewayServices;
@@ -37,25 +42,47 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlElementWrapper;
+import java.io.File;
+import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 
 import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
 import static javax.ws.rs.core.MediaType.APPLICATION_XML;
+import static javax.ws.rs.core.MediaType.TEXT_PLAIN;
+
 import static javax.ws.rs.core.Response.ok;
+import static javax.ws.rs.core.Response.created;
+import static javax.ws.rs.core.Response.notModified;
+import static javax.ws.rs.core.Response.status;
+
 
 @Path("/api/v1")
 public class TopologiesResource {
+
+  private static final String XML_EXT  = ".xml";
+  private static final String JSON_EXT = ".json";
+
+  private static final String TOPOLOGIES_API_PATH    = "topologies";
+  private static final String SINGLE_TOPOLOGY_API_PATH = TOPOLOGIES_API_PATH + "/{id}";
+  private static final String PROVIDERCONFIG_API_PATH = "providerconfig";
+  private static final String SINGLE_PROVIDERCONFIG_API_PATH = PROVIDERCONFIG_API_PATH + "/{name}";
+  private static final String DESCRIPTORS_API_PATH    = "descriptors";
+  private static final String SINGLE_DESCRIPTOR_API_PATH = DESCRIPTORS_API_PATH + "/{name}";
+
+  private static GatewaySpiMessages log = MessagesFactory.get(GatewaySpiMessages.class);
+
   @Context
   private HttpServletRequest request;
 
   @GET
   @Produces({APPLICATION_JSON, APPLICATION_XML})
-  @Path("topologies/{id}")
+  @Path(SINGLE_TOPOLOGY_API_PATH)
   public Topology getTopology(@PathParam("id") String id) {
     GatewayServices services = (GatewayServices) request.getServletContext()
         .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
@@ -78,7 +105,7 @@ public class TopologiesResource {
 
   @GET
   @Produces({APPLICATION_JSON, APPLICATION_XML})
-  @Path("topologies")
+  @Path(TOPOLOGIES_API_PATH)
   public SimpleTopologyWrapper getTopologies() {
     GatewayServices services = (GatewayServices) request.getServletContext()
         .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
@@ -106,7 +133,7 @@ public class TopologiesResource {
 
   @PUT
   @Consumes({APPLICATION_JSON, APPLICATION_XML})
-  @Path("topologies/{id}")
+  @Path(SINGLE_TOPOLOGY_API_PATH)
   public Topology uploadTopology(@PathParam("id") String id, Topology t) {
 
     GatewayServices gs = (GatewayServices) request.getServletContext()
@@ -122,7 +149,7 @@ public class TopologiesResource {
 
   @DELETE
   @Produces(APPLICATION_JSON)
-  @Path("topologies/{id}")
+  @Path(SINGLE_TOPOLOGY_API_PATH)
   public Response deleteTopology(@PathParam("id") String id) {
     boolean deleted = false;
     if(!"admin".equals(id)) {
@@ -143,6 +170,244 @@ public class TopologiesResource {
     return ok().entity("{ \"deleted\" : " + deleted + " }").build();
   }
 
+  @GET
+  @Produces({APPLICATION_JSON})
+  @Path(PROVIDERCONFIG_API_PATH)
+  public HrefListing getProviderConfigurations() {
+    HrefListing listing = new HrefListing();
+    listing.setHref(buildHref(request));
+
+    GatewayServices services =
+            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
+
+    List<HrefListItem> configs = new ArrayList<>();
+    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
+    // Get all the simple descriptor file names
+    for (File providerConfig : ts.getProviderConfigurations()){
+      String id = FilenameUtils.getBaseName(providerConfig.getName());
+      configs.add(new HrefListItem(buildHref(id, request), providerConfig.getName()));
+    }
+
+    listing.setItems(configs);
+    return listing;
+  }
+
+  @GET
+  @Produces({APPLICATION_XML})
+  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
+  public Response getProviderConfiguration(@PathParam("name") String name) {
+    Response response;
+
+    GatewayServices services =
+            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
+
+    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
+
+    File providerConfigFile = null;
+
+    for (File pc : ts.getProviderConfigurations()){
+      // If the file name matches the specified id
+      if (FilenameUtils.getBaseName(pc.getName()).equals(name)) {
+        providerConfigFile = pc;
+        break;
+      }
+    }
+
+    if (providerConfigFile != null) {
+      byte[] content = null;
+      try {
+        content = FileUtils.readFileToByteArray(providerConfigFile);
+        response = ok().entity(content).build();
+      } catch (IOException e) {
+        log.failedToReadConfigurationFile(providerConfigFile.getAbsolutePath(), e);
+        response = Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
+      }
+
+    } else {
+      response = Response.status(Response.Status.NOT_FOUND).build();
+    }
+    return response;
+  }
+
+  @DELETE
+  @Produces(APPLICATION_JSON)
+  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
+  public Response deleteProviderConfiguration(@PathParam("name") String name) {
+    Response response;
+    GatewayServices services =
+            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
+
+    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
+    if (ts.deleteProviderConfiguration(name)) {
+      response = ok().entity("{ \"deleted\" : \"provider config " + name + "\" }").build();
+    } else {
+      response = notModified().build();
+    }
+    return response;
+  }
+
+
+  @DELETE
+  @Produces(APPLICATION_JSON)
+  @Path(SINGLE_DESCRIPTOR_API_PATH)
+  public Response deleteSimpleDescriptor(@PathParam("name") String name) {
+    Response response = null;
+    if(!"admin".equals(name)) {
+      GatewayServices services =
+              (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
+
+      TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
+      if (ts.deleteDescriptor(name)) {
+        response = ok().entity("{ \"deleted\" : \"descriptor " + name + "\" }").build();
+      }
+    }
+
+    if (response == null) {
+      response = notModified().build();
+    }
+
+    return response;
+  }
+
+
+  @PUT
+  @Consumes({APPLICATION_XML})
+  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
+  public Response uploadProviderConfiguration(@PathParam("name") String name, String content) {
+    Response response = null;
+
+    GatewayServices gs =
+            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
+
+    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
+
+    boolean isUpdate = configFileExists(ts.getProviderConfigurations(), name);
+
+    String filename = name.endsWith(XML_EXT) ? name : name + XML_EXT;
+    if (ts.deployProviderConfiguration(filename, content)) {
+      try {
+        if (isUpdate) {
+          response = Response.noContent().build();
+        } else{
+          response = created(new URI(buildHref(request))).build();
+        }
+      } catch (URISyntaxException e) {
+        log.invalidResourceURI(e.getInput(), e.getReason(), e);
+        response = status(Response.Status.BAD_REQUEST).entity("{ \"error\" : \"Failed to deploy provider configuration " + name + "\" }").build();
+      }
+    }
+
+    return response;
+  }
+
+
+  private boolean configFileExists(Collection<File> existing, String candidateName) {
+    boolean result = false;
+    for (File exists : existing) {
+      if (FilenameUtils.getBaseName(exists.getName()).equals(candidateName)) {
+        result = true;
+        break;
+      }
+    }
+    return result;
+  }
+
+
+  @PUT
+  @Consumes({APPLICATION_JSON})
+  @Path(SINGLE_DESCRIPTOR_API_PATH)
+  public Response uploadSimpleDescriptor(@PathParam("name") String name, String content) {
+    Response response = null;
+
+    GatewayServices gs =
+            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
+
+    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
+
+    boolean isUpdate = configFileExists(ts.getDescriptors(), name);
+
+    String filename = name.endsWith(JSON_EXT) ? name : name + JSON_EXT;
+    if (ts.deployDescriptor(filename, content)) {
+      try {
+        if (isUpdate) {
+          response = Response.noContent().build();
+        } else {
+          response = created(new URI(buildHref(request))).build();
+        }
+      } catch (URISyntaxException e) {
+        log.invalidResourceURI(e.getInput(), e.getReason(), e);
+        response = status(Response.Status.BAD_REQUEST).entity("{ \"error\" : \"Failed to deploy descriptor " + name + "\" }").build();
+      }
+    }
+
+    return response;
+  }
+
+
+  @GET
+  @Produces({APPLICATION_JSON})
+  @Path(DESCRIPTORS_API_PATH)
+  public HrefListing getSimpleDescriptors() {
+    HrefListing listing = new HrefListing();
+    listing.setHref(buildHref(request));
+
+    GatewayServices services =
+            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
+
+    List<HrefListItem> descriptors = new ArrayList<>();
+    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
+    for (File descriptor : ts.getDescriptors()){
+      String id = FilenameUtils.getBaseName(descriptor.getName());
+      descriptors.add(new HrefListItem(buildHref(id, request), descriptor.getName()));
+    }
+
+    listing.setItems(descriptors);
+    return listing;
+  }
+
+
+  @GET
+  @Produces({APPLICATION_JSON, TEXT_PLAIN})
+  @Path(SINGLE_DESCRIPTOR_API_PATH)
+  public Response getSimpleDescriptor(@PathParam("name") String name) {
+    Response response;
+
+    GatewayServices services =
+            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
+
+    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
+
+    File descriptorFile = null;
+
+    for (File sd : ts.getDescriptors()){
+      // If the file name matches the specified id
+      if (FilenameUtils.getBaseName(sd.getName()).equals(name)) {
+        descriptorFile = sd;
+        break;
+      }
+    }
+
+    if (descriptorFile != null) {
+      String mediaType = APPLICATION_JSON;
+
+      byte[] content = null;
+      try {
+        if ("yml".equals(FilenameUtils.getExtension(descriptorFile.getName()))) {
+          mediaType = TEXT_PLAIN;
+        }
+        content = FileUtils.readFileToByteArray(descriptorFile);
+        response = ok().type(mediaType).entity(content).build();
+      } catch (IOException e) {
+        log.failedToReadConfigurationFile(descriptorFile.getAbsolutePath(), e);
+        response = Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
+      }
+    } else {
+      response = Response.status(Response.Status.NOT_FOUND).build();
+    }
+
+    return response;
+  }
+
 
   private static class TopologyComparator implements Comparator<SimpleTopology> {
     @Override
@@ -151,13 +416,14 @@ public class TopologiesResource {
     }
   }
 
-   String buildURI(org.apache.hadoop.gateway.topology.Topology topology, GatewayConfig config, HttpServletRequest req){
+
+  String buildURI(org.apache.hadoop.gateway.topology.Topology topology, GatewayConfig config, HttpServletRequest req){
     String uri = buildXForwardBaseURL(req);
 
-//    Strip extra context
+    // Strip extra context
     uri = uri.replace(req.getContextPath(), "");
 
-//    Add the gateway path
+    // Add the gateway path
     String gatewayPath;
     if(config.getGatewayPath() != null){
       gatewayPath = config.getGatewayPath();
@@ -170,20 +436,31 @@ public class TopologiesResource {
     return uri;
   }
 
-   String buildHref(org.apache.hadoop.gateway.topology.Topology t, HttpServletRequest req) {
+  String buildHref(HttpServletRequest req) {
+    return buildHref((String)null, req);
+  }
+
+  String buildHref(String id, HttpServletRequest req) {
     String href = buildXForwardBaseURL(req);
-//    Make sure that the pathInfo doesn't have any '/' chars at the end.
+    // Make sure that the pathInfo doesn't have any '/' chars at the end.
     String pathInfo = req.getPathInfo();
-    if(pathInfo.endsWith("/")) {
-      while(pathInfo.endsWith("/")) {
-        pathInfo = pathInfo.substring(0, pathInfo.length() - 1);
-      }
+    while(pathInfo.endsWith("/")) {
+      pathInfo = pathInfo.substring(0, pathInfo.length() - 1);
+    }
+
+    href += pathInfo;
+
+    if (id != null) {
+      href += "/" + id;
     }
 
-    href += pathInfo + "/" + t.getName();
     return href;
   }
 
+   String buildHref(org.apache.hadoop.gateway.topology.Topology t, HttpServletRequest req) {
+     return buildHref(t.getName(), req);
+  }
+
   private SimpleTopology getSimpleTopology(org.apache.hadoop.gateway.topology.Topology t, GatewayConfig config) {
     String uri = buildURI(t, config, request);
     String href = buildHref(t, request);
@@ -200,34 +477,34 @@ public class TopologiesResource {
 
     String baseURL = "";
 
-//    Get Protocol
+    // Get Protocol
     if(req.getHeader(X_Forwarded_Proto) != null){
       baseURL += req.getHeader(X_Forwarded_Proto) + "://";
     } else {
       baseURL += req.getProtocol() + "://";
     }
 
-//    Handle Server/Host and Port Here
+    // Handle Server/Host and Port Here
     if (req.getHeader(X_Forwarded_Host) != null && req.getHeader(X_Forwarded_Port) != null){
-//        Double check to see if host has port
+      // Double check to see if host has port
       if(req.getHeader(X_Forwarded_Host).contains(req.getHeader(X_Forwarded_Port))){
         baseURL += req.getHeader(X_Forwarded_Host);
       } else {
-//        If there's no port, add the host and port together;
+        // If there's no port, add the host and port together;
         baseURL += req.getHeader(X_Forwarded_Host) + ":" + req.getHeader(X_Forwarded_Port);
       }
     } else if(req.getHeader(X_Forwarded_Server) != null && req.getHeader(X_Forwarded_Port) != null){
-//      Tack on the server and port if they're available. Try host if server not available
+      // Tack on the server and port if they're available. Try host if server not available
       baseURL += req.getHeader(X_Forwarded_Server) + ":" + req.getHeader(X_Forwarded_Port);
     } else if(req.getHeader(X_Forwarded_Port) != null) {
-//      if we at least have a port, we can use it.
+      // if we at least have a port, we can use it.
       baseURL += req.getServerName() + ":" + req.getHeader(X_Forwarded_Port);
     } else {
-//      Resort to request members
+      // Resort to request members
       baseURL += req.getServerName() + ":" + req.getLocalPort();
     }
 
-//    Handle Server context
+    // Handle Server context
     if( req.getHeader(X_Forwarded_Context) != null ) {
       baseURL += req.getHeader( X_Forwarded_Context );
     } else {
@@ -237,6 +514,64 @@ public class TopologiesResource {
     return baseURL;
   }
 
+
+  static class HrefListing {
+    @JsonProperty
+    String href;
+
+    @JsonProperty
+    List<HrefListItem> items;
+
+    HrefListing() {}
+
+    public void setHref(String href) {
+      this.href = href;
+    }
+
+    public String getHref() {
+      return href;
+    }
+
+    public void setItems(List<HrefListItem> items) {
+      this.items = items;
+    }
+
+    public List<HrefListItem> getItems() {
+      return items;
+    }
+  }
+
+  static class HrefListItem {
+    @JsonProperty
+    String href;
+
+    @JsonProperty
+    String name;
+
+    HrefListItem() {}
+
+    HrefListItem(String href, String name) {
+      this.href = href;
+      this.name = name;
+    }
+
+    public void setHref(String href) {
+      this.href = href;
+    }
+
+    public String getHref() {
+      return href;
+    }
+
+    public void setName(String name) {
+      this.name = name;
+    }
+    public String getName() {
+      return name;
+    }
+  }
+
+
   @XmlAccessorType(XmlAccessType.NONE)
   public static class SimpleTopology {
 

http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-spi/src/main/java/org/apache/hadoop/gateway/i18n/GatewaySpiMessages.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/hadoop/gateway/i18n/GatewaySpiMessages.java b/gateway-spi/src/main/java/org/apache/hadoop/gateway/i18n/GatewaySpiMessages.java
index 45fcb54..aad4d8a 100644
--- a/gateway-spi/src/main/java/org/apache/hadoop/gateway/i18n/GatewaySpiMessages.java
+++ b/gateway-spi/src/main/java/org/apache/hadoop/gateway/i18n/GatewaySpiMessages.java
@@ -79,7 +79,13 @@ public interface GatewaySpiMessages {
   @Message( level = MessageLevel.ERROR, text = "Gateway has failed to start. Unable to prompt user for master secret setup. Please consider using knoxcli.sh create-master" )
   void unableToPromptForMasterUseKnoxCLI();
 
- @Message( level = MessageLevel.ERROR, text = "Error in generating certificate: {0}" )
- void failedToGenerateCertificate( @StackTrace( level = MessageLevel.ERROR ) Exception e );
+  @Message( level = MessageLevel.ERROR, text = "Error in generating certificate: {0}" )
+  void failedToGenerateCertificate( @StackTrace( level = MessageLevel.ERROR ) Exception e );
+
+  @Message(level = MessageLevel.ERROR, text = "Failed to read configuration: {0}")
+  void failedToReadConfigurationFile(final String filePath, @StackTrace(level = MessageLevel.DEBUG) Exception e );
+
+  @Message(level = MessageLevel.ERROR, text = "Invalid resource URI {0} : {1}")
+  void invalidResourceURI(final String uri, final String reason, @StackTrace(level = MessageLevel.DEBUG) Exception e );
 
 }

http://git-wip-us.apache.org/repos/asf/knox/blob/9ad9bcdb/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/topology/TopologyService.java
----------------------------------------------------------------------
diff --git a/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/topology/TopologyService.java b/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/topology/TopologyService.java
index a964f38..017b3ec 100644
--- a/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/topology/TopologyService.java
+++ b/gateway-spi/src/main/java/org/apache/hadoop/gateway/services/topology/TopologyService.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.gateway.services.Service;
 import org.apache.hadoop.gateway.topology.Topology;
 import org.apache.hadoop.gateway.topology.TopologyListener;
 
+import java.io.File;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -29,22 +30,34 @@ import java.util.Map;
 
 public interface TopologyService extends Service {
 
-  public void reloadTopologies();
+  void reloadTopologies();
 
-  public void deployTopology(Topology t);
+  void deployTopology(Topology t);
 
-  public void redeployTopologies(String topologyName);
+  void redeployTopologies(String topologyName);
 
-  public void addTopologyChangeListener(TopologyListener listener);
+  void addTopologyChangeListener(TopologyListener listener);
 
-  public void startMonitor() throws Exception;
+  void startMonitor() throws Exception;
 
-  public void stopMonitor() throws Exception;
+  void stopMonitor() throws Exception;
 
-  public Collection<Topology> getTopologies();
+  Collection<Topology> getTopologies();
 
-  public void deleteTopology(Topology t);
+  boolean deployProviderConfiguration(String name, String content);
 
-  public Map<String, List<String>> getServiceTestURLs(Topology t, GatewayConfig config);
+  Collection<File> getProviderConfigurations();
 
-  }
+  boolean deployDescriptor(String name, String content);
+
+  Collection<File> getDescriptors();
+
+  void deleteTopology(Topology t);
+
+  boolean deleteDescriptor(String name);
+
+  boolean deleteProviderConfiguration(String name);
+
+  Map<String, List<String>> getServiceTestURLs(Topology t, GatewayConfig config);
+
+}


Mime
View raw message