drill-dev mailing list archives

From jacq...@apache.org
Subject [37/38] git commit: Implement storage plugin for INFORMATION_SCHEMA
Date Tue, 04 Mar 2014 08:08:04 GMT
Implement storage plugin for INFORMATION_SCHEMA


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/0ca797a4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/0ca797a4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/0ca797a4

Branch: refs/heads/master
Commit: 0ca797a41a0fc7f36934c259b4c179f9713e9b8e
Parents: ae041d0
Author: John Morris <jmorris@maprtech.com>
Authored: Tue Feb 25 00:31:11 2014 -0800
Committer: Jacques Nadeau <jacques@apache.org>
Committed: Mon Mar 3 23:22:18 2014 -0800

----------------------------------------------------------------------
 .../apache/drill/exec/ops/FragmentContext.java  |   2 +-
 .../apache/drill/exec/record/SchemaBuilder.java |   4 +-
 .../drill/exec/store/StoragePluginRegistry.java |   9 +-
 .../exec/store/ischema/EmptyVectorSet.java      | 240 +++++++++++++++++++
 .../drill/exec/store/ischema/FixedTable.java    |  93 +++++++
 .../store/ischema/InfoSchemaBatchCreator.java   |  21 ++
 .../exec/store/ischema/InfoSchemaConfig.java    |  18 ++
 .../store/ischema/InfoSchemaDrillTable.java     |  23 ++
 .../exec/store/ischema/InfoSchemaGroupScan.java |  67 ++++++
 .../store/ischema/InfoSchemaStoragePlugin.java  |  74 ++++++
 .../exec/store/ischema/InfoSchemaSubScan.java   |  24 ++
 .../exec/store/ischema/InfoSchemaTable.java     | 151 ++++++++++++
 .../drill/exec/store/ischema/OptiqProvider.java | 234 ++++++++++++++++++
 .../drill/exec/store/ischema/PipeProvider.java  |  80 +++++++
 .../drill/exec/store/ischema/RowProvider.java   |  27 +++
 .../exec/store/ischema/RowRecordReader.java     | 136 +++++++++++
 .../drill/exec/store/ischema/SelectedTable.java |  44 ++++
 .../drill/exec/store/ischema/VectorSet.java     |  38 +++
 .../drill/exec/store/ischema/package-info.java  |  21 ++
 .../drill/exec/store/ischema/OrphanSchema.java  |  75 ++++++
 .../exec/store/ischema/TestOrphanSchema.java    | 141 +++++++++++
 .../exec/store/ischema/TestTableProvider.java   | 158 ++++++++++++
 .../apache/drill/jdbc/test/TestJdbcQuery.java   |   9 +
 23 files changed, 1684 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
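
For orientation: once the registry change below hard-codes the plugin under the name INFORMATION_SCHEMA, the new tables become queryable through ordinary SQL. A minimal sketch of what that enables, assuming a local Drillbit and the "jdbc:drill:zk=local" connection string (both assumptions for illustration, not part of this commit):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class InfoSchemaSmokeTest {
      public static void main(String[] args) throws Exception {
        // Connection URL is an assumption; adjust for your Drillbit setup.
        Connection conn = DriverManager.getConnection("jdbc:drill:zk=local");
        Statement stmt = conn.createStatement();
        // TABLES is backtick-quoted in case the parser treats it as a keyword.
        ResultSet rs = stmt.executeQuery(
            "SELECT TABLE_SCHEMA, TABLE_NAME FROM INFORMATION_SCHEMA.`TABLES`");
        while (rs.next()) {
          System.out.println(rs.getString(1) + "." + rs.getString(2));
        }
        conn.close();
      }
    }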


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentContext.java
index 630355e..8462622 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentContext.java
@@ -96,7 +96,7 @@ public class FragmentContext implements Closeable {
   }
   
   public SchemaPlus getRootSchema(){
-    return null;
+    return context.getStorage().getSchemaFactory().getOrphanedRootSchema();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaBuilder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaBuilder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaBuilder.java
index 556e68b..7c6b105 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaBuilder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaBuilder.java
@@ -17,8 +17,8 @@
  */
 package org.apache.drill.exec.record;
 
+import java.util.LinkedHashSet;
 import java.util.List;
-import java.util.Set;
 
 import org.apache.drill.exec.exception.SchemaChangeException;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
@@ -31,7 +31,7 @@ import com.google.common.collect.Sets;
  * builder will always check that this schema is a equal or more materialized version of the current schema.
  */
 public class SchemaBuilder {
-  private Set<MaterializedField> fields = Sets.newHashSet();
+  private LinkedHashSet<MaterializedField> fields = Sets.newLinkedHashSet();
 
   private BatchSchema.SelectionVectorMode selectionVectorMode = SelectionVectorMode.NONE;
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistry.java
index 0e9d1e6..ce15341 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistry.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/StoragePluginRegistry.java
@@ -46,11 +46,13 @@ import org.apache.drill.exec.planner.logical.StorageEngines;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.store.dfs.FileSystemPlugin;
 import org.apache.drill.exec.store.dfs.FormatPlugin;
+import org.apache.drill.exec.store.ischema.InfoSchemaConfig;
+import org.apache.drill.exec.store.ischema.InfoSchemaStoragePlugin;
 
 import com.google.common.base.Charsets;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.io.Resources;
-import com.google.hive12.common.collect.Maps;
+
 
 public class StoragePluginRegistry implements Iterable<Map.Entry<String, StoragePlugin>>{
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(StoragePluginRegistry.class);
@@ -92,6 +94,7 @@ public class StoragePluginRegistry implements Iterable<Map.Entry<String, Storage
       }
     }
     
+    
   }
   
   private Map<String, StoragePlugin> createEngines(){
@@ -112,6 +115,8 @@ public class StoragePluginRegistry implements Iterable<Map.Entry<String, Storage
         logger.error("Failure while setting up StoragePlugin with name: '{}'.", config.getKey(), e);
       }
     }
+    activeEngines.put("INFORMATION_SCHEMA", new InfoSchemaStoragePlugin(new InfoSchemaConfig(), context, "INFORMATION_SCHEMA"));
+    
     return activeEngines;
   }
 
@@ -246,7 +251,7 @@ public class StoragePluginRegistry implements Iterable<Map.Entry<String, Storage
 
   private class OrphanPlus implements SchemaPlus{
 
-    private HashMap<String, SchemaPlus> schemas = Maps.newHashMap();
+    private HashMap<String, SchemaPlus> schemas = new HashMap<>();
     
     @Override
     public SchemaPlus getParentSchema() {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/EmptyVectorSet.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/EmptyVectorSet.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/EmptyVectorSet.java
new file mode 100644
index 0000000..5379bc9
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/EmptyVectorSet.java
@@ -0,0 +1,240 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.ischema;
+
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.drill.common.expression.ExpressionPosition;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.logical.StoragePluginConfig;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.expr.TypeHelper;
+import org.apache.drill.exec.memory.BufferAllocator;
+import org.apache.drill.exec.planner.logical.DrillTable;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.vector.AllocationHelper;
+import org.apache.drill.exec.vector.IntVector;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.VarCharVector;
+
+/**
+ * Manages the value vectors used to implement columns in a record batch.
+ * The vectors themselves are created by subclasses, so this class
+ * provides the handling that is generic across all vector sets.
+ */
+public abstract class EmptyVectorSet implements VectorSet {
+  
+  protected List<ValueVector> vectors;
+
+  /**
+   * Prepare to construct a new set of vectors.
+   * The actual vectors will be created by subclasses
+   * by the time our "next" procedure is invoked.
+   */
+  public EmptyVectorSet() {
+    vectors = new ArrayList<ValueVector>();
+  }
+
+  /**
+   * Prepare to read the next batch of rows.
+   * @param maxRows
+   */
+  @Override
+  public void beginBatch(int maxRows) {
+    
+    // Allocate memory for each column (value vector)
+    for (ValueVector v: vectors) {
+      AllocationHelper.allocate(v, maxRows, 100); // TODO: later, use configured size
+    }
+  }
+  
+  
+  /**
+   * Write a row to the value vectors. 
+   * This is a routine to "assign generic objects to generic ValueVectors"
+   * which can be overridden to optimize for fixed types of vectors and 
+   * fixed types of values.
+   * @param index - the position within the value vectors.
+   * @param row - the objects to write into the vectors
+   * @return true if there was room to write all the values.
+   */
+  @Override
+  public boolean writeRowToVectors(int index, Object[] row) {
+    for (int i=0; i<row.length; i++) {
+      if (!setSafe(vectors.get(i), index, row[i])) {
+        return false;
+      }
+    } 
+    return true;
+  }
+  
+
+  
+  /**
+   * Signal the end of the current batch.
+   * @param actualRowCount
+   */
+  @Override
+  public void endBatch(int actualRowCount) {
+    
+    // Finalize each of the value vectors.
+    for (ValueVector v: vectors) {
+      v.getMutator().setValueCount(actualRowCount);
+    }
+  }
+  
+  /**
+   * When everything is done, free up the resources.
+   */
+  @Override
+  public void cleanup() {
+    for (ValueVector v: vectors) {
+      v.close();
+    }
+  }
+  
+  
+  /**
+   * Make the value vectors visible to whoever needs them.
+   */
+  public List<ValueVector> getValueVectors() {
+    return vectors;
+  }
+  
+
+  /**
+   * Estimate how many rows will fit in a given amount of memory.
+   * Perfect estimates are nice, but things work out OK if
+   * the estimates are a bit off.
+   */
+  @Override
+  public int getEstimatedRowCount(int bufSize) {
+    return Math.max(1, bufSize/getEstimatedRowSize());
+  }
+
+
+ 
+  /**
+   * Estimate the size of an average row. Used for allocating memory.
+   * Override when more information is known about the data.
+   * @return bytes per row.
+   */
+  protected int getEstimatedRowSize() {
+    
+    // Add up the sizes of the vectors
+    int size = 0;
+    for (ValueVector v: vectors) {
+      size += TypeHelper.getSize(v.getField().getType());  
+    }
+    return size;
+  }
+  
+  
+  /**
+   * Helper function to create value vectors for a set of columns.
+   * @param names - the names of the fields
+   * @param types - the major types of the fields
+   * @param allocator - a buffer allocator
+   */
+  protected void createVectors(String[] names, MajorType[] types, BufferAllocator allocator) {
+    vectors = new ArrayList<ValueVector>(names.length);
+    for (int i=0; i<names.length; i++) {
+      vectors.add(createVector(names[i], types[i], allocator));
+    }
+  }
+  
+ 
+  /**
+   * Create a value vector for a single column.
+   * @param name - the name of the field
+   * @param type - the type of the field
+   * @param allocator - a buffer allocator
+   * @return the new value vector.
+   */
+  private static ValueVector createVector(String name, MajorType type, BufferAllocator allocator) {
+    return TypeHelper.getNewVector(field(name, type), allocator);
+  }
+  
+  
+  /**
+   * Helper function to create a MaterializedField, used to create a ValueVector.
+   * @param name - the name of the field
+   * @param majorType - the type of the field
+   * @return the MaterializedField
+   */
+  private static MaterializedField field(String name, MajorType majorType) {
+    return MaterializedField.create(new SchemaPath(name, ExpressionPosition.UNKNOWN), majorType);
+  }
+  
+  
+  //////////////////////////////////////////////////////////////////
+  //
+  // The following section contains wrappers around ValueVectors.
+  // The wrappers make it easier to create vectors and set values.
+  //
+  // A different approach is to enhance TypeHelper to provide
+  // a uniform way to "setSafe" the common Java types into the type vectors.
+  // (It does that already for some types, but Strings are a particular nuisance.)
+  //
+  // For now, only types used in information schema are implemented.
+  //
+  ///////////////////////////////////////////////////////////////////
+  static final Charset UTF8 = Charset.forName("UTF-8");
+  
+  
+  // Here are the types used in information schema. 
+  public static final MajorType VARCHAR = Types.required(MinorType.VARCHAR);
+  public static final MajorType INT = Types.required(MinorType.INT);
+  //public static final MajorType NULLABLE_INT = Types.optional(MinorType.INT);
+  
+  
+  /**
+   * A generic routine to set a Java value into a value vector. It assumes the types are compatible.
+   * When a subclass knows the types of its columns, it should use the strongly typed routines instead.
+   * <P>
+   * Note the value corresponds to what would be received by a varargs procedure.
+   * Also note we are switching on minor type. We really should switch on major type, but it is not an enum or ordinal.
+   * @return true if the value was successfully set.
+   */
+  protected static boolean setSafe(ValueVector vector, int index, Object value) {
+    switch (vector.getField().getType().getMinorType()) {
+    case INT:       return setSafe((IntVector)vector, index, (int)value);
+    case VARCHAR:   return setSafe((VarCharVector)vector, index, (String)value);
+    default:        return false;
+    }
+  }
+
+  
+  /**
+   * Strongly typed routines for setting a Java value into a value vector. 
+   * @return true if the value was successfully set.
+   */
+  protected static boolean setSafe(VarCharVector v, int index, String string) {
+    return v.getMutator().setSafe(index, string.getBytes(UTF8));
+  } 
+  
+  protected static boolean setSafe(IntVector v, int index, int value) {
+    return v.getMutator().setSafe(index, value);
+  }
+   
+}
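
The wrapper section above only implements the two types the information schema needs. Adding another type would mean one more strongly typed overload plus a case in the generic dispatcher; a sketch for BIGINT, assuming BigIntVector follows the same generated-mutator pattern as IntVector (illustrative, not part of this commit):

    import org.apache.drill.exec.vector.BigIntVector;

    // One more case in the generic setSafe(ValueVector, ...) switch:
    //   case BIGINT: return setSafe((BigIntVector)vector, index, (long)value);

    // ... backed by a strongly typed helper alongside the others:
    protected static boolean setSafe(BigIntVector v, int index, long value) {
      return v.getMutator().setSafe(index, value);
    }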

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/FixedTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/FixedTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/FixedTable.java
new file mode 100644
index 0000000..a2f289d
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/FixedTable.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.ischema;
+
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.exec.memory.BufferAllocator;
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.reltype.RelDataTypeFactory;
+import org.eigenbase.sql.type.SqlTypeFactoryImpl;
+import org.eigenbase.sql.type.SqlTypeName;
+
+/**
+ * A FixedTable represents a table where the fields are always the same name and type.
+ * Since the names and types are unchanging, it is easy to create value vectors during startup
+ * and to use strong types when storing values in the vectors.
+ */
+public class FixedTable extends EmptyVectorSet  {
+  String tableName;
+  String[] fieldNames;
+  MajorType[] fieldTypes;
+
+  /* (non-Javadoc)
+   * @see org.apache.drill.exec.store.ischema.VectorSet#createVectors(org.apache.drill.exec.memory.BufferAllocator)
+   */
+  @Override
+  public void createVectors(BufferAllocator allocator) {
+    createVectors(fieldNames, fieldTypes, allocator);
+  }
+  
+  /**
+   * Construct a generic table with an unchanging schema. 
+   * We leave it to subclasses to define the fields and types.
+   * @param tableName - name of the table
+   * @param fieldNames - names of the fields
+   * @param fieldTypes - major types of the fields
+   */
+  FixedTable(String tableName, String[] fieldNames, MajorType[] fieldTypes) {
+    this.tableName = tableName;
+    this.fieldNames = fieldNames;
+    this.fieldTypes = fieldTypes;
+  }
+  
+  public String getName() {
+    return tableName;
+  }
+  
+  
+  
+  /**
+   * Helper function to get the Optiq Schema type from a Drill Type.
+   * Again, we only do it for the information schema types, so it needs to be generalized.
+   * (This probably already exists elsewhere.)
+   */
+  
+  static public RelDataType getRelDataType(RelDataTypeFactory typeFactory, MajorType type) {
+    switch (type.getMinorType()) {
+    case INT:    return typeFactory.createSqlType(SqlTypeName.INTEGER);
+    case VARCHAR: return typeFactory.createSqlType(SqlTypeName.VARCHAR);
+    default: return null; // TODO - throw exception?
+    }
+  }
+    
+  public RelDataType getRowType(RelDataTypeFactory typeFactory) {
+    
+    // Convert the array of Drill types to an array of Optiq types
+    RelDataType[] relTypes = new RelDataType[fieldTypes.length];
+    for (int i=0; i<fieldTypes.length; i++) {
+      relTypes[i] = getRelDataType(typeFactory, fieldTypes[i]);
+    }
+    
+    // Create a struct type to represent the row.
+    return typeFactory.createStructType(relTypes, fieldNames);
+  }
+
+  
+  
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaBatchCreator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaBatchCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaBatchCreator.java
new file mode 100644
index 0000000..68844e1
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaBatchCreator.java
@@ -0,0 +1,21 @@
+package org.apache.drill.exec.store.ischema;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.exec.ops.FragmentContext;
+import org.apache.drill.exec.physical.impl.BatchCreator;
+import org.apache.drill.exec.physical.impl.ScanBatch;
+import org.apache.drill.exec.record.RecordBatch;
+import org.apache.drill.exec.store.RecordReader;
+
+public class InfoSchemaBatchCreator implements BatchCreator<InfoSchemaSubScan>{
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(InfoSchemaBatchCreator.class);
+
+  @Override
+  public RecordBatch getBatch(FragmentContext context, InfoSchemaSubScan config, List<RecordBatch> children) throws ExecutionSetupException {
+    RecordReader rr = new RowRecordReader(context, config.getTable(), context.getRootSchema());
+    return new ScanBatch(context, Collections.singleton(rr).iterator());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaConfig.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaConfig.java
new file mode 100644
index 0000000..1447eb3
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaConfig.java
@@ -0,0 +1,18 @@
+package org.apache.drill.exec.store.ischema;
+
+import org.apache.drill.common.logical.StoragePluginConfig;
+
+public class InfoSchemaConfig implements StoragePluginConfig{
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(InfoSchemaConfig.class);
+  
+  @Override
+  public int hashCode(){
+    return 1;
+  }
+  
+  @Override
+  public boolean equals(Object o){
+    return o instanceof InfoSchemaConfig;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaDrillTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaDrillTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaDrillTable.java
new file mode 100644
index 0000000..7b6cb32
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaDrillTable.java
@@ -0,0 +1,23 @@
+package org.apache.drill.exec.store.ischema;
+
+import org.apache.drill.common.logical.StoragePluginConfig;
+import org.apache.drill.exec.planner.logical.DrillTable;
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.reltype.RelDataTypeFactory;
+
+public class InfoSchemaDrillTable extends DrillTable{
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(InfoSchemaDrillTable.class);
+
+  private final SelectedTable table;
+  
+  public InfoSchemaDrillTable(String storageEngineName, SelectedTable selection, StoragePluginConfig storageEngineConfig) {
+    super(storageEngineName, selection, storageEngineConfig);
+    this.table = selection;
+  }
+
+  @Override
+  public RelDataType getRowType(RelDataTypeFactory typeFactory) {
+    return table.getRowType(typeFactory);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaGroupScan.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaGroupScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaGroupScan.java
new file mode 100644
index 0000000..1985d6a
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaGroupScan.java
@@ -0,0 +1,67 @@
+package org.apache.drill.exec.store.ischema;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.exceptions.PhysicalOperatorSetupException;
+import org.apache.drill.exec.physical.EndpointAffinity;
+import org.apache.drill.exec.physical.OperatorCost;
+import org.apache.drill.exec.physical.base.AbstractGroupScan;
+import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.apache.drill.exec.physical.base.Size;
+import org.apache.drill.exec.physical.base.SubScan;
+import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import com.google.common.base.Preconditions;
+
+@JsonTypeName("info-schema")
+public class InfoSchemaGroupScan extends AbstractGroupScan{
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(InfoSchemaGroupScan.class);
+
+  private final SelectedTable table;
+  
+  @JsonCreator
+  public InfoSchemaGroupScan(@JsonProperty("table") SelectedTable table) {
+    this.table = table;
+  }
+  
+  @Override
+  public void applyAssignments(List<DrillbitEndpoint> endpoints) throws PhysicalOperatorSetupException {
+    Preconditions.checkArgument(endpoints.size() == 1);
+  }
+
+  @Override
+  public SubScan getSpecificScan(int minorFragmentId) throws ExecutionSetupException {
+    Preconditions.checkArgument(minorFragmentId == 0);
+    return new InfoSchemaSubScan(table);
+  }
+
+  @Override
+  public int getMaxParallelizationWidth() {
+    return 1;
+  }
+
+  @Override
+  public OperatorCost getCost() {
+    return new OperatorCost(1,1,1,1);
+  }
+
+  @Override
+  public Size getSize() {
+    return new Size(1000, 1000);
+  }
+
+  @Override
+  public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) throws ExecutionSetupException {
+    return this;
+  }
+
+  @Override
+  public List<EndpointAffinity> getOperatorAffinity() {
+    return Collections.emptyList();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaStoragePlugin.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaStoragePlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaStoragePlugin.java
new file mode 100644
index 0000000..e5ab158
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaStoragePlugin.java
@@ -0,0 +1,74 @@
+package org.apache.drill.exec.store.ischema;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import net.hydromatic.optiq.Schema;
+import net.hydromatic.optiq.SchemaPlus;
+
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.logical.data.Scan;
+import org.apache.drill.exec.planner.logical.DrillTable;
+import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.store.AbstractSchema;
+import org.apache.drill.exec.store.AbstractStoragePlugin;
+import org.apache.drill.exec.store.SchemaHolder;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+
+public class InfoSchemaStoragePlugin extends AbstractStoragePlugin{
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(InfoSchemaStoragePlugin.class);
+
+  private final InfoSchemaConfig config;
+  private final DrillbitContext context;
+  private final String name;
+  
+  public InfoSchemaStoragePlugin(InfoSchemaConfig config, DrillbitContext context, String name){
+    this.config = config;
+    this.context = context;
+    this.name = name;
+  }
+  
+  @Override
+  public boolean supportsRead() {
+    return true;
+  }
+
+  @Override
+  public InfoSchemaGroupScan getPhysicalScan(Scan scan) throws IOException {
+    SelectedTable table = scan.getSelection().getWith(context.getConfig(),  SelectedTable.class);
+    return new InfoSchemaGroupScan(table);
+  }
+
+  @Override
+  public Schema createAndAddSchema(SchemaPlus parent) {
+    Schema s = new ISchema(parent);
+    parent.add(s);
+    return s;
+  }
+  
+  private class ISchema extends AbstractSchema{
+    private Map<String, InfoSchemaDrillTable> tables;
+    public ISchema(SchemaPlus parent){
+      super(new SchemaHolder(parent), "INFORMATION_SCHEMA");
+      Map<String, InfoSchemaDrillTable> tbls = Maps.newHashMap();
+      for(SelectedTable tbl : SelectedTable.values()){
+        tbls.put(tbl.name(), new InfoSchemaDrillTable("INFORMATION_SCHEMA", tbl, config));  
+      }
+      this.tables = ImmutableMap.copyOf(tbls);
+    }
+    
+    @Override
+    public DrillTable getTable(String name) {
+      return tables.get(name);
+    }
+    
+    @Override
+    public Set<String> getTableNames() {
+      return tables.keySet();
+    }
+    
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaSubScan.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaSubScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaSubScan.java
new file mode 100644
index 0000000..20375ca
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaSubScan.java
@@ -0,0 +1,24 @@
+package org.apache.drill.exec.store.ischema;
+
+import org.apache.drill.exec.physical.base.AbstractSubScan;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+public class InfoSchemaSubScan extends AbstractSubScan{
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(InfoSchemaSubScan.class);
+  
+
+  private final SelectedTable table;
+  
+  @JsonCreator
+  public InfoSchemaSubScan(@JsonProperty("table") SelectedTable table) {
+    this.table = table;
+  }
+
+  public SelectedTable getTable() {
+    return table;
+  }
+  
+  
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java
new file mode 100644
index 0000000..595488f
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.ischema;
+
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.exec.vector.IntVector;
+import org.apache.drill.exec.vector.VarCharVector;
+
+/**
+ * InfoSchemaTable defines the various Information Schema tables.
+ * <p>
+ * All the information schema tables are grouped together for convenience.
+ * For each specific table, the corresponding class:
+ * <p>Declares the table name.
+ * <p>Declares the field names and types.
+ * <p>Optionally defines a typed method to write a row of data to the vectors.
+ *    If not defined here, FixedTable will kick in and do the job using
+ *    a slower, generic method.
+ */
+public class InfoSchemaTable{
+
+  /**
+   * Layout for the SCHEMATA table.
+   */
+  public static class Schemata extends FixedTable {
+    static final String tableName = "SCHEMATA";
+    static final String[] fieldNames = {"CATALOG_NAME", "SCHEMA_NAME", "SCHEMA_OWNER"};
+    static final MajorType[] fieldTypes = {VARCHAR,         VARCHAR,       VARCHAR};
+
+    public Schemata() {
+      super(tableName, fieldNames, fieldTypes);
+    }
+
+    // Optional ...
+    public boolean writeRowToVectors(int index, Object[] row) {
+      return 
+          setSafe((VarCharVector)vectors.get(0), index, (String)row[0])  &&
+          setSafe((VarCharVector)vectors.get(1), index,  (String)row[1]) &&
+          setSafe((VarCharVector)vectors.get(2), index,  (String)row[2]);
+    }
+  }
+
+  /**
+   * Layout for the TABLES table.
+   */
+  public static class Tables extends FixedTable {
+    static final String tableName = "TABLES";
+    static final String[] fieldNames = {"TABLE_CATALOG", "TABLE_SCHEMA", "TABLE_NAME", "TABLE_TYPE"};
+    static final MajorType[] fieldTypes = {VARCHAR,          VARCHAR,        VARCHAR,      VARCHAR};
+
+    public Tables() {
+      super(tableName, fieldNames, fieldTypes);
+    }  
+
+    // Optional ...
+    public boolean writeRowToVectors(int index, Object[] row) {
+      return
+          setSafe((VarCharVector)vectors.get(0), index, (String)row[0]) &&
+          setSafe((VarCharVector)vectors.get(1), index, (String)row[1]) &&
+          setSafe((VarCharVector)vectors.get(2), index, (String)row[2]) &&
+          setSafe((VarCharVector)vectors.get(3), index, (String)row[3]);
+    }
+  }
+
+
+  /**
+   * Layout for the COLUMNS table.
+   */
+  public static class Columns extends FixedTable {
+    static final String tableName = "COLUMNS";
+    static final String[] fieldNames = {"TABLE_CATALOG",     "TABLE_SCHEMA",     "TABLE_NAME",    "COLUMN_NAME",
+      "ORDINAL_POSITION",   "IS_NULLABLE",      "DATA_TYPE",     "CHARACTER_MAXIMUM_LENGTH",
+      "NUMERIC_PRECISION_RADIX", "NUMERIC_SCALE", "NUMERIC_PRECISION"};
+    static final MajorType[] fieldTypes= { VARCHAR,         VARCHAR,       VARCHAR,      VARCHAR,
+      INT,             VARCHAR,        VARCHAR,     INT,
+      INT,             INT,            INT};
+    public Columns() {
+      super(tableName, fieldNames, fieldTypes);
+    }
+
+
+    // Optional ...
+    public boolean writeRowToVectors(int index, Object[] row) {
+      return 
+          setSafe((VarCharVector)vectors.get(0), index, (String)row[0]) &&
+          setSafe((VarCharVector)vectors.get(1), index, (String)row[1]) &&
+          setSafe((VarCharVector)vectors.get(2), index, (String)row[2]) &&
+          setSafe((VarCharVector)vectors.get(3), index, (String)row[3]) && 
+          setSafe((IntVector)vectors.get(4), index, (int)row[4])        &&
+          setSafe((VarCharVector)vectors.get(5), index, (String)row[5])     &&
+          setSafe((VarCharVector)vectors.get(6), index, (String)row[6]) &&  
+          setSafe((IntVector)vectors.get(7), index, (int)row[7]) &&
+          setSafe((IntVector)vectors.get(8), index, (int)row[8])        &&
+          setSafe((IntVector)vectors.get(9), index, (int)row[9])        &&
+          setSafe((IntVector)vectors.get(10), index, (int)row[10]);
+    }
+  }
+
+
+  /**
+   * Layout for the VIEWS table.
+   */
+  static public class Views extends FixedTable {
+    static final String tableName = "VIEWS";
+    static final String[] fieldNames = {"TABLE_CATALOG", "TABLE_SHEMA", "TABLE_NAME", "VIEW_DEFINITION"};
+    static final MajorType[] fieldTypes = {VARCHAR,         VARCHAR,       VARCHAR,      VARCHAR};
+
+    Views() {
+      super(tableName, fieldNames, fieldTypes);
+    }
+
+    // Optional ...
+    public boolean writeRowToVectors(int index, Object[] row) {
+      return setSafe((VarCharVector)vectors.get(0), index, (String)row[0]) &&
+          setSafe((VarCharVector)vectors.get(1), index, (String)row[1])    &&
+          setSafe((VarCharVector)vectors.get(2), index, (String)row[2])    &&
+          setSafe((VarCharVector)vectors.get(3), index, (String)row[3]);
+    }
+  }
+
+
+  /**
+   * Layout for the CATALOGS table.
+   */
+  static public class Catalogs extends FixedTable {
+    static final String tableName = "CATALOGS";
+    static final String[] fieldNames = {"CATALOG_NAME", "CATALOG_DESCRIPTION", "CATALOG_CONNECT"};
+    static final MajorType[] fieldTypes = {VARCHAR,      VARCHAR,               VARCHAR};
+
+    Catalogs() {
+      super(tableName, fieldNames, fieldTypes);
+    }
+  }
+  
+  
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/OptiqProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/OptiqProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/OptiqProvider.java
new file mode 100644
index 0000000..795b92c
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/OptiqProvider.java
@@ -0,0 +1,234 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.ischema;
+
+import net.hydromatic.optiq.Schema;
+import net.hydromatic.optiq.SchemaPlus;
+import net.hydromatic.optiq.Table;
+
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.reltype.RelDataTypeField;
+import org.eigenbase.sql.type.SqlTypeFactoryImpl;
+import org.eigenbase.sql.type.SqlTypeName;
+
+/**
+ * OptiqProvider provides data for the various tables in the information schema.
+ * Each table has its own nested class, keeping them grouped together.
+ * Note "writeRow(...)" must match the values expected by the corresponding table.
+ * <p>
+ * To keep the code concise, each provider would ideally inherit from both an OptiqScanner
+ * and a PipeProvider. Java does not support multiple inheritance, so for the
+ * moment OptiqScanner artificially inherits from PipeProvider; this code
+ * needs to be cleaned up.
+ */
+public class OptiqProvider  {
+
+  /**
+   * Provide data for TABLES table.
+   */
+  static public class Tables extends Abstract { 
+    Tables(SchemaPlus root) {
+      super(root);
+    }
+
+    @Override
+    public boolean visitTableName(String schema, String tableName) {
+      return writeRow("DRILL", schema, tableName, Schema.TableType.TABLE.toString());
+    }
+  }
+
+
+  /**
+   * Provide data for SCHEMATA table.
+   */
+  static public class Schemata extends Abstract {
+    @Override
+    public boolean visitSchema(String schemaName, Schema schema) {
+      if (schemaName != null && !schemaName.isEmpty()) {
+        writeRow("DRILL", schemaName, "<owner>");
+      }
+      return false;
+    }
+
+    Schemata(SchemaPlus root) {
+      super(root);
+    }
+  }
+
+  
+
+  /**
+   * Provide data for the COLUMNS table.
+   */
+  static public class Columns extends Abstract {
+
+    public Columns(SchemaPlus root) {
+      super(root);
+    }
+
+    @Override
+    public boolean visitField(String schemaName, String tableName, RelDataTypeField field) {
+      String columnName = field.getName();
+      RelDataType type = field.getType();
+      SqlTypeName sqlType = type.getSqlTypeName();
+      
+      int position = field.getIndex();
+      String nullable;
+      if (type.isNullable()) nullable = "YES";
+      else                   nullable = "NO";
+      String sqlTypeName = sqlType.getName();
+      int radix = (sqlType == SqlTypeName.DECIMAL)?10:-1;        // TODO: where do we get radix?
+      int charMaxLen = -1;  // TODO: where do we get char length?
+      int scale = (sqlType.allowsScale())?type.getScale(): -1;
+      int precision = (sqlType.allowsPrec())?type.getPrecision(): -1;
+
+      writeRow("DRILL", schemaName, tableName, columnName, position, nullable, sqlTypeName, charMaxLen, radix, scale, precision);
+
+      return false;
+    }
+  }
+
+
+
+  /**
+   * Provide data for VIEWS table
+   */
+  public static class Views extends Abstract {
+    public Views(SchemaPlus root) {
+      super(root);
+    }
+    @Override
+    public boolean visitTable(String schemaName, String tableName, Table table) {
+      if (table.getJdbcTableType() == Schema.TableType.VIEW) {
+        writeRow("DRILL", schemaName, tableName, "TODO: GetViewDefinition");
+      }
+      return false;
+    }
+  }
+
+  public static class Catalogs extends Abstract {
+    public Catalogs(SchemaPlus root) {
+      super(root);
+    }
+    @Override
+    public void generateRows() {
+      writeRow("DRILL", "The internal metadata used by Drill", "");
+    }
+  }
+
+
+  /**
+   * An abstract class which helps generate data. It does the actual scanning of an Optiq schema,
+   * but relies on a subclass to provide a "visit" routine to write out data.
+   */
+  public static class Abstract extends OptiqScanner {
+    SchemaPlus root;
+
+    protected Abstract(SchemaPlus root) {
+      this.root = root;
+    }
+
+
+    /**
+     * Start writing out rows.
+     */
+    @Override
+    public void generateRows() {
+
+      // Scan the root schema for subschema, tables, columns.
+      scanSchema(root); 
+    }
+  }
+
+
+
+  /**
+   * An OptiqScanner scans the Optiq schema, generating rows for each 
+   * schema, table or column. It is intended to be subclassed, where the
+   * subclass does what it needs when visiting an Optiq schema structure.
+   */
+  // We would really prefer multiple inheritance from both OptiqScanner and PipeProvider,
+  //   but making one a subclass of the other works for now. 
+  //   TODO: Refactor to avoid subclassing of what should be an unrelated class.
+  abstract static class OptiqScanner extends PipeProvider {
+
+
+    /**
+     *  Each visitor implements at least one of the following methods.
+     *    If the schema visitor returns true, then visit the tables.
+     *    If the table visitor returns true, then visit the fields (columns).
+     */
+    public boolean visitSchema(String schemaName, Schema schema){return true;}
+    public boolean visitTableName(String schemaName, String tableName){return true;}
+    public boolean visitTable(String schemaName, String tableName, Table table){return true;}
+    public boolean visitField(String schemaName, String tableName, RelDataTypeField field){return true;}
+
+
+
+    /**
+     * Start scanning an Optiq Schema.
+     * @param root - where to start
+     */
+    protected void scanSchema(Schema root) {
+      scanSchema(root.getName(), root);
+    }
+    
+    /**
+     * Recursively scan the schema, invoking the visitor as appropriate.
+     * @param schemaPath - the path to the current schema so far.
+     * @param schema - the current schema. Visiting happens through the overridable visit methods.
+     */
+    private void scanSchema(String schemaPath, Schema schema) {
+      
+      // If we have an empty schema path, then don't insert a leading dot.
+      String separator;
+      if (schemaPath.isEmpty()) separator = "";
+      else                      separator = ".";
+
+      // Recursively scan the subschema.
+      for (String name: schema.getSubSchemaNames()) {
+        scanSchema(schemaPath + separator + name, schema.getSubSchema(name));
+      }
+
+      // Visit this schema and if requested ...
+      if (visitSchema(schemaPath, schema)) {
+
+        // ... do for each of the schema's tables.
+        for (String tableName: schema.getTableNames()) {
+          if(visitTableName(schemaPath, tableName)){
+            Table table = schema.getTable(tableName);
+            
+            // Visit the table, and if requested ...
+            if (visitTable(schemaPath,  tableName, table)) {
+
+              // ... do for each of the table's fields.
+              RelDataType tableRow = table.getRowType(new SqlTypeFactoryImpl()); // TODO: Is this correct?
+              for (RelDataTypeField field: tableRow.getFieldList()) {
+
+                // Visit the field.
+                visitField(schemaPath,  tableName, field);
+              }
+            }            
+          }
+        }
+      }
+    }
+  }
+}
+
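
The visitor hooks above make new providers cheap to write: override only the level you care about and return false to stop descending. A sketch of a provider that emits one row per schema with its sub-schema count; the class is hypothetical and would sit alongside the other nested classes in OptiqProvider:

    /**
     * Hypothetical provider: one row per schema, with its sub-schema count.
     */
    static public class SchemaCounts extends Abstract {
      SchemaCounts(SchemaPlus root) {
        super(root);
      }

      @Override
      public boolean visitSchema(String schemaName, Schema schema) {
        writeRow(schemaName, schema.getSubSchemaNames().size());
        return false; // no need to visit the tables underneath
      }
    }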

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/PipeProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/PipeProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/PipeProvider.java
new file mode 100644
index 0000000..30d7d76
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/PipeProvider.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.ischema;
+
+import java.util.ArrayList;
+import java.util.ListIterator;
+
+/**
+ * PipeProvider sets up the framework so some subclass can "write" rows
+ * to an internal pipe, which another class (RowRecordReader) can "read" to
+ * build up a record batch.
+ * <p>
+ * This class helps work around the situation where the rows cannot conveniently
+ * be generated one at a time by an iterator. Logically, the "writer" writes rows to the pipe,
+ * while a "reader" reads rows from the pipe. The vocabulary implies two separate threads,
+ * but the current implementation is actually just a wrapper around a List.
+ */
+public abstract class PipeProvider implements RowProvider {
+  ArrayList<Object[]> pipe = null;
+  ListIterator<Object[]> iter;
+  
+  /**
+   * Method to generate and write rows to the pipe.
+   */
+  abstract void generateRows();
+  
+  /**
+   * true if there are rows waiting to be "read".
+   */
+  public boolean hasNext() {
+    if (pipe == null) {
+      pipe = new ArrayList<Object[]>();
+      generateRows();
+      iter = pipe.listIterator();
+    }
+    return iter.hasNext();
+  }
+  
+  /**
+   * Read the next row from the pipe. 
+   * Should only be called after "hasNext" indicates there are more rows.
+   */
+  public Object[] next() {
+    return iter.next();
+  }
+  
+  /**
+   * Sometimes, a row cannot be immediately processed. Put the last row back and re-read it next time.
+   */
+  public void previous() {
+    iter.previous();
+  }
+  
+  /**
+   * Write a row to the pipe.
+   * @param values - a varargs list of values, in the same order as the RecordReader's value vectors.
+   * @return true if the row was successfully written to the pipe.
+   */
+  protected boolean writeRow(Object...values) {
+    pipe.add(values);
+    return true;
+  }
+  
+}
+  
\ No newline at end of file
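
Because the pipe is just a buffered list, a provider can be exercised without any Drill machinery. A sketch of the write/read round trip, assuming the subclass lives in the ischema package (generateRows is package-private); the ThreeRows class is hypothetical:

    package org.apache.drill.exec.store.ischema;

    // Hypothetical provider that writes three fixed rows into the pipe.
    class ThreeRows extends PipeProvider {
      @Override
      void generateRows() {
        for (int i = 0; i < 3; i++) {
          writeRow("row", i);   // varargs: one Object[] per row
        }
      }

      // Consumer side: hasNext() lazily fills the pipe, next() drains it.
      public static void main(String[] args) {
        PipeProvider provider = new ThreeRows();
        while (provider.hasNext()) {
          Object[] row = provider.next();
          System.out.println(row[0] + " " + row[1]);
        }
      }
    }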

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowProvider.java
new file mode 100644
index 0000000..c26c836
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowProvider.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.ischema;
+
+/**
+ * An interface for providing rows of data.
+ */
+public interface RowProvider {
+  public Object[] next();  // Fetch the next row of values
+  public boolean hasNext();  // true if there are rows remaining to fetch.
+  public void previous();  // Put back the last row read, so it can be reread. (Only one row can be put back)
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowRecordReader.java
new file mode 100644
index 0000000..4832132
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowRecordReader.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.ischema;
+
+import net.hydromatic.optiq.SchemaPlus;
+
+import org.apache.drill.common.exceptions.DrillRuntimeException;
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.ops.FragmentContext;
+import org.apache.drill.exec.physical.impl.OutputMutator;
+import org.apache.drill.exec.store.RecordReader;
+import org.apache.drill.exec.vector.ValueVector;
+
+
+
+
+/**
+ * RowRecordReader is a RecordReader which creates RecordBatches by
+ * reading rows one at a time. The fixed-format rows come from a "RowProvider".
+ */
+public class RowRecordReader implements RecordReader {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(RowRecordReader.class);
+
+  protected final VectorSet batch;
+  protected final RowProvider provider;
+  protected final FragmentContext context;
+  protected OutputMutator output;
+  
+  private int bufSize = 32*1024*1024;
+  private int maxRowCount;
+  /**
+   * Construct a RecordReader which takes rows from a RowProvider and puts them into a set of value vectors.
+   * @param context - the fragment context
+   * @param batch - the vector set which receives the rows
+   * @param provider - the source of rows
+   */
+  public RowRecordReader(FragmentContext context, VectorSet batch, RowProvider provider) {
+    this.context = context;
+    this.provider = provider;
+    this.batch = batch;
+  }
+ 
+  public RowRecordReader(FragmentContext context, SelectedTable table, SchemaPlus rootSchema){
+    this.context = context;
+    this.provider = table.getProvider(rootSchema);
+    this.batch = table.getFixedTable();
+  }
+
+  /** 
+   * Prepare to create record batches. 
+   */
+  @Override
+  public void setup(OutputMutator output) throws ExecutionSetupException {
+    this.output = output; 
+    batch.createVectors(context.getAllocator());
+    
+    // Inform Drill of the output columns. They were set up when the vector handler was created.
+    //  Note we are currently working with fixed tables.
+    try {
+      for (ValueVector v: batch.getValueVectors()) {
+        output.addField(v);
+      }
+      output.setNewSchema();
+    } catch (SchemaChangeException e) {
+      throw new ExecutionSetupException("Failure while setting up fields", e);
+    }
+    
+    // Estimate the number of records we can hold in a RecordBatch
+    maxRowCount = batch.getEstimatedRowCount(bufSize);
+  }
+
+
+
+  /** 
+   * Return the next record batch.  An empty batch means end of data.
+   */
+  @Override
+  public int next() {
+    
+    // Note we are starting a new batch of records
+    batch.beginBatch(maxRowCount);
+    
+    // Repeat until out of data or vectors are full
+    int actualCount;
+    for (actualCount = 0; actualCount < maxRowCount && provider.hasNext(); actualCount++) {
+   
+      // Put the next row into the vectors. If vectors full, try again later.
+      Object[] row = provider.next();
+      if (!batch.writeRowToVectors(actualCount, row)) {
+        provider.previous();
+        break;
+      }
+    }
+    
+    // Note that the batch is complete.
+    batch.endBatch(actualCount);
+    
+    // Problem if we had a single row which didn't fit.
+    if (actualCount == 0 && provider.hasNext()) {
+      throw new DrillRuntimeException("Row size larger than batch size");
+    }
+    
+    // Return the number of rows.  0 means end of data.
+    return actualCount;
+  }
+     
+
+      
+  /**
+   *  Release all resources 
+   */
+  public void cleanup() {
+    batch.cleanup();
+  }
+
+
+  
+}
+
+
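
Putting the pieces together, the reader pairs a VectorSet (the layout) with a RowProvider (the data), which is also how it can be driven directly in a test. A sketch, where context and rootSchema stand in for fixtures a test harness would supply (the helper name is hypothetical):

    // Sketch: context and rootSchema are placeholders for test fixtures.
    RecordReader makeSchemataReader(FragmentContext context, SchemaPlus rootSchema) {
      FixedTable layout = new InfoSchemaTable.Schemata();        // SCHEMATA columns
      RowProvider rows = new OptiqProvider.Schemata(rootSchema); // SCHEMATA rows
      return new RowRecordReader(context, layout, rows);
    }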

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/SelectedTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/SelectedTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/SelectedTable.java
new file mode 100644
index 0000000..3e354f8
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/SelectedTable.java
@@ -0,0 +1,44 @@
+package org.apache.drill.exec.store.ischema;
+
+import net.hydromatic.optiq.SchemaPlus;
+
+import org.apache.drill.exec.store.ischema.InfoSchemaTable.Catalogs;
+import org.apache.drill.exec.store.ischema.InfoSchemaTable.Columns;
+import org.apache.drill.exec.store.ischema.InfoSchemaTable.Schemata;
+import org.apache.drill.exec.store.ischema.InfoSchemaTable.Tables;
+import org.apache.drill.exec.store.ischema.InfoSchemaTable.Views;
+import org.apache.drill.exec.store.ischema.OptiqProvider.OptiqScanner;
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.reltype.RelDataTypeFactory;
+
+public enum SelectedTable{
+  CATALOGS(new Catalogs(), new ScannerFactory(){public OptiqScanner get(SchemaPlus root) {return new OptiqProvider.Catalogs(root);}} ), //
+  SCHEMATA(new Schemata(), new ScannerFactory(){public OptiqScanner get(SchemaPlus root) {return new OptiqProvider.Schemata(root);}} ), //
+  VIEWS(new Views(), new ScannerFactory(){public OptiqScanner get(SchemaPlus root) {return new OptiqProvider.Views(root);}} ), //
+  COLUMNS(new Columns(), new ScannerFactory(){public OptiqScanner get(SchemaPlus root) {return new OptiqProvider.Columns(root);}} ), //
+  TABLES(new Tables(), new ScannerFactory(){public OptiqScanner get(SchemaPlus root) {return new OptiqProvider.Tables(root);}} ); //
+  
+  private final FixedTable tableDef;
+  private final ScannerFactory providerFactory;
+  
+  private SelectedTable(FixedTable tableDef, ScannerFactory providerFactory) {
+    this.tableDef = tableDef;
+    this.providerFactory = providerFactory;
+  }
+  
+  public OptiqScanner getProvider(SchemaPlus root){
+    return providerFactory.get(root);
+  }
+  
+  private interface ScannerFactory{
+    public OptiqScanner get(SchemaPlus root);
+  }
+  
+  public RelDataType getRowType(RelDataTypeFactory typeFactory) {
+    return tableDef.getRowType(typeFactory);
+  }
+  
+  public FixedTable getFixedTable(){
+    return tableDef;
+  }
+}
\ No newline at end of file
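SelectedTable ties each INFORMATION_SCHEMA table to both its fixed column definition and a factory for the scanner that produces its rows, so a caller needs only the enum constant. A brief usage sketch, assuming the caller already holds the root SchemaPlus:

      // Sketch: resolving a table definition and its row scanner from the enum.
      SelectedTable table = SelectedTable.TABLES;
      FixedTable definition = table.getFixedTable();         // column names and types
      OptiqScanner scanner  = table.getProvider(rootSchema); // rows scanned from the schema tree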

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/VectorSet.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/VectorSet.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/VectorSet.java
new file mode 100644
index 0000000..cb39337
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/VectorSet.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.ischema;
+
+import java.util.List;
+
+import org.apache.drill.exec.memory.BufferAllocator;
+import org.apache.drill.exec.vector.ValueVector;
+
+/**
+ * A collection of value vectors representing the columns in a table.
+ */
+public interface VectorSet {
+  public void cleanup();
+
+  public void beginBatch(int maxRows);
+  public boolean writeRowToVectors(int index, Object[] values);
+  public void endBatch(int actualRows);
+
+  public int getEstimatedRowCount(int bufSize);
+  public void createVectors(BufferAllocator allocator);
+  public List<ValueVector> getValueVectors();
+}
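The interface implies the fill cycle RowRecordReader drives: create the vectors once, then per batch call beginBatch, writeRowToVectors for each row, and endBatch, pushing a row back to the provider when it does not fit. A minimal sketch of one pass through that cycle (the helper method is illustrative, not part of this patch):

      // Illustrative only: one batch of the VectorSet fill cycle,
      // mirroring the loop in RowRecordReader.next().
      static int fillOneBatch(VectorSet vectors, RowProvider rows, int maxRows) {
        vectors.beginBatch(maxRows);
        int count = 0;
        while (count < maxRows && rows.hasNext()) {
          if (!vectors.writeRowToVectors(count, rows.next())) {
            rows.previous();  // row did not fit; retry it in the next batch
            break;
          }
          count++;
        }
        vectors.endBatch(count);
        return count;         // 0 signals end of data upstream
      }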

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/package-info.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/package-info.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/package-info.java
new file mode 100644
index 0000000..ea38e06
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Storage plugin, tables, and record readers for the INFORMATION_SCHEMA schema.
+ */
+package org.apache.drill.exec.store.ischema;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/OrphanSchema.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/OrphanSchema.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/OrphanSchema.java
new file mode 100644
index 0000000..268b844
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/OrphanSchema.java
@@ -0,0 +1,75 @@
+package org.apache.drill.exec.store.ischema;
+
+
+import static org.mockito.Mockito.*;
+import net.hydromatic.optiq.SchemaPlus;
+
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.exec.memory.TopLevelAllocator;
+import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.store.StoragePluginRegistry;
+import org.junit.Test;
+
+
+import com.codahale.metrics.MetricRegistry;
+/**
+ * OrphanSchema is a stand-alone schema tree which is not connected to Optiq.
+ * This class is a refactoring of exec.store.TestOrphanSchema.java; the primary
+ * change is to package a "create()" method that provides a test schema.
+ * For convenient testing, it mocks up the Drillbit context.
+ */
+public class OrphanSchema {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(OrphanSchema.class);
+  
+  /**
+   * Create an orphan schema to be used for testing.
+   * @return root node of the created schema.
+   */
+  public static SchemaPlus create(){
+    
+    final DrillConfig c = DrillConfig.create();
+    
+    // Mock up a context which will allow us to create a schema.
+    final DrillbitContext bitContext = mock(DrillbitContext.class);
+    when(bitContext.getMetrics()).thenReturn(new MetricRegistry());
+    when(bitContext.getAllocator()).thenReturn(new TopLevelAllocator());
+    when(bitContext.getConfig()).thenReturn(c);
+    
+    // Using the mock context, get the orphan schema.
+    StoragePluginRegistry r = new StoragePluginRegistry(bitContext);
+    SchemaPlus plus = r.getSchemaFactory().getOrphanedRootSchema();
+
+    return plus;
+  }
+  
+  
+  /**
+   * This test replicates the one in org.apache.drill.exec.server,
+   * but it is refactored to provide a standalone "create()" method.
+   */
+  
+  @Test
+  public void test() {
+    printSchema(create(), 0);
+  }
+  
+  private static void t(final int t){
+    for(int i = 0; i < t; i++) System.out.print('\t');
+  }
+  private static void printSchema(SchemaPlus s, int indent){
+    t(indent);
+    System.out.print("Schema: ");
+    System.out.println(s.getName().equals("") ? "root" : s.getName());
+    for(String table : s.getTableNames()){
+      t(indent + 1);
+      System.out.print("Table: ");
+      System.out.println(table);
+    }
+    
+    for(String schema : s.getSubSchemaNames()){
+      SchemaPlus p = s.getSubSchema(schema);
+      printSchema(p, indent + 1);
+    }
+    
+  }
+}
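The tests below consume the orphan schema directly; outside those tests the same entry point applies. A hedged sketch (the INFORMATION_SCHEMA sub-schema name is an assumption based on the plugin this commit adds):

      // Sketch: obtaining and walking the orphan schema tree in a test.
      SchemaPlus root = OrphanSchema.create();
      // Assumed name; the plugin in this commit registers the INFORMATION_SCHEMA tables.
      SchemaPlus infoSchema = root.getSubSchema("INFORMATION_SCHEMA");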

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestOrphanSchema.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestOrphanSchema.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestOrphanSchema.java
new file mode 100644
index 0000000..c29c05f
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestOrphanSchema.java
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.ischema;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import net.hydromatic.optiq.SchemaPlus;
+
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.memory.TopLevelAllocator;
+import org.apache.drill.exec.ops.FragmentContext;
+import org.apache.drill.exec.physical.impl.OutputMutator;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.store.RecordReader;
+import org.apache.drill.exec.store.ischema.FixedTable;
+import org.apache.drill.exec.store.ischema.InfoSchemaTable;
+import org.apache.drill.exec.store.ischema.OptiqProvider;
+import org.apache.drill.exec.store.ischema.RowProvider;
+import org.apache.drill.exec.store.ischema.RowRecordReader;
+import org.apache.drill.exec.vector.ValueVector;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Using an orphan schema, create and display the various information schema tables.
+ * An "orphan schema" is a stand-alone schema which is not (yet) connected to Optiq.
+ */
+public class TestOrphanSchema {
+  SchemaPlus root = OrphanSchema.create();
+
+  @Test
+  public void testTables() {
+    displayTable(new InfoSchemaTable.Tables(), new OptiqProvider.Tables(root));
+  }
+  
+  @Test
+  public void testSchemata() {
+    displayTable(new InfoSchemaTable.Schemata(), new OptiqProvider.Schemata(root));
+  }
+  
+  
+  @Test
+  public void testViews() {
+    displayTable(new InfoSchemaTable.Views(), new OptiqProvider.Views(root));
+  }
+  
+  @Test
+  public void testCatalogs() {
+    displayTable(new InfoSchemaTable.Catalogs(), new OptiqProvider.Catalogs(root));
+  }
+  
+  @Test
+  public void testColumns() {
+    displayTable(new InfoSchemaTable.Columns(), new OptiqProvider.Columns(root));
+  }
+  
+  
+  private void displayTable(FixedTable table, RowProvider provider) {
+
+    // Set up a mock context
+    FragmentContext context = mock(FragmentContext.class);
+    when(context.getAllocator()).thenReturn(new TopLevelAllocator());
+    
+    // Create a RecordReader which reads from the test table.
+    RecordReader reader = new RowRecordReader(context, table, provider);
+    
+    // Create a dummy OutputMutator for the RecordReader.
+    TestOutput output = new TestOutput();
+    try {reader.setup(output);}
+    catch (ExecutionSetupException e) {Assert.fail("reader threw an exception");}
+    
+    // print out headers
+    System.out.printf("\n%20s\n", table.getName());
+    System.out.printf("%10s", "RowNumber");
+    for (ValueVector v: table.getValueVectors()) {
+      System.out.printf(" | %16s", v.getField().getName());
+    }
+    System.out.println();
+
+    // Do for each record batch
+    int rowNumber = 0;
+    for (;;) {
+      int count = reader.next();
+      if (count == 0) break;
+      
+      // Do for each row in the batch
+      for (int row=0; row<count; row++, rowNumber++) {
+       
+        // Display the row
+        System.out.printf("%10d", rowNumber);
+        for (ValueVector v: table.getValueVectors()) {
+          System.out.printf(" | %16s", v.getAccessor().getObject(row));
+        }
+        System.out.println();
+        
+      }
+    }
+  }
+
+  
+  /** 
+   * A dummy OutputMutator so we can examine the contents of the current batch 
+   */
+  static class TestOutput implements OutputMutator {
+    List<ValueVector> vectors = new ArrayList<ValueVector>();
+
+    public void addField(ValueVector vector) throws SchemaChangeException {
+      vectors.add(vector); 
+    }
+    
+    public Object get(int column, int row) {
+      return vectors.get(column).getAccessor().getObject(row);
+    }
+     
+    public void removeField(MaterializedField field) {}
+    public void removeAllFields() {}
+    public void setNewSchema() {}
+  }
+  
+ 
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestTableProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestTableProvider.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestTableProvider.java
new file mode 100644
index 0000000..c4da32b
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestTableProvider.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.ischema;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.memory.TopLevelAllocator;
+import org.apache.drill.exec.ops.FragmentContext;
+import org.apache.drill.exec.physical.impl.OutputMutator;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.store.RecordReader;
+import org.apache.drill.exec.store.ischema.FixedTable;
+import org.apache.drill.exec.store.ischema.PipeProvider;
+import org.apache.drill.exec.store.ischema.RowRecordReader;
+import org.apache.drill.exec.vector.ValueVector;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Using a test table with two columns, create data and verify the values are in the record batch.
+ */
+public class TestTableProvider {
+  
+  @Test
+  public void zeroRead() {
+    readTestTable(0);
+  }
+  
+  @Test
+  public void oneRead() {
+    readTestTable(1);
+  }
+  
+  @Test
+  public void smallRead() {
+    readTestTable(10);
+  }
+  
+  @Test
+  public void largeRead() {
+    readTestTable(1024*1024);
+  }
+  
+  
+  /**
+   * Read record batches from the test table and verify the contents.
+   * @param nrRows - the total number of rows expected.
+   */
+  private void readTestTable(int nrRows) {
+    
+    // Mock up a context with a BufferAllocator
+    FragmentContext context = mock(FragmentContext.class);
+    when(context.getAllocator()).thenReturn(new TopLevelAllocator());
+    
+    // Create a RecordReader which reads from the test table.
+    RecordReader reader = new RowRecordReader(context, new TestTable(), new TestProvider(nrRows));
+    
+    // Create a dummy OutputMutator for the RecordReader.
+    TestOutput output = new TestOutput();
+    try {reader.setup(output);}
+    catch (ExecutionSetupException e) {Assert.fail("reader threw an exception");}
+
+    // Do for each record batch
+    int rowNumber = 0;
+    for (;;) {
+      int count = reader.next();
+      if (count == 0) break;
+      
+      // Do for each row in the batch
+      for (int row=0; row<count; row++, rowNumber++) {
+        
+        // Verify the row has an integer and string containing the row number
+        int intValue = (int)output.get(1, row);
+        String strValue = (String)output.get(0, row);
+        Assert.assertEquals(rowNumber, intValue);
+        Assert.assertEquals(rowNumber, Integer.parseInt(strValue));
+      }
+    }
+
+    // Verify we read the correct number of rows.
+    Assert.assertEquals(nrRows, rowNumber);
+  }
+
+  
+  /**
+   * Class to define the table we want to create. Two columns - string, integer
+   */
+  static class TestTable extends FixedTable {
+    static final String tableName = "MOCK_TABLE";
+    static final String[] fieldNames = {"STRING_COLUMN", "INTEGER_COLUMN"};
+    static final MajorType[] fieldTypes = {VARCHAR, INT};
+    TestTable() {
+      super(tableName, fieldNames, fieldTypes);
+    }
+  }
+  
+  
+  /**
+   * Class to generate data for the table
+   */
+  static class TestProvider extends PipeProvider {
+    int maxRows;
+    TestProvider(int maxRows) {
+      this.maxRows = maxRows;
+    }
+    void generateRows() {
+      for (int rowNumber=0; rowNumber<maxRows; rowNumber++) {
+        writeRow(Integer.toString(rowNumber), rowNumber);
+      }
+    }
+  }
+  
+  
+  
+  
+  /** 
+   * A dummy OutputMutator so we can examine the contents of the current batch 
+   */
+  static class TestOutput implements OutputMutator {
+    List<ValueVector> vectors = new ArrayList<ValueVector>();
+
+    public void addField(ValueVector vector) throws SchemaChangeException {
+      vectors.add(vector); 
+    }
+    
+    public Object get(int column, int row) {
+      return vectors.get(column).getAccessor().getObject(row);
+    }
+     
+    public void removeField(MaterializedField field) {}
+    public void removeAllFields() {}
+    public void setNewSchema() {}
+  }
+  
+ 
+}
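TestProvider above demonstrates the PipeProvider contract: subclass it, override generateRows(), and emit each row through writeRow(...). A second hedged sketch under the same assumption (writeRow taking one Object per column):

      // Illustrative only: a PipeProvider emitting a fixed set of rows.
      static class StaticProvider extends PipeProvider {
        void generateRows() {
          writeRow("alpha", 0);
          writeRow("beta", 1);
        }
      }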

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0ca797a4/sqlparser/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java
----------------------------------------------------------------------
diff --git a/sqlparser/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java b/sqlparser/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java
index dc80f33..f5081b3 100644
--- a/sqlparser/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java
+++ b/sqlparser/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java
@@ -64,6 +64,14 @@ public class TestJdbcQuery {
     testQuery("select * from cp.`employee.json`");
   }
 
+  @Test
+  public void testInfoSchema() throws Exception{
+    testQuery("select * from INFORMATION_SCHEMA.SCHEMATA");
+    testQuery("select * from INFORMATION_SCHEMA.CATALOGS");
+    testQuery("select * from INFORMATION_SCHEMA.VIEWS");
+    testQuery("select * from INFORMATION_SCHEMA.TABLES");
+    testQuery("select * from INFORMATION_SCHEMA.COLUMNS");
+  }
   
   @Test 
   public void testCast() throws Exception{
@@ -125,6 +133,7 @@ public class TestJdbcQuery {
         System.out.println(String.format("Query completed in %d millis.", watch.elapsed(TimeUnit.MILLISECONDS)));
       }
 
+      System.out.println("\n\n\n");
       success = true;
     }finally{
       if(!success) Thread.sleep(2000);
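The new testInfoSchema() cases run through the existing testQuery() harness. For an ad-hoc check outside JUnit, a hedged JDBC sketch; the connection URL is an assumption (use whatever URL the harness passes to DriverManager), and the column names follow the usual INFORMATION_SCHEMA conventions:

      // Hedged sketch: querying the new INFORMATION_SCHEMA tables over JDBC.
      import java.sql.Connection;
      import java.sql.DriverManager;
      import java.sql.ResultSet;
      import java.sql.Statement;

      public class InfoSchemaQuery {
        public static void main(String[] args) throws Exception {
          // Assumed URL; point it at the same Drillbit the test harness targets.
          Connection c = DriverManager.getConnection("jdbc:drill:zk=local");
          Statement s = c.createStatement();
          ResultSet rs = s.executeQuery("select * from INFORMATION_SCHEMA.TABLES");
          while (rs.next()) {
            // Assumed standard INFORMATION_SCHEMA column names.
            System.out.println(rs.getString("TABLE_SCHEMA") + "." + rs.getString("TABLE_NAME"));
          }
          rs.close(); s.close(); c.close();
        }
      }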

