drill-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From prog...@apache.org
Subject [1/5] drill git commit: DRILL-5325: Unit tests for the managed sort
Date Wed, 21 Jun 2017 18:29:09 GMT
Repository: drill
Updated Branches:
  refs/heads/master c16e5f807 -> 90f43bff7


http://git-wip-us.apache.org/repos/asf/drill/blob/90f43bff/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/managed/TestSortImpl.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/managed/TestSortImpl.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/managed/TestSortImpl.java
new file mode 100644
index 0000000..e249c19
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/managed/TestSortImpl.java
@@ -0,0 +1,609 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.physical.impl.xsort.managed;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.drill.common.expression.FieldReference;
+import org.apache.drill.common.logical.data.Order.Ordering;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.ops.OperExecContext;
+import org.apache.drill.exec.physical.config.Sort;
+import org.apache.drill.exec.physical.impl.spill.SpillSet;
+import org.apache.drill.exec.physical.impl.xsort.managed.SortImpl.SortResults;
+import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
+import org.apache.drill.exec.proto.UserBitShared.QueryId;
+import org.apache.drill.exec.record.BatchSchema;
+import org.apache.drill.exec.record.VectorContainer;
+import org.apache.drill.test.DrillTest;
+import org.apache.drill.test.OperatorFixture;
+import org.apache.drill.test.OperatorFixture.OperatorFixtureBuilder;
+import org.apache.drill.test.rowSet.DirectRowSet;
+import org.apache.drill.test.rowSet.HyperRowSetImpl;
+import org.apache.drill.test.rowSet.IndirectRowSet;
+import org.apache.drill.test.rowSet.RowSet;
+import org.apache.drill.test.rowSet.RowSet.ExtendableRowSet;
+import org.apache.drill.test.rowSet.RowSet.RowSetReader;
+import org.apache.drill.test.rowSet.RowSet.RowSetWriter;
+import org.apache.drill.test.rowSet.RowSetBuilder;
+import org.apache.drill.test.rowSet.RowSetComparison;
+import org.apache.drill.test.rowSet.SchemaBuilder;
+import org.junit.Test;
+
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Lists;
+
+/**
+ * Tests the external sort implementation: the "guts" of the sort stripped of the
+ * Volcano-protocol layer. Assumes the individual components are already tested.
+ */
+
+public class TestSortImpl extends DrillTest {
+
+  /**
+   * Create the sort implementation to be used by test.
+   *
+   * @param fixture operator fixture
+   * @param sortOrder sort order as specified by {@link Ordering}
+   * @param nullOrder null order as specified by {@link Ordering}
+   * @param outputBatch where the sort should write its output
+   * @return the initialized sort implementation, ready to
+   * do work
+   */
+
+  public static SortImpl makeSortImpl(OperatorFixture fixture,
+                               String sortOrder, String nullOrder,
+                               VectorContainer outputBatch) {
+    // Sort on the single column "key" with the requested sort and null ordering.
+    FieldReference expr = FieldReference.getWithQuotedRef("key");
+    Ordering ordering = new Ordering(sortOrder, expr, nullOrder);
+    Sort popConfig = new Sort(null, Lists.newArrayList(ordering), false);
+    OperExecContext opContext = fixture.newOperExecContext(popConfig);
+    // Fabricated query and fragment identity; presumably used only to name
+    // spill files -- TODO confirm against SpillSet.
+    QueryId queryId = QueryId.newBuilder()
+        .setPart1(1234)
+        .setPart2(5678)
+        .build();
+    FragmentHandle handle = FragmentHandle.newBuilder()
+          .setMajorFragmentId(2)
+          .setMinorFragmentId(3)
+          .setQueryId(queryId)
+          .build();
+    // Assemble the sort from its components: config, spill management, and
+    // the priority-queue copier used to merge runs.
+    SortConfig sortConfig = new SortConfig(opContext.getConfig());
+    SpillSet spillSet = new SpillSet(opContext.getConfig(), handle,
+                                     popConfig);
+    PriorityQueueCopierWrapper copierHolder = new PriorityQueueCopierWrapper(opContext);
+    SpilledRuns spilledRuns = new SpilledRuns(opContext, spillSet, copierHolder);
+    return new SortImpl(opContext, sortConfig, spilledRuns, outputBatch);
+  }
+
+  /**
+   * Handy fixture to hold a sort, a set of input row sets (batches) and the
+   * output set of row sets (batches.) Pumps the input into the sort and
+   * harvests the output. Subclasses define the specifics of the sort,
+   * define the input data, and validate the output data.
+   */
+
+  public static class SortTestFixture {
+    private final OperatorFixture fixture;
+    // Input batches fed to the sort, in arrival order.
+    private final List<RowSet> inputSets = new ArrayList<>();
+    // Expected output batches, in the order the sort should emit them.
+    private final List<RowSet> expected = new ArrayList<>();
+    String sortOrder = Ordering.ORDER_ASC;
+    String nullOrder = Ordering.NULLS_UNSPECIFIED;
+
+    public SortTestFixture(OperatorFixture fixture) {
+      this.fixture = fixture;
+    }
+
+    public SortTestFixture(OperatorFixture fixture, String sortOrder, String nullOrder) {
+      this.fixture = fixture;
+      this.sortOrder = sortOrder;
+      this.nullOrder = nullOrder;
+    }
+
+    public void addInput(RowSet input) {
+      inputSets.add(input);
+    }
+
+    public void addOutput(RowSet output) {
+      expected.add(output);
+    }
+
+    // Pumps the input batches through the sort and verifies each output
+    // batch against the expected row sets.
+    public void run() {
+      VectorContainer dest = new VectorContainer();
+      SortImpl sort = makeSortImpl(fixture, sortOrder, nullOrder, dest);
+
+      // Simulates a NEW_SCHEMA event
+
+      if (! inputSets.isEmpty()) {
+        sort.setSchema(inputSets.get(0).container().getSchema());
+      }
+
+      // Simulates an OK event
+
+      for (RowSet input : inputSets) {
+        sort.addBatch(input.vectorAccessible());
+      }
+
+      // Simulate returning results
+
+      SortResults results = sort.startMerge();
+      // The sort may deliver results in a container other than the one we
+      // supplied; adopt whichever container it actually used.
+      if (results.getContainer() != dest) {
+        dest.clear();
+        dest = results.getContainer();
+      }
+      for (RowSet expectedSet : expected) {
+        assertTrue(results.next());
+        RowSet rowSet = toRowSet(fixture, results, dest);
+        // Uncomment these for debugging. Leave them commented otherwise
+        // to avoid polluting the Maven build output unnecessarily.
+//        System.out.println("Expected:");
+//        expectedSet.print();
+//        System.out.println("Actual:");
+//        rowSet.print();
+        new RowSetComparison(expectedSet)
+              .verify(rowSet);
+        expectedSet.clear();
+      }
+      assertFalse(results.next());
+      validateSort(sort);
+      results.close();
+      dest.clear();
+      sort.close();
+      validateFinalStats(sort);
+    }
+
+    // Hooks for subclasses: check sort metrics before and after close().
+    protected void validateSort(SortImpl sort) { }
+    protected void validateFinalStats(SortImpl sort) { }
+  }
+
+  /**
+   * Sort produces a variety of output types. Convert each type to the corresponding
+   * row set format. For historical reasons, the sort dumps its output into a vector
+   * container (normally attached to the external sort batch, here used stand-alone.)
+   *
+   * @param fixture operator test fixture
+   * @param results sort results iterator
+   * @param dest container that holds the sort results
+   * @return a row set wrapping the sort output in the form matching the
+   * results' selection-vector mode (hyper, indirect or direct)
+   */
+
+  private static RowSet toRowSet(OperatorFixture fixture, SortResults results, VectorContainer dest) {
+    // Choose the row-set flavor from the sort's indirection mode:
+    // SV4 -> hyper batch, SV2 -> single indirect batch, neither -> direct.
+    if (results.getSv4() != null) {
+      return new HyperRowSetImpl(fixture.allocator(), dest, results.getSv4());
+    } else if (results.getSv2() != null) {
+      return new IndirectRowSet(fixture.allocator(), dest, results.getSv2());
+    } else {
+      return new DirectRowSet(fixture.allocator(), dest);
+    }
+  }
+
+  /**
+   * Test for null input (no input batches). Note that, in this case,
+   * we never see a schema.
+   * @throws Exception
+   */
+
+  @Test
+  public void testNullInput() throws Exception {
+    try (OperatorFixture fixture = OperatorFixture.standardFixture()) {
+      // No inputs and no expected outputs: the sort never sees a schema.
+      SortTestFixture sortTest = new SortTestFixture(fixture);
+      sortTest.run();
+    }
+  }
+
+  /**
+   * Test for an input with a schema, but only an empty input batch.
+   * @throws Exception
+   */
+
+  @Test
+  public void testEmptyInput() throws Exception {
+    try (OperatorFixture fixture = OperatorFixture.standardFixture()) {
+      BatchSchema schema = SortTestUtilities.nonNullSchema();
+      SortTestFixture sortTest = new SortTestFixture(fixture);
+      // A single zero-row batch: a schema arrives, but no data and no output.
+      sortTest.addInput(fixture.rowSetBuilder(schema)
+          .build());
+      sortTest.run();
+    }
+  }
+
+  /**
+   * Degenerate case: single row in single batch.
+   * @throws Exception
+   */
+
+  @Test
+  public void testSingleRow() throws Exception {
+    try (OperatorFixture fixture = OperatorFixture.standardFixture()) {
+      BatchSchema schema = SortTestUtilities.nonNullSchema();
+      SortTestFixture sortTest = new SortTestFixture(fixture);
+      // One row in, the same row out.
+      sortTest.addInput(fixture.rowSetBuilder(schema)
+          .add(1, "first")
+          .build());
+      sortTest.addOutput(fixture.rowSetBuilder(schema)
+          .add(1, "first")
+          .build());
+      sortTest.run();
+    }
+  }
+
+  /**
+   * Degenerate case: two (unsorted) rows in single batch
+   * @throws Exception
+   */
+
+  @Test
+  public void testSingleBatch() throws Exception {
+    try (OperatorFixture fixture = OperatorFixture.standardFixture()) {
+      BatchSchema schema = SortTestUtilities.nonNullSchema();
+      SortTestFixture sortTest = new SortTestFixture(fixture);
+      // Rows arrive out of order within one batch; expect them sorted by key.
+      sortTest.addInput(fixture.rowSetBuilder(schema)
+          .add(2, "second")
+          .add(1, "first")
+          .build());
+      sortTest.addOutput(fixture.rowSetBuilder(schema)
+          .add(1, "first")
+          .add(2, "second")
+          .build());
+      sortTest.run();
+    }
+  }
+
+  /**
+   * Degenerate case, one row in each of two
+   * (unsorted) batches.
+   * @throws Exception
+   */
+
+  @Test
+  public void testTwoBatches() throws Exception {
+    try (OperatorFixture fixture = OperatorFixture.standardFixture()) {
+      BatchSchema schema = SortTestUtilities.nonNullSchema();
+      SortTestFixture sortTest = new SortTestFixture(fixture);
+      // Rows arrive out of order across two batches; expect a single
+      // sorted output batch.
+      sortTest.addInput(fixture.rowSetBuilder(schema)
+          .add(2, "second")
+          .build());
+      sortTest.addInput(fixture.rowSetBuilder(schema)
+          .add(1, "first")
+          .build());
+      sortTest.addOutput(fixture.rowSetBuilder(schema)
+          .add(1, "first")
+          .add(2, "second")
+          .build());
+      sortTest.run();
+    }
+  }
+
+  /**
+   * Crude-but-effective data generator that produces pseudo-random data
+   * that can be easily verified. The pseudo-random data is generated by the
+   * simple means of incrementing a counter using a prime step, and wrapping.
+   * This ensures we visit each value exactly once, and that the sorted output will
+   * be a continuous run of numbers in proper order.
+   */
+
+  public static class DataGenerator {
+    private final OperatorFixture fixture;
+    private final BatchSchema schema;
+    // Total number of rows to generate across all batches.
+    private final int targetCount;
+    // Rows per batch, capped at Character.MAX_VALUE (65535).
+    private final int batchSize;
+    // Increment applied to the key counter; chosen prime so that, when
+    // coprime with targetCount, the keys form a permutation of [0, targetCount).
+    private final int step;
+    private int rowCount;
+    private int currentValue;
+
+    public DataGenerator(OperatorFixture fixture, int targetCount, int batchSize) {
+      this(fixture, targetCount, batchSize, 0, guessStep(targetCount));
+    }
+
+    public DataGenerator(OperatorFixture fixture, int targetCount, int batchSize, int seed, int step) {
+      this.fixture = fixture;
+      this.targetCount = targetCount;
+      this.batchSize = Math.min(batchSize, Character.MAX_VALUE);
+      this.step = step;
+      schema = SortTestUtilities.nonNullSchema();
+      currentValue = seed;
+    }
+
+    /**
+     * Pick a reasonable prime step based on data size.
+     *
+     * @param target number of rows to generate
+     * @return the prime step size
+     */
+
+    private static int guessStep(int target) {
+      if (target < 10) {
+        return 7;
+      } else if (target < 200) {
+        return 71;
+      } else if (target < 2000) {
+        return 701;
+      } else if (target < 20000) {
+        return 7001;
+      } else {
+        return 17011;
+      }
+    }
+
+    // Builds the next input batch, or returns null once targetCount rows have
+    // been produced. The value column records "<row index>, <key>".
+    public RowSet nextRowSet() {
+      if (rowCount == targetCount) {
+        return null;
+      }
+      RowSetBuilder builder = fixture.rowSetBuilder(schema);
+      int end = Math.min(batchSize, targetCount - rowCount);
+      for (int i = 0; i < end; i++) {
+        builder.add(currentValue, i + ", " + currentValue);
+        currentValue = (currentValue + step) % targetCount;
+        rowCount++;
+      }
+      return builder.build();
+    }
+  }
+
+  /**
+   * Validate a sort output batch based on the expectation that the key
+   * is an ordered sequence of integers, split across multiple batches.
+   */
+
+  public static class DataValidator {
+    private final int targetCount;
+    private final int batchSize;
+    private int batchCount;
+    private int rowCount;
+
+    public DataValidator(int targetCount, int batchSize) {
+      this.targetCount = targetCount;
+      // Cap must match DataGenerator's so expected batch sizes line up.
+      this.batchSize = Math.min(batchSize, Character.MAX_VALUE);
+    }
+
+    // Checks one output batch: correct size, and keys continuing the run
+    // 0, 1, 2, ... across successive batches.
+    public void validate(RowSet output) {
+      batchCount++;
+      int expectedSize = Math.min(batchSize, targetCount - rowCount);
+      assertEquals("Size of batch " + batchCount, expectedSize, output.rowCount());
+      RowSetReader reader = output.reader();
+      while (reader.next()) {
+        assertEquals("Value of " + batchCount + ":" + rowCount,
+            rowCount, reader.column(0).getInt());
+        rowCount++;
+      }
+    }
+
+    // Call after the last batch to verify no rows were lost.
+    public void validateDone() {
+      assertEquals("Wrong row count", targetCount, rowCount);
+    }
+  }
+
+  Stopwatch timer = Stopwatch.createUnstarted();
+
+  /**
+   * Run a full-blown sort test with multiple input batches. Because we want to
+   * generate multiple inputs, we don't create them statically. Instead, we generate
+   * them on the fly using a data generator. A matching data validator verifies the
+   * output. Here, we are focusing on overall test flow. Separate, detailed, unit
+   * tests have already probed the details of each sort component and data type,
+   * so we don't need to repeat that whole exercise here; using integer keys is
+   * sufficient.
+   *
+   * @param fixture the operator test fixture
+   * @param dataGen input batch generator
+   * @param validator validates output batches
+   */
+
+  public void runLargeSortTest(OperatorFixture fixture, DataGenerator dataGen,
+                               DataValidator validator) {
+    VectorContainer dest = new VectorContainer();
+    SortImpl sort = makeSortImpl(fixture, Ordering.ORDER_ASC, Ordering.NULLS_UNSPECIFIED, dest);
+
+    int batchCount = 0;
+    RowSet input;
+    // The stopwatch accumulates only the time spent inside the sort;
+    // data generation and validation run with the timer stopped.
+    while ((input = dataGen.nextRowSet()) != null) {
+      batchCount++;
+      if (batchCount == 1) {
+        // Simulates a NEW_SCHEMA event
+
+        timer.start();
+        sort.setSchema(input.container().getSchema());
+        timer.stop();
+      }
+
+      // Simulates an OK event
+
+      timer.start();
+      sort.addBatch(input.vectorAccessible());
+      timer.stop();
+    }
+
+    // Simulate returning results
+
+    timer.start();
+    SortResults results = sort.startMerge();
+    // Adopt whichever container the sort actually used for its output.
+    if (results.getContainer() != dest) {
+      dest.clear();
+      dest = results.getContainer();
+    }
+    while (results.next()) {
+      timer.stop();
+      RowSet output = toRowSet(fixture, results, dest);
+      validator.validate(output);
+      timer.start();
+    }
+    timer.stop();
+    validator.validateDone();
+    results.close();
+    dest.clear();
+    sort.close();
+  }
+
+  /**
+   * Set up and run a test for "jumbo" batches, and time the run.
+   * @param fixture operator test fixture
+   * @param rowCount number of rows to test
+   */
+
+  public void runJumboBatchTest(OperatorFixture fixture, int rowCount) {
+    timer.reset();
+    // Use the maximum batch size (65535 rows) for generation and validation.
+    DataGenerator dataGen = new DataGenerator(fixture, rowCount, Character.MAX_VALUE);
+    DataValidator validator = new DataValidator(rowCount, Character.MAX_VALUE);
+    runLargeSortTest(fixture, dataGen, validator);
+    // Reports the in-sort time measured by runLargeSortTest.
+    System.out.println(timer.elapsed(TimeUnit.MILLISECONDS));
+  }
+
+  /**
+   * Most tests have used small row counts because we want to probe specific bits
+   * of interest. Try 1000 rows just to ensure things work.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testModerateBatch() throws Exception {
+    try (OperatorFixture fixture = OperatorFixture.standardFixture()) {
+      // 1000 rows: large enough to be non-trivial, small enough to be fast.
+      runJumboBatchTest(fixture, 1000);
+    }
+  }
+
+  /**
+   * Hit the sort with the largest possible batch size to ensure nothing is lost
+   * at the edges.
+   *
+   * @throws Exception
+   */
+
+  @Test
+  public void testLargeBatch() throws Exception {
+    try (OperatorFixture fixture = OperatorFixture.standardFixture()) {
+      // Character.MAX_VALUE (65535) rows: the largest batch size used here,
+      // to probe edge conditions.
+      runJumboBatchTest(fixture, Character.MAX_VALUE);
+    }
+  }
+
+  /**
+   * Run a test using wide rows. This stresses the "copier" portion of the sort
+   * and allows us to test the original generated copier and the revised "generic"
+   * copier.
+   *
+   * @param fixture operator test fixture
+   * @param colCount number of data (non-key) columns
+   * @param rowCount number of rows to generate
+   */
+
+  public void runWideRowsTest(OperatorFixture fixture, int colCount, int rowCount) {
+    // Build a schema with one INT key plus colCount INT data columns.
+    SchemaBuilder builder = new SchemaBuilder()
+        .add("key", MinorType.INT);
+    for (int i = 0; i < colCount; i++) {
+      builder.add("col" + (i+1), MinorType.INT);
+    }
+    BatchSchema schema = builder.build();
+    ExtendableRowSet rowSet = fixture.rowSet(schema);
+    RowSetWriter writer = rowSet.writer(rowCount);
+    // Keys are written already in ascending order; data cells encode row and
+    // column indexes (i * 100_000 + j).
+    for (int i = 0; i < rowCount; i++) {
+      writer.set(0, i);
+      for (int j = 0; j < colCount; j++) {
+        writer.set(j + 1, i * 100_000 + j);
+      }
+      writer.save();
+    }
+    writer.done();
+
+    VectorContainer dest = new VectorContainer();
+    SortImpl sort = makeSortImpl(fixture, Ordering.ORDER_ASC, Ordering.NULLS_UNSPECIFIED, dest);
+    timer.reset();
+    timer.start();
+    sort.setSchema(rowSet.container().getSchema());
+    sort.addBatch(rowSet.vectorAccessible());
+    SortResults results = sort.startMerge();
+    if (results.getContainer() != dest) {
+      dest.clear();
+      dest = results.getContainer();
+    }
+    // Only the batch count is asserted here (exactly one output batch);
+    // row content is not verified in this test.
+    assertTrue(results.next());
+    timer.stop();
+    assertFalse(results.next());
+    results.close();
+    dest.clear();
+    sort.close();
+    System.out.println(timer.elapsed(TimeUnit.MILLISECONDS));
+  }
+
+  /**
+   * Test wide rows with the stock copier.
+   *
+   * @throws Exception
+   */
+
+  @Test
+  public void testWideRows() throws Exception {
+    try (OperatorFixture fixture = OperatorFixture.standardFixture()) {
+      // 1000 data columns by 65535 rows stresses the copier layer.
+      runWideRowsTest(fixture, 1000, Character.MAX_VALUE);
+    }
+  }
+
+  /**
+   * Force the sorter to spill, and verify that the resulting data
+   * is correct. Uses a specific property of the sort to set the
+   * in-memory batch limit so that we don't have to fiddle with filling
+   * up memory. The point here is not to test the code that decides when
+   * to spill (that was already tested.) Nor to test the spilling
+   * mechanism itself (that has also already been tested.) Rather it is
+   * to ensure that, when those components are integrated into the
+   * sort implementation, that the whole assembly does the right thing.
+   *
+   * @throws Exception
+   */
+
+  @Test
+  public void testSpill() throws Exception {
+    // Cap the in-memory batch count at 2 so the third input batch forces a
+    // spill without having to exhaust actual memory.
+    OperatorFixtureBuilder builder = OperatorFixture.builder();
+    builder.configBuilder()
+      .put(ExecConstants.EXTERNAL_SORT_BATCH_LIMIT, 2);
+    try (OperatorFixture fixture = builder.build()) {
+      BatchSchema schema = SortTestUtilities.nonNullSchema();
+      SortTestFixture sortTest = new SortTestFixture(fixture) {
+        @Override
+        protected void validateSort(SortImpl sort) {
+          // Expect exactly one spill, no merges of spilled runs, and at
+          // most two batches held in memory at once.
+          assertEquals(1, sort.getMetrics().getSpillCount());
+          assertEquals(0, sort.getMetrics().getMergeCount());
+          assertEquals(2, sort.getMetrics().getPeakBatchCount());
+        }
+        @Override
+        protected void validateFinalStats(SortImpl sort) {
+          // Something must actually have been written to disk.
+          assertTrue(sort.getMetrics().getWriteBytes() > 0);
+        }
+      };
+      sortTest.addInput(fixture.rowSetBuilder(schema)
+          .add(2, "second")
+          .build());
+      sortTest.addInput(fixture.rowSetBuilder(schema)
+          .add(3, "third")
+          .build());
+      sortTest.addInput(fixture.rowSetBuilder(schema)
+          .add(1, "first")
+          .build());
+      sortTest.addOutput(fixture.rowSetBuilder(schema)
+          .add(1, "first")
+          .add(2, "second")
+          .add(3, "third")
+          .build());
+      sortTest.run();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/90f43bff/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/managed/TestSorter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/managed/TestSorter.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/managed/TestSorter.java
new file mode 100644
index 0000000..dd371d7
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/managed/TestSorter.java
@@ -0,0 +1,605 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.physical.impl.xsort.managed;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Random;
+
+import org.apache.drill.common.expression.FieldReference;
+import org.apache.drill.common.logical.data.Order.Ordering;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.memory.BufferAllocator;
+import org.apache.drill.exec.ops.OperExecContext;
+import org.apache.drill.exec.physical.config.Sort;
+import org.apache.drill.exec.record.BatchSchema;
+import org.apache.drill.test.DrillTest;
+import org.apache.drill.test.OperatorFixture;
+import org.apache.drill.test.rowSet.RowSet;
+import org.apache.drill.test.rowSet.RowSet.ExtendableRowSet;
+import org.apache.drill.test.rowSet.RowSet.RowSetReader;
+import org.apache.drill.test.rowSet.RowSet.RowSetWriter;
+import org.apache.drill.test.rowSet.RowSet.SingleRowSet;
+import org.apache.drill.test.rowSet.RowSetBuilder;
+import org.apache.drill.test.rowSet.RowSetComparison;
+import org.apache.drill.test.rowSet.RowSetUtilities;
+import org.apache.drill.test.rowSet.SchemaBuilder;
+import org.joda.time.Period;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Tests the generated per-batch sort code via its wrapper layer.
+ */
+
+public class TestSorter extends DrillTest {
+
+  public static OperatorFixture fixture;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    // One operator fixture shared by all tests in this class.
+    fixture = OperatorFixture.builder().build();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    // Release the shared fixture once all tests have run.
+    fixture.close();
+  }
+
+  // Builds a single-column Sort pop config for the given key column,
+  // sort order and null ordering.
+  public static Sort makeSortConfig(String key, String sortOrder, String nullOrder) {
+    FieldReference expr = FieldReference.getWithQuotedRef(key);
+    Ordering ordering = new Ordering(sortOrder, expr, nullOrder);
+    return new Sort(null, Lists.newArrayList(ordering), false);
+  }
+
+  // Convenience overload: ascending sort on "key" with nulls last.
+  public void runSorterTest(SingleRowSet rowSet, SingleRowSet expected) throws Exception {
+    runSorterTest(makeSortConfig("key", Ordering.ORDER_ASC, Ordering.NULLS_LAST), rowSet, expected);
+  }
+
+  // Sorts the given row set in place (via its SV2) using the generated
+  // sorter, then compares the result against the expected row set.
+  public void runSorterTest(Sort popConfig, SingleRowSet rowSet, SingleRowSet expected) throws Exception {
+    OperExecContext opContext = fixture.newOperExecContext(popConfig);
+    SorterWrapper sorter = new SorterWrapper(opContext);
+
+    sorter.sortBatch(rowSet.container(), rowSet.getSv2());
+
+    new RowSetComparison(expected)
+        .verifyAndClear(rowSet);
+    sorter.close();
+  }
+
+  // Test degenerate case: no rows
+
+  @Test
+  public void testEmptyRowSet() throws Exception {
+    // Degenerate case: sorting zero rows must succeed and yield zero rows.
+    BatchSchema schema = SortTestUtilities.nonNullSchema();
+    SingleRowSet rowSet = new RowSetBuilder(fixture.allocator(), schema)
+        .withSv2()
+        .build();
+    SingleRowSet expected = new RowSetBuilder(fixture.allocator(), schema)
+        .build();
+    runSorterTest(rowSet, expected);
+  }
+
+  // Sanity test: single row
+
+  @Test
+  public void testSingleRow() throws Exception {
+    // A single row is trivially sorted; output equals input.
+    BatchSchema schema = SortTestUtilities.nonNullSchema();
+    SingleRowSet rowSet = new RowSetBuilder(fixture.allocator(), schema)
+          .add(0, "0")
+          .withSv2()
+          .build();
+
+    SingleRowSet expected = new RowSetBuilder(fixture.allocator(), schema)
+        .add(0, "0")
+        .build();
+    runSorterTest(rowSet, expected);
+  }
+
+  // Paranoia: sort with two rows.
+
+  @Test
+  public void testTwoRows() throws Exception {
+    // Two rows given in reverse order must come back sorted ascending.
+    BatchSchema schema = SortTestUtilities.nonNullSchema();
+    SingleRowSet rowSet = new RowSetBuilder(fixture.allocator(), schema)
+        .add(1, "1")
+        .add(0, "0")
+        .withSv2()
+        .build();
+
+    SingleRowSet expected = new RowSetBuilder(fixture.allocator(), schema)
+        .add(0, "0")
+        .add(1, "1")
+        .build();
+    runSorterTest(rowSet, expected);
+  }
+
+  // Common scaffolding for the sort testers: builds the sort configuration
+  // and the sorter wrapper for a given sort order, null order, and key
+  // nullability.
+  private abstract static class BaseSortTester {
+    protected final OperatorFixture fixture;
+    protected final SorterWrapper sorter;
+    protected final boolean nullable;
+
+    public BaseSortTester(OperatorFixture fixture, String sortOrder, String nullOrder, boolean nullable) {
+      this.fixture = fixture;
+      Sort popConfig = makeSortConfig("key", sortOrder, nullOrder);
+      this.nullable = nullable;
+
+      OperExecContext opContext = fixture.newOperExecContext(popConfig);
+      sorter = new SorterWrapper(opContext);
+    }
+  }
+
+  private abstract static class SortTester extends BaseSortTester {
+
+    // The generated input data, kept so verify() can compute expected order.
+    protected DataItem data[];
+
+    public SortTester(OperatorFixture fixture, String sortOrder, String nullOrder, boolean nullable) {
+      super(fixture, sortOrder, nullOrder, nullable);
+    }
+
+    // Generates 20 rows with the given key type, sorts them with the
+    // generated sorter, then verifies against a Java-side reference sort.
+    public void test(MinorType type) throws SchemaChangeException {
+      data = makeDataArray(20);
+      BatchSchema schema = SortTestUtilities.makeSchema(type, nullable);
+      SingleRowSet input = makeDataSet(fixture.allocator(), schema, data);
+      input = input.toIndirect();
+      sorter.sortBatch(input.container(), input.getSv2());
+      sorter.close();
+      verify(input);
+    }
+
+    // One row of test data: a key, a value, and whether the key is null.
+    public static class DataItem {
+      public final int key;
+      public final int value;
+      public final boolean isNull;
+
+      public DataItem(int key, int value, boolean isNull) {
+        this.key = key;
+        this.value = value;
+        this.isNull = isNull;
+      }
+
+      @Override
+      public String toString() {
+        return "(" + key + ", \"" + value + "\", " +
+               (isNull ? "null" : "set") + ")";
+      }
+    }
+
+    // Generates keys by stepping a counter by 3 modulo size (a permutation
+    // when 3 and size are coprime); keys divisible by 5 are flagged null.
+    public DataItem[] makeDataArray(int size) {
+      DataItem values[] = new DataItem[size];
+      int key = 11;
+      int delta = 3;
+      for (int i = 0; i < size; i++) {
+        values[i] = new DataItem(key, i, key % 5 == 0);
+        key = (key + delta) % size;
+      }
+      return values;
+    }
+
+    // Writes the items into a new row set: column 0 holds the (possibly
+    // null) key, column 1 the value rendered as a string.
+    public SingleRowSet makeDataSet(BufferAllocator allocator, BatchSchema schema, DataItem[] items) {
+      ExtendableRowSet rowSet = fixture.rowSet(schema);
+      RowSetWriter writer = rowSet.writer(items.length);
+      for (int i = 0; i < items.length; i++) {
+        DataItem item = items[i];
+        if (nullable && item.isNull) {
+          writer.column(0).setNull();
+        } else {
+          RowSetUtilities.setFromInt(writer, 0, item.key);
+        }
+        writer.column(1).setString(Integer.toString(item.value));
+        writer.save();
+      }
+      writer.done();
+      return rowSet;
+    }
+
+    // Sorts a copy of the data with the subclass comparator and compares
+    // the resulting expected rows to the actual sorted rows.
+    private void verify(RowSet actual) {
+      DataItem expected[] = Arrays.copyOf(data, data.length);
+      doSort(expected);
+      RowSet expectedRows = makeDataSet(actual.allocator(), actual.schema().batch(), expected);
+      // Uncomment for debugging; keep commented to avoid polluting the
+      // Maven build output.
+//      System.out.println("Expected:");
+//      expectedRows.print();
+//      System.out.println("Actual:");
+//      actual.print();
+      doVerify(expected, expectedRows, actual);
+    }
+
+    // Default verification: exact row-by-row comparison.
+    protected void doVerify(DataItem[] expected, RowSet expectedRows, RowSet actual) {
+      new RowSetComparison(expectedRows)
+            .verifyAndClear(actual);
+    }
+
+    // Subclasses implement the Java-side reference sort.
+    protected abstract void doSort(DataItem[] expected);
+  }
+
+  // Tests numeric sort, ascending or descending, on a non-nullable key.
+  private static class TestSorterNumeric extends SortTester {
+
+    // +1 for ascending, -1 for descending.
+    private final int sign;
+
+    public TestSorterNumeric(OperatorFixture fixture, boolean asc) {
+      super(fixture,
+            asc ? Ordering.ORDER_ASC : Ordering.ORDER_DESC,
+            Ordering.NULLS_UNSPECIFIED, false);
+      sign = asc ? 1 : -1;
+    }
+
+    @Override
+    protected void doSort(DataItem[] expected) {
+      Arrays.sort(expected, new Comparator<DataItem>(){
+        @Override
+        public int compare(DataItem o1, DataItem o2) {
+          return sign * Integer.compare(o1.key, o2.key);
+        }
+      });
+    }
+  }
+
+  // Tests a nullable numeric key for each combination of asc/desc and
+  // nulls-first/nulls-last.
+  private static class TestSorterNullableNumeric extends SortTester {
+
+    // +1 for ascending, -1 for descending.
+    private final int sign;
+    // +1 when nulls sort last, -1 when nulls sort first.
+    private final int nullSign;
+
+    public TestSorterNullableNumeric(OperatorFixture fixture, boolean asc, boolean nullsLast) {
+      super(fixture,
+          asc ? Ordering.ORDER_ASC : Ordering.ORDER_DESC,
+          nullsLast ? Ordering.NULLS_LAST : Ordering.NULLS_FIRST,
+          true);
+      sign = asc ? 1 : -1;
+      nullSign = nullsLast ? 1 : -1;
+    }
+
+    @Override
+    protected void doSort(DataItem[] expected) {
+      Arrays.sort(expected, new Comparator<DataItem>(){
+        @Override
+        public int compare(DataItem o1, DataItem o2) {
+          // Nulls compare equal to each other and sort to the end chosen by
+          // nullSign; non-null keys compare numerically per sign.
+          if (o1.isNull  &&  o2.isNull) { return 0; }
+          if (o1.isNull) { return nullSign; }
+          if (o2.isNull) { return -nullSign; }
+          return sign * Integer.compare(o1.key, o2.key);
+        }
+      });
+    }
+
+    @Override
+    protected void doVerify(DataItem[] expected, RowSet expectedRows, RowSet actual) {
+      // Verify the non-null run and the null run separately; the null run is
+      // compared with a column mask applied (see withMask below).
+      int nullCount = 0;
+      for (DataItem item : expected) {
+        if (item.isNull) { nullCount++; }
+      }
+      int length = expected.length - nullCount;
+      int offset = (nullSign == 1) ? 0 : nullCount;
+      new RowSetComparison(expectedRows)
+            .offset(offset)
+            .span(length)
+            .verify(actual);
+      // NOTE(review): for nulls-first (nullSign == -1) this yields
+      // offset = length - nullCount, yet the nulls occupy rows
+      // [0, nullCount) of the sorted expected data -- confirm this offset
+      // arithmetic is intended.
+      offset = length - offset;
+      new RowSetComparison(expectedRows)
+            .offset(offset)
+            .span(nullCount)
+            .withMask(true, false)
+            .verifyAndClear(actual);
+    }
+  }
+
+  /**
+   * Tests ascending sorts on a string key. The expected order compares the
+   * decimal-string form of each key, so lexical rules apply (e.g. "10"
+   * sorts before "2").
+   */
+
+  private static class TestSorterStringAsc extends SortTester {
+
+    public TestSorterStringAsc(OperatorFixture fixture) {
+      super(fixture, Ordering.ORDER_ASC, Ordering.NULLS_UNSPECIFIED, false);
+    }
+
+    @Override
+    protected void doSort(DataItem[] expected) {
+      Arrays.sort(expected, new Comparator<DataItem>(){
+        @Override
+        public int compare(DataItem o1, DataItem o2) {
+          return Integer.toString(o1.key).compareTo(Integer.toString(o2.key));
+        }
+      });
+    }
+  }
+
+  /**
+   * Tests ascending sorts on a binary key. The expected order compares the
+   * hex-string form of each key; presumably the VARBINARY column is
+   * populated from the same hex form so byte order matches -- the data
+   * generator is defined elsewhere in this class.
+   */
+
+  private static class TestSorterBinaryAsc extends SortTester {
+
+    public TestSorterBinaryAsc(OperatorFixture fixture) {
+      super(fixture, Ordering.ORDER_ASC, Ordering.NULLS_UNSPECIFIED, false);
+    }
+
+    @Override
+    protected void doSort(DataItem[] expected) {
+      Arrays.sort(expected, new Comparator<DataItem>(){
+        @Override
+        public int compare(DataItem o1, DataItem o2) {
+          return Integer.toHexString(o1.key).compareTo(Integer.toHexString(o2.key));
+        }
+      });
+    }
+  }
+
+  /**
+   * Base class for testing ascending sorts on the INTERVAL family of
+   * types. Rather than comparing exact rows, verification checks relative
+   * order of the normalized period values field by field.
+   */
+
+  private abstract static class BaseTestSorterIntervalAsc extends BaseSortTester {
+
+    public BaseTestSorterIntervalAsc(OperatorFixture fixture) {
+      super(fixture, Ordering.ORDER_ASC, Ordering.NULLS_UNSPECIFIED, false);
+    }
+
+    // Builds a batch of random periods of the given interval type, sorts
+    // it in place through the sorter under test, then verifies ordering.
+    public void test(MinorType type) throws SchemaChangeException {
+      BatchSchema schema = new SchemaBuilder()
+          .add("key", type)
+          .build();
+      SingleRowSet input = makeInputData(fixture.allocator(), schema);
+      input = input.toIndirect();
+      sorter.sortBatch(input.container(), input.getSv2());
+      sorter.close();
+      verify(input);
+      input.clear();
+    }
+
+    // Generates 100 rows of random periods. Each component is drawn from
+    // its natural range (ms < 1000, sec/min < 60, etc.).
+    protected SingleRowSet makeInputData(BufferAllocator allocator,
+        BatchSchema schema) {
+      RowSetBuilder builder = fixture.rowSetBuilder(schema);
+      int rowCount = 100;
+      Random rand = new Random();
+      for (int i = 0; i < rowCount; i++) {
+        int ms = rand.nextInt(1000);
+        int sec = rand.nextInt(60);
+        int min = rand.nextInt(60);
+        int hr = rand.nextInt(24);
+        int day = rand.nextInt(28);
+        int mo = rand.nextInt(12);
+        int yr = rand.nextInt(10);
+        Period period = makePeriod(yr, mo, day, hr, min, sec, ms);
+         builder.add(period);
+      }
+      return builder.build();
+    }
+
+    // Builds the period for one row; subclasses keep only the components
+    // that their interval type can represent.
+    protected abstract Period makePeriod(int yr, int mo, int day, int hr, int min, int sec,
+        int ms);
+
+    // Verifies ascending order on the normalized periods: years first,
+    // then months, then the millisecond value of the day-time remainder.
+    // Each lower-order tracker resets when a higher-order field advances.
+    private void verify(SingleRowSet output) {
+      RowSetReader reader = output.reader();
+      int prevYears = 0;
+      int prevMonths = 0;
+      long prevMs = 0;
+      while (reader.next()) {
+        Period period = reader.column(0).getPeriod().normalizedStandard();
+//        System.out.println(period);
+        int years = period.getYears();
+        assertTrue(prevYears <= years);
+        if (prevYears != years) {
+          prevMonths = 0;
+          prevMs = 0;
+        }
+        prevYears = years;
+
+        int months = period.getMonths();
+        assertTrue(prevMonths <= months);
+        if (prevMonths != months) {
+           prevMs = 0;
+        }
+        prevMonths = months;
+
+        // Strip the year/month part, leaving only the day-time remainder
+        // which converts cleanly to a fixed duration.
+        Period remainder = period
+            .withYears(0)
+            .withMonths(0);
+
+        long ms = remainder.toStandardDuration().getMillis();
+        assertTrue(prevMs <= ms);
+        prevMs = ms;
+      }
+    }
+  }
+
+  /**
+   * Tests the full INTERVAL type, which carries every component from
+   * years down to milliseconds.
+   */
+
+  private static class TestSorterIntervalAsc extends BaseTestSorterIntervalAsc {
+
+    public TestSorterIntervalAsc(OperatorFixture fixture) {
+      super(fixture);
+    }
+
+    public void test() throws SchemaChangeException {
+      test(MinorType.INTERVAL);
+    }
+
+    @Override
+    protected Period makePeriod(int yr, int mo, int day, int hr, int min,
+        int sec, int ms) {
+      return Period.years(yr)
+          .withMonths(mo)
+          .withDays(day)
+          .withHours(hr)
+          .withMinutes(min)
+          .withSeconds(sec)
+          .withMillis(ms);
+    }
+  }
+
+  /**
+   * Tests the INTERVALYEAR type. Only the year and month components are
+   * used; the day/time parameters are deliberately ignored since this
+   * type cannot represent them.
+   */
+
+  private static class TestSorterIntervalYearAsc extends BaseTestSorterIntervalAsc {
+
+    public TestSorterIntervalYearAsc(OperatorFixture fixture) {
+      super(fixture);
+    }
+
+    public void test() throws SchemaChangeException {
+      test(MinorType.INTERVALYEAR);
+    }
+
+    @Override
+    protected Period makePeriod(int yr, int mo, int day, int hr, int min,
+        int sec, int ms) {
+      return Period.years(yr)
+          .withMonths(mo);
+    }
+  }
+
+  /**
+   * Tests the INTERVALDAY type. Only day-through-millisecond components
+   * are used; the year and month parameters are deliberately ignored
+   * since this type cannot represent them.
+   */
+
+  private static class TestSorterIntervalDayAsc extends BaseTestSorterIntervalAsc {
+
+    public TestSorterIntervalDayAsc(OperatorFixture fixture) {
+      super(fixture);
+    }
+
+    public void test() throws SchemaChangeException {
+      test(MinorType.INTERVALDAY);
+    }
+
+    @Override
+    protected Period makePeriod(int yr, int mo, int day, int hr, int min,
+        int sec, int ms) {
+      return Period.days(day)
+          .withHours(hr)
+          .withMinutes(min)
+          .withSeconds(sec)
+          .withMillis(ms);
+    }
+  }
+
+  /**
+   * Test each numeric sort-key type currently supported by the sorter.
+   * The commented-out types either fail due to a known issue (DRILL-5329)
+   * or lack a column writer in the test framework.
+   *
+   * @throws Exception for internal errors
+   */
+
+  @Test
+  public void testNumericTypes() throws Exception {
+    TestSorterNumeric tester1 = new TestSorterNumeric(fixture, true);
+//      tester1.test(MinorType.TINYINT); // DRILL-5329
+//      tester1.test(MinorType.UINT1); DRILL-5329
+//      tester1.test(MinorType.SMALLINT); DRILL-5329
+//      tester1.test(MinorType.UINT2); DRILL-5329
+    tester1.test(MinorType.INT);
+//      tester1.test(MinorType.UINT4); DRILL-5329
+    tester1.test(MinorType.BIGINT);
+//      tester1.test(MinorType.UINT8); DRILL-5329
+    tester1.test(MinorType.FLOAT4);
+    tester1.test(MinorType.FLOAT8);
+    tester1.test(MinorType.DECIMAL9);
+    tester1.test(MinorType.DECIMAL18);
+//      tester1.test(MinorType.DECIMAL28SPARSE); DRILL-5329
+//      tester1.test(MinorType.DECIMAL38SPARSE); DRILL-5329
+//    tester1.test(MinorType.DECIMAL28DENSE); No writer
+//    tester1.test(MinorType.DECIMAL38DENSE); No writer
+    tester1.test(MinorType.DATE);
+    tester1.test(MinorType.TIME);
+    tester1.test(MinorType.TIMESTAMP);
+  }
+
+  /**
+   * Test the VARCHAR data type as a sort key.
+   *
+   * @throws Exception for internal errors
+   */
+
+  @Test
+  public void testVarCharTypes() throws Exception {
+    TestSorterStringAsc tester = new TestSorterStringAsc(fixture);
+    tester.test(MinorType.VARCHAR);
+//      tester.test(MinorType.VAR16CHAR); DRILL-5329
+  }
+
+  /**
+   * Test the VARBINARY data type as a sort key. Expected order is
+   * computed from the hex-string form of the keys.
+   *
+   * @throws Exception for internal errors
+   */
+
+  @Test
+  public void testVarBinary() throws Exception {
+    TestSorterBinaryAsc tester = new TestSorterBinaryAsc(fixture);
+    tester.test(MinorType.VARBINARY);
+  }
+
+  /**
+   * Test the INTERVAL data type as a sort key. Ascending order is
+   * verified field-by-field on the normalized period values.
+   *
+   * @throws Exception for internal errors
+   */
+
+  @Test
+  public void testInterval() throws Exception {
+    TestSorterIntervalAsc tester = new TestSorterIntervalAsc(fixture);
+    tester.test();
+  }
+
+  /**
+   * Test the INTERVALYEAR data type (year/month components only) as a
+   * sort key.
+   *
+   * @throws Exception for internal errors
+   */
+
+  @Test
+  public void testIntervalYear() throws Exception {
+    TestSorterIntervalYearAsc tester = new TestSorterIntervalYearAsc(fixture);
+    tester.test();
+  }
+
+  /**
+   * Test the INTERVALDAY data type (day-through-millisecond components
+   * only) as a sort key.
+   *
+   * @throws Exception for internal errors
+   */
+
+  @Test
+  public void testIntervalDay() throws Exception {
+    TestSorterIntervalDayAsc tester = new TestSorterIntervalDayAsc(fixture);
+    tester.test();
+  }
+
+  /**
+   * Test descending sort order on a numeric (INT) sort key.
+   *
+   * @throws Exception for internal errors
+   */
+
+  @Test
+  public void testDesc() throws Exception {
+    TestSorterNumeric tester = new TestSorterNumeric(fixture, false);
+    tester.test(MinorType.INT);
+  }
+
+  /**
+   * Verify that nulls sort in the requested position: high or low.
+   * Earlier tests verify that "unspecified" maps to high or low
+   * depending on sort order.
+   *
+   * @throws Exception for internal errors
+   */
+
+  @Test
+  public void testNullable() throws Exception {
+    // Exercise all four (direction, null ordering) combinations.
+    TestSorterNullableNumeric tester = new TestSorterNullableNumeric(fixture, true, true);
+    tester.test(MinorType.INT);
+    tester = new TestSorterNullableNumeric(fixture, true, false);
+    tester.test(MinorType.INT);
+    tester = new TestSorterNullableNumeric(fixture, false, true);
+    tester.test(MinorType.INT);
+    tester = new TestSorterNullableNumeric(fixture, false, false);
+    tester.test(MinorType.INT);
+  }
+
+  /**
+   * Test a sort key that is a member of a map ("map.key").
+   * Ignored pending resolution of DRILL-5384.
+   *
+   * @throws Exception for internal errors
+   */
+
+  @Test
+  @Ignore("DRILL-5384")
+  public void testMapKey() throws Exception {
+    BatchSchema schema = new SchemaBuilder()
+        .addMap("map")
+          .add("key", MinorType.INT)
+          .add("value", MinorType.VARCHAR)
+          .buildMap()
+        .build();
+
+    SingleRowSet input = fixture.rowSetBuilder(schema)
+        .add(3, "third")
+        .add(1, "first")
+        .add(2, "second")
+        .withSv2()
+        .build();
+
+    SingleRowSet output = fixture.rowSetBuilder(schema)
+        .add(1, "first")
+        .add(2, "second")
+        .add(3, "third")
+        .build();
+    Sort popConfig = makeSortConfig("map.key", Ordering.ORDER_ASC, Ordering.NULLS_LAST);
+    runSorterTest(popConfig, input, output);
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/90f43bff/exec/memory/base/src/main/java/org/apache/drill/exec/memory/BaseAllocator.java
----------------------------------------------------------------------
diff --git a/exec/memory/base/src/main/java/org/apache/drill/exec/memory/BaseAllocator.java b/exec/memory/base/src/main/java/org/apache/drill/exec/memory/BaseAllocator.java
index d872d67..8efa3ee 100644
--- a/exec/memory/base/src/main/java/org/apache/drill/exec/memory/BaseAllocator.java
+++ b/exec/memory/base/src/main/java/org/apache/drill/exec/memory/BaseAllocator.java
@@ -50,6 +50,13 @@ public abstract class BaseAllocator extends Accountant implements BufferAllocato
   public static final int DEBUG_LOG_LENGTH = 6;
   public static final boolean DEBUG = AssertionUtil.isAssertionsEnabled()
       || Boolean.parseBoolean(System.getProperty(DEBUG_ALLOCATOR, "false"));
+
+  /**
+   * Size of the I/O buffer used when writing to files. Set here
+   * because the buffer is used multiple times by an operator.
+   */
+
+  private static final int IO_BUFFER_SIZE = 32*1024;
   private final Object DEBUG_LOCK = DEBUG ? new Object() : null;
 
   private final BaseAllocator parentAllocator;
@@ -241,7 +248,6 @@ public abstract class BaseAllocator extends Accountant implements BufferAllocato
         releaseBytes(actualRequestSize);
       }
     }
-
   }
 
   /**
@@ -452,7 +458,6 @@ public abstract class BaseAllocator extends Accountant implements BufferAllocato
         historicalLog.recordEvent("releaseReservation(%d)", nBytes);
       }
     }
-
   }
 
   @Override
@@ -573,7 +578,6 @@ public abstract class BaseAllocator extends Accountant implements BufferAllocato
     }
   }
 
-
   /**
    * Verifies the accounting state of the allocator. Only works for DEBUG.
    *
@@ -599,12 +603,12 @@ public abstract class BaseAllocator extends Accountant implements BufferAllocato
    *           when any problems are found
    */
   private void verifyAllocator(final IdentityHashMap<UnsafeDirectLittleEndian, BaseAllocator> buffersSeen) {
-    synchronized (DEBUG_LOCK) {
+    // The remaining tests can only be performed if we're in debug mode.
+    if (!DEBUG) {
+      return;
+    }
 
-      // The remaining tests can only be performed if we're in debug mode.
-      if (!DEBUG) {
-        return;
-      }
+    synchronized (DEBUG_LOCK) {
 
       final long allocated = getAllocatedMemory();
 
@@ -757,9 +761,7 @@ public abstract class BaseAllocator extends Accountant implements BufferAllocato
           reservation.historicalLog.buildHistory(sb, level + 3, true);
         }
       }
-
     }
-
   }
 
   private void dumpBuffers(final StringBuilder sb, final Set<BufferLedger> ledgerSet) {
@@ -776,7 +778,6 @@ public abstract class BaseAllocator extends Accountant implements BufferAllocato
     }
   }
 
-
   public static StringBuilder indent(StringBuilder sb, int indent) {
     final char[] indentation = new char[indent * 2];
     Arrays.fill(indentation, ' ');
@@ -810,7 +811,7 @@ public abstract class BaseAllocator extends Accountant implements BufferAllocato
       // sizes provide no increase in performance.
       // Revisit from time to time.
 
-      ioBuffer = new byte[32*1024];
+      ioBuffer = new byte[IO_BUFFER_SIZE];
     }
     return ioBuffer;
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/90f43bff/logical/src/main/java/org/apache/drill/common/expression/FieldReference.java
----------------------------------------------------------------------
diff --git a/logical/src/main/java/org/apache/drill/common/expression/FieldReference.java b/logical/src/main/java/org/apache/drill/common/expression/FieldReference.java
index 640984e..ba3bf7a 100644
--- a/logical/src/main/java/org/apache/drill/common/expression/FieldReference.java
+++ b/logical/src/main/java/org/apache/drill/common/expression/FieldReference.java
@@ -38,7 +38,7 @@ import com.fasterxml.jackson.databind.ser.std.StdSerializer;
 @JsonSerialize(using = Se.class)
 @JsonDeserialize(using = De.class)
 public class FieldReference extends SchemaPath {
-  MajorType overrideType;
+  private MajorType overrideType;
 
   public FieldReference(SchemaPath sp) {
     super(sp);
@@ -49,10 +49,8 @@ public class FieldReference extends SchemaPath {
     if (getRootSegment().getChild() != null) {
       throw new UnsupportedOperationException("Field references must be singular names.");
     }
-
   }
 
-
   private void checkSimpleString(CharSequence value) {
     if (value.toString().contains(".")) {
       throw new UnsupportedOperationException(
@@ -81,7 +79,6 @@ public class FieldReference extends SchemaPath {
     return new FieldReference(safeString, ExpressionPosition.UNKNOWN, false);
   }
 
-
   public FieldReference(CharSequence value, ExpressionPosition pos) {
     this(value, pos, true);
   }
@@ -92,7 +89,6 @@ public class FieldReference extends SchemaPath {
       checkData();
       checkSimpleString(value);
     }
-
   }
 
   public FieldReference(String value, ExpressionPosition pos, MajorType dataType) {
@@ -123,7 +119,6 @@ public class FieldReference extends SchemaPath {
       ref = ref.replace("`", "");
       return new FieldReference(ref, ExpressionPosition.UNKNOWN, false);
     }
-
   }
 
   @SuppressWarnings("serial")
@@ -138,7 +133,5 @@ public class FieldReference extends SchemaPath {
         JsonGenerationException {
       jgen.writeString('`' + value.getRootSegment().getNameSegment().getPath() + '`');
     }
-
   }
-
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/90f43bff/logical/src/main/java/org/apache/drill/common/expression/SchemaPath.java
----------------------------------------------------------------------
diff --git a/logical/src/main/java/org/apache/drill/common/expression/SchemaPath.java b/logical/src/main/java/org/apache/drill/common/expression/SchemaPath.java
index f5fc687..026fb09 100644
--- a/logical/src/main/java/org/apache/drill/common/expression/SchemaPath.java
+++ b/logical/src/main/java/org/apache/drill/common/expression/SchemaPath.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -23,7 +23,6 @@ import java.util.Iterator;
 import org.antlr.runtime.ANTLRStringStream;
 import org.antlr.runtime.CommonTokenStream;
 import org.antlr.runtime.RecognitionException;
-import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.expression.PathSegment.ArraySegment;
 import org.apache.drill.common.expression.PathSegment.NameSegment;
 import org.apache.drill.common.expression.parser.ExprLexer;
@@ -58,7 +57,6 @@ public class SchemaPath extends LogicalExpressionBase {
     return new SchemaPath(s);
   }
 
-  @SuppressWarnings("unused")
   public PathSegment getLastSegment() {
     PathSegment s= rootSegment;
     while (s.getChild() != null) {
@@ -157,7 +155,6 @@ public class SchemaPath extends LogicalExpressionBase {
     return new SchemaPath(newRoot);
   }
 
-  @SuppressWarnings("unused")
   public SchemaPath getUnindexedArrayChild() {
     NameSegment newRoot = rootSegment.cloneWithNewChild(new ArraySegment(null));
     return new SchemaPath(newRoot);

http://git-wip-us.apache.org/repos/asf/drill/blob/90f43bff/logical/src/main/java/org/apache/drill/common/logical/data/Order.java
----------------------------------------------------------------------
diff --git a/logical/src/main/java/org/apache/drill/common/logical/data/Order.java b/logical/src/main/java/org/apache/drill/common/logical/data/Order.java
index 1bf587d..5cd3f84 100644
--- a/logical/src/main/java/org/apache/drill/common/logical/data/Order.java
+++ b/logical/src/main/java/org/apache/drill/common/logical/data/Order.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -19,22 +19,20 @@ package org.apache.drill.common.logical.data;
 
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 
-import com.google.common.collect.ImmutableMap;
-import org.apache.calcite.util.PartiallyOrderedSet;
+import org.apache.calcite.rel.RelFieldCollation.Direction;
+import org.apache.calcite.rel.RelFieldCollation.NullDirection;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.expression.FieldReference;
 import org.apache.drill.common.expression.LogicalExpression;
 import org.apache.drill.common.logical.data.visitors.LogicalVisitor;
-import org.apache.calcite.rel.RelFieldCollation;
-import org.apache.calcite.rel.RelFieldCollation.Direction;
-import org.apache.calcite.rel.RelFieldCollation.NullDirection;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonTypeName;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 
@@ -58,15 +56,15 @@ public class Order extends SingleInputOperator {
     return within;
   }
 
-    @Override
-    public <T, X, E extends Throwable> T accept(LogicalVisitor<T, X, E> logicalVisitor, X value) throws E {
-        return logicalVisitor.visitOrder(this, value);
-    }
+  @Override
+  public <T, X, E extends Throwable> T accept(LogicalVisitor<T, X, E> logicalVisitor, X value) throws E {
+      return logicalVisitor.visitOrder(this, value);
+  }
 
-    @Override
-    public Iterator<LogicalOperator> iterator() {
-        return Iterators.singletonIterator(getInput());
-    }
+  @Override
+  public Iterator<LogicalOperator> iterator() {
+      return Iterators.singletonIterator(getInput());
+  }
 
 
   /**
@@ -74,22 +72,33 @@ public class Order extends SingleInputOperator {
    */
   public static class Ordering {
 
+    public static final String ORDER_ASC = "ASC";
+    public static final String ORDER_DESC = "DESC";
+    public static final String ORDER_ASCENDING = "ASCENDING";
+    public static final String ORDER_DESCENDING = "DESCENDING";
+
+    public static final String NULLS_FIRST = "FIRST";
+    public static final String NULLS_LAST = "LAST";
+    public static final String NULLS_UNSPECIFIED = "UNSPECIFIED";
+
     private final LogicalExpression expr;
     /** Net &lt;ordering specification>. */
     private final Direction direction;
     /** Net &lt;null ordering> */
     private final NullDirection nullOrdering;
     /** The values in the plans for ordering specification are ASC, DESC, not the
-     * full words featured in the calcite {@link Direction} Enum, need to map between them. */
+     * full words featured in the Calcite {@link Direction} Enum, need to map between them. */
     private static ImmutableMap<String, Direction> DRILL_TO_CALCITE_DIR_MAPPING =
         ImmutableMap.<String, Direction>builder()
-        .put("ASC", Direction.ASCENDING)
-        .put("DESC", Direction.DESCENDING).build();
+        .put(ORDER_ASC, Direction.ASCENDING)
+        .put(ORDER_DESC, Direction.DESCENDING)
+        .put(ORDER_ASCENDING, Direction.ASCENDING)
+        .put(ORDER_DESCENDING, Direction.DESCENDING).build();
     private static ImmutableMap<String, NullDirection> DRILL_TO_CALCITE_NULL_DIR_MAPPING =
         ImmutableMap.<String, NullDirection>builder()
-            .put("FIRST", NullDirection.FIRST)
-            .put("LAST", NullDirection.LAST)
-            .put("UNSPECIFIED", NullDirection.UNSPECIFIED).build();
+            .put(NULLS_FIRST, NullDirection.FIRST)
+            .put(NULLS_LAST, NullDirection.LAST)
+            .put(NULLS_UNSPECIFIED, NullDirection.UNSPECIFIED).build();
 
     /**
      * Constructs a sort specification.
@@ -123,8 +132,12 @@ public class Order extends SingleInputOperator {
       this(direction, e, NullDirection.FIRST);
     }
 
-    private static Direction getOrderingSpecFromString( String strDirection ) {
-      Direction dir = DRILL_TO_CALCITE_DIR_MAPPING.get(strDirection);
+    @VisibleForTesting
+    public static Direction getOrderingSpecFromString(String strDirection) {
+      Direction dir = null;
+      if (strDirection != null) {
+        dir = DRILL_TO_CALCITE_DIR_MAPPING.get(strDirection.toUpperCase());
+      }
       if (dir != null || strDirection == null) {
         return filterDrillSupportedDirections(dir);
       } else {
@@ -134,16 +147,20 @@ public class Order extends SingleInputOperator {
       }
     }
 
-    private static NullDirection getNullOrderingFromString( String strNullOrdering ) {
-      NullDirection nullDir = DRILL_TO_CALCITE_NULL_DIR_MAPPING.get(strNullOrdering);
+    @VisibleForTesting
+    public static NullDirection getNullOrderingFromString( String strNullOrdering ) {
+      NullDirection nullDir = null;
+      if (strNullOrdering != null) {
+        nullDir = DRILL_TO_CALCITE_NULL_DIR_MAPPING.get(strNullOrdering.toUpperCase());
+      }
       if (nullDir != null || strNullOrdering == null) {
         return filterDrillSupportedNullDirections(nullDir);
       } else {
         throw new DrillRuntimeException(
             "Internal error:  Unknown <null ordering> string (not "
-                + "\"" + NullDirection.FIRST.name() + "\", "
-                + "\"" + NullDirection.LAST.name() + "\", or "
-                + "\"" + NullDirection.UNSPECIFIED.name() + "\" or null): "
+                + "\"" + NULLS_FIRST + "\", "
+                + "\"" + NULLS_LAST + "\", or "
+                + "\"" + NULLS_UNSPECIFIED + "\" or null): "
                 + "\"" + strNullOrdering + "\"" );
       }
     }

http://git-wip-us.apache.org/repos/asf/drill/blob/90f43bff/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 3158b42..09ffbfa 100644
--- a/pom.xml
+++ b/pom.xml
@@ -447,7 +447,8 @@
             <systemPropertyVariables>
               <java.io.tmpdir>${project.build.directory}</java.io.tmpdir>
             </systemPropertyVariables>
-          </configuration>
+            <excludedGroups>org.apache.drill.test.SecondaryTest</excludedGroups>
+          </configuration>
         </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>


Mime
View raw message