http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/test/TestBuilder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/TestBuilder.java b/exec/java-exec/src/test/java/org/apache/drill/test/TestBuilder.java
new file mode 100644
index 0000000..7987647
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/TestBuilder.java
@@ -0,0 +1,695 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.test;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.antlr.runtime.ANTLRStringStream;
+import org.antlr.runtime.CommonTokenStream;
+import org.antlr.runtime.RecognitionException;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.drill.test.DrillTestWrapper.TestServices;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.expression.parser.ExprLexer;
+import org.apache.drill.common.expression.parser.ExprParser;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.proto.UserBitShared;
+import org.apache.drill.exec.proto.UserBitShared.QueryType;
+import org.apache.drill.exec.proto.UserProtos.PreparedStatementHandle;
+import org.apache.drill.exec.record.BatchSchema;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.util.JsonStringArrayList;
+import org.apache.drill.exec.util.JsonStringHashMap;
+import org.apache.drill.exec.util.Text;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import org.joda.time.DateTimeZone;
+
+public class TestBuilder {
+
+ /**
+   * Test query to run. The type of object depends on the {@link #queryType}.
+ */
+ private Object query;
+ // the type of query for the test
+ private UserBitShared.QueryType queryType;
+ // should the validation enforce ordering
+ private Boolean ordered;
+ private boolean approximateEquality;
+ private TestServices services;
+  // Used to pass the type information associated with particular column names, rather than relying on the
+  // ordering of the columns in the CSV file or on the default type inference used when reading JSON. This is
+  // used for the case where type casts derived from the test query's results are added to the baseline
+  // queries; it saves a little setup in cases where strict type enforcement is not necessary for a given test.
+ protected Map<SchemaPath, TypeProtos.MajorType> baselineTypeMap;
+ // queries to run before the baseline or test queries, can be used to set options
+ private String baselineOptionSettingQueries;
+ private String testOptionSettingQueries;
+  // Two different methods are available for comparing ordered results. The default reads all of the records
+  // into giant lists of objects, like one giant on-heap batch of 'vectors'. This flag enables the other
+  // approach, which iterates through a hyper batch for the test query results and the baseline. While that
+  // approach is faster and uses less memory, it can be harder to debug because the elements are not all in a
+  // single list.
+ private boolean highPerformanceComparison;
+ // column names for use with the baseline values
+ protected String[] baselineColumns;
+  // In cases where we need to verify larger datasets without the risk of running the baseline data through
+  // the drill engine, results can be provided as a list of maps. While this model does make a lot of sense,
+  // there is a lot of work needed to make the type handling/casting work correctly, and making robust complex
+  // type handling work completely outside of the drill engine for generating baselines would likely be more
+  // work than it is worth. For now we will use this facility to validate the parts of the drill engine that
+  // could break in ways that would affect the reading of baseline files (i.e. we need robust tests for storage
+  // engines, project and casting that use this interface) and rely on the engine for the rest of the tests,
+  // which will use the baseline queries.
+ private List<Map<String, Object>> baselineRecords;
+
+ private int expectedNumBatches = DrillTestWrapper.EXPECTED_BATCH_COUNT_NOT_SET;
+
+ public TestBuilder(TestServices services) {
+ this.services = services;
+ reset();
+ }
+
+ public TestBuilder(TestServices services, Object query, UserBitShared.QueryType queryType, Boolean ordered,
+ boolean approximateEquality, Map<SchemaPath, TypeProtos.MajorType> baselineTypeMap,
+ String baselineOptionSettingQueries, String testOptionSettingQueries, boolean highPerformanceComparison,
+ int expectedNumBatches) {
+ this(services);
+ if (ordered == null) {
+ throw new RuntimeException("Ordering not set, when using a baseline file or query you must explicitly call the ordered() or unOrdered() method on the " + this.getClass().getSimpleName());
+ }
+ this.query = query;
+ this.queryType = queryType;
+ this.ordered = ordered;
+ this.approximateEquality = approximateEquality;
+ this.baselineTypeMap = baselineTypeMap;
+ this.baselineOptionSettingQueries = baselineOptionSettingQueries;
+ this.testOptionSettingQueries = testOptionSettingQueries;
+ this.highPerformanceComparison = highPerformanceComparison;
+ this.expectedNumBatches = expectedNumBatches;
+ }
+
+ protected TestBuilder reset() {
+ query = "";
+ ordered = null;
+ approximateEquality = false;
+ highPerformanceComparison = false;
+ testOptionSettingQueries = "";
+ baselineOptionSettingQueries = "";
+ baselineRecords = null;
+ return this;
+ }
+
+ public DrillTestWrapper build() throws Exception {
+    if (!ordered && highPerformanceComparison) {
+      throw new Exception("High performance comparison is only available for ordered checks; to enforce this restriction, ordered() must be called first.");
+ }
+ return new DrillTestWrapper(this, services, query, queryType, baselineOptionSettingQueries, testOptionSettingQueries,
+ getValidationQueryType(), ordered, highPerformanceComparison, baselineColumns, baselineRecords, expectedNumBatches);
+ }
+
+ public List<Pair<SchemaPath, TypeProtos.MajorType>> getExpectedSchema() {
+ return null;
+ }
+
+ public void go() throws Exception {
+ build().run();
+ }
+
+ public TestBuilder sqlQuery(String query) {
+ this.query = QueryTestUtil.normalizeQuery(query);
+ this.queryType = UserBitShared.QueryType.SQL;
+ return this;
+ }
+
+ public TestBuilder sqlQuery(String query, Object... replacements) {
+ return sqlQuery(String.format(query, replacements));
+ }
+
+ public TestBuilder preparedStatement(PreparedStatementHandle preparedStatementHandle) {
+ queryType = QueryType.PREPARED_STATEMENT;
+ query = preparedStatementHandle;
+ return this;
+ }
+
+ public TestBuilder sqlQueryFromFile(String queryFile) throws IOException {
+ String query = BaseTestQuery.getFile(queryFile);
+ this.query = query;
+ queryType = UserBitShared.QueryType.SQL;
+ return this;
+ }
+
+ public TestBuilder physicalPlanFromFile(String queryFile) throws IOException {
+ String query = BaseTestQuery.getFile(queryFile);
+ this.query = query;
+ queryType = UserBitShared.QueryType.PHYSICAL;
+ return this;
+ }
+
+ public TestBuilder ordered() {
+ ordered = true;
+ return this;
+ }
+
+ public TestBuilder unOrdered() {
+ ordered = false;
+ return this;
+ }
+
+  // This can only be used with ordered verifications. It runs faster and uses less memory, but may be a
+  // little harder to debug, as it iterates over a hyper batch rather than reading all of the values into
+  // large on-heap lists.
+ public TestBuilder highPerformanceComparison() throws Exception {
+ highPerformanceComparison = true;
+ return this;
+ }
+
+  // List of queries to run before the baseline query; can be used to set several options.
+  // The list takes the form of a semicolon-separated list.
+ public TestBuilder optionSettingQueriesForBaseline(String queries) {
+ baselineOptionSettingQueries = queries;
+ return this;
+ }
+
+ public TestBuilder optionSettingQueriesForBaseline(String queries, Object... args) {
+ baselineOptionSettingQueries = String.format(queries, args);
+ return this;
+ }
+
+  /**
+   * List of queries to run before the test query; can be used to set several options.
+   * The list takes the form of a semicolon-separated list.
+   * @param queries queries that set session and system options
+   * @return this test builder
+   */
+ public TestBuilder optionSettingQueriesForTestQuery(String queries) {
+ testOptionSettingQueries = queries;
+ return this;
+ }
+
+ public TestBuilder optionSettingQueriesForTestQuery(String query, Object... args) throws Exception {
+ testOptionSettingQueries = String.format(query, args);
+ return this;
+ }
+
+ public TestBuilder approximateEquality() {
+ approximateEquality = true;
+ return this;
+ }
+
+  // Modified code from the SchemaPath.De class. This should be used sparingly, and only in tests, if absolutely needed.
+ public static SchemaPath parsePath(String path) {
+ try {
+ ExprLexer lexer = new ExprLexer(new ANTLRStringStream(path));
+ CommonTokenStream tokens = new CommonTokenStream(lexer);
+ ExprParser parser = new ExprParser(tokens);
+
+ ExprParser.parse_return ret = parser.parse();
+
+ if (ret.e instanceof SchemaPath) {
+ return (SchemaPath) ret.e;
+ } else {
+ throw new IllegalStateException("Schema path is not a valid format.");
+ }
+ } catch (RecognitionException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ Object getValidationQuery() throws Exception {
+ throw new RuntimeException("Must provide some kind of baseline, either a baseline file or another query");
+ }
+
+ protected UserBitShared.QueryType getValidationQueryType() throws Exception {
+ if (singleExplicitBaselineRecord()) {
+ return null;
+ }
+
+ if (ordered) {
+      // If there are no baseline records or baseline query then we will just check that the records are in ascending order
+ return null;
+ }
+
+ throw new RuntimeException("Must provide some kind of baseline, either a baseline file or another query");
+ }
+
+ public JSONTestBuilder jsonBaselineFile(String filePath) {
+ return new JSONTestBuilder(filePath, services, query, queryType, ordered, approximateEquality,
+ baselineTypeMap, baselineOptionSettingQueries, testOptionSettingQueries, highPerformanceComparison,
+ expectedNumBatches);
+ }
+
+ public CSVTestBuilder csvBaselineFile(String filePath) {
+ return new CSVTestBuilder(filePath, services, query, queryType, ordered, approximateEquality,
+ baselineTypeMap, baselineOptionSettingQueries, testOptionSettingQueries, highPerformanceComparison,
+ expectedNumBatches);
+ }
+
+ public SchemaTestBuilder schemaBaseLine(BatchSchema batchSchema) {
+ List<Pair<SchemaPath, TypeProtos.MajorType>> expectedSchema = new ArrayList<>();
+ for (final MaterializedField field : batchSchema) {
+ expectedSchema.add(Pair.of(SchemaPath.getSimplePath(field.getName()), field.getType()));
+ }
+ return schemaBaseLine(expectedSchema);
+ }
+
+ public SchemaTestBuilder schemaBaseLine(List<Pair<SchemaPath, TypeProtos.MajorType>> expectedSchema) {
+    assert expectedSchema != null : "An expected schema must be provided";
+    assert baselineColumns == null : "The column information should be captured in the expected schema, not in baselineColumns";
+
+ return new SchemaTestBuilder(
+ services,
+ query,
+ queryType,
+ baselineOptionSettingQueries,
+ testOptionSettingQueries,
+ expectedSchema);
+ }
+
+ public TestBuilder baselineTypes(Map<SchemaPath, TypeProtos.MajorType> baselineTypeMap) {
+ this.baselineTypeMap = baselineTypeMap;
+ return this;
+ }
+
+  boolean typeInfoSet() {
+    return baselineTypeMap != null;
+  }
+
+ /**
+   * Indicate that the test query should be checked for an empty result set.
+ * @return the test builder
+ */
+ public TestBuilder expectsEmptyResultSet() {
+ unOrdered();
+ baselineRecords = new ArrayList<>();
+ return this;
+ }
+
+ /**
+   * Sets the expected number of batches for this query. The test will fail if the query returns a
+   * different number of batches.
+ *
+ * @param expectedNumBatches expected batch count
+ * @return this test builder
+ */
+ public TestBuilder expectsNumBatches(int expectedNumBatches) {
+ this.expectedNumBatches = expectedNumBatches;
+ return this;
+ }
+
+ /**
+ * This method is used to pass in a simple list of values for a single record verification without
+ * the need to create a CSV or JSON file to store the baseline.
+ *
+ * This can be called repeatedly to pass a list of records to verify. It works for both ordered and unordered
+ * checks.
+ *
+ * @param baselineValues - the baseline values to validate
+ * @return the test builder
+ */
+ public TestBuilder baselineValues(Object ... baselineValues) {
+ assert getExpectedSchema() == null : "The expected schema is not needed when baselineValues are provided ";
+ if (ordered == null) {
+ throw new RuntimeException("Ordering not set, before specifying baseline data you must explicitly call the ordered() or unOrdered() method on the " + this.getClass().getSimpleName());
+ }
+ if (baselineRecords == null) {
+ baselineRecords = new ArrayList<>();
+ }
+ Map<String, Object> ret = new HashMap<>();
+ int i = 0;
+ assertEquals("Must supply the same number of baseline values as columns.", baselineValues.length, baselineColumns.length);
+ for (String s : baselineColumns) {
+ ret.put(s, baselineValues[i]);
+ i++;
+ }
+ this.baselineRecords.add(ret);
+ return this;
+ }
+
+ /**
+   * This can be used in cases where we want to avoid issues with the assumptions made by the test framework.
+   * Most of the verification methods in the framework run drill queries to read the baseline files or to
+   * execute alternative baseline queries. This model relies on the basic functionality of reading files with
+   * storage plugins and applying casts/projects to be stable.
+ *
+ * This method can be used to verify the engine for these cases and any other future execution paths that would
+ * be used by both the test query and baseline. Without tests like this it is possible that some tests
+ * could falsely report as passing, as both the test query and baseline query could run into the same problem
+ * with an assumed stable code path and produce the same erroneous result.
+ *
+ * @param materializedRecords - a list of maps representing materialized results
+ * @return the test builder
+ */
+ public TestBuilder baselineRecords(List<Map<String, Object>> materializedRecords) {
+ this.baselineRecords = materializedRecords;
+ return this;
+ }
+
+ /**
+   * This setting has a slightly different impact on the test depending on how some of the other
+   * configuration options are set.
+ *
+ * If a JSON baseline file is given, this list will act as a project list to verify the
+ * test query against a subset of the columns in the file.
+ *
+ * For a CSV baseline file, these will act as aliases for columns [0 .. n] in the repeated
+ * varchar column that is read out of CSV.
+ *
+ * For a baseline sql query, this currently has no effect.
+ *
+ * For explicit baseline values given in java code with the baselineValues() method, these will
+ * be used to create a map for the one record verification.
+ */
+ public TestBuilder baselineColumns(String... columns) {
+ assert getExpectedSchema() == null : "The expected schema is not needed when baselineColumns are provided ";
+ for (int i = 0; i < columns.length; i++) {
+ columns[i] = parsePath(columns[i]).toExpr();
+ }
+ this.baselineColumns = columns;
+ return this;
+ }
+
+ private boolean singleExplicitBaselineRecord() {
+ return baselineRecords != null;
+ }
+
+ /**
+   * Provide a SQL query to validate against.
+   * @param baselineQuery the baseline query text
+ * @return the test builder
+ */
+ public BaselineQueryTestBuilder sqlBaselineQuery(Object baselineQuery) {
+ return new BaselineQueryTestBuilder(baselineQuery, UserBitShared.QueryType.SQL, services, query, queryType, ordered, approximateEquality,
+ baselineTypeMap, baselineOptionSettingQueries, testOptionSettingQueries, highPerformanceComparison, expectedNumBatches);
+ }
+
+  public BaselineQueryTestBuilder sqlBaselineQuery(String query, String... replacements) {
+ return sqlBaselineQuery(String.format(query, (Object[]) replacements));
+ }
+
+ // provide a path to a file containing a SQL query to use as a baseline
+ public BaselineQueryTestBuilder sqlBaselineQueryFromFile(String baselineQueryFilename) throws IOException {
+ String baselineQuery = BaseTestQuery.getFile(baselineQueryFilename);
+ return new BaselineQueryTestBuilder(baselineQuery, UserBitShared.QueryType.SQL, services, query, queryType, ordered, approximateEquality,
+ baselineTypeMap, baselineOptionSettingQueries, testOptionSettingQueries, highPerformanceComparison, expectedNumBatches);
+ }
+
+  // As physical plans are verbose, this is the only option provided for specifying them; we should enforce
+  // that physical plans, and any other large JSON strings, do not live in the Java source as literals.
+ public BaselineQueryTestBuilder physicalPlanBaselineQueryFromFile(String baselinePhysicalPlanPath) throws IOException {
+ String baselineQuery = BaseTestQuery.getFile(baselinePhysicalPlanPath);
+ return new BaselineQueryTestBuilder(baselineQuery, UserBitShared.QueryType.PHYSICAL, services, query, queryType, ordered, approximateEquality,
+ baselineTypeMap, baselineOptionSettingQueries, testOptionSettingQueries, highPerformanceComparison, expectedNumBatches);
+ }
+
+ private String getDecimalPrecisionScaleInfo(TypeProtos.MajorType type) {
+ String precision = "";
+    switch (type.getMinorType()) {
+ case DECIMAL18:
+ case DECIMAL28SPARSE:
+ case DECIMAL38SPARSE:
+ case DECIMAL38DENSE:
+ case DECIMAL28DENSE:
+ case DECIMAL9:
+ precision = String.format("(%d,%d)", type.getPrecision(), type.getScale());
+ break;
+      default:
+        break; // do nothing; the empty string was set above
+ }
+ return precision;
+ }
+
+ public class CSVTestBuilder extends TestBuilder {
+
+ // path to the baseline file that will be inserted into the validation query
+ private String baselineFilePath;
+    // Used to cast the baseline file columns. If not set, the types
+    // that come out of the test query drive the interpretation of the baseline.
+ private TypeProtos.MajorType[] baselineTypes;
+
+ CSVTestBuilder(String baselineFile, TestServices services, Object query, UserBitShared.QueryType queryType, Boolean ordered,
+ boolean approximateEquality, Map<SchemaPath, TypeProtos.MajorType> baselineTypeMap,
+ String baselineOptionSettingQueries, String testOptionSettingQueries, boolean highPerformanceComparison,
+ int expectedNumBatches) {
+ super(services, query, queryType, ordered, approximateEquality, baselineTypeMap, baselineOptionSettingQueries, testOptionSettingQueries,
+ highPerformanceComparison, expectedNumBatches);
+ this.baselineFilePath = baselineFile;
+ }
+
+ public CSVTestBuilder baselineTypes(TypeProtos.MajorType... baselineTypes) {
+ this.baselineTypes = baselineTypes;
+ this.baselineTypeMap = null;
+ return this;
+ }
+
+    // Convenience method to convert minor types to major types when no decimals with precision are needed.
+    public CSVTestBuilder baselineTypes(TypeProtos.MinorType... baselineTypes) {
+      TypeProtos.MajorType[] majorTypes = new TypeProtos.MajorType[baselineTypes.length];
+      int i = 0;
+      for (TypeProtos.MinorType minorType : baselineTypes) {
+ majorTypes[i] = Types.required(minorType);
+ i++;
+ }
+ this.baselineTypes = majorTypes;
+ this.baselineTypeMap = null;
+ return this;
+ }
+
+ @Override
+ protected TestBuilder reset() {
+ super.reset();
+ baselineTypeMap = null;
+ baselineTypes = null;
+ baselineFilePath = null;
+ return this;
+ }
+
+ @Override
+    boolean typeInfoSet() {
+      return super.typeInfoSet() || baselineTypes != null;
+    }
+
+ @Override
+ String getValidationQuery() throws Exception {
+ if (baselineColumns.length == 0) {
+ throw new Exception("Baseline CSV files require passing column names, please call the baselineColumns() method on the test builder.");
+ }
+
+ if (baselineTypes != null) {
+ assertEquals("Must pass the same number of types as column names if types are provided.", baselineTypes.length, baselineColumns.length);
+ }
+
+ String[] aliasedExpectedColumns = new String[baselineColumns.length];
+ for (int i = 0; i < baselineColumns.length; i++) {
+ aliasedExpectedColumns[i] = "columns[" + i + "] ";
+ TypeProtos.MajorType majorType;
+ if (baselineTypes != null) {
+ majorType = baselineTypes[i];
+ } else if (baselineTypeMap != null) {
+ majorType = baselineTypeMap.get(parsePath(baselineColumns[i]));
+ } else {
+ throw new Exception("Type information not set for interpreting csv baseline file.");
+ }
+ String precision = getDecimalPrecisionScaleInfo(majorType);
+      // TODO - determine if there is a better behavior here; if we do not specify a length, the default
+      // behavior is to cast to varchar with length 1.
+      // Set a default cast size for varchar; the cast function will take the lesser of this value and the
+      // length of the incoming data when choosing the length of the outgoing data.
+ if (majorType.getMinorType() == TypeProtos.MinorType.VARCHAR ||
+ majorType.getMinorType() == TypeProtos.MinorType.VARBINARY) {
+ precision = "(65000)";
+ }
+ aliasedExpectedColumns[i] = "cast(" + aliasedExpectedColumns[i] + " as " +
+ Types.getNameOfMinorType(majorType.getMinorType()) + precision + " ) " + baselineColumns[i];
+ }
+ String query = "select " + Joiner.on(", ").join(aliasedExpectedColumns) + " from cp.`" + baselineFilePath + "`";
+ return query;
+ }
+
+ @Override
+ protected UserBitShared.QueryType getValidationQueryType() throws Exception {
+ return UserBitShared.QueryType.SQL;
+ }
+ }
+
+ public class SchemaTestBuilder extends TestBuilder {
+ private List<Pair<SchemaPath, TypeProtos.MajorType>> expectedSchema;
+ SchemaTestBuilder(TestServices services, Object query, UserBitShared.QueryType queryType,
+ String baselineOptionSettingQueries, String testOptionSettingQueries, List<Pair<SchemaPath, TypeProtos.MajorType>> expectedSchema) {
+ super(services, query, queryType, false, false, null, baselineOptionSettingQueries, testOptionSettingQueries, false, -1);
+ expectsEmptyResultSet();
+ this.expectedSchema = expectedSchema;
+ }
+
+ @Override
+ public TestBuilder baselineColumns(String... columns) {
+ assert false : "The column information should be captured in expected scheme, not baselineColumns";
+ return this;
+ }
+
+ @Override
+ public TestBuilder baselineRecords(List<Map<String, Object>> materializedRecords) {
+ assert false : "Since only schema will be compared in this test, no record is expected";
+ return this;
+ }
+
+ @Override
+ public TestBuilder baselineValues(Object... objects) {
+ assert false : "Since only schema will be compared in this test, no record is expected";
+ return this;
+ }
+
+ @Override
+ protected UserBitShared.QueryType getValidationQueryType() throws Exception {
+ return null;
+ }
+
+ @Override
+ public List<Pair<SchemaPath, TypeProtos.MajorType>> getExpectedSchema() {
+ return expectedSchema;
+ }
+ }
+
+ public class JSONTestBuilder extends TestBuilder {
+
+ // path to the baseline file that will be inserted into the validation query
+ private String baselineFilePath;
+
+ JSONTestBuilder(String baselineFile, TestServices services, Object query, UserBitShared.QueryType queryType, Boolean ordered,
+ boolean approximateEquality, Map<SchemaPath, TypeProtos.MajorType> baselineTypeMap,
+ String baselineOptionSettingQueries, String testOptionSettingQueries, boolean highPerformanceComparison,
+ int expectedNumBatches) {
+ super(services, query, queryType, ordered, approximateEquality, baselineTypeMap, baselineOptionSettingQueries, testOptionSettingQueries,
+ highPerformanceComparison, expectedNumBatches);
+ this.baselineFilePath = baselineFile;
+ this.baselineColumns = new String[] {"*"};
+ }
+
+ @Override
+ String getValidationQuery() {
+ return "select " + Joiner.on(", ").join(baselineColumns) + " from cp.`" + baselineFilePath + "`";
+ }
+
+ @Override
+ protected UserBitShared.QueryType getValidationQueryType() throws Exception {
+ return UserBitShared.QueryType.SQL;
+ }
+
+ }
+
+ public class BaselineQueryTestBuilder extends TestBuilder {
+
+ /**
+ * Baseline query. Type of object depends on {@link #baselineQueryType}
+ */
+ private Object baselineQuery;
+ private UserBitShared.QueryType baselineQueryType;
+
+ BaselineQueryTestBuilder(Object baselineQuery, UserBitShared.QueryType baselineQueryType, TestServices services,
+ Object query, UserBitShared.QueryType queryType, Boolean ordered,
+ boolean approximateEquality, Map<SchemaPath, TypeProtos.MajorType> baselineTypeMap,
+ String baselineOptionSettingQueries, String testOptionSettingQueries, boolean highPerformanceComparison,
+ int expectedNumBatches) {
+ super(services, query, queryType, ordered, approximateEquality, baselineTypeMap, baselineOptionSettingQueries, testOptionSettingQueries,
+ highPerformanceComparison, expectedNumBatches);
+ this.baselineQuery = baselineQuery;
+ this.baselineQueryType = baselineQueryType;
+ }
+
+ @Override
+ Object getValidationQuery() {
+ return baselineQuery;
+ }
+
+ @Override
+ protected UserBitShared.QueryType getValidationQueryType() throws Exception {
+ return baselineQueryType;
+ }
+
+    // This currently assumes that all explicit baseline queries will have fully qualified type information.
+    // If this changes, the baseline query can be run in a sub-query with the implicit or explicit type passing
+    // added on top of it, as is currently done when reading a baseline file.
+ @Override
+ boolean typeInfoSet() {
+ return true;
+ }
+
+ }
+
+ /**
+ * Convenience method to create a {@link JsonStringArrayList list} from the given values.
+ */
+ public static JsonStringArrayList<Object> listOf(Object... values) {
+ final JsonStringArrayList<Object> list = new JsonStringArrayList<>();
+    for (Object value : values) {
+ if (value instanceof CharSequence) {
+ list.add(new Text(value.toString()));
+ } else {
+ list.add(value);
+ }
+ }
+ return list;
+ }
+
+ /**
+   * Convenience method to create a {@link JsonStringHashMap} map instance with the given key-value sequence.
+   *
+   * The key-value sequence consists of pairs in which each key precedes its value. For instance:
+   *
+   * mapOf("name", "Adam", "age", 41) corresponds to {"name": "Adam", "age": 41} in JSON.
+ */
+ public static JsonStringHashMap<String, Object> mapOf(Object... keyValueSequence) {
+    Preconditions.checkArgument(keyValueSequence.length % 2 == 0, "Length of key value sequence must be even");
+    final JsonStringHashMap<String, Object> map = new JsonStringHashMap<>();
+    for (int i = 0; i < keyValueSequence.length; i += 2) {
+      Object value = keyValueSequence[i + 1];
+ if (value instanceof CharSequence) {
+ value = new Text(value.toString());
+ }
+ map.put(String.class.cast(keyValueSequence[i]), value);
+ }
+ return map;
+ }
+
+ /**
+   * Helper method for timestamp values that depend on the local timezone.
+ * @param value expected timestamp value in UTC
+ * @return timestamp value for the local timezone
+ */
+ public static Timestamp convertToLocalTimestamp(String value) {
+    long utcTimestamp = Timestamp.valueOf(value).getTime();
+    return new Timestamp(DateTimeZone.getDefault().convertUTCToLocal(utcTimestamp));
+ }
+}
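
For reference, a sketch of how this fluent API is typically driven from a test. This is illustrative only: the testBuilder() helper (assumed to return a TestBuilder wired to a test Drillbit), the queries, the file path and the values are hypothetical; the builder calls are the ones defined above.

    // Hypothetical: verify a query against inline baseline values.
    @Test
    public void exampleInlineBaseline() throws Exception {
      testBuilder()                                         // assumed helper returning a TestBuilder
          .sqlQuery("select id, name from cp.`example.json`") // hypothetical query
          .unOrdered()                                      // must precede baselineValues()
          .baselineColumns("id", "name")
          .baselineValues(1L, "a")
          .baselineValues(2L, "b")
          .go();                                            // build().run()
    }

    // Hypothetical: verify against a CSV baseline file. CSV is read as an untyped
    // repeated varchar column, so baselineTypes() and baselineColumns() are needed
    // for getValidationQuery() to generate the aliased casts over columns[i].
    @Test
    public void exampleCsvBaseline() throws Exception {
      testBuilder()
          .sqlQuery("select id from cp.`example.json`")           // hypothetical query
          .ordered()
          .csvBaselineFile("testframework/example-baseline.csv")  // hypothetical path
          .baselineTypes(TypeProtos.MinorType.BIGINT)
          .baselineColumns("id")
          .go();
    }
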
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/test/TestConfigLinkage.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/TestConfigLinkage.java b/exec/java-exec/src/test/java/org/apache/drill/test/TestConfigLinkage.java
deleted file mode 100644
index 83c60b3..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/test/TestConfigLinkage.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.drill.test;
-
-import org.apache.drill.categories.OptionsTest;
-import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.server.options.OptionDefinition;
-import org.apache.drill.exec.server.options.OptionMetaData;
-import org.apache.drill.exec.server.options.OptionValidator;
-import org.apache.drill.exec.server.options.OptionValue;
-import org.apache.drill.exec.server.options.TypeValidators;
-import org.apache.drill.exec.store.sys.SystemTable;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertEquals;
-/*
- * Tests for the linkage between the two config option systems,
- * i.e., the linkage between the boot-config system and system/session options.
- * Tests assert that the config options are read in the order of session, system, boot-config.
- * Max width per node is slightly different from other options since it is set to zero by default
- * in the config, and the option value is computed dynamically every time if the value is zero,
- * i.e., if the value is not set in system/session.
- * */
-
-@Category(OptionsTest.class)
-public class TestConfigLinkage {
- public static final String MOCK_PROPERTY = "mock.prop";
-
- public static OptionDefinition createMockPropOptionDefinition() {
- return new OptionDefinition(new TypeValidators.StringValidator(MOCK_PROPERTY), new OptionMetaData(OptionValue.AccessibleScopes.ALL, false, true));
- }
-
- @Test
- public void testDefaultInternalValue() throws Exception {
- OptionDefinition optionDefinition = createMockPropOptionDefinition();
-
- ClusterFixtureBuilder builder = ClusterFixture.builder().
- configProperty(ExecConstants.bootDefaultFor(MOCK_PROPERTY), "a").
- putDefinition(optionDefinition);
-
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String mockProp = client.queryBuilder().
- sql("SELECT string_val FROM sys.%s where name='%s'", SystemTable.INTERNAL_OPTIONS.getTableName(), MOCK_PROPERTY).singletonString();
- String mockProp2 = client.queryBuilder().
- sql("SELECT val FROM sys.%s where name='%s'", SystemTable.INTERNAL_OPTIONS_VAL.getTableName(), MOCK_PROPERTY).singletonString();
-
- assertEquals("a", mockProp);
- assertEquals("a", mockProp2);
- }
- }
-
- @Test
- public void testDefaultValidatorInternalValue() throws Exception {
- OptionDefinition optionDefinition = createMockPropOptionDefinition();
-
- ClusterFixtureBuilder builder = ClusterFixture.builder().
- putDefinition(optionDefinition).
- configProperty(ExecConstants.bootDefaultFor(MOCK_PROPERTY), "a");
-
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String mockProp = client.queryBuilder().
- sql("SELECT string_val FROM sys.%s where name='%s'", SystemTable.INTERNAL_OPTIONS.getTableName(), MOCK_PROPERTY).singletonString();
- String mockProp2 = client.queryBuilder().
- sql("SELECT val FROM sys.%s where name='%s'", SystemTable.INTERNAL_OPTIONS_VAL.getTableName(), MOCK_PROPERTY).singletonString();
-
- assertEquals("a", mockProp);
- assertEquals("a", mockProp2);
- }
- }
-
- /* Test if session option takes precedence */
- @Test
- public void testSessionOption() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.builder().sessionOption(ExecConstants.SLICE_TARGET, 10);
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String slice_target = client.queryBuilder().sql("SELECT val FROM sys.%s where name='planner.slice_target' and optionScope = 'SESSION'", SystemTable.OPTION_VAL
- .getTableName())
- .singletonString();
- assertEquals(slice_target, "10");
- }
- }
-
- /* Test if system option takes precedence over the boot option */
- @Test
- public void testSystemOption() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.builder().systemOption(ExecConstants.SLICE_TARGET, 10000);
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String slice_target = client.queryBuilder().sql("SELECT val FROM sys.%s where name='planner.slice_target' and optionScope = 'SYSTEM'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- assertEquals(slice_target, "10000");
- }
- }
-
- @Test
- public void testInternalSystemOption() throws Exception {
- OptionDefinition optionDefinition = createMockPropOptionDefinition();
-
- ClusterFixtureBuilder builder = ClusterFixture.builder().
- putDefinition(optionDefinition).
- configProperty(ExecConstants.bootDefaultFor(MOCK_PROPERTY), "a").
- systemOption(MOCK_PROPERTY, "blah");
-
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String mockProp = client.queryBuilder().
- sql("SELECT string_val FROM sys.%s where name='%s'", SystemTable.INTERNAL_OPTIONS.getTableName(), MOCK_PROPERTY)
- .singletonString();
- String mockProp2 = client.queryBuilder().
- sql("SELECT val FROM sys.%s where name='%s'", SystemTable.INTERNAL_OPTIONS_VAL.getTableName(), MOCK_PROPERTY)
- .singletonString();
-
- assertEquals("blah", mockProp);
- assertEquals("blah", mockProp2);
- }
- }
-
- /* Test if the config option takes effect when the system/session options are not set */
- @Test
- public void testConfigOption() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.builder()
- .setOptionDefault(ExecConstants.SLICE_TARGET, 1000);
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String slice_target = client.queryBuilder().sql("SELECT val FROM sys.%s where name='planner.slice_target' and optionScope = 'BOOT'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- assertEquals(slice_target, "1000");
- }
- }
-
- /* Test if altering system option takes precedence over config option */
- @Test
- public void testAlterSystem() throws Exception {
- try (ClusterFixture cluster = ClusterFixture.standardCluster();
- ClientFixture client = cluster.clientFixture()) {
- client.queryBuilder().sql("ALTER SYSTEM SET `planner.slice_target` = 10000").run();
- String slice_target = client.queryBuilder().sql("SELECT val FROM sys.%s where name='planner.slice_target' and optionScope = 'SYSTEM'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- assertEquals(slice_target, "10000");
- }
- }
-
- /* Test if altering session option takes precedence over system option */
- @Test
- public void testSessionPrecedence() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.builder().systemOption(ExecConstants.SLICE_TARGET, 100000);
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- client.queryBuilder().sql("ALTER SESSION SET `planner.slice_target` = 10000").run();
- String slice_target = client.queryBuilder().sql("SELECT val FROM sys.%s where name='planner.slice_target' and optionScope = 'SESSION'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- assertEquals(slice_target, "10000");
- }
- }
-
- /* Test if setting maxwidth option through config takes effect */
- @Test
- public void testMaxWidthPerNodeConfig() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.bareBuilder().setOptionDefault(ExecConstants.MAX_WIDTH_PER_NODE_KEY, 2);
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String maxWidth = client.queryBuilder().sql("SELECT val FROM sys.%s where name='planner.width.max_per_node' and optionScope = 'BOOT'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- assertEquals("2", maxWidth);
- }
- }
-
- /* Test if setting maxwidth at system level takes precedence */
- @Test
- public void testMaxWidthPerNodeSystem() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.bareBuilder().systemOption(ExecConstants.MAX_WIDTH_PER_NODE_KEY, 3);
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String maxWidth = client.queryBuilder().sql("SELECT val FROM sys.%s where name='planner.width.max_per_node' and optionScope = 'SYSTEM'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- assertEquals("3", maxWidth);
- }
- }
-
- /* Test if setting maxwidth at session level takes precedence */
- @Test
- public void testMaxWidthPerNodeSession() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.bareBuilder().sessionOption(ExecConstants.MAX_WIDTH_PER_NODE_KEY, 2);
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String maxWidth = client.queryBuilder().sql("SELECT val FROM sys.%s where name='planner.width.max_per_node' and optionScope = 'SESSION'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- assertEquals("2", maxWidth);
- }
- }
-
- /* Test if max width is computed correctly using the cpu load average
- when the option is not set at either system or session level
- */
- @Test
- public void testMaxWidthPerNodeDefault() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.bareBuilder().setOptionDefault(ExecConstants.CPU_LOAD_AVERAGE_KEY, 0.70);
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- long maxWidth = ExecConstants.MAX_WIDTH_PER_NODE.computeMaxWidth(0.70, 0);
- int availProc = Runtime.getRuntime().availableProcessors();
- long maxWidthPerNode = Math.max(1, Math.min(availProc, Math.round(availProc * 0.70)));
- assertEquals(maxWidthPerNode, maxWidth);
- }
- }
-
- /* Test if the scope is set during BOOT time and scope is actually BOOT */
- @Test
- public void testScope() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.bareBuilder().setOptionDefault(ExecConstants.SLICE_TARGET, 100000);
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String scope = client.queryBuilder()
- .sql("SELECT optionScope from sys.%s where name='planner.slice_target'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- Assert.assertEquals("BOOT",scope);
- }
- }
-
- /* Test if the option is set at SYSTEM scope and the scope is actually SYSTEM */
- @Test
- public void testScopeSystem() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.bareBuilder().systemOption(ExecConstants.SLICE_TARGET, 10000);
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String scope = client.queryBuilder()
- .sql("SELECT optionScope from sys.%s where name='planner.slice_target'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- Assert.assertEquals("SYSTEM",scope);
- }
- }
-
- /* Test if the option is set at SESSION scope and the scope is actually SESSION */
- @Test
- public void testScopeSession() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.bareBuilder().sessionOption(ExecConstants.SLICE_TARGET, 100000);
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- String scope = client.queryBuilder()
- .sql("SELECT optionScope from sys.%s where name='planner.slice_target'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- Assert.assertEquals("SESSION",scope);
- }
- }
-
- /* Test if the option is altered at SYSTEM scope and the scope is actually SYSTEM */
- @Test
- public void testScopeAlterSystem() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.bareBuilder();
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- client.queryBuilder().sql("ALTER SYSTEM set `planner.slice_target`= 10000").run();
- String scope = client.queryBuilder()
- .sql("SELECT optionScope from sys.%s where name='planner.slice_target'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- Assert.assertEquals("SYSTEM",scope);
- }
- }
-
- /* Test if the option is altered at SESSION scope and the scope is actually SESSION */
- @Test
- public void testScopeAlterSession() throws Exception {
- ClusterFixtureBuilder builder = ClusterFixture.bareBuilder();
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- client.queryBuilder().sql("ALTER SESSION set `planner.slice_target`= 10000").run();
- String scope = client.queryBuilder()
- .sql("SELECT optionScope from sys.%s where name='planner.slice_target'", SystemTable.OPTION_VAL.getTableName())
- .singletonString();
- Assert.assertEquals("SESSION",scope);
- }
- }
-
- @Test
- public void testAlterInternalSystemOption() throws Exception {
- OptionDefinition optionDefinition = createMockPropOptionDefinition();
-
- ClusterFixtureBuilder builder = ClusterFixture.builder().
- configProperty(ExecConstants.bootDefaultFor(MOCK_PROPERTY), "a").
- putDefinition(optionDefinition);
-
- try (ClusterFixture cluster = builder.build();
- ClientFixture client = cluster.clientFixture()) {
- client.queryBuilder().sql("ALTER SYSTEM SET `%s` = 'bleh'", MOCK_PROPERTY).run();
-
- client.queryBuilder().sql("SELECT * FROM sys.%s", SystemTable.INTERNAL_OPTIONS.getTableName()).printCsv();
- client.queryBuilder().sql("SELECT * FROM sys.%s", SystemTable.INTERNAL_OPTIONS_VAL.getTableName()).printCsv();
-
- String mockProp = client.queryBuilder().
- sql("SELECT string_val FROM sys.%s where name='%s'", SystemTable.INTERNAL_OPTIONS, MOCK_PROPERTY).singletonString();
- String mockProp2 = client.queryBuilder().
- sql("SELECT val FROM sys.%s where name='%s'", SystemTable.INTERNAL_OPTIONS_VAL, MOCK_PROPERTY).singletonString();
-
- assertEquals("bleh", mockProp);
- assertEquals("bleh", mockProp2);
- }
- }
-}
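
The deleted tests above pin down the option-precedence contract: session settings win over system settings, which win over boot-config defaults. A minimal sketch of that check, reusing the fixture calls and the planner.slice_target option from the removed code (illustrative, not a replacement for the file):

    // Mirrors testSessionPrecedence(): an ALTER SESSION wins over a system-level value.
    ClusterFixtureBuilder builder = ClusterFixture.builder().systemOption(ExecConstants.SLICE_TARGET, 100000);
    try (ClusterFixture cluster = builder.build();
         ClientFixture client = cluster.clientFixture()) {
      client.queryBuilder().sql("ALTER SESSION SET `planner.slice_target` = 10000").run();
      String scope = client.queryBuilder()
          .sql("SELECT optionScope FROM sys.%s WHERE name='planner.slice_target'", SystemTable.OPTION_VAL.getTableName())
          .singletonString();
      assertEquals("SESSION", scope);
    }
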
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/test/package-info.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/package-info.java b/exec/java-exec/src/test/java/org/apache/drill/test/package-info.java
index dac71ec..e1817dd 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/package-info.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/package-info.java
@@ -16,55 +16,103 @@
* limitations under the License.
*/
/**
- * Provides a variety of test framework tools to simplify Drill unit
- * tests and ad-hoc tests created while developing features. Key components
- * include:
+ * <p>
+ * Provides a variety of test framework tools to simplify Drill unit
+ * tests and ad-hoc tests created while developing features. Key components
+ * include:
+ * </p>
* <ul>
- * <li>{@link ClusterFixtureBuilder}: Builder pattern to create an embedded Drillbit,
- * or cluster of Drillbits, using a specified set of configuration, session
- * and system options.</li>
- * <li>{@link ClusterFixture}: The cluster created by the builder.</li>
- * <li>{@link ClientFixture}: A facade to the Drill client that provides
- * convenience methods for setting session options, running queries and
- * so on. A client is associated with a cluster. If tests desire, multiple
- * clients can be created for a single cluster, though most need just one
- * client. A builder exists for clients, but most tests get the client
- * directly from the cluster.</li>
- * <li>{@link QueryBuilder}: a builder pattern for constructing and
- * running any form of query (SQL, logical or physical) and running the
- * query in a wide variety of ways (just count the rows, return the
- * results as a list, run using a listener, etc.)</li>
- * <li>{@link QueryBuilder.QuerySummary QuerySummary}: a summary of a
- * query returned from running the query. Contains the query ID, the
- * row count, the batch count and elapsed run time.</li>
- * <li>{@link ProfileParser}: A simple tool to load a query profile and
- * provide access to the profile structure. Also prints the key parts of
- * the profile for diagnostic purposes.</li>
- * <li>{@link LogFixture}: Allows per-test changes to log settings to,
- * say, send a particular logger to the console for easier debugging, or
- * to suppress logging of a deliberately created failure.</li>
+ * <li>
+ * {@link org.apache.drill.test.ClusterFixtureBuilder}: Builder pattern to create an embedded Drillbit,
+ * or cluster of Drillbits, using a specified set of configuration, session
+ * and system options.
+ * </li>
+ * <li>
+ * {@link org.apache.drill.test.ClusterFixture}: The cluster created by the builder.
+ * </li>
+ * <li>
+ * {@link org.apache.drill.test.ClientFixture}: A facade to the Drill client that provides
+ * convenience methods for setting session options, running queries and
+ * so on. A client is associated with a cluster. If tests desire, multiple
+ * clients can be created for a single cluster, though most need just one
+ * client. A builder exists for clients, but most tests get the client
+ * directly from the cluster.
+ * </li>
+ * <li>
+ * {@link org.apache.drill.test.QueryBuilder}: a builder pattern for constructing and
+ * running any form of query (SQL, logical or physical) and running the
+ * query in a wide variety of ways (just count the rows, return the
+ * results as a list, run using a listener, etc.)
+ * </li>
+ * <li>
+ * {@link org.apache.drill.test.QueryBuilder.QuerySummary QuerySummary}: a summary of a
+ * query returned from running the query. Contains the query ID, the
+ * row count, the batch count and elapsed run time.
+ * </li>
+ * <li>
+ * {@link org.apache.drill.test.ProfileParser}: A simple tool to load a query profile and
+ * provide access to the profile structure. Also prints the key parts of
+ * the profile for diagnostic purposes.
+ * </li>
+ * <li>
+ * {@link org.apache.drill.test.LogFixture}: Allows per-test changes to log settings to,
+ * say, send a particular logger to the console for easier debugging, or
+ * to suppress logging of a deliberately created failure.
+ * </li>
+ * <li>
+ * {@link org.apache.drill.test.BaseDirTestWatcher}: Creates temporary directories which are used for the following
+ *    aspects of Drill in unit tests:
+ * <ul>
+ * <li>
+ * The <b>drill.tmp-dir</b> property: A temp directory ({@link org.apache.drill.test.BaseDirTestWatcher#getTmpDir()}) is created and
+ *       configured as the tmp directory for drillbits.
+ * </li>
+ * <li>
+ * The <b>drill.exec.sys.store.provider.local.path</b> property: A temp directory ({@link org.apache.drill.test.BaseDirTestWatcher#getStoreDir()}) is created and
+ * configured as the store directory for drillbits.
+ * </li>
+ * <li>
+ *       The <b>dfs.default</b> workspace: A temp directory ({@link org.apache.drill.test.BaseDirTestWatcher#getRootDir()}) is created and configured as the directory
+ * for this workspace.
+ * </li>
+ * <li>
+ *       The <b>dfs.root</b> workspace: <b>dfs.root</b> is configured to use the same temp directory as <b>dfs.default</b>.
+ * </li>
+ * <li>
+ * The <b>dfs.tmp</b> workspace: A temp directory ({@link org.apache.drill.test.BaseDirTestWatcher#getDfsTestTmpDir()}) is created and configured as the
+ * directory for this workspace.
+ * </li>
+ * </ul>
+ * By default a {@link org.apache.drill.test.BaseDirTestWatcher} is used in tests that extend {@link org.apache.drill.test.BaseTestQuery} and tests that use
+ * the {@link org.apache.drill.test.ClusterFixture}.
+ * </li>
* </ul>
* <h3>Usage</h3>
- * A typical test using this framework looks like this:
+ * <p>
+ * A typical test using this framework looks like this:
+ * </p>
* <code><pre>
+ {@literal @}org.junit.Rule
+ public final BaseDirTestWatcher dirTestWatcher = new BaseDirTestWatcher();
+
{@literal @}Test
public void exampleTest() throws Exception {
+ createEmployeeCsv(dirTestWatcher.getRootDir());
// Configure the cluster. One Drillbit by default.
- FixtureBuilder builder = ClusterFixture.builder()
+ FixtureBuilder builder = ClusterFixture.builder(dirTestWatcher)
// Set up per-test specialized config and session options.
.configProperty(ExecConstants.SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE, true)
.configProperty(ExecConstants.REMOVER_ENABLE_GENERIC_COPIER, true)
.sessionOption(ExecConstants.MAX_QUERY_MEMORY_PER_NODE_KEY, 3L * 1024 * 1024 * 1024)
- .maxParallelization(1)
- ;
+ .maxParallelization(1);
// Launch the cluster and client.
try (ClusterFixture cluster = builder.build();
ClientFixture client = cluster.clientFixture()) {
// Run a query (using the mock data source) and print a summary.
- String sql = "SELECT id_i FROM `mock`.employee_1M ORDER BY id_i";
+ String sql = "SELECT id_i FROM dfs.`employee.csv` ORDER BY id_i";
QuerySummary summary = client.queryBuilder().sql(sql).run();
assertEquals(1_000_000, summary.recordCount());
System.out.println(String.format("Sorted %,d records in %d batches.", summary.recordCount(), summary.batchCount()));
@@ -73,8 +121,8 @@
}
}
* </pre></code>
- * <p>
- * Typical usage for the logging fixture: <pre><code>
+ * Typical usage for the logging fixture:
+ * <pre><code>
* {@literal @}Test
* public void myTest() {
* LogFixtureBuilder logBuilder = LogFixture.builder()
@@ -84,7 +132,8 @@
* try (LogFixture logs = logBuilder.build()) {
* // Test code here
* }
- * }</code></pre>
+ * }
+ * </code></pre>
*
*/
package org.apache.drill.test;
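
A hedged sketch of using the BaseDirTestWatcher directories described above directly in a test. The getters are those named in the list; the file name and the data-writing step are hypothetical:

    @org.junit.Rule
    public final BaseDirTestWatcher dirTestWatcher = new BaseDirTestWatcher();

    @Test
    public void exampleWorkspaceDirs() throws Exception {
      // Files placed under getRootDir() are visible through the dfs.default
      // and dfs.root workspaces (hypothetical file name).
      java.io.File input = new java.io.File(dirTestWatcher.getRootDir(), "employee.csv");
      // ... write test data to input, then query it as dfs.`employee.csv` ...

      // Scratch output belongs under the dfs.tmp workspace directory.
      java.io.File scratch = dirTestWatcher.getDfsTestTmpDir();
    }
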
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSet.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSet.java b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSet.java
index 6401f75..474508c 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSet.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSet.java
@@ -85,8 +85,7 @@ public interface RowSet {
* new row set with the updated columns, then merge the new
* and old row sets to create a new immutable row set.
*/
-
- public interface RowSetWriter extends TupleWriter {
+ interface RowSetWriter extends TupleWriter {
void setRow(Object...values);
boolean valid();
int index();
@@ -97,8 +96,7 @@ public interface RowSet {
/**
* Reader for all types of row sets.
*/
-
- public interface RowSetReader extends TupleReader {
+ interface RowSetReader extends TupleReader {
/**
* Total number of rows in the row set.
@@ -159,7 +157,6 @@ public interface RowSet {
*
* @return memory size in bytes
*/
-
long size();
RowSet merge(RowSet other);
@@ -169,8 +166,7 @@ public interface RowSet {
/**
* Row set that manages a single batch of rows.
*/
-
- public interface SingleRowSet extends RowSet {
+ interface SingleRowSet extends RowSet {
ValueVector[] vectors();
SingleRowSet toIndirect();
SelectionVector2 getSv2();
@@ -181,8 +177,7 @@ public interface RowSet {
* Once writing is complete, the row set becomes an
* immutable direct row set.
*/
-
- public interface ExtendableRowSet extends SingleRowSet {
+ interface ExtendableRowSet extends SingleRowSet {
void allocate(int recordCount);
void setRowCount(int rowCount);
RowSetWriter writer(int initialRowCount);
@@ -192,8 +187,7 @@ public interface RowSet {
* Row set comprised of multiple single row sets, along with
* an indirection vector (SV4).
*/
-
- public interface HyperRowSet extends RowSet {
+ interface HyperRowSet extends RowSet {
SelectionVector4 getSv4();
HyperVectorWrapper<ValueVector> getHyperVector(int i);
}
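
A hedged sketch of the writer/reader round trip these interfaces describe. Only methods visible in this patch are used (allocate() and writer(int) from ExtendableRowSet, setRow() from RowSetWriter, and reader()/column() as used by JsonFileBuilder later in this patch); obtaining the row set from a fixture is elided:

    ExtendableRowSet rowSet = ...;           // obtained from a test fixture (elided)
    rowSet.allocate(2);
    RowSetWriter writer = rowSet.writer(2);  // initial row count
    writer.setRow(10, "a");                  // one setRow(Object...) call per row
    writer.setRow(20, "b");
    // (the writer is sealed by a lifecycle call outside this hunk)
    RowSet.RowSetReader reader = rowSet.reader();
    ColumnReader idColumn = reader.column("id"); // hypothetical column name
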
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetBuilder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetBuilder.java b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetBuilder.java
index 80e8ae4..6f9a8d9 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetBuilder.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetBuilder.java
@@ -24,7 +24,7 @@ import org.apache.drill.test.rowSet.RowSet.SingleRowSet;
/**
 * Fluent builder to quickly build up a row set (record batch)
- * programmatically. Starting with an {@link OperatorFixture}:
+ * programmatically. Starting with an {@link org.apache.drill.test.OperatorFixture}:
 * <pre><code>
* OperatorFixture fixture = ...
* RowSet rowSet = fixture.rowSetBuilder(batchSchema)
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java
index ea50074..6e72923 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java
@@ -25,6 +25,8 @@ import org.apache.drill.exec.vector.accessor.ColumnReader;
import org.apache.drill.test.rowSet.RowSet.RowSetReader;
import org.bouncycastle.util.Arrays;
+import java.util.Comparator;
+
/**
* For testing, compare the contents of two row sets (record batches)
* to verify that they are identical. Supports masks to exclude certain
@@ -255,4 +257,39 @@ public class RowSetComparison {
}
}
}
+
+ // TODO make a native RowSetComparison comparator
+ public static class ObjectComparator implements Comparator<Object> {
+ public static final ObjectComparator INSTANCE = new ObjectComparator();
+
+ private ObjectComparator() {
+ }
+
+ @Override
+ public int compare(Object a, Object b) {
+      if (a instanceof Integer) {
+        Integer aInt = (Integer) a;
+        Integer bInt = (Integer) b;
+        // Integer.compareTo avoids the overflow risk of returning aInt - bInt
+        return aInt.compareTo(bInt);
+ } else if (a instanceof Long) {
+ Long aLong = (Long) a;
+ Long bLong = (Long) b;
+ return aLong.compareTo(bLong);
+ } else if (a instanceof Float) {
+ Float aFloat = (Float) a;
+ Float bFloat = (Float) b;
+ return aFloat.compareTo(bFloat);
+ } else if (a instanceof Double) {
+ Double aDouble = (Double) a;
+ Double bDouble = (Double) b;
+ return aDouble.compareTo(bDouble);
+ } else if (a instanceof String) {
+ String aString = (String) a;
+ String bString = (String) b;
+ return aString.compareTo(bString);
+ } else {
+ throw new UnsupportedOperationException(String.format("Unsupported type %s", a.getClass().getCanonicalName()));
+ }
+ }
+ }
}
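
Since the new ObjectComparator is exposed as a singleton Comparator&lt;Object&gt;, a short usage sketch (the values are hypothetical):

    // Each list must hold a single boxed type: mixing, say, Integer and Long
    // would fail on the (Integer) cast of b in the first branch above.
    java.util.List<Object> values = new java.util.ArrayList<>(java.util.Arrays.asList(3L, 1L, 2L));
    values.sort(RowSetComparison.ObjectComparator.INSTANCE);
    // values is now [1L, 2L, 3L]
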
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/file/JsonFileBuilder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/file/JsonFileBuilder.java b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/file/JsonFileBuilder.java
new file mode 100644
index 0000000..ff93bf0
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/file/JsonFileBuilder.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.test.rowSet.file;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.vector.accessor.ColumnAccessor;
+import org.apache.drill.exec.vector.accessor.ColumnReader;
+import org.apache.drill.test.rowSet.RowSet;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+public class JsonFileBuilder {
+ public static final String DEFAULT_DOUBLE_FORMATTER = "%f";
+ public static final String DEFAULT_INTEGER_FORMATTER = "%d";
+ public static final String DEFAULT_LONG_FORMATTER = "%d";
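+ // NOTE: "%s" is wrapped in quotes but not JSON-escaped, so string values
+ // must not contain quotes, backslashes, or control characters.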
+ public static final String DEFAULT_STRING_FORMATTER = "\"%s\"";
+ public static final String DEFAULT_DECIMAL_FORMATTER = "%s";
+ public static final String DEFAULT_PERIOD_FORMATTER = "%s";
+
+ public static final Map<ColumnAccessor.ValueType, String> DEFAULT_FORMATTERS = new ImmutableMap.Builder<ColumnAccessor.ValueType, String>()
+ .put(ColumnAccessor.ValueType.DOUBLE, DEFAULT_DOUBLE_FORMATTER)
+ .put(ColumnAccessor.ValueType.INTEGER, DEFAULT_INTEGER_FORMATTER)
+ .put(ColumnAccessor.ValueType.LONG, DEFAULT_LONG_FORMATTER)
+ .put(ColumnAccessor.ValueType.STRING, DEFAULT_STRING_FORMATTER)
+ .put(ColumnAccessor.ValueType.DECIMAL, DEFAULT_DECIMAL_FORMATTER)
+ .put(ColumnAccessor.ValueType.PERIOD, DEFAULT_PERIOD_FORMATTER)
+ .build();
+
+ private final RowSet rowSet;
+ private final Map<String, String> customFormatters = Maps.newHashMap();
+
+ public JsonFileBuilder(RowSet rowSet) {
+ this.rowSet = Preconditions.checkNotNull(rowSet);
+ Preconditions.checkArgument(rowSet.rowCount() > 0, "The given rowset is empty.");
+ }
+
+ public JsonFileBuilder setCustomFormatter(final String columnName, final String columnFormatter) {
+ Preconditions.checkNotNull(columnName);
+ Preconditions.checkNotNull(columnFormatter);
+
+ Iterator<MaterializedField> fields = rowSet
+ .schema()
+ .batch()
+ .iterator();
+
+ boolean hasColumn = false;
+
+ while (!hasColumn && fields.hasNext()) {
+ hasColumn = fields.next()
+ .getName()
+ .equals(columnName);
+ }
+
+ final String message = String.format("(%s) is not a valid column", columnName);
+ Preconditions.checkArgument(hasColumn, message);
+
+ customFormatters.put(columnName, columnFormatter);
+
+ return this;
+ }
+
+ public void build(File tableFile) throws IOException {
+ tableFile.getParentFile().mkdirs();
+
+ try (BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(tableFile))) {
+ final RowSet.RowSetReader reader = rowSet.reader();
+ final int numCols = rowSet
+ .schema()
+ .batch()
+ .getFieldCount();
+ final Iterator<MaterializedField> fieldIterator = rowSet
+ .schema()
+ .batch()
+ .iterator();
+ final List<String> columnNames = Lists.newArrayList();
+ final List<String> columnFormatters = Lists.newArrayList();
+
+ // Determine each column's formatter from its value type; custom formatters take precedence.
+ while (fieldIterator.hasNext()) {
+ final String columnName = fieldIterator.next().getName();
+ final ColumnReader columnReader = reader.column(columnName);
+ final ColumnAccessor.ValueType valueType = columnReader.valueType();
+ final String columnFormatter;
+
+ if (customFormatters.containsKey(columnName)) {
+ columnFormatter = customFormatters.get(columnName);
+ } else if (DEFAULT_FORMATTERS.containsKey(valueType)) {
+ columnFormatter = DEFAULT_FORMATTERS.get(valueType);
+ } else {
+ final String message = String.format("Unsupported column type %s", valueType);
+ throw new UnsupportedOperationException(message);
+ }
+
+ columnNames.add(columnName);
+ columnFormatters.add(columnFormatter);
+ }
+
+ final StringBuilder sb = new StringBuilder();
+ String lineSeparator = "";
+
+ for (int index = 0; index < rowSet.rowCount(); index++) {
+ reader.next();
+ sb.append(lineSeparator);
+ sb.append('{');
+ String separator = "";
+
+ for (int columnIndex = 0; columnIndex < numCols; columnIndex++) {
+ sb.append(separator);
+
+ final String columnName = columnNames.get(columnIndex);
+ final ColumnReader columnReader = reader.column(columnIndex);
+ final String columnFormatter = columnFormatters.get(columnIndex);
+ final Object columnObject = columnReader.getObject();
+ final String columnString = String.format(columnFormatter, columnObject);
+
+ sb.append('"')
+ .append(columnName)
+ .append('"')
+ .append(':')
+ .append(columnString);
+
+ separator = ",";
+ }
+
+ sb.append('}');
+ lineSeparator = "\n";
+ os.write(sb.toString().getBytes(StandardCharsets.UTF_8));
+ sb.delete(0, sb.length());
+ }
+ }
+ }
+}
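[Editorial sketch of the intended usage of JsonFileBuilder above; not part of the patch. The row set is assumed to be built elsewhere, and "price" is a hypothetical DOUBLE column.]

  import java.io.File;
  import java.io.IOException;
  import org.apache.drill.test.rowSet.RowSet;
  import org.apache.drill.test.rowSet.file.JsonFileBuilder;

  public class JsonFileBuilderExample {
    // Writes the row set as line-delimited JSON, rendering the hypothetical
    // "price" column with two decimal places instead of the "%f" default.
    public static void writeTable(RowSet rowSet, File tableDir) throws IOException {
      new JsonFileBuilder(rowSet)
          .setCustomFormatter("price", "%.2f")
          .build(new File(tableDir, "prices.json"));
    }
  }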
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/bootstrap-storage-plugins.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/bootstrap-storage-plugins.json b/exec/java-exec/src/test/resources/bootstrap-storage-plugins.json
deleted file mode 100644
index 35ca26b..0000000
--- a/exec/java-exec/src/test/resources/bootstrap-storage-plugins.json
+++ /dev/null
@@ -1,75 +0,0 @@
-{
- "storage":{
- dfs_test: {
- type: "file",
- connection: "file:///",
- workspaces: {
- "home" : {
- location: "/",
- writable: false
- },
- "tmp" : {
- location: "/tmp",
- writable: true
- }
- },
- formats: {
- "psv" : {
- type: "text",
- extensions: [ "tbl" ],
- delimiter: "|"
- },
- "csv" : {
- type: "text",
- extensions: [ "csv", "bcp" ],
- delimiter: ","
- },
- "tsv" : {
- type: "text",
- extensions: [ "tsv" ],
- delimiter: "\t"
- },
- "ssv" : {
- type: "text",
- extensions: [ "ssv" ],
- delimiter: " "
- },
- "parquet" : {
- type: "parquet"
- },
- "json" : {
- type: "json"
- },
- "httpd" : {
- type: "httpd",
- logFormat: "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\""
- },
- "txt" : {
- type : "text",
- extensions: [ "txt" ],
- delimiter: "\u0000"
- },
- "avro" : {
- type: "avro"
- },
- "sequencefile": {
- type: "sequencefile",
- extensions: [ "seq" ]
- },
- "csvh" : {
- type: "text",
- extensions: [ "csvh" ],
- delimiter: ",",
- extractHeader: true
- },
- "csvh-test" : {
- type: "text",
- extensions: [ "csvh-test" ],
- delimiter: ",",
- extractHeader: true,
- skipFirstLine: true
- }
- }
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/parquet/parquet_nullable.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/parquet/parquet_nullable.json b/exec/java-exec/src/test/resources/parquet/parquet_nullable.json
index 56963a8..3e09f83 100644
--- a/exec/java-exec/src/test/resources/parquet/parquet_nullable.json
+++ b/exec/java-exec/src/test/resources/parquet/parquet_nullable.json
@@ -8,10 +8,10 @@
}
},
storage:{
- "dfs_test" :
+ "dfs" :
{
"type":"named",
- "name":"dfs_test"
+ "name":"dfs"
}
},
query:[
@@ -19,7 +19,7 @@
@id:"1",
op:"scan",
memo:"initial_scan",
- storageengine:"dfs_test",
+ storageengine:"dfs",
selection: {
format: {type: "parquet"},
files: [
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/parquet/parquet_nullable_varlen.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/parquet/parquet_nullable_varlen.json b/exec/java-exec/src/test/resources/parquet/parquet_nullable_varlen.json
index ea81b6c..9547f00 100644
--- a/exec/java-exec/src/test/resources/parquet/parquet_nullable_varlen.json
+++ b/exec/java-exec/src/test/resources/parquet/parquet_nullable_varlen.json
@@ -8,10 +8,10 @@
}
},
storage:{
- "dfs_test" :
+ "dfs" :
{
"type":"named",
- "name":"dfs_test"
+ "name":"dfs"
}
},
query:[
@@ -19,7 +19,7 @@
@id:"1",
op:"scan",
memo:"initial_scan",
- storageengine:"dfs_test",
+ storageengine:"dfs",
selection: {
format: {type: "parquet"},
files: [
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/parquet/parquet_scan_screen.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/parquet/parquet_scan_screen.json b/exec/java-exec/src/test/resources/parquet/parquet_scan_screen.json
index ebb0b24..50281f4 100644
--- a/exec/java-exec/src/test/resources/parquet/parquet_scan_screen.json
+++ b/exec/java-exec/src/test/resources/parquet/parquet_scan_screen.json
@@ -8,10 +8,10 @@
}
},
storage:{
- "dfs_test" :
+ "dfs" :
{
"type":"named",
- "name":"dfs_test"
+ "name":"dfs"
}
},
query:[
@@ -19,7 +19,7 @@
@id:"1",
op:"scan",
memo:"initial_scan",
- storageengine:"dfs_test",
+ storageengine:"dfs",
selection: {
format: {type: "parquet"},
files: [
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/queries/tpch/15.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/queries/tpch/15.sql b/exec/java-exec/src/test/resources/queries/tpch/15.sql
index 49927a4..3613d68 100644
--- a/exec/java-exec/src/test/resources/queries/tpch/15.sql
+++ b/exec/java-exec/src/test/resources/queries/tpch/15.sql
@@ -1,5 +1,5 @@
-- tpch15 using 1395599672 as a seed to the RNG
-use dfs_test.tmp; -- views can only be created in dfs schema
+use dfs.tmp; -- views can only be created in dfs schema
create view revenue0 (supplier_no, total_revenue) as
select
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/store/text/test.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/store/text/test.json b/exec/java-exec/src/test/resources/store/text/test.json
index 4a312d6..78ac516 100644
--- a/exec/java-exec/src/test/resources/store/text/test.json
+++ b/exec/java-exec/src/test/resources/store/text/test.json
@@ -15,7 +15,7 @@
],
storage : {
type : "named",
- name: "dfs_test"
+ name: "dfs"
},
format: {
type: "named",
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/topN/one_key_sort.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/topN/one_key_sort.json b/exec/java-exec/src/test/resources/topN/one_key_sort.json
index 45b3fc3..39b9355 100644
--- a/exec/java-exec/src/test/resources/topN/one_key_sort.json
+++ b/exec/java-exec/src/test/resources/topN/one_key_sort.json
@@ -12,11 +12,11 @@
pop:"mock-scan",
url: "http://apache.org",
entries:[
- {records: 10000000, types: [
+ {records: 100000, types: [
{name: "blue", type: "INT", mode: "REQUIRED"},
{name: "green", type: "INT", mode: "REQUIRED"}
]},
- {records: 10000000, types: [
+ {records: 100000, types: [
{name: "blue", type: "INT", mode: "REQUIRED"},
{name: "green", type: "INT", mode: "REQUIRED"}
]}
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/tpcds-sf1/q73.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/tpcds-sf1/q73.sql b/exec/java-exec/src/test/resources/tpcds-sf1/q73.sql
index 094ca2b..4db8a4f 100644
--- a/exec/java-exec/src/test/resources/tpcds-sf1/q73.sql
+++ b/exec/java-exec/src/test/resources/tpcds-sf1/q73.sql
@@ -6,10 +6,10 @@ select c.c_last_name,
dj.cnt
from (
select ss.ss_ticket_number as sstn, ss.ss_customer_sk as sscsk, count(*) cnt
- from dfs_test.tpcds.store_sales as ss,
- dfs_test.tpcds.date_dim as d,
- dfs_test.tpcds.store as s,
- dfs_test.tpcds.household_demographics as hd
+ from dfs.tpcds.store_sales as ss,
+ dfs.tpcds.date_dim as d,
+ dfs.tpcds.store as s,
+ dfs.tpcds.household_demographics as hd
where ss.ss_sold_date_sk = d.d_date_sk
and ss.ss_store_sk = s.s_store_sk
and ss.ss_hdemo_sk = hd.hd_demo_sk
@@ -20,7 +20,7 @@ from (
and ss.ss_sold_date_sk between 2451180 and 2451269
group by ss.ss_ticket_number, ss.ss_customer_sk
) dj,
- dfs_test.tpcds.customer as c
+ dfs.tpcds.customer as c
where dj.sscsk = c.c_customer_sk
and dj.cnt between 1 and 5
order by dj.cnt desc
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/3604.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/3604.sql b/exec/java-exec/src/test/resources/window/3604.sql
index f88beaf..7219b00 100644
--- a/exec/java-exec/src/test/resources/window/3604.sql
+++ b/exec/java-exec/src/test/resources/window/3604.sql
@@ -1,4 +1,4 @@
select
lead(col3) over(partition by col2 order by col0) lead_col0
from
- dfs_test.`%s/window/fewRowsAllData.parquet`
\ No newline at end of file
+ dfs.`window/fewRowsAllData.parquet`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/3605.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/3605.sql b/exec/java-exec/src/test/resources/window/3605.sql
index 1e5d54a..96e3b26 100644
--- a/exec/java-exec/src/test/resources/window/3605.sql
+++ b/exec/java-exec/src/test/resources/window/3605.sql
@@ -2,4 +2,4 @@ select
col2,
lead(col2) over(partition by col2 order by col0) as lead_col2
from
- dfs_test.`%s/window/fewRowsAllData.parquet`
\ No newline at end of file
+ dfs.`window/fewRowsAllData.parquet`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/3606.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/3606.sql b/exec/java-exec/src/test/resources/window/3606.sql
index d96469e..9e39bc2 100644
--- a/exec/java-exec/src/test/resources/window/3606.sql
+++ b/exec/java-exec/src/test/resources/window/3606.sql
@@ -2,4 +2,4 @@ select
col2,
lead(col2) over(order by col0) as lead_col2
from
- dfs_test.`%s/window/fewRowsAllData.parquet`
\ No newline at end of file
+ dfs.`window/fewRowsAllData.parquet`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/3648.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/3648.sql b/exec/java-exec/src/test/resources/window/3648.sql
index c63ba17..baef342 100644
--- a/exec/java-exec/src/test/resources/window/3648.sql
+++ b/exec/java-exec/src/test/resources/window/3648.sql
@@ -2,4 +2,4 @@ select
ntile(5)
over(partition by col7 order by col0) as `ntile`
from
- dfs_test.`%s/window/3648.parquet`
\ No newline at end of file
+ dfs.`window/3648.parquet`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/3668.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/3668.sql b/exec/java-exec/src/test/resources/window/3668.sql
index e7ea7ab..0aec03f 100644
--- a/exec/java-exec/src/test/resources/window/3668.sql
+++ b/exec/java-exec/src/test/resources/window/3668.sql
@@ -1,6 +1,6 @@
select count(fv) as cnt
from (
select first_value(c2) over(partition by c2 order by c1) as fv
- from dfs_test.`%s/window/3668.parquet`
+ from dfs.`window/3668.parquet`
)
where fv = 'e'
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/fval.alltypes.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/fval.alltypes.sql b/exec/java-exec/src/test/resources/window/fval.alltypes.sql
index b11db6f..6ba0508 100644
--- a/exec/java-exec/src/test/resources/window/fval.alltypes.sql
+++ b/exec/java-exec/src/test/resources/window/fval.alltypes.sql
@@ -9,6 +9,6 @@ select
first_value(col7) over w as col7,
first_value(col8) over w as col8
from
- dfs_test.`%s/window/fewRowsAllData.parquet`
+ dfs.`window/fewRowsAllData.parquet`
window w as ()
limit 1
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/fval.pby.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/fval.pby.sql b/exec/java-exec/src/test/resources/window/fval.pby.sql
index 29d7f54..8afc84e 100644
--- a/exec/java-exec/src/test/resources/window/fval.pby.sql
+++ b/exec/java-exec/src/test/resources/window/fval.pby.sql
@@ -1,4 +1,4 @@
select
first_value(employee_id) over(partition by position_id order by line_no) as `first_value`
from
- dfs_test.`%s/window/b4.p4`
\ No newline at end of file
+ dfs.`window/b4.p4`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/lag.oby.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/lag.oby.sql b/exec/java-exec/src/test/resources/window/lag.oby.sql
index 91dadb5..dd776a9 100644
--- a/exec/java-exec/src/test/resources/window/lag.oby.sql
+++ b/exec/java-exec/src/test/resources/window/lag.oby.sql
@@ -1,3 +1,3 @@
select
lag(line_no) over(order by sub, employee_id) as `lag`
-from dfs_test.`%s/window/b4.p4`
\ No newline at end of file
+from dfs.`window/b4.p4`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/lag.pby.oby.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/lag.pby.oby.sql b/exec/java-exec/src/test/resources/window/lag.pby.oby.sql
index 64a890e..1d954c2 100644
--- a/exec/java-exec/src/test/resources/window/lag.pby.oby.sql
+++ b/exec/java-exec/src/test/resources/window/lag.pby.oby.sql
@@ -1,3 +1,3 @@
select
lag(line_no) over(partition by position_id order by sub, employee_id) as `lag`
-from dfs_test.`%s/window/b4.p4`
\ No newline at end of file
+from dfs.`window/b4.p4`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/lead.oby.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/lead.oby.sql b/exec/java-exec/src/test/resources/window/lead.oby.sql
index 211a142..e48f02d 100644
--- a/exec/java-exec/src/test/resources/window/lead.oby.sql
+++ b/exec/java-exec/src/test/resources/window/lead.oby.sql
@@ -1,3 +1,3 @@
select
lead(line_no) over(order by sub, employee_id) as `lead`
-from dfs_test.`%s/window/b4.p4`
\ No newline at end of file
+from dfs.`window/b4.p4`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/lead.pby.oby.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/lead.pby.oby.sql b/exec/java-exec/src/test/resources/window/lead.pby.oby.sql
index 7d9904a..ac53133 100644
--- a/exec/java-exec/src/test/resources/window/lead.pby.oby.sql
+++ b/exec/java-exec/src/test/resources/window/lead.pby.oby.sql
@@ -1,3 +1,3 @@
select
lead(line_no) over(partition by position_id order by sub, employee_id) as `lead`
-from dfs_test.`%s/window/b4.p4`
\ No newline at end of file
+from dfs.`window/b4.p4`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/lval.alltypes.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/lval.alltypes.sql b/exec/java-exec/src/test/resources/window/lval.alltypes.sql
index 9f2e7fd..e70799d 100644
--- a/exec/java-exec/src/test/resources/window/lval.alltypes.sql
+++ b/exec/java-exec/src/test/resources/window/lval.alltypes.sql
@@ -9,6 +9,6 @@ select
last_value(col7) over w as col7,
last_value(col8) over w as col8
from
- dfs_test.`%s/window/fewRowsAllData.parquet`
+ dfs.`window/fewRowsAllData.parquet`
window w as ()
limit 1
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/lval.pby.oby.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/lval.pby.oby.sql b/exec/java-exec/src/test/resources/window/lval.pby.oby.sql
index 6e77a54..d348f02 100644
--- a/exec/java-exec/src/test/resources/window/lval.pby.oby.sql
+++ b/exec/java-exec/src/test/resources/window/lval.pby.oby.sql
@@ -1,4 +1,4 @@
select
last_value(employee_id) over(partition by position_id order by line_no) as `last_value`
from
- dfs_test.`%s/window/b4.p4`
\ No newline at end of file
+ dfs.`window/b4.p4`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/ntile.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/ntile.sql b/exec/java-exec/src/test/resources/window/ntile.sql
index 44184a8..54497b1 100644
--- a/exec/java-exec/src/test/resources/window/ntile.sql
+++ b/exec/java-exec/src/test/resources/window/ntile.sql
@@ -1,4 +1,4 @@
select
ntile(3) over(partition by position_id order by 1) as `ntile`
from
- dfs_test.`%s/window/b2.p4`
\ No newline at end of file
+ dfs.`window/b2.p4`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/q1.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/q1.sql b/exec/java-exec/src/test/resources/window/q1.sql
index 401ee97..7b193cb 100644
--- a/exec/java-exec/src/test/resources/window/q1.sql
+++ b/exec/java-exec/src/test/resources/window/q1.sql
@@ -2,5 +2,5 @@ select
count(*) over pos_win `count`,
sum(salary) over pos_win `sum`
from
- dfs_test.`%s/window/%s`
+ dfs.`window/%s`
window pos_win as %s
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/q2.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/q2.sql b/exec/java-exec/src/test/resources/window/q2.sql
index beae8e0..8e25901 100644
--- a/exec/java-exec/src/test/resources/window/q2.sql
+++ b/exec/java-exec/src/test/resources/window/q2.sql
@@ -6,5 +6,5 @@ select
dense_rank() over pos_win `dense_rank`,
cume_dist() over pos_win `cume_dist`,
percent_rank() over pos_win `percent_rank`
-from dfs_test.`%s/window/%s`
+from dfs.`window/%s`
window pos_win as %s
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/q3.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/q3.sql b/exec/java-exec/src/test/resources/window/q3.sql
index 0efb137..d6875fa 100644
--- a/exec/java-exec/src/test/resources/window/q3.sql
+++ b/exec/java-exec/src/test/resources/window/q3.sql
@@ -6,4 +6,4 @@ SELECT
ORDER by employee_id
RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS `last_value`
FROM
- dfs_test.`%s/window/b4.p4`
+ dfs.`window/b4.p4`
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/q3218.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/q3218.sql b/exec/java-exec/src/test/resources/window/q3218.sql
index 45bfd46..6e12c2e 100644
--- a/exec/java-exec/src/test/resources/window/q3218.sql
+++ b/exec/java-exec/src/test/resources/window/q3218.sql
@@ -2,4 +2,4 @@ select
max(cast(columns[2] as char(2)))
over(partition by cast(columns[2] as char(2))
order by cast(columns[0] as int))
-from dfs_test.`%s/window/allData.csv`
\ No newline at end of file
+from dfs.`window/allData.csv`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/q3220.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/q3220.sql b/exec/java-exec/src/test/resources/window/q3220.sql
index 3fdb047..0e33ad6 100644
--- a/exec/java-exec/src/test/resources/window/q3220.sql
+++ b/exec/java-exec/src/test/resources/window/q3220.sql
@@ -1,3 +1,3 @@
select
count(1) over(partition by position_id order by sub)
-from dfs_test.`%s/window/b1.p1`
\ No newline at end of file
+from dfs.`window/b1.p1`
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/window/q4.sql
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/window/q4.sql b/exec/java-exec/src/test/resources/window/q4.sql
index 5e1fb22..34ff9e2 100644
--- a/exec/java-exec/src/test/resources/window/q4.sql
+++ b/exec/java-exec/src/test/resources/window/q4.sql
@@ -4,6 +4,6 @@ SELECT
MAX(employee_id) OVER(PARTITION BY position_id) AS `last_value`
FROM (
SELECT *
- FROM dfs_test.`%s/window/b4.p4`
+ FROM dfs.`window/b4.p4`
ORDER BY position_id, employee_id
)
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/resources/writer/simple_csv_writer.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/writer/simple_csv_writer.json b/exec/java-exec/src/test/resources/writer/simple_csv_writer.json
index 7980eb5..b3c215a 100644
--- a/exec/java-exec/src/test/resources/writer/simple_csv_writer.json
+++ b/exec/java-exec/src/test/resources/writer/simple_csv_writer.json
@@ -27,17 +27,17 @@
@id: 2,
child: 1,
pop: "fs-writer",
- "location" : "/tmp/csvtest",
+ "location" : "%TEST_DIR%",
"storage" : {
"type" : "file",
"connection" : "file:///",
"workspaces" : {
"root" : {
- "location" : "/",
+ "location" : "%ROOT_DIR%",
"writable" : false
},
"tmp" : {
- "location" : "/tmp",
+ "location" : "%TMP_DIR%",
"writable" : true
}
},