flink-issues mailing list archives

From GitBox <...@apache.org>
Subject [GitHub] [flink] wuchong commented on a change in pull request #8109: [FLINK-12017][table-planner-blink] Support translation from Rank/Deduplicate to StreamTransformation
Date Sat, 13 Apr 2019 06:49:48 GMT
wuchong commented on a change in pull request #8109: [FLINK-12017][table-planner-blink] Support translation from Rank/Deduplicate to StreamTransformation
URL: https://github.com/apache/flink/pull/8109#discussion_r275105033
 
 

 ##########
 File path: flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/runtime/utils/TableUtil.scala
 ##########
 @@ -36,28 +36,39 @@ object TableUtil {
     * Note: The difference between print() and collect() is
     * - print() prints data on workers and collect() collects data to the client.
     * - You have to call TableEnvironment.execute() to run the job for print(), while collect()
-    *   calls execute automatically.
+    * calls execute automatically.
     */
   def collect(table: TableImpl): Seq[Row] = collectSink(table, new CollectRowTableSink, None)
 
   def collect(table: TableImpl, jobName: String): Seq[Row] =
     collectSink(table, new CollectRowTableSink, Option.apply(jobName))
 
-  def collectAsT[T](table: TableImpl, t: TypeInformation[_], jobName : String = null): Seq[T] =
+  def collectAsT[T](table: TableImpl, t: TypeInformation[_], jobName: String = null): Seq[T] =
     collectSink(
       table,
       new CollectTableSink(_ => t.asInstanceOf[TypeInformation[T]]), Option(jobName))
 
   def collectSink[T](
-      table: TableImpl, sink: CollectTableSink[T], jobName : Option[String] = None): Seq[T] = {
+      table: TableImpl, sink: CollectTableSink[T], jobName: Option[String] = None): Seq[T] = {
     // get schema information of table
     val rowType = table.getRelNode.getRowType
     val fieldNames = rowType.getFieldNames.asScala.toArray
     val fieldTypes = rowType.getFieldList
-        .map(field => FlinkTypeFactory.toInternalType(field.getType)).toArray
+      .map(field => FlinkTypeFactory.toInternalType(field.getType)).toArray
     val configuredSink = sink.configure(
       fieldNames, fieldTypes.map(createExternalTypeInfoFromInternalType))
     BatchTableEnvUtil.collect(table.tableEnv.asInstanceOf[BatchTableEnvironment],
       table, configuredSink.asInstanceOf[CollectTableSink[T]], jobName)
   }
+
+  def writeToSink(table: TableImpl, sink: TableSink[_]): Unit = {
 
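For reference, a minimal sketch of how the `collect` variants in this hunk are called from a test, assuming a `TableImpl` named `table` is in scope (the job name is illustrative):

```scala
// collect() triggers execution itself and returns the rows to the client.
val rows: Seq[Row] = TableUtil.collect(table)             // default job name
val named: Seq[Row] = TableUtil.collect(table, "my-job")  // explicit job name

// A print-style sink, by contrast, only prints on the workers and needs an
// explicit TableEnvironment.execute() call to run the job (per the docstring
// above); that API is not shown in this hunk.
```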
 Review comment:
   I think we should avoid using `writeToSink(table, sink)` in IT cases. The logic in `writeToSink` should live in `TableEnvironment`, not in a test util or test base.
   
   Please use `tEnv.writeToSink(table, sink)` or `Table.insertInto` instead.
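   A minimal sketch of the suggested alternatives, assuming `tEnv`, `table`, and a configured `sink` are in scope in the IT case (the sink name, the job name, and the `registerTableSink(name, sink)` overload are illustrative assumptions, not taken from this PR):

```scala
// Option 1: let the TableEnvironment own the write logic.
tEnv.writeToSink(table, sink)

// Option 2: register the sink, then route the table into it by name.
// "testSink" is an illustrative name.
tEnv.registerTableSink("testSink", sink)
table.insertInto("testSink")

// Per the docstring above, the job still has to be run explicitly;
// "test-job" is an illustrative job name.
tEnv.execute("test-job")
```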

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services
