https://issues.apache.org/jira/browse/FLINK-1807?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=14532341#comment-14532341
ASF GitHub Bot commented on FLINK-1807:
---------------------------------------
Github user tillrohrmann commented on a diff in the pull request:
https://github.com/apache/flink/pull/613#discussion_r29837516
--- Diff: flink-staging/flink-ml/src/main/scala/org/apache/flink/ml/optimization/Solver.scala ---
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.ml.optimization
+
+import org.apache.flink.api.scala.DataSet
+import org.apache.flink.ml.common._
+import org.apache.flink.ml.math.{Vector => FlinkVector, BLAS, DenseVector}
+import org.apache.flink.api.scala._
+import org.apache.flink.ml.optimization.IterativeSolver._
+import org.apache.flink.ml.optimization.Solver._
+
+/** Base class for optimization algorithms
+ *
+ */
+abstract class Solver extends Serializable with WithParameters {
+
+ /** Provides a solution for the given optimization problem
+ *
+ * @param data A Dataset of LabeledVector (input, output) pairs
+ * @param initialWeights The initial weight that will be optimized
+ * @return A Vector of weights optimized to the given problem
+ */
+ def optimize(data: DataSet[LabeledVector], initialWeights: Option[DataSet[WeightVector]]):
+ DataSet[WeightVector]
+ // TODO(tvas): Maybe we want to pass a WeightVector directly here, instead of a
+ // DataSet[WeightVector]
+
+ /** Creates a DataSet with one zero vector. The zero vector has dimension d, which is given
+ * by the dimensionDS.
+ *
+ * @param dimensionDS DataSet with one element d, denoting the dimension of the returned zero
+ * vector
+ * @return DataSet of a zero vector of dimension d
+ */
+ def createInitialWeightVector(dimensionDS: DataSet[Int]): DataSet[WeightVector] = {
+ dimensionDS.map {
+ dimension =>
+ val values = Array.fill(dimension)(0.0)
+ new WeightVector(DenseVector(values), 0.0)
+ }
+ }
+
+ // Setters for parameters
+ def setLossFunction(lossFunction: LossFunction): Solver = {
+ parameters.add(LossFunction, lossFunction)
+ this
+ }
+
+ def setRegularizationType(regularization: RegularizationType): Solver = {
+ parameters.add(RegularizationType, regularization)
+ this
+ }
+
+ def setRegularizationParameter(regularizationParameter: Double): Solver = {
+ parameters.add(RegularizationParameter, regularizationParameter)
+ this
+ }
+
+ def setPredictionFunction(predictionFunction: PredictionFunction): Solver = {
+ parameters.add(PredictionFunctionParam, predictionFunction)
+ this
+ }
+}
+
+object Solver {
+ // TODO(tvas): Does this belong in IterativeSolver instead?
+ val WEIGHTVECTOR_BROADCAST = "weights_broadcast"
+
+ // Define parameters for Solver
+ case object LossFunction extends Parameter[LossFunction] {
+ // TODO(tvas): Should depend on problem, here is where differentiating between classification
+ // and regression could become useful
+ val defaultValue = Some(new SquaredLoss)
+ }
+
+ case object RegularizationType extends Parameter[RegularizationType] {
+ val defaultValue = Some(new NoRegularization)
+ }
+
+ case object RegularizationParameter extends Parameter[Double] {
+ val defaultValue = Some(0.0) // TODO(tvas): Properly initialize this, ensure Parameter > 0!
+ }
+
+ case object PredictionFunctionParam extends Parameter[PredictionFunction] {
--- End diff --
Consistent naming: Param => Parameter
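
For illustration, a minimal sketch of the rename suggested above (it assumes the Parameter and PredictionFunction types from the diff; the default value shown is a placeholder, since the diff excerpt ends before the PR's actual default):

    case object PredictionFunctionParameter extends Parameter[PredictionFunction] {
      // Placeholder default; the PR's actual default is not visible in this diff excerpt.
      val defaultValue: Option[PredictionFunction] = None
    }

    // The setter would then reference the renamed parameter:
    def setPredictionFunction(predictionFunction: PredictionFunction): Solver = {
      parameters.add(PredictionFunctionParameter, predictionFunction)
      this
    }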
> Stochastic gradient descent optimizer for ML library
> ----------------------------------------------------
>
> Key: FLINK-1807
> URL: https://issues.apache.org/jira/browse/FLINK-1807
> Project: Flink
> Issue Type: Improvement
> Components: Machine Learning Library
> Reporter: Till Rohrmann
> Assignee: Theodore Vasiloudis
> Labels: ML
>
> Stochastic gradient descent (SGD) is a widely used optimization technique in different
> ML algorithms. Thus, it would be helpful to provide a generalized SGD implementation which
> can be instantiated with the respective gradient computation. Such a building block would
> make the development of future algorithms easier.
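
As a rough illustration of the building block described above, here is a minimal SGD sketch in plain Scala (not Flink API code; the object and method names, the sampling scheme, and the fixed step size are assumptions made for this example):

    import scala.util.Random

    object SgdSketch {
      // Generic SGD loop: the caller supplies the per-example gradient computation,
      // which is what makes the optimizer reusable across different ML algorithms.
      def optimize(
          data: IndexedSeq[(Array[Double], Double)],  // (features, label) pairs
          gradient: (Array[Double], (Array[Double], Double)) => Array[Double],
          stepSize: Double,
          iterations: Int): Array[Double] = {
        val rnd = new Random(42)
        var weights = new Array[Double](data.head._1.length)  // start at the zero vector
        for (_ <- 1 to iterations) {
          val example = data(rnd.nextInt(data.size))          // sample one training example
          val grad = gradient(weights, example)
          weights = weights.zip(grad).map { case (w, g) => w - stepSize * g }
        }
        weights
      }
    }

Instantiating it for, say, linear regression with squared loss then only requires supplying the corresponding gradient, (w . x - y) * x:

    val squaredLossGradient =
      (w: Array[Double], example: (Array[Double], Double)) => {
        val (x, y) = example
        val residual = w.zip(x).map { case (wi, xi) => wi * xi }.sum - y
        x.map(xi => residual * xi)
      }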
--
This message was sent by Atlassian JIRA
(v6.3.4#6332)