Class/Object

org.platanios.tensorflow.api.ops.training.optimizers

AdaGrad

Related Docs: object AdaGrad | package optimizers

Permalink

case class AdaGrad(learningRate: Double = 0.01, decay: Decay = NoDecay, epsilon: Double = 1e-8, useLocking: Boolean = false, learningRateSummaryTag: String = null, name: String = "AdaGrad") extends Optimizer with Product with Serializable

Linear Supertypes
Ordering
  1. Alphabetic
  2. By Inheritance
Inherited
  1. AdaGrad
  2. Serializable
  3. Serializable
  4. Product
  5. Equals
  6. Optimizer
  7. AnyRef
  8. Any
  1. Hide All
  2. Show All
Visibility
  1. Public
  2. All

Instance Constructors

  1. new AdaGrad(learningRate: Double = 0.01, decay: Decay = NoDecay, epsilon: Double = 1e-8, useLocking: Boolean = false, learningRateSummaryTag: String = null, name: String = "AdaGrad")

    Permalink

Value Members

  1. final def !=(arg0: Any): Boolean

    Permalink
    Definition Classes
    AnyRef → Any
  2. final def ##(): Int

    Permalink
    Definition Classes
    AnyRef → Any
  3. final def ==(arg0: Any): Boolean

    Permalink
    Definition Classes
    AnyRef → Any
  4. def applyDense(gradient: Output, variable: variables.Variable, iteration: Option[variables.Variable]): Op

    Permalink
    Definition Classes
    AdaGrad → Optimizer
  5. def applyGradients(gradientsAndVariables: Seq[(OutputLike, variables.Variable)], iteration: Option[variables.Variable] = None, name: String = this.name): Op

    Permalink
    Definition Classes
    Optimizer
  6. def applySparse(gradient: OutputIndexedSlices, variable: variables.Variable, iteration: Option[variables.Variable]): Op

    Permalink
    Definition Classes
    AdaGrad → Optimizer
  7. def applySparseDuplicateIndices(gradient: OutputIndexedSlices, variable: variables.Variable, iteration: Option[variables.Variable]): Op

    Permalink
    Attributes
    protected
    Definition Classes
    Optimizer
  8. final def asInstanceOf[T0]: T0

    Permalink
    Definition Classes
    Any
  9. def clone(): AnyRef

    Permalink
    Attributes
    protected[java.lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  10. def computeGradients(loss: Output, lossGradients: Seq[OutputLike] = null, variables: Set[variables.Variable] = null, gradientsGatingMethod: GatingMethod = Gradients.OpGating, gradientsAggregationMethod: AggregationMethod = Gradients.AddAggregationMethod, colocateGradientsWithOps: Boolean = false): Seq[(OutputLike, variables.Variable)]

    Permalink
    Definition Classes
    Optimizer
  11. def createSlots(variables: Seq[variables.Variable]): Unit

    Permalink
    Attributes
    protected
    Definition Classes
    AdaGrad → Optimizer
  12. val decay: Decay

    Permalink
  13. val epsilon: Double

    Permalink
  14. final def eq(arg0: AnyRef): Boolean

    Permalink
    Definition Classes
    AnyRef
  15. def finalize(): Unit

    Permalink
    Attributes
    protected[java.lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( classOf[java.lang.Throwable] )
  16. def finish(updateOps: Set[Op], nameScope: String): Op

    Permalink
    Attributes
    protected
    Definition Classes
    Optimizer
  17. final def getClass(): Class[_]

    Permalink
    Definition Classes
    AnyRef → Any
  18. def getSlot(name: String, variable: variables.Variable): variables.Variable

    Permalink
    Attributes
    protected
    Definition Classes
    Optimizer
  19. def getSlot(name: String, variable: variables.Variable, initializer: Initializer, shape: core.Shape, dataType: types.DataType, variableScope: String): variables.Variable

    Permalink
    Attributes
    protected
    Definition Classes
    Optimizer
  20. final def isInstanceOf[T0]: Boolean

    Permalink
    Definition Classes
    Any
  21. val learningRate: Double

    Permalink
  22. val learningRateSummaryTag: String

    Permalink
  23. def minimize(loss: Output, lossGradients: Seq[OutputLike] = null, variables: Set[variables.Variable] = null, gradientsGatingMethod: GatingMethod = Gradients.OpGating, gradientsAggregationMethod: AggregationMethod = Gradients.AddAggregationMethod, colocateGradientsWithOps: Boolean = false, iteration: Option[variables.Variable] = None, name: String = "Minimize"): Op

    Permalink
    Definition Classes
    Optimizer
  24. val name: String

    Permalink
    Definition Classes
    AdaGrad → Optimizer
  25. final def ne(arg0: AnyRef): Boolean

    Permalink
    Definition Classes
    AnyRef
  26. final def notify(): Unit

    Permalink
    Definition Classes
    AnyRef
  27. final def notifyAll(): Unit

    Permalink
    Definition Classes
    AnyRef
  28. def prepare(iteration: Option[variables.Variable]): Unit

    Permalink
    Definition Classes
    AdaGrad → Optimizer
  29. def slotNames: Set[String]

    Permalink
    Attributes
    protected
    Definition Classes
    Optimizer
  30. val slots: Map[String, Map[variables.Variable, variables.Variable]]

    Permalink
    Attributes
    protected
    Definition Classes
    Optimizer
  31. val supportedDataTypes: Set[types.DataType]

    Permalink
    Attributes
    protected
    Definition Classes
    Optimizer
  32. final def synchronized[T0](arg0: ⇒ T0): T0

    Permalink
    Definition Classes
    AnyRef
  33. val useLocking: Boolean

    Permalink
    Definition Classes
    AdaGrad → Optimizer
  34. final def wait(): Unit

    Permalink
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  35. final def wait(arg0: Long, arg1: Int): Unit

    Permalink
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  36. final def wait(arg0: Long): Unit

    Permalink
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  37. def zerosSlot(name: String, variable: variables.Variable, variableScope: String): variables.Variable

    Permalink
    Attributes
    protected
    Definition Classes
    Optimizer

Inherited from Serializable

Inherited from Serializable

Inherited from Product

Inherited from Equals

Inherited from Optimizer

Inherited from AnyRef

Inherited from Any

Ungrouped