Packages

class AdaGrad extends Optimizer

Linear Supertypes
Ordering
  1. Alphabetic
  2. By Inheritance
Inherited
  1. AdaGrad
  2. Optimizer
  3. AnyRef
  4. Any
  1. Hide All
  2. Show All
Visibility
  1. Public
  2. All

Instance Constructors

  1. new AdaGrad(learningRate: Float = 0.01f, decay: Schedule[Float] = FixedSchedule[Float](), epsilon: Float = 1e-8f, ignoreDuplicateSparseIndices: Boolean = false, useLocking: Boolean = false, learningRateSummaryTag: String = null, name: String = "AdaGrad")
    Attributes
    protected

Value Members

  1. final def !=(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  2. final def ##(): Int
    Definition Classes
    AnyRef → Any
  3. final def ==(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  4. def applyDense[T, I](gradient: Output[T], variable: variables.Variable[T], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
    Definition Classes
    AdaGrad → Optimizer
  5. def applyGradients[T, I](gradientsAndVariables: Seq[(OutputLike[T], variables.Variable[Any])], iteration: Option[variables.Variable[I]] = None, name: String = this.name)(implicit arg0: core.types.TF[T], arg1: LongDefault[I], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
    Definition Classes
    Optimizer
    Annotations
    @throws( ... )
  6. def applySparse[T, I](gradient: OutputIndexedSlices[T], variable: variables.Variable[T], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
    Definition Classes
    AdaGrad → Optimizer
  7. def applySparseDuplicateIndices[T, I](gradient: OutputIndexedSlices[T], variable: variables.Variable[T], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
    Definition Classes
    Optimizer
  8. final def asInstanceOf[T0]: T0
    Definition Classes
    Any
  9. def clone(): AnyRef
    Attributes
    protected[java.lang]
    Definition Classes
    AnyRef
    Annotations
    @native() @throws( ... )
  10. def computeGradients[T](loss: Output[T], lossGradients: Seq[OutputLike[T]] = null, variables: Set[variables.Variable[Any]] = null, gradientsGatingMethod: GatingMethod = Gradients.OpGating, gradientsAggregationMethod: AggregationMethod = Gradients.AddAggregationMethod, colocateGradientsWithOps: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Seq[(OutputLike[T], variables.Variable[Any])]
    Definition Classes
    Optimizer
    Annotations
    @throws( ... )
  11. def createSlots(variables: Seq[variables.Variable[Any]]): Unit
    Definition Classes
    AdaGrad → Optimizer
  12. val decay: Schedule[Float]
  13. val epsilon: Float
  14. final def eq(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  15. def equals(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  16. def finalize(): Unit
    Attributes
    protected[java.lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( classOf[java.lang.Throwable] )
  17. def finish(updateOps: Set[UntypedOp], nameScope: String): UntypedOp
    Definition Classes
    Optimizer
  18. final def getClass(): Class[_]
    Definition Classes
    AnyRef → Any
    Annotations
    @native()
  19. def getLearningRate[V, I](variable: variables.Variable[V], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[V], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): Output[V]
    Attributes
    protected
  20. final def getNonSlotVariable[T](name: String, graph: core.Graph = null)(implicit arg0: core.types.TF[T]): variables.Variable[T]
    Attributes
    protected
    Definition Classes
    Optimizer
  21. final def getNonSlotVariables: Iterable[variables.Variable[Any]]
    Attributes
    protected
    Definition Classes
    Optimizer
  22. final def getOrCreateNonSlotVariable[T](name: String, initialValue: tensors.Tensor[T], colocationOps: Set[UntypedOp] = Set.empty, ignoreExisting: Boolean = false)(implicit arg0: core.types.TF[T]): variables.Variable[T]
    Attributes
    protected
    Definition Classes
    Optimizer
  23. final def getSlot[T, R](name: String, variable: variables.Variable[T])(implicit arg0: core.types.TF[T], arg1: core.types.TF[R]): variables.Variable[R]
    Attributes
    protected
    Definition Classes
    Optimizer
  24. final def getSlot[T, R](name: String, variable: variables.Variable[T], dataType: core.types.DataType[R], initializer: Initializer, shape: core.Shape, variableScope: String)(implicit arg0: core.types.TF[T], arg1: core.types.TF[R]): variables.Variable[R]
    Attributes
    protected
    Definition Classes
    Optimizer
  25. def hashCode(): Int
    Definition Classes
    AnyRef → Any
    Annotations
    @native()
  26. val ignoreDuplicateSparseIndices: Boolean
    Definition Classes
    AdaGrad → Optimizer
  27. final def isInstanceOf[T0]: Boolean
    Definition Classes
    Any
  28. val learningRate: Float
  29. val learningRateSummaryTag: String
  30. var learningRateTensor: Output[Float]
    Attributes
    protected
  31. def minimize[T, I](loss: Output[T], lossGradients: Seq[OutputLike[T]] = null, variables: Set[variables.Variable[Any]] = null, gradientsGatingMethod: GatingMethod = Gradients.OpGating, gradientsAggregationMethod: AggregationMethod = Gradients.AddAggregationMethod, colocateGradientsWithOps: Boolean = false, iteration: Option[variables.Variable[I]] = None, name: String = "Minimize")(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T], arg2: LongDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): UntypedOp
    Definition Classes
    Optimizer
    Annotations
    @throws( ... )
  32. val name: String
    Definition Classes
    AdaGrad → Optimizer
  33. final def ne(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  34. final val nonSlotVariables: Map[(String, Option[core.Graph]), variables.Variable[Any]]
    Attributes
    protected
    Definition Classes
    Optimizer
  35. final def notify(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  36. final def notifyAll(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  37. def prepare[I](iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[I], arg1: core.types.IsIntOrLong[I]): Unit
    Definition Classes
    AdaGrad → Optimizer
  38. final def slotNames: Set[String]
    Attributes
    protected
    Definition Classes
    Optimizer
  39. final val slots: Map[String, Map[variables.Variable[Any], variables.Variable[Any]]]
    Attributes
    protected
    Definition Classes
    Optimizer
  40. final def state: Seq[variables.Variable[Any]]
    Definition Classes
    Optimizer
  41. final def synchronized[T0](arg0: ⇒ T0): T0
    Definition Classes
    AnyRef
  42. def toString(): String
    Definition Classes
    AnyRef → Any
  43. val useLocking: Boolean
    Definition Classes
    AdaGrad → Optimizer
  44. final def wait(): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  45. final def wait(arg0: Long, arg1: Int): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  46. final def wait(arg0: Long): Unit
    Definition Classes
    AnyRef
    Annotations
    @native() @throws( ... )
  47. final def zerosSlot[T](name: String, variable: variables.Variable[T], variableScope: String)(implicit arg0: core.types.TF[T]): variables.Variable[T]
    Attributes
    protected
    Definition Classes
    Optimizer

Inherited from Optimizer

Inherited from AnyRef

Inherited from Any

Ungrouped