class RMSProp extends Optimizer
Linear Supertypes
Optimizer, AnyRef, Any
Ordering
- Alphabetic
- By Inheritance
Inherited
- RMSProp
- Optimizer
- AnyRef
- Any
- Hide All
- Show All
Visibility
- Public
- All
Instance Constructors
-
new
RMSProp(learningRate: Float = 0.01f, decay: Schedule[Float] = FixedSchedule[Float](), rho: Float = 0.9f, momentum: Float = 0.0f, epsilon: Float = 1e-10f, centered: Boolean = false, ignoreDuplicateSparseIndices: Boolean = false, useLocking: Boolean = false, learningRateSummaryTag: String = null, name: String = "RMSProp")
- Attributes
- protected
Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def applyDense[T, I](gradient: Output[T], variable: variables.Variable[T], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
-
def
applyGradients[T, I](gradientsAndVariables: Seq[(OutputLike[T], variables.Variable[Any])], iteration: Option[variables.Variable[I]] = None, name: String = this.name)(implicit arg0: core.types.TF[T], arg1: LongDefault[I], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
- Definition Classes
- Optimizer
- Annotations
- @throws( ... )
- def applySparse[T, I](gradient: OutputIndexedSlices[T], variable: variables.Variable[T], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
-
def
applySparseDuplicateIndices[T, I](gradient: OutputIndexedSlices[T], variable: variables.Variable[T], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): UntypedOp
- Definition Classes
- Optimizer
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
- val centered: Boolean
-
def
clone(): AnyRef
- Attributes
- protected[java.lang]
- Definition Classes
- AnyRef
- Annotations
- @native() @throws( ... )
-
def
computeGradients[T](loss: Output[T], lossGradients: Seq[OutputLike[T]] = null, variables: Set[variables.Variable[Any]] = null, gradientsGatingMethod: GatingMethod = Gradients.OpGating, gradientsAggregationMethod: AggregationMethod = Gradients.AddAggregationMethod, colocateGradientsWithOps: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Seq[(OutputLike[T], variables.Variable[Any])]
- Definition Classes
- Optimizer
- Annotations
- @throws( ... )
- def createSlots(variables: Seq[variables.Variable[Any]]): Unit
- val decay: Schedule[Float]
- val epsilon: Float
-
var
epsilonTensor: Output[Float]
- Attributes
- protected
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
equals(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
def
finalize(): Unit
- Attributes
- protected[java.lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
-
def
finish(updateOps: Set[UntypedOp], nameScope: String): UntypedOp
- Definition Classes
- Optimizer
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
def
getEpsilon[V](variable: variables.Variable[V])(implicit arg0: core.types.TF[V]): Output[V]
- Attributes
- protected
-
def
getLearningRate[V, I](variable: variables.Variable[V], iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[V], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): Output[V]
- Attributes
- protected
-
def
getMomentum[V](variable: variables.Variable[V])(implicit arg0: core.types.TF[V]): Output[V]
- Attributes
- protected
-
final
def
getNonSlotVariable[T](name: String, graph: core.Graph = null)(implicit arg0: core.types.TF[T]): variables.Variable[T]
- Attributes
- protected
- Definition Classes
- Optimizer
-
final
def
getNonSlotVariables: Iterable[variables.Variable[Any]]
- Attributes
- protected
- Definition Classes
- Optimizer
-
final
def
getOrCreateNonSlotVariable[T](name: String, initialValue: tensors.Tensor[T], colocationOps: Set[UntypedOp] = Set.empty, ignoreExisting: Boolean = false)(implicit arg0: core.types.TF[T]): variables.Variable[T]
- Attributes
- protected
- Definition Classes
- Optimizer
-
def
getRho[V](variable: variables.Variable[V])(implicit arg0: core.types.TF[V]): Output[V]
- Attributes
- protected
-
final
def
getSlot[T, R](name: String, variable: variables.Variable[T])(implicit arg0: core.types.TF[T], arg1: core.types.TF[R]): variables.Variable[R]
- Attributes
- protected
- Definition Classes
- Optimizer
-
final
def
getSlot[T, R](name: String, variable: variables.Variable[T], dataType: core.types.DataType[R], initializer: Initializer, shape: core.Shape, variableScope: String)(implicit arg0: core.types.TF[T], arg1: core.types.TF[R]): variables.Variable[R]
- Attributes
- protected
- Definition Classes
- Optimizer
-
def
hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
- val ignoreDuplicateSparseIndices: Boolean
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- val learningRate: Float
- val learningRateSummaryTag: String
-
var
learningRateTensor: Output[Float]
- Attributes
- protected
-
def
minimize[T, I](loss: Output[T], lossGradients: Seq[OutputLike[T]] = null, variables: Set[variables.Variable[Any]] = null, gradientsGatingMethod: GatingMethod = Gradients.OpGating, gradientsAggregationMethod: AggregationMethod = Gradients.AddAggregationMethod, colocateGradientsWithOps: Boolean = false, iteration: Option[variables.Variable[I]] = None, name: String = "Minimize")(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T], arg2: LongDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): UntypedOp
- Definition Classes
- Optimizer
- Annotations
- @throws( ... )
- val momentum: Float
-
var
momentumTensor: Output[Float]
- Attributes
- protected
- val name: String
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
final
val
nonSlotVariables: Map[(String, Option[core.Graph]), variables.Variable[Any]]
- Attributes
- protected
- Definition Classes
- Optimizer
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
- def prepare[I](iteration: Option[variables.Variable[I]])(implicit arg0: core.types.TF[I], arg1: core.types.IsIntOrLong[I]): Unit
- val rho: Float
-
var
rhoTensor: Output[Float]
- Attributes
- protected
-
final
def
slotNames: Set[String]
- Attributes
- protected
- Definition Classes
- Optimizer
-
final
val
slots: Map[String, Map[variables.Variable[Any], variables.Variable[Any]]]
- Attributes
- protected
- Definition Classes
- Optimizer
-
final
def
state: Seq[variables.Variable[Any]]
- Definition Classes
- Optimizer
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
-
def
toString(): String
- Definition Classes
- AnyRef → Any
- val useLocking: Boolean
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @throws( ... )
-
final
def
zerosSlot[T](name: String, variable: variables.Variable[T], variableScope: String)(implicit arg0: core.types.TF[T]): variables.Variable[T]
- Attributes
- protected
- Definition Classes
- Optimizer