trait NN extends AnyRef
Linear Supertypes
Known Subclasses
Ordering
- Alphabetic
- By Inheritance
Inherited
- NN
- AnyRef
- Any
- Hide All
- Show All
Visibility
- Public
- All
Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def addBias[T](value: Output[T], bias: Output[T], cNNDataFormat: CNNDataFormat = CNNDataFormat.default, name: String = "AddBias")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
-
def
addBiasGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): (Output[T], Output[T])
- Attributes
- protected
-
def
addBiasHessian[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- Attributes
- protected
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
- def batchNormalization[T](x: Output[T], mean: Output[T], variance: Output[T], offset: Option[Output[T]] = None, scale: Option[Output[T]] = None, epsilon: Output[T], name: String = "BatchNormalization")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
-
def
clone(): AnyRef
- Attributes
- protected[java.lang]
- Definition Classes
- AnyRef
- Annotations
- @native() @throws( classOf[java.lang.CloneNotSupportedException] )
- def conv2D[T](input: Output[T], filter: Output[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true, name: String = "Conv2D")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- def conv2DBackpropFilter[T](input: Output[T], filterSizes: Output[Int], outputGradient: Output[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true, name: String = "Conv2DBackpropFilter")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- def conv2DBackpropInput[T](inputSizes: Output[Int], filter: Output[T], outputGradient: Output[T], stride1: Long, stride2: Long, padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, dilations: (Int, Int, Int, Int) = (1, 1, 1, 1), useCuDNNOnGPU: Boolean = true, name: String = "Conv2DBackpropInput")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
-
def
conv2DGradient[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): (Output[T], Output[T])
- Attributes
- protected
- def crelu[T](input: Output[T], axis: Output[Int] = -1, name: String = "CReLU")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
-
def
dropout[T, I](input: Output[T], keepProbability: Float, scaleOutput: Boolean = true, noiseShape: Output[I] = null, seed: Option[Int] = None, name: String = "Dropout")(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Output[T]
- Annotations
- @throws( ... )
- def dynamicDropout[T, I](input: Output[T], keepProbability: Output[T], scaleOutput: Boolean = true, noiseShape: Output[I] = null, seed: Option[Int] = None, name: String = "Dropout")(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Output[T]
- def elu[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "ELU")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[OL, T]): OL[T]
-
def
eluGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- Attributes
- protected
-
def
eluHessian[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[T])
- Attributes
- protected
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
equals(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
def
finalize(): Unit
- Attributes
- protected[java.lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
-
def
fusedBatchNormalization[T](x: Output[T], scale: Output[Float], offset: Output[Float], mean: Option[Output[Float]] = None, variance: Option[Output[Float]] = None, epsilon: Float = 0.0001f, dataFormat: CNNDataFormat = NWCFormat, isTraining: Boolean = true, name: String = "FusedBatchNormalization")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): (Output[T], Output[Float], Output[Float], Output[Float], Output[Float])
- Annotations
- @throws( ... )
-
def
fusedBatchNormalizationGradient[T](op: Op[(Output[T], Output[Float], Output[Float], Output[Float], Output[Float]), (Output[T], Output[Float], Output[Float], Output[Float], Output[Float])], outputGradient: (Output[T], Output[Float], Output[Float], Output[Float], Output[Float]))(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): (Output[T], Output[Float], Output[Float], Output[Float], Output[Float])
- Attributes
- protected
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
def
hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
- def inTopK[I](predictions: Output[Float], targets: Output[I], k: Output[I], name: String = "InTopK")(implicit arg0: core.types.TF[I], arg1: core.types.IsIntOrLong[I]): Output[Boolean]
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def l2Loss[T](input: Output[T], name: String = "L2Loss")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
-
def
l2LossGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
- def l2Normalize[T, I](x: Output[T], axes: Output[I], epsilon: Float = 1e-12f, name: String = "L2Normalize")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
- def linear[T](x: Output[T], weights: Output[T], bias: Output[T] = null, name: String = "Linear")(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Output[T]
- def localResponseNormalization[T](input: Output[T], depthRadius: Int = 5, bias: Float = 1.0f, alpha: Float = 1.0f, beta: Float = 0.5f, name: String = "LocalResponseNormalization")(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrHalfOrFloat[T]): Output[T]
- def logPoissonLoss[T](logPredictions: Output[T], targets: Output[T], computeFullLoss: Boolean = false, name: String = "LogPoissonLoss")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- def logSoftmax[T](logits: Output[T], axis: Int = -1, name: String = "LogSoftmax")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
-
def
logSoftmaxGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
- def lrn[T](input: Output[T], depthRadius: Int = 5, bias: Float = 1.0f, alpha: Float = 1.0f, beta: Float = 0.5f, name: String = "LRN")(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrHalfOrFloat[T]): Output[T]
-
def
lrnGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrHalfOrFloat[T]): Output[T]
- Attributes
- protected
- def maxPool[T](input: Output[T], windowSize: Output[Int], strides: Output[Int], padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, name: String = "MaxPool")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- def maxPoolGrad[T](originalInput: Output[T], originalOutput: Output[T], outputGradient: Output[T], windowSize: Output[Int], strides: Output[Int], padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, name: String = "MaxPoolGrad")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
- def maxPoolGradGrad[T](originalInput: Output[T], originalOutput: Output[T], outputGradient: Output[T], windowSize: Output[Int], strides: Output[Int], padding: ConvPaddingMode, dataFormat: CNNDataFormat = CNNDataFormat.default, name: String = "MaxPoolGradGrad")(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Output[T]
-
def
maxPoolGradient[T](op: Op[(Output[T], Output[Int], Output[Int]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): (Output[T], Output[Int], Output[Int])
- Attributes
- protected
-
def
maxPoolHessian[T](op: Op[(Output[T], Output[T], Output[T], Output[Int], Output[Int]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): (Output[T], Output[T], Output[T], Output[Int], Output[Int])
- Attributes
- protected
-
def
maxPoolHessianGradient[T](op: Op[(Output[T], Output[T], Output[T], Output[Int], Output[Int]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): (Output[T], Output[T], Output[T], Output[Int], Output[Int])
- Attributes
- protected
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
- def relu[T](input: Output[T], alpha: Float = 0.0f, name: String = "ReLU")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- def relu6[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "ReLU6")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[OL, T]): OL[T]
-
def
relu6Gradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- Attributes
- protected
-
def
relu6Hessian[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[T])
- Attributes
- protected
-
def
reluGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- Attributes
- protected
-
def
reluHessian[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[T])
- Attributes
- protected
- def selu[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "SELU")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[OL, T]): OL[T]
-
def
seluGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Output[T]
- Attributes
- protected
-
def
seluHessian[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[T])
- Attributes
- protected
-
def
sequenceLoss[T, L](logits: Output[T], labels: Output[L], lossFn: (Output[T], Output[L]) ⇒ Output[T], weights: Output[T] = null, averageAcrossTimeSteps: Boolean = true, averageAcrossBatch: Boolean = true, name: String = "SequenceLoss")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], arg2: core.types.TF[L]): Output[T]
- Annotations
- @throws( ... )
- def sigmoidCrossEntropy[T](logits: Output[T], labels: Output[T], weights: Output[T] = null, name: String = "SigmoidCrossEntropy")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- def softmax[T](logits: Output[T], axis: Int = -1, name: String = "Softmax")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- def softmaxCrossEntropy[T](logits: Output[T], labels: Output[T], axis: Int = -1, name: String = "SoftmaxCrossEntropy")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
-
def
softmaxCrossEntropyGradient[T](op: Op[(Output[T], Output[T]), (Output[T], Output[T])], outputGradient: (Output[T], Output[T]))(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): (Output[T], Output[T])
- Attributes
- protected
-
def
softmaxGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
-
def
softmaxHelper[T](logits: Output[T], opType: String, axis: Int = -1, name: String = "Softmax")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
- def softplus[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "Softplus")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], ev: Aux[OL, T]): OL[T]
-
def
softplusGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
-
def
softplusHessian[T](op: Op[(Output[T], Output[T]), Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): (Output[T], Output[T])
- Attributes
- protected
- def softsign[T, OL[A] <: OutputLike[A]](input: OL[T], name: String = "Softsign")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], ev: Aux[OL, T]): OL[T]
-
def
softsignGradient[T](op: Op[Output[T], Output[T]], outputGradient: Output[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T]): Output[T]
- Attributes
- protected
- def sparseSoftmaxCrossEntropy[T, I](logits: Output[T], labels: Output[I], axis: Int = -1, name: String = "SparseSoftmaxCrossEntropy")(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Output[T]
-
def
sparseSoftmaxCrossEntropyGradient[T, I](op: Op[(Output[T], Output[I]), (Output[T], Output[T])], outputGradient: (Output[T], Output[T]))(implicit arg0: core.types.TF[T], arg1: core.types.IsDecimal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): (Output[T], Output[I])
- Attributes
- protected
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
-
def
toString(): String
- Definition Classes
- AnyRef → Any
- def topK[T](input: Output[T], k: Output[Int], sorted: Boolean = true, name: String = "TopK")(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[Int])
-
def
topKGradient[T](op: Op[(Output[T], Output[Int]), (Output[T], Output[Int])], outputGradient: (Output[T], Output[Int]))(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): (Output[T], Output[Int])
- Attributes
- protected
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.InterruptedException] )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.InterruptedException] )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @throws( classOf[java.lang.InterruptedException] )