object Math extends Math
Linear Supertypes
Ordering
- Alphabetic
- By Inheritance
Inherited
- Math
- Math
- AnyRef
- Any
- Hide All
- Show All
Visibility
- Public
- All
Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
def
abs[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
acos[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
acosh[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
add[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
addN[T](inputs: Seq[Tensor[T]])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[T]
- Definition Classes
- Math
-
def
all[I](input: Tensor[Boolean], axes: Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: IntDefault[I], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): Tensor[Boolean]
- Definition Classes
- Math
-
def
angleDouble[TL[A] <: TensorLike[A]](input: TL[core.types.ComplexDouble], name: String = "Angle")(implicit ev: Aux[TL, core.types.ComplexDouble]): TL[Double]
- Definition Classes
- Math
-
def
angleFloat[TL[A] <: TensorLike[A]](input: TL[core.types.ComplexFloat], name: String = "Angle")(implicit ev: Aux[TL, core.types.ComplexFloat]): TL[Float]
- Definition Classes
- Math
-
def
any[I](input: Tensor[Boolean], axes: Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: IntDefault[I], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): Tensor[Boolean]
- Definition Classes
- Math
-
def
approximatelyEqual[T](x: Tensor[T], y: Tensor[T], tolerance: Float = 0.00001f)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[Boolean]
- Definition Classes
- Math
-
def
argmax[T, I, IR](input: Tensor[T], axes: Tensor[I], outputDataType: core.types.DataType[IR])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I], arg4: core.types.TF[IR], arg5: core.types.IsIntOrLong[IR]): Tensor[IR]
- Definition Classes
- Math
-
def
argmax[T, I](input: Tensor[T], axes: Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[Long]
- Definition Classes
- Math
-
def
argmin[T, I, IR](input: Tensor[T], axes: Tensor[I], outputDataType: core.types.DataType[IR])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I], arg4: core.types.TF[IR], arg5: core.types.IsIntOrLong[IR]): Tensor[IR]
- Definition Classes
- Math
-
def
argmin[T, I](input: Tensor[T], axes: Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[Long]
- Definition Classes
- Math
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
-
def
asin[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
asinh[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
atan[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
atan2[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Tensor[T]
- Definition Classes
- Math
-
def
atanh[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
binCount[T](input: Tensor[Int], dataType: core.types.DataType[T], weights: Tensor[T] = null, minLength: Tensor[Int] = null, maxLength: Tensor[Int] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsIntOrLongOrFloatOrDouble[T]): Tensor[T]
- Definition Classes
- Math
-
def
bucketize[T](input: Tensor[T], boundaries: Seq[Float])(implicit arg0: core.types.TF[T], arg1: core.types.IsIntOrLongOrFloatOrDouble[T]): Tensor[T]
- Definition Classes
- Math
-
def
ceil[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
clone(): AnyRef
- Attributes
- protected[java.lang]
- Definition Classes
- AnyRef
- Annotations
- @native() @throws( ... )
-
def
complexDouble(real: Tensor[Double], imag: Tensor[Double]): Tensor[core.types.ComplexDouble]
- Definition Classes
- Math
-
def
complexFloat(real: Tensor[Float], imag: Tensor[Float]): Tensor[core.types.ComplexFloat]
- Definition Classes
- Math
-
def
conjugate[T, TL[A] <: TensorLike[A]](input: TL[T])(implicit arg0: core.types.TF[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
cos[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
cosh[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
countNonZero[T, I](input: Tensor[T], axes: Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Tensor[Long]
- Definition Classes
- Math
-
def
cross[T](a: Tensor[T], b: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T]): Tensor[T]
- Definition Classes
- Math
-
def
cumprod[T, I](input: Tensor[T], axis: Tensor[I], exclusive: Boolean = false, reverse: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
cumsum[T, I](input: Tensor[T], axis: Tensor[I], exclusive: Boolean = false, reverse: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
diag[T](diagonal: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
diagPart[T](input: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
digamma[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
divide[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
equal[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[Boolean]
- Definition Classes
- Math
-
def
equals(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
def
erf[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
erfc[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
exp[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
expm1[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
finalize(): Unit
- Attributes
- protected[java.lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
-
def
floor[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
floorMod[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
def
greater[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[Boolean]
- Definition Classes
- Math
-
def
greaterEqual[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[Boolean]
- Definition Classes
- Math
-
def
hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
def
igamma[T](a: Tensor[T], x: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Tensor[T]
- Definition Classes
- Math
-
def
igammac[T](a: Tensor[T], x: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Tensor[T]
- Definition Classes
- Math
-
def
imagDouble[TL[A] <: TensorLike[A]](input: TL[core.types.ComplexDouble], name: String = "Imag")(implicit ev: Aux[TL, core.types.ComplexDouble]): TL[Double]
- Definition Classes
- Math
-
def
imagFloat[TL[A] <: TensorLike[A]](input: TL[core.types.ComplexFloat], name: String = "Imag")(implicit ev: Aux[TL, core.types.ComplexFloat]): TL[Float]
- Definition Classes
- Math
-
def
incompleteBeta[T](a: Tensor[T], b: Tensor[T], x: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Tensor[T]
- Definition Classes
- Math
-
def
isFinite[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[Boolean]
- Definition Classes
- Math
-
def
isInf[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[Boolean]
- Definition Classes
- Math
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
-
def
isNaN[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[Boolean]
- Definition Classes
- Math
-
def
less[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[Boolean]
- Definition Classes
- Math
-
def
lessEqual[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[Boolean]
- Definition Classes
- Math
-
def
linspace[T, I](start: Tensor[T], stop: Tensor[T], numberOfValues: Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsTruncatedHalfOrFloatOrDouble[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
log[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
log1p[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
logGamma[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
logSigmoid[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
logSumExp[T](input: Tensor[T], axes: Seq[Int] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
logicalAnd(x: Tensor[Boolean], y: Tensor[Boolean]): Tensor[Boolean]
- Definition Classes
- Math
-
def
logicalNot(x: Tensor[Boolean]): Tensor[Boolean]
- Definition Classes
- Math
-
def
logicalOr(x: Tensor[Boolean], y: Tensor[Boolean]): Tensor[Boolean]
- Definition Classes
- Math
-
def
logicalXOr(x: Tensor[Boolean], y: Tensor[Boolean]): Tensor[Boolean]
- Definition Classes
- Math
-
def
magnitudeDouble[TL[A] <: TensorLike[A]](input: TL[core.types.ComplexDouble], name: String = "Magnitude")(implicit ev: Aux[TL, core.types.ComplexDouble]): TL[Double]
- Definition Classes
- Math
-
def
magnitudeFloat[TL[A] <: TensorLike[A]](input: TL[core.types.ComplexFloat], name: String = "Magnitude")(implicit ev: Aux[TL, core.types.ComplexFloat]): TL[Float]
- Definition Classes
- Math
-
def
matmul[T](a: Tensor[T], b: Tensor[T], transposeA: Boolean = false, transposeB: Boolean = false, conjugateA: Boolean = false, conjugateB: Boolean = false, aIsSparse: Boolean = false, bIsSparse: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
matrixBandPart[T, I](input: Tensor[T], numSubDiagonals: Tensor[I], numSuperDiagonals: Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.TF[I], arg2: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
matrixDiag[T](diagonal: Tensor[T])(implicit arg0: core.types.TF[T]): Tensor[T]
- Definition Classes
- Math
-
def
matrixDiagPart[T](input: Tensor[T])(implicit arg0: core.types.TF[T]): Tensor[T]
- Definition Classes
- Math
-
def
matrixSetDiag[T](input: Tensor[T], diagonal: Tensor[T])(implicit arg0: core.types.TF[T]): Tensor[T]
- Definition Classes
- Math
-
def
max[T, I](input: Tensor[T], axes: Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
maximum[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
mean[T, I](input: Tensor[T], axes: Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
min[T, I](input: Tensor[T], axes: Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
minimum[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
mod[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
multiply[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
negate[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
notEqual[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[Boolean]
- Definition Classes
- Math
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
def
polygamma[T](n: Tensor[T], x: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Tensor[T]
- Definition Classes
- Math
-
def
pow[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
prod[T, I](input: Tensor[T], axes: Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
range[T](start: Tensor[T], limit: Tensor[T], delta: Tensor[T] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[T]
- Definition Classes
- Math
-
def
realDivide[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
realDouble[TL[A] <: TensorLike[A]](input: TL[core.types.ComplexDouble], name: String = "Real")(implicit ev: Aux[TL, core.types.ComplexDouble]): TL[Double]
- Definition Classes
- Math
-
def
realFloat[TL[A] <: TensorLike[A]](input: TL[core.types.ComplexFloat], name: String = "Real")(implicit ev: Aux[TL, core.types.ComplexFloat]): TL[Float]
- Definition Classes
- Math
-
def
reciprocal[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
round[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
roundInt[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsHalfOrFloatOrDouble[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
rsqrt[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
scalarMul[T, TL[A] <: TensorLike[A]](scalar: Tensor[T], tensor: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
segmentMax[T, I](data: Tensor[T], segmentIndices: Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
segmentMean[T, I](data: Tensor[T], segmentIndices: Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
segmentMin[T, I](data: Tensor[T], segmentIndices: Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
segmentProd[T, I](data: Tensor[T], segmentIndices: Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
segmentSum[T, I](data: Tensor[T], segmentIndices: Tensor[I])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I], arg3: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
def
select[T](condition: Tensor[Boolean], x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T]): Tensor[T]
- Definition Classes
- Math
-
def
sigmoid[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
sign[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
sin[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
sinh[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
sparseSegmentMean[T, I1, I2](data: Tensor[T], indices: Tensor[I1], segmentIndices: Tensor[Int], numSegments: Tensor[I2] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: IntDefault[I2], arg5: core.types.TF[I2], arg6: core.types.IsIntOrLong[I2]): Tensor[T]
- Definition Classes
- Math
-
def
sparseSegmentSum[T, I1, I2](data: Tensor[T], indices: Tensor[I1], segmentIndices: Tensor[Int], numSegments: Tensor[I2] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: IntDefault[I2], arg5: core.types.TF[I2], arg6: core.types.IsIntOrLong[I2]): Tensor[T]
- Definition Classes
- Math
-
def
sparseSegmentSumSqrtN[T, I1, I2](data: Tensor[T], indices: Tensor[I1], segmentIndices: Tensor[Int], numSegments: Tensor[I2] = null)(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: IntDefault[I2], arg5: core.types.TF[I2], arg6: core.types.IsIntOrLong[I2]): Tensor[T]
- Definition Classes
- Math
-
def
sqrt[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
square[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
squaredDifference[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
subtract[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
sum[T, I](input: Tensor[T], axes: Tensor[I] = null, keepDims: Boolean = false)(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: IntDefault[I], arg3: core.types.TF[I], arg4: core.types.IsIntOrLong[I]): Tensor[T]
- Definition Classes
- Math
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
-
def
tan[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
tanh[T, TL[A] <: TensorLike[A]](x: TL[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T], ev: Aux[TL, T]): TL[T]
- Definition Classes
- Math
-
def
tensorDot[T](a: Tensor[T], b: Tensor[T], axesA: Tensor[Int], axesB: Tensor[Int])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
- Annotations
- @throws( ... )
-
def
tensorDot[T](a: Tensor[T], b: Tensor[T], numAxes: Tensor[Int])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
- Annotations
- @throws( ... )
-
def
toString(): String
- Definition Classes
- AnyRef → Any
-
def
trace[T](input: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[T]
- Definition Classes
- Math
-
def
truncateDivide[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
truncateMod[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
-
def
unsortedSegmentMax[T, I1, I2](data: Tensor[T], segmentIndices: Tensor[I1], segmentsNumber: Tensor[I2])(implicit arg0: core.types.TF[T], arg1: core.types.IsReal[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): Tensor[T]
- Definition Classes
- Math
-
def
unsortedSegmentSum[T, I1, I2](data: Tensor[T], segmentIndices: Tensor[I1], segmentsNumber: Tensor[I2])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T], arg2: core.types.TF[I1], arg3: core.types.IsIntOrLong[I1], arg4: core.types.TF[I2], arg5: core.types.IsIntOrLong[I2]): Tensor[T]
- Definition Classes
- Math
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @throws( ... )
-
def
zerosFraction[T](input: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNumeric[T]): Tensor[Float]
- Definition Classes
- Math
-
def
zeta[T](x: Tensor[T], q: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsFloatOrDouble[T]): Tensor[T]
- Definition Classes
- Math
Deprecated Value Members
-
def
floorDivide[T](x: Tensor[T], y: Tensor[T])(implicit arg0: core.types.TF[T], arg1: core.types.IsNotQuantized[T]): Tensor[T]
- Definition Classes
- Math
- Annotations
- @deprecated
- Deprecated
(Since version 0.1) Use `truncateDivide` instead.