object GpuOverrides extends Logging with Serializable
- Alphabetic
- By Inheritance
- GpuOverrides
- Serializable
- Serializable
- Logging
- AnyRef
- Any
- Hide All
- Show All
- Public
- All
Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- val CASE_MODIFICATION_INCOMPAT: String
- val FLOAT_DIFFERS_GROUP_INCOMPAT: String
- val UTC_TIMEZONE_ID: ZoneId
- def addListener(listener: GpuOverridesListener): Unit
- def areAllSupportedTypes(types: DataType*): Boolean
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
-
def
checkAndTagAnsiAgg(checkType: Option[DataType], meta: AggExprMeta[_]): Unit
Helper function specific to ANSI mode for the aggregate functions that should fall back, since we don't have the same overflow checks that Spark provides on the CPU
Helper function specific to ANSI mode for the aggregate functions that should fall back, since we don't have the same overflow checks that Spark provides on the CPU
- checkType
Something other than None triggers logic to detect whether the agg should fall back in ANSI mode. Otherwise (None), it's an automatic fallback.
- meta
agg expression meta
- def checkAndTagFloatAgg(dataType: DataType, conf: RapidsConf, meta: RapidsMeta[_, _, _]): Unit
-
def
clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
- val commonDataWriteCmds: Map[Class[_ <: DataWritingCommand], DataWritingCommandRule[_ <: DataWritingCommand]]
- val commonExecs: Map[Class[_ <: SparkPlan], ExecRule[_ <: SparkPlan]]
- val commonExpressions: Map[Class[_ <: Expression], ExprRule[_ <: Expression]]
- val commonRunnableCmds: Map[Class[_ <: RunnableCommand], RunnableCommandRule[_ <: RunnableCommand]]
- val commonScans: Map[Class[_ <: Scan], ScanRule[_ <: Scan]]
- def dataWriteCmd[INPUT <: DataWritingCommand](desc: String, doWrap: (INPUT, RapidsConf, Option[RapidsMeta[_, _, _]], DataFromReplacementRule) ⇒ DataWritingCommandMeta[INPUT])(implicit tag: ClassTag[INPUT]): DataWritingCommandRule[INPUT]
- val dataWriteCmds: Map[Class[_ <: DataWritingCommand], DataWritingCommandRule[_ <: DataWritingCommand]]
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
equals(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def exec[INPUT <: SparkPlan](desc: String, pluginChecks: ExecChecks, doWrap: (INPUT, RapidsConf, Option[RapidsMeta[_, _, _]], DataFromReplacementRule) ⇒ SparkPlanMeta[INPUT])(implicit tag: ClassTag[INPUT]): ExecRule[INPUT]
- lazy val execs: Map[Class[_ <: SparkPlan], ExecRule[_ <: SparkPlan]]
-
def
explainPotentialGpuPlan(df: DataFrame, explain: String): String
Only run the explain and don't actually convert or run on GPU.
Only run the explain and don't actually convert or run on GPU. This gets the plan from the dataframe so it's after catalyst has run through all the rules to modify the plan. This means we have to try to undo some of the last rules to make it close to when the columnar rules would normally run on the plan.
- def expr[INPUT <: Expression](desc: String, pluginChecks: ExprChecks, doWrap: (INPUT, RapidsConf, Option[RapidsMeta[_, _, _]], DataFromReplacementRule) ⇒ BaseExprMeta[INPUT])(implicit tag: ClassTag[INPUT]): ExprRule[INPUT]
- val expressions: Map[Class[_ <: Expression], ExprRule[_ <: Expression]]
-
def
extractLit(exp: Expression): Option[Literal]
- Annotations
- @tailrec()
- def extractStringLit(exp: Expression): Option[String]
- lazy val fileFormats: Map[FileFormatType, Map[FileFormatOp, FileFormatChecks]]
-
def
finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
-
def
fixupCpuReusedExchanges(plan: SparkPlan): SparkPlan
On some Spark platforms, AQE planning can result in old CPU exchanges being placed in the plan even after they have been replaced previously.
On some Spark platforms, AQE planning can result in old CPU exchanges being placed in the plan even after they have been replaced previously. This looks for subquery reuses of CPU exchanges that can be replaced with recently planned GPU exchanges that match the original CPU plan
-
def
fixupReusedExchangeOutputs(plan: SparkPlan): SparkPlan
Searches the plan for ReusedExchangeExec instances containing a GPU shuffle where the output types between the two plan nodes do not match.
Searches the plan for ReusedExchangeExec instances containing a GPU shuffle where the output types between the two plan nodes do not match. In such a case the ReusedExchangeExec will be updated to match the GPU shuffle output types.
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
- def getTimeParserPolicy: TimeParserPolicy
- val gpuCommonTypes: TypeSig
-
def
hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
def
initializeLogIfNecessary(isInterpreter: Boolean, silent: Boolean): Boolean
- Attributes
- protected
- Definition Classes
- Logging
-
def
initializeLogIfNecessary(isInterpreter: Boolean): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
isAnyStringLit(expressions: Seq[Expression]): Boolean
Checks to see if any expressions are a String Literal
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def isLit(exp: Expression): Boolean
- def isNullLit(lit: Literal): Boolean
- def isOfType(l: Option[Literal], t: DataType): Boolean
- def isOrContainsDateOrTimestamp(dataType: DataType): Boolean
- def isOrContainsFloatingPoint(dataType: DataType): Boolean
- def isOrContainsTimestamp(dataType: DataType): Boolean
- def isStringLit(exp: Expression): Boolean
- def isSupportedStringReplacePattern(exp: Expression): Boolean
- def isSupportedStringReplacePattern(strLit: String): Boolean
-
def
isSupportedType(dataType: DataType, allowNull: Boolean = false, allowDecimal: Boolean = false, allowBinary: Boolean = false, allowCalendarInterval: Boolean = false, allowArray: Boolean = false, allowStruct: Boolean = false, allowStringMaps: Boolean = false, allowMaps: Boolean = false, allowNesting: Boolean = false): Boolean
Is this particular type supported or not.
Is this particular type supported or not.
- dataType
the type to check
- allowNull
should NullType be allowed
- allowDecimal
should DecimalType be allowed
- allowBinary
should BinaryType be allowed
- allowCalendarInterval
should CalendarIntervalType be allowed
- allowArray
should ArrayType be allowed
- allowStruct
should StructType be allowed
- allowStringMaps
should a Map[String, String] specifically be allowed
- allowMaps
should MapType be allowed generically
- allowNesting
should nested types like array, struct, and map allow nested types within them, or only primitive types.
- returns
true if it is allowed else false
-
def
isTraceEnabled(): Boolean
- Attributes
- protected
- Definition Classes
- Logging
- def isUTCTimezone(): Boolean
- def isUTCTimezone(timezoneIdStr: String): Boolean
- def isUTCTimezone(timezoneId: ZoneId): Boolean
- val jsonStructReadTypes: TypeSig
-
def
log: Logger
- Attributes
- protected
- Definition Classes
- Logging
-
def
logDebug(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logDebug(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logDuration[T](shouldLog: Boolean, msg: (Double) ⇒ String)(block: ⇒ T): T
Provides a way to log an info message about how long an operation took in milliseconds.
-
def
logError(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logError(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logInfo(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logInfo(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logName: String
- Attributes
- protected
- Definition Classes
- Logging
-
def
logTrace(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logTrace(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logWarning(msg: ⇒ String, throwable: Throwable): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
def
logWarning(msg: ⇒ String): Unit
- Attributes
- protected
- Definition Classes
- Logging
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
neverReplaceExec[INPUT <: SparkPlan](desc: String)(implicit tag: ClassTag[INPUT]): ExecRule[INPUT]
Create an exec rule that should never be replaced, because it is something that should always run on the CPU, or should just be ignored totally for what ever reason.
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
- def part[INPUT <: Partitioning](desc: String, checks: PartChecks, doWrap: (INPUT, RapidsConf, Option[RapidsMeta[_, _, _]], DataFromReplacementRule) ⇒ PartMeta[INPUT])(implicit tag: ClassTag[INPUT]): PartRule[INPUT]
- val parts: Map[Class[_ <: Partitioning], PartRule[_ <: Partitioning]]
- val pluginSupportedOrderableSig: TypeSig
- val postColToRowProjection: TreeNodeTag[Seq[NamedExpression]]
- val preRowToColProjection: TreeNodeTag[Seq[NamedExpression]]
-
def
probablyGpuPlan(adaptivePlan: AdaptiveSparkPlanExec, conf: RapidsConf): Boolean
Tries to predict whether an adaptive plan will end up with data on the GPU or not.
- val regexMetaChars: String
- def removeAllListeners(): Unit
-
def
removeExtraneousShuffles(plan: SparkPlan, conf: RapidsConf): SparkPlan
Removes unnecessary CPU shuffles that Spark can add to the plan when it does not realize a GPU partitioning satisfies a CPU distribution because CPU and GPU expressions are not semantically equal.
- def removeListener(listener: GpuOverridesListener): Unit
- def runnableCmd[INPUT <: RunnableCommand](desc: String, doWrap: (INPUT, RapidsConf, Option[RapidsMeta[_, _, _]], DataFromReplacementRule) ⇒ RunnableCommandMeta[INPUT])(implicit tag: ClassTag[INPUT]): RunnableCommandRule[INPUT]
- val runnableCmds: Map[Class[_ <: RunnableCommand], RunnableCommandRule[_ <: RunnableCommand]]
- def scan[INPUT <: Scan](desc: String, doWrap: (INPUT, RapidsConf, Option[RapidsMeta[_, _, _]], DataFromReplacementRule) ⇒ ScanMeta[INPUT])(implicit tag: ClassTag[INPUT]): ScanRule[INPUT]
- val scans: Map[Class[_ <: Scan], ScanRule[_ <: Scan]]
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
-
def
toString(): String
- Definition Classes
- AnyRef → Any
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
- def wrapAndTagPlan(plan: SparkPlan, conf: RapidsConf): SparkPlanMeta[SparkPlan]
- def wrapDataWriteCmds[INPUT <: DataWritingCommand](writeCmd: INPUT, conf: RapidsConf, parent: Option[RapidsMeta[_, _, _]]): DataWritingCommandMeta[INPUT]
- def wrapExpr[INPUT <: Expression](expr: INPUT, conf: RapidsConf, parent: Option[RapidsMeta[_, _, _]]): BaseExprMeta[INPUT]
- def wrapPart[INPUT <: Partitioning](part: INPUT, conf: RapidsConf, parent: Option[RapidsMeta[_, _, _]]): PartMeta[INPUT]
- def wrapPlan[INPUT <: SparkPlan](plan: INPUT, conf: RapidsConf, parent: Option[RapidsMeta[_, _, _]]): SparkPlanMeta[INPUT]
- def wrapRunnableCmd[INPUT <: RunnableCommand](cmd: INPUT, conf: RapidsConf, parent: Option[RapidsMeta[_, _, _]]): RunnableCommandMeta[INPUT]
- def wrapScan[INPUT <: Scan](scan: INPUT, conf: RapidsConf, parent: Option[RapidsMeta[_, _, _]]): ScanMeta[INPUT]