Package ai.chronon.spark

package spark

Ordering
  1. Alphabetic
Visibility
  1. Public
  2. All

Type Members

  1. class Analyzer extends AnyRef
  2. class Args extends ScallopConf
  3. sealed trait BaseKvRdd extends AnyRef
  4. case class BootstrapInfo(joinConf: api.Join, joinParts: Seq[JoinPartMetadata], externalParts: Seq[ExternalPartMetadata], derivations: Array[StructField], hashToSchema: Map[String, Array[StructField]]) extends Product with Serializable
  5. class ChrononKryoRegistrator extends KryoRegistrator
  6. class CpcSketchKryoSerializer extends Serializer[CpcSketch]
  7. sealed trait DataRange extends AnyRef
  8. class DummyExtensions extends (SparkSessionExtensions) ⇒ Unit
  9. case class ExternalPartMetadata(externalPart: ExternalPart, keySchema: Array[StructField], valueSchema: Array[StructField]) extends Product with Serializable
  10. class GroupBy extends Serializable
  11. class GroupByUpload extends Serializable
  12. sealed case class IncompatibleSchemaException(inconsistencies: Seq[(String, DataType, DataType)]) extends Exception with Product with Serializable
  13. class ItemSketchSerializable extends Serializable
  14. class ItemsSketchKryoSerializer extends Serializer[ItemSketchSerializable]
  15. class Join extends JoinBase
  16. abstract class JoinBase extends AnyRef
  17. case class JoinPartMetadata(joinPart: JoinPart, keySchema: Array[StructField], valueSchema: Array[StructField], derivationDependencies: Map[StructField, Seq[StructField]]) extends Product with Serializable
  18. case class KeyWithHash(data: Array[Any], hash: Array[Byte], hashInt: Int) extends Serializable with Product
  19. case class KvRdd(data: RDD[(Array[Any], Array[Any])], keySchema: StructType, valueSchema: StructType)(implicit sparkSession: SparkSession) extends BaseKvRdd with Product with Serializable
  20. class LabelJoin extends AnyRef
  21. class LocalTableExporter extends AnyRef
  22. class LogFlattenerJob extends Serializable

    The purpose of LogFlattenerJob is to unpack serialized Avro data from online requests, flatten each field (both keys and values) into individual columns, and save the result to an offline "flattened" log table.

    Steps:
      1. Determine the unfilled range and pull raw logs from the partitioned log table.
      2. Fetch joinCodecs for all unique schema_hash values present in the logs.
      3. Build a merged schema from all schema versions; this is used as the output schema.
      4. Unpack each row so that it adheres to the output schema.
      5. Save the schema info in the flattened log table's properties (cumulatively).

  23. case class LoggingSchema(keyCodec: AvroCodec, valueCodec: AvroCodec) extends Product with Serializable
  24. case class PartitionRange(start: String, end: String)(implicit tableUtils: TableUtils) extends DataRange with Ordered[PartitionRange] with Product with Serializable
  25. class StagingQuery extends AnyRef
  26. case class TableUtils(sparkSession: SparkSession) extends Product with Serializable
  27. case class TimeRange(start: Long, end: Long)(implicit tableUtils: TableUtils) extends DataRange with Product with Serializable
  28. case class TimedKvRdd(data: RDD[(Array[Any], Array[Any], Long)], keySchema: StructType, valueSchema: StructType)(implicit sparkSession: SparkSession) extends BaseKvRdd with Product with Serializable

Value Members

  1. object BootstrapInfo extends Serializable
  2. object Comparison
  3. object Driver
  4. object Extensions
  5. object FastHashing
  6. object GenericRowHandler
  7. object GroupBy extends Serializable
  8. object GroupByUpload extends Serializable
  9. object JoinUtils
  10. object LocalDataLoader
  11. object LocalTableExporter
  12. object LogFlattenerJob extends Serializable
  13. object LogUtils
  14. object LoggingSchema extends Serializable
  15. object MetadataExporter
  16. object SparkConstants
  17. object SparkSessionBuilder
  18. object StagingQuery

Ungrouped