Instance Constructors
-
new
CassandraPartitionedRDD(prev: RDD[T], keyspace: String, table: String)(implicit ct: ClassTag[T])
Value Members
-
final
def
!=(arg0: Any): Boolean
-
final
def
##(): Int
-
def
+(other: String): String
-
def
++(other: RDD[T]): RDD[T]
-
-
final
def
==(arg0: Any): Boolean
-
def
aggregate[U](zeroValue: U)(seqOp: (U, T) ⇒ U, combOp: (U, U) ⇒ U)(implicit arg0: ClassTag[U]): U
-
def
aggregateByKey[U](zeroValue: U)(seqOp: (U, V) ⇒ U, combOp: (U, U) ⇒ U)(implicit arg0: ClassTag[U]): RDD[(K, U)]
-
def
aggregateByKey[U](zeroValue: U, numPartitions: Int)(seqOp: (U, V) ⇒ U, combOp: (U, U) ⇒ U)(implicit arg0: ClassTag[U]): RDD[(K, U)]
-
def
aggregateByKey[U](zeroValue: U, partitioner: Partitioner)(seqOp: (U, V) ⇒ U, combOp: (U, U) ⇒ U)(implicit arg0: ClassTag[U]): RDD[(K, U)]
-
final
def
asInstanceOf[T0]: T0
-
-
def
cartesian[U](other: RDD[U])(implicit arg0: ClassTag[U]): RDD[(T, U)]
-
def
checkpoint(): Unit
-
def
clearDependencies(): Unit
-
def
clone(): AnyRef
-
def
coalesce(numPartitions: Int, shuffle: Boolean)(implicit ord: Ordering[T]): RDD[T]
-
def
cogroup[W1, W2, W3](other1: RDD[(K, W1)], other2: RDD[(K, W2)], other3: RDD[(K, W3)], numPartitions: Int): RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))]
-
def
cogroup[W1, W2](other1: RDD[(K, W1)], other2: RDD[(K, W2)], numPartitions: Int): RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))]
-
def
cogroup[W](other: RDD[(K, W)], numPartitions: Int): RDD[(K, (Iterable[V], Iterable[W]))]
-
def
cogroup[W1, W2](other1: RDD[(K, W1)], other2: RDD[(K, W2)]): RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))]
-
def
cogroup[W](other: RDD[(K, W)]): RDD[(K, (Iterable[V], Iterable[W]))]
-
def
cogroup[W1, W2, W3](other1: RDD[(K, W1)], other2: RDD[(K, W2)], other3: RDD[(K, W3)]): RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))]
-
def
cogroup[W1, W2](other1: RDD[(K, W1)], other2: RDD[(K, W2)], partitioner: Partitioner): RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))]
-
def
cogroup[W](other: RDD[(K, W)], partitioner: Partitioner): RDD[(K, (Iterable[V], Iterable[W]))]
-
def
cogroup[W1, W2, W3](other1: RDD[(K, W1)], other2: RDD[(K, W2)], other3: RDD[(K, W3)], partitioner: Partitioner): RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))]
-
def
collect[U](f: PartialFunction[T, U])(implicit arg0: ClassTag[U]): RDD[U]
-
def
collect(): Array[T]
-
def
collectAsMap(): Map[K, V]
-
def
collectAsync(): FutureAction[Seq[T]]
-
def
combineByKey[C](createCombiner: (V) ⇒ C, mergeValue: (C, V) ⇒ C, mergeCombiners: (C, C) ⇒ C): RDD[(K, C)]
-
def
combineByKey[C](createCombiner: (V) ⇒ C, mergeValue: (C, V) ⇒ C, mergeCombiners: (C, C) ⇒ C, numPartitions: Int): RDD[(K, C)]
-
def
combineByKey[C](createCombiner: (V) ⇒ C, mergeValue: (C, V) ⇒ C, mergeCombiners: (C, C) ⇒ C, partitioner: Partitioner, mapSideCombine: Boolean, serializer: Serializer): RDD[(K, C)]
-
def
combineByKeyWithClassTag[C](createCombiner: (V) ⇒ C, mergeValue: (C, V) ⇒ C, mergeCombiners: (C, C) ⇒ C)(implicit ct: ClassTag[C]): RDD[(K, C)]
-
def
combineByKeyWithClassTag[C](createCombiner: (V) ⇒ C, mergeValue: (C, V) ⇒ C, mergeCombiners: (C, C) ⇒ C, numPartitions: Int)(implicit ct: ClassTag[C]): RDD[(K, C)]
-
def
combineByKeyWithClassTag[C](createCombiner: (V) ⇒ C, mergeValue: (C, V) ⇒ C, mergeCombiners: (C, C) ⇒ C, partitioner: Partitioner, mapSideCombine: Boolean, serializer: Serializer)(implicit ct: ClassTag[C]): RDD[(K, C)]
-
def
compute(split: Partition, context: TaskContext): Iterator[T]
-
-
def
count(): Long
-
def
countApprox(timeout: Long, confidence: Double): PartialResult[BoundedDouble]
-
def
countApproxDistinct(relativeSD: Double): Long
-
def
countApproxDistinct(p: Int, sp: Int): Long
-
def
countApproxDistinctByKey(relativeSD: Double): RDD[(K, Long)]
-
def
countApproxDistinctByKey(relativeSD: Double, numPartitions: Int): RDD[(K, Long)]
-
def
countApproxDistinctByKey(relativeSD: Double, partitioner: Partitioner): RDD[(K, Long)]
-
def
countApproxDistinctByKey(p: Int, sp: Int, partitioner: Partitioner): RDD[(K, Long)]
-
def
countAsync(): FutureAction[Long]
-
def
countByKey(): Map[K, Long]
-
def
countByKeyApprox(timeout: Long, confidence: Double): PartialResult[Map[K, BoundedDouble]]
-
def
countByValue()(implicit ord: Ordering[T]): Map[T, Long]
-
def
countByValueApprox(timeout: Long, confidence: Double)(implicit ord: Ordering[T]): PartialResult[Map[T, BoundedDouble]]
-
-
final
def
dependencies: Seq[Dependency[_]]
-
def
distinct(): RDD[T]
-
def
distinct(numPartitions: Int)(implicit ord: Ordering[T]): RDD[T]
-
-
-
-
-
final
def
eq(arg0: AnyRef): Boolean
-
def
equals(arg0: Any): Boolean
-
def
filter(f: (T) ⇒ Boolean): RDD[T]
-
def
filterByRange(lower: K, upper: K): RDD[(K, V)]
-
def
finalize(): Unit
-
def
first(): T
-
def
firstParent[U](implicit arg0: ClassTag[U]): RDD[U]
-
def
flatMap[U](f: (T) ⇒ TraversableOnce[U])(implicit arg0: ClassTag[U]): RDD[U]
-
def
flatMapValues[U](f: (V) ⇒ TraversableOnce[U]): RDD[(K, U)]
-
def
fold(zeroValue: T)(op: (T, T) ⇒ T): T
-
def
foldByKey(zeroValue: V)(func: (V, V) ⇒ V): RDD[(K, V)]
-
def
foldByKey(zeroValue: V, numPartitions: Int)(func: (V, V) ⇒ V): RDD[(K, V)]
-
def
foldByKey(zeroValue: V, partitioner: Partitioner)(func: (V, V) ⇒ V): RDD[(K, V)]
-
def
foreach(f: (T) ⇒ Unit): Unit
-
def
foreachAsync(f: (T) ⇒ Unit): FutureAction[Unit]
-
def
foreachPartition(f: (Iterator[T]) ⇒ Unit): Unit
-
def
foreachPartitionAsync(f: (Iterator[T]) ⇒ Unit): FutureAction[Unit]
-
def
formatted(fmtstr: String): String
-
def
fullOuterJoin[W](other: RDD[(K, W)], numPartitions: Int): RDD[(K, (Option[V], Option[W]))]
-
def
fullOuterJoin[W](other: RDD[(K, W)]): RDD[(K, (Option[V], Option[W]))]
-
def
fullOuterJoin[W](other: RDD[(K, W)], partitioner: Partitioner): RDD[(K, (Option[V], Option[W]))]
-
def
getCheckpointFile: Option[String]
-
final
def
getClass(): Class[_]
-
def
getDependencies: Seq[Dependency[_]]
-
final
def
getNumPartitions: Int
-
def
getPartitions: Array[Partition]
-
def
getPreferredLocations(split: Partition): Seq[String]
-
-
def
glom(): RDD[Array[T]]
-
def
groupBy[K](f: (T) ⇒ K, p: Partitioner)(implicit kt: ClassTag[K], ord: Ordering[K]): RDD[(K, Iterable[T])]
-
def
groupBy[K](f: (T) ⇒ K, numPartitions: Int)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])]
-
def
groupBy[K](f: (T) ⇒ K)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])]
-
def
groupByKey(): RDD[(K, Iterable[V])]
-
def
groupByKey(numPartitions: Int): RDD[(K, Iterable[V])]
-
def
groupByKey(partitioner: Partitioner): RDD[(K, Iterable[V])]
-
def
groupWith[W1, W2, W3](other1: RDD[(K, W1)], other2: RDD[(K, W2)], other3: RDD[(K, W3)]): RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))]
-
def
groupWith[W1, W2](other1: RDD[(K, W1)], other2: RDD[(K, W2)]): RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))]
-
def
groupWith[W](other: RDD[(K, W)]): RDD[(K, (Iterable[V], Iterable[W]))]
-
def
hashCode(): Int
-
val
id: Int
-
def
intersection(other: RDD[T], numPartitions: Int): RDD[T]
-
def
intersection(other: RDD[T], partitioner: Partitioner)(implicit ord: Ordering[T]): RDD[T]
-
def
intersection(other: RDD[T]): RDD[T]
-
def
isCheckpointed: Boolean
-
def
isEmpty(): Boolean
-
final
def
isInstanceOf[T0]: Boolean
-
def
isTraceEnabled(): Boolean
-
final
def
iterator(split: Partition, context: TaskContext): Iterator[T]
-
def
join[W](other: RDD[(K, W)], numPartitions: Int): RDD[(K, (V, W))]
-
def
join[W](other: RDD[(K, W)]): RDD[(K, (V, W))]
-
def
join[W](other: RDD[(K, W)], partitioner: Partitioner): RDD[(K, (V, W))]
-
-
def
keyBy[K](f: (T) ⇒ K): RDD[(K, T)]
-
def
keyByCassandraReplica(keyspaceName: String, tableName: String, partitionKeyMapper: ColumnSelector = PartitionKeyColumns)(implicit connector: CassandraConnector = ..., currentType: ClassTag[T], rwf: RowWriterFactory[T]): RDD[(Set[InetAddress], T)]
-
def
keys: RDD[K]
-
-
def
leftOuterJoin[W](other: RDD[(K, W)], numPartitions: Int): RDD[(K, (V, Option[W]))]
-
def
leftOuterJoin[W](other: RDD[(K, W)]): RDD[(K, (V, Option[W]))]
-
def
leftOuterJoin[W](other: RDD[(K, W)], partitioner: Partitioner): RDD[(K, (V, Option[W]))]
-
-
def
log: Logger
-
def
logDebug(msg: ⇒ String, throwable: Throwable): Unit
-
def
logDebug(msg: ⇒ String): Unit
-
def
logError(msg: ⇒ String, throwable: Throwable): Unit
-
def
logError(msg: ⇒ String): Unit
-
def
logInfo(msg: ⇒ String, throwable: Throwable): Unit
-
def
logInfo(msg: ⇒ String): Unit
-
def
logName: String
-
def
logTrace(msg: ⇒ String, throwable: Throwable): Unit
-
def
logTrace(msg: ⇒ String): Unit
-
def
logWarning(msg: ⇒ String, throwable: Throwable): Unit
-
def
logWarning(msg: ⇒ String): Unit
-
def
lookup(key: K): Seq[V]
-
def
map[U](f: (T) ⇒ U)(implicit arg0: ClassTag[U]): RDD[U]
-
def
mapPartitions[U](f: (Iterator[T]) ⇒ Iterator[U], preservesPartitioning: Boolean)(implicit arg0: ClassTag[U]): RDD[U]
-
def
mapPartitionsWithIndex[U](f: (Int, Iterator[T]) ⇒ Iterator[U], preservesPartitioning: Boolean)(implicit arg0: ClassTag[U]): RDD[U]
-
def
mapValues[U](f: (V) ⇒ U): RDD[(K, U)]
-
def
max()(implicit ord: Ordering[T]): T
-
def
min()(implicit ord: Ordering[T]): T
-
var
name: String
-
final
def
ne(arg0: AnyRef): Boolean
-
def
newJobContext(conf: Configuration, jobId: JobID): JobContext
-
def
newTaskAttemptContext(conf: Configuration, attemptId: TaskAttemptID): TaskAttemptContext
-
def
newTaskAttemptID(jtIdentifier: String, jobId: Int, isMap: Boolean, taskId: Int, attemptId: Int): TaskAttemptID
-
final
def
notify(): Unit
-
final
def
notifyAll(): Unit
-
def
parent[U](j: Int)(implicit arg0: ClassTag[U]): RDD[U]
-
def
partitionBy(partitioner: Partitioner): RDD[(K, V)]
-
val
partitioner: Option[Partitioner]
-
final
def
partitions: Array[Partition]
-
-
-
def
pipe(command: Seq[String], env: Map[String, String], printPipeContext: ((String) ⇒ Unit) ⇒ Unit, printRDDElement: (T, (String) ⇒ Unit) ⇒ Unit, separateWorkingDir: Boolean): RDD[String]
-
def
pipe(command: String, env: Map[String, String]): RDD[String]
-
def
pipe(command: String): RDD[String]
-
final
def
preferredLocations(split: Partition): Seq[String]
-
def
randomSplit(weights: Array[Double], seed: Long): Array[RDD[T]]
-
def
reduce(f: (T, T) ⇒ T): T
-
def
reduceByKey(func: (V, V) ⇒ V): RDD[(K, V)]
-
def
reduceByKey(func: (V, V) ⇒ V, numPartitions: Int): RDD[(K, V)]
-
def
reduceByKey(partitioner: Partitioner, func: (V, V) ⇒ V): RDD[(K, V)]
-
def
reduceByKeyLocally(func: (V, V) ⇒ V): Map[K, V]
-
def
repartition(numPartitions: Int)(implicit ord: Ordering[T]): RDD[T]
-
def
repartitionAndSortWithinPartitions(partitioner: Partitioner): RDD[(K, V)]
-
def
repartitionByCassandraReplica(keyspaceName: String, tableName: String, partitionsPerHost: Int = 10, partitionKeyMapper: ColumnSelector = PartitionKeyColumns)(implicit connector: CassandraConnector = ..., currentType: ClassTag[T], rwf: RowWriterFactory[T]): CassandraPartitionedRDD[T]
-
def
rightOuterJoin[W](other: RDD[(K, W)], numPartitions: Int): RDD[(K, (Option[V], W))]
-
def
rightOuterJoin[W](other: RDD[(K, W)]): RDD[(K, (Option[V], W))]
-
def
rightOuterJoin[W](other: RDD[(K, W)], partitioner: Partitioner): RDD[(K, (Option[V], W))]
-
def
sample(withReplacement: Boolean, fraction: Double, seed: Long): RDD[T]
-
def
sampleByKey(withReplacement: Boolean, fractions: Map[K, Double], seed: Long): RDD[(K, V)]
-
def
sampleByKeyExact(withReplacement: Boolean, fractions: Map[K, Double], seed: Long): RDD[(K, V)]
-
def
saveAsCassandraTable(keyspaceName: String, tableName: String, columns: ColumnSelector = AllColumns, writeConf: WriteConf = ...)(implicit connector: CassandraConnector = ..., rwf: RowWriterFactory[T], columnMapper: ColumnMapper[T]): Unit
-
-
def
saveAsHadoopDataset(conf: JobConf): Unit
-
def
saveAsHadoopFile(path: String, keyClass: Class[_], valueClass: Class[_], outputFormatClass: Class[_ <: OutputFormat[_, _]], conf: JobConf, codec: Option[Class[_ <: CompressionCodec]]): Unit
-
def
saveAsHadoopFile(path: String, keyClass: Class[_], valueClass: Class[_], outputFormatClass: Class[_ <: OutputFormat[_, _]], codec: Class[_ <: CompressionCodec]): Unit
-
def
saveAsHadoopFile[F <: OutputFormat[K, V]](path: String, codec: Class[_ <: CompressionCodec])(implicit fm: ClassTag[F]): Unit
-
def
saveAsHadoopFile[F <: OutputFormat[K, V]](path: String)(implicit fm: ClassTag[F]): Unit
-
def
saveAsNewAPIHadoopDataset(conf: Configuration): Unit
-
def
saveAsNewAPIHadoopFile(path: String, keyClass: Class[_], valueClass: Class[_], outputFormatClass: Class[_ <: OutputFormat[_, _]], conf: Configuration): Unit
-
def
saveAsNewAPIHadoopFile[F <: OutputFormat[K, V]](path: String)(implicit fm: ClassTag[F]): Unit
-
def
saveAsObjectFile(path: String): Unit
-
def
saveAsSequenceFile(path: String, codec: Option[Class[_ <: CompressionCodec]]): Unit
-
def
saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]): Unit
-
def
saveAsTextFile(path: String): Unit
-
def
saveToCassandra(keyspaceName: String, tableName: String, columns: ColumnSelector = AllColumns, writeConf: WriteConf = ...)(implicit connector: CassandraConnector = ..., rwf: RowWriterFactory[T]): Unit
-
-
def
sortBy[K](f: (T) ⇒ K, ascending: Boolean, numPartitions: Int)(implicit ord: Ordering[K], ctag: ClassTag[K]): RDD[T]
-
def
sortByKey(ascending: Boolean, numPartitions: Int): RDD[(K, V)]
-
def
spanBy[U](f: (T) ⇒ U): RDD[(U, Iterable[T])]
-
def
spanByKey: RDD[(K, Seq[V])]
-
-
def
subtract(other: RDD[T], p: Partitioner)(implicit ord: Ordering[T]): RDD[T]
-
def
subtract(other: RDD[T], numPartitions: Int): RDD[T]
-
def
subtract(other: RDD[T]): RDD[T]
-
def
subtractByKey[W](other: RDD[(K, W)], p: Partitioner)(implicit arg0: ClassTag[W]): RDD[(K, V)]
-
def
subtractByKey[W](other: RDD[(K, W)], numPartitions: Int)(implicit arg0: ClassTag[W]): RDD[(K, V)]
-
def
subtractByKey[W](other: RDD[(K, W)])(implicit arg0: ClassTag[W]): RDD[(K, V)]
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
-
def
take(num: Int): Array[T]
-
def
takeAsync(num: Int): FutureAction[Seq[T]]
-
def
takeOrdered(num: Int)(implicit ord: Ordering[T]): Array[T]
-
def
takeSample(withReplacement: Boolean, num: Int, seed: Long): Array[T]
-
def
toDebugString: String
-
def
toJavaRDD(): JavaRDD[T]
-
def
toLocalIterator: Iterator[T]
-
def
toString(): String
-
def
top(num: Int)(implicit ord: Ordering[T]): Array[T]
-
def
treeAggregate[U](zeroValue: U)(seqOp: (U, T) ⇒ U, combOp: (U, U) ⇒ U, depth: Int)(implicit arg0: ClassTag[U]): U
-
def
treeReduce(f: (T, T) ⇒ T, depth: Int): T
-
def
union(other: RDD[T]): RDD[T]
-
-
def
values: RDD[V]
-
final
def
wait(): Unit
-
final
def
wait(arg0: Long, arg1: Int): Unit
-
final
def
wait(arg0: Long): Unit
-
def
zip[U](other: RDD[U])(implicit arg0: ClassTag[U]): RDD[(T, U)]
-
def
zipPartitions[B, C, D, V](rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D])(f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[D], arg3: ClassTag[V]): RDD[V]
-
def
zipPartitions[B, C, D, V](rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D], preservesPartitioning: Boolean)(f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[D], arg3: ClassTag[V]): RDD[V]
-
def
zipPartitions[B, C, V](rdd2: RDD[B], rdd3: RDD[C])(f: (Iterator[T], Iterator[B], Iterator[C]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[V]): RDD[V]
-
def
zipPartitions[B, C, V](rdd2: RDD[B], rdd3: RDD[C], preservesPartitioning: Boolean)(f: (Iterator[T], Iterator[B], Iterator[C]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[C], arg2: ClassTag[V]): RDD[V]
-
def
zipPartitions[B, V](rdd2: RDD[B])(f: (Iterator[T], Iterator[B]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[V]): RDD[V]
-
def
zipPartitions[B, V](rdd2: RDD[B], preservesPartitioning: Boolean)(f: (Iterator[T], Iterator[B]) ⇒ Iterator[V])(implicit arg0: ClassTag[B], arg1: ClassTag[V]): RDD[V]
-
def
zipWithIndex(): RDD[(T, Long)]
-
def
zipWithUniqueId(): RDD[(T, Long)]
-
Shadowed Implicit Value Members
-
def
histogram(buckets: Array[Double], evenBuckets: Boolean): Array[Long]
-
def
histogram(bucketCount: Int): Pair[Array[Double], Array[Long]]
-
def
histogram(buckets: Array[Double], evenBuckets: Boolean): Array[Long]
-
def
histogram(bucketCount: Int): Pair[Array[Double], Array[Long]]
-
def
mean(): Double
-
def
mean(): Double
-
def
meanApprox(timeout: Long, confidence: Double): PartialResult[BoundedDouble]
-
def
meanApprox(timeout: Long, confidence: Double): PartialResult[BoundedDouble]
-
def
sampleStdev(): Double
-
def
sampleStdev(): Double
-
def
sampleVariance(): Double
-
def
sampleVariance(): Double
-
-
-
-
def
stdev(): Double
-
def
stdev(): Double
-
def
sum(): Double
-
def
sum(): Double
-
def
sumApprox(timeout: Long, confidence: Double): PartialResult[BoundedDouble]
-
def
sumApprox(timeout: Long, confidence: Double): PartialResult[BoundedDouble]
-
def
variance(): Double
-
def
variance(): Double
Deprecated Value Members
-
def
filterWith[A](constructA: (Int) ⇒ A)(p: (T, A) ⇒ Boolean): RDD[T]
-
def
flatMapWith[A, U](constructA: (Int) ⇒ A, preservesPartitioning: Boolean)(f: (T, A) ⇒ Seq[U])(implicit arg0: ClassTag[U]): RDD[U]
-
def
foreachWith[A](constructA: (Int) ⇒ A)(f: (T, A) ⇒ Unit): Unit
-
def
mapPartitionsWithContext[U](f: (TaskContext, Iterator[T]) ⇒ Iterator[U], preservesPartitioning: Boolean)(implicit arg0: ClassTag[U]): RDD[U]
-
def
mapPartitionsWithSplit[U](f: (Int, Iterator[T]) ⇒ Iterator[U], preservesPartitioning: Boolean)(implicit arg0: ClassTag[U]): RDD[U]
-
def
mapWith[A, U](constructA: (Int) ⇒ A, preservesPartitioning: Boolean)(f: (T, A) ⇒ U)(implicit arg0: ClassTag[U]): RDD[U]
-
def
reduceByKeyToDriver(func: (V, V) ⇒ V): Map[K, V]
-
def
toArray(): Array[T]
Inherited from Serializable
Inherited from Serializable
Inherited from AnyRef
Inherited from Any
An RDD created by `repartitionByCassandraReplica`, whose preferred locations map to the Cassandra replicas that each partition was created for.