object AckableRecord extends Serializable
Linear Supertypes
- Serializable, Serializable, AnyRef, Any
Value Members
- final def !=(arg0: Any): Boolean
  - Definition Classes: AnyRef → Any
- final def ##(): Int
  - Definition Classes: AnyRef → Any
- final def ==(arg0: Any): Boolean
  - Definition Classes: AnyRef → Any
- def apply[K, V](closeOnComplete: Boolean, kafkaScheduler: Scheduler = singleThreadedScheduler())(makeConsumer: ⇒ RichKafkaConsumer[K, V]): Observable[AckableRecord[ConsumerRecord[K, V]]]
  Creates a stream of ack-able records for this consumer (see the usage sketch below).
  - K: the key type
  - V: the value type
  - closeOnComplete: a flag determining whether the Kafka consumer should be closed when the stream completes
  - kafkaScheduler: the single-threaded Kafka scheduler
  - makeConsumer: a function for creating a consumer
  - returns: a stream of AckableRecords
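A minimal usage sketch for `apply`. Only the signature above comes from this page; the broker address, topic, group and the `RichKafkaConsumer(...)` factory call are illustrative assumptions standing in for however your project constructs its consumer.

```scala
// AckableRecord and RichKafkaConsumer are assumed to be in scope; their package is not
// shown on this page, so no library import is given here.
import monix.execution.Scheduler
import monix.reactive.Observable
import org.apache.kafka.clients.consumer.ConsumerRecord

object AckableRecordApplyExample {
  def main(args: Array[String]): Unit = {
    // Build the ack-able record stream; the consumer is closed when the stream completes
    // because closeOnComplete = true. The kafkaScheduler default (singleThreadedScheduler())
    // drives the poll loop.
    val records: Observable[AckableRecord[ConsumerRecord[String, Array[Byte]]]] =
      AckableRecord[String, Array[Byte]](closeOnComplete = true) {
        // Hypothetical factory call -- replace with however your codebase builds a RichKafkaConsumer.
        RichKafkaConsumer("localhost:9092", "example-topic", "example-group")
      }

    // Consuming the stream happens on a separate scheduler from the poll thread.
    implicit val io: Scheduler = Scheduler.global
    records.take(5).foreach(record => println(record))
  }
}
```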
- final def asInstanceOf[T0]: T0
  - Definition Classes: Any
- def clone(): AnyRef
  - Attributes: protected[lang]
  - Definition Classes: AnyRef
  - Annotations: @throws( ... ) @native()
- final def eq(arg0: AnyRef): Boolean
  - Definition Classes: AnyRef
- def equals(arg0: Any): Boolean
  - Definition Classes: AnyRef → Any
- def finalize(): Unit
  - Attributes: protected[lang]
  - Definition Classes: AnyRef
  - Annotations: @throws( classOf[java.lang.Throwable] )
- final def getClass(): Class[_]
  - Definition Classes: AnyRef → Any
  - Annotations: @native()
- def hashCode(): Int
  - Definition Classes: AnyRef → Any
  - Annotations: @native()
- final def isInstanceOf[T0]: Boolean
  - Definition Classes: Any
- final def ne(arg0: AnyRef): Boolean
  - Definition Classes: AnyRef
- final def notify(): Unit
  - Definition Classes: AnyRef
  - Annotations: @native()
- final def notifyAll(): Unit
  - Definition Classes: AnyRef
  - Annotations: @native()
- def singleThreadObservable[K, V](closeOnComplete: Boolean, kafkaScheduler: Scheduler = singleThreadedScheduler())(makeConsumer: ⇒ RichKafkaConsumer[K, V]): Observable[(RichKafkaConsumer[K, V], ConsumerRecord[K, V])]
  Creates a Kafka consumer observable which ensures all the 'consumer.poll(...)' calls happen on a single thread (lest we incur the wrath of Apache Kafka's "one and only one thread can use a KafkaConsumer" rule). A usage sketch follows the scheduler helpers below.
- def singleThreadedExecutor(prepare: (Thread) ⇒ Thread): ExecutorService
- def singleThreadedExecutor(name: String): ExecutorService
- def singleThreadedScheduler(name: String = "SingleThreadForKafkaRead"): SchedulerService
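A sketch of pairing `singleThreadObservable` with a dedicated scheduler from `singleThreadedScheduler`. The signatures are the ones listed above; the broker address, topic, group and the `RichKafkaConsumer(...)` factory call are illustrative assumptions.

```scala
// AckableRecord and RichKafkaConsumer are assumed to be in scope (their package is not shown on this page).
import monix.execution.Scheduler
import monix.execution.schedulers.SchedulerService
import monix.reactive.Observable
import org.apache.kafka.clients.consumer.ConsumerRecord

object SingleThreadPollExample {
  def main(args: Array[String]): Unit = {
    // A dedicated single-threaded scheduler so every consumer.poll(...) runs on one thread,
    // satisfying Kafka's "one and only one thread can use a KafkaConsumer" rule.
    val pollScheduler: SchedulerService = AckableRecord.singleThreadedScheduler("kafka-poll")

    val pairs: Observable[(RichKafkaConsumer[String, String], ConsumerRecord[String, String])] =
      AckableRecord.singleThreadObservable[String, String](
        closeOnComplete = true,
        kafkaScheduler = pollScheduler
      ) {
        // Hypothetical factory call for the consumer.
        RichKafkaConsumer("localhost:9092", "example-topic", "example-group")
      }

    // Downstream processing runs on a different scheduler from the poll thread.
    implicit val io: Scheduler = Scheduler.global
    pairs.take(3).foreach { case (_, record) =>
      println(s"partition=${record.partition()} offset=${record.offset()} value=${record.value()}")
    }
  }
}
```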
- final def synchronized[T0](arg0: ⇒ T0): T0
  - Definition Classes: AnyRef
- def toString(): String
  - Definition Classes: AnyRef → Any
- final def wait(): Unit
  - Definition Classes: AnyRef
  - Annotations: @throws( ... )
- final def wait(arg0: Long, arg1: Int): Unit
  - Definition Classes: AnyRef
  - Annotations: @throws( ... )
- final def wait(arg0: Long): Unit
  - Definition Classes: AnyRef
  - Annotations: @throws( ... ) @native()
- def withOffsets[A](records: Observable[A])(implicit arg0: HasRecord[A]): Observable[(PartitionOffsetState, A)]
  Combines the records with a means of tracking the offsets (usage sketch at the end of this listing).
- implicit object AckableRecordFunctor extends Functor[AckableRecord]
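A sketch of `withOffsets` layered on top of the `apply` stream above. It assumes an implicit `HasRecord` instance for the element type is in scope (this page does not show where that evidence comes from); the imports and the `RichKafkaConsumer(...)` factory call are again illustrative assumptions.

```scala
// AckableRecord, RichKafkaConsumer, PartitionOffsetState and HasRecord are assumed to be in scope.
import monix.execution.Scheduler
import monix.reactive.Observable
import org.apache.kafka.clients.consumer.ConsumerRecord

object WithOffsetsExample {
  def main(args: Array[String]): Unit = {
    implicit val io: Scheduler = Scheduler.global

    // The ack-able record stream from `apply` (see the first sketch).
    val records: Observable[AckableRecord[ConsumerRecord[String, String]]] =
      AckableRecord[String, String](closeOnComplete = true) {
        RichKafkaConsumer("localhost:9092", "example-topic", "example-group") // hypothetical factory
      }

    // Pair each element with the running PartitionOffsetState. This compiles only if an
    // implicit HasRecord[AckableRecord[ConsumerRecord[String, String]]] is available,
    // which is assumed to be supplied by the library.
    val withState: Observable[(PartitionOffsetState, AckableRecord[ConsumerRecord[String, String]])] =
      AckableRecord.withOffsets(records)

    withState.take(3).foreach { case (offsetState, record) =>
      println(s"state=$offsetState record=$record")
    }
  }
}
```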