final case class KafkaConsumerConfig(bootstrapServers: List[String], fetchMinBytes: Int, groupId: String, heartbeatInterval: FiniteDuration, maxPartitionFetchBytes: Int, sessionTimeout: FiniteDuration, sslKeyPassword: Option[String], sslKeyStorePassword: Option[String], sslKeyStoreLocation: Option[String], sslTrustStoreLocation: Option[String], sslTrustStorePassword: Option[String], autoOffsetReset: AutoOffsetReset, connectionsMaxIdleTime: FiniteDuration, enableAutoCommit: Boolean, excludeInternalTopics: Boolean, maxPollRecords: Int, maxPollInterval: FiniteDuration, receiveBufferInBytes: Int, requestTimeout: FiniteDuration, saslKerberosServiceName: Option[String], saslMechanism: String, securityProtocol: SecurityProtocol, sendBufferInBytes: Int, sslEnabledProtocols: List[SSLProtocol], sslKeystoreType: String, sslProtocol: SSLProtocol, sslProvider: Option[String], sslTruststoreType: String, checkCRCs: Boolean, clientId: String, fetchMaxWaitTime: FiniteDuration, metadataMaxAge: FiniteDuration, metricReporters: List[String], metricsNumSamples: Int, metricsSampleWindow: FiniteDuration, reconnectBackoffTime: FiniteDuration, retryBackoffTime: FiniteDuration, observableCommitType: ObservableCommitType, observableCommitOrder: ObservableCommitOrder, observableSeekToEndOnStart: Boolean, properties: Map[String, String]) extends Product with Serializable
Configuration for Kafka Consumer.
For the official documentation on the available configuration
options, see
Consumer Configs on kafka.apache.org.
- bootstrapServers
is the
bootstrap.servers
setting, a list of host/port pairs to use for establishing the initial connection to the Kafka cluster.- fetchMinBytes
is the
fetch.min.bytes
setting, the minimum amount of data the server should return for a fetch request.- groupId
is the
group.id
setting, a unique string that identifies the consumer group this consumer belongs to.- heartbeatInterval
is the
heartbeat.interval.ms
setting, the expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities.- maxPartitionFetchBytes
is the
max.partition.fetch.bytes
setting, the maximum amount of data per-partition the server will return.- sessionTimeout
is the
session.timeout.ms
setting, the timeout used to detect failures when using Kafka's group management facilities.- sslKeyPassword
is the
ssl.key.password
setting and represents the password of the private key in the key store file. This is optional for client.- sslKeyStorePassword
is the
ssl.keystore.password
setting, being the password of the private key in the key store file. This is optional for client.- sslKeyStoreLocation
is the
ssl.keystore.location
setting and represents the location of the key store file. This is optional for client and can be used for two-way authentication for client.- sslTrustStoreLocation
is the
ssl.truststore.location
setting and is the location of the trust store file.- sslTrustStorePassword
is the
ssl.truststore.password
setting and is the password for the trust store file.- autoOffsetReset
is the
auto.offset.reset
setting, specifying what to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server (e.g. because that data has been deleted).- connectionsMaxIdleTime
is the
connections.max.idle.ms
setting and specifies how much time to wait before closing idle connections.- enableAutoCommit
is the
enable.auto.commit
setting. If true the consumer's offset will be periodically committed in the background.- excludeInternalTopics
is the
exclude.internal.topics
setting. Whether records from internal topics (such as offsets) should be exposed to the consumer. If set to true the only way to receive records from an internal topic is subscribing to it.- maxPollRecords
is the
max.poll.records
setting, the maximum number of records returned in a single call to poll().- receiveBufferInBytes
is the
receive.buffer.bytes
setting, the size of the TCP receive buffer (SO_RCVBUF) to use when reading data.- requestTimeout
is the
request.timeout.ms
setting. This configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.- saslKerberosServiceName
is the
sasl.kerberos.service.name
setting, being the Kerberos principal name that Kafka runs as.- saslMechanism
is the
sasl.mechanism
setting, being the SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.- securityProtocol
is the
security.protocol
setting, being the protocol used to communicate with brokers.- sendBufferInBytes
is the
send.buffer.bytes
setting, being the size of the TCP send buffer (SO_SNDBUF) to use when sending data.- sslEnabledProtocols
is the
ssl.enabled.protocols
setting, being the list of protocols enabled for SSL connections.- sslKeystoreType
is the
ssl.keystore.type
setting, being the file format of the key store file.- sslProtocol
is the
ssl.protocol
setting, being the SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.- sslProvider
is the
ssl.provider
setting, being the name of the security provider used for SSL connections. Default value is the default security provider of the JVM.- sslTruststoreType
is the
ssl.truststore.type
setting, being the file format of the trust store file.- checkCRCs
is the
check.crcs
setting, specifying to automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.- clientId
is the
client.id
setting, an id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.- fetchMaxWaitTime
is the
fetch.max.wait.ms
setting, the maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy the requirement given by fetch.min.bytes.- metadataMaxAge
is the
metadata.max.age.ms
setting. The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.- metricReporters
is the
metric.reporters
setting. A list of classes to use as metrics reporters. Implementing the MetricReporter
interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.- metricsNumSamples
is the
metrics.num.samples
setting. The number of samples maintained to compute metrics.- metricsSampleWindow
is the
metrics.sample.window.ms
setting. The metrics system maintains a configurable number of samples over a fixed window size. This configuration controls the size of the window. For example we might maintain two samples each measured over a 30 second period. When a window expires we erase and overwrite the oldest window.- reconnectBackoffTime
is the
reconnect.backoff.ms
setting. The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker.- retryBackoffTime
is the
retry.backoff.ms
setting. The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.- observableCommitType
is the
monix.observable.commit.type
setting. Represents the type of commit to make when the enableAutoCommit setting is set to false
, in which case the observable has to commit on every batch.- observableCommitOrder
is the
monix.observable.commit.order
setting. Specifies when the commit should happen, like before we receive the acknowledgement from downstream, or afterwards.- properties
map of other properties that will be passed to the underlying kafka client. Any properties not explicitly handled by this object can be set via the map, but in case of a duplicate, a value set on the case class will overwrite the value set via properties.
- Alphabetic
- By Inheritance
- KafkaConsumerConfig
- Serializable
- Serializable
- Product
- Equals
- AnyRef
- Any
- Hide All
- Show All
- Public
- All
Instance Constructors
-
new
KafkaConsumerConfig(bootstrapServers: List[String], fetchMinBytes: Int, groupId: String, heartbeatInterval: FiniteDuration, maxPartitionFetchBytes: Int, sessionTimeout: FiniteDuration, sslKeyPassword: Option[String], sslKeyStorePassword: Option[String], sslKeyStoreLocation: Option[String], sslTrustStoreLocation: Option[String], sslTrustStorePassword: Option[String], autoOffsetReset: AutoOffsetReset, connectionsMaxIdleTime: FiniteDuration, enableAutoCommit: Boolean, excludeInternalTopics: Boolean, maxPollRecords: Int, maxPollInterval: FiniteDuration, receiveBufferInBytes: Int, requestTimeout: FiniteDuration, saslKerberosServiceName: Option[String], saslMechanism: String, securityProtocol: SecurityProtocol, sendBufferInBytes: Int, sslEnabledProtocols: List[SSLProtocol], sslKeystoreType: String, sslProtocol: SSLProtocol, sslProvider: Option[String], sslTruststoreType: String, checkCRCs: Boolean, clientId: String, fetchMaxWaitTime: FiniteDuration, metadataMaxAge: FiniteDuration, metricReporters: List[String], metricsNumSamples: Int, metricsSampleWindow: FiniteDuration, reconnectBackoffTime: FiniteDuration, retryBackoffTime: FiniteDuration, observableCommitType: ObservableCommitType, observableCommitOrder: ObservableCommitOrder, observableSeekToEndOnStart: Boolean, properties: Map[String, String])
- bootstrapServers
is the
bootstrap.servers
setting, a list of host/port pairs to use for establishing the initial connection to the Kafka cluster.- fetchMinBytes
is the
fetch.min.bytes
setting, the minimum amount of data the server should return for a fetch request.- groupId
is the
group.id
setting, a unique string that identifies the consumer group this consumer belongs to.- heartbeatInterval
is the
heartbeat.interval.ms
setting, the expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities.- maxPartitionFetchBytes
is the
max.partition.fetch.bytes
setting, the maximum amount of data per-partition the server will return.- sessionTimeout
is the
session.timeout.ms
setting, the timeout used to detect failures when using Kafka's group management facilities.- sslKeyPassword
is the
ssl.key.password
setting and represents the password of the private key in the key store file. This is optional for client.- sslKeyStorePassword
is the
ssl.keystore.password
setting, being the password of the private key in the key store file. This is optional for client.- sslKeyStoreLocation
is the
ssl.keystore.location
setting and represents the location of the key store file. This is optional for client and can be used for two-way authentication for client.- sslTrustStoreLocation
is the
ssl.truststore.location
setting and is the location of the trust store file.- sslTrustStorePassword
is the
ssl.truststore.password
setting and is the password for the trust store file.- autoOffsetReset
is the
auto.offset.reset
setting, specifying what to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server (e.g. because that data has been deleted).- connectionsMaxIdleTime
is the
connections.max.idle.ms
setting and specifies how much time to wait before closing idle connections.- enableAutoCommit
is the
enable.auto.commit
setting. If true the consumer's offset will be periodically committed in the background.- excludeInternalTopics
is the
exclude.internal.topics
setting. Whether records from internal topics (such as offsets) should be exposed to the consumer. If set to true the only way to receive records from an internal topic is subscribing to it.- maxPollRecords
is the
max.poll.records
setting, the maximum number of records returned in a single call to poll().- receiveBufferInBytes
is the
receive.buffer.bytes
setting, the size of the TCP receive buffer (SO_RCVBUF) to use when reading data.- requestTimeout
is the
request.timeout.ms
setting. This configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.- saslKerberosServiceName
is the
sasl.kerberos.service.name
setting, being the Kerberos principal name that Kafka runs as.- saslMechanism
is the
sasl.mechanism
setting, being the SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.- securityProtocol
is the
security.protocol
setting, being the protocol used to communicate with brokers.- sendBufferInBytes
is the
send.buffer.bytes
setting, being the size of the TCP send buffer (SO_SNDBUF) to use when sending data.- sslEnabledProtocols
is the
ssl.enabled.protocols
setting, being the list of protocols enabled for SSL connections.- sslKeystoreType
is the
ssl.keystore.type
setting, being the file format of the key store file.- sslProtocol
is the
ssl.protocol
setting, being the SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.- sslProvider
is the
ssl.provider
setting, being the name of the security provider used for SSL connections. Default value is the default security provider of the JVM.- sslTruststoreType
is the
ssl.truststore.type
setting, being the file format of the trust store file.- checkCRCs
is the
check.crcs
setting, specifying to automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.- clientId
is the
client.id
setting, an id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.- fetchMaxWaitTime
is the
fetch.max.wait.ms
setting, the maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy the requirement given by fetch.min.bytes.- metadataMaxAge
is the
metadata.max.age.ms
setting. The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.- metricReporters
is the
metric.reporters
setting. A list of classes to use as metrics reporters. Implementing the MetricReporter
interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.- metricsNumSamples
is the
metrics.num.samples
setting. The number of samples maintained to compute metrics.- metricsSampleWindow
is the
metrics.sample.window.ms
setting. The metrics system maintains a configurable number of samples over a fixed window size. This configuration controls the size of the window. For example we might maintain two samples each measured over a 30 second period. When a window expires we erase and overwrite the oldest window.- reconnectBackoffTime
is the
reconnect.backoff.ms
setting. The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker.- retryBackoffTime
is the
retry.backoff.ms
setting. The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.- observableCommitType
is the
monix.observable.commit.type
setting. Represents the type of commit to make when the enableAutoCommit setting is set to false
, in which case the observable has to commit on every batch.- observableCommitOrder
is the
monix.observable.commit.order
setting. Specifies when the commit should happen, like before we receive the acknowledgement from downstream, or afterwards.- properties
map of other properties that will be passed to the underlying kafka client. Any properties not explicitly handled by this object can be set via the map, but in case of a duplicate, a value set on the case class will overwrite the value set via properties.
Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
- val autoOffsetReset: AutoOffsetReset
- val bootstrapServers: List[String]
- val checkCRCs: Boolean
- val clientId: String
-
def
clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
- val connectionsMaxIdleTime: FiniteDuration
- val enableAutoCommit: Boolean
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- val excludeInternalTopics: Boolean
- val fetchMaxWaitTime: FiniteDuration
- val fetchMinBytes: Int
-
def
finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
- val groupId: String
- val heartbeatInterval: FiniteDuration
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- val maxPartitionFetchBytes: Int
- val maxPollInterval: FiniteDuration
- val maxPollRecords: Int
- val metadataMaxAge: FiniteDuration
- val metricReporters: List[String]
- val metricsNumSamples: Int
- val metricsSampleWindow: FiniteDuration
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
- val observableCommitOrder: ObservableCommitOrder
- val observableCommitType: ObservableCommitType
- val observableSeekToEndOnStart: Boolean
- val properties: Map[String, String]
- val receiveBufferInBytes: Int
- val reconnectBackoffTime: FiniteDuration
- val requestTimeout: FiniteDuration
- val retryBackoffTime: FiniteDuration
- val saslKerberosServiceName: Option[String]
- val saslMechanism: String
- val securityProtocol: SecurityProtocol
- val sendBufferInBytes: Int
- val sessionTimeout: FiniteDuration
- val sslEnabledProtocols: List[SSLProtocol]
- val sslKeyPassword: Option[String]
- val sslKeyStoreLocation: Option[String]
- val sslKeyStorePassword: Option[String]
- val sslKeystoreType: String
- val sslProtocol: SSLProtocol
- val sslProvider: Option[String]
- val sslTrustStoreLocation: Option[String]
- val sslTrustStorePassword: Option[String]
- val sslTruststoreType: String
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
- def toJavaMap: Map[String, AnyRef]
- def toMap: Map[String, String]
- def toProperties: Properties
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()