case class KafkaProducerConfig(bootstrapServers: List[String], acks: Acks, bufferMemoryInBytes: Int, compressionType: CompressionType, retries: Int, sslKeyPassword: Option[String], sslKeyStorePassword: Option[String], sslKeyStoreLocation: Option[String], sslTrustStoreLocation: Option[String], sslTrustStorePassword: Option[String], batchSizeInBytes: Int, clientId: String, connectionsMaxIdleTime: FiniteDuration, lingerTime: FiniteDuration, maxBlockTime: FiniteDuration, maxRequestSizeInBytes: Int, maxInFlightRequestsPerConnection: Int, partitionerClass: Option[PartitionerName], receiveBufferInBytes: Int, requestTimeout: FiniteDuration, saslKerberosServiceName: Option[String], saslMechanism: String, securityProtocol: SecurityProtocol, sendBufferInBytes: Int, sslEnabledProtocols: List[SSLProtocol], sslKeystoreType: String, sslProtocol: SSLProtocol, sslProvider: Option[String], sslTruststoreType: String, reconnectBackoffTime: FiniteDuration, retryBackoffTime: FiniteDuration, metadataMaxAge: FiniteDuration, metricReporters: List[String], metricsNumSamples: Int, metricsSampleWindow: FiniteDuration, monixSinkParallelism: Int, properties: Map[String, String]) extends Product with Serializable
The Kafka Producer config.
For the official documentation on the available configuration options,
see the Producer Configs section on kafka.apache.org.
- bootstrapServers
is the
bootstrap.servers
setting and represents the list of servers to connect to.- acks
is the
acks
setting and represents the number of acknowledgments the producer requires the leader to have received before considering a request complete. See Acks.- bufferMemoryInBytes
is the
buffer.memory
setting and represents the total bytes of memory the producer can use to buffer records waiting to be sent to the server.- compressionType
is the
compression.type
setting and specifies what compression algorithm to apply to all the generated data by the producer. The default is none (no compression applied).- retries
is the
retries
setting. A value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.- sslKeyPassword
is the
ssl.key.password
setting and represents the password of the private key in the key store file. This is optional for client.- sslKeyStorePassword
is the
ssl.keystore.password
setting, being the password of the private key in the key store file. This is optional for client.- sslKeyStoreLocation
is the
ssl.keystore.location
setting and represents the location of the key store file. This is optional for client and can be used for two-way authentication for client.- sslTrustStoreLocation
is the
ssl.truststore.location
setting and is the location of the trust store file.- sslTrustStorePassword
is the
ssl.truststore.password
setting and is the password for the trust store file.- batchSizeInBytes
is the
batch.size
setting. The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This setting specifies the maximum size of a batch, in bytes.- clientId
is the
client.id
setting, an id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.- connectionsMaxIdleTime
is the
connections.max.idle.ms
setting and specifies how much time to wait before closing idle connections.- lingerTime
is the
linger.ms
setting and specifies that records be buffered for more efficient batching, up to the maximum batch size or for the maximum lingerTime
. If zero, then no buffering will happen, but if different from zero, then records will be delayed in absence of load.- maxBlockTime
is the
max.block.ms
setting. The configuration controls how long KafkaProducer.send()
and KafkaProducer.partitionsFor()
will block. These methods can be blocked either because the buffer is full or metadata unavailable.- maxRequestSizeInBytes
is the
max.request.size
setting and represents the maximum size of a request in bytes. This is also effectively a cap on the maximum record size.- maxInFlightRequestsPerConnection
is the
max.in.flight.requests.per.connection
setting and represents the maximum number of unacknowledged requests the client will send on a single connection before blocking. If this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (if enabled).- partitionerClass
is the
partitioner.class
setting and represents a class that implements the org.apache.kafka.clients.producer.Partitioner
interface.- receiveBufferInBytes
is the
receive.buffer.bytes
setting being the size of the TCP receive buffer (SO_RCVBUF) to use when reading data.- requestTimeout
is the
request.timeout.ms
setting, a configuration that controls the maximum amount of time the client will wait for the response of a request.- saslKerberosServiceName
is the
sasl.kerberos.service.name
setting, being the Kerberos principal name that Kafka runs as.- saslMechanism
is the
sasl.mechanism
setting, being the SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.- securityProtocol
is the
security.protocol
setting, being the protocol used to communicate with brokers.- sendBufferInBytes
is the
send.buffer.bytes
setting, being the size of the TCP send buffer (SO_SNDBUF) to use when sending data.- sslEnabledProtocols
is the
ssl.enabled.protocols
setting, being the list of protocols enabled for SSL connections.- sslKeystoreType
is the
ssl.keystore.type
setting, being the file format of the key store file.- sslProtocol
is the
ssl.protocol
setting, being the SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.- sslProvider
is the
ssl.provider
setting, being the name of the security provider used for SSL connections. Default value is the default security provider of the JVM.- sslTruststoreType
is the
ssl.truststore.type
setting, being the file format of the trust store file.- reconnectBackoffTime
is the
reconnect.backoff.ms
setting. The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker.- retryBackoffTime
is the
retry.backoff.ms
setting. The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.- metadataMaxAge
is the
metadata.max.age.ms
setting. The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.- metricReporters
is the
metric.reporters
setting. A list of classes to use as metrics reporters. Implementing the MetricReporter
interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.- metricsNumSamples
is the
metrics.num.samples
setting. The number of samples maintained to compute metrics.- metricsSampleWindow
is the
metrics.sample.window.ms
setting. The metrics system maintains a configurable number of samples over a fixed window size. This configuration controls the size of the window. For example we might maintain two samples each measured over a 30 second period. When a window expires we erase and overwrite the oldest window.- monixSinkParallelism
is the
monix.producer.sink.parallelism
setting indicating how many requests the KafkaProducerSink can execute in parallel.- properties
map of other properties that will be passed to the underlying kafka client. Any properties not explicitly handled by this object can be set via the map, but in case of a duplicate, a value set on the case class will overwrite the value set via properties.
- Alphabetic
- By Inheritance
- KafkaProducerConfig
- Serializable
- Serializable
- Product
- Equals
- AnyRef
- Any
- Hide All
- Show All
- Public
- All
Instance Constructors
-
new
KafkaProducerConfig(bootstrapServers: List[String], acks: Acks, bufferMemoryInBytes: Int, compressionType: CompressionType, retries: Int, sslKeyPassword: Option[String], sslKeyStorePassword: Option[String], sslKeyStoreLocation: Option[String], sslTrustStoreLocation: Option[String], sslTrustStorePassword: Option[String], batchSizeInBytes: Int, clientId: String, connectionsMaxIdleTime: FiniteDuration, lingerTime: FiniteDuration, maxBlockTime: FiniteDuration, maxRequestSizeInBytes: Int, maxInFlightRequestsPerConnection: Int, partitionerClass: Option[PartitionerName], receiveBufferInBytes: Int, requestTimeout: FiniteDuration, saslKerberosServiceName: Option[String], saslMechanism: String, securityProtocol: SecurityProtocol, sendBufferInBytes: Int, sslEnabledProtocols: List[SSLProtocol], sslKeystoreType: String, sslProtocol: SSLProtocol, sslProvider: Option[String], sslTruststoreType: String, reconnectBackoffTime: FiniteDuration, retryBackoffTime: FiniteDuration, metadataMaxAge: FiniteDuration, metricReporters: List[String], metricsNumSamples: Int, metricsSampleWindow: FiniteDuration, monixSinkParallelism: Int, properties: Map[String, String])
- bootstrapServers
is the
bootstrap.servers
setting and represents the list of servers to connect to.- acks
is the
acks
setting and represents the number of acknowledgments the producer requires the leader to have received before considering a request complete. See Acks.- bufferMemoryInBytes
is the
buffer.memory
setting and represents the total bytes of memory the producer can use to buffer records waiting to be sent to the server.- compressionType
is the
compression.type
setting and specifies what compression algorithm to apply to all the generated data by the producer. The default is none (no compression applied).- retries
is the
retries
setting. A value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.- sslKeyPassword
is the
ssl.key.password
setting and represents the password of the private key in the key store file. This is optional for client.- sslKeyStorePassword
is the
ssl.keystore.password
setting, being the password of the private key in the key store file. This is optional for client.- sslKeyStoreLocation
is the
ssl.keystore.location
setting and represents the location of the key store file. This is optional for client and can be used for two-way authentication for client.- sslTrustStoreLocation
is the
ssl.truststore.location
setting and is the location of the trust store file.- sslTrustStorePassword
is the
ssl.truststore.password
setting and is the password for the trust store file.- batchSizeInBytes
is the
batch.size
setting. The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This setting specifies the maximum size of a batch, in bytes.- clientId
is the
client.id
setting, an id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.- connectionsMaxIdleTime
is the
connections.max.idle.ms
setting and specifies how much time to wait before closing idle connections.- lingerTime
is the
linger.ms
setting and specifies that records be buffered for more efficient batching, up to the maximum batch size or for the maximum lingerTime
. If zero, then no buffering will happen, but if different from zero, then records will be delayed in absence of load.- maxBlockTime
is the
max.block.ms
setting. The configuration controls how long KafkaProducer.send()
and KafkaProducer.partitionsFor()
will block. These methods can be blocked either because the buffer is full or metadata unavailable.- maxRequestSizeInBytes
is the
max.request.size
setting and represents the maximum size of a request in bytes. This is also effectively a cap on the maximum record size.- maxInFlightRequestsPerConnection
is the
max.in.flight.requests.per.connection
setting and represents the maximum number of unacknowledged requests the client will send on a single connection before blocking. If this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (if enabled).- partitionerClass
is the
partitioner.class
setting and represents a class that implements the org.apache.kafka.clients.producer.Partitioner
interface.- receiveBufferInBytes
is the
receive.buffer.bytes
setting being the size of the TCP receive buffer (SO_RCVBUF) to use when reading data.- requestTimeout
is the
request.timeout.ms
setting, a configuration that controls the maximum amount of time the client will wait for the response of a request.- saslKerberosServiceName
is the
sasl.kerberos.service.name
setting, being the Kerberos principal name that Kafka runs as.- saslMechanism
is the
sasl.mechanism
setting, being the SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.- securityProtocol
is the
security.protocol
setting, being the protocol used to communicate with brokers.- sendBufferInBytes
is the
send.buffer.bytes
setting, being the size of the TCP send buffer (SO_SNDBUF) to use when sending data.- sslEnabledProtocols
is the
ssl.enabled.protocols
setting, being the list of protocols enabled for SSL connections.- sslKeystoreType
is the
ssl.keystore.type
setting, being the file format of the key store file.- sslProtocol
is the
ssl.protocol
setting, being the SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.- sslProvider
is the
ssl.provider
setting, being the name of the security provider used for SSL connections. Default value is the default security provider of the JVM.- sslTruststoreType
is the
ssl.truststore.type
setting, being the file format of the trust store file.- reconnectBackoffTime
is the
reconnect.backoff.ms
setting. The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker.- retryBackoffTime
is the
retry.backoff.ms
setting. The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.- metadataMaxAge
is the
metadata.max.age.ms
setting. The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.- metricReporters
is the
metric.reporters
setting. A list of classes to use as metrics reporters. Implementing the MetricReporter
interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.- metricsNumSamples
is the
metrics.num.samples
setting. The number of samples maintained to compute metrics.- metricsSampleWindow
is the
metrics.sample.window.ms
setting. The metrics system maintains a configurable number of samples over a fixed window size. This configuration controls the size of the window. For example we might maintain two samples each measured over a 30 second period. When a window expires we erase and overwrite the oldest window.- monixSinkParallelism
is the
monix.producer.sink.parallelism
setting indicating how many requests the KafkaProducerSink can execute in parallel.- properties
map of other properties that will be passed to the underlying kafka client. Any properties not explicitly handled by this object can be set via the map, but in case of a duplicate, a value set on the case class will overwrite the value set via properties.
Value Members
-
final
def
!=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
-
final
def
##(): Int
- Definition Classes
- AnyRef → Any
-
final
def
==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- val acks: Acks
-
final
def
asInstanceOf[T0]: T0
- Definition Classes
- Any
- val batchSizeInBytes: Int
- val bootstrapServers: List[String]
- val bufferMemoryInBytes: Int
- val clientId: String
-
def
clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()
- val compressionType: CompressionType
- val connectionsMaxIdleTime: FiniteDuration
-
final
def
eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
def
finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws( classOf[java.lang.Throwable] )
-
final
def
getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native()
-
final
def
isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- val lingerTime: FiniteDuration
- val maxBlockTime: FiniteDuration
- val maxInFlightRequestsPerConnection: Int
- val maxRequestSizeInBytes: Int
- val metadataMaxAge: FiniteDuration
- val metricReporters: List[String]
- val metricsNumSamples: Int
- val metricsSampleWindow: FiniteDuration
- val monixSinkParallelism: Int
-
final
def
ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
-
final
def
notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
-
final
def
notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native()
- val partitionerClass: Option[PartitionerName]
- val properties: Map[String, String]
- val receiveBufferInBytes: Int
- val reconnectBackoffTime: FiniteDuration
- val requestTimeout: FiniteDuration
- val retries: Int
- val retryBackoffTime: FiniteDuration
- val saslKerberosServiceName: Option[String]
- val saslMechanism: String
- val securityProtocol: SecurityProtocol
- val sendBufferInBytes: Int
- val sslEnabledProtocols: List[SSLProtocol]
- val sslKeyPassword: Option[String]
- val sslKeyStoreLocation: Option[String]
- val sslKeyStorePassword: Option[String]
- val sslKeystoreType: String
- val sslProtocol: SSLProtocol
- val sslProvider: Option[String]
- val sslTrustStoreLocation: Option[String]
- val sslTrustStorePassword: Option[String]
- val sslTruststoreType: String
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
- def toJavaMap: Map[String, AnyRef]
- def toMap: Map[String, String]
- def toProperties: Properties
-
final
def
wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
-
final
def
wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... ) @native()