case class KafkaProducerConfig(bootstrapServers: List[String], acks: Acks, bufferMemoryInBytes: Int, compressionType: CompressionType, retries: Int, sslKeyPassword: Option[String], sslKeyStorePassword: Option[String], sslKeyStoreLocation: Option[String], sslTrustStoreLocation: Option[String], sslTrustStorePassword: Option[String], batchSizeInBytes: Int, clientId: String, connectionsMaxIdleTime: FiniteDuration, lingerTime: FiniteDuration, maxBlockTime: FiniteDuration, maxRequestSizeInBytes: Int, maxInFlightRequestsPerConnection: Int, partitionerClass: Option[PartitionerName], receiveBufferInBytes: Int, requestTimeout: FiniteDuration, saslKerberosServiceName: Option[String], saslMechanism: String, securityProtocol: SecurityProtocol, sendBufferInBytes: Int, sslEnabledProtocols: List[SSLProtocol], sslKeystoreType: String, sslProtocol: SSLProtocol, sslProvider: Option[String], sslTruststoreType: String, reconnectBackoffTime: FiniteDuration, retryBackoffTime: FiniteDuration, metadataMaxAge: FiniteDuration, metricReporters: List[String], metricsNumSamples: Int, metricsSampleWindow: FiniteDuration, monixSinkParallelism: Int, properties: Map[String, String]) extends Product with Serializable

The Kafka Producer config.

For the official documentation on the available configuration options, see Producer Configs on kafka.apache.org.
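
A config is typically built by overriding only the relevant fields and can then be converted to java.util.Properties for the underlying client via toProperties (listed under Value Members below). The following is a minimal sketch; it assumes the companion object exposes a default instance with sensible defaults, which is not shown on this page.

    import scala.concurrent.duration._

    // Minimal sketch: `KafkaProducerConfig.default` is an assumed starting point
    // (not documented on this page) carrying default values for all fields.
    val producerCfg: KafkaProducerConfig =
      KafkaProducerConfig.default.copy(
        bootstrapServers = List("127.0.0.1:9092"),
        clientId         = "my-producer",
        lingerTime       = 5.millis,
        requestTimeout   = 30.seconds
      )

    // `toProperties` (see Value Members) yields a java.util.Properties that can be
    // handed to a raw org.apache.kafka.clients.producer.KafkaProducer.
    val javaProps: java.util.Properties = producerCfg.toProperties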

bootstrapServers

is the bootstrap.servers setting and represents the list of servers to connect to.

acks

is the acks setting and represents the number of acknowledgments the producer requires the leader to have received before considering a request complete. See Acks.

bufferMemoryInBytes

is the buffer.memory setting and represents the total bytes of memory the producer can use to buffer records waiting to be sent to the server.

compressionType

is the compression.type setting and specifies the compression algorithm to apply to all data generated by the producer. The default is none (no compression applied).

retries

is the retries setting. A value greater than zero will cause the client to resend any record whose send fails with a potentially transient error.

sslKeyPassword

is the ssl.key.password setting and represents the password of the private key in the key store file. This is optional for clients.

sslKeyStorePassword

is the ssl.keystore.password setting, the store password for the key store file. This is optional for clients.

sslKeyStoreLocation

is the ssl.keystore.location setting and represents the location of the key store file. This is optional for clients and can be used for two-way client authentication.

sslTrustStoreLocation

is the ssl.truststore.location setting and is the location of the trust store file.

sslTrustStorePassword

is the ssl.truststore.password setting and is the password for the trust store file.

batchSizeInBytes

is the batch.size setting. The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This setting specifies the maximum size of a batch, in bytes.

clientId

is the client.id setting, an id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.

connectionsMaxIdleTime

is the connections.max.idle.ms setting and specifies how much time to wait before closing idle connections.

lingerTime

is the linger.ms setting and instructs the producer to buffer records for more efficient batching, up to the maximum batch size or for at most lingerTime. If zero, no extra buffering happens; if non-zero, sends may be delayed by up to lingerTime in the absence of load so that more records can accumulate in a batch.
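
Since batchSizeInBytes and lingerTime interact (a batch goes out when it is full or when the linger time expires, whichever comes first), they are usually tuned together. A rough sketch, again assuming a default instance on the companion object:

    import scala.concurrent.duration._

    // Sketch: trade a little latency for better batching.
    // `KafkaProducerConfig.default` is an assumed starting point (not shown on this page).
    val batchingCfg = KafkaProducerConfig.default.copy(
      batchSizeInBytes = 64 * 1024, // batch.size: up to 64 KiB per partition batch
      lingerTime       = 10.millis  // linger.ms: wait up to 10 ms for a batch to fill
    )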

maxBlockTime

is the max.block.ms setting. This configuration controls how long KafkaProducer.send() and KafkaProducer.partitionsFor() will block. These methods can block either because the buffer is full or because metadata is unavailable.

maxRequestSizeInBytes

is the max.request.size setting and represents the maximum size of a request in bytes. This is also effectively a cap on the maximum record size.

maxInFlightRequestsPerConnection

is the max.in.flight.requests.per.connection setting and represents the maximum number of unacknowledged requests the client will send on a single connection before blocking. If this setting is greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (if enabled).
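
To keep strict per-partition ordering while still retrying transient failures, a common approach is to allow only one in-flight request per connection, as sketched below (with the same assumed default starting point):

    // Sketch: retries without re-ordering, at the cost of per-connection throughput.
    val orderedCfg = KafkaProducerConfig.default.copy(
      retries                          = 5, // resend on potentially transient errors
      maxInFlightRequestsPerConnection = 1  // avoid re-ordering when a send is retried
    )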

partitionerClass

is the partitioner.class setting and represents a class that implements the org.apache.kafka.clients.producer.Partitioner interface.

receiveBufferInBytes

is the receive.buffer.bytes setting, the size of the TCP receive buffer (SO_RCVBUF) to use when reading data.

requestTimeout

is the request.timeout.ms setting, a configuration that controls the maximum amount of time the client will wait for the response to a request.

saslKerberosServiceName

is the sasl.kerberos.service.name setting, being the Kerberos principal name that Kafka runs as.

saslMechanism

is the sasl.mechanism setting, being the SASL mechanism used for client connections. This may be any mechanism for which a security provider is available.

securityProtocol

is the security.protocol setting, being the protocol used to communicate with brokers.

sendBufferInBytes

is the send.buffer.bytes setting, being the size of the TCP send buffer (SO_SNDBUF) to use when sending data.

sslEnabledProtocols

is the ssl.enabled.protocols setting, being the list of protocols enabled for SSL connections.

sslKeystoreType

is the ssl.keystore.type setting, being the file format of the key store file.

sslProtocol

is the ssl.protocol setting, being the SSL protocol used to generate the SSLContext. Default setting is TLS, which is fine for most cases. Allowed values in recent JVMs are TLS, TLSv1.1 and TLSv1.2. SSL, SSLv2 and SSLv3 may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities.

sslProvider

is the ssl.provider setting, being the name of the security provider used for SSL connections. Default value is the default security provider of the JVM.

sslTruststoreType

is the ssl.truststore.type setting, being the file format of the trust store file.
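
Putting the SSL-related settings together, an encrypted connection might be configured roughly as follows. The paths and passwords below are placeholders, and the default instance is again an assumption.

    // Sketch: two-way TLS. `securityProtocol` must additionally be set to an
    // SSL-capable SecurityProtocol value (the concrete values of that type are
    // not documented on this page).
    val sslCfg = KafkaProducerConfig.default.copy(
      sslTrustStoreLocation = Some("/etc/kafka/client.truststore.jks"),
      sslTrustStorePassword = Some("truststore-secret"),
      sslKeyStoreLocation   = Some("/etc/kafka/client.keystore.jks"), // enables client authentication
      sslKeyStorePassword   = Some("keystore-secret"),
      sslKeyPassword        = Some("key-secret")
    )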

reconnectBackoffTime

is the reconnect.backoff.ms setting. The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts from the client to the broker.

retryBackoffTime

is the retry.backoff.ms setting. The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.

metadataMaxAge

is the metadata.max.age.ms setting. The period of time after which we force a refresh of metadata, even if we haven't seen any partition leadership changes, in order to proactively discover any new brokers or partitions.

metricReporters

is the metric.reporters setting. A list of classes to use as metrics reporters. Implementing the MetricReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.

metricsNumSamples

is the metrics.num.samples setting. The number of samples maintained to compute metrics.

metricsSampleWindow

is the metrics.sample.window.ms setting. The metrics system maintains a configurable number of samples over a fixed window size. This configuration controls the size of the window. For example, we might maintain two samples, each measured over a 30 second period. When a window expires, we erase and overwrite the oldest window.

monixSinkParallelism

is the monix.producer.sink.parallelism setting indicating how many requests the KafkaProducerSink can execute in parallel.
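
The monixSinkParallelism value is consumed by KafkaProducerSink rather than by the underlying Kafka client. A rough usage sketch follows; the KafkaProducerSink constructor taking the config plus a Scheduler, and KafkaProducerConfig.default, are assumptions based on the monix-kafka README and are not confirmed by this page.

    import monix.execution.Scheduler.Implicits.global
    import monix.kafka.{KafkaProducerConfig, KafkaProducerSink}
    import monix.reactive.Observable
    import org.apache.kafka.clients.producer.ProducerRecord

    // Sketch: push an Observable of records into Kafka,
    // `monixSinkParallelism` producer requests at a time.
    val cfg = KafkaProducerConfig.default.copy(
      bootstrapServers     = List("127.0.0.1:9092"),
      monixSinkParallelism = 4
    )

    val streamTask =
      Observable.fromIterable(1 to 1000)
        .map(i => new ProducerRecord[String, String]("my-topic", i.toString))
        .bufferIntrospective(256) // the sink consumes batches of records
        .consumeWith(KafkaProducerSink[String, String](cfg, global))

    // streamTask is a Task[Unit]; run it e.g. with streamTask.runSyncUnsafe()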

properties

is a map of other properties that will be passed to the underlying Kafka client. Any property not explicitly handled by this object can be set via this map, but in the case of a duplicate, the value set on the case class overrides the value set via properties.
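
For example, a setting that this case class does not model explicitly can still be forwarded through the properties map, while any key it does model is governed by the corresponding field (a sketch, with the same assumed default instance):

    // Sketch: `enable.idempotence` is not a field of this case class, so it is
    // forwarded via `properties`. A key such as "acks" put in this map would be
    // overridden by the `acks` field of the case class.
    val cfgWithExtras = KafkaProducerConfig.default.copy(
      properties = Map("enable.idempotence" -> "true")
    )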

Linear Supertypes
Serializable, Serializable, Product, Equals, AnyRef, Any

Instance Constructors

  1. new KafkaProducerConfig(bootstrapServers: List[String], acks: Acks, bufferMemoryInBytes: Int, compressionType: CompressionType, retries: Int, sslKeyPassword: Option[String], sslKeyStorePassword: Option[String], sslKeyStoreLocation: Option[String], sslTrustStoreLocation: Option[String], sslTrustStorePassword: Option[String], batchSizeInBytes: Int, clientId: String, connectionsMaxIdleTime: FiniteDuration, lingerTime: FiniteDuration, maxBlockTime: FiniteDuration, maxRequestSizeInBytes: Int, maxInFlightRequestsPerConnection: Int, partitionerClass: Option[PartitionerName], receiveBufferInBytes: Int, requestTimeout: FiniteDuration, saslKerberosServiceName: Option[String], saslMechanism: String, securityProtocol: SecurityProtocol, sendBufferInBytes: Int, sslEnabledProtocols: List[SSLProtocol], sslKeystoreType: String, sslProtocol: SSLProtocol, sslProvider: Option[String], sslTruststoreType: String, reconnectBackoffTime: FiniteDuration, retryBackoffTime: FiniteDuration, metadataMaxAge: FiniteDuration, metricReporters: List[String], metricsNumSamples: Int, metricsSampleWindow: FiniteDuration, monixSinkParallelism: Int, properties: Map[String, String])

Value Members

  1. final def !=(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  2. final def ##(): Int
    Definition Classes
    AnyRef → Any
  3. final def ==(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  4. val acks: Acks
  5. final def asInstanceOf[T0]: T0
    Definition Classes
    Any
  6. val batchSizeInBytes: Int
  7. val bootstrapServers: List[String]
  8. val bufferMemoryInBytes: Int
  9. val clientId: String
  10. def clone(): AnyRef
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( ... ) @native()
  11. val compressionType: CompressionType
  12. val connectionsMaxIdleTime: FiniteDuration
  13. final def eq(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  14. def finalize(): Unit
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws( classOf[java.lang.Throwable] )
  15. final def getClass(): Class[_]
    Definition Classes
    AnyRef → Any
    Annotations
    @native()
  16. final def isInstanceOf[T0]: Boolean
    Definition Classes
    Any
  17. val lingerTime: FiniteDuration
  18. val maxBlockTime: FiniteDuration
  19. val maxInFlightRequestsPerConnection: Int
  20. val maxRequestSizeInBytes: Int
  21. val metadataMaxAge: FiniteDuration
  22. val metricReporters: List[String]
  23. val metricsNumSamples: Int
  24. val metricsSampleWindow: FiniteDuration
  25. val monixSinkParallelism: Int
  26. final def ne(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  27. final def notify(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  28. final def notifyAll(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  29. val partitionerClass: Option[PartitionerName]
  30. val properties: Map[String, String]
  31. val receiveBufferInBytes: Int
  32. val reconnectBackoffTime: FiniteDuration
  33. val requestTimeout: FiniteDuration
  34. val retries: Int
  35. val retryBackoffTime: FiniteDuration
  36. val saslKerberosServiceName: Option[String]
  37. val saslMechanism: String
  38. val securityProtocol: SecurityProtocol
  39. val sendBufferInBytes: Int
  40. val sslEnabledProtocols: List[SSLProtocol]
  41. val sslKeyPassword: Option[String]
  42. val sslKeyStoreLocation: Option[String]
  43. val sslKeyStorePassword: Option[String]
  44. val sslKeystoreType: String
  45. val sslProtocol: SSLProtocol
  46. val sslProvider: Option[String]
  47. val sslTrustStoreLocation: Option[String]
  48. val sslTrustStorePassword: Option[String]
  49. val sslTruststoreType: String
  50. final def synchronized[T0](arg0: ⇒ T0): T0
    Definition Classes
    AnyRef
  51. def toJavaMap: Map[String, AnyRef]
  52. def toMap: Map[String, String]
  53. def toProperties: Properties
  54. final def wait(): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  55. final def wait(arg0: Long, arg1: Int): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... )
  56. final def wait(arg0: Long): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws( ... ) @native()

Inherited from Serializable

Inherited from Serializable

Inherited from Product

Inherited from Equals

Inherited from AnyRef

Inherited from Any
