case class ChatBody(model: ChatCompletionModel, messages: Seq[Message], temperature: Option[Double] = None, topP: Option[Double] = None, n: Option[Int] = None, stop: Option[Stop] = None, maxTokens: Option[Int] = None, presencePenalty: Option[Double] = None, frequencyPenalty: Option[Double] = None, logitBias: Option[Map[String, Float]] = None, user: Option[String] = None) extends Product with Serializable
- model
ID of the model to use.
- messages
A list of messages describing the conversation so far.
- temperature
What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
- topP
An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
- n
How many chat completion choices to generate for each input message.
- stop
Up to 4 sequences where the API will stop generating further tokens.
- maxTokens
The maximum number of tokens to generate in the chat completion.
- presencePenalty
Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- frequencyPenalty
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- logitBias
Modify the likelihood of specified tokens appearing in the completion.
- user
A unique identifier representing your end-user, which can help OpenAI monitor and detect abuse.
- Alphabetic
- By Inheritance
- ChatBody
- Serializable
- Product
- Equals
- AnyRef
- Any
- Hide All
- Show All
- Public
- Protected
Instance Constructors
- new ChatBody(model: ChatCompletionModel, messages: Seq[Message], temperature: Option[Double] = None, topP: Option[Double] = None, n: Option[Int] = None, stop: Option[Stop] = None, maxTokens: Option[Int] = None, presencePenalty: Option[Double] = None, frequencyPenalty: Option[Double] = None, logitBias: Option[Map[String, Float]] = None, user: Option[String] = None)
- model
ID of the model to use.
- messages
A list of messages describing the conversation so far.
- temperature
What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
- topP
An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
- n
How many chat completion choices to generate for each input message.
- stop
Up to 4 sequences where the API will stop generating further tokens.
- maxTokens
The maximum number of tokens to generate in the chat completion.
- presencePenalty
Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
- frequencyPenalty
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
- logitBias
Modify the likelihood of specified tokens appearing in the completion.
- user
A unique identifier representing your end-user, which can help OpenAI monitor and detect abuse.
Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##: Int
- Definition Classes
- AnyRef → Any
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.CloneNotSupportedException]) @native() @HotSpotIntrinsicCandidate()
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- val frequencyPenalty: Option[Double]
- final def getClass(): Class[_ <: AnyRef]
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- val logitBias: Option[Map[String, Float]]
- val maxTokens: Option[Int]
- val messages: Seq[Message]
- val model: ChatCompletionModel
- val n: Option[Int]
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- val presencePenalty: Option[Double]
- def productElementNames: Iterator[String]
- Definition Classes
- Product
- val stop: Option[Stop]
- final def synchronized[T0](arg0: => T0): T0
- Definition Classes
- AnyRef
- val temperature: Option[Double]
- val topP: Option[Double]
- val user: Option[String]
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException]) @native()
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
Deprecated Value Members
- def finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.Throwable]) @Deprecated
- Deprecated