Skip to content

Commit 10d72a7

Browse files
author
michal.zyga
committed
start extending chat completion response
1 parent edf5a4a commit 10d72a7

File tree

3 files changed

+155
-6
lines changed

3 files changed

+155
-6
lines changed

core/src/main/scala/sttp/openai/requests/completions/chat/ChatRequestResponseData.scala

+95-2
Original file line numberDiff line numberDiff line change
@@ -18,24 +18,117 @@ object ChatRequestResponseData {
1818
implicit val messageRW: SnakePickle.Reader[Message] = SnakePickle.macroR[Message]
1919
}
2020

/** Represents a choice in the chat completion.
  *
  * @param message
  *   The message associated with this choice.
  * @param finishReason
  *   The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence,
  *   length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag
  *   from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.
  * @param index
  *   The index of this choice.
  * @param logprobs
  *   Log probability information for the choice. Absent when logprobs were not requested.
  */
case class Choices(
    message: Message,
    finishReason: String,
    index: Int,
    logprobs: Option[Logprobs] = None
)

object Choices {
  implicit val choicesR: SnakePickle.Reader[Choices] = SnakePickle.macroR[Choices]
}

/** Log probability information for a completion choice.
  *
  * @param content
  *   A list of message content tokens with log probability information.
  * @param refusal
  *   A list of message refusal tokens with log probability information.
  */
case class Logprobs(
    content: Option[Seq[LogprobData]] = None,
    refusal: Option[Seq[LogprobData]] = None
)

object Logprobs {
  implicit val logprobsR: SnakePickle.Reader[Logprobs] = SnakePickle.macroR[Logprobs]
}
58+
/** Log probability information for a single token of the message.
  *
  * @param token
  *   The token.
  * @param logprob
  *   The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value -9999.0 is used to signify
  *   that the token is very unlikely.
  * @param bytes
  *   A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by
  *   multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is
  *   no bytes representation for the token.
  * @param topLogprobs
  *   List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number
  *   of requested top_logprobs returned.
  */
case class LogprobData(
    token: String,
    logprob: Float,
    bytes: Option[Seq[Int]] = None,
    topLogprobs: Seq[TopLogprobs]
)

object LogprobData {
  // Renamed from `contentR` for consistency with the sibling readers
  // (choicesR, logprobsR, topLogprobsR); implicit resolution is by type,
  // so callers summoning the reader are unaffected.
  implicit val logprobDataR: SnakePickle.Reader[LogprobData] = SnakePickle.macroR[LogprobData]
}
82+
/** One of the most likely tokens at a given token position, with its log probability.
  *
  * @param token
  *   The token.
  * @param logprob
  *   The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value -9999.0 is used to signify
  *   that the token is very unlikely.
  * @param bytes
  *   A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by
  *   multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is
  *   no bytes representation for the token.
  */
case class TopLogprobs(
    token: String,
    logprob: Float,
    bytes: Option[Seq[Int]] = None
)

object TopLogprobs {
  implicit val topLogprobsR: SnakePickle.Reader[TopLogprobs] = SnakePickle.macroR[TopLogprobs]
}
102+
/** Represents the response of a chat completion.
  *
  * @param id
  *   A unique identifier for the chat completion.
  * @param choices
  *   A list of chat completion choices. Can be more than one if n is greater than 1.
  * @param created
  *   The Unix timestamp (in seconds) of when the chat completion was created.
  * @param model
  *   The model used for the chat completion.
  * @param `object`
  *   The object type, which is always chat.completion.
  * @param usage
  *   Usage statistics for the completion request.
  * @param systemFingerprint
  *   This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the seed request
  *   parameter to understand when backend changes have been made that might impact determinism.
  * @param serviceTier
  *   The service tier used for processing the request.
  */
case class ChatResponse(
    id: String,
    choices: Seq[Choices],
    // NOTE(review): a signed Int Unix timestamp overflows in 2038; consider
    // migrating to Long in a future breaking release — TODO confirm with maintainers.
    created: Int,
    model: String,
    `object`: String,
    usage: Usage,
    systemFingerprint: Option[String] = None,
    serviceTier: Option[String] = None
)
40133

41134
object ChatResponse {

core/src/test/scala/sttp/openai/fixtures/ChatFixture.scala

+33-2
Original file line numberDiff line numberDiff line change
@@ -176,9 +176,40 @@ object ChatFixture {
176176
| ]
177177
| },
178178
| "finish_reason": "stop",
179-
| "index": 0
179+
| "index": 0,
180+
| "logprobs": {
181+
| "content": [
182+
| {
183+
| "token": "Hello",
184+
| "logprob": -0.1,
185+
| "bytes": [2, 3, 4],
186+
| "top_logprobs": [
187+
| {
188+
| "token": "Hello",
189+
| "logprob": -0.2,
190+
| "bytes": [4, 5, 6]
191+
| }
192+
| ]
193+
| }
194+
| ],
195+
| "refusal": [
196+
| {
197+
| "token": "Hello",
198+
| "logprob": -0.1,
199+
| "bytes": [2, 3, 4],
200+
| "top_logprobs": [
201+
| {
202+
| "token": "Hello",
203+
| "logprob": -0.2,
204+
| "bytes": [4, 5, 6]
205+
| }
206+
| ]
207+
| }
208+
| ]
209+
| }
180210
| }
181-
| ]
211+
| ],
212+
| "service_tier": "advanced"
182213
|}
183214
|""".stripMargin
184215

core/src/test/scala/sttp/openai/requests/completions/chat/ChatDataSpec.scala

+27-2
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,31 @@ class ChatDataSpec extends AnyFlatSpec with Matchers with EitherValues {
3535
// Expected deserialized fixture for the choice in ChatFixture, including the
// logprobs payload introduced with the extended chat completion response.
// `content` and `refusal` carry the same sample token data as the JSON fixture.
val choices: Choices = Choices(
  message = message,
  finishReason = "stop",
  index = 0,
  logprobs = Some(
    Logprobs(
      content = Some(
        Seq(
          LogprobData(
            token = "Hello",
            logprob = -0.1f,
            bytes = Some(Seq(2, 3, 4)),
            topLogprobs = Seq(TopLogprobs(token = "Hello", logprob = -0.2f, bytes = Some(Seq(4, 5, 6))))
          )
        )
      ),
      refusal = Some(
        Seq(
          LogprobData(
            token = "Hello",
            logprob = -0.1f,
            bytes = Some(Seq(2, 3, 4)),
            topLogprobs = Seq(TopLogprobs(token = "Hello", logprob = -0.2f, bytes = Some(Seq(4, 5, 6))))
          )
        )
      )
    )
  )
)
4064

4165
val expectedResponse: ChatResponse = ChatResponse(
@@ -45,7 +69,8 @@ class ChatDataSpec extends AnyFlatSpec with Matchers with EitherValues {
4569
model = "gpt-3.5-turbo-0301",
4670
usage = usage,
4771
choices = Seq(choices),
48-
systemFingerprint = Some("systemFingerprint")
72+
systemFingerprint = Some("systemFingerprint"),
73+
serviceTier = Some("advanced")
4974
)
5075

5176
// when

0 commit comments

Comments
 (0)