@@ -2,8 +2,60 @@ package sttp.openai.requests.completions
 
 import sttp.openai.json.SnakePickle
 
-case class Usage(promptTokens: Int, completionTokens: Int, totalTokens: Int)
+/** @param promptTokens
+  *   Number of tokens in the prompt.
+  * @param completionTokens
+  *   Number of tokens in the generated completion.
+  * @param totalTokens
+  *   Total number of tokens used in the request (prompt + completion).
+  * @param completionTokensDetails
+  *   Breakdown of tokens used in a completion.
+  * @param promptTokensDetails
+  *   Breakdown of tokens used in the prompt.
+  */
+case class Usage(
+    promptTokens: Int,
+    completionTokens: Int,
+    totalTokens: Int,
+    completionTokensDetails: CompletionTokensDetails,
+    promptTokensDetails: PromptTokensDetails
+)
 
 object Usage {
   implicit val choicesR: SnakePickle.Reader[Usage] = SnakePickle.macroR[Usage]
 }
+
+/** @param acceptedPredictionTokens
+  *   When using Predicted Outputs, the number of tokens in the prediction that appeared in the completion.
+  * @param audioTokens
+  *   Audio input tokens generated by the model.
+  * @param reasoningTokens
+  *   Tokens generated by the model for reasoning.
+  * @param rejectedPredictionTokens
+  *   When using Predicted Outputs, the number of tokens in the prediction that did not appear in the completion. However, like reasoning
+  *   tokens, these tokens are still counted in the total completion tokens for purposes of billing, output, and context window limits.
+  */
+case class CompletionTokensDetails(
+    acceptedPredictionTokens: Int,
+    audioTokens: Int,
+    reasoningTokens: Int,
+    rejectedPredictionTokens: Int
+)
+
+object CompletionTokensDetails {
+  implicit val completionTokensDetailsR: SnakePickle.Reader[CompletionTokensDetails] = SnakePickle.macroR[CompletionTokensDetails]
+}
+
+/** @param audioTokens
+  *   Audio input tokens present in the prompt.
+  * @param cachedTokens
+  *   Cached tokens present in the prompt.
+  */
+case class PromptTokensDetails(
+    audioTokens: Int,
+    cachedTokens: Int
+)
+
+object PromptTokensDetails {
+  implicit val promptTokensDetailsR: SnakePickle.Reader [PromptTokensDetails] = SnakePickle.macroR[PromptTokensDetails]
+}
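
Not part of the commit, a minimal usage sketch: assuming SnakePickle exposes the standard uPickle read entry point and maps the snake_case JSON keys of an API usage payload onto the camelCase fields above (as the existing choicesR reader implies), the new shape would deserialize as follows. The JSON values are illustrative only.

// Minimal sketch (not part of this commit): reading a snake_case usage payload
// into the new Usage shape. The numbers below are made up for illustration.
import sttp.openai.json.SnakePickle
import sttp.openai.requests.completions.Usage

val json =
  """{
    |  "prompt_tokens": 12,
    |  "completion_tokens": 34,
    |  "total_tokens": 46,
    |  "completion_tokens_details": {
    |    "accepted_prediction_tokens": 0,
    |    "audio_tokens": 0,
    |    "reasoning_tokens": 8,
    |    "rejected_prediction_tokens": 0
    |  },
    |  "prompt_tokens_details": {
    |    "audio_tokens": 0,
    |    "cached_tokens": 0
    |  }
    |}""".stripMargin

// SnakePickle.read resolves the implicit readers defined in the companion objects above.
val usage: Usage = SnakePickle.read[Usage](json)
// usage.completionTokensDetails.reasoningTokens == 8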