
Commit e8b5300

Merge pull request #636 from betalgo/dev
8.7.0
2 parents: 63e76f6 + 7ec9fb1 · commit e8b5300

File tree

3 files changed: +47 -1 lines changed

OpenAI.SDK/ObjectModels/RequestModels/ChatCompletionCreateRequest.cs

Lines changed: 18 additions & 1 deletion

@@ -94,7 +94,7 @@ public IList<string>? StopCalculated
     /// <summary>
-    /// An upper bound for the number of tokens that can be generated for a completion,
+    /// An upper bound for the number of tokens that can be generated for a completion,
     /// including visible output tokens and reasoning tokens.
     /// </summary>
     /// <see href="https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_completion_tokens" />

@@ -267,6 +267,12 @@ public ResponseFormats? ChatResponseFormat
     [JsonPropertyName("top_logprobs")]
     public int? TopLogprobs { get; set; }

+    /// <summary>
+    /// Whether to enable parallel <a href="https://platform.openai.com/docs/guides/function-calling/parallel-function-calling">function calling</a> during tool use.
+    /// </summary>
+    [JsonPropertyName("parallel_tool_calls")]
+    public bool? ParallelToolCalls { get; set; }
+
     /// <summary>
     /// ID of the model to use. For models supported see <see cref="OpenAI.ObjectModels.Models" /> start with <c>Gpt_</c>
     /// </summary>

@@ -291,4 +297,15 @@ public IEnumerable<ValidationResult> Validate()
     /// </summary>
     [JsonPropertyName("user")]
     public string User { get; set; }
+
+    /// <summary>
+    /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
+    /// If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
+    /// If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
+    /// If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
+    /// When not set, the default behavior is 'auto'.
+    /// When this parameter is set, the response body will include the service_tier utilized.
+    /// </summary>
+    [JsonPropertyName("service_tier")]
+    public string? ServiceTier { get; set; }
 }
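For context, the new request-side properties slot into the existing chat completion call. A minimal usage sketch, assuming the usual Betalgo SDK entry points (an IOpenAIService instance named openAiService, ChatMessage.FromUser, Models.Gpt_4o, ToolDefinition) that are not part of this diff:

using OpenAI.Interfaces;
using OpenAI.ObjectModels;
using OpenAI.ObjectModels.RequestModels;

// Sketch only: ParallelToolCalls, ServiceTier and MaxCompletionTokens come from this
// release; the surrounding service, model and tool names are assumed, not shown in the diff.
var request = new ChatCompletionCreateRequest
{
    Model = Models.Gpt_4o,
    Messages = new List<ChatMessage>
    {
        ChatMessage.FromUser("What's the weather in Istanbul and in Ankara?")
    },
    Tools = new List<ToolDefinition> { getWeatherTool }, // hypothetical tool definition
    ParallelToolCalls = true,    // allow the model to emit several tool calls in one turn
    ServiceTier = "auto",        // "auto" or "default"; response echoes the tier actually used
    MaxCompletionTokens = 500    // upper bound incl. visible output and reasoning tokens
};

var completion = await openAiService.ChatCompletion.CreateCompletion(request);

As the XML docs above describe, ServiceTier only changes routing for Scale tier enabled projects; with "auto" and no Scale tier the request simply falls back to the default service tier.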

OpenAI.SDK/ObjectModels/ResponseModels/ChatCompletionCreateResponse.cs

Lines changed: 27 additions & 0 deletions

@@ -5,21 +5,48 @@ namespace OpenAI.ObjectModels.ResponseModels;

 public record ChatCompletionCreateResponse : BaseResponse, IOpenAiModels.IId, IOpenAiModels.ICreatedAt
 {
+    /// <summary>
+    /// The model used for the chat completion.
+    /// </summary>
     [JsonPropertyName("model")]
     public string Model { get; set; }

+    /// <summary>
+    /// A list of chat completion choices. Can be more than one if n is greater than 1.
+    /// </summary>
     [JsonPropertyName("choices")]
     public List<ChatChoiceResponse> Choices { get; set; }

+    /// <summary>
+    /// Usage statistics for the completion request.
+    /// </summary>
     [JsonPropertyName("usage")]
     public UsageResponse Usage { get; set; }

+    /// <summary>
+    /// This fingerprint represents the backend configuration that the model runs with.
+    /// Can be used in conjunction with the seed request parameter to understand when backend changes have been made that
+    /// might impact determinism.
+    /// </summary>
     [JsonPropertyName("system_fingerprint")]
     public string SystemFingerPrint { get; set; }

+    /// <summary>
+    /// The service tier used for processing the request. This field is only included if the service_tier parameter is
+    /// specified in the request.
+    /// </summary>
+    [JsonPropertyName("service_tier")]
+    public string? ServiceTier { get; set; }
+
+    /// <summary>
+    /// The Unix timestamp (in seconds) of when the chat completion was created.
+    /// </summary>
     [JsonPropertyName("created")]
     public int CreatedAt { get; set; }

+    /// <summary>
+    /// A unique identifier for the chat completion.
+    /// </summary>
     [JsonPropertyName("id")]
     public string Id { get; set; }
 }
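On the response side, the new ServiceTier field can be read alongside the existing metadata. A short sketch, assuming the completion variable from the request example above and the SDK's usual Successful / Choices / Usage member shapes, which are not shown in this hunk:

// Reads the fields declared in ChatCompletionCreateResponse; member names beyond
// this diff (Successful, Message.Content, Usage.TotalTokens) are assumptions.
if (completion.Successful)
{
    Console.WriteLine($"Model:            {completion.Model}");
    // service_tier is only populated when the request specified ServiceTier.
    Console.WriteLine($"Service tier:     {completion.ServiceTier ?? "not reported"}");
    Console.WriteLine($"Fingerprint:      {completion.SystemFingerPrint}");
    Console.WriteLine($"Created (unix s): {completion.CreatedAt}");
    Console.WriteLine($"Total tokens:     {completion.Usage.TotalTokens}");
    Console.WriteLine(completion.Choices.First().Message.Content);
}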

Readme.md

Lines changed: 2 additions & 0 deletions

@@ -118,6 +118,8 @@ Needless to say, I cannot accept responsibility for any damage caused by using t
 ### 8.7.0
 - Added support for o1 reasoning models (`o1-mini` and `o1-preview`).
 - Added `MaxCompletionTokens` for `chat completions`.
+- Added support for `ParallelToolCalls` for `chat completions`.
+- Added support for `ServiceTier` for `chat completions`.
 - Added support for `ChunkingStrategy` in `Vector Store` and `Vector Store Files`.
 - Added support for `Strict` in `ToolDefinition`.
 - Added support for `MaxNumberResults` and `RankingOptions` for `FileSearchTool`.
