You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
@@ -267,6 +267,12 @@ public ResponseFormats? ChatResponseFormat
267
267
/// <summary>
/// Number of most likely tokens to return at each token position, each with an associated log probability.
/// Serialized as the OpenAI <c>top_logprobs</c> request parameter.
/// NOTE(review): the OpenAI API documents an accepted range of 0–20 and requires <c>logprobs</c> to be enabled — confirm against the target API version.
/// </summary>
[JsonPropertyName("top_logprobs")]
public int? TopLogprobs { get; set; }
269
269
270
/// <summary>
/// Whether to enable parallel <a href="https://platform.openai.com/docs/guides/function-calling/parallel-function-calling">function calling</a> during tool use.
/// Serialized as the OpenAI <c>parallel_tool_calls</c> request parameter; when null the field is omitted and the API default applies.
/// </summary>
[JsonPropertyName("parallel_tool_calls")]
public bool? ParallelToolCalls { get; set; }
270
276
/// <summary>
271
277
/// ID of the model to use. For supported models, see <see cref="OpenAI.ObjectModels.Models" />; chat model names start with <c>Gpt_</c>.
272
278
/// </summary>
@@ -291,4 +297,15 @@ public IEnumerable<ValidationResult> Validate()
291
297
/// </summary>
292
298
// Summary XML doc for this property closes just above this member (partially out of view here).
// Serialized as the OpenAI "user" request parameter.
[JsonPropertyName("user")]
public string User { get; set; }
300
+
301
+
/// <summary>
302
+
/// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
303
+
/// If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.
304
+
/// If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
305
+
/// If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
306
+
/// When not set, the default behavior is 'auto'.
307
+
/// When this parameter is set, the response body will include the service_tier utilized.
0 commit comments