Skip to content

Commit 7265fa4

Browse files
authored
Merge pull request #712 from betalgo/dev
9.0.3
2 parents 2ddad19 + 418e1fd commit 7265fa4

File tree

3 files changed

+105
-3
lines changed

3 files changed

+105
-3
lines changed

OpenAI.SDK/Betalgo.Ranul.OpenAI.csproj

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
<PackageIcon>Betalgo-Ranul-OpenAI-icon.png</PackageIcon>
1111
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
1212
<Title>OpenAI SDK by Betalgo</Title>
13-
<Version>9.0.2</Version>
13+
<Version>9.0.3</Version>
1414
<Authors>Tolga Kayhan, Betalgo</Authors>
1515
<Company>Betalgo Up Ltd.</Company>
1616
<Product>OpenAI .NET library by Betalgo Ranul</Product>

OpenAI.SDK/ObjectModels/Models.cs

Lines changed: 100 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,14 @@ public enum Model
9696
Gpt_4_turbo_preview,
9797
Gpt_4_turbo,
9898
Gpt_4_turbo_2024_04_09,
99+
100+
Gpt_4_1,
101+
Gpt_4_1_2025_04_14,
102+
Gpt_4_1_mini,
103+
Gpt_4_1_mini_2025_04_14,
104+
Gpt_4_1_nano,
105+
Gpt_4_1_nano_2025_04_14,
106+
99107
Gpt_4o,
100108
Gpt_4o_2024_05_13,
101109
Gpt_4o_2024_08_06,
@@ -120,10 +128,13 @@ public enum Model
120128
O1_preview_2024_09_12,
121129
O1_mini,
122130
O1_mini_2024_09_12,
131+
O1_pro_2025_03_19,
123132

133+
O3_2025_04_16,
124134
O3_mini,
125135
O3_mini_2025_01_31,
126-
136+
137+
O4_mini_2025_04_16,
127138
Gpt_4o_realtime_preview_2024_10_01
128139
}
129140

@@ -222,7 +233,55 @@ public enum Subject
222233
public static string Gpt_4_turbo_2024_04_09 => "gpt-4-turbo-2024-04-09";
223234

224235
/// <summary>
225-
/// GPT-4o: Our high-intelligence flagship model for complex, multi-step tasks. GPT-4o is cheaper and faster than GPT-4 Turbo. Currently points to gpt-4o-2024-08-06.
236+
/// GPT-4.1: The flagship model with superior performance in coding, instruction following, and long-context understanding.
237+
/// Context Window: 1,047,576 tokens
238+
/// Max output tokens: 32,768 tokens
239+
/// Training data: Up to May 31, 2024
240+
/// </summary>
241+
public static string Gpt_4_1 => "gpt-4.1";
242+
243+
/// <summary>
244+
/// Original snapshot of GPT-4.1 from April 14th 2025. Has the same capabilities as the base GPT-4.1 model.
245+
/// Context Window: 1,047,576 tokens
246+
/// Max output tokens: 32,768 tokens
247+
/// Training data: Up to May 31, 2024
248+
/// </summary>
249+
public static string Gpt_4_1_2025_04_14 => "gpt-4.1-2025-04-14";
250+
251+
/// <summary>
252+
/// GPT-4.1 Mini: A cost-effective medium-sized model with excellent performance rivaling the full GPT-4.1 model. Optimized for better speed while maintaining high quality results.
253+
/// Context Window: 1,047,576 tokens
254+
/// Max output tokens: 32,768 tokens
255+
/// Training data: Up to May 31, 2024
256+
/// </summary>
257+
public static string Gpt_4_1_mini => "gpt-4.1-mini";
258+
259+
/// <summary>
260+
/// Original snapshot of GPT-4.1-mini from April 14th 2025. Has the same capabilities as the base GPT-4.1-mini model.
261+
/// Context Window: 1,047,576 tokens
262+
/// Max output tokens: 32,768 tokens
263+
/// Training data: Up to May 31, 2024
264+
/// </summary>
265+
public static string Gpt_4_1_mini_2025_04_14 => "gpt-4.1-mini-2025-04-14";
266+
267+
/// <summary>
268+
/// GPT-4.1 Nano: The smallest and most cost-effective model in the GPT-4.1 family. Approximately 75% cheaper than GPT-4.1-mini while still excellent for simpler tasks.
269+
/// Context Window: 1,047,576 tokens
270+
/// Max output tokens: 32,768 tokens
271+
/// Training data: Up to May 31, 2024
272+
/// </summary>
273+
public static string Gpt_4_1_nano => "gpt-4.1-nano";
274+
275+
/// <summary>
276+
/// Snapshot of GPT-4.1-nano from April 14th 2025. Has the same capabilities as the base GPT-4.1-nano model.
277+
/// Context Window: 1,047,576 tokens
278+
/// Max output tokens: 32,768 tokens
279+
/// Training data: Up to May 31, 2024
280+
/// </summary>
281+
public static string Gpt_4_1_nano_2025_04_14 => "gpt-4.1-nano-2025-04-14";
282+
283+
/// <summary>
284+
/// GPT-4o: Our high-intelligence flagship model for complex, multi-step tasks. GPT-4o is cheaper and faster than GPT-4 Turbo. Currently points to gpt-4o-2024-08-06.
226285
/// Context Window: 128,000 tokens
227286
/// Max output tokens: 16,384 tokens
228287
/// Training data: Up to Oct 2023
@@ -453,6 +512,36 @@ public enum Subject
453512
/// </summary>
454513
public static string O3_mini_2025_01_31 => "o3-mini-2025-01-31";
455514

515+
/// <summary>
516+
/// O1-pro is available in the Responses API only to enable support for multi-turn model interactions before responding to API requests,
517+
/// and other advanced API features in the future.
518+
/// 200,000 context window
519+
/// 100,000 max output tokens
520+
/// Oct 01, 2023 knowledge cutoff
521+
/// Reasoning token support
522+
/// </summary>
523+
public static string O1_pro_2025_03_19 => "o1-pro-2025-03-19";
524+
525+
/// <summary>
526+
/// O3 is a well-rounded and powerful model across domains. It sets a new standard for math, science, coding,
527+
/// and visual reasoning tasks. It also excels at technical writing and instruction-following.
528+
/// 200,000 context window
529+
/// 100,000 max output tokens
530+
/// Jun 01, 2024 knowledge cutoff
531+
/// Reasoning token support
532+
/// </summary>
533+
public static string O3_2025_04_16 => "o3-2025-04-16";
534+
535+
/// <summary>
536+
/// O4-mini is the latest small o-series model. It's optimized for fast, effective reasoning
537+
/// with exceptionally efficient performance in coding and visual tasks.
538+
/// 200,000 context window
539+
/// 100,000 max output tokens
540+
/// Jun 01, 2024 knowledge cutoff
541+
/// Reasoning token support
542+
/// </summary>
543+
public static string O4_mini_2025_04_16 => "o4-mini-2025-04-16";
544+
456545
/// <summary>
457546
/// This method does not guarantee returned model exists.
458547
/// </summary>
@@ -543,6 +632,12 @@ public static string EnumToString(this Model model)
543632
Model.Gpt_4_turbo_preview => Gpt_4_turbo_preview,
544633
Model.Gpt_4_turbo => Gpt_4_turbo,
545634
Model.Gpt_4_turbo_2024_04_09 => Gpt_4_turbo_2024_04_09,
635+
Model.Gpt_4_1 => Gpt_4_1,
636+
Model.Gpt_4_1_2025_04_14 => Gpt_4_1_2025_04_14,
637+
Model.Gpt_4_1_mini => Gpt_4_1_mini,
638+
Model.Gpt_4_1_mini_2025_04_14 => Gpt_4_1_mini_2025_04_14,
639+
Model.Gpt_4_1_nano => Gpt_4_1_nano,
640+
Model.Gpt_4_1_nano_2025_04_14 => Gpt_4_1_nano_2025_04_14,
546641
Model.Gpt_4o => Gpt_4o,
547642
Model.Gpt_4o_2024_05_13 => Gpt_4o_2024_05_13,
548643
Model.Gpt_4o_2024_08_06 => Gpt_4o_2024_08_06,
@@ -563,6 +658,9 @@ public static string EnumToString(this Model model)
563658
Model.O3_mini => O3_mini,
564659
Model.O3_mini_2025_01_31 => O3_mini_2025_01_31,
565660
Model.Gpt_4o_realtime_preview_2024_10_01 => Gpt_4o_realtime_preview_2024_10_01,
661+
Model.O1_pro_2025_03_19 => O1_pro_2025_03_19,
662+
Model.O3_2025_04_16 => O3_2025_04_16,
663+
Model.O4_mini_2025_04_16 => O4_mini_2025_04_16,
566664
_ => throw new ArgumentOutOfRangeException(nameof(model), model, null)
567665
};
568666
}

Readme.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,10 @@ Due to time constraints, not all methods have been thoroughly tested or fully do
117117
Needless to say, I cannot accept responsibility for any damage caused by using the library.
118118

119119
## Changelog
120+
### 9.0.3
121+
- Updated `Microsoft.Extensions.AI` to version `9.4.0-preview.1.25207.5`
122+
- Added new models to the model list
123+
120124
### 9.0.2
121125
- Updated `Microsoft.Extensions.AI` to version `9.3.0-preview.1.25114.11`
122126
- Added reasoning effort parameters

0 commit comments

Comments (0)