Skip to content

Commit cf58906

Browse files
authored
Merge pull request #80 from betalgo/dev
v6.6.6
2 parents 912cfe4 + 681fa0e commit cf58906

File tree

9 files changed

+152
-20
lines changed

9 files changed

+152
-20
lines changed

OpenAI.Playground/Program.cs

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -30,9 +30,10 @@
3030
//await ImageTestHelper.RunSimpleCreateImageEditTest(sdk);
3131
//await ImageTestHelper.RunSimpleCreateImageVariationTest(sdk);
3232
//await ModerationTestHelper.CreateModerationTest(sdk);
33-
await CompletionTestHelper.RunSimpleCompletionTest(sdk);
34-
await CompletionTestHelper.RunSimpleCompletionTest2(sdk);
35-
await CompletionTestHelper.RunSimpleCompletionTest3(sdk);
33+
//await CompletionTestHelper.RunSimpleCompletionTest(sdk);
34+
//await CompletionTestHelper.RunSimpleCompletionTest2(sdk);
35+
//await CompletionTestHelper.RunSimpleCompletionTest3(sdk);
36+
await CompletionTestHelper.RunSimpleCompletionStreamTest(sdk);
3637
//await EmbeddingTestHelper.RunSimpleEmbeddingTest(sdk);
3738
//////await FileTestHelper.RunSimpleFileTest(sdk); //will delete files
3839
//////await FineTuningTestHelper.CleanUpAllFineTunings(sdk); //!!!!! will delete all fine-tunings

OpenAI.Playground/TestHelpers/CompletionTestHelper.cs

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,5 +122,45 @@ public static async Task RunSimpleCompletionTest3(IOpenAIService sdk)
122122
throw;
123123
}
124124
}
125+
126+
public static async Task RunSimpleCompletionStreamTest(IOpenAIService sdk)
127+
{
128+
ConsoleExtensions.WriteLine("Completion Stream Testing is starting:", ConsoleColor.Cyan);
129+
130+
try
131+
{
132+
ConsoleExtensions.WriteLine("Completion Stream Test:", ConsoleColor.DarkCyan);
133+
var completionResult = sdk.Completions.CreateCompletionAsStream(new CompletionCreateRequest()
134+
{
135+
Prompt = "Once upon a time",
136+
MaxTokens = 50
137+
}, Models.Davinci);
138+
139+
await foreach (var completion in completionResult)
140+
{
141+
if (completion.Successful)
142+
{
143+
Console.Write(completion.Choices.FirstOrDefault()?.Text);
144+
}
145+
else
146+
{
147+
if (completion.Error == null)
148+
{
149+
throw new Exception("Unknown Error");
150+
}
151+
152+
Console.WriteLine($"{completion.Error.Code}: {completion.Error.Message}");
153+
}
154+
}
155+
156+
Console.WriteLine("");
157+
Console.WriteLine("Complete");
158+
}
159+
catch (Exception e)
160+
{
161+
Console.WriteLine(e);
162+
throw;
163+
}
164+
}
125165
}
126166
}

OpenAI.SDK/Extensions/HttpclientExtensions.cs

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
1-
using System.Net.Http.Json;
1+
using System.Net.Http.Headers;
2+
using System.Net.Http.Json;
23
using System.Text.Json;
34
using System.Text.Json.Serialization;
45

@@ -21,6 +22,22 @@ public static async Task<TResponse> PostAndReadAsAsync<TResponse>(this HttpClien
2122
return await response.Content.ReadFromJsonAsync<TResponse>() ?? throw new InvalidOperationException();
2223
}
2324

25+
public static HttpResponseMessage PostAsStreamAsync(this HttpClient client, string uri, object requestModel)
26+
{
27+
var settings = new JsonSerializerOptions()
28+
{
29+
DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingDefault
30+
};
31+
32+
var content = JsonContent.Create(requestModel, null, settings);
33+
34+
using var request = new HttpRequestMessage(HttpMethod.Post, uri);
35+
request.Headers.Accept.Add(new MediaTypeWithQualityHeaderValue("text/event-stream"));
36+
request.Content = content;
37+
38+
return client.Send(request, HttpCompletionOption.ResponseHeadersRead);
39+
}
40+
2441
public static async Task<TResponse> PostFileAndReadAsAsync<TResponse>(this HttpClient client, string uri, HttpContent content)
2542
{
2643
var response = await client.PostAsync(uri, content);
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
namespace OpenAI.GPT3.Extensions
2+
{
3+
/// <summary>
4+
/// Extension methods for string manipulation
5+
/// </summary>
6+
public static class StringExtensions
7+
{
8+
/// <summary>
9+
/// Remove the search string from the beginning of the string if it exists
10+
/// </summary>
11+
/// <param name="text"></param>
12+
/// <param name="search"></param>
13+
/// <returns></returns>
14+
public static string RemoveIfStartWith(this string text, string search)
15+
{
16+
var pos = text.IndexOf(search, StringComparison.Ordinal);
17+
return pos != 0 ? text : text[search.Length..];
18+
}
19+
}
20+
}

OpenAI.SDK/Interfaces/ICompletionService.cs

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,19 +13,27 @@ public interface ICompletionService
1313
/// <summary>
1414
/// Creates a new completion for the provided prompt and parameters
1515
/// </summary>
16-
/// <param name="engineId">The ID of the engine to use for this request</param>
16+
/// <param name="modelId">The ID of the engine to use for this request</param>
1717
/// <param name="createCompletionModel"></param>
1818
/// <returns></returns>
19-
Task<CompletionCreateResponse> CreateCompletion(CompletionCreateRequest createCompletionModel, string? engineId = null);
19+
Task<CompletionCreateResponse> CreateCompletion(CompletionCreateRequest createCompletionModel, string? modelId = null);
20+
21+
/// <summary>
22+
/// Creates a new completion for the provided prompt and parameters and returns a stream of CompletionCreateResponses
23+
/// </summary>
24+
/// <param name="modelId">The ID of the engine to use for this request</param>
25+
/// <param name="createCompletionModel"></param>
26+
/// <returns></returns>
27+
IAsyncEnumerable<CompletionCreateResponse> CreateCompletionAsStream(CompletionCreateRequest createCompletionModel, string? modelId = null);
2028

2129
/// <summary>
2230
/// Creates a new completion for the provided prompt and parameters
2331
/// </summary>
2432
/// <param name="createCompletionModel"></param>
25-
/// <param name="engineId">The ID of the engine to use for this request</param>
33+
/// <param name="modelId">The ID of the engine to use for this request</param>
2634
/// <returns></returns>
27-
Task<CompletionCreateResponse> Create(CompletionCreateRequest createCompletionModel, Models.Model engineId)
35+
Task<CompletionCreateResponse> Create(CompletionCreateRequest createCompletionModel, Models.Model modelId)
2836
{
29-
return CreateCompletion(createCompletionModel, engineId.EnumToString());
37+
return CreateCompletion(createCompletionModel, modelId.EnumToString());
3038
}
3139
}
Lines changed: 46 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,60 @@
1-
using OpenAI.GPT3.Extensions;
1+
using System.Text.Json;
2+
using OpenAI.GPT3.Extensions;
23
using OpenAI.GPT3.Interfaces;
34
using OpenAI.GPT3.ObjectModels.RequestModels;
45
using OpenAI.GPT3.ObjectModels.ResponseModels;
56

67
namespace OpenAI.GPT3.Managers;
78

9+
810
public partial class OpenAIService : ICompletionService
911
{
12+
/// <inheritdoc />
1013
public async Task<CompletionCreateResponse> CreateCompletion(CompletionCreateRequest createCompletionRequest, string? modelId = null)
1114
{
1215
createCompletionRequest.ProcessModelId(modelId, _defaultModelId);
13-
1416
return await _httpClient.PostAndReadAsAsync<CompletionCreateResponse>(_endpointProvider.CompletionCreate(), createCompletionRequest);
1517
}
18+
19+
/// <inheritdoc />
20+
public async IAsyncEnumerable<CompletionCreateResponse> CreateCompletionAsStream(CompletionCreateRequest createCompletionRequest, string? modelId = null)
21+
{
22+
// Mark the request as streaming
23+
createCompletionRequest.Stream = true;
24+
25+
// Send the request to the CompletionCreate endpoint
26+
createCompletionRequest.ProcessModelId(modelId, _defaultModelId);
27+
28+
using var response = _httpClient.PostAsStreamAsync(_endpointProvider.CompletionCreate(), createCompletionRequest);
29+
await using var stream = await response.Content.ReadAsStreamAsync();
30+
using var reader = new StreamReader(stream);
31+
// Continuously read the stream until the end of it
32+
while (!reader.EndOfStream)
33+
{
34+
var line = await reader.ReadLineAsync();
35+
// Skip empty lines
36+
if (string.IsNullOrEmpty(line)) continue;
37+
38+
line = line.RemoveIfStartWith("data: ");
39+
40+
// Exit the loop if the stream is done
41+
if (line.StartsWith("[DONE]")) break;
42+
43+
CompletionCreateResponse? block;
44+
try
45+
{
46+
// When the response is good, each line is a serializable CompletionCreateRequest
47+
block = JsonSerializer.Deserialize<CompletionCreateResponse>(line);
48+
}
49+
catch (Exception)
50+
{
51+
// When the API returns an error, it does not come back as a block, it returns a single character of text ("{").
52+
// In this instance, read through the rest of the response, which should be a complete object to parse.
53+
line += await reader.ReadToEndAsync();
54+
block = JsonSerializer.Deserialize<CompletionCreateResponse>(line);
55+
}
56+
57+
if (null != block) yield return block;
58+
}
59+
}
1660
}

OpenAI.SDK/ObjectModels/RequestModels/CompletionCreateRequest.cs

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,11 @@
1-
using System.ComponentModel.DataAnnotations;
1+
using System.ComponentModel.DataAnnotations;
22
using System.Text.Json.Serialization;
33
using OpenAI.GPT3.Interfaces;
44
using OpenAI.GPT3.ObjectModels.SharedModels;
55

66
namespace OpenAI.GPT3.ObjectModels.RequestModels
77
{
88
//TODO add model validation
9-
//TODO check what is string or array for prompt,..
109
/// <summary>
1110
/// Create Completion Request Model
1211
/// </summary>

OpenAI.SDK/OpenAI.GPT3.csproj

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,13 +9,13 @@
99
<PackageProjectUrl>https://openai.com/</PackageProjectUrl>
1010
<PackageIcon>OpenAI-Betalgo.png</PackageIcon>
1111
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
12-
<Version>6.6.5</Version>
12+
<Version>6.6.6</Version>
1313
<Authors>Tolga Kayhan, Betalgo</Authors>
1414
<Company>Betalgo Up Ltd.</Company>
1515
<Product>OpenAI GPT-3 and DALL·E dotnet SDK</Product>
1616
<Description>Dotnet SDK for OpenAI GPT-3 and DALL·E</Description>
1717
<RepositoryUrl>https://github.com/betalgo/openai/</RepositoryUrl>
18-
<PackageTags>openAI,gpt-3,ai,betalgo,NLP,dalle,DALL·E,dall-e,OpenAI,OpenAi,openAi,</PackageTags>
18+
<PackageTags>openAI,gpt-3,ai,betalgo,NLP,dalle,DALL·E,dall-e,OpenAI,OpenAi,openAi</PackageTags>
1919
<PackageId>Betalgo.$(AssemblyName)</PackageId>
2020
<PackageReadmeFile>Readme.md</PackageReadmeFile>
2121
<GenerateDocumentationFile>True</GenerateDocumentationFile>

Readme.md

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ Dotnet SDK for OpenAI GPT-3 and DALL·E
1111
*GPT-3 doesn't have any official .Net SDK.*
1212

1313
## Features
14+
- [ ] ChatGPT (coming soon)
1415
- [x] Image (DALL·E)
1516
- [x] Models
1617
- [x] Completions
@@ -20,6 +21,7 @@ Dotnet SDK for OpenAI GPT-3 and DALL·E
2021
- [x] Files
2122
- [x] Fine-tunes
2223
- [x] Moderation
24+
- [ ] Rate limit support
2325

2426
For changelogs please go to end of the document.
2527

@@ -67,8 +69,6 @@ After injecting your service you will be able to get it from service provider
6769
var openAiService = serviceProvider.GetRequiredService<IOpenAIService>();
6870
```
6971

70-
71-
7272
You can set default model(optional):
7373
```csharp
7474
openAiService.SetDefaultModelId(Engines.Davinci);
@@ -79,8 +79,8 @@ openAiService.SetDefaultModelId(Engines.Davinci);
7979
var completionResult = await openAiService.Completions.CreateCompletion(new CompletionCreateRequest()
8080
{
8181
Prompt = "Once upon a time",
82-
MaxTokens = 5
83-
}, Models.Davinci);
82+
Model = Models.TextDavinciV3
83+
});
8484

8585
if (completionResult.Successful)
8686
{
@@ -125,6 +125,9 @@ As you can guess I do not accept any damage caused by use of the library. You ar
125125

126126

127127
## Changelog
128+
### 6.6.6
129+
* CreateCompletionAsStream is now available, big thanks to @qbm5
130+
128131
### 6.6.5
129132
* Sad news, we have Breaking Changes.
130133
* `SetDefaultEngineId()` replaced by `SetDefaultModelId()`
@@ -138,7 +141,7 @@ As you can guess I do not accept any damage caused by use of the library. You ar
138141
* If you find this complicated please have a look at the implementation, OpenAI.SDK/Extensions/ModelExtension.cs -> ProcessModelId()
139142
* New Method introduced: GetDefaultModelId();
140143
* Some name changes about the legacy `engine` keyword with the new `model` keyword
141-
* Started to use the latest Completion endpoint. This expecting to solve finetuning issues
144+
* Started to use the latest Completion endpoint. This is expected to solve fine-tuning issues. Thanks to @maiemy and other reporters.
142145
### 6.6.4
143146
* Bug-fix, ImageEditRequest.Mask now is optional. thanks to @hanialaraj
144147
*(if you are using edit request without mask your image has to be RGBA, RGB is not allowed)*

0 commit comments

Comments
 (0)