Skip to content

Commit f8b8eee

Browse files
authored
Merge pull request #2622 from 6vision/support_gpt-5
feat:Support for the GPT-5 series models
2 parents 8c16227 + a4260cc commit f8b8eee

File tree

3 files changed

+11
-5
lines changed

3 files changed

+11
-5
lines changed

bot/chatgpt/chat_gpt_bot.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -44,12 +44,13 @@ def __init__(self):
4444
"request_timeout": conf().get("request_timeout", None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
4545
"timeout": conf().get("request_timeout", None), # 重试超时时间,在这个时间内,将会自动重试
4646
}
47-
# o1相关模型固定了部分参数,暂时去掉
48-
if conf_model in [const.O1, const.O1_MINI]:
49-
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
47+
# 部分模型暂不支持一些参数,特殊处理
48+
if conf_model in [const.O1, const.O1_MINI, const.GPT_5, const.GPT_5_MINI, const.GPT_5_NANO]:
5049
remove_keys = ["temperature", "top_p", "frequency_penalty", "presence_penalty"]
5150
for key in remove_keys:
52-
self.args.pop(key, None) # 如果键不存在,使用 None 来避免抛出错误
51+
self.args.pop(key, None)  # 如果键不存在,使用 None 来避免抛出错误
52+
if conf_model in [const.O1, const.O1_MINI]: # o1系列模型不支持系统提示词,使用文心模型的session
53+
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
5354

5455
def reply(self, query, context=None):
5556
# acquire reply content

bot/chatgpt/chat_gpt_session.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ def num_tokens_from_messages(messages, model):
6767
elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613",
6868
"gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k", "gpt-4-turbo-preview",
6969
"gpt-4-1106-preview", const.GPT4_TURBO_PREVIEW, const.GPT4_VISION_PREVIEW, const.GPT4_TURBO_01_25,
70-
const.GPT_4o, const.GPT_4O_0806, const.GPT_4o_MINI, const.LINKAI_4o, const.LINKAI_4_TURBO]:
70+
const.GPT_4o, const.GPT_4O_0806, const.GPT_4o_MINI, const.LINKAI_4o, const.LINKAI_4_TURBO, const.GPT_5, const.GPT_5_MINI, const.GPT_5_NANO]:
7171
return num_tokens_from_messages(messages, model="gpt-4")
7272
elif model.startswith("claude-3"):
7373
return num_tokens_from_messages(messages, model="gpt-3.5-turbo")

common/const.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,10 @@
4141
GPT_41_MINI = "gpt-4.1-mini"
4242
GPT_41_NANO = "gpt-4.1-nano"
4343

44+
GPT_5 = "gpt-5"
45+
GPT_5_MINI = "gpt-5-mini"
46+
GPT_5_NANO = "gpt-5-nano"
47+
4448
O1 = "o1-preview"
4549
O1_MINI = "o1-mini"
4650

@@ -102,6 +106,7 @@
102106
MODEL_LIST = [
103107
GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k",
104108
GPT_41, GPT_41_MINI, GPT_41_NANO, O1, O1_MINI, GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13,
109+
GPT_5, GPT_5_MINI, GPT_5_NANO,
105110
WEN_XIN, WEN_XIN_4,
106111
XUNFEI,
107112
ZHIPU_AI, GLM_4, GLM_4_PLUS, GLM_4_flash, GLM_4_LONG, GLM_4_ALLTOOLS, GLM_4_0520, GLM_4_AIR, GLM_4_AIRX,

0 commit comments

Comments
 (0)