Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 14:36:48 +00:00
fix temp issue of o1
@@ -273,6 +273,7 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
         "openai_disable_system_prompt": True,
         "openai_disable_stream": True,
+        "openai_force_temperature_one": True,
     },

     "o1-mini": {
@@ -284,6 +285,7 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
         "openai_disable_system_prompt": True,
         "openai_disable_stream": True,
+        "openai_force_temperature_one": True,
     },

     "o1-2024-12-17": {
@@ -295,6 +297,7 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
         "openai_disable_system_prompt": True,
         "openai_disable_stream": True,
+        "openai_force_temperature_one": True,
     },

     "o1": {
@@ -306,6 +309,7 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
         "openai_disable_system_prompt": True,
         "openai_disable_stream": True,
+        "openai_force_temperature_one": True,
     },

     "gpt-4-turbo": {
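For context: OpenAI's o1-family endpoints only accept the default temperature of 1, so this commit tags each o1 entry in model_info with a new openai_force_temperature_one capability flag. A minimal sketch of how such a flag is read back out, assuming only the dictionary shape visible in the diff (the lookup helper and the assert lines are illustrative, not part of the repository):

    # Minimal sketch: per-model capability flags as patched above.
    # Keys mirror the diff; the lookup helper is a hypothetical illustration.
    model_info = {
        "o1-mini": {
            "openai_disable_system_prompt": True,
            "openai_disable_stream": True,
            "openai_force_temperature_one": True,  # new flag from this commit
        },
        "gpt-4-turbo": {},  # unrestricted models simply omit the flag
    }

    def forces_temperature_one(llm_model: str) -> bool:
        # .get() with a False default keeps entries without the flag working.
        return model_info.get(llm_model, {}).get("openai_force_temperature_one", False)

    assert forces_temperature_one("o1-mini") is True
    assert forces_temperature_one("gpt-4-turbo") is False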
@@ -351,7 +351,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                         raise ValueError(f'无法读取以下数据,请检查配置。\n\n{chunk_decoded}')
                     # 前者是API2D & One-API的结束条件,后者是OPENAI的结束条件
                     one_api_terminate = ('data: [DONE]' in chunk_decoded)
-                    openai_terminate = (len(chunkjson['choices'][0]["delta"]) == 0)
+                    openai_terminate = (has_choices) and (len(chunkjson['choices'][0]["delta"]) == 0)
                     if one_api_terminate or openai_terminate:
                         is_termination_certain = False
                         if one_api_terminate: is_termination_certain = True # 抓取符合规范的结束条件
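This hunk also hardens the OpenAI termination check: some OpenAI-compatible relays emit chunks with no choices array (for example, a trailing usage-only chunk), and indexing chunkjson['choices'][0] on those would raise an exception. has_choices is not defined inside this hunk; a plausible reading, sketched below, is that it is derived from the parsed chunk (the helper and the sample chunks are illustrative):

    # Illustrative sketch of the guarded check. Assumption: has_choices is
    # computed earlier in predict() from the parsed chunk, roughly like this.
    def is_openai_terminate(chunkjson: dict) -> bool:
        has_choices = 'choices' in chunkjson and len(chunkjson['choices']) > 0
        # Without the guard, a choices-free chunk (e.g. a usage-only payload)
        # would raise KeyError/IndexError on chunkjson['choices'][0].
        return (has_choices) and (len(chunkjson['choices'][0]["delta"]) == 0)

    assert is_openai_terminate({"choices": [{"delta": {}}]}) is True    # empty delta => stream done
    assert is_openai_terminate({"choices": [{"delta": {"content": "hi"}}]}) is False
    assert is_openai_terminate({"usage": {"total_tokens": 42}}) is False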
@@ -563,6 +563,8 @@ def generate_payload(inputs:str, llm_kwargs:dict, history:list, system_prompt:st
         "n": 1,
         "stream": stream,
     }
+    openai_force_temperature_one = model_info[llm_kwargs['llm_model']].get('openai_force_temperature_one', False)
+    if openai_force_temperature_one:
+        payload.pop('temperature')
     return headers,payload
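Finally, generate_payload consults the new flag and strips temperature from the request body entirely, rather than overwriting it with 1, so the endpoint falls back to its own default. A minimal sketch of the resulting behavior, assuming a payload shaped like the one built in this function (field values are made up for illustration):

    # Minimal sketch of the payload fix; field names follow the diff.
    model_info = {"o1-mini": {"openai_force_temperature_one": True}}
    llm_kwargs = {"llm_model": "o1-mini"}

    payload = {
        "model": llm_kwargs["llm_model"],
        "messages": [{"role": "user", "content": "hello"}],
        "temperature": 0.3,  # any value other than 1 is rejected by o1 endpoints
        "n": 1,
        "stream": False,
    }

    openai_force_temperature_one = model_info[llm_kwargs['llm_model']].get('openai_force_temperature_one', False)
    if openai_force_temperature_one:
        # pop() without a default is safe here because generate_payload
        # always builds the payload with a temperature field.
        payload.pop('temperature')

    assert 'temperature' not in payload  # the API now applies its default (1)

Popping the key rather than forcing temperature=1 leaves the choice of default entirely to the endpoint.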