阿里云百炼(原灵积)增加对deepseek-r1、deepseek-v3模型支持 (#2182)

* 阿里云百炼(原灵积)增加对deepseek-r1、deepseek-v3模型支持

* update reasoning display

---------

Co-authored-by: binary-husky <qingxu.fu@outlook.com>
这个提交包含在:
BZfei
2025-03-25 00:11:55 +08:00
提交者 GitHub
父节点 045cdb15d8
当前提交 82aac97980
共有 4 个文件被更改,包括 51 次插入、11 次删除

查看文件

@@ -3,6 +3,7 @@ from toolbox import get_conf
import threading
timeout_bot_msg = '[Local Message] Request timeout. Network error.'
model_prefix_to_remove = 'dashscope-'
class QwenRequestInstance():
def __init__(self):
@@ -20,6 +21,13 @@ class QwenRequestInstance():
raise RuntimeError('请配置 DASHSCOPE_API_KEY')
dashscope.api_key = get_conf("DASHSCOPE_API_KEY")
def format_reasoning(self, reasoning_content:str, main_content:str):
    """Prepend the model's chain-of-thought to the answer as styled HTML.

    Each line of `reasoning_content` becomes one <p> element; the paragraphs
    are wrapped in a `reasoning_process` div followed by a horizontal rule,
    then the plain `main_content` is appended. When there is no reasoning
    text, the answer is returned untouched.
    """
    if not reasoning_content:
        return main_content
    # One <p> per reasoning line, concatenated in order.
    paragraphs = []
    for line in reasoning_content.split('\n'):
        paragraphs.append(f'<p style="margin: 1.25em 0;">{line}</p>')
    header = '<div class="reasoning_process">' + ''.join(paragraphs) + '</div>\n\n---\n\n'
    return header + main_content
def generate(self, inputs, llm_kwargs, history, system_prompt):
# import _thread as thread
@@ -28,9 +36,13 @@ class QwenRequestInstance():
if top_p == 0: top_p += 1e-5
if top_p == 1: top_p -= 1e-5
model_name = llm_kwargs['llm_model']
if model_name.startswith(model_prefix_to_remove): model_name = model_name[len(model_prefix_to_remove):]
self.reasoning_buf = ""
self.result_buf = ""
responses = Generation.call(
model=llm_kwargs['llm_model'],
model=model_name,
messages=generate_message_payload(inputs, llm_kwargs, history, system_prompt),
top_p=top_p,
temperature=llm_kwargs.get('temperature', 1.0),
@@ -46,18 +58,24 @@ class QwenRequestInstance():
self.result_buf += response.output.choices[0].message.content
except:
pass
yield self.result_buf
yield self.format_reasoning(self.reasoning_buf, self.result_buf)
break
elif response.output.choices[0].finish_reason == 'length':
self.result_buf += "[Local Message] 生成长度过长,后续输出被截断"
yield self.result_buf
yield self.format_reasoning(self.reasoning_buf, self.result_buf)
break
else:
try:
contain_reasoning = hasattr(response.output.choices[0].message, 'reasoning_content')
except:
contain_reasoning = False
if contain_reasoning:
self.reasoning_buf += response.output.choices[0].message.reasoning_content
self.result_buf += response.output.choices[0].message.content
yield self.result_buf
yield self.format_reasoning(self.reasoning_buf, self.result_buf)
else:
self.result_buf += f"[Local Message] 请求错误:状态码:{response.status_code},错误码:{response.code},消息:{response.message}"
yield self.result_buf
yield self.format_reasoning(self.reasoning_buf, self.result_buf)
break
# 耗尽generator避免报错