From a1f7ae5b552c15e8e00636a6e3daa5dc2d0b5c8c Mon Sep 17 00:00:00 2001 From: memset0 Date: Fri, 24 Jan 2025 14:43:49 +0800 Subject: [PATCH 1/8] feat: add support for R1 model and display CoT --- request_llms/bridge_all.py | 16 +++- request_llms/oai_std_model_template.py | 114 ++++++++++++++----------- 2 files changed, 77 insertions(+), 53 deletions(-) diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 2d6d3f50..06b403fd 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -1071,18 +1071,18 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder except: logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- 幻方-深度求索大模型在线API -=-=-=-=-=-=- -if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS: +if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS or "deepseek-reasoner" in AVAIL_LLM_MODELS: try: deepseekapi_noui, deepseekapi_ui = get_predict_function( api_key_conf_name="DEEPSEEK_API_KEY", max_output_token=4096, disable_proxy=False - ) + ) model_info.update({ "deepseek-chat":{ "fn_with_ui": deepseekapi_ui, "fn_without_ui": deepseekapi_noui, "endpoint": deepseekapi_endpoint, "can_multi_thread": True, - "max_token": 32000, + "max_token": 64000, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, @@ -1095,6 +1095,16 @@ if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS: "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, + "deepseek-reasoner":{ + "fn_with_ui": deepseekapi_ui, + "fn_without_ui": deepseekapi_noui, + "endpoint": deepseekapi_endpoint, + "can_multi_thread": True, + "max_token": 64000, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + "enable_reasoning": True + }, }) except: logger.error(trimmed_format_exc()) diff --git a/request_llms/oai_std_model_template.py b/request_llms/oai_std_model_template.py index a19f05bf..140ae054 100644 --- a/request_llms/oai_std_model_template.py +++ b/request_llms/oai_std_model_template.py @@ -36,11 +36,12 @@ def get_full_error(chunk, stream_response): def decode_chunk(chunk): """ - 用于解读"content"和"finish_reason"的内容 + 用于解读"content"和"finish_reason"的内容(如果支持思维链也会返回"reasoning_content"内容) """ chunk = chunk.decode() respose = "" finish_reason = "False" + reasoning_content = "" try: chunk = json.loads(chunk[6:]) except: @@ -57,14 +58,20 @@ def decode_chunk(chunk): return respose, finish_reason try: - respose = chunk["choices"][0]["delta"]["content"] + if chunk["choices"][0]["delta"]["content"] is not None: + respose = chunk["choices"][0]["delta"]["content"] except: pass try: finish_reason = chunk["choices"][0]["finish_reason"] except: pass - return respose, finish_reason + try: + if chunk["choices"][0]["delta"]["reasoning_content"] is not None: + reasoning_content = chunk["choices"][0]["delta"]["reasoning_content"] + except: + pass + return respose, finish_reason, reasoning_content def generate_message(input, model, key, history, max_output_token, system_prompt, temperature): @@ -163,29 +170,23 @@ def get_predict_function( system_prompt=sys_prompt, temperature=llm_kwargs["temperature"], ) + + from .bridge_all import model_info + + reasoning = model_info[llm_kwargs['llm_model']].get('enable_reasoning', False) + retry = 0 while True: try: - from .bridge_all import model_info - endpoint = model_info[llm_kwargs["llm_model"]]["endpoint"] - if not disable_proxy: - response = requests.post( - endpoint, - headers=headers, - proxies=proxies, - json=playload, - stream=True, - timeout=TIMEOUT_SECONDS, - 
) - else: - response = requests.post( - endpoint, - headers=headers, - json=playload, - stream=True, - timeout=TIMEOUT_SECONDS, - ) + response = requests.post( + endpoint, + headers=headers, + proxies=None if disable_proxy else proxies, + json=playload, + stream=True, + timeout=TIMEOUT_SECONDS, + ) break except: retry += 1 @@ -194,10 +195,13 @@ def get_predict_function( raise TimeoutError if MAX_RETRY != 0: logger.error(f"请求超时,正在重试 ({retry}/{MAX_RETRY}) ……") - - stream_response = response.iter_lines() + result = "" finish_reason = "" + if reasoning: + resoning_buffer = "" + + stream_response = response.iter_lines() while True: try: chunk = next(stream_response) @@ -207,9 +211,12 @@ def get_predict_function( break except requests.exceptions.ConnectionError: chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。 - response_text, finish_reason = decode_chunk(chunk) + if reasoning: + response_text, finish_reason, reasoning_content = decode_chunk(chunk) + else: + response_text, finish_reason = decode_chunk(chunk) # 返回的数据流第一次为空,继续等待 - if response_text == "" and finish_reason != "False": + if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False": continue if response_text == "API_ERROR" and ( finish_reason != "False" or finish_reason != "stop" @@ -227,6 +234,8 @@ def get_predict_function( print(f"[response] {result}") break result += response_text + if reasoning: + resoning_buffer += reasoning_content if observe_window is not None: # 观测窗,把已经获取的数据显示出去 if len(observe_window) >= 1: @@ -241,6 +250,8 @@ def get_predict_function( error_msg = chunk_decoded logger.error(error_msg) raise RuntimeError("Json解析不合常规") + if reasoning: + return '\n'.join(map(lambda x: '> ' + x, resoning_buffer.split('\n'))) + '\n\n' + result return result def predict( @@ -298,32 +309,25 @@ def get_predict_function( system_prompt=system_prompt, temperature=llm_kwargs["temperature"], ) + + from .bridge_all import model_info + + reasoning = model_info[llm_kwargs['llm_model']].get('enable_reasoning', False) history.append(inputs) history.append("") retry = 0 while True: try: - from .bridge_all import model_info - endpoint = model_info[llm_kwargs["llm_model"]]["endpoint"] - if not disable_proxy: - response = requests.post( - endpoint, - headers=headers, - proxies=proxies, - json=playload, - stream=True, - timeout=TIMEOUT_SECONDS, - ) - else: - response = requests.post( - endpoint, - headers=headers, - json=playload, - stream=True, - timeout=TIMEOUT_SECONDS, - ) + response = requests.post( + endpoint, + headers=headers, + proxies=None if disable_proxy else proxies, + json=playload, + stream=True, + timeout=TIMEOUT_SECONDS, + ) break except: retry += 1 @@ -338,6 +342,8 @@ def get_predict_function( raise TimeoutError gpt_replying_buffer = "" + if reasoning: + gpt_reasoning_buffer = "" stream_response = response.iter_lines() while True: @@ -347,9 +353,12 @@ def get_predict_function( break except requests.exceptions.ConnectionError: chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。 - response_text, finish_reason = decode_chunk(chunk) + if reasoning: + response_text, finish_reason, reasoning_content = decode_chunk(chunk) + else: + response_text, finish_reason = decode_chunk(chunk) # 返回的数据流第一次为空,继续等待 - if response_text == "" and finish_reason != "False": + if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False": status_text = f"finish_reason: {finish_reason}" yield from update_ui( chatbot=chatbot, history=history, msg=status_text @@ -379,9 +388,14 
@@ def get_predict_function( logger.info(f"[response] {gpt_replying_buffer}") break status_text = f"finish_reason: {finish_reason}" - gpt_replying_buffer += response_text - # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出 - history[-1] = gpt_replying_buffer + if reasoning: + gpt_replying_buffer += response_text + gpt_reasoning_buffer += reasoning_content + history[-1] = '\n'.join(map(lambda x: '> ' + x, gpt_reasoning_buffer.split('\n'))) + '\n\n' + gpt_replying_buffer + else: + gpt_replying_buffer += response_text + # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出 + history[-1] = gpt_replying_buffer chatbot[-1] = (history[-2], history[-1]) yield from update_ui( chatbot=chatbot, history=history, msg=status_text From d07e736214495f7dba97cbac0edd5b2994f76cc9 Mon Sep 17 00:00:00 2001 From: memset0 Date: Sat, 25 Jan 2025 00:00:13 +0800 Subject: [PATCH 2/8] fix unpacking --- request_llms/oai_std_model_template.py | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/request_llms/oai_std_model_template.py b/request_llms/oai_std_model_template.py index 140ae054..91304acc 100644 --- a/request_llms/oai_std_model_template.py +++ b/request_llms/oai_std_model_template.py @@ -40,8 +40,8 @@ def decode_chunk(chunk): """ chunk = chunk.decode() respose = "" - finish_reason = "False" reasoning_content = "" + finish_reason = "False" try: chunk = json.loads(chunk[6:]) except: @@ -62,16 +62,16 @@ def decode_chunk(chunk): respose = chunk["choices"][0]["delta"]["content"] except: pass - try: - finish_reason = chunk["choices"][0]["finish_reason"] - except: - pass try: if chunk["choices"][0]["delta"]["reasoning_content"] is not None: reasoning_content = chunk["choices"][0]["delta"]["reasoning_content"] except: pass - return respose, finish_reason, reasoning_content + try: + finish_reason = chunk["choices"][0]["finish_reason"] + except: + pass + return respose, reasoning_content, finish_reason def generate_message(input, model, key, history, max_output_token, system_prompt, temperature): @@ -211,10 +211,7 @@ def get_predict_function( break except requests.exceptions.ConnectionError: chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。 - if reasoning: - response_text, finish_reason, reasoning_content = decode_chunk(chunk) - else: - response_text, finish_reason = decode_chunk(chunk) + response_text, reasoning_content, finish_reason = decode_chunk(chunk) # 返回的数据流第一次为空,继续等待 if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False": continue @@ -353,10 +350,7 @@ def get_predict_function( break except requests.exceptions.ConnectionError: chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。 - if reasoning: - response_text, finish_reason, reasoning_content = decode_chunk(chunk) - else: - response_text, finish_reason = decode_chunk(chunk) + response_text, reasoning_content, finish_reason = decode_chunk(chunk) # 返回的数据流第一次为空,继续等待 if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False": status_text = f"finish_reason: {finish_reason}" From 44fe78fff56679a0ae1e69191f53c579fa89d721 Mon Sep 17 00:00:00 2001 From: Steven Moder Date: Wed, 29 Jan 2025 21:40:30 +0800 Subject: [PATCH 3/8] fix: Enhance API key validation in is_any_api_key function (#2113) --- shared_utils/key_pattern_manager.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/shared_utils/key_pattern_manager.py b/shared_utils/key_pattern_manager.py index e0d101d1..b7191638 100644 --- a/shared_utils/key_pattern_manager.py +++ 
b/shared_utils/key_pattern_manager.py @@ -45,6 +45,13 @@ def is_cohere_api_key(key): def is_any_api_key(key): + # key 一般只包含字母、数字、下划线、逗号、中划线 + if not re.match(r"^[a-zA-Z0-9_\-,]+$", key): + # 如果配置了 CUSTOM_API_KEY_PATTERN,再检查以下以免误杀 + if CUSTOM_API_KEY_PATTERN := get_conf('CUSTOM_API_KEY_PATTERN'): + return bool(re.match(CUSTOM_API_KEY_PATTERN, key)) + return False + if ',' in key: keys = key.split(',') for k in keys: From 0458590a776616112191752f267cf5acd7791298 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 29 Jan 2025 21:30:54 +0800 Subject: [PATCH 4/8] support qwen2.5-max! --- request_llms/bridge_all.py | 32 ++++++++++++++++++++++++++++---- request_llms/com_qwenapi.py | 7 +------ 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 2d6d3f50..612f62ff 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -812,7 +812,8 @@ if "qwen-local" in AVAIL_LLM_MODELS: except: logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- 通义-在线模型 -=-=-=-=-=-=- -if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai +qwen_models = ["qwen-max-latest", "qwen-max-2025-01-25","qwen-max","qwen-turbo","qwen-plus"] +if any(item in qwen_models for item in AVAIL_LLM_MODELS): try: from .bridge_qwen import predict_no_ui_long_connection as qwen_noui from .bridge_qwen import predict as qwen_ui @@ -822,7 +823,7 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen- "fn_without_ui": qwen_noui, "can_multi_thread": True, "endpoint": None, - "max_token": 6144, + "max_token": 100000, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, @@ -831,7 +832,7 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen- "fn_without_ui": qwen_noui, "can_multi_thread": True, "endpoint": None, - "max_token": 30720, + "max_token": 129024, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, @@ -840,7 +841,25 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen- "fn_without_ui": qwen_noui, "can_multi_thread": True, "endpoint": None, - "max_token": 28672, + "max_token": 30720, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + "qwen-max-latest": { + "fn_with_ui": qwen_ui, + "fn_without_ui": qwen_noui, + "can_multi_thread": True, + "endpoint": None, + "max_token": 30720, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + "qwen-max-2025-01-25": { + "fn_with_ui": qwen_ui, + "fn_without_ui": qwen_noui, + "can_multi_thread": True, + "endpoint": None, + "max_token": 30720, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, } @@ -1362,6 +1381,11 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot, inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm") + if llm_kwargs['llm_model'] not in model_info: + from toolbox import update_ui + chatbot.append([inputs, f"很抱歉,模型 '{llm_kwargs['llm_model']}' 暂不支持
(1) 检查config中的AVAIL_LLM_MODELS选项
(2) 检查request_llms/bridge_all.py中的模型路由"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # 如果这里报错,检查config中的AVAIL_LLM_MODELS选项 if additional_fn: # 根据基础功能区 ModelOverride 参数调整模型类型 diff --git a/request_llms/com_qwenapi.py b/request_llms/com_qwenapi.py index 8e037781..70872e16 100644 --- a/request_llms/com_qwenapi.py +++ b/request_llms/com_qwenapi.py @@ -24,18 +24,13 @@ class QwenRequestInstance(): def generate(self, inputs, llm_kwargs, history, system_prompt): # import _thread as thread from dashscope import Generation - QWEN_MODEL = { - 'qwen-turbo': Generation.Models.qwen_turbo, - 'qwen-plus': Generation.Models.qwen_plus, - 'qwen-max': Generation.Models.qwen_max, - }[llm_kwargs['llm_model']] top_p = llm_kwargs.get('top_p', 0.8) if top_p == 0: top_p += 1e-5 if top_p == 1: top_p -= 1e-5 self.result_buf = "" responses = Generation.call( - model=QWEN_MODEL, + model=llm_kwargs['llm_model'], messages=generate_message_payload(inputs, llm_kwargs, history, system_prompt), top_p=top_p, temperature=llm_kwargs.get('temperature', 1.0), From 39d50c1c95405b4eeb9e8b78d1d64134a79cc815 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 4 Feb 2025 15:57:35 +0800 Subject: [PATCH 5/8] update minior adjustment --- config.py | 9 ++-- request_llms/oai_std_model_template.py | 14 ++--- themes/welcome.js | 75 +++++++++++++++++++------- 3 files changed, 69 insertions(+), 29 deletions(-) diff --git a/config.py b/config.py index 4740717f..39df8bdd 100644 --- a/config.py +++ b/config.py @@ -13,6 +13,9 @@ API_KEY = "在此处填写APIKEY" # 可同时填写多个API-KEY,用英文 # [step 1-2]>> ( 接入通义 qwen-max ) 接入通义千问在线大模型,api-key获取地址 https://dashscope.console.aliyun.com/ DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY +# [step 1-3]>> ( 接入通义 deepseek-reasoner ) 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions" +DEEPSEEK_API_KEY = "" + # [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改;如果使用本地或无地域限制的大模型时,此处也不需要修改 USE_PROXY = False if USE_PROXY: @@ -39,7 +42,8 @@ AVAIL_LLM_MODELS = ["qwen-max", "o1-mini", "o1-mini-2024-09-12", "o1", "o1-2024- "gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo", - "gemini-1.5-pro", "chatglm3", "chatglm4" + "gemini-1.5-pro", "chatglm3", "chatglm4", + "deepseek-chat", "deepseek-coder", "deepseek-reasoner" ] EMBEDDING_MODEL = "text-embedding-3-small" @@ -261,9 +265,6 @@ MOONSHOT_API_KEY = "" # 零一万物(Yi Model) API KEY YIMODEL_API_KEY = "" -# 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions" -DEEPSEEK_API_KEY = "" - # 紫东太初大模型 https://ai-maas.wair.ac.cn TAICHU_API_KEY = "" diff --git a/request_llms/oai_std_model_template.py b/request_llms/oai_std_model_template.py index 91304acc..973eb10d 100644 --- a/request_llms/oai_std_model_template.py +++ b/request_llms/oai_std_model_template.py @@ -156,6 +156,7 @@ def get_predict_function( observe_window = None: 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗 """ + from .bridge_all import model_info watch_dog_patience = 5 # 看门狗的耐心,设置5秒不准咬人(咬的也不是人 if len(APIKEY) == 0: raise RuntimeError(f"APIKEY为空,请检查配置文件的{APIKEY}") @@ -170,11 +171,9 @@ def get_predict_function( system_prompt=sys_prompt, temperature=llm_kwargs["temperature"], ) - - from .bridge_all import model_info - + reasoning = model_info[llm_kwargs['llm_model']].get('enable_reasoning', False) - + retry = 0 while True: try: 
@@ -248,7 +247,9 @@ def get_predict_function( logger.error(error_msg) raise RuntimeError("Json解析不合常规") if reasoning: - return '\n'.join(map(lambda x: '> ' + x, resoning_buffer.split('\n'))) + '\n\n' + result + # reasoning 的部分加上框 (>) + return '\n'.join(map(lambda x: '> ' + x, resoning_buffer.split('\n'))) + \ + '\n\n' + result return result def predict( @@ -270,6 +271,7 @@ def get_predict_function( chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 additional_fn代表点击的哪个按钮,按钮见functional.py """ + from .bridge_all import model_info if len(APIKEY) == 0: raise RuntimeError(f"APIKEY为空,请检查配置文件的{APIKEY}") if inputs == "": @@ -307,8 +309,6 @@ def get_predict_function( temperature=llm_kwargs["temperature"], ) - from .bridge_all import model_info - reasoning = model_info[llm_kwargs['llm_model']].get('enable_reasoning', False) history.append(inputs) diff --git a/themes/welcome.js b/themes/welcome.js index 54c15d07..c07645de 100644 --- a/themes/welcome.js +++ b/themes/welcome.js @@ -2,12 +2,19 @@ class WelcomeMessage { constructor() { this.static_welcome_message = [ { - title: "环境配置教程", - content: "配置模型和插件,释放大语言模型的学术应用潜力。", - svg: "file=themes/svg/conf.svg", + title: "改变主题外观", + content: "点击「界面外观」,然后「更换UI主题」或「切换界面明暗」。", + svg: "file=themes/svg/theme.svg", url: "https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E", }, { + title: "修改回答语言偏好", + content: "点击「更改模型」,删除「System prompt」并输入「用某语言回答」。", + svg: "file=themes/svg/prompt.svg", + url: "https://github.com/binary-husky/gpt_academic", + }, + { + title: "Arxiv论文一键翻译", title: "Arxiv论文翻译", content: "无缝切换学术阅读语言,最优英文转中文的学术论文阅读体验。", svg: "file=themes/svg/arxiv.svg", @@ -19,6 +26,12 @@ class WelcomeMessage { svg: "file=themes/svg/mm.svg", url: "https://github.com/binary-husky/gpt_academic", }, + { + title: "获取多个模型的答案", + content: "输入问题后点击「询问多个GPT模型」,消耗算子低于单词询问gpt-4o。", + svg: "file=themes/svg/model_multiple.svg", + url: "https://github.com/binary-husky/gpt_academic", + }, { title: "文档与源码批处理", content: "您可以将任意文件拖入「此处」,随后调用对应插件功能。", @@ -52,7 +65,13 @@ class WelcomeMessage { { title: "实时语音对话", content: "配置实时语音对话功能,无须任何激活词,我将一直倾听。", - svg: "file=themes/svg/default.svg", + svg: "file=themes/svg/voice.svg", + url: "https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md", + }, + { + title: "联网回答问题", + content: "输入问题后,点击右侧插件区的「查互联网后回答」插件。", + svg: "file=themes/svg/Internet.svg", url: "https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md", }, { @@ -85,6 +104,7 @@ class WelcomeMessage { this.card_array = []; this.static_welcome_message_previous = []; this.reflesh_time_interval = 15 * 1000; + this.update_time_interval = 2 * 1000; this.major_title = "欢迎使用GPT-Academic"; const reflesh_render_status = () => { @@ -101,12 +121,19 @@ class WelcomeMessage { window.addEventListener('resize', this.update.bind(this)); // add a loop to reflesh cards this.startRefleshCards(); + this.startAutoUpdate(); } begin_render() { this.update(); } + async startAutoUpdate() { + // sleep certain time + await new Promise(r => setTimeout(r, this.update_time_interval)); + this.update(); + } + async startRefleshCards() { // sleep certain time await new Promise(r => setTimeout(r, this.reflesh_time_interval)); @@ -134,6 +161,7 @@ class WelcomeMessage { // combine two lists this.static_welcome_message_previous = not_shown_previously.concat(already_shown_previously); + this.static_welcome_message_previous = this.static_welcome_message_previous.slice(0, this.max_welcome_card_num); (async () => { // 使用 for...of 
循环来处理异步操作 @@ -198,12 +226,11 @@ class WelcomeMessage { return array; } - async update() { + async can_display() { // update the card visibility const elem_chatbot = document.getElementById('gpt-chatbot'); const chatbot_top = elem_chatbot.getBoundingClientRect().top; const welcome_card_container = document.getElementsByClassName('welcome-card-container')[0]; - // detect if welcome card overflow let welcome_card_overflow = false; if (welcome_card_container) { @@ -215,22 +242,22 @@ class WelcomeMessage { var page_width = document.documentElement.clientWidth; const width_to_hide_welcome = 1200; if (!await this.isChatbotEmpty() || page_width < width_to_hide_welcome || welcome_card_overflow) { - // overflow ! - if (this.visible) { - // console.log("remove welcome"); - this.removeWelcome(); - this.card_array = []; - this.static_welcome_message_previous = []; - } + // cannot display + return false; + } + return true; + } + + async update() { + const can_display = await this.can_display(); + if (can_display && !this.visible) { + this.showWelcome(); return; } - if (this.visible) { - // console.log("already visible"); + if (!can_display && this.visible) { + this.removeWelcome(); return; } - // not overflow, not yet shown, then create and display welcome card - // console.log("show welcome"); - this.showWelcome(); } showCard(message) { @@ -297,6 +324,16 @@ class WelcomeMessage { }); elem_chatbot.appendChild(welcome_card_container); + const can_display = await this.can_display(); + if (!can_display) { + // undo + this.visible = false; + this.card_array = []; + this.static_welcome_message_previous = []; + elem_chatbot.removeChild(welcome_card_container); + await new Promise(r => setTimeout(r, this.update_time_interval / 2)); + return; + } // 添加显示动画 requestAnimationFrame(() => { @@ -313,6 +350,8 @@ class WelcomeMessage { welcome_card_container.classList.add('hide'); welcome_card_container.addEventListener('transitionend', () => { elem_chatbot.removeChild(welcome_card_container); + this.card_array = []; + this.static_welcome_message_previous = []; }, { once: true }); // add a fail safe timeout const timeout = 600; // 与 CSS 中 transition 的时间保持一致(1s) From caaebe4296cabec3f915aaca4fb4095724a5d2f9 Mon Sep 17 00:00:00 2001 From: "Memento mori." <34177126+memset0@users.noreply.github.com> Date: Tue, 4 Feb 2025 16:02:02 +0800 Subject: [PATCH 6/8] add support for Deepseek R1 model and display CoT (#2118) * feat: add support for R1 model and display CoT * fix unpacking * feat: customized font & font size * auto hide tooltip when scoll down * tooltip glass transparent css * fix: Enhance API key validation in is_any_api_key function (#2113) * support qwen2.5-max! 
* update minior adjustment --------- Co-authored-by: binary-husky Co-authored-by: Steven Moder --- config.py | 9 ++- request_llms/bridge_all.py | 16 +++- request_llms/oai_std_model_template.py | 108 +++++++++++++------------ themes/welcome.js | 75 ++++++++++++----- 4 files changed, 133 insertions(+), 75 deletions(-) diff --git a/config.py b/config.py index 4740717f..39df8bdd 100644 --- a/config.py +++ b/config.py @@ -13,6 +13,9 @@ API_KEY = "在此处填写APIKEY" # 可同时填写多个API-KEY,用英文 # [step 1-2]>> ( 接入通义 qwen-max ) 接入通义千问在线大模型,api-key获取地址 https://dashscope.console.aliyun.com/ DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY +# [step 1-3]>> ( 接入通义 deepseek-reasoner ) 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions" +DEEPSEEK_API_KEY = "" + # [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改;如果使用本地或无地域限制的大模型时,此处也不需要修改 USE_PROXY = False if USE_PROXY: @@ -39,7 +42,8 @@ AVAIL_LLM_MODELS = ["qwen-max", "o1-mini", "o1-mini-2024-09-12", "o1", "o1-2024- "gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo", - "gemini-1.5-pro", "chatglm3", "chatglm4" + "gemini-1.5-pro", "chatglm3", "chatglm4", + "deepseek-chat", "deepseek-coder", "deepseek-reasoner" ] EMBEDDING_MODEL = "text-embedding-3-small" @@ -261,9 +265,6 @@ MOONSHOT_API_KEY = "" # 零一万物(Yi Model) API KEY YIMODEL_API_KEY = "" -# 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions" -DEEPSEEK_API_KEY = "" - # 紫东太初大模型 https://ai-maas.wair.ac.cn TAICHU_API_KEY = "" diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 612f62ff..0679000a 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -1090,18 +1090,18 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder except: logger.error(trimmed_format_exc()) # -=-=-=-=-=-=- 幻方-深度求索大模型在线API -=-=-=-=-=-=- -if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS: +if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS or "deepseek-reasoner" in AVAIL_LLM_MODELS: try: deepseekapi_noui, deepseekapi_ui = get_predict_function( api_key_conf_name="DEEPSEEK_API_KEY", max_output_token=4096, disable_proxy=False - ) + ) model_info.update({ "deepseek-chat":{ "fn_with_ui": deepseekapi_ui, "fn_without_ui": deepseekapi_noui, "endpoint": deepseekapi_endpoint, "can_multi_thread": True, - "max_token": 32000, + "max_token": 64000, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, @@ -1114,6 +1114,16 @@ if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS: "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, + "deepseek-reasoner":{ + "fn_with_ui": deepseekapi_ui, + "fn_without_ui": deepseekapi_noui, + "endpoint": deepseekapi_endpoint, + "can_multi_thread": True, + "max_token": 64000, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + "enable_reasoning": True + }, }) except: logger.error(trimmed_format_exc()) diff --git a/request_llms/oai_std_model_template.py b/request_llms/oai_std_model_template.py index a19f05bf..973eb10d 100644 --- a/request_llms/oai_std_model_template.py +++ b/request_llms/oai_std_model_template.py @@ -36,10 +36,11 @@ def get_full_error(chunk, stream_response): def decode_chunk(chunk): """ - 用于解读"content"和"finish_reason"的内容 + 用于解读"content"和"finish_reason"的内容(如果支持思维链也会返回"reasoning_content"内容) """ chunk = chunk.decode() respose = "" + reasoning_content = "" 
finish_reason = "False" try: chunk = json.loads(chunk[6:]) @@ -57,14 +58,20 @@ def decode_chunk(chunk): return respose, finish_reason try: - respose = chunk["choices"][0]["delta"]["content"] + if chunk["choices"][0]["delta"]["content"] is not None: + respose = chunk["choices"][0]["delta"]["content"] + except: + pass + try: + if chunk["choices"][0]["delta"]["reasoning_content"] is not None: + reasoning_content = chunk["choices"][0]["delta"]["reasoning_content"] except: pass try: finish_reason = chunk["choices"][0]["finish_reason"] except: pass - return respose, finish_reason + return respose, reasoning_content, finish_reason def generate_message(input, model, key, history, max_output_token, system_prompt, temperature): @@ -149,6 +156,7 @@ def get_predict_function( observe_window = None: 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗 """ + from .bridge_all import model_info watch_dog_patience = 5 # 看门狗的耐心,设置5秒不准咬人(咬的也不是人 if len(APIKEY) == 0: raise RuntimeError(f"APIKEY为空,请检查配置文件的{APIKEY}") @@ -163,29 +171,21 @@ def get_predict_function( system_prompt=sys_prompt, temperature=llm_kwargs["temperature"], ) + + reasoning = model_info[llm_kwargs['llm_model']].get('enable_reasoning', False) + retry = 0 while True: try: - from .bridge_all import model_info - endpoint = model_info[llm_kwargs["llm_model"]]["endpoint"] - if not disable_proxy: - response = requests.post( - endpoint, - headers=headers, - proxies=proxies, - json=playload, - stream=True, - timeout=TIMEOUT_SECONDS, - ) - else: - response = requests.post( - endpoint, - headers=headers, - json=playload, - stream=True, - timeout=TIMEOUT_SECONDS, - ) + response = requests.post( + endpoint, + headers=headers, + proxies=None if disable_proxy else proxies, + json=playload, + stream=True, + timeout=TIMEOUT_SECONDS, + ) break except: retry += 1 @@ -194,10 +194,13 @@ def get_predict_function( raise TimeoutError if MAX_RETRY != 0: logger.error(f"请求超时,正在重试 ({retry}/{MAX_RETRY}) ……") - - stream_response = response.iter_lines() + result = "" finish_reason = "" + if reasoning: + resoning_buffer = "" + + stream_response = response.iter_lines() while True: try: chunk = next(stream_response) @@ -207,9 +210,9 @@ def get_predict_function( break except requests.exceptions.ConnectionError: chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。 - response_text, finish_reason = decode_chunk(chunk) + response_text, reasoning_content, finish_reason = decode_chunk(chunk) # 返回的数据流第一次为空,继续等待 - if response_text == "" and finish_reason != "False": + if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False": continue if response_text == "API_ERROR" and ( finish_reason != "False" or finish_reason != "stop" @@ -227,6 +230,8 @@ def get_predict_function( print(f"[response] {result}") break result += response_text + if reasoning: + resoning_buffer += reasoning_content if observe_window is not None: # 观测窗,把已经获取的数据显示出去 if len(observe_window) >= 1: @@ -241,6 +246,10 @@ def get_predict_function( error_msg = chunk_decoded logger.error(error_msg) raise RuntimeError("Json解析不合常规") + if reasoning: + # reasoning 的部分加上框 (>) + return '\n'.join(map(lambda x: '> ' + x, resoning_buffer.split('\n'))) + \ + '\n\n' + result return result def predict( @@ -262,6 +271,7 @@ def get_predict_function( chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 additional_fn代表点击的哪个按钮,按钮见functional.py """ + from .bridge_all import model_info if len(APIKEY) == 0: raise RuntimeError(f"APIKEY为空,请检查配置文件的{APIKEY}") if inputs == "": @@ 
-298,32 +308,23 @@ def get_predict_function( system_prompt=system_prompt, temperature=llm_kwargs["temperature"], ) + + reasoning = model_info[llm_kwargs['llm_model']].get('enable_reasoning', False) history.append(inputs) history.append("") retry = 0 while True: try: - from .bridge_all import model_info - endpoint = model_info[llm_kwargs["llm_model"]]["endpoint"] - if not disable_proxy: - response = requests.post( - endpoint, - headers=headers, - proxies=proxies, - json=playload, - stream=True, - timeout=TIMEOUT_SECONDS, - ) - else: - response = requests.post( - endpoint, - headers=headers, - json=playload, - stream=True, - timeout=TIMEOUT_SECONDS, - ) + response = requests.post( + endpoint, + headers=headers, + proxies=None if disable_proxy else proxies, + json=playload, + stream=True, + timeout=TIMEOUT_SECONDS, + ) break except: retry += 1 @@ -338,6 +339,8 @@ def get_predict_function( raise TimeoutError gpt_replying_buffer = "" + if reasoning: + gpt_reasoning_buffer = "" stream_response = response.iter_lines() while True: @@ -347,9 +350,9 @@ def get_predict_function( break except requests.exceptions.ConnectionError: chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。 - response_text, finish_reason = decode_chunk(chunk) + response_text, reasoning_content, finish_reason = decode_chunk(chunk) # 返回的数据流第一次为空,继续等待 - if response_text == "" and finish_reason != "False": + if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False": status_text = f"finish_reason: {finish_reason}" yield from update_ui( chatbot=chatbot, history=history, msg=status_text @@ -379,9 +382,14 @@ def get_predict_function( logger.info(f"[response] {gpt_replying_buffer}") break status_text = f"finish_reason: {finish_reason}" - gpt_replying_buffer += response_text - # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出 - history[-1] = gpt_replying_buffer + if reasoning: + gpt_replying_buffer += response_text + gpt_reasoning_buffer += reasoning_content + history[-1] = '\n'.join(map(lambda x: '> ' + x, gpt_reasoning_buffer.split('\n'))) + '\n\n' + gpt_replying_buffer + else: + gpt_replying_buffer += response_text + # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出 + history[-1] = gpt_replying_buffer chatbot[-1] = (history[-2], history[-1]) yield from update_ui( chatbot=chatbot, history=history, msg=status_text diff --git a/themes/welcome.js b/themes/welcome.js index 54c15d07..c07645de 100644 --- a/themes/welcome.js +++ b/themes/welcome.js @@ -2,12 +2,19 @@ class WelcomeMessage { constructor() { this.static_welcome_message = [ { - title: "环境配置教程", - content: "配置模型和插件,释放大语言模型的学术应用潜力。", - svg: "file=themes/svg/conf.svg", + title: "改变主题外观", + content: "点击「界面外观」,然后「更换UI主题」或「切换界面明暗」。", + svg: "file=themes/svg/theme.svg", url: "https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E", }, { + title: "修改回答语言偏好", + content: "点击「更改模型」,删除「System prompt」并输入「用某语言回答」。", + svg: "file=themes/svg/prompt.svg", + url: "https://github.com/binary-husky/gpt_academic", + }, + { + title: "Arxiv论文一键翻译", title: "Arxiv论文翻译", content: "无缝切换学术阅读语言,最优英文转中文的学术论文阅读体验。", svg: "file=themes/svg/arxiv.svg", @@ -19,6 +26,12 @@ class WelcomeMessage { svg: "file=themes/svg/mm.svg", url: "https://github.com/binary-husky/gpt_academic", }, + { + title: "获取多个模型的答案", + content: "输入问题后点击「询问多个GPT模型」,消耗算子低于单词询问gpt-4o。", + svg: "file=themes/svg/model_multiple.svg", + url: "https://github.com/binary-husky/gpt_academic", + }, { title: "文档与源码批处理", content: "您可以将任意文件拖入「此处」,随后调用对应插件功能。", @@ -52,7 +65,13 @@ class 
WelcomeMessage { { title: "实时语音对话", content: "配置实时语音对话功能,无须任何激活词,我将一直倾听。", - svg: "file=themes/svg/default.svg", + svg: "file=themes/svg/voice.svg", + url: "https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md", + }, + { + title: "联网回答问题", + content: "输入问题后,点击右侧插件区的「查互联网后回答」插件。", + svg: "file=themes/svg/Internet.svg", url: "https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md", }, { @@ -85,6 +104,7 @@ class WelcomeMessage { this.card_array = []; this.static_welcome_message_previous = []; this.reflesh_time_interval = 15 * 1000; + this.update_time_interval = 2 * 1000; this.major_title = "欢迎使用GPT-Academic"; const reflesh_render_status = () => { @@ -101,12 +121,19 @@ class WelcomeMessage { window.addEventListener('resize', this.update.bind(this)); // add a loop to reflesh cards this.startRefleshCards(); + this.startAutoUpdate(); } begin_render() { this.update(); } + async startAutoUpdate() { + // sleep certain time + await new Promise(r => setTimeout(r, this.update_time_interval)); + this.update(); + } + async startRefleshCards() { // sleep certain time await new Promise(r => setTimeout(r, this.reflesh_time_interval)); @@ -134,6 +161,7 @@ class WelcomeMessage { // combine two lists this.static_welcome_message_previous = not_shown_previously.concat(already_shown_previously); + this.static_welcome_message_previous = this.static_welcome_message_previous.slice(0, this.max_welcome_card_num); (async () => { // 使用 for...of 循环来处理异步操作 @@ -198,12 +226,11 @@ class WelcomeMessage { return array; } - async update() { + async can_display() { // update the card visibility const elem_chatbot = document.getElementById('gpt-chatbot'); const chatbot_top = elem_chatbot.getBoundingClientRect().top; const welcome_card_container = document.getElementsByClassName('welcome-card-container')[0]; - // detect if welcome card overflow let welcome_card_overflow = false; if (welcome_card_container) { @@ -215,22 +242,22 @@ class WelcomeMessage { var page_width = document.documentElement.clientWidth; const width_to_hide_welcome = 1200; if (!await this.isChatbotEmpty() || page_width < width_to_hide_welcome || welcome_card_overflow) { - // overflow ! 
- if (this.visible) { - // console.log("remove welcome"); - this.removeWelcome(); - this.card_array = []; - this.static_welcome_message_previous = []; - } + // cannot display + return false; + } + return true; + } + + async update() { + const can_display = await this.can_display(); + if (can_display && !this.visible) { + this.showWelcome(); return; } - if (this.visible) { - // console.log("already visible"); + if (!can_display && this.visible) { + this.removeWelcome(); return; } - // not overflow, not yet shown, then create and display welcome card - // console.log("show welcome"); - this.showWelcome(); } showCard(message) { @@ -297,6 +324,16 @@ class WelcomeMessage { }); elem_chatbot.appendChild(welcome_card_container); + const can_display = await this.can_display(); + if (!can_display) { + // undo + this.visible = false; + this.card_array = []; + this.static_welcome_message_previous = []; + elem_chatbot.removeChild(welcome_card_container); + await new Promise(r => setTimeout(r, this.update_time_interval / 2)); + return; + } // 添加显示动画 requestAnimationFrame(() => { @@ -313,6 +350,8 @@ class WelcomeMessage { welcome_card_container.classList.add('hide'); welcome_card_container.addEventListener('transitionend', () => { elem_chatbot.removeChild(welcome_card_container); + this.card_array = []; + this.static_welcome_message_previous = []; }, { once: true }); // add a fail safe timeout const timeout = 600; // 与 CSS 中 transition 的时间保持一致(1s) From c68285aeac90916007272bd2e83c03f5005d67d3 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 4 Feb 2025 16:03:01 +0800 Subject: [PATCH 7/8] update config and version --- README.md | 2 +- config.py | 4 ++-- version | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 4c10f885..f404c472 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ > [!IMPORTANT] -> `master主分支`最新动态(2025.1.28): 增加字体自定义功能 +> `master主分支`最新动态(2025.2.4): 增加deepseek-r1支持;增加字体自定义功能 > `frontier开发分支`最新动态(2024.12.9): 更新对话时间线功能,优化xelatex论文翻译 > `wiki文档`最新动态(2024.12.5): 更新ollama接入指南 > diff --git a/config.py b/config.py index 39df8bdd..11ee666c 100644 --- a/config.py +++ b/config.py @@ -13,10 +13,10 @@ API_KEY = "在此处填写APIKEY" # 可同时填写多个API-KEY,用英文 # [step 1-2]>> ( 接入通义 qwen-max ) 接入通义千问在线大模型,api-key获取地址 https://dashscope.console.aliyun.com/ DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY -# [step 1-3]>> ( 接入通义 deepseek-reasoner ) 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions" +# [step 1-3]>> ( 接入 deepseek-reasoner, 即 deepseek-r1 ) 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions" DEEPSEEK_API_KEY = "" -# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改;如果使用本地或无地域限制的大模型时,此处也不需要修改 +# [step 2]>> 改为True应用代理。如果使用本地或无地域限制的大模型时,此处不修改;如果直接在海外服务器部署,此处不修改 USE_PROXY = False if USE_PROXY: """ diff --git a/version b/version index a1885152..d6c348d5 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.92, + "version": 3.93, "show_feature": true, - "new_feature": "字体和字体大小自定义 <-> 优化前端并修复TTS的BUG <-> 添加时间线回溯功能 <-> 支持chatgpt-4o-latest <-> 增加RAG组件 <-> 升级多合一主提交键" + "new_feature": "支持deepseek-reason(r1) <-> 字体和字体大小自定义 <-> 优化前端并修复TTS的BUG <-> 添加时间线回溯功能 <-> 支持chatgpt-4o-latest <-> 增加RAG组件 <-> 升级多合一主提交键" } From 7f4b87a6335e13830faead5e10e42dd05f4824e5 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 4 Feb 2025 16:08:18 +0800 Subject: [PATCH 8/8] update readme --- README.md | 1 + themes/welcome.js | 27 ++++----------------------- 2 files changed, 5 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md 
index f404c472..5f22691d 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ > [!IMPORTANT] > `master主分支`最新动态(2025.2.4): 增加deepseek-r1支持;增加字体自定义功能 +> `master主分支`最新动态(2025.2.2): 三分钟快速接入最强qwen2.5-max[视频](https://www.bilibili.com/video/BV1LeFuerEG4) > `frontier开发分支`最新动态(2024.12.9): 更新对话时间线功能,优化xelatex论文翻译 > `wiki文档`最新动态(2024.12.5): 更新ollama接入指南 > diff --git a/themes/welcome.js b/themes/welcome.js index c07645de..6271971e 100644 --- a/themes/welcome.js +++ b/themes/welcome.js @@ -2,19 +2,12 @@ class WelcomeMessage { constructor() { this.static_welcome_message = [ { - title: "改变主题外观", - content: "点击「界面外观」,然后「更换UI主题」或「切换界面明暗」。", - svg: "file=themes/svg/theme.svg", + title: "环境配置教程", + content: "配置模型和插件,释放大语言模型的学术应用潜力。", + svg: "file=themes/svg/conf.svg", url: "https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E", }, { - title: "修改回答语言偏好", - content: "点击「更改模型」,删除「System prompt」并输入「用某语言回答」。", - svg: "file=themes/svg/prompt.svg", - url: "https://github.com/binary-husky/gpt_academic", - }, - { - title: "Arxiv论文一键翻译", title: "Arxiv论文翻译", content: "无缝切换学术阅读语言,最优英文转中文的学术论文阅读体验。", svg: "file=themes/svg/arxiv.svg", @@ -26,12 +19,6 @@ class WelcomeMessage { svg: "file=themes/svg/mm.svg", url: "https://github.com/binary-husky/gpt_academic", }, - { - title: "获取多个模型的答案", - content: "输入问题后点击「询问多个GPT模型」,消耗算子低于单词询问gpt-4o。", - svg: "file=themes/svg/model_multiple.svg", - url: "https://github.com/binary-husky/gpt_academic", - }, { title: "文档与源码批处理", content: "您可以将任意文件拖入「此处」,随后调用对应插件功能。", @@ -65,13 +52,7 @@ class WelcomeMessage { { title: "实时语音对话", content: "配置实时语音对话功能,无须任何激活词,我将一直倾听。", - svg: "file=themes/svg/voice.svg", - url: "https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md", - }, - { - title: "联网回答问题", - content: "输入问题后,点击右侧插件区的「查互联网后回答」插件。", - svg: "file=themes/svg/Internet.svg", + svg: "file=themes/svg/default.svg", url: "https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md", }, {
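
Note on the reasoning-stream handling added in patches 1, 2 and 6: the API streams OpenAI-style SSE lines whose "delta" object may carry either "content" or, for deepseek-reasoner (R1), an extra "reasoning_content" field; decode_chunk() now returns both, the chain of thought is accumulated in its own buffer, and it is rendered above the final answer as a Markdown blockquote (every CoT line prefixed with "> "). Models opt into this path through the "enable_reasoning": True flag registered in model_info. Below is a minimal self-contained sketch of that flow; the helper names and the sample chunks are illustrative, only the field layout and the "> " join come from the patches.

import json

def decode_chunk(raw: bytes):
    """Parse one 'data: {...}' SSE line into (content, reasoning_content, finish_reason)."""
    text = raw.decode("utf-8", errors="ignore")
    content, reasoning, finish_reason = "", "", "False"
    try:
        payload = json.loads(text[len("data: "):])
        choice = payload["choices"][0]
        delta = choice.get("delta", {})
        content = delta.get("content") or ""
        reasoning = delta.get("reasoning_content") or ""
        finish_reason = choice.get("finish_reason") or "False"
    except (json.JSONDecodeError, KeyError, IndexError):
        pass  # keep-alive lines, "[DONE]" markers, etc. fall through as empty strings
    return content, reasoning, finish_reason

def render_with_cot(reasoning_buffer: str, answer_buffer: str) -> str:
    """Quote every CoT line with '> ' so the UI shows the reasoning as a blockquote above the answer."""
    quoted = "\n".join("> " + line for line in reasoning_buffer.split("\n"))
    return quoted + "\n\n" + answer_buffer

# Feeding two sample chunks through the decoder:
chunks = [
    b'data: {"choices":[{"delta":{"reasoning_content":"Compare both options first."},"finish_reason":null}]}',
    b'data: {"choices":[{"delta":{"content":"Option B is cheaper."},"finish_reason":"stop"}]}',
]
reasoning_buf, answer_buf = "", ""
for c in chunks:
    content, reasoning, finish = decode_chunk(c)
    answer_buf += content
    reasoning_buf += reasoning
    if finish == "stop":
        break

print(render_with_cot(reasoning_buf, answer_buf))
# > Compare both options first.
#
# Option B is cheaper.

The same quoting is applied incrementally in the streaming UI path (history[-1] is rebuilt from both buffers on every chunk), so the blockquoted reasoning grows live while the model is still thinking and the answer is appended beneath it once normal content starts arriving.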