typo: Fix typos and rename functions across multiple files (#2130)

* typo: Fix typos and rename functions across multiple files

This commit addresses several minor issues:
- Corrected spelling of function names (e.g., `update_ui_lastest_msg` to `update_ui_latest_msg`)
- Fixed typos in comments and variable names
- Corrected capitalization in some strings (e.g., "ArXiv" instead of "Arixv")
- Renamed some variables for consistency
- Corrected some console-related parameter names (e.g., `console_slience` to `console_silence`)

The changes span multiple files across the project, including request LLM bridges, crazy functions, and utility modules; a minimal call-site sketch of the renamed names follows the commit message below.

* fix: f-string expression part cannot include a backslash (#2139); a short illustration follows the commit message below

* raise an error when the uploaded tar contains hard/soft links (#2136); a sketch of such a check follows the commit message below

* minor bug fix

* fine tune reasoning css

* upgrade internet gpt plugin

* Update README.md

* fix GHSA-gqp5-wm97-qxcv

* typo fix

* update readme

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>
Co-authored-by: binary-husky <qingxu.fu@outlook.com>
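For orientation on the renames above, here is a minimal call-site sketch using the post-rename names. It assumes the `request_llms.bridge_all` module path referenced in the diffs below; the `llm_kwargs` fields are illustrative placeholders (a real call also needs valid API credentials and model configuration), not the project's full configuration.

```python
import time

# Hypothetical call site; the keyword names follow predict_no_ui_long_connection
# as shown in the diffs of this commit.
from request_llms.bridge_all import predict_no_ui_long_connection

llm_kwargs = {"llm_model": "gpt-3.5-turbo", "top_p": 1.0,
              "temperature": 1.0, "max_length": 4096}  # illustrative fields only
observe_window = ["", time.time()]  # slot 0 collects streamed text, slot 1 is a watchdog timestamp

reply = predict_no_ui_long_connection(
    inputs="Summarize the following paragraph ...",
    llm_kwargs=llm_kwargs,
    history=[],
    sys_prompt="You are a helpful assistant.",
    observe_window=observe_window,
    console_silence=True,  # formerly console_slience; suppresses token-by-token console printing
)
print(reply)
```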
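On the f-string fix (#2139): before Python 3.12, a backslash may not appear inside the expression part of an f-string, which raises exactly the error named in that commit title. The offending line is not shown in this excerpt; the sketch below only illustrates the general failure and the usual workaround of hoisting the backslash into a variable.

```python
lines = ["alpha", "beta"]

# SyntaxError on Python < 3.12: "f-string expression part cannot include a backslash"
# text = f"joined: {'\n'.join(lines)}"

# Workaround: keep the backslash out of the expression part
newline = "\n"
text = f"joined: {newline.join(lines)}"
print(text)  # prints "joined: alpha" and then "beta" on the next line
```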
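On the tar upload fix (#2136): rejecting archives that contain hard or soft links blocks link-based path-traversal tricks during extraction. The project's actual validation code is not shown in this excerpt, so the helper below is only a sketch of the idea using the standard `tarfile` module; the function name is made up for illustration.

```python
import tarfile

def assert_no_links_in_tar(tar_path: str) -> None:
    """Raise ValueError if any member of the archive is a hard link or a symlink."""
    with tarfile.open(tar_path, "r:*") as tar:
        for member in tar.getmembers():
            if member.issym() or member.islnk():
                raise ValueError(
                    f"Refusing to extract {tar_path!r}: member {member.name!r} is a hard/soft link"
                )

# Usage sketch: validate an uploaded archive before extracting it
# assert_no_links_in_tar("uploaded.tar.gz")
```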
This commit is contained in:
Steven Moder
2025-03-02 02:16:10 +08:00
Committed by GitHub
Parent 5dffe8627f
Current commit 4a79aa6a93
74 files changed, 325 insertions, 325 deletions


@@ -1265,9 +1265,9 @@ def LLM_CATCH_EXCEPTION(f):
"""
装饰器函数,将错误显示出来
"""
def decorated(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list, console_slience:bool):
def decorated(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list, console_silence:bool):
try:
return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_silence)
except Exception as e:
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
observe_window[0] = tb_str
@@ -1275,7 +1275,7 @@ def LLM_CATCH_EXCEPTION(f):
return decorated
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list=[], console_slience:bool=False):
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list=[], console_silence:bool=False):
"""
发送至LLM,等待回复,一次性完成,不显示中间过程。但内部尽可能地用stream的方法避免中途网线被掐。
inputs
@@ -1297,7 +1297,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
if '&' not in model:
# 如果只询问“一个”大语言模型(多数情况):
method = model_info[model]["fn_without_ui"]
return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_silence)
else:
# 如果同时询问“多个”大语言模型,这个稍微啰嗦一点,但思路相同,您不必读这个else分支
executor = ThreadPoolExecutor(max_workers=4)
@@ -1314,7 +1314,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys
method = model_info[model]["fn_without_ui"]
llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
llm_kwargs_feedin['llm_model'] = model
future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_silence)
futures.append(future)
def mutex_manager(window_mutex, observe_window):


@@ -139,7 +139,7 @@ global glmft_handle
glmft_handle = None
#################################################################################
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
observe_window:list=[], console_silence:bool=False):
"""
多线程方法
函数的说明请见 request_llms/bridge_all.py


@@ -125,7 +125,7 @@ def verify_endpoint(endpoint):
raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
return endpoint
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_slience:bool=False):
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_silence:bool=False):
"""
发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
inputs
@@ -203,7 +203,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
if (not has_content) and (not has_role): continue # raise RuntimeError("发现不标准的第三方接口:"+delta)
if has_content: # has_role = True/False
result += delta["content"]
if not console_slience: print(delta["content"], end='')
if not console_silence: print(delta["content"], end='')
if observe_window is not None:
# 观测窗,把已经获取的数据显示出去
if len(observe_window) >= 1:
@@ -231,7 +231,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
inputs 是本次问询的输入
top_p, temperature是chatGPT的内部调优参数
history 是之前的对话列表注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
chatbot 为WebUI中显示的对话列表,修改它,然后yield出去,可以直接修改对话界面内容
additional_fn代表点击的哪个按钮,按钮见functional.py
"""
from request_llms.bridge_all import model_info


@@ -16,7 +16,7 @@ import base64
import glob
from loguru import logger
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder, \
update_ui_lastest_msg, get_max_token, encode_image, have_any_recent_upload_image_files, log_chat
update_ui_latest_msg, get_max_token, encode_image, have_any_recent_upload_image_files, log_chat
proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
@@ -67,7 +67,7 @@ def verify_endpoint(endpoint):
"""
return endpoint
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_silence=False):
raise NotImplementedError
@@ -183,7 +183,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
# 判定为数据流的结束,gpt_replying_buffer也写完了
lastmsg = chatbot[-1][-1] + f"\n\n\n\n{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。」"
yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1)
yield from update_ui_latest_msg(lastmsg, chatbot, history, delay=1)
log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
break
# 处理数据流的主体


@@ -69,7 +69,7 @@ def decode_chunk(chunk):
return need_to_pass, chunkjson, is_last_chunk
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_silence=False):
"""
发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
inputs
@@ -151,7 +151,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
inputs 是本次问询的输入
top_p, temperature是chatGPT的内部调优参数
history 是之前的对话列表注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
chatbot 为WebUI中显示的对话列表,修改它,然后yield出去,可以直接修改对话界面内容
additional_fn代表点击的哪个按钮,按钮见functional.py
"""
if inputs == "": inputs = "空空如也的输入栏"


@@ -68,7 +68,7 @@ def verify_endpoint(endpoint):
raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
return endpoint
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_slience:bool=False):
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_silence:bool=False):
"""
发送,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
inputs
@@ -111,7 +111,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
if chunkjson['event_type'] == 'stream-start': continue
if chunkjson['event_type'] == 'text-generation':
result += chunkjson["text"]
if not console_slience: print(chunkjson["text"], end='')
if not console_silence: print(chunkjson["text"], end='')
if observe_window is not None:
# 观测窗,把已经获取的数据显示出去
if len(observe_window) >= 1:
@@ -132,7 +132,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
inputs 是本次问询的输入
top_p, temperature是chatGPT的内部调优参数
history 是之前的对话列表注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
chatbot 为WebUI中显示的对话列表,修改它,然后yield出去,可以直接修改对话界面内容
additional_fn代表点击的哪个按钮,按钮见functional.py
"""
# if is_any_api_key(inputs):


@@ -8,7 +8,7 @@ import os
import time
from request_llms.com_google import GoogleChatInit
from toolbox import ChatBotWithCookies
from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc, log_chat, encode_image
from toolbox import get_conf, update_ui, update_ui_latest_msg, have_any_recent_upload_image_files, trimmed_format_exc, log_chat, encode_image
proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
@@ -16,7 +16,7 @@ timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=[],
console_slience:bool=False):
console_silence:bool=False):
# 检查API_KEY
if get_conf("GEMINI_API_KEY") == "":
raise ValueError(f"请配置 GEMINI_API_KEY。")
@@ -60,7 +60,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
# 检查API_KEY
if get_conf("GEMINI_API_KEY") == "":
yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
yield from update_ui_latest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
return
# 适配润色区域


@@ -55,7 +55,7 @@ class GetGLMHandle(Process):
if self.jittorllms_model is None:
device = get_conf('LOCAL_MODEL_DEVICE')
from .jittorllms.models import get_model
# availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
# available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
args_dict = {'model': 'llama'}
print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
@@ -107,7 +107,7 @@ global llama_glm_handle
llama_glm_handle = None
#################################################################################
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
observe_window:list=[], console_silence:bool=False):
"""
多线程方法
函数的说明请见 request_llms/bridge_all.py


@@ -55,7 +55,7 @@ class GetGLMHandle(Process):
if self.jittorllms_model is None:
device = get_conf('LOCAL_MODEL_DEVICE')
from .jittorllms.models import get_model
# availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
# available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
args_dict = {'model': 'pangualpha'}
print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
@@ -107,7 +107,7 @@ global pangu_glm_handle
pangu_glm_handle = None
#################################################################################
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
observe_window:list=[], console_silence:bool=False):
"""
多线程方法
函数的说明请见 request_llms/bridge_all.py


@@ -55,7 +55,7 @@ class GetGLMHandle(Process):
if self.jittorllms_model is None:
device = get_conf('LOCAL_MODEL_DEVICE')
from .jittorllms.models import get_model
# availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
# available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
args_dict = {'model': 'chatrwkv'}
print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
@@ -107,7 +107,7 @@ global rwkv_glm_handle
rwkv_glm_handle = None
#################################################################################
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
observe_window:list=[], console_silence:bool=False):
"""
多线程方法
函数的说明请见 request_llms/bridge_all.py


@@ -46,8 +46,8 @@ class GetLlamaHandle(LocalLLMHandle):
top_p = kwargs['top_p']
temperature = kwargs['temperature']
history = kwargs['history']
console_slience = kwargs.get('console_slience', True)
return query, max_length, top_p, temperature, history, console_slience
console_silence = kwargs.get('console_silence', True)
return query, max_length, top_p, temperature, history, console_silence
def convert_messages_to_prompt(query, history):
prompt = ""
@@ -57,7 +57,7 @@ class GetLlamaHandle(LocalLLMHandle):
prompt += f"\n[INST]{query}[/INST]"
return prompt
query, max_length, top_p, temperature, history, console_slience = adaptor(kwargs)
query, max_length, top_p, temperature, history, console_silence = adaptor(kwargs)
prompt = convert_messages_to_prompt(query, history)
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-
# code from transformers.llama
@@ -72,9 +72,9 @@ class GetLlamaHandle(LocalLLMHandle):
generated_text = ""
for new_text in streamer:
generated_text += new_text
if not console_slience: print(new_text, end='')
if not console_silence: print(new_text, end='')
yield generated_text.lstrip(prompt_tk_back).rstrip("</s>")
if not console_slience: print()
if not console_silence: print()
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-
def try_to_import_special_deps(self, **kwargs):


@@ -169,7 +169,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result)
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None,
console_slience=False):
console_silence=False):
gpt_bro_init = MoonShotInit()
watch_dog_patience = 60 # 看门狗的耐心, 设置10秒即可
stream_response = gpt_bro_init.generate_messages(inputs, llm_kwargs, history, sys_prompt, True)


@@ -95,7 +95,7 @@ class GetGLMHandle(Process):
- Its responses must not be vague, accusatory, rude, controversial, off-topic, or defensive.
- It should avoid giving subjective opinions but rely on objective facts or phrases like \"in this context a human might say...\", \"some people might think...\", etc.
- Its responses must also be positive, polite, interesting, entertaining, and engaging.
- It can provide additional relevant details to answer in-depth and comprehensively covering mutiple aspects.
- It can provide additional relevant details to answer in-depth and comprehensively covering multiple aspects.
- It apologizes and accepts the user's suggestion if the user corrects the incorrect answer generated by MOSS.
Capabilities and tools that MOSS can possess.
"""
@@ -172,7 +172,7 @@ global moss_handle
moss_handle = None
#################################################################################
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
observe_window:list=[], console_silence:bool=False):
"""
多线程方法
函数的说明请见 request_llms/bridge_all.py


@@ -209,7 +209,7 @@ def predict_no_ui_long_connection(
history=[],
sys_prompt="",
observe_window=[],
console_slience=False,
console_silence=False,
):
"""
多线程方法


@@ -52,7 +52,7 @@ def decode_chunk(chunk):
pass
return chunk_decoded, chunkjson, is_last_chunk
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_silence=False):
"""
发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
inputs
@@ -99,7 +99,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
logger.info(f'[response] {result}')
break
result += chunkjson['message']["content"]
if not console_slience: print(chunkjson['message']["content"], end='')
if not console_silence: print(chunkjson['message']["content"], end='')
if observe_window is not None:
# 观测窗,把已经获取的数据显示出去
if len(observe_window) >= 1:
@@ -124,7 +124,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
inputs 是本次问询的输入
top_p, temperature是chatGPT的内部调优参数
history 是之前的对话列表注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
chatbot 为WebUI中显示的对话列表,修改它,然后yield出去,可以直接修改对话界面内容
additional_fn代表点击的哪个按钮,按钮见functional.py
"""
if inputs == "": inputs = "空空如也的输入栏"


@@ -119,7 +119,7 @@ def verify_endpoint(endpoint):
raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
return endpoint
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_slience:bool=False):
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_silence:bool=False):
"""
发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
inputs
@@ -188,7 +188,7 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
if (not has_content) and (not has_role): continue # raise RuntimeError("发现不标准的第三方接口:"+delta)
if has_content: # has_role = True/False
result += delta["content"]
if not console_slience: print(delta["content"], end='')
if not console_silence: print(delta["content"], end='')
if observe_window is not None:
# 观测窗,把已经获取的数据显示出去
if len(observe_window) >= 1:
@@ -213,7 +213,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
inputs 是本次问询的输入
top_p, temperature是chatGPT的内部调优参数
history 是之前的对话列表注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
chatbot 为WebUI中显示的对话列表,修改它,然后yield出去,可以直接修改对话界面内容
additional_fn代表点击的哪个按钮,按钮见functional.py
"""
from request_llms.bridge_all import model_info


@@ -121,7 +121,7 @@ def generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
observe_window:list=[], console_silence:bool=False):
"""
⭐多线程方法
函数的说明请见 request_llms/bridge_all.py


@@ -1,12 +1,12 @@
import time
import os
from toolbox import update_ui, get_conf, update_ui_lastest_msg
from toolbox import update_ui, get_conf, update_ui_latest_msg
from toolbox import check_packages, report_exception, log_chat
model_name = 'Qwen'
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
observe_window:list=[], console_silence:bool=False):
"""
⭐多线程方法
函数的说明请见 request_llms/bridge_all.py
@@ -35,13 +35,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
try:
check_packages(["dashscope"])
except:
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade dashscope```。",
yield from update_ui_latest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade dashscope```。",
chatbot=chatbot, history=history, delay=0)
return
# 检查DASHSCOPE_API_KEY
if get_conf("DASHSCOPE_API_KEY") == "":
yield from update_ui_lastest_msg(f"请配置 DASHSCOPE_API_KEY。",
yield from update_ui_latest_msg(f"请配置 DASHSCOPE_API_KEY。",
chatbot=chatbot, history=history, delay=0)
return


@@ -1,5 +1,5 @@
import time
from toolbox import update_ui, get_conf, update_ui_lastest_msg
from toolbox import update_ui, get_conf, update_ui_latest_msg
from toolbox import check_packages, report_exception
model_name = '云雀大模型'
@@ -10,7 +10,7 @@ def validate_key():
return True
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
observe_window:list=[], console_silence:bool=False):
"""
⭐ 多线程方法
函数的说明请见 request_llms/bridge_all.py
@@ -42,12 +42,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
try:
check_packages(["zhipuai"])
except:
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
yield from update_ui_latest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
chatbot=chatbot, history=history, delay=0)
return
if validate_key() is False:
yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置HUOSHAN_API_KEY", chatbot=chatbot, history=history, delay=0)
yield from update_ui_latest_msg(lastmsg="[Local Message] 请配置HUOSHAN_API_KEY", chatbot=chatbot, history=history, delay=0)
return
if additional_fn is not None:


@@ -2,7 +2,7 @@
import time
import threading
import importlib
from toolbox import update_ui, get_conf, update_ui_lastest_msg
from toolbox import update_ui, get_conf, update_ui_latest_msg
from multiprocessing import Process, Pipe
model_name = '星火认知大模型'
@@ -14,7 +14,7 @@ def validate_key():
return True
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
observe_window:list=[], console_silence:bool=False):
"""
⭐多线程方法
函数的说明请见 request_llms/bridge_all.py
@@ -43,7 +43,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
yield from update_ui(chatbot=chatbot, history=history)
if validate_key() is False:
yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置讯飞星火大模型的XFYUN_APPID, XFYUN_API_KEY, XFYUN_API_SECRET", chatbot=chatbot, history=history, delay=0)
yield from update_ui_latest_msg(lastmsg="[Local Message] 请配置讯飞星火大模型的XFYUN_APPID, XFYUN_API_KEY, XFYUN_API_SECRET", chatbot=chatbot, history=history, delay=0)
return
if additional_fn is not None:


@@ -225,7 +225,7 @@ def predict_no_ui_long_connection(
history=[],
sys_prompt="",
observe_window=None,
console_slience=False,
console_silence=False,
):
"""
多线程方法


@@ -1,6 +1,6 @@
import time
import os
from toolbox import update_ui, get_conf, update_ui_lastest_msg, log_chat
from toolbox import update_ui, get_conf, update_ui_latest_msg, log_chat
from toolbox import check_packages, report_exception, have_any_recent_upload_image_files
from toolbox import ChatBotWithCookies
@@ -13,7 +13,7 @@ def validate_key():
return True
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
observe_window:list=[], console_silence:bool=False):
"""
⭐多线程方法
函数的说明请见 request_llms/bridge_all.py
@@ -49,7 +49,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
yield from update_ui(chatbot=chatbot, history=history)
if validate_key() is False:
yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0)
yield from update_ui_latest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0)
return
if additional_fn is not None:


@@ -91,7 +91,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
inputs 是本次问询的输入
top_p, temperature是chatGPT的内部调优参数
history 是之前的对话列表注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
chatbot 为WebUI中显示的对话列表,修改它,然后yield出去,可以直接修改对话界面内容
additional_fn代表点击的哪个按钮,按钮见functional.py
"""
if additional_fn is not None:
@@ -112,7 +112,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
mutable = ["", time.time()]
def run_coorotine(mutable):
def run_coroutine(mutable):
async def get_result(mutable):
# "tgui:galactica-1.3b@localhost:7860"
@@ -126,7 +126,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
break
asyncio.run(get_result(mutable))
thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True)
thread_listen = threading.Thread(target=run_coroutine, args=(mutable,), daemon=True)
thread_listen.start()
while thread_listen.is_alive():
@@ -142,7 +142,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_silence=False):
raw_input = "What I would like to say is the following: " + inputs
prompt = raw_input
tgui_say = ""
@@ -151,7 +151,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
addr, port = addr_port.split(':')
def run_coorotine(observe_window):
def run_coroutine(observe_window):
async def get_result(observe_window):
async for response in run(context=prompt, max_token=llm_kwargs['max_length'],
temperature=llm_kwargs['temperature'],
@@ -162,6 +162,6 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
print('exit when no listener')
break
asyncio.run(get_result(observe_window))
thread_listen = threading.Thread(target=run_coorotine, args=(observe_window,))
thread_listen = threading.Thread(target=run_coroutine, args=(observe_window,))
thread_listen.start()
return observe_window[0]


@@ -1,6 +1,6 @@
import time
import os
from toolbox import update_ui, get_conf, update_ui_lastest_msg, log_chat
from toolbox import update_ui, get_conf, update_ui_latest_msg, log_chat
from toolbox import check_packages, report_exception, have_any_recent_upload_image_files
from toolbox import ChatBotWithCookies
@@ -18,7 +18,7 @@ def make_media_input(inputs, image_paths):
return inputs
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
observe_window:list=[], console_silence:bool=False):
"""
⭐多线程方法
函数的说明请见 request_llms/bridge_all.py
@@ -57,12 +57,12 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
try:
check_packages(["zhipuai"])
except:
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
yield from update_ui_latest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
chatbot=chatbot, history=history, delay=0)
return
if validate_key() is False:
yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0)
yield from update_ui_latest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0)
return
if additional_fn is not None:


@@ -216,7 +216,7 @@ class LocalLLMHandle(Process):
def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='classic'):
load_message = f"{model_name}尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,{model_name}消耗大量的内存CPU或显存GPU,也许会导致低配计算机卡死 ……"
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=[], console_slience:bool=False):
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=[], console_silence:bool=False):
"""
refer to request_llms/bridge_all.py
"""


@@ -4,7 +4,7 @@ import traceback
import requests
from loguru import logger
from toolbox import get_conf, is_the_upload_folder, update_ui, update_ui_lastest_msg
from toolbox import get_conf, is_the_upload_folder, update_ui, update_ui_latest_msg
proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf(
"proxies", "TIMEOUT_SECONDS", "MAX_RETRY"
@@ -350,14 +350,14 @@ def get_predict_function(
chunk = next(stream_response)
except StopIteration:
if wait_counter != 0 and gpt_replying_buffer == "":
yield from update_ui_lastest_msg(lastmsg="模型调用失败 ...", chatbot=chatbot, history=history, msg="failed")
yield from update_ui_latest_msg(lastmsg="模型调用失败 ...", chatbot=chatbot, history=history, msg="failed")
break
except requests.exceptions.ConnectionError:
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
response_text, reasoning_content, finish_reason, decoded_chunk = decode_chunk(chunk)
if decoded_chunk == ': keep-alive':
wait_counter += 1
yield from update_ui_lastest_msg(lastmsg="等待中 " + "".join(["."] * (wait_counter%10)), chatbot=chatbot, history=history, msg="waiting ...")
yield from update_ui_latest_msg(lastmsg="等待中 " + "".join(["."] * (wait_counter%10)), chatbot=chatbot, history=history, msg="waiting ...")
continue
# 返回的数据流第一次为空,继续等待
if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False":