From 996057e588e54df709fcfbee9f5bacbc5358c049 Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Tue, 7 Nov 2023 15:41:04 +0800
Subject: [PATCH 1/3] support chatglm3

---
 config.py                              |  6 +++++-
 crazy_functions/询问多个大语言模型.py |  7 ++++---
 main.py                                |  4 ++--
 request_llms/local_llm_class.py        | 14 +++++++-------
 version                                |  2 +-
 5 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/config.py b/config.py
index 06840dd8..f578aa85 100644
--- a/config.py
+++ b/config.py
@@ -90,11 +90,15 @@ LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
                     'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
-                    "chatglm", "moss", "newbing", "claude-2"]
+                    "chatglm3", "moss", "newbing", "claude-2"]
 # P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
 #                          "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
 
 
+# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
+MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
+
+
 # 百度千帆(LLM_MODEL="qianfan")
 BAIDU_CLOUD_API_KEY = ''
 BAIDU_CLOUD_SECRET_KEY = ''

diff --git a/crazy_functions/询问多个大语言模型.py b/crazy_functions/询问多个大语言模型.py
index 80e09fcd..4210fb21 100644
--- a/crazy_functions/询问多个大语言模型.py
+++ b/crazy_functions/询问多个大语言模型.py
@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui
+from toolbox import CatchException, update_ui, get_conf
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 import datetime
 @CatchException
@@ -13,11 +13,12 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
         web_port        当前软件运行的端口号
     """
     history = []    # 清空历史,以免输入溢出
-    chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
+    MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS')
+    chatbot.append((txt, "正在同时咨询" + MULTI_QUERY_LLM_MODELS))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
 
     # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
-    llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
+    llm_kwargs['llm_model'] = MULTI_QUERY_LLM_MODELS # 支持任意数量的llm接口,用&符号分隔
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=txt, inputs_show_user=txt,
         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,

diff --git a/main.py b/main.py
index bf843825..a621deb1 100644
--- a/main.py
+++ b/main.py
@@ -433,7 +433,7 @@ def main():
             server_port=PORT,
             favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"),
             auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None,
-            blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile","gpt_log/admin"])
+            blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
 
     # 如果需要在二级路径下运行
     # CUSTOM_PATH = get_conf('CUSTOM_PATH')
@@ -442,7 +442,7 @@ def main():
     #     run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
     # else:
     #     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png",
-    #                 blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
+    #                 blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
 
 if __name__ == "__main__":
     main()

diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py
index b6f49ba4..b6ce801e 100644
--- a/request_llms/local_llm_class.py
+++ b/request_llms/local_llm_class.py
@@ -5,18 +5,18 @@ from multiprocessing import Process, Pipe
 from contextlib import redirect_stdout
 from request_llms.queued_pipe import create_queue_pipe
 
-class DebugLock(object):
+class ThreadLock(object):
     def __init__(self):
         self._lock = threading.Lock()
 
     def acquire(self):
-        print("acquiring", self)
+        # print("acquiring", self)
         #traceback.print_tb
         self._lock.acquire()
-        print("acquired", self)
+        # print("acquired", self)
 
     def release(self):
-        print("released", self)
+        # print("released", self)
         #traceback.print_tb
         self._lock.release()
 
@@ -85,7 +85,7 @@ class LocalLLMHandle(Process):
         self.is_main_process = False # state wrap for child process
         self.start()
         self.is_main_process = True # state wrap for child process
-        self.threadLock = DebugLock()
+        self.threadLock = ThreadLock()
 
     def get_state(self):
         # ⭐run in main process
@@ -159,7 +159,7 @@ class LocalLLMHandle(Process):
             try:
                 for response_full in self.llm_stream_generator(**kwargs):
                     self.child.send(response_full)
-                    print('debug' + response_full)
+                    # print('debug' + response_full)
                 self.child.send('[Finish]')
                 # 请求处理结束,开始下一个循环
             except:
@@ -200,7 +200,7 @@ class LocalLLMHandle(Process):
                 if res.startswith(self.std_tag):
                     new_output = res[len(self.std_tag):]
                     std_out = std_out[:std_out_clip_len]
-                    print(new_output, end='')
+                    # print(new_output, end='')
                     std_out = new_output + std_out
                     yield self.std_tag + '\n```\n' + std_out + '\n```\n'
                 elif res == '[Finish]':

diff --git a/version b/version
index 1470eb40..f9db97e5 100644
--- a/version
+++ b/version
@@ -1,5 +1,5 @@
 {
   "version": 3.56,
   "show_feature": true,
-  "new_feature": "支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验 <-> 新增动态代码解释器(CodeInterpreter) <-> 增加文本回答复制按钮 <-> 细分代理场合 <-> 支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
+  "new_feature": "支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
 }

From e9cf3d3d1219b365d813835e985ce57cb6b4217c Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Tue, 7 Nov 2023 15:52:08 +0800
Subject: [PATCH 2/3] version 3.57

---
 README.md | 5 +++--
 version   | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 5e1f2d4f..d8b4756c 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 > 
 > `pip install -r requirements.txt`
 > 
-> 2023.11.7: 本项目开源免费,**近期发现有人蔑视开源协议,利用本项目违法圈钱**,请各位提高警惕,谨防上当受骗。
+> 2023.11.7: 本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。
 
 
 
@@ -288,7 +288,8 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
 
 
 ### II:版本:
-- version 3.60(todo): 优化虚空终端,引入code interpreter和更多插件
+- version 3.60(todo): 优化虚空终端,并引入AutoGen作为新一代插件的基石
+- version 3.57: 支持GLM3,星火v3,文心一言v4,修复本地模型的并发BUG
 - version 3.56: 支持动态追加基础功能按钮,新汇报PDF汇总页面
 - version 3.55: 重构前端界面,引入悬浮窗口与菜单栏
 - version 3.54: 新增动态代码解释器(Code Interpreter)(待完善)

diff --git a/version b/version
index f9db97e5..5e4fb7d0 100644
--- a/version
+++ b/version
@@ -1,5 +1,5 @@
 {
-  "version": 3.56,
+  "version": 3.57,
   "show_feature": true,
   "new_feature": "支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
 }

From 12df41563a3446a8ca284b0837949d14a9025806 Mon Sep 17 00:00:00 2001
From: binary-husky
Date: Wed, 8 Nov 2023 18:40:36 +0800
Subject: [PATCH 3/3] hide audio btn border

---
 main.py            | 2 +-
 themes/default.css | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/main.py b/main.py
index a621deb1..89ca7811 100644
--- a/main.py
+++ b/main.py
@@ -94,7 +94,7 @@ def main():
                     clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm")
                 if ENABLE_AUDIO:
                     with gr.Row():
-                        audio_mic = gr.Audio(source="microphone", type="numpy", streaming=True, show_label=False).style(container=False)
+                        audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
                 with gr.Row():
                     status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
                 with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:

diff --git a/themes/default.css b/themes/default.css
index 65d5940b..7c1d400f 100644
--- a/themes/default.css
+++ b/themes/default.css
@@ -1,3 +1,8 @@
+/* 插件下拉菜单 */
+#elem_audio {
+    border-style: hidden !important;
+}
+
 .dark {
     --background-fill-primary: #050810;
     --body-background-fill: var(--background-fill-primary);
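
A note on the MULTI_QUERY_LLM_MODELS convention that Patch 1 introduces: the value is a single string of model names from AVAIL_LLM_MODELS joined by `&`, which the plugin passes through llm_kwargs['llm_model'] so every listed model is queried concurrently. A minimal sketch of the fan-out that string implies — query_model and query_all are hypothetical stand-ins, not part of this repository; the real plugin dispatches through request_gpt_model_in_new_thread_with_ui_alive instead:

```python
from concurrent.futures import ThreadPoolExecutor

MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"  # same format as in config.py

def query_model(model: str, prompt: str) -> str:
    # Hypothetical stand-in: a real implementation would call
    # the named model's backend here and return its reply.
    return f"[{model}] echo: {prompt}"

def query_all(prompt: str) -> dict:
    models = MULTI_QUERY_LLM_MODELS.split('&')  # "a&b&c" -> ["a", "b", "c"]
    # One worker thread per model, so slow backends don't block fast ones.
    with ThreadPoolExecutor(max_workers=len(models)) as pool:
        futures = {m: pool.submit(query_model, m, prompt) for m in models}
        return {m: f.result() for m, f in futures.items()}

if __name__ == "__main__":
    for model, answer in query_all("你好").items():
        print(model, "->", answer)
```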
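Patch 1 also quiets the tracing prints in request_llms/local_llm_class.py and renames DebugLock to ThreadLock. The surrounding design: LocalLLMHandle runs the local model in a child process, streams growing partial replies back over a pipe, and ends each request with a '[Finish]' sentinel, while the lock serializes concurrent callers. A reduced sketch of that sentinel-terminated streaming pattern, assuming a dummy token generator; unlike the real handle, which keeps one long-lived child process, this spawns a worker per call for brevity:

```python
import threading
from multiprocessing import Pipe, Process

def model_worker(conn):
    # Child process: receive a prompt, stream growing partial replies back,
    # then mark the end of the request with a sentinel value.
    prompt = conn.recv()
    response = ""
    for token in ["Hello", ", ", "world", "!"]:  # dummy token stream
        response += token
        conn.send(response)      # send the growing partial response
    conn.send('[Finish]')        # sentinel: request fully handled

lock = threading.Lock()          # plays the role of ThreadLock in the patch

def stream_predict(prompt):
    with lock:  # only one caller may talk to the model at a time
        parent_conn, child_conn = Pipe()
        proc = Process(target=model_worker, args=(child_conn,))
        proc.start()
        parent_conn.send(prompt)
        while True:
            res = parent_conn.recv()
            if res == '[Finish]':  # sentinel: stop reading the pipe
                break
            yield res
        proc.join()

if __name__ == "__main__":
    for partial in stream_predict("hi"):
        print(partial)
```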
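Finally, both blocked_paths lists in Patch 1's main.py change derive the admin directory from PATH_LOGGING instead of hard-coding gpt_log/admin, so the protected path follows the configured log location. blocked_paths is the Gradio launch parameter that makes the listed files unreachable through the built-in web server; a minimal sketch, assuming "gpt_log" as PATH_LOGGING's default (the replaced literal suggests as much):

```python
import gradio as gr

PATH_LOGGING = "gpt_log"  # assumed default; the project reads this from its config

with gr.Blocks() as demo:
    gr.Markdown("demo")

# Files and directories listed here are refused by Gradio's web server,
# keeping secrets such as config_private.py out of reach.
demo.launch(blocked_paths=["config.py", "config_private.py",
                           "docker-compose.yml", "Dockerfile",
                           f"{PATH_LOGGING}/admin"])
```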