diff --git a/config.py b/config.py index 611b1589..ea603c3b 100644 --- a/config.py +++ b/config.py @@ -212,7 +212,7 @@ ALLOW_RESET_CONFIG = False # 在使用AutoGen插件时,是否使用Docker容器运行代码 -AUTOGEN_USE_DOCKER = True +AUTOGEN_USE_DOCKER = False # 临时的上传文件夹位置,请勿修改 diff --git a/crazy_functional.py b/crazy_functional.py index 2e94570c..1e7ca584 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -74,7 +74,7 @@ def get_crazy_functions(): "批量总结Word文档": { "Group": "学术", "Color": "stop", - "AsButton": True, + "AsButton": False, "Info": "批量总结word文档 | 输入参数为路径", "Function": HotReload(总结word文档) }, @@ -178,6 +178,13 @@ def get_crazy_functions(): "Info": "批量生成函数的注释 | 输入参数为路径", "Function": HotReload(批量生成函数注释) }, + "精准翻译PDF论文": { + "Group": "学术", + "Color": "stop", + "AsButton": True, + "Info": "精准翻译PDF论文为中文 | 输入参数为路径", + "Function": HotReload(批量翻译PDF文档) + }, "保存当前的对话": { "Group": "对话", "AsButton": True, @@ -196,13 +203,6 @@ def get_crazy_functions(): "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数", "Function": HotReload(高阶功能模板函数) }, - "精准翻译PDF论文": { - "Group": "学术", - "Color": "stop", - "AsButton": True, - "Info": "精准翻译PDF论文为中文 | 输入参数为路径", - "Function": HotReload(批量翻译PDF文档) - }, "询问多个GPT模型": { "Group": "对话", "Color": "stop", @@ -561,18 +561,15 @@ def get_crazy_functions(): except: print('Load function plugin failed') - # try: - # from crazy_functions.多智能体 import 多智能体终端 - # function_plugins.update({ - # "多智能体终端(微软AutoGen)": { - # "Group": "智能体", - # "Color": "stop", - # "AsButton": True, - # "Function": HotReload(多智能体终端) - # } - # }) - # except: - # print('Load function plugin failed') + from crazy_functions.多智能体 import 多智能体终端 + function_plugins.update({ + "AutoGen多智能体终端": { + "Group": "智能体", + "Color": "stop", + "AsButton": True, + "Function": HotReload(多智能体终端) + } + }) # try: # from crazy_functions.chatglm微调工具 import 微调数据集生成 diff --git a/crazy_functions/Latex全文润色.py b/crazy_functions/Latex全文润色.py index 268a3446..0bc7d401 100644 --- a/crazy_functions/Latex全文润色.py +++ b/crazy_functions/Latex全文润色.py @@ -1,5 +1,5 @@ from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder -from toolbox import CatchException, report_execption, write_history_to_file, zip_folder +from toolbox import CatchException, report_exception, write_history_to_file, zip_folder class PaperFileGroup(): @@ -146,7 +146,7 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -157,12 +157,12 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en') @@ -184,7 
+184,7 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -195,12 +195,12 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh') @@ -220,7 +220,7 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -231,12 +231,12 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='proofread') diff --git a/crazy_functions/Latex全文翻译.py b/crazy_functions/Latex全文翻译.py index 697f5ac8..846bd80d 100644 --- a/crazy_functions/Latex全文翻译.py +++ b/crazy_functions/Latex全文翻译.py @@ -1,5 +1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, write_history_to_file +from toolbox import CatchException, report_exception, write_history_to_file fast_debug = False class PaperFileGroup(): @@ -117,7 +117,7 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -128,12 +128,12 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield 
from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh') @@ -154,7 +154,7 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -165,12 +165,12 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') \ No newline at end of file diff --git a/crazy_functions/Latex输出PDF结果.py b/crazy_functions/Latex输出PDF结果.py index 9edfea68..a2545ddd 100644 --- a/crazy_functions/Latex输出PDF结果.py +++ b/crazy_functions/Latex输出PDF结果.py @@ -1,5 +1,5 @@ from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip_result, gen_time_str +from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str from functools import partial import glob, os, requests, time pj = os.path.join @@ -171,12 +171,12 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -249,7 +249,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history = [] txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) if txt.endswith('.pdf'): - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ 
-258,13 +258,13 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/agent_fns/auto_agent.py b/crazy_functions/agent_fns/auto_agent.py index 16ca2959..4f8fda9d 100644 --- a/crazy_functions/agent_fns/auto_agent.py +++ b/crazy_functions/agent_fns/auto_agent.py @@ -1,8 +1,8 @@ from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate -from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton +from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom from crazy_functions.agent_fns.general import AutoGenGeneral -import time + class AutoGenMath(AutoGenGeneral): diff --git a/crazy_functions/agent_fns/general.py b/crazy_functions/agent_fns/general.py index beb6d7eb..49bc4dc8 100644 --- a/crazy_functions/agent_fns/general.py +++ b/crazy_functions/agent_fns/general.py @@ -1,23 +1,50 @@ -from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate -from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton +from toolbox import trimmed_format_exc, get_conf, ProxyNetworkActivate from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom +from request_llms.bridge_all import predict_no_ui_long_connection import time +def gpt_academic_generate_oai_reply( + self, + messages, + sender, + config, +): + llm_config = self.llm_config if config is None else config + if llm_config is False: + return False, None + if messages is None: + messages = self._oai_messages[sender] + + inputs = messages[-1]['content'] + history = [] + for message in messages[:-1]: + history.append(message['content']) + context=messages[-1].pop("context", None) + assert context is None, "预留参数 context 未实现" + + reply = predict_no_ui_long_connection( + inputs=inputs, + llm_kwargs=llm_config, + history=history, + sys_prompt=self._oai_system_message[0]['content'], + console_slience=True + ) + assumed_done = reply.endswith('\nTERMINATE') + return True, reply class AutoGenGeneral(PluginMultiprocessManager): - def gpt_academic_print_override(self, user_proxy, message, sender): - # ⭐⭐ 子进程执行 - self.child_conn.send(PipeCom("show", sender.name + '\n\n---\n\n' + message['content'])) + # ⭐⭐ run in subprocess + self.child_conn.send(PipeCom("show", sender.name + "\n\n---\n\n" + message["content"])) def gpt_academic_get_human_input(self, user_proxy, message): - # ⭐⭐ 子进程执行 + # ⭐⭐ run in subprocess patience = 300 begin_waiting_time = time.time() self.child_conn.send(PipeCom("interact", message)) while True: time.sleep(0.5) - if self.child_conn.poll(): + if self.child_conn.poll(): wait_success = True break if time.time() - begin_waiting_time > patience: @@ -32,26 +59,26 @@ class AutoGenGeneral(PluginMultiprocessManager): 
def define_agents(self): raise NotImplementedError - def do_audogen(self, input): - # ⭐⭐ 子进程执行 + def exe_autogen(self, input): + # ⭐⭐ run in subprocess input = input.content with ProxyNetworkActivate("AutoGen"): - config_list = self.get_config_list() - code_execution_config={"work_dir": self.autogen_work_dir, "use_docker":self.use_docker} + code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker} agents = self.define_agents() user_proxy = None assistant = None for agent_kwargs in agents: agent_cls = agent_kwargs.pop('cls') kwargs = { - 'llm_config':{ - "config_list": config_list, - }, + 'llm_config':self.llm_kwargs, 'code_execution_config':code_execution_config } kwargs.update(agent_kwargs) agent_handle = agent_cls(**kwargs) agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b) + for d in agent_handle._reply_func_list: + if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply': + d['reply_func'] = gpt_academic_generate_oai_reply if agent_kwargs['name'] == 'user_proxy': agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a) user_proxy = agent_handle @@ -63,23 +90,45 @@ class AutoGenGeneral(PluginMultiprocessManager): tb_str = '```\n' + trimmed_format_exc() + '```' self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str)) - def get_config_list(self): - model = self.llm_kwargs['llm_model'] - api_base = None - if self.llm_kwargs['llm_model'].startswith('api2d-'): - model = self.llm_kwargs['llm_model'][len('api2d-'):] - api_base = "https://openai.api2d.net/v1" - config_list = [{ - 'model': model, - 'api_key': self.llm_kwargs['api_key'], - },] - if api_base is not None: - config_list[0]['api_base'] = api_base - return config_list - def subprocess_worker(self, child_conn): - # ⭐⭐ 子进程执行 + # ⭐⭐ run in subprocess self.child_conn = child_conn while True: - msg = self.child_conn.recv() # PipeCom - self.do_audogen(msg) + msg = self.child_conn.recv() # PipeCom + self.exe_autogen(msg) + + +class AutoGenGroupChat(AutoGenGeneral): + def exe_autogen(self, input): + # ⭐⭐ run in subprocess + import autogen + + input = input.content + with ProxyNetworkActivate("AutoGen"): + code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker} + agents = self.define_agents() + agents_instances = [] + for agent_kwargs in agents: + agent_cls = agent_kwargs.pop("cls") + kwargs = {"code_execution_config": code_execution_config} + kwargs.update(agent_kwargs) + agent_handle = agent_cls(**kwargs) + agent_handle._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b) + agents_instances.append(agent_handle) + if agent_kwargs["name"] == "user_proxy": + user_proxy = agent_handle + user_proxy.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a) + try: + groupchat = autogen.GroupChat(agents=agents_instances, messages=[], max_round=50) + manager = autogen.GroupChatManager(groupchat=groupchat, **self.define_group_chat_manager_config()) + manager._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b) + manager.get_human_input = lambda a: self.gpt_academic_get_human_input(manager, a) + if user_proxy is None: + raise Exception("user_proxy is not defined") + user_proxy.initiate_chat(manager, message=input) + except Exception: + tb_str = "```\n" + trimmed_format_exc() + "```" + self.child_conn.send(PipeCom("done", "AutoGen exe failed: \n\n" + tb_str)) + 
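The `_reply_func_list` loop in `exe_autogen` above is the core of this change: it reroutes AutoGen's built-in `generate_oai_reply` hook to `gpt_academic_generate_oai_reply`, so every agent's LLM call flows through gpt_academic's `predict_no_ui_long_connection` instead of AutoGen's own OpenAI client (which is also why `get_config_list` is deleted in this file). A minimal sketch of the swap mechanism, assuming only that `agent` exposes AutoGen's internal `_reply_func_list` hook table; `custom_reply` is a hypothetical stand-in, not part of the patch:

```python
def custom_reply(self, messages=None, sender=None, config=None):
    # AutoGen reply functions return (final, reply); final=True ends the hook chain.
    last_input = messages[-1]["content"] if messages else ""
    return True, f"echo: {last_input}"

def reroute_generate_oai_reply(agent, new_reply_func=custom_reply):
    # Swap only the hook that would call the OpenAI endpoint directly,
    # keeping each entry's trigger conditions intact.
    for entry in agent._reply_func_list:
        old_fn = entry["reply_func"]
        if getattr(old_fn, "__name__", None) == "generate_oai_reply":
            entry["reply_func"] = new_reply_func
```

With the hook swapped, model routing, API-key selection and proxy handling stay inside gpt_academic's request layer, which is what lets the plugin drop the OpenAI-specific `config_list` and accept any backend that `llm_kwargs` selects.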
+ def define_group_chat_manager_config(self): + raise NotImplementedError diff --git a/crazy_functions/agent_fns/pipe.py b/crazy_functions/agent_fns/pipe.py index 5ebe3fc6..bb3bc785 100644 --- a/crazy_functions/agent_fns/pipe.py +++ b/crazy_functions/agent_fns/pipe.py @@ -2,28 +2,28 @@ from toolbox import get_log_folder, update_ui, gen_time_str, get_conf, promote_f from crazy_functions.agent_fns.watchdog import WatchDog import time, os -class PipeCom(): +class PipeCom: def __init__(self, cmd, content) -> None: self.cmd = cmd self.content = content -class PluginMultiprocessManager(): +class PluginMultiprocessManager: def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # ⭐ 主进程 - self.autogen_work_dir = os.path.join(get_log_folder('autogen'), gen_time_str()) + # ⭐ run in main process + self.autogen_work_dir = os.path.join(get_log_folder("autogen"), gen_time_str()) self.previous_work_dir_files = {} self.llm_kwargs = llm_kwargs self.plugin_kwargs = plugin_kwargs self.chatbot = chatbot self.history = history self.system_prompt = system_prompt - self.web_port = web_port + # self.web_port = web_port self.alive = True - self.use_docker = get_conf('AUTOGEN_USE_DOCKER') - + self.use_docker = get_conf("AUTOGEN_USE_DOCKER") + self.last_user_input = "" # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time - timeout_seconds = 5*60 + timeout_seconds = 5 * 60 self.heartbeat_watchdog = WatchDog(timeout=timeout_seconds, bark_fn=self.terminate, interval=5) self.heartbeat_watchdog.begin_watch() @@ -35,8 +35,9 @@ class PluginMultiprocessManager(): return self.alive def launch_subprocess_with_pipe(self): - # ⭐ 主进程 + # ⭐ run in main process from multiprocessing import Process, Pipe + parent_conn, child_conn = Pipe() self.p = Process(target=self.subprocess_worker, args=(child_conn,)) self.p.daemon = True @@ -46,15 +47,22 @@ def terminate(self): self.p.terminate() self.alive = False - print('[debug] instance terminated') + print("[debug] instance terminated") def subprocess_worker(self, child_conn): - # ⭐⭐ 子进程 + # ⭐⭐ run in subprocess raise NotImplementedError def send_command(self, cmd): - # ⭐ 主进程 + # ⭐ run in main process + repeated = False + if cmd == self.last_user_input: + repeated = True + cmd = "" + else: + self.last_user_input = cmd self.parent_conn.send(PipeCom("user_input", cmd)) + return repeated, cmd def immediate_showoff_when_possible(self, fp): # ⭐ 主进程 @@ -63,7 +71,10 @@ # 如果是文本文件, 则直接显示文本内容 if file_type.lower() in ['png', 'jpg']: image_path = os.path.abspath(fp) - self.chatbot.append(['检测到新生图像:', f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'])
+ self.chatbot.append([ + '检测到新生图像:', + f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>
' + ]) yield from update_ui(chatbot=self.chatbot, history=self.history) def overwatch_workdir_file_change(self): @@ -78,7 +89,7 @@ file_path = os.path.join(root, file) if file_path not in self.previous_work_dir_files.keys(): last_modified_time = os.stat(file_path).st_mtime - self.previous_work_dir_files.update({file_path:last_modified_time}) + self.previous_work_dir_files.update({file_path: last_modified_time}) change_list.append(file_path) else: last_modified_time = os.stat(file_path).st_mtime @@ -86,8 +97,8 @@ self.previous_work_dir_files[file_path] = last_modified_time change_list.append(file_path) if len(change_list) > 0: - file_links = '' - for f in change_list: + file_links = "" + for f in change_list: res = promote_file_to_downloadzone(f) file_links += f'<br/>
{res}' yield from self.immediate_showoff_when_possible(f) @@ -102,7 +113,7 @@ if create_or_resume == 'create': self.cnt = 1 self.parent_conn = self.launch_subprocess_with_pipe() # ⭐⭐⭐ - self.send_command(txt) + repeated, cmd_to_autogen = self.send_command(txt) if txt == 'exit': self.chatbot.append([f"结束", "结束信号已明确,终止AutoGen程序。"]) yield from update_ui(chatbot=self.chatbot, history=self.history) @@ -117,19 +128,27 @@ # the heartbeat watchdog might have it killed self.terminate() return "terminate" - if self.parent_conn.poll(): self.feed_heartbeat_watchdog() + if "[GPT-Academic] 等待中" in self.chatbot[-1][-1]: + self.chatbot.pop(-1) # remove the last line + if "等待您的进一步指令" in self.chatbot[-1][-1]: + self.chatbot.pop(-1) # remove the last line if '[GPT-Academic] 等待中' in self.chatbot[-1][-1]: self.chatbot.pop(-1) # remove the last line msg = self.parent_conn.recv() # PipeCom if msg.cmd == "done": - self.chatbot.append([f"结束", msg.content]); self.cnt += 1 + self.chatbot.append([f"结束", msg.content]) + self.cnt += 1 yield from update_ui(chatbot=self.chatbot, history=self.history) - self.terminate(); break + self.terminate() + break if msg.cmd == "show": yield from self.overwatch_workdir_file_change() - self.chatbot.append([f"运行阶段-{self.cnt}", msg.content]); self.cnt += 1 + notice = "" + if repeated: notice = "(自动忽略重复的输入)" + self.chatbot.append([f"运行阶段-{self.cnt}(上次用户反馈输入为: 「{cmd_to_autogen}」{notice}", msg.content]) + self.cnt += 1 yield from update_ui(chatbot=self.chatbot, history=self.history) if msg.cmd == "interact": yield from self.overwatch_workdir_file_change() @@ -159,13 +178,13 @@ return "terminate" def subprocess_worker_wait_user_feedback(self, wait_msg="wait user feedback"): - # ⭐⭐ 子进程 + # ⭐⭐ run in subprocess patience = 5 * 60 begin_waiting_time = time.time() self.child_conn.send(PipeCom("interact", wait_msg)) while True: time.sleep(0.5) - if self.child_conn.poll(): + if self.child_conn.poll(): wait_success = True break if time.time() - begin_waiting_time > patience: @@ -173,4 +192,3 @@ wait_success = False break return wait_success - diff --git a/crazy_functions/下载arxiv论文翻译摘要.py b/crazy_functions/下载arxiv论文翻译摘要.py index c711cf45..1e0fe630 100644 --- a/crazy_functions/下载arxiv论文翻译摘要.py +++ b/crazy_functions/下载arxiv论文翻译摘要.py @@ -1,6 +1,6 @@ from toolbox import update_ui, get_log_folder from toolbox import write_history_to_file, promote_file_to_downloadzone -from toolbox import CatchException, report_execption, get_conf +from toolbox import CatchException, report_exception, get_conf import re, requests, unicodedata, os from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive def download_arxiv_(url_pdf): @@ -144,7 +144,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi try: import bs4 except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -157,7 +157,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi try: pdf_path, info = download_arxiv_(txt) except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"下载pdf文件未成功") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/crazy_functions/多智能体.py b/crazy_functions/多智能体.py index 3fab4c31..d2adee00 100644 --- 
a/crazy_functions/多智能体.py +++ b/crazy_functions/多智能体.py @@ -3,11 +3,6 @@ 测试: - show me the solution of $x^2=cos(x)$, solve this problem with figure, and plot and save image to t.jpg -Testing: - - Crop the image, keeping the bottom half. - - Swap the blue channel and red channel of the image. - - Convert the image to grayscale. - - Convert the CSV file to an Excel spreadsheet. """ @@ -38,18 +33,23 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ """ # 检查当前的模型是否符合要求 supported_llms = [ - 'gpt-3.5-16k', - 'gpt-3.5-turbo-16k', + "gpt-3.5-turbo-16k", 'gpt-3.5-turbo-1106', - 'gpt-4', - 'gpt-4-32k', + "gpt-4", + "gpt-4-32k", 'gpt-4-1106-preview', + "azure-gpt-3.5-turbo-16k", + "azure-gpt-3.5-16k", + "azure-gpt-4", + "azure-gpt-4-32k", ] - llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) - if remove_model_prefix(llm_kwargs['llm_model']) not in supported_llms: - chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}."]) + from request_llms.bridge_all import model_info + if model_info[llm_kwargs['llm_model']]["max_token"] < 8000: # 至少是8k上下文的模型 + chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}的最大上下文长度太短, 不能支撑AutoGen运行。"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return + if model_info[llm_kwargs['llm_model']]["endpoint"] is not None: # 如果不是本地模型,加载API_KEY + llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) # 检查当前的模型是否符合要求 API_URL_REDIRECT = get_conf('API_URL_REDIRECT') @@ -60,7 +60,9 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ # 尝试导入依赖,如果缺少依赖,则给出安装建议 try: - import autogen, docker + import autogen + if get_conf("AUTOGEN_USE_DOCKER"): + import docker except: chatbot.append([ f"处理任务: {txt}", f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pyautogen docker```。"]) @@ -71,7 +73,8 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ try: import autogen import glob, os, time, subprocess - subprocess.Popen(['docker', '--version']) + if get_conf("AUTOGEN_USE_DOCKER"): + subprocess.Popen(["docker", "--version"]) except: chatbot.append([f"处理任务: {txt}", f"缺少docker运行环境!"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/crazy_functions/总结word文档.py b/crazy_functions/总结word文档.py index 7c822e9f..b3923071 100644 --- a/crazy_functions/总结word文档.py +++ b/crazy_functions/总结word文档.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -97,7 +97,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr try: from docx import Document except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -111,7 +111,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -124,7 +124,7 @@ def 
总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/总结音视频.py b/crazy_functions/总结音视频.py index b88775b4..b27bcce0 100644 --- a/crazy_functions/总结音视频.py +++ b/crazy_functions/总结音视频.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, select_api_key, update_ui, get_conf +from toolbox import CatchException, report_exception, select_api_key, update_ui, get_conf from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from toolbox import write_history_to_file, promote_file_to_downloadzone, get_log_folder @@ -144,7 +144,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro try: from moviepy.editor import AudioFileClip except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -158,7 +158,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -174,7 +174,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量Markdown翻译.py b/crazy_functions/批量Markdown翻译.py index 2bdffc86..12b4ef09 100644 --- a/crazy_functions/批量Markdown翻译.py +++ b/crazy_functions/批量Markdown翻译.py @@ -1,6 +1,6 @@ import glob, time, os, re, logging from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion -from toolbox import CatchException, report_execption, get_log_folder +from toolbox import CatchException, report_exception, get_log_folder from toolbox import write_history_to_file, promote_file_to_downloadzone fast_debug = False @@ -165,7 +165,7 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -177,12 +177,12 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -205,7 +205,7 @@ def 
Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -215,11 +215,11 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') @@ -238,7 +238,7 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, try: import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -248,11 +248,11 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, if not success: # 什么都没有 if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量总结PDF文档.py b/crazy_functions/批量总结PDF文档.py index 57a6cdf1..7fc3e415 100644 --- a/crazy_functions/批量总结PDF文档.py +++ b/crazy_functions/批量总结PDF文档.py @@ -1,5 +1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from .crazy_utils import read_and_clean_pdf_text @@ -119,7 +119,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst try: import fitz except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -133,7 +133,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -142,7 +142,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, a = 
f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量总结PDF文档pdfminer.py b/crazy_functions/批量总结PDF文档pdfminer.py index 213d8bb2..a729efaa 100644 --- a/crazy_functions/批量总结PDF文档pdfminer.py +++ b/crazy_functions/批量总结PDF文档pdfminer.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from toolbox import write_history_to_file, promote_file_to_downloadzone @@ -138,7 +138,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo try: import pdfminer, bs4 except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -147,7 +147,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ @@ -155,7 +155,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/crazy_functions/批量翻译PDF文档_NOUGAT.py b/crazy_functions/批量翻译PDF文档_NOUGAT.py index 16dfd6bf..97170d0e 100644 --- a/crazy_functions/批量翻译PDF文档_NOUGAT.py +++ b/crazy_functions/批量翻译PDF文档_NOUGAT.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, get_log_folder, gen_time_str +from toolbox import CatchException, report_exception, get_log_folder, gen_time_str from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -68,7 +68,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import nougat import tiktoken except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -84,7 +84,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/批量翻译PDF文档_多线程.py 
b/crazy_functions/批量翻译PDF文档_多线程.py index f2e5cf99..333b529b 100644 --- a/crazy_functions/批量翻译PDF文档_多线程.py +++ b/crazy_functions/批量翻译PDF文档_多线程.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, get_log_folder, gen_time_str +from toolbox import CatchException, report_exception, get_log_folder, gen_time_str from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -26,7 +26,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import tiktoken import scipdf except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken scipdf_parser```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -43,7 +43,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/理解PDF文档内容.py b/crazy_functions/理解PDF文档内容.py index 4c0a1052..ef967889 100644 --- a/crazy_functions/理解PDF文档内容.py +++ b/crazy_functions/理解PDF文档内容.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from .crazy_utils import read_and_clean_pdf_text from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -81,7 +81,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat try: import fitz except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -96,7 +96,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -105,7 +105,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # 如果没找到任何文件 if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/生成函数注释.py b/crazy_functions/生成函数注释.py index bf3da6a4..d71a5680 100644 --- a/crazy_functions/生成函数注释.py +++ b/crazy_functions/生成函数注释.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive fast_debug = False @@ -43,14 +43,14 @@ def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from 
update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/crazy_functions/解析JupyterNotebook.py b/crazy_functions/解析JupyterNotebook.py index 709b7e1c..eeccadf7 100644 --- a/crazy_functions/解析JupyterNotebook.py +++ b/crazy_functions/解析JupyterNotebook.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone fast_debug = True @@ -131,7 +131,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return @@ -141,7 +141,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p file_manifest = [f for f in glob.glob( f'{project_folder}/**/*.ipynb', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return diff --git a/crazy_functions/解析项目源代码.py b/crazy_functions/解析项目源代码.py index f17a584d..e319d5a8 100644 --- a/crazy_functions/解析项目源代码.py +++ b/crazy_functions/解析项目源代码.py @@ -1,5 +1,5 @@ from toolbox import update_ui, promote_file_to_downloadzone, disable_auto_promotion -from toolbox import CatchException, report_execption, write_history_to_file +from toolbox import CatchException, report_exception, write_history_to_file from .crazy_utils import input_clipping def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): @@ -113,7 +113,7 @@ def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ [f for f in glob.glob('./*/*.py')] project_folder = './' if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -126,12 +126,12 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") yield from update_ui(chatbot=chatbot, 
history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -144,12 +144,12 @@ def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.m', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}") + report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -162,14 +162,14 @@ def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, his project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -182,7 +182,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ @@ -190,7 +190,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -204,7 +204,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \ @@ -212,7 +212,7 @@ 
def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -226,7 +226,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \ @@ -241,7 +241,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s [f for f in glob.glob(f'{project_folder}/**/*.css', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -255,7 +255,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \ @@ -263,7 +263,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -276,14 +276,14 @@ def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.rs', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.lock', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") yield from update_ui(chatbot=chatbot, 
history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -296,7 +296,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', recursive=True)] + \ @@ -304,7 +304,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -318,13 +318,13 @@ def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.cs', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.csproj', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) @@ -352,7 +352,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return # 若上传压缩文件, 先寻找到解压的文件夹路径, 从而避免解析压缩文件 @@ -365,7 +365,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys file_manifest = [f for pattern in pattern_include for f in glob.glob(f'{extract_folder_path}/**/{pattern}', recursive=True) if "" != extract_folder_path and \ os.path.isfile(f) and (not re.search(pattern_except, f) or pattern.endswith('.' 
+ re.search(pattern_except, f).group().split('.')[-1]))] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) \ No newline at end of file diff --git a/crazy_functions/读文章写摘要.py b/crazy_functions/读文章写摘要.py index acdf632c..a43b6aa2 100644 --- a/crazy_functions/读文章写摘要.py +++ b/crazy_functions/读文章写摘要.py @@ -1,5 +1,5 @@ from toolbox import update_ui -from toolbox import CatchException, report_execption +from toolbox import CatchException, report_exception from toolbox import write_history_to_file, promote_file_to_downloadzone from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive @@ -51,14 +51,14 @@ def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_ project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \ # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 return yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/crazy_functions/谷歌检索小助手.py b/crazy_functions/谷歌检索小助手.py index 5924a286..14b21bfc 100644 --- a/crazy_functions/谷歌检索小助手.py +++ b/crazy_functions/谷歌检索小助手.py @@ -1,5 +1,5 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from toolbox import CatchException, report_execption, promote_file_to_downloadzone +from toolbox import CatchException, report_exception, promote_file_to_downloadzone from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file import logging import requests @@ -29,7 +29,7 @@ def get_meta_information(url, chatbot, history): try: session.proxies.update(proxies) except: - report_execption(chatbot, history, + report_exception(chatbot, history, a=f"获取代理失败 无代理状态下很可能无法访问OpenAI家族的模型及谷歌学术 建议:检查USE_PROXY选项是否修改。", b=f"尝试直接连接") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -146,7 +146,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst import math from bs4 import BeautifulSoup except: - report_execption(chatbot, history, + report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 diff --git a/docs/self_analysis.md b/docs/self_analysis.md index c3736193..0b76c7bd 100644 --- a/docs/self_analysis.md +++ b/docs/self_analysis.md @@ -217,7 +217,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和 ## [31/48] 请对下面的程序文件做一个概述: crazy_functions\读文章写摘要.py 
-这个程序文件是一个Python模块,文件名为crazy_functions\读文章写摘要.py。该模块包含了两个函数,其中主要函数是"读文章写摘要"函数,其实现了解析给定文件夹中的tex文件,对其中每个文件的内容进行摘要生成,并根据各论文片段的摘要,最终生成全文摘要。第二个函数是"解析Paper"函数,用于解析单篇论文文件。其中用到了一些工具函数和库,如update_ui、CatchException、report_execption、write_results_to_file等。 +这个程序文件是一个Python模块,文件名为crazy_functions\读文章写摘要.py。该模块包含了两个函数,其中主要函数是"读文章写摘要"函数,其实现了解析给定文件夹中的tex文件,对其中每个文件的内容进行摘要生成,并根据各论文片段的摘要,最终生成全文摘要。第二个函数是"解析Paper"函数,用于解析单篇论文文件。其中用到了一些工具函数和库,如update_ui、CatchException、report_exception、write_results_to_file等。 ## [32/48] 请对下面的程序文件做一个概述: crazy_functions\谷歌检索小助手.py diff --git a/main.py b/main.py index 89ca7811..42895599 100644 --- a/main.py +++ b/main.py @@ -1,6 +1,5 @@ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 import pickle -import codecs import base64 def main(): diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 139d3ae9..7d13bbdd 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -242,6 +242,13 @@ for model in AVAIL_LLM_MODELS: mi.update({"endpoint": api2d_endpoint}) model_info.update({model: mi}) +# -=-=-=-=-=-=- azure 对齐支持 -=-=-=-=-=-=- +for model in AVAIL_LLM_MODELS: + if model.startswith('azure-') and (model.replace('azure-','') in model_info.keys()): + mi = model_info[model.replace('azure-','')] + mi.update({"endpoint": azure_endpoint}) + model_info.update({model: mi}) + # -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=- if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS: from .bridge_claude import predict_no_ui_long_connection as claude_noui @@ -564,7 +571,7 @@ def LLM_CATCH_EXCEPTION(f): return decorated -def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False): +def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window=[], console_slience=False): """ 发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。 inputs: diff --git a/request_llms/bridge_chatglm.py b/request_llms/bridge_chatglm.py index 16e1d8fc..83c50da1 100644 --- a/request_llms/bridge_chatglm.py +++ b/request_llms/bridge_chatglm.py @@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModel, AutoTokenizer from toolbox import get_conf, ProxyNetworkActivate -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetGLM2Handle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_chatglm3.py b/request_llms/bridge_chatglm3.py index 461c3064..44656608 100644 --- a/request_llms/bridge_chatglm3.py +++ b/request_llms/bridge_chatglm3.py @@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModel, AutoTokenizer from toolbox import get_conf, ProxyNetworkActivate -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # 
------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetGLM3Handle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_chatglmonnx.py b/request_llms/bridge_chatglmonnx.py index 312c6846..4b905718 100644 --- a/request_llms/bridge_chatglmonnx.py +++ b/request_llms/bridge_chatglmonnx.py @@ -8,7 +8,7 @@ import threading import importlib from toolbox import update_ui, get_conf from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns from .chatglmoonx import ChatGLMModel, chat_template @@ -17,7 +17,6 @@ from .chatglmoonx import ChatGLMModel, chat_template # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetONNXGLMHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py index c2f7af38..5de07e84 100644 --- a/request_llms/bridge_chatgpt.py +++ b/request_llms/bridge_chatgpt.py @@ -7,8 +7,7 @@ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑 - 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. predict_no_ui_long_connection:支持多线程 """ import json diff --git a/request_llms/bridge_chatgpt_website.py b/request_llms/bridge_chatgpt_website.py index 7f3147b1..f2f07090 100644 --- a/request_llms/bridge_chatgpt_website.py +++ b/request_llms/bridge_chatgpt_website.py @@ -7,8 +7,7 @@ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑 - 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. predict_no_ui_long_connection:支持多线程 """ import json diff --git a/request_llms/bridge_claude.py b/request_llms/bridge_claude.py index 6084b1f1..42b75052 100644 --- a/request_llms/bridge_claude.py +++ b/request_llms/bridge_claude.py @@ -7,7 +7,7 @@ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 具备多线程调用能力的函数 - 2. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 + 2. 
predict_no_ui_long_connection:支持多线程 """ import os diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py index 073c193a..b2be36a4 100644 --- a/request_llms/bridge_internlm.py +++ b/request_llms/bridge_internlm.py @@ -5,9 +5,9 @@ from transformers import AutoModel, AutoTokenizer import time import threading import importlib -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ @@ -34,7 +34,6 @@ def combine_history(prompt, hist): # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetInternlmHandle(LocalLLMHandle): def load_model_info(self): @@ -53,14 +52,15 @@ class GetInternlmHandle(LocalLLMHandle): import torch from transformers import AutoModelForCausalLM, AutoTokenizer device = get_conf('LOCAL_MODEL_DEVICE') - if self._model is None: - tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True) - if device=='cpu': - model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16) - else: - model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda() + with ProxyNetworkActivate('Download_LLM'): + if self._model is None: + tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True) + if device=='cpu': + model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16) + else: + model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda() - model = model.eval() + model = model.eval() return model, tokenizer def llm_stream_generator(self, **kwargs): @@ -94,8 +94,9 @@ class GetInternlmHandle(LocalLLMHandle): inputs = tokenizer([prompt], padding=True, return_tensors="pt") input_length = len(inputs["input_ids"][0]) + device = get_conf('LOCAL_MODEL_DEVICE') for k, v in inputs.items(): - inputs[k] = v.cuda() + inputs[k] = v.to(device) input_ids = inputs["input_ids"] batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1] if generation_config is None: diff --git a/request_llms/bridge_llama2.py b/request_llms/bridge_llama2.py index bc8ef7eb..e6da4b75 100644 --- a/request_llms/bridge_llama2.py +++ b/request_llms/bridge_llama2.py @@ -5,14 +5,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`" from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns from threading import Thread # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # 
------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetONNXGLMHandle(LocalLLMHandle): def load_model_info(self): diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py index 62682cfa..afd886bf 100644 --- a/request_llms/bridge_qwen.py +++ b/request_llms/bridge_qwen.py @@ -6,16 +6,15 @@ from transformers import AutoModel, AutoTokenizer import time import threading import importlib -from toolbox import update_ui, get_conf +from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe -from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM +from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ # 🔌💻 Local Model # ------------------------------------------------------------------------------------------------------------------------ -@SingletonLocalLLM class GetONNXGLMHandle(LocalLLMHandle): def load_model_info(self): @@ -30,13 +29,13 @@ class GetONNXGLMHandle(LocalLLMHandle): import platform from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig - model_id = 'qwen/Qwen-7B-Chat' - revision = 'v1.0.1' - self._tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True) - # use fp16 - model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", revision=revision, trust_remote_code=True, fp16=True).eval() - model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参 - self._model = model + with ProxyNetworkActivate('Download_LLM'): + model_id = 'qwen/Qwen-7B-Chat' + self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True) + # use fp16 + model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval() + model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参 + self._model = model return self._model, self._tokenizer diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index b6ce801e..091707a7 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -1,6 +1,6 @@ import time import threading -from toolbox import update_ui +from toolbox import update_ui, Singleton from multiprocessing import Process, Pipe from contextlib import redirect_stdout from request_llms.queued_pipe import create_queue_pipe @@ -26,23 +26,20 @@ class ThreadLock(object): def __exit__(self, type, value, traceback): self.release() -def SingletonLocalLLM(cls): - """ - Singleton Decroator for LocalLLMHandle - """ - _instance = {} +@Singleton +class GetSingletonHandle(): + def __init__(self): + self.llm_model_already_running = {} - def _singleton(*args, **kargs): - if cls not in _instance: - _instance[cls] = cls(*args, **kargs) - return _instance[cls] - elif _instance[cls].corrupted: - _instance[cls] = cls(*args, **kargs) - return _instance[cls] + def get_llm_model_instance(self, cls, *args, **kargs): + if cls not in self.llm_model_already_running: + self.llm_model_already_running[cls] = cls(*args, **kargs) + return self.llm_model_already_running[cls] + elif self.llm_model_already_running[cls].corrupted: + self.llm_model_already_running[cls] = cls(*args, **kargs) + 
return self.llm_model_already_running[cls] else: - return _instance[cls] - return _singleton - + return self.llm_model_already_running[cls] def reset_tqdm_output(): import sys, tqdm @@ -76,7 +73,6 @@ class LocalLLMHandle(Process): self.parent_state, self.child_state = create_queue_pipe() # allow redirect_stdout self.std_tag = "[Subprocess Message] " - self.child.write = lambda x: self.child.send(self.std_tag + x) self.running = True self._model = None self._tokenizer = None @@ -137,6 +133,8 @@ class LocalLLMHandle(Process): def run(self): # 🏃‍♂️🏃‍♂️🏃‍♂️ run in child process # 第一次运行,加载参数 + self.child.flush = lambda *args: None + self.child.write = lambda x: self.child.send(self.std_tag + x) reset_tqdm_output() self.set_state("`尝试加载模型`") try: @@ -220,7 +218,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla """ refer to request_llms/bridge_all.py """ - _llm_handle = LLMSingletonClass() + _llm_handle = GetSingletonHandle().get_llm_model_instance(LLMSingletonClass) if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + _llm_handle.get_state() if not _llm_handle.running: @@ -268,7 +266,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla """ chatbot.append((inputs, "")) - _llm_handle = LLMSingletonClass() + _llm_handle = GetSingletonHandle().get_llm_model_instance(LLMSingletonClass) chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.get_state()) yield from update_ui(chatbot=chatbot, history=[]) if not _llm_handle.running: diff --git a/requirements.txt b/requirements.txt index e832a28c..1f86d336 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,6 +15,7 @@ Markdown pygments pymupdf openai +pyautogen numpy arxiv rich diff --git a/tests/test_llms.py b/tests/test_llms.py index 5c5d2f6c..6285f030 100644 --- a/tests/test_llms.py +++ b/tests/test_llms.py @@ -15,11 +15,11 @@ if __name__ == "__main__": # from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection # from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection # from request_llms.bridge_claude import predict_no_ui_long_connection - # from request_llms.bridge_internlm import predict_no_ui_long_connection + from request_llms.bridge_internlm import predict_no_ui_long_connection # from request_llms.bridge_qwen import predict_no_ui_long_connection # from request_llms.bridge_spark import predict_no_ui_long_connection # from request_llms.bridge_zhipu import predict_no_ui_long_connection - from request_llms.bridge_chatglm3 import predict_no_ui_long_connection + # from request_llms.bridge_chatglm3 import predict_no_ui_long_connection llm_kwargs = { 'max_length': 4096, diff --git a/toolbox.py b/toolbox.py index b0a96e76..ac04fdee 100644 --- a/toolbox.py +++ b/toolbox.py @@ -187,7 +187,7 @@ def HotReload(f): 其他小工具: - write_history_to_file: 将结果写入markdown文件中 - regular_txt_to_markdown: 将普通文本转换为Markdown格式的文本。 - - report_execption: 向chatbot中添加简单的意外错误信息 + - report_exception: 向chatbot中添加简单的意外错误信息 - text_divide_paragraph: 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。 - markdown_convertion: 用多种方式组合,将markdown转化为好看的html - format_io: 接管gradio默认的markdown处理方式 @@ -260,7 +260,7 @@ def regular_txt_to_markdown(text): -def report_execption(chatbot, history, a, b): +def report_exception(chatbot, history, a, b): """ 向chatbot中添加错误信息 """ diff --git a/version b/version index 5e4fb7d0..9b33c4f4 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.57, + "version": 3.59, "show_feature": true, - "new_feature": "支持文心一言v4和星火v3 <-> 
支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" + "new_feature": "AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验" }
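
The azure alignment loop added to request_llms/bridge_all.py earlier in this patch lets an `azure-` prefixed alias in `AVAIL_LLM_MODELS` (for example `azure-gpt-3.5-turbo`) reuse the base model's `model_info` entry while pointing it at the Azure endpoint. Below is a minimal, self-contained sketch of that mapping; the endpoint strings and the single `model_info` entry are placeholders for illustration, not the project's real configuration. Note also that the hunk in the patch updates the shared metadata dict in place, so the base model's endpoint is rewritten as well; the sketch copies the dict first, which keeps both variants usable and may be the safer behaviour.

```python
# Standalone sketch of the azure alias mapping (assumed toy values, not the real config).
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "azure-gpt-3.5-turbo"]
azure_endpoint = "https://<your-resource>.openai.azure.com/<deployment>/chat/completions"  # placeholder

model_info = {
    "gpt-3.5-turbo": {
        "endpoint": "https://api.openai.com/v1/chat/completions",
        "max_token": 4096,
    },
}

for model in AVAIL_LLM_MODELS:
    base = model.replace('azure-', '')
    if model.startswith('azure-') and base in model_info:
        mi = dict(model_info[base])      # copy, so the base entry keeps its own endpoint
        mi["endpoint"] = azure_endpoint  # only the endpoint differs for the azure alias
        model_info[model] = mi

assert model_info["azure-gpt-3.5-turbo"]["endpoint"] == azure_endpoint
assert model_info["gpt-3.5-turbo"]["endpoint"].startswith("https://api.openai.com")
```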
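
The signature change to `predict_no_ui_long_connection` in bridge_all.py gives `observe_window` a default value of `[]`. A short reminder of how mutable defaults behave in Python follows, since the same list object is reused by every call that omits the argument; whether that matters here depends on whether the function ever mutates the default, which this patch does not show.

```python
# Python evaluates default arguments once, at function definition time, so a
# default of [] is one shared list across every call that omits the argument.
def collect(item, bucket=[]):
    bucket.append(item)
    return bucket

print(collect("a"))  # ['a']
print(collect("b"))  # ['a', 'b']  -- same list object as the first call

# The usual defensive pattern, if a fresh list is wanted per call:
def collect_safe(item, bucket=None):
    bucket = [] if bucket is None else bucket
    bucket.append(item)
    return bucket
```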
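
Several hunks above (bridge_internlm.py and bridge_qwen.py) wrap the model downloads in `with ProxyNetworkActivate('Download_LLM'):`. The real context manager lives in toolbox.py and is not shown in this patch; the sketch below is only a guess at the general shape of such a helper, temporarily exporting proxy environment variables around the download, and should not be read as the project's actual implementation.

```python
import os
from contextlib import contextmanager

@contextmanager
def proxy_network_activate(proxies=None):
    """Hypothetical stand-in for toolbox.ProxyNetworkActivate('Download_LLM');
    the project's real helper may behave differently."""
    proxies = proxies or {}
    saved = {k: os.environ.get(k) for k in ("HTTP_PROXY", "HTTPS_PROXY")}
    try:
        if "http" in proxies:
            os.environ["HTTP_PROXY"] = proxies["http"]
        if "https" in proxies:
            os.environ["HTTPS_PROXY"] = proxies["https"]
        yield
    finally:
        # restore the previous environment, so only the download goes through the proxy
        for k, v in saved.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v

# usage sketch: downloads inside the block pick up the proxy variables
with proxy_network_activate({"https": "http://127.0.0.1:7890"}):
    pass  # e.g. AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
```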
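
The local_llm_class.py hunk above replaces the per-class `SingletonLocalLLM` decorator with a process-wide `GetSingletonHandle` registry: `get_local_llm_predict_fns` now asks the registry for an instance, and a handle whose `corrupted` flag is set gets rebuilt. The sketch below mirrors that logic; the inline `Singleton` decorator and the `DemoHandle` class are assumptions standing in for `toolbox.Singleton` and a real `LocalLLMHandle` subclass.

```python
def Singleton(cls):
    # stand-in for toolbox.Singleton: one instance of the decorated class per process
    _instances = {}
    def get_instance(*args, **kwargs):
        if cls not in _instances:
            _instances[cls] = cls(*args, **kwargs)
        return _instances[cls]
    return get_instance

@Singleton
class GetSingletonHandle:
    def __init__(self):
        self.llm_model_already_running = {}

    def get_llm_model_instance(self, cls, *args, **kwargs):
        handle = self.llm_model_already_running.get(cls)
        if handle is None or handle.corrupted:
            handle = cls(*args, **kwargs)               # (re)spawn the model handle
            self.llm_model_already_running[cls] = handle
        return handle

class DemoHandle:
    """Assumed stand-in for a LocalLLMHandle subclass."""
    def __init__(self):
        self.corrupted = False

a = GetSingletonHandle().get_llm_model_instance(DemoHandle)
b = GetSingletonHandle().get_llm_model_instance(DemoHandle)
assert a is b                  # the registry hands out one handle per class
a.corrupted = True
assert GetSingletonHandle().get_llm_model_instance(DemoHandle) is not a  # corrupted handle rebuilt
```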