Mirrored from
https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 14:36:48 +00:00
Merge branch 'frontier' into production
This commit is contained in:
@@ -212,7 +212,7 @@ ALLOW_RESET_CONFIG = False
 
 
 # 在使用AutoGen插件时,是否使用Docker容器运行代码
-AUTOGEN_USE_DOCKER = True
+AUTOGEN_USE_DOCKER = False
 
 
 # 临时的上传文件夹位置,请勿修改
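Note: this commit flips the default of AUTOGEN_USE_DOCKER from True to False, so the AutoGen plugin no longer needs a local Docker daemon out of the box. The flag is read with get_conf and fed straight into autogen's code_execution_config, as the agent_fns hunks below show. A minimal sketch of how the value is consumed (illustrative only; the work_dir value is a placeholder, not repo code):

    # Sketch: how the config flag reaches autogen's code executor.
    AUTOGEN_USE_DOCKER = False  # new default introduced by this commit

    code_execution_config = {
        "work_dir": "gpt_log/autogen",     # placeholder scratch dir
        "use_docker": AUTOGEN_USE_DOCKER,  # False: run generated code on the host
    }                                      # True: run it inside a container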
@@ -74,7 +74,7 @@ def get_crazy_functions():
         "批量总结Word文档": {
             "Group": "学术",
             "Color": "stop",
-            "AsButton": True,
+            "AsButton": False,
             "Info": "批量总结word文档 | 输入参数为路径",
             "Function": HotReload(总结word文档)
         },
@@ -178,6 +178,13 @@ def get_crazy_functions():
             "Info": "批量生成函数的注释 | 输入参数为路径",
             "Function": HotReload(批量生成函数注释)
         },
+        "精准翻译PDF论文": {
+            "Group": "学术",
+            "Color": "stop",
+            "AsButton": True,
+            "Info": "精准翻译PDF论文为中文 | 输入参数为路径",
+            "Function": HotReload(批量翻译PDF文档)
+        },
         "保存当前的对话": {
             "Group": "对话",
             "AsButton": True,
@@ -196,13 +203,6 @@ def get_crazy_functions():
             "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数",
             "Function": HotReload(高阶功能模板函数)
         },
-        "精准翻译PDF论文": {
-            "Group": "学术",
-            "Color": "stop",
-            "AsButton": True,
-            "Info": "精准翻译PDF论文为中文 | 输入参数为路径",
-            "Function": HotReload(批量翻译PDF文档)
-        },
         "询问多个GPT模型": {
             "Group": "对话",
             "Color": "stop",
@@ -561,18 +561,15 @@ def get_crazy_functions():
     except:
         print('Load function plugin failed')
 
-    # try:
-    #     from crazy_functions.多智能体 import 多智能体终端
-    #     function_plugins.update({
-    #         "多智能体终端(微软AutoGen)": {
-    #             "Group": "智能体",
-    #             "Color": "stop",
-    #             "AsButton": True,
-    #             "Function": HotReload(多智能体终端)
-    #         }
-    #     })
-    # except:
-    #     print('Load function plugin failed')
+    from crazy_functions.多智能体 import 多智能体终端
+    function_plugins.update({
+        "AutoGen多智能体终端": {
+            "Group": "智能体",
+            "Color": "stop",
+            "AsButton": True,
+            "Function": HotReload(多智能体终端)
+        }
+    })
 
     # try:
     #     from crazy_functions.chatglm微调工具 import 微调数据集生成
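Note: most of the remaining hunks apply one mechanical fix: the misspelled toolbox helper report_execption is renamed to report_exception, and every call site is updated in the same pass. A hedged sketch of the pattern (the helper body is a simplified stand-in for the toolbox function, and the compatibility alias is a suggestion, not something this commit adds):

    def report_exception(chatbot, history, a, b):
        # Simplified stand-in for the toolbox helper: surface an error
        # report (a, b) in the chat display.
        chatbot.append((a, b))

    report_execption = report_exception  # hypothetical backward-compat alias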
@@ -1,5 +1,5 @@
 from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder
-from toolbox import CatchException, report_execption, write_history_to_file, zip_folder
+from toolbox import CatchException, report_exception, write_history_to_file, zip_folder
 
 
 class PaperFileGroup():
@@ -146,7 +146,7 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -157,12 +157,12 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en')
@@ -184,7 +184,7 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -195,12 +195,12 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh')
@@ -220,7 +220,7 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -231,12 +231,12 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='proofread')
@@ -1,5 +1,5 @@
 from toolbox import update_ui, promote_file_to_downloadzone
-from toolbox import CatchException, report_execption, write_history_to_file
+from toolbox import CatchException, report_exception, write_history_to_file
 fast_debug = False
 
 class PaperFileGroup():
@@ -117,7 +117,7 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -128,12 +128,12 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
@@ -154,7 +154,7 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -165,12 +165,12 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
@@ -1,5 +1,5 @@
 from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
-from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip_result, gen_time_str
+from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
 from functools import partial
 import glob, os, requests, time
 pj = os.path.join
@@ -171,12 +171,12 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -249,7 +249,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
     history = []
     txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
     if txt.endswith('.pdf'):
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -258,13 +258,13 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -1,8 +1,8 @@
 from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
-from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton
+from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton
 from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
 from crazy_functions.agent_fns.general import AutoGenGeneral
-import time
+
 
 
 class AutoGenMath(AutoGenGeneral):
@@ -1,23 +1,50 @@
-from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
-from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton
+from toolbox import trimmed_format_exc, get_conf, ProxyNetworkActivate
 from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
+from request_llms.bridge_all import predict_no_ui_long_connection
 import time
 
+
+def gpt_academic_generate_oai_reply(
+    self,
+    messages,
+    sender,
+    config,
+):
+    llm_config = self.llm_config if config is None else config
+    if llm_config is False:
+        return False, None
+    if messages is None:
+        messages = self._oai_messages[sender]
+
+    inputs = messages[-1]['content']
+    history = []
+    for message in messages[:-1]:
+        history.append(message['content'])
+    context=messages[-1].pop("context", None)
+    assert context is None, "预留参数 context 未实现"
+
+    reply = predict_no_ui_long_connection(
+        inputs=inputs,
+        llm_kwargs=llm_config,
+        history=history,
+        sys_prompt=self._oai_system_message[0]['content'],
+        console_slience=True
+    )
+    assumed_done = reply.endswith('\nTERMINATE')
+    return True, reply
+
+
 class AutoGenGeneral(PluginMultiprocessManager):
 
     def gpt_academic_print_override(self, user_proxy, message, sender):
-        # ⭐⭐ 子进程执行
-        self.child_conn.send(PipeCom("show", sender.name + '\n\n---\n\n' + message['content']))
+        # ⭐⭐ run in subprocess
+        self.child_conn.send(PipeCom("show", sender.name + "\n\n---\n\n" + message["content"]))
 
     def gpt_academic_get_human_input(self, user_proxy, message):
-        # ⭐⭐ 子进程执行
+        # ⭐⭐ run in subprocess
         patience = 300
         begin_waiting_time = time.time()
         self.child_conn.send(PipeCom("interact", message))
         while True:
             time.sleep(0.5)
             if self.child_conn.poll():
                 wait_success = True
                 break
             if time.time() - begin_waiting_time > patience:
@@ -32,26 +59,26 @@ class AutoGenGeneral(PluginMultiprocessManager):
     def define_agents(self):
         raise NotImplementedError
 
-    def do_audogen(self, input):
-        # ⭐⭐ 子进程执行
+    def exe_autogen(self, input):
+        # ⭐⭐ run in subprocess
         input = input.content
         with ProxyNetworkActivate("AutoGen"):
-            config_list = self.get_config_list()
-            code_execution_config={"work_dir": self.autogen_work_dir, "use_docker":self.use_docker}
+            code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
             agents = self.define_agents()
             user_proxy = None
             assistant = None
             for agent_kwargs in agents:
                 agent_cls = agent_kwargs.pop('cls')
                 kwargs = {
-                    'llm_config':{
-                        "config_list": config_list,
-                    },
+                    'llm_config':self.llm_kwargs,
                     'code_execution_config':code_execution_config
                 }
                 kwargs.update(agent_kwargs)
                 agent_handle = agent_cls(**kwargs)
                 agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b)
+                for d in agent_handle._reply_func_list:
+                    if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply':
+                        d['reply_func'] = gpt_academic_generate_oai_reply
                 if agent_kwargs['name'] == 'user_proxy':
                     agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
                     user_proxy = agent_handle
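Note: the loop over agent_handle._reply_func_list above reroutes autogen's built-in generate_oai_reply to the new gpt_academic_generate_oai_reply, so completions flow through this project's predict_no_ui_long_connection rather than autogen's own OpenAI client (which is why get_config_list is deleted in the next hunk). autogen agents keep their registered reply functions in _reply_func_list and try them in order; a simplified model of that dispatch, offered only to make the patch legible (an assumption-level sketch; the real logic lives in autogen.ConversableAgent):

    class MiniAgent:
        # Toy model of autogen's reply dispatch.
        def __init__(self):
            self._reply_func_list = []  # entries: {"reply_func": fn, ...}

        def generate_reply(self, messages, sender):
            for d in self._reply_func_list:
                final, reply = d["reply_func"](self, messages, sender, config=None)
                if final:
                    return reply
            return None

    # The hunk above swaps the entry whose __name__ is 'generate_oai_reply'
    # for gpt_academic_generate_oai_reply and leaves all other entries alone.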
@@ -63,23 +90,45 @@ class AutoGenGeneral(PluginMultiprocessManager):
             tb_str = '```\n' + trimmed_format_exc() + '```'
             self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))
 
-    def get_config_list(self):
-        model = self.llm_kwargs['llm_model']
-        api_base = None
-        if self.llm_kwargs['llm_model'].startswith('api2d-'):
-            model = self.llm_kwargs['llm_model'][len('api2d-'):]
-            api_base = "https://openai.api2d.net/v1"
-        config_list = [{
-            'model': model,
-            'api_key': self.llm_kwargs['api_key'],
-        },]
-        if api_base is not None:
-            config_list[0]['api_base'] = api_base
-        return config_list
-
     def subprocess_worker(self, child_conn):
-        # ⭐⭐ 子进程执行
+        # ⭐⭐ run in subprocess
         self.child_conn = child_conn
         while True:
             msg = self.child_conn.recv() # PipeCom
-            self.do_audogen(msg)
+            self.exe_autogen(msg)
+
+
+class AutoGenGroupChat(AutoGenGeneral):
+    def exe_autogen(self, input):
+        # ⭐⭐ run in subprocess
+        import autogen
+
+        input = input.content
+        with ProxyNetworkActivate("AutoGen"):
+            code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
+            agents = self.define_agents()
+            agents_instances = []
+            for agent_kwargs in agents:
+                agent_cls = agent_kwargs.pop("cls")
+                kwargs = {"code_execution_config": code_execution_config}
+                kwargs.update(agent_kwargs)
+                agent_handle = agent_cls(**kwargs)
+                agent_handle._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b)
+                agents_instances.append(agent_handle)
+                if agent_kwargs["name"] == "user_proxy":
+                    user_proxy = agent_handle
+                    user_proxy.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
+            try:
+                groupchat = autogen.GroupChat(agents=agents_instances, messages=[], max_round=50)
+                manager = autogen.GroupChatManager(groupchat=groupchat, **self.define_group_chat_manager_config())
+                manager._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b)
+                manager.get_human_input = lambda a: self.gpt_academic_get_human_input(manager, a)
+                if user_proxy is None:
+                    raise Exception("user_proxy is not defined")
+                user_proxy.initiate_chat(manager, message=input)
+            except Exception:
+                tb_str = "```\n" + trimmed_format_exc() + "```"
+                self.child_conn.send(PipeCom("done", "AutoGen exe failed: \n\n" + tb_str))
+
+    def define_group_chat_manager_config(self):
+        raise NotImplementedError
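Note: the new AutoGenGroupChat subclass wires every defined agent into an autogen.GroupChat, hands the conversation to a GroupChatManager, and routes both printing and human input back through the parent-process pipe. The minimal shape of such a session, stripped of the pipe plumbing (agent names, the task string, and the api_key are placeholders; requires `pip install pyautogen`):

    import autogen

    llm_config = {"config_list": [{"model": "gpt-4", "api_key": "sk-placeholder"}]}
    user_proxy = autogen.UserProxyAgent("user_proxy",
                                        code_execution_config={"use_docker": False})
    coder = autogen.AssistantAgent("coder", llm_config=llm_config)

    groupchat = autogen.GroupChat(agents=[user_proxy, coder], messages=[], max_round=50)
    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
    user_proxy.initiate_chat(manager, message="Plot y = x**2 and save it to t.jpg")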
@@ -2,28 +2,28 @@ from toolbox import get_log_folder, update_ui, gen_time_str, get_conf, promote_f
 from crazy_functions.agent_fns.watchdog import WatchDog
 import time, os
 
-class PipeCom():
+class PipeCom:
     def __init__(self, cmd, content) -> None:
         self.cmd = cmd
         self.content = content
 
 
-class PluginMultiprocessManager():
+class PluginMultiprocessManager:
     def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-        # ⭐ 主进程
-        self.autogen_work_dir = os.path.join(get_log_folder('autogen'), gen_time_str())
+        # ⭐ run in main process
+        self.autogen_work_dir = os.path.join(get_log_folder("autogen"), gen_time_str())
         self.previous_work_dir_files = {}
         self.llm_kwargs = llm_kwargs
         self.plugin_kwargs = plugin_kwargs
         self.chatbot = chatbot
         self.history = history
         self.system_prompt = system_prompt
-        self.web_port = web_port
+        # self.web_port = web_port
         self.alive = True
-        self.use_docker = get_conf('AUTOGEN_USE_DOCKER')
+        self.use_docker = get_conf("AUTOGEN_USE_DOCKER")
+        self.last_user_input = ""
         # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time
-        timeout_seconds = 5*60
+        timeout_seconds = 5 * 60
         self.heartbeat_watchdog = WatchDog(timeout=timeout_seconds, bark_fn=self.terminate, interval=5)
         self.heartbeat_watchdog.begin_watch()
 
@@ -35,8 +35,9 @@ class PluginMultiprocessManager():
         return self.alive
 
     def launch_subprocess_with_pipe(self):
-        # ⭐ 主进程
+        # ⭐ run in main process
        from multiprocessing import Process, Pipe
+
        parent_conn, child_conn = Pipe()
        self.p = Process(target=self.subprocess_worker, args=(child_conn,))
        self.p.daemon = True
@@ -46,15 +47,22 @@ class PluginMultiprocessManager():
     def terminate(self):
         self.p.terminate()
         self.alive = False
-        print('[debug] instance terminated')
+        print("[debug] instance terminated")
 
     def subprocess_worker(self, child_conn):
-        # ⭐⭐ 子进程
+        # ⭐⭐ run in subprocess
         raise NotImplementedError
 
     def send_command(self, cmd):
-        # ⭐ 主进程
+        # ⭐ run in main process
+        repeated = False
+        if cmd == self.last_user_input:
+            repeated = True
+            cmd = ""
+        else:
+            self.last_user_input = cmd
         self.parent_conn.send(PipeCom("user_input", cmd))
+        return repeated, cmd
 
     def immediate_showoff_when_possible(self, fp):
         # ⭐ 主进程
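Note: send_command now remembers the last user input and downgrades an exact repeat to an empty string, so re-submitting the same text does not queue duplicate work in the subprocess; the new (repeated, cmd) return value lets the caller annotate the chat, as the @@ -117,19 +128,27 @@ hunk below does. A standalone behaviour sketch of the same logic (not repo code):

    class DedupDemo:
        # Standalone rewrite of the dedup in send_command above.
        def __init__(self):
            self.last_user_input = ""

        def send_command(self, cmd):
            repeated = False
            if cmd == self.last_user_input:
                repeated = True  # same text twice in a row
                cmd = ""         # forward an empty input instead
            else:
                self.last_user_input = cmd
            return repeated, cmd

    d = DedupDemo()
    assert d.send_command("run") == (False, "run")
    assert d.send_command("run") == (True, "")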
@@ -63,7 +71,10 @@ class PluginMultiprocessManager():
         # 如果是文本文件, 则直接显示文本内容
         if file_type.lower() in ['png', 'jpg']:
             image_path = os.path.abspath(fp)
-            self.chatbot.append(['检测到新生图像:', f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'])
+            self.chatbot.append([
+                '检测到新生图像:',
+                f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
+            ])
             yield from update_ui(chatbot=self.chatbot, history=self.history)
 
     def overwatch_workdir_file_change(self):
@@ -78,7 +89,7 @@ class PluginMultiprocessManager():
                 file_path = os.path.join(root, file)
                 if file_path not in self.previous_work_dir_files.keys():
                     last_modified_time = os.stat(file_path).st_mtime
-                    self.previous_work_dir_files.update({file_path:last_modified_time})
+                    self.previous_work_dir_files.update({file_path: last_modified_time})
                     change_list.append(file_path)
                 else:
                     last_modified_time = os.stat(file_path).st_mtime
@@ -86,8 +97,8 @@ class PluginMultiprocessManager():
                         self.previous_work_dir_files[file_path] = last_modified_time
                         change_list.append(file_path)
         if len(change_list) > 0:
-            file_links = ''
+            file_links = ""
             for f in change_list:
                 res = promote_file_to_downloadzone(f)
                 file_links += f'<br/><a href="file={res}" target="_blank">{res}</a>'
                 yield from self.immediate_showoff_when_possible(f)
@@ -102,7 +113,7 @@ class PluginMultiprocessManager():
         if create_or_resume == 'create':
             self.cnt = 1
             self.parent_conn = self.launch_subprocess_with_pipe() # ⭐⭐⭐
-        self.send_command(txt)
+        repeated, cmd_to_autogen = self.send_command(txt)
         if txt == 'exit':
             self.chatbot.append([f"结束", "结束信号已明确,终止AutoGen程序。"])
             yield from update_ui(chatbot=self.chatbot, history=self.history)
@@ -117,19 +128,27 @@ class PluginMultiprocessManager():
                 # the heartbeat watchdog might have it killed
                 self.terminate()
                 return "terminate"
 
             if self.parent_conn.poll():
                 self.feed_heartbeat_watchdog()
+                if "[GPT-Academic] 等待中" in self.chatbot[-1][-1]:
+                    self.chatbot.pop(-1)  # remove the last line
+                if "等待您的进一步指令" in self.chatbot[-1][-1]:
+                    self.chatbot.pop(-1)  # remove the last line
                 if '[GPT-Academic] 等待中' in self.chatbot[-1][-1]:
                     self.chatbot.pop(-1) # remove the last line
                 msg = self.parent_conn.recv() # PipeCom
                 if msg.cmd == "done":
-                    self.chatbot.append([f"结束", msg.content]); self.cnt += 1
+                    self.chatbot.append([f"结束", msg.content])
+                    self.cnt += 1
                     yield from update_ui(chatbot=self.chatbot, history=self.history)
-                    self.terminate(); break
+                    self.terminate()
+                    break
                 if msg.cmd == "show":
                     yield from self.overwatch_workdir_file_change()
-                    self.chatbot.append([f"运行阶段-{self.cnt}", msg.content]); self.cnt += 1
+                    notice = ""
+                    if repeated: notice = "(自动忽略重复的输入)"
+                    self.chatbot.append([f"运行阶段-{self.cnt}(上次用户反馈输入为: 「{cmd_to_autogen}」{notice}", msg.content])
+                    self.cnt += 1
                     yield from update_ui(chatbot=self.chatbot, history=self.history)
                 if msg.cmd == "interact":
                     yield from self.overwatch_workdir_file_change()
@@ -159,13 +178,13 @@ class PluginMultiprocessManager():
                 return "terminate"
 
     def subprocess_worker_wait_user_feedback(self, wait_msg="wait user feedback"):
-        # ⭐⭐ 子进程
+        # ⭐⭐ run in subprocess
         patience = 5 * 60
         begin_waiting_time = time.time()
         self.child_conn.send(PipeCom("interact", wait_msg))
         while True:
             time.sleep(0.5)
             if self.child_conn.poll():
                 wait_success = True
                 break
             if time.time() - begin_waiting_time > patience:
@@ -173,4 +192,3 @@ class PluginMultiprocessManager():
                 wait_success = False
                 break
         return wait_success
-
@@ -1,6 +1,6 @@
 from toolbox import update_ui, get_log_folder
 from toolbox import write_history_to_file, promote_file_to_downloadzone
-from toolbox import CatchException, report_execption, get_conf
+from toolbox import CatchException, report_exception, get_conf
 import re, requests, unicodedata, os
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 def download_arxiv_(url_pdf):
@@ -144,7 +144,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi
     try:
         import bs4
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a = f"解析项目: {txt}",
                          b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -157,7 +157,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi
     try:
         pdf_path, info = download_arxiv_(txt)
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a = f"解析项目: {txt}",
                          b = f"下载pdf文件未成功")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -3,11 +3,6 @@
 测试:
     - show me the solution of $x^2=cos(x)$, solve this problem with figure, and plot and save image to t.jpg
 
-Testing:
-    - Crop the image, keeping the bottom half.
-    - Swap the blue channel and red channel of the image.
-    - Convert the image to grayscale.
-    - Convert the CSV file to an Excel spreadsheet.
 """
 
 
@@ -38,18 +33,23 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     """
     # 检查当前的模型是否符合要求
     supported_llms = [
-        'gpt-3.5-16k',
-        'gpt-3.5-turbo-16k',
+        "gpt-3.5-turbo-16k",
         'gpt-3.5-turbo-1106',
-        'gpt-4',
-        'gpt-4-32k',
+        "gpt-4",
+        "gpt-4-32k",
         'gpt-4-1106-preview',
+        "azure-gpt-3.5-turbo-16k",
+        "azure-gpt-3.5-16k",
+        "azure-gpt-4",
+        "azure-gpt-4-32k",
     ]
-    llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
-    if remove_model_prefix(llm_kwargs['llm_model']) not in supported_llms:
-        chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}."])
+    from request_llms.bridge_all import model_info
+    if model_info[llm_kwargs['llm_model']]["max_token"] < 8000: # 至少是8k上下文的模型
+        chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}的最大上下文长度太短, 不能支撑AutoGen运行。"])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
+    if model_info[llm_kwargs['llm_model']]["endpoint"] is not None: # 如果不是本地模型,加载API_KEY
+        llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
 
     # 检查当前的模型是否符合要求
     API_URL_REDIRECT = get_conf('API_URL_REDIRECT')
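Note: the hard-coded whitelist test is replaced by a capability test: the plugin now reads the model's context window from request_llms.bridge_all.model_info and requires at least 8000 tokens, and it only resolves an API key for models that have a remote endpoint (local models carry endpoint None). A sketch of the gate with a stand-in table (the entries below are illustrative, not the real model_info):

    def model_supports_autogen(llm_model, model_info):
        # AutoGen needs a roomy context window; 8000 tokens is the floor.
        return model_info[llm_model]["max_token"] >= 8000

    fake_model_info = {  # illustrative stand-in for request_llms.bridge_all.model_info
        "gpt-3.5-turbo-16k": {"max_token": 16385, "endpoint": "https://api.openai.com/v1/chat/completions"},
        "some-local-llm":    {"max_token": 4096,  "endpoint": None},
    }
    assert model_supports_autogen("gpt-3.5-turbo-16k", fake_model_info)
    assert not model_supports_autogen("some-local-llm", fake_model_info)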
@@ -60,7 +60,9 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
 
     # 尝试导入依赖,如果缺少依赖,则给出安装建议
     try:
-        import autogen, docker
+        import autogen
+        if get_conf("AUTOGEN_USE_DOCKER"):
+            import docker
     except:
         chatbot.append([ f"处理任务: {txt}",
             f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pyautogen docker```。"])
@@ -71,7 +73,8 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     try:
         import autogen
         import glob, os, time, subprocess
-        subprocess.Popen(['docker', '--version'])
+        if get_conf("AUTOGEN_USE_DOCKER"):
+            subprocess.Popen(["docker", "--version"])
     except:
         chatbot.append([f"处理任务: {txt}", f"缺少docker运行环境!"])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
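Note: both dependency probes are now gated on the same flag: the docker Python package and the docker CLI are only required when AUTOGEN_USE_DOCKER is enabled, matching the new False default in config.py. The pattern, condensed (stand-alone sketch; the constant is a placeholder for get_conf("AUTOGEN_USE_DOCKER")):

    import subprocess

    AUTOGEN_USE_DOCKER = False  # placeholder for get_conf("AUTOGEN_USE_DOCKER")

    try:
        import autogen                                 # always required
        if AUTOGEN_USE_DOCKER:
            import docker                              # optional python client
            subprocess.Popen(["docker", "--version"])  # optional CLI probe
    except Exception:
        print("pip install --upgrade pyautogen docker")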
@@ -1,5 +1,5 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 fast_debug = False
@@ -97,7 +97,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr
     try:
         from docx import Document
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -111,7 +111,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -124,7 +124,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr
 
     # 如果没找到任何文件
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -1,4 +1,4 @@
-from toolbox import CatchException, report_execption, select_api_key, update_ui, get_conf
+from toolbox import CatchException, report_exception, select_api_key, update_ui, get_conf
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from toolbox import write_history_to_file, promote_file_to_downloadzone, get_log_folder
 
@@ -144,7 +144,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     try:
         from moviepy.editor import AudioFileClip
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -158,7 +158,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -174,7 +174,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
 
     # 如果没找到任何文件
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -1,6 +1,6 @@
 import glob, time, os, re, logging
 from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion
-from toolbox import CatchException, report_execption, get_log_folder
+from toolbox import CatchException, report_exception, get_log_folder
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 fast_debug = False
 
@@ -165,7 +165,7 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -177,12 +177,12 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     if not success:
         # 什么都没有
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -205,7 +205,7 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -215,11 +215,11 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     if not success:
         # 什么都没有
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
@@ -238,7 +238,7 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history,
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -248,11 +248,11 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history,
     if not success:
         # 什么都没有
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -1,5 +1,5 @@
 from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from .crazy_utils import read_and_clean_pdf_text
@@ -119,7 +119,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     try:
         import fitz
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a = f"解析项目: {txt}",
                          b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -133,7 +133,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -142,7 +142,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
 
     # 如果没找到任何文件
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -1,5 +1,5 @@
|
|||||||
from toolbox import update_ui
|
from toolbox import update_ui
|
||||||
from toolbox import CatchException, report_execption
|
from toolbox import CatchException, report_exception
|
||||||
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
||||||
from toolbox import write_history_to_file, promote_file_to_downloadzone
|
from toolbox import write_history_to_file, promote_file_to_downloadzone
|
||||||
|
|
||||||
@@ -138,7 +138,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo
|
|||||||
try:
|
try:
|
||||||
import pdfminer, bs4
|
import pdfminer, bs4
|
||||||
except:
|
except:
|
||||||
report_execption(chatbot, history,
|
report_exception(chatbot, history,
|
||||||
a = f"解析项目: {txt}",
|
a = f"解析项目: {txt}",
|
||||||
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
|
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
@@ -147,7 +147,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo
|
|||||||
project_folder = txt
|
project_folder = txt
|
||||||
else:
|
else:
|
||||||
if txt == "": txt = '空空如也的输入栏'
|
if txt == "": txt = '空空如也的输入栏'
|
||||||
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
return
|
return
|
||||||
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
|
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
|
||||||
@@ -155,7 +155,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo
|
|||||||
# [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
|
# [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
|
||||||
# [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
|
# [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
|
||||||
if len(file_manifest) == 0:
|
if len(file_manifest) == 0:
|
||||||
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
|
report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
return
|
return
|
||||||
yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
||||||
|
@@ -1,4 +1,4 @@
-from toolbox import CatchException, report_execption, get_log_folder, gen_time_str
+from toolbox import CatchException, report_exception, get_log_folder, gen_time_str
 from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@@ -68,7 +68,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
         import nougat
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -84,7 +84,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst

     # 如果没找到任何文件
     if len(file_manifest) == 0:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -1,4 +1,4 @@
-from toolbox import CatchException, report_execption, get_log_folder, gen_time_str
+from toolbox import CatchException, report_exception, get_log_folder, gen_time_str
 from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@@ -26,7 +26,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
         import tiktoken
         import scipdf
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken scipdf_parser```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -43,7 +43,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst

     # 如果没找到任何文件
     if len(file_manifest) == 0:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -1,5 +1,5 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_exception
 from .crazy_utils import read_and_clean_pdf_text
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 fast_debug = False
@@ -81,7 +81,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat
     try:
         import fitz
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a = f"解析项目: {txt}",
                          b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -96,7 +96,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat
     else:
         if txt == "":
             txt = '空空如也的输入栏'
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -105,7 +105,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
     # 如果没找到任何文件
     if len(file_manifest) == 0:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -1,5 +1,5 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 fast_debug = False
@@ -43,14 +43,14 @@ def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]

     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -1,5 +1,5 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 fast_debug = True

@@ -131,7 +131,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     else:
         if txt == "":
             txt = '空空如也的输入栏'
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -141,7 +141,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     file_manifest = [f for f in glob.glob(
         f'{project_folder}/**/*.ipynb', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -1,5 +1,5 @@
 from toolbox import update_ui, promote_file_to_downloadzone, disable_auto_promotion
-from toolbox import CatchException, report_execption, write_history_to_file
+from toolbox import CatchException, report_exception, write_history_to_file
 from .crazy_utils import input_clipping

 def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
@@ -113,7 +113,7 @@ def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
                     [f for f in glob.glob('./*/*.py')]
     project_folder = './'
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -126,12 +126,12 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -144,12 +144,12 @@ def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.m', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}")
+        report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -162,14 +162,14 @@ def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, his
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \
                     # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -182,7 +182,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
@@ -190,7 +190,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system
                     [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -204,7 +204,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \
@@ -212,7 +212,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
                     [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -226,7 +226,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \
@@ -241,7 +241,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
                     [f for f in glob.glob(f'{project_folder}/**/*.css', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -255,7 +255,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \
@@ -263,7 +263,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
                     [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -276,14 +276,14 @@ def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.rs', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/*.lock', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -296,7 +296,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', recursive=True)] + \
@@ -304,7 +304,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
                     [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -318,13 +318,13 @@ def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.cs', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/*.csproj', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -352,7 +352,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     # 若上传压缩文件, 先寻找到解压的文件夹路径, 从而避免解析压缩文件
@@ -365,7 +365,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
     file_manifest = [f for pattern in pattern_include for f in glob.glob(f'{extract_folder_path}/**/{pattern}', recursive=True) if "" != extract_folder_path and \
                      os.path.isfile(f) and (not re.search(pattern_except, f) or pattern.endswith('.' + re.search(pattern_except, f).group().split('.')[-1]))]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -1,5 +1,5 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

@@ -51,14 +51,14 @@ def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
                     # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
                     # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -1,5 +1,5 @@
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-from toolbox import CatchException, report_execption, promote_file_to_downloadzone
+from toolbox import CatchException, report_exception, promote_file_to_downloadzone
 from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file
 import logging
 import requests
@@ -29,7 +29,7 @@ def get_meta_information(url, chatbot, history):
     try:
         session.proxies.update(proxies)
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"获取代理失败 无代理状态下很可能无法访问OpenAI家族的模型及谷歌学术 建议:检查USE_PROXY选项是否修改。",
                          b=f"尝试直接连接")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -146,7 +146,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
         import math
         from bs4 import BeautifulSoup
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a = f"解析项目: {txt}",
                          b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -217,7 +217,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和

 ## [31/48] 请对下面的程序文件做一个概述: crazy_functions\读文章写摘要.py

-这个程序文件是一个Python模块,文件名为crazy_functions\读文章写摘要.py。该模块包含了两个函数,其中主要函数是"读文章写摘要"函数,其实现了解析给定文件夹中的tex文件,对其中每个文件的内容进行摘要生成,并根据各论文片段的摘要,最终生成全文摘要。第二个函数是"解析Paper"函数,用于解析单篇论文文件。其中用到了一些工具函数和库,如update_ui、CatchException、report_execption、write_results_to_file等。
+这个程序文件是一个Python模块,文件名为crazy_functions\读文章写摘要.py。该模块包含了两个函数,其中主要函数是"读文章写摘要"函数,其实现了解析给定文件夹中的tex文件,对其中每个文件的内容进行摘要生成,并根据各论文片段的摘要,最终生成全文摘要。第二个函数是"解析Paper"函数,用于解析单篇论文文件。其中用到了一些工具函数和库,如update_ui、CatchException、report_exception、write_results_to_file等。

 ## [32/48] 请对下面的程序文件做一个概述: crazy_functions\谷歌检索小助手.py

main.py
@@ -1,6 +1,5 @@
 import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
 import pickle
-import codecs
 import base64

 def main():
@@ -242,6 +242,13 @@ for model in AVAIL_LLM_MODELS:
         mi.update({"endpoint": api2d_endpoint})
         model_info.update({model: mi})

+# -=-=-=-=-=-=- azure 对齐支持 -=-=-=-=-=-=-
+for model in AVAIL_LLM_MODELS:
+    if model.startswith('azure-') and (model.replace('azure-','') in model_info.keys()):
+        mi = model_info[model.replace('azure-','')]
+        mi.update({"endpoint": azure_endpoint})
+        model_info.update({model: mi})
+
 # -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=-
 if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS:
     from .bridge_claude import predict_no_ui_long_connection as claude_noui
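The added block registers every `azure-`-prefixed model by cloning its base model's metadata and swapping in the Azure endpoint. A standalone sketch of the same aliasing idea; the dictionaries and endpoint value below are illustrative stand-ins for the real bridge_all tables:

```python
# Hedged sketch of the azure- alias registration shown in the hunk above.
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "azure-gpt-3.5-turbo"]
azure_endpoint = "https://<your-resource>.openai.azure.com/..."  # placeholder
model_info = {"gpt-3.5-turbo": {"endpoint": "https://api.openai.com/v1/chat/completions",
                                "max_token": 4096}}

for model in AVAIL_LLM_MODELS:
    if model.startswith('azure-') and (model.replace('azure-', '') in model_info.keys()):
        mi = model_info[model.replace('azure-', '')]   # reuse the base model's metadata
        mi.update({"endpoint": azure_endpoint})        # but point it at the Azure endpoint
        model_info.update({model: mi})                 # register under the azure- name

assert model_info["azure-gpt-3.5-turbo"]["endpoint"] == azure_endpoint
```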
@@ -564,7 +571,7 @@ def LLM_CATCH_EXCEPTION(f):
     return decorated


-def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window=[], console_slience=False):
    """
    发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
    inputs:
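Giving observe_window a default makes it optional for callers that do not need to watch the stream. When it is supplied, the list acts as a one-slot mailbox: the worker thread writes its partial output into observe_window[0] while another thread polls it. A hedged usage sketch; the llm_kwargs contents and prompts are placeholders, and a real call needs a configured model and API key:

```python
import threading, time
from request_llms.bridge_all import predict_no_ui_long_connection  # unified entry point

# observe_window is a shared mailbox: [0] collects the partial reply,
# [1] (when present) carries a last-alive timestamp for watchdogs.
observe_window = ["", time.time()]

def worker():
    reply = predict_no_ui_long_connection(
        inputs="Summarize this paragraph ...",          # placeholder prompt
        llm_kwargs={'llm_model': 'gpt-3.5-turbo',
                    'temperature': 1.0, 'top_p': 1.0},  # illustrative kwargs
        history=[], sys_prompt="You are a helpful assistant.",
        observe_window=observe_window)
    print("final reply:", reply)

t = threading.Thread(target=worker, daemon=True)
t.start()
while t.is_alive():                                     # poll the mailbox
    time.sleep(1)
    print("partial output so far:", observe_window[0][-80:])
t.join()
```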
@@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"

 from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns



 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetGLM2Handle(LocalLLMHandle):

     def load_model_info(self):
@@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"

 from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns



 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetGLM3Handle(LocalLLMHandle):

     def load_model_info(self):
@@ -8,7 +8,7 @@ import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns

 from .chatglmoonx import ChatGLMModel, chat_template

@@ -17,7 +17,6 @@ from .chatglmoonx import ChatGLMModel, chat_template
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):

     def load_model_info(self):
@@ -7,8 +7,7 @@
     1. predict: 正常对话时使用,具备完备的交互功能,不可多线程

     具备多线程调用能力的函数
-    2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑
-    3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程
+    2. predict_no_ui_long_connection:支持多线程
 """

 import json
@@ -7,8 +7,7 @@
     1. predict: 正常对话时使用,具备完备的交互功能,不可多线程

     具备多线程调用能力的函数
-    2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑
-    3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程
+    2. predict_no_ui_long_connection:支持多线程
 """

 import json
@@ -7,7 +7,7 @@
     1. predict: 正常对话时使用,具备完备的交互功能,不可多线程

     具备多线程调用能力的函数
-    2. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程
+    2. predict_no_ui_long_connection:支持多线程
 """

 import os
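These module docstrings now advertise exactly two entry points per bridge: the UI-bound predict generator and the thread-safe predict_no_ui_long_connection. A hedged sketch of how a plugin might fan independent requests out over the thread-safe variant; the fragments and kwargs are illustrative:

```python
from concurrent.futures import ThreadPoolExecutor
from request_llms.bridge_all import predict_no_ui_long_connection  # thread-safe entry point

fragments = ["paragraph one ...", "paragraph two ...", "paragraph three ..."]
llm_kwargs = {'llm_model': 'gpt-3.5-turbo', 'temperature': 1.0, 'top_p': 1.0}  # illustrative

def summarize(fragment):
    # each call runs independently, which is exactly what the docstrings promise
    return predict_no_ui_long_connection(
        inputs=f"Summarize: {fragment}", llm_kwargs=llm_kwargs,
        history=[], sys_prompt="Be concise.", observe_window=[])

with ThreadPoolExecutor(max_workers=3) as pool:
    summaries = list(pool.map(summarize, fragments))
```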
@@ -5,9 +5,9 @@ from transformers import AutoModel, AutoTokenizer
 import time
 import threading
 import importlib
-from toolbox import update_ui, get_conf
+from toolbox import update_ui, get_conf, ProxyNetworkActivate
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns


 # ------------------------------------------------------------------------------------------------------------------------
@@ -34,7 +34,6 @@ def combine_history(prompt, hist):
 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetInternlmHandle(LocalLLMHandle):

     def load_model_info(self):
@@ -53,14 +52,15 @@ class GetInternlmHandle(LocalLLMHandle):
         import torch
         from transformers import AutoModelForCausalLM, AutoTokenizer
         device = get_conf('LOCAL_MODEL_DEVICE')
-        if self._model is None:
-            tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
-            if device=='cpu':
-                model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16)
-            else:
-                model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda()
+        with ProxyNetworkActivate('Download_LLM'):
+            if self._model is None:
+                tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
+                if device=='cpu':
+                    model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16)
+                else:
+                    model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda()

         model = model.eval()
         return model, tokenizer

     def llm_stream_generator(self, **kwargs):
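The hunk above wraps the weight download in ProxyNetworkActivate('Download_LLM') so the proxy is only active for the duration of the download. A hedged sketch of that scoped-proxy idea; the environment-variable mechanics are an assumption about how such a context manager can be built, not a copy of the toolbox implementation:

```python
import os
from contextlib import contextmanager

# Hedged sketch of a scoped proxy switch in the spirit of ProxyNetworkActivate.
# The real toolbox class reads proxy settings from config; this stand-in
# takes them as an argument.
@contextmanager
def proxy_network_activate(proxy_url):
    saved = {k: os.environ.get(k) for k in ("HTTP_PROXY", "HTTPS_PROXY")}
    os.environ["HTTP_PROXY"] = os.environ["HTTPS_PROXY"] = proxy_url
    try:
        yield  # downloads inside this block go through the proxy
    finally:
        for k, v in saved.items():  # restore whatever was set before
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v

with proxy_network_activate("http://127.0.0.1:7890"):
    pass  # e.g. AutoTokenizer.from_pretrained(...) would run here
```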
@@ -94,8 +94,9 @@ class GetInternlmHandle(LocalLLMHandle):

         inputs = tokenizer([prompt], padding=True, return_tensors="pt")
         input_length = len(inputs["input_ids"][0])
+        device = get_conf('LOCAL_MODEL_DEVICE')
         for k, v in inputs.items():
-            inputs[k] = v.cuda()
+            inputs[k] = v.to(device)
         input_ids = inputs["input_ids"]
         batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
         if generation_config is None:
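Replacing the hard-coded .cuda() with .to(device) is the Windows/CPU loading fix mentioned in the version notes: input tensors now follow the configured LOCAL_MODEL_DEVICE instead of assuming a GPU. A minimal illustration, with the device chosen by availability as a stand-in for the config value:

```python
import torch

# Minimal illustration of device-agnostic tensor placement; 'cuda'/'cpu'
# here stands in for the configured LOCAL_MODEL_DEVICE value.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
inputs = {"input_ids": torch.tensor([[1, 2, 3]])}
for k, v in inputs.items():
    inputs[k] = v.to(device)   # works on CPU-only machines, unlike v.cuda()
```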
@@ -5,14 +5,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 from toolbox import update_ui, get_conf, ProxyNetworkActivate
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 from threading import Thread


 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):

     def load_model_info(self):
@@ -6,16 +6,15 @@ from transformers import AutoModel, AutoTokenizer
 import time
 import threading
 import importlib
-from toolbox import update_ui, get_conf
+from toolbox import update_ui, get_conf, ProxyNetworkActivate
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns



 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):

     def load_model_info(self):
@@ -30,13 +29,13 @@ class GetONNXGLMHandle(LocalLLMHandle):
         import platform
         from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

-        model_id = 'qwen/Qwen-7B-Chat'
-        revision = 'v1.0.1'
-        self._tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision, trust_remote_code=True)
-        # use fp16
-        model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", revision=revision, trust_remote_code=True, fp16=True).eval()
+        with ProxyNetworkActivate('Download_LLM'):
+            model_id = 'qwen/Qwen-7B-Chat'
+            self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True)
+            # use fp16
+            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval()
         model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # 可指定不同的生成长度、top_p等相关超参
         self._model = model

         return self._model, self._tokenizer

@@ -1,6 +1,6 @@
 import time
 import threading
-from toolbox import update_ui
+from toolbox import update_ui, Singleton
 from multiprocessing import Process, Pipe
 from contextlib import redirect_stdout
 from request_llms.queued_pipe import create_queue_pipe
@@ -26,23 +26,20 @@ class ThreadLock(object):
     def __exit__(self, type, value, traceback):
         self.release()

-def SingletonLocalLLM(cls):
-    """
-    Singleton Decroator for LocalLLMHandle
-    """
-    _instance = {}
+@Singleton
+class GetSingletonHandle():
+    def __init__(self):
+        self.llm_model_already_running = {}

-    def _singleton(*args, **kargs):
-        if cls not in _instance:
-            _instance[cls] = cls(*args, **kargs)
-            return _instance[cls]
-        elif _instance[cls].corrupted:
-            _instance[cls] = cls(*args, **kargs)
-            return _instance[cls]
-        else:
-            return _instance[cls]
-    return _singleton
+    def get_llm_model_instance(self, cls, *args, **kargs):
+        if cls not in self.llm_model_already_running:
+            self.llm_model_already_running[cls] = cls(*args, **kargs)
+            return self.llm_model_already_running[cls]
+        elif self.llm_model_already_running[cls].corrupted:
+            self.llm_model_already_running[cls] = cls(*args, **kargs)
+            return self.llm_model_already_running[cls]
+        else:
+            return self.llm_model_already_running[cls]

 def reset_tqdm_output():
     import sys, tqdm
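The rewrite trades one decorator per handle class for a single @Singleton registry: every local-model handle is now fetched through get_llm_model_instance, which also recreates a handle whose worker has been marked corrupted. A hedged sketch of the pattern; Singleton here is a stand-in for the toolbox decorator, and Handle imitates a LocalLLMHandle with its corrupted flag:

```python
# Hedged sketch of the registry pattern introduced above.
def Singleton(cls, _cache={}):
    # stand-in for toolbox.Singleton: cache a single instance of cls
    def get():
        if cls not in _cache:
            _cache[cls] = cls()
        return _cache[cls]
    return get

@Singleton
class GetSingletonHandle:
    def __init__(self):
        self.running = {}
    def get_llm_model_instance(self, cls, *args, **kwargs):
        handle = self.running.get(cls)
        if handle is None or handle.corrupted:   # first use, or worker died
            handle = self.running[cls] = cls(*args, **kwargs)
        return handle

class Handle:                      # imitates a LocalLLMHandle
    corrupted = False

h1 = GetSingletonHandle().get_llm_model_instance(Handle)
h2 = GetSingletonHandle().get_llm_model_instance(Handle)
assert h1 is h2          # cached: same handle returned
h1.corrupted = True
h3 = GetSingletonHandle().get_llm_model_instance(Handle)
assert h3 is not h1      # corrupted handle transparently replaced
```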
@@ -76,7 +73,6 @@ class LocalLLMHandle(Process):
         self.parent_state, self.child_state = create_queue_pipe()
         # allow redirect_stdout
         self.std_tag = "[Subprocess Message] "
-        self.child.write = lambda x: self.child.send(self.std_tag + x)
         self.running = True
         self._model = None
         self._tokenizer = None
@@ -137,6 +133,8 @@ class LocalLLMHandle(Process):
     def run(self):
         # 🏃♂️🏃♂️🏃♂️ run in child process
         # 第一次运行,加载参数
+        self.child.flush = lambda *args: None
+        self.child.write = lambda x: self.child.send(self.std_tag + x)
         reset_tqdm_output()
         self.set_state("`尝试加载模型`")
         try:
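Moving the write/flush monkey-patches into run() means the Pipe endpoint is only turned into a fake file object inside the child process, so print and tqdm output produced while loading the model is forwarded to the parent as tagged messages. A hedged miniature of that trick; the tag and worker body are illustrative:

```python
from multiprocessing import Process, Pipe

# Hedged miniature of the child-side stdout redirection set up in run().
def worker(conn):
    import sys
    conn.write = lambda x: conn.send("[Subprocess Message] " + x)  # file-like shims
    conn.flush = lambda *args: None
    sys.stdout = conn            # print() in the child now goes through the pipe
    print("loading model ...")
    conn.send(None)              # sentinel: done

if __name__ == "__main__":
    parent, child = Pipe()
    p = Process(target=worker, args=(child,))
    p.start()
    while (msg := parent.recv()) is not None:
        print("from child:", msg, end="")
    p.join()
```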
@@ -220,7 +218,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
         """
             refer to request_llms/bridge_all.py
         """
-        _llm_handle = LLMSingletonClass()
+        _llm_handle = GetSingletonHandle().get_llm_model_instance(LLMSingletonClass)
         if len(observe_window) >= 1:
             observe_window[0] = load_message + "\n\n" + _llm_handle.get_state()
         if not _llm_handle.running:
@@ -268,7 +266,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
         """
         chatbot.append((inputs, ""))

-        _llm_handle = LLMSingletonClass()
+        _llm_handle = GetSingletonHandle().get_llm_model_instance(LLMSingletonClass)
         chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.get_state())
         yield from update_ui(chatbot=chatbot, history=[])
         if not _llm_handle.running:
@@ -15,6 +15,7 @@ Markdown
 pygments
 pymupdf
 openai
+pyautogen
 numpy
 arxiv
 rich
@@ -15,11 +15,11 @@ if __name__ == "__main__":
     # from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
     # from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection
     # from request_llms.bridge_claude import predict_no_ui_long_connection
-    # from request_llms.bridge_internlm import predict_no_ui_long_connection
+    from request_llms.bridge_internlm import predict_no_ui_long_connection
     # from request_llms.bridge_qwen import predict_no_ui_long_connection
     # from request_llms.bridge_spark import predict_no_ui_long_connection
     # from request_llms.bridge_zhipu import predict_no_ui_long_connection
-    from request_llms.bridge_chatglm3 import predict_no_ui_long_connection
+    # from request_llms.bridge_chatglm3 import predict_no_ui_long_connection

     llm_kwargs = {
         'max_length': 4096,
@@ -187,7 +187,7 @@ def HotReload(f):
 其他小工具:
     - write_history_to_file: 将结果写入markdown文件中
     - regular_txt_to_markdown: 将普通文本转换为Markdown格式的文本。
-    - report_execption: 向chatbot中添加简单的意外错误信息
+    - report_exception: 向chatbot中添加简单的意外错误信息
     - text_divide_paragraph: 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。
     - markdown_convertion: 用多种方式组合,将markdown转化为好看的html
     - format_io: 接管gradio默认的markdown处理方式
@@ -260,7 +260,7 @@ def regular_txt_to_markdown(text):



-def report_execption(chatbot, history, a, b):
+def report_exception(chatbot, history, a, b):
     """
     向chatbot中添加错误信息
     """
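This hunk is the origin of the commit-wide rename: the misspelled report_execption helper becomes report_exception, and every call site in the plugin files above follows suit. The helper itself is tiny; a hedged sketch of what it amounts to, noting the real toolbox body may differ:

```python
def report_exception(chatbot, history, a, b):
    """向chatbot中添加错误信息 (hedged sketch; the real toolbox body may differ)."""
    chatbot.append((a, b))   # show the query `a` and the error text `b` in the UI
    history.extend([a, b])   # keep the exchange in the conversation history

# usage mirrors the call sites above:
chatbot, history = [], []
report_exception(chatbot, history, a="解析项目: ./demo", b="找不到本地项目或无权访问: ./demo")
```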
version
@@ -1,5 +1,5 @@
 {
-    "version": 3.57,
+    "version": 3.59,
     "show_feature": true,
-    "new_feature": "支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
+    "new_feature": "AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
 }