Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-05 22:16:49 +00:00

Commit: normalize source code names
@@ -168,7 +168,7 @@ class InterviewAssistant(AliyunASR):


 @CatchException
-def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def Audio_Assistant(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # pip install -U openai-whisper
     chatbot.append(["对话助手函数插件:使用时,双手离开鼠标键盘吧", "音频助手, 正在听您讲话(点击“停止”键可终止程序)..."])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -132,13 +132,13 @@ def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):


 @CatchException
-def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, WEB_PORT):
+def Audio_Summary(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, WEB_PORT):
     import glob, os

     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
-        "总结音视频内容,函数插件贡献者: dalvqw & BinaryHusky"])
+        "Audio_Summary内容,函数插件贡献者: dalvqw & BinaryHusky"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

     try:
@@ -4,7 +4,7 @@ from crazy_functions.crazy_utils import input_clipping
 import copy, json

 @CatchException
-def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def Commandline_Assistant(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本, 例如需要翻译的一段话, 再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
@@ -139,7 +139,7 @@ def get_recent_file_prompt_support(chatbot):
     return path

 @CatchException
-def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def Dynamic_Function_Generate(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -159,7 +159,7 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_

     # ⭐ 文件上传区是否有东西
     # 1. 如果有文件: 作为函数参数
-    # 2. 如果没有文件:需要用GPT提取参数 (太懒了,以后再写,虚空终端已经实现了类似的代码)
+    # 2. 如果没有文件:需要用GPT提取参数 (太懒了,以后再写,Void_Terminal已经实现了类似的代码)
     file_list = []
     if get_plugin_arg(plugin_kwargs, key="file_path_arg", default=False):
         file_path = get_plugin_arg(plugin_kwargs, key="file_path_arg", default=None)
@@ -132,7 +132,7 @@ def get_meta_information(url, chatbot, history):
     return profile

 @CatchException
-def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def Google_Scholar_Assistant_Legacy(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     disable_auto_promotion(chatbot=chatbot)
     # 基本信息:功能、贡献者
     chatbot.append([
@@ -13,13 +13,13 @@ def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     user_request    当前用户的请求信息(IP地址等)
     """
     history = []    # 清空历史,以免输入溢出
-    chatbot.append(("这是什么功能?", "交互功能函数模板。在执行完成之后, 可以将自身的状态存储到cookie中, 等待用户的再次调用。"))
+    chatbot.append(("这是什么功能?", "Interactive_Func_Template。在执行完成之后, 可以将自身的状态存储到cookie中, 等待用户的再次调用。"))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

     state = chatbot._cookies.get('plugin_state_0001', None) # 初始化插件状态

     if state is None:
-        chatbot._cookies['lock_plugin'] = 'crazy_functions.交互功能函数模板->交互功能模板函数' # 赋予插件锁定 锁定插件回调路径,当下一次用户提交时,会直接转到该函数
+        chatbot._cookies['lock_plugin'] = 'crazy_functions.Interactive_Func_Template->交互功能模板函数' # 赋予插件锁定 锁定插件回调路径,当下一次用户提交时,会直接转到该函数
         chatbot._cookies['plugin_state_0001'] = 'wait_user_keyword' # 赋予插件状态

         chatbot.append(("第一次调用:", "请输入关键词, 我将为您查找相关壁纸, 建议使用英文单词, 插件锁定中,请直接提交即可。"))
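The hunk above shows the cookie-based plugin lock these plugins share: `lock_plugin` stores a `'module->function'` callback path and a second cookie carries the plugin state, so the next user submit can be routed back to the same function. A minimal sketch of how such a lock string could be resolved into a callable (the dispatcher name here is hypothetical, not the project's actual router):

import importlib

def resolve_locked_plugin(cookies: dict):
    # Hypothetical helper: turn the 'module->function' lock string into a callable.
    lock = cookies.get('lock_plugin')  # e.g. 'crazy_functions.Interactive_Func_Template->交互功能模板函数'
    if not lock:
        return None  # no lock set: normal plugin routing applies
    module_name, func_name = lock.split('->')
    module = importlib.import_module(module_name)
    return getattr(module, func_name)  # the next submit goes straight to this function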
@@ -16,7 +16,7 @@ def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_
         llm_kwargs,
         cls,
         plugin_name='MiniGame_ResumeStory',
-        callback_fn='crazy_functions.互动小游戏->随机小游戏',
+        callback_fn='crazy_functions.Interactive_Mini_Game->随机小游戏',
         lock_plugin=True
     )
     yield from state.continue_game(prompt, chatbot, history)

@@ -34,7 +34,7 @@ def 随机小游戏1(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system
         llm_kwargs,
         cls,
         plugin_name='MiniGame_ASCII_Art',
-        callback_fn='crazy_functions.互动小游戏->随机小游戏1',
+        callback_fn='crazy_functions.Interactive_Mini_Game->随机小游戏1',
         lock_plugin=True
     )
     yield from state.continue_game(prompt, chatbot, history)

@@ -297,7 +297,7 @@ def 解析历史输入(history, llm_kwargs, file_manifest, chatbot, plugin_kwarg


 @CatchException
-def 生成多种Mermaid图表(
+def Mermaid_Figure_Gen(
     txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port
 ):
     """

@@ -426,7 +426,7 @@ class Mermaid_Gen(GptAcademicPluginTemplate):
             "思维导图",
         ]
         plugin_kwargs = options.index(plugin_kwargs['Type_of_Mermaid'])
-        yield from 生成多种Mermaid图表(
+        yield from Mermaid_Figure_Gen(
             txt,
             llm_kwargs,
             plugin_kwargs,

@@ -22,7 +22,7 @@ def remove_model_prefix(llm):


 @CatchException
-def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def Multi_Agent_Legacy终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
@@ -78,17 +78,17 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
         chatbot.get_cookies()['lock_plugin'] = None
     persistent_class_multi_user_manager = GradioMultiuserManagerForPersistentClasses()
     user_uuid = chatbot.get_cookies().get('uuid')
-    persistent_key = f"{user_uuid}->多智能体终端"
+    persistent_key = f"{user_uuid}->Multi_Agent_Legacy终端"
     if persistent_class_multi_user_manager.already_alive(persistent_key):
-        # 当已经存在一个正在运行的多智能体终端时,直接将用户输入传递给它,而不是再次启动一个新的多智能体终端
+        # 当已经存在一个正在运行的Multi_Agent_Legacy终端时,直接将用户输入传递给它,而不是再次启动一个新的Multi_Agent_Legacy终端
         logger.info('[debug] feed new user input')
         executor = persistent_class_multi_user_manager.get(persistent_key)
         exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="resume")
     else:
-        # 运行多智能体终端 (首次)
+        # 运行Multi_Agent_Legacy终端 (首次)
         logger.info('[debug] create new executor instance')
         history = []
-        chatbot.append(["正在启动: 多智能体终端", "插件动态生成, 执行开始, 作者 Microsoft & Binary-Husky."])
+        chatbot.append(["正在启动: Multi_Agent_Legacy终端", "插件动态生成, 执行开始, 作者 Microsoft & Binary-Husky."])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         executor = AutoGenMath(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
         persistent_class_multi_user_manager.set(persistent_key, executor)
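The hunk above keys a long-lived executor on `f"{user_uuid}->Multi_Agent_Legacy终端"` so that concurrent users each reuse their own running instance instead of starting a new one. A rough sketch of what a manager like `GradioMultiuserManagerForPersistentClasses` could look like (an illustrative stand-in, not the project's implementation):

class PersistentInstanceManager:
    """Illustrative stand-in: keeps one live object per '<uuid>-><plugin>' key."""
    def __init__(self):
        self._instances = {}

    def already_alive(self, key: str) -> bool:
        return key in self._instances

    def get(self, key: str):
        return self._instances[key]

    def set(self, key: str, instance) -> None:
        self._instances[key] = instance

# usage mirroring the diff: reuse the executor if one is already running for this user
manager = PersistentInstanceManager()
persistent_key = "some-user-uuid->Multi_Agent_Legacy终端"
if not manager.already_alive(persistent_key):
    manager.set(persistent_key, object())  # the real code stores an AutoGenMath executor here
executor = manager.get(persistent_key)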
@@ -96,7 +96,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_

     if exit_reason == "wait_feedback":
         # 当用户点击了“等待反馈”按钮时,将executor存储到cookie中,等待用户的再次调用
-        executor.chatbot.get_cookies()['lock_plugin'] = 'crazy_functions.多智能体->多智能体终端'
+        executor.chatbot.get_cookies()['lock_plugin'] = 'crazy_functions.Multi_Agent_Legacy->Multi_Agent_Legacy终端'
     else:
         executor.chatbot.get_cookies()['lock_plugin'] = None
     yield from update_ui(chatbot=executor.chatbot, history=executor.history) # 更新状态

@@ -62,7 +62,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro


 @CatchException
-def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def PDF_QA标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     import glob, os

     # 基本信息:功能、贡献者

@@ -103,13 +103,13 @@ do not have too much repetitive information, numerical values using the original


 @CatchException
-def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def PDF_Summary(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     import glob, os

     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
-        "批量总结PDF文档。函数插件贡献者: ValeriaWong,Eralien"])
+        "PDF_Summary。函数插件贡献者: ValeriaWong,Eralien"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

     # 尝试导入依赖,如果缺少依赖,则给出安装建议
@@ -43,7 +43,7 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo


 @CatchException
-def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def Paper_Abstract_Writer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):

@@ -4,7 +4,7 @@ from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

-def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
+def Program_Comment_Gen(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
     import time, os
     logger.info('begin analysis on:', file_manifest)
     for index, fp in enumerate(file_manifest):

@@ -34,7 +34,7 @@ def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs,


 @CatchException
-def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def 批量Program_Comment_Gen(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []    # 清空历史,以免输入溢出
     import glob, os
     if os.path.exists(txt):

@@ -51,4 +51,4 @@ def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
-    yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
+    yield from Program_Comment_Gen(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

@@ -79,8 +79,8 @@ def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     # yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
     # chatbot._cookies['langchain_plugin_embedding'] = kai.get_current_archive_id()
     # chatbot._cookies['lock_plugin'] = 'crazy_functions.知识库文件注入->读取知识库作答'
-    # chatbot.append(['完成', "“根据知识库作答”函数插件已经接管问答系统, 提问吧! 但注意, 您接下来不能再使用其他插件了,刷新页面即可以退出知识库问答模式。"])
-    chatbot.append(['构建完成', f"当前知识库内的有效文件:\n\n---\n\n{kai_files}\n\n---\n\n请切换至“知识库问答”插件进行知识库访问, 或者使用此插件继续上传更多文件。"])
+    # chatbot.append(['完成', "“根据知识库作答”函数插件已经接管问答系统, 提问吧! 但注意, 您接下来不能再使用其他插件了,刷新页面即可以退出Vectorstore_QA模式。"])
+    chatbot.append(['构建完成', f"当前知识库内的有效文件:\n\n---\n\n{kai_files}\n\n---\n\n请切换至“Vectorstore_QA”插件进行知识库访问, 或者使用此插件继续上传更多文件。"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新

 @CatchException

@@ -21,7 +21,7 @@ Please describe in natural language what you want to do.
 5. If you don't need to upload a file, you can simply repeat your command again.
 """
 explain_msg = """
-## 虚空终端插件说明:
+## Void_Terminal插件说明:

 1. 请用**自然语言**描述您需要做什么。例如:
     - 「请调用插件,为我翻译PDF论文,论文我刚刚放到上传区了」

@@ -104,9 +104,9 @@ def analyze_intention_with_simple_rules(txt):


 @CatchException
-def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def Void_Terminal(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     disable_auto_promotion(chatbot=chatbot)
-    # 获取当前虚空终端状态
+    # 获取当前Void_Terminal状态
     state = VoidTerminalState.get_state(chatbot)
     appendix_msg = ""

@@ -121,21 +121,21 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
             state.set_state(chatbot=chatbot, key='has_provided_explanation', value=True)
             state.unlock_plugin(chatbot=chatbot)
             yield from update_ui(chatbot=chatbot, history=history)
-            yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
+            yield from Void_Terminal主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
             return
         else:
             # 如果意图模糊,提示
             state.set_state(chatbot=chatbot, key='has_provided_explanation', value=True)
             state.lock_plugin(chatbot=chatbot)
-            chatbot.append(("虚空终端状态:", explain_msg+appendix_msg))
+            chatbot.append(("Void_Terminal状态:", explain_msg+appendix_msg))
             yield from update_ui(chatbot=chatbot, history=history)
             return



-def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def Void_Terminal主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     history = []
-    chatbot.append(("虚空终端状态: ", f"正在执行任务: {txt}"))
+    chatbot.append(("Void_Terminal状态: ", f"正在执行任务: {txt}"))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

     # ⭐ ⭐ ⭐ 分析用户意图

@@ -79,13 +79,13 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot


 @CatchException
-def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
+def Word_Summary(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     import glob, os

     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
-        "批量总结Word文档。函数插件贡献者: JasonGuo1。注意, 如果是.doc文件, 请先转化为.docx格式。"])
+        "批量Word_Summary。函数插件贡献者: JasonGuo1。注意, 如果是.doc文件, 请先转化为.docx格式。"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

     # 尝试导入依赖,如果缺少依赖,则给出安装建议
@@ -9,7 +9,7 @@ from tqdm import tqdm

 class ArxivSource(DataSource):
     """arXiv API实现"""
-
+
     CATEGORIES = {
         # 物理学
         "Physics": {

@@ -27,7 +27,7 @@ class ArxivSource(DataSource):
             "physics": "物理学",
             "quant-ph": "量子物理",
         },
-
+
         # 数学
         "Mathematics": {
             "math.AG": "代数几何",

@@ -63,7 +63,7 @@ class ArxivSource(DataSource):
             "math.ST": "统计理论",
             "math.SG": "辛几何",
         },
-
+
         # 计算机科学
         "Computer Science": {
             "cs.AI": "人工智能",

@@ -107,7 +107,7 @@ class ArxivSource(DataSource):
             "cs.SC": "符号计算",
             "cs.SY": "系统与控制",
         },
-
+
         # 定量生物学
         "Quantitative Biology": {
             "q-bio.BM": "生物分子",

@@ -121,7 +121,7 @@ class ArxivSource(DataSource):
             "q-bio.SC": "亚细胞过程",
             "q-bio.TO": "组织与器官",
         },
-
+
         # 定量金融
         "Quantitative Finance": {
             "q-fin.CP": "计算金融",

@@ -134,7 +134,7 @@ class ArxivSource(DataSource):
             "q-fin.ST": "统计金融",
             "q-fin.TR": "交易与市场微观结构",
         },
-
+
         # 统计学
         "Statistics": {
             "stat.AP": "应用统计",

@@ -144,7 +144,7 @@ class ArxivSource(DataSource):
             "stat.OT": "其他统计",
             "stat.TH": "统计理论",
         },
-
+
         # 电气工程与系统科学
         "Electrical Engineering and Systems Science": {
             "eess.AS": "音频与语音处理",

@@ -152,7 +152,7 @@ class ArxivSource(DataSource):
             "eess.SP": "信号处理",
             "eess.SY": "系统与控制",
         },
-
+
         # 经济学
         "Economics": {
             "econ.EM": "计量经济学",

@@ -170,15 +170,15 @@ class ArxivSource(DataSource):
             'lastUpdatedDate': arxiv.SortCriterion.LastUpdatedDate, # 最后更新日期
             'submittedDate': arxiv.SortCriterion.SubmittedDate, # 提交日期
         }
-
+
         self.sort_order_options = {
             'ascending': arxiv.SortOrder.Ascending,
             'descending': arxiv.SortOrder.Descending
         }
-
+
         self.default_sort = 'lastUpdatedDate'
         self.default_order = 'descending'
-
+
     def _initialize(self) -> None:
         """初始化客户端,设置默认参数"""
         self.client = arxiv.Client()

@@ -196,22 +196,22 @@ class ArxivSource(DataSource):
             # 使用默认排序如果提供的排序选项无效
             if not sort_by or sort_by not in self.sort_options:
                 sort_by = self.default_sort
-
-            # 使用默认排序顺序如果提供的顺序无效
+
+            # 使用默认排序顺序如果提供的顺序无效
             if not sort_order or sort_order not in self.sort_order_options:
                 sort_order = self.default_order
-
+
             # 如果指定了起始年份,添加到查询中
             if start_year:
                 query = f"{query} AND submittedDate:[{start_year}0101 TO 99991231]"
-
+
             search = arxiv.Search(
                 query=query,
                 max_results=limit,
                 sort_by=self.sort_options[sort_by],
                 sort_order=self.sort_order_options[sort_order]
             )
-
+
             results = list(self.client.results(search))
             return [self._parse_paper_data(result) for result in results]
         except Exception as e:
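The hunk above builds an `arxiv.Search` from the query, limit, and the `SortCriterion`/`SortOrder` options configured in `__init__`. A small standalone sketch of the same call pattern with the `arxiv` package (query string and limit are arbitrary examples):

import arxiv

client = arxiv.Client()
search = arxiv.Search(
    query="cat:cs.AI AND submittedDate:[20230101 TO 99991231]",  # example query
    max_results=5,
    sort_by=arxiv.SortCriterion.SubmittedDate,
    sort_order=arxiv.SortOrder.Descending,
)
for result in client.results(search):
    print(result.title, result.entry_id)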
@@ -220,13 +220,13 @@ class ArxivSource(DataSource):

     async def search_by_id(self, paper_id: Union[str, List[str]]) -> List[PaperMetadata]:
         """按ID搜索论文
-
+
         Args:
             paper_id: 单个arXiv ID或ID列表,例如:'2005.14165' 或 ['2005.14165', '2103.14030']
         """
         if isinstance(paper_id, str):
             paper_id = [paper_id]
-
+
         search = arxiv.Search(
             id_list=paper_id,
             max_results=len(paper_id)

@@ -235,8 +235,8 @@ class ArxivSource(DataSource):
         return [self._parse_paper_data(result) for result in results]

     async def search_by_category(
-        self,
-        category: str,
+        self,
+        category: str,
         limit: int = 100,
         sort_by: str = 'relevance',
         sort_order: str = 'descending',

@@ -244,11 +244,11 @@ class ArxivSource(DataSource):
     ) -> List[PaperMetadata]:
         """按类别搜索论文"""
         query = f"cat:{category}"
-
+
         # 如果指定了起始年份,添加到查询中
         if start_year:
             query = f"{query} AND submittedDate:[{start_year}0101 TO 99991231]"
-
+
         return await self.search(
             query=query,
             limit=limit,
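The `start_year` handling above simply appends a `submittedDate` range to the arXiv query string; composed, the query looks like this (values are illustrative):

category = "cs.AI"
start_year = 2023
query = f"cat:{category}"
if start_year:
    query = f"{query} AND submittedDate:[{start_year}0101 TO 99991231]"
print(query)  # -> cat:cs.AI AND submittedDate:[20230101 TO 99991231]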
@@ -257,19 +257,19 @@ class ArxivSource(DataSource):
         )

     async def search_by_authors(
-        self,
-        authors: List[str],
+        self,
+        authors: List[str],
         limit: int = 100,
         sort_by: str = 'relevance',
         start_year: int = None
     ) -> List[PaperMetadata]:
         """按作者搜索论文"""
         query = " AND ".join([f"au:\"{author}\"" for author in authors])
-
+
         # 如果指定了起始年份,添加到查询中
         if start_year:
             query = f"{query} AND submittedDate:[{start_year}0101 TO 99991231]"
-
+
         return await self.search(
             query=query,
             limit=limit,

@@ -277,9 +277,9 @@ class ArxivSource(DataSource):
         )

     async def search_by_date_range(
-        self,
-        start_date: datetime,
-        end_date: datetime,
+        self,
+        start_date: datetime,
+        end_date: datetime,
         limit: int = 100,
         sort_by: Literal['relevance', 'updated', 'submitted'] = 'submitted',
         sort_order: Literal['ascending', 'descending'] = 'descending'

@@ -287,20 +287,20 @@ class ArxivSource(DataSource):
         """按日期范围搜索论文"""
         query = f"submittedDate:[{start_date.strftime('%Y%m%d')} TO {end_date.strftime('%Y%m%d')}]"
         return await self.search(
-            query,
-            limit=limit,
-            sort_by=sort_by,
+            query,
+            limit=limit,
+            sort_by=sort_by,
             sort_order=sort_order
         )

     async def download_pdf(self, paper_id: str, dirpath: str = "./", filename: str = "") -> str:
         """下载论文PDF
-
+
         Args:
             paper_id: arXiv ID
             dirpath: 保存目录
             filename: 文件名,如果为空则使用默认格式:{paper_id}_{标题}.pdf
-
+
         Returns:
             保存的文件路径
         """

@@ -308,24 +308,24 @@ class ArxivSource(DataSource):
         if not papers:
             raise ValueError(f"未找到ID为 {paper_id} 的论文")
         paper = papers[0]
-
+
         if not filename:
             # 清理标题中的非法字符
             safe_title = "".join(c if c.isalnum() else "_" for c in paper.title)
             filename = f"{paper_id}_{safe_title}.pdf"
-
+
         filepath = os.path.join(dirpath, filename)
         urlretrieve(paper.url, filepath)
         return filepath

     async def download_source(self, paper_id: str, dirpath: str = "./", filename: str = "") -> str:
         """下载论文源文件(通常是LaTeX源码)
-
+
         Args:
             paper_id: arXiv ID
             dirpath: 保存目录
             filename: 文件名,如果为空则使用默认格式:{paper_id}_{标题}.tar.gz
-
+
         Returns:
             保存的文件路径
         """

@@ -333,11 +333,11 @@ class ArxivSource(DataSource):
         if not papers:
             raise ValueError(f"未找到ID为 {paper_id} 的论文")
         paper = papers[0]
-
+
         if not filename:
             safe_title = "".join(c if c.isalnum() else "_" for c in paper.title)
             filename = f"{paper_id}_{safe_title}.tar.gz"
-
+
         filepath = os.path.join(dirpath, filename)
         source_url = paper.url.replace("/pdf/", "/src/")
         urlretrieve(source_url, filepath)
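Both download helpers above sanitize the paper title into a safe filename before calling `urlretrieve`; the same idea in isolation (the title, ID, and URL here are placeholders):

import os
from urllib.request import urlretrieve

title = "Attention Is All You Need?"  # placeholder title
paper_id = "1706.03762"               # placeholder arXiv ID
safe_title = "".join(c if c.isalnum() else "_" for c in title)
filename = f"{paper_id}_{safe_title}.pdf"
filepath = os.path.join("./", filename)
# urlretrieve(pdf_url, filepath)  # pdf_url would come from the paper metadata
print(filepath)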
@@ -353,10 +353,10 @@ class ArxivSource(DataSource):

     async def get_paper_details(self, paper_id: str) -> Optional[PaperMetadata]:
         """获取论文详情
-
+
         Args:
             paper_id: arXiv ID 或 DOI
-
+
         Returns:
             论文详细信息,如果未找到返回 None
         """

@@ -367,7 +367,7 @@ class ArxivSource(DataSource):
             # 如果是 DOI 格式且是 arXiv 论文,提取 ID
             elif paper_id.startswith("10.48550/arXiv."):
                 paper_id = paper_id.split(".")[-1]
-
+
             papers = await self.search_by_id(paper_id)
             return papers[0] if papers else None
         except Exception as e:

@@ -379,7 +379,7 @@ class ArxivSource(DataSource):
         # 解析主要类别和次要类别
         primary_category = result.primary_category
         categories = result.categories
-
+
         # 构建venue信息
         venue_info = {
             'primary_category': primary_category,

@@ -387,7 +387,7 @@ class ArxivSource(DataSource):
             'comments': getattr(result, 'comment', None),
             'journal_ref': getattr(result, 'journal_ref', None)
         }
-
+
         return PaperMetadata(
             title=result.title,
             authors=[author.name for author in result.authors],

@@ -405,15 +405,15 @@ class ArxivSource(DataSource):
         )

     async def get_latest_papers(
-        self,
-        category: str,
+        self,
+        category: str,
         debug: bool = False,
         batch_size: int = 50
     ) -> List[PaperMetadata]:
         """获取指定类别的最新论文
-
+
         通过 RSS feed 获取最新发布的论文,然后批量获取详细信息
-
+
         Args:
             category: arXiv类别,例如:
                 - 整个领域: 'cs'

@@ -421,10 +421,10 @@ class ArxivSource(DataSource):
                 - 多个类别: 'cs.AI+q-bio.NC'
             debug: 是否为调试模式,如果为True则只返回5篇最新论文
             batch_size: 批量获取论文的数量,默认50
-
+
         Returns:
             论文列表
-
+
         Raises:
             ValueError: 如果类别无效
         """

@@ -433,22 +433,22 @@ class ArxivSource(DataSource):
             # 1. 转换为小写
             # 2. 确保多个类别之间使用+连接
             category = category.lower().replace(' ', '+')
-
+
             # 构建RSS feed URL
             feed_url = f"https://rss.arxiv.org/rss/{category}"
             print(f"正在获取RSS feed: {feed_url}") # 添加调试信息
-
+
             feed = feedparser.parse(feed_url)
-
+
             # 检查feed是否有效
             if hasattr(feed, 'status') and feed.status != 200:
                 raise ValueError(f"获取RSS feed失败,状态码: {feed.status}")
-
+
             if not feed.entries:
                 print(f"警告:未在feed中找到任何条目") # 添加调试信息
                 print(f"Feed标题: {feed.feed.title if hasattr(feed, 'feed') else '无标题'}")
                 raise ValueError(f"无效的arXiv类别或未找到论文: {category}")
-
+
             if debug:
                 # 调试模式:只获取5篇最新论文
                 search = arxiv.Search(
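`get_latest_papers` above pulls the category's RSS feed with `feedparser` and then extracts arXiv IDs from the entries. A minimal sketch of that feed-parsing step (the ID extraction shown here is a simplified assumption about the feed's entry format, not the project's exact parsing code):

import feedparser

category = "cs.AI"
feed = feedparser.parse(f"https://rss.arxiv.org/rss/{category}")
if getattr(feed, "status", 200) != 200 or not feed.entries:
    raise ValueError(f"无效的arXiv类别或未找到论文: {category}")

paper_ids = []
for entry in feed.entries:
    # assumed entry id shape, e.g. 'oai:arXiv.org:2405.12345v1'
    raw_id = entry.id.split(":")[-1]
    paper_ids.append(raw_id.split("v")[0])  # strip the version suffix
print(paper_ids[:5])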
@@ -459,7 +459,7 @@ class ArxivSource(DataSource):
                 )
                 results = list(self.client.results(search))
                 return [self._parse_paper_data(result) for result in results]
-
+
             # 正常模式:获取所有新论文
             # 从RSS条目中提取arXiv ID
             paper_ids = []

@@ -476,13 +476,13 @@ class ArxivSource(DataSource):
                 except Exception as e:
                     print(f"警告:处理条目时出错: {str(e)}") # 添加调试信息
                     continue
-
+
             if not paper_ids:
                 print("未能从feed中提取到任何论文ID") # 添加调试信息
                 return []
-
+
             print(f"成功提取到 {len(paper_ids)} 个论文ID") # 添加调试信息
-
+
             # 批量获取论文详情
             papers = []
             with tqdm(total=len(paper_ids), desc="获取arXiv论文") as pbar:

@@ -495,9 +495,9 @@ class ArxivSource(DataSource):
                     batch_results = list(self.client.results(search))
                     papers.extend([self._parse_paper_data(result) for result in batch_results])
                     pbar.update(len(batch_results))
-
+
             return papers
-
+
         except Exception as e:
             print(f"获取最新论文时发生错误: {str(e)}")
             import traceback

@@ -507,18 +507,18 @@ class ArxivSource(DataSource):
 async def example_usage():
     """ArxivSource使用示例"""
     arxiv_source = ArxivSource()
-
+
     try:
         # 示例1:基本搜索,使用不同的排序方式
         # print("\n=== 示例1:搜索最新的机器学习论文(按提交时间排序)===")
         # papers = await arxiv_source.search(
-        #     "ti:\"machine learning\"",
+        #     "ti:\"machine learning\"",
         #     limit=3,
         #     sort_by='submitted',
         #     sort_order='descending'
         # )
         # print(f"找到 {len(papers)} 篇论文")
-
+
         # for i, paper in enumerate(papers, 1):
         #     print(f"\n--- 论文 {i} ---")
         #     print(f"标题: {paper.title}")

@@ -544,7 +544,7 @@ async def example_usage():
         # # 示例3:按类别搜索
         # print("\n=== 示例3:搜索人工智能领域最新论文 ===")
         # ai_papers = await arxiv_source.search_by_category(
-        #     "cs.AI",
+        #     "cs.AI",
         #     limit=2,
         #     sort_by='updated',
         #     sort_order='descending'

@@ -558,7 +558,7 @@ async def example_usage():
         # # 示例4:按作者搜索
         # print("\n=== 示例4:搜索特定作者的论文 ===")
         # author_papers = await arxiv_source.search_by_authors(
-        #     ["Bengio"],
+        #     ["Bengio"],
         #     limit=2,
         #     sort_by='relevance'
         # )

@@ -598,7 +598,7 @@ async def example_usage():

         # 示例6:获取最新论文
         print("\n=== 示例8:获取最新论文 ===")
-
+
         # 获取CS.AI领域的最新论文
         print("\n--- 获取AI领域最新论文 ---")
         ai_latest = await arxiv_source.get_latest_papers("cs.AI", debug=True)

@@ -607,7 +607,7 @@ async def example_usage():
             print(f"标题: {paper.title}")
             print(f"作者: {', '.join(paper.authors)}")
             print(f"发表年份: {paper.year}")
-
+
         # 获取整个计算机科学领域的最新论文
         print("\n--- 获取整个CS领域最新论文 ---")
         cs_latest = await arxiv_source.get_latest_papers("cs", debug=True)

@@ -616,7 +616,7 @@ async def example_usage():
             print(f"标题: {paper.title}")
             print(f"作者: {', '.join(paper.authors)}")
             print(f"发表年份: {paper.year}")
-
+
         # 获取多个类别的最新论文
         print("\n--- 获取AI和机器学习领域最新论文 ---")
         multi_latest = await arxiv_source.get_latest_papers("cs.AI+cs.LG", debug=True)

@@ -633,4 +633,4 @@ async def example_usage():

 if __name__ == "__main__":
     import asyncio
-    asyncio.run(example_usage())
+    asyncio.run(example_usage())
@@ -8,7 +8,7 @@ class VoidTerminalState():
         self.has_provided_explanation = False

     def lock_plugin(self, chatbot):
-        chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->虚空终端'
+        chatbot._cookies['lock_plugin'] = 'crazy_functions.Void_Terminal->Void_Terminal'
         chatbot._cookies['plugin_state'] = pickle.dumps(self)

     def unlock_plugin(self, chatbot):
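`VoidTerminalState` above persists itself by pickling the whole object into the chatbot's cookie dict alongside the `lock_plugin` path. A self-contained sketch of that save/restore round trip (the state fields and cookie dict here are simplified stand-ins for the real class):

import pickle

class PluginState:
    def __init__(self):
        self.has_provided_explanation = False

cookies = {}  # stand-in for chatbot._cookies

# lock: remember the callback path and serialize the state object
state = PluginState()
state.has_provided_explanation = True
cookies['lock_plugin'] = 'crazy_functions.Void_Terminal->Void_Terminal'
cookies['plugin_state'] = pickle.dumps(state)

# on the next request: restore the state from the cookie
restored = pickle.loads(cookies['plugin_state'])
print(restored.has_provided_explanation)  # True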
@@ -1,162 +0,0 @@
-from loguru import logger
-from toolbox import update_ui
-from toolbox import CatchException, report_exception
-from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-from toolbox import write_history_to_file, promote_file_to_downloadzone
-
-fast_debug = False
-
-def readPdf(pdfPath):
-    """
-    读取pdf文件,返回文本内容
-    """
-    import pdfminer
-    from pdfminer.pdfparser import PDFParser
-    from pdfminer.pdfdocument import PDFDocument
-    from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
-    from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
-    from pdfminer.pdfdevice import PDFDevice
-    from pdfminer.layout import LAParams
-    from pdfminer.converter import PDFPageAggregator
-
-    fp = open(pdfPath, 'rb')
-
-    # Create a PDF parser object associated with the file object
-    parser = PDFParser(fp)
-
-    # Create a PDF document object that stores the document structure.
-    # Password for initialization as 2nd parameter
-    document = PDFDocument(parser)
-    # Check if the document allows text extraction. If not, abort.
-    if not document.is_extractable:
-        raise PDFTextExtractionNotAllowed
-
-    # Create a PDF resource manager object that stores shared resources.
-    rsrcmgr = PDFResourceManager()
-
-    # Create a PDF device object.
-    # device = PDFDevice(rsrcmgr)
-
-    # BEGIN LAYOUT ANALYSIS.
-    # Set parameters for analysis.
-    laparams = LAParams(
-        char_margin=10.0,
-        line_margin=0.2,
-        boxes_flow=0.2,
-        all_texts=False,
-    )
-    # Create a PDF page aggregator object.
-    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
-    # Create a PDF interpreter object.
-    interpreter = PDFPageInterpreter(rsrcmgr, device)
-
-    # loop over all pages in the document
-    outTextList = []
-    for page in PDFPage.create_pages(document):
-        # read the page into a layout object
-        interpreter.process_page(page)
-        layout = device.get_result()
-        for obj in layout._objs:
-            if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
-                outTextList.append(obj.get_text())
-
-    return outTextList
-
-
-def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
-    import time, glob, os
-    from bs4 import BeautifulSoup
-    logger.info('begin analysis on:', file_manifest)
-    for index, fp in enumerate(file_manifest):
-        if ".tex" in fp:
-            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
-                file_content = f.read()
-        if ".pdf" in fp.lower():
-            file_content = readPdf(fp)
-            file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk')
-
-        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
-        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
-        i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
-        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-        if not fast_debug:
-            msg = '正常'
-            # ** gpt request **
-            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-                inputs=i_say,
-                inputs_show_user=i_say_show_user,
-                llm_kwargs=llm_kwargs,
-                chatbot=chatbot,
-                history=[],
-                sys_prompt="总结文章。"
-            ) # 带超时倒计时
-            chatbot[-1] = (i_say_show_user, gpt_say)
-            history.append(i_say_show_user); history.append(gpt_say)
-            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-            if not fast_debug: time.sleep(2)
-
-    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
-    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
-    chatbot.append((i_say, "[Local Message] waiting gpt response."))
-    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-    if not fast_debug:
-        msg = '正常'
-        # ** gpt request **
-        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-            inputs=i_say,
-            inputs_show_user=i_say,
-            llm_kwargs=llm_kwargs,
-            chatbot=chatbot,
-            history=history,
-            sys_prompt="总结文章。"
-        ) # 带超时倒计时
-        chatbot[-1] = (i_say, gpt_say)
-        history.append(i_say); history.append(gpt_say)
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-        res = write_history_to_file(history)
-        promote_file_to_downloadzone(res, chatbot=chatbot)
-        chatbot.append(("完成了吗?", res))
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-
-
-
-@CatchException
-def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
-    history = []    # 清空历史,以免输入溢出
-    import glob, os
-
-    # 基本信息:功能、贡献者
-    chatbot.append([
-        "函数插件功能?",
-        "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
-    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-    # 尝试导入依赖,如果缺少依赖,则给出安装建议
-    try:
-        import pdfminer, bs4
-    except:
-        report_exception(chatbot, history,
-            a = f"解析项目: {txt}",
-            b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        return
-    if os.path.exists(txt):
-        project_folder = txt
-    else:
-        if txt == "": txt = '空空如也的输入栏'
-        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        return
-    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
-                    [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \
-                    # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
-                    # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
-    if len(file_manifest) == 0:
-        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        return
-    yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
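The deleted `readPdf` helper above follows the standard pdfminer layout-analysis flow (parser → document → resource manager → page aggregator → interpreter). A condensed, self-contained sketch of that same flow, assuming pdfminer is installed:

from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams, LTTextBoxHorizontal

def extract_text_boxes(pdf_path):
    """Return the horizontal text boxes of a PDF, one string per box."""
    with open(pdf_path, 'rb') as fp:
        document = PDFDocument(PDFParser(fp))
        if not document.is_extractable:
            raise PDFTextExtractionNotAllowed
        rsrcmgr = PDFResourceManager()
        device = PDFPageAggregator(rsrcmgr, laparams=LAParams())
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        texts = []
        for page in PDFPage.create_pages(document):
            interpreter.process_page(page)
            for obj in device.get_result():
                if isinstance(obj, LTTextBoxHorizontal):
                    texts.append(obj.get_text())
        return texts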