镜像自地址
https://github.com/binary-husky/gpt_academic.git
已同步 2025-12-06 14:36:48 +00:00
* logging sys to loguru: stage 1 complete * import loguru: stage 2 * logging -> loguru: stage 3 * support o1-preview and o1-mini * logging -> loguru stage 4 * update social helper * logging -> loguru: final stage * fix: console output * update translation matrix * fix: loguru argument error with proxy enabled (#1977) * relax llama index version * remove comment * Added some modules to support openrouter (#1975) * Added some modules for supporting openrouter model Added some modules for supporting openrouter model * Update config.py * Update .gitignore * Update bridge_openrouter.py * Not changed actually * Refactor logging in bridge_openrouter.py --------- Co-authored-by: binary-husky <qingxu.fu@outlook.com> * remove logging extra --------- Co-authored-by: Steven Moder <java20131114@gmail.com> Co-authored-by: Ren Lifei <2602264455@qq.com>
163 行
7.1 KiB
Python
163 行
7.1 KiB
Python
from loguru import logger
|
|
from toolbox import update_ui
|
|
from toolbox import CatchException, report_exception
|
|
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
|
from toolbox import write_history_to_file, promote_file_to_downloadzone
|
|
|
|
fast_debug = False  # when True, the GPT-request branches below are skipped (quick UI-flow debugging)
|
|
|
|
def readPdf(pdfPath):
    """
    Read a PDF file and return its text content.

    Args:
        pdfPath: path to the PDF file on disk.

    Returns:
        list[str]: one entry per horizontal text box, in layout order.

    Raises:
        PDFTextExtractionNotAllowed: if the document forbids text extraction.
    """
    import pdfminer
    from pdfminer.pdfparser import PDFParser
    from pdfminer.pdfdocument import PDFDocument
    from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
    from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
    from pdfminer.layout import LAParams
    from pdfminer.converter import PDFPageAggregator

    outTextList = []
    # Fix: the original opened the file without ever closing it (handle leak,
    # especially on the early-raise path). A context manager guarantees cleanup.
    with open(pdfPath, 'rb') as fp:
        # Create a PDF parser object associated with the file object.
        parser = PDFParser(fp)

        # Create a PDF document object that stores the document structure.
        # (A password could be supplied as the 2nd parameter if needed.)
        document = PDFDocument(parser)

        # Check if the document allows text extraction. If not, abort.
        if not document.is_extractable:
            raise PDFTextExtractionNotAllowed

        # Shared resources (fonts, images) reused across pages.
        rsrcmgr = PDFResourceManager()

        # Layout-analysis parameters: a large char_margin merges characters
        # into lines aggressively; small line_margin/boxes_flow keep boxes tight.
        laparams = LAParams(
            char_margin=10.0,
            line_margin=0.2,
            boxes_flow=0.2,
            all_texts=False,
        )

        # Aggregator collects the layout tree for each page; the interpreter
        # drives page rendering into it.
        device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        interpreter = PDFPageInterpreter(rsrcmgr, device)

        # Loop over all pages in the document.
        for page in PDFPage.create_pages(document):
            # Render the page into a layout object, then harvest the text boxes.
            interpreter.process_page(page)
            layout = device.get_result()
            for obj in layout._objs:
                if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
                    outTextList.append(obj.get_text())

    return outTextList
|
|
|
|
|
|
def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    """
    Summarize each paper file (.tex / .pdf) in file_manifest one by one via the
    LLM, then request an overall Chinese + English abstract covering all files.

    This is a generator driven by the plugin framework: it yields UI-refresh
    events from update_ui and finally promotes the written history file to the
    download zone.
    """
    import time, glob, os
    from bs4 import BeautifulSoup
    # Fix: loguru formats the message with str.format(*args); the original
    # `logger.info('begin analysis on:', file_manifest)` had no placeholder,
    # so the manifest was silently dropped from the log.
    logger.info('begin analysis on: {}', file_manifest)
    # Fix: `msg` was only assigned inside the `if not fast_debug:` branches but
    # is read unconditionally by the final update_ui call -> NameError when
    # fast_debug is True. Define it up front.
    msg = '正常'
    for index, fp in enumerate(file_manifest):
        if ".tex" in fp:
            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
                file_content = f.read()
        if ".pdf" in fp.lower():
            file_content = readPdf(fp)
            # Strip markup and drop characters not representable in GBK
            # (token reduction for the LLM prompt).
            file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk')

        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI

        if not fast_debug:
            # ** gpt request **
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt="总结文章。"
            )  # with timeout countdown

            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh UI
            if not fast_debug: time.sleep(2)

    all_file = ', '.join(os.path.relpath(fp, project_folder) for fp in file_manifest)
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh UI

    if not fast_debug:
        # ** gpt request ** — final pass summarizes using the accumulated history
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say,
            inputs_show_user=i_say,
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history=history,
            sys_prompt="总结文章。"
        )  # with timeout countdown

        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh UI
    res = write_history_to_file(history)
    promote_file_to_downloadzone(res, chatbot=chatbot)
    chatbot.append(("完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh UI
|
|
|
|
|
|
|
|
@CatchException
def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    """
    Batch-summarize all .tex / .pdf documents under the folder given in `txt`,
    using the pdfminer backend (with token reduction).

    Generator plugin entry point: yields UI-refresh events and delegates the
    actual per-file summarization to 解析Paper.
    """
    history = []  # clear history to avoid input overflow
    import glob, os

    # Basic info: feature description and contributor.
    chatbot.append([
        "函数插件功能?",
        "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh UI

    # Try importing optional dependencies; advise installation if missing.
    try:
        import pdfminer, bs4
    except ImportError:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; only a failed import should trigger the install hint.
        report_exception(chatbot, history,
                         a = f"解析项目: {txt}",
                         b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    # Collect all .tex and .pdf files recursively (glob already returns lists;
    # the original wrapped each call in a redundant copy comprehension).
    file_manifest = glob.glob(f'{project_folder}/**/*.tex', recursive=True) + \
                    glob.glob(f'{project_folder}/**/*.pdf', recursive=True)
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return
    yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
|
|