Mirrored from
https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 06:26:47 +00:00
Latex全文润色
This commit is contained in:
@@ -2,51 +2,125 @@ from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down

fast_debug = False

class PaperFileGroup():
    def __init__(self):
        self.file_paths = []
        self.file_contents = []
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        import tiktoken
        from toolbox import get_conf
        enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
        def get_token_num(txt): return len(enc.encode(txt))
        self.get_token_num = get_token_num

    def run_file_split(self, max_token_limit=1900):
        """
        Split overlong file contents into smaller fragments.
        """
        for index, file_content in enumerate(self.file_contents):
            if self.get_token_num(file_content) < max_token_limit:
                self.sp_file_contents.append(file_content)
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
                    self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")

        print('Segmentation: done')

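# Editor's illustration (not part of the commit): a standalone sketch of the token-budget
# split above. It assumes tiktoken is installed; 'gpt-3.5-turbo' is an assumed model name
# (the class reads the real one via get_conf('LLM_MODEL')). The repo's helper,
# breakdown_txt_to_satisfy_token_limit_for_pdf, cuts on natural boundaries; this sketch
# just decodes fixed-size token windows.
def _naive_token_split(txt, max_token_limit=1024, model='gpt-3.5-turbo'):
    import tiktoken
    enc = tiktoken.encoding_for_model(model)
    tokens = enc.encode(txt)
    return [enc.decode(tokens[i:i + max_token_limit])
            for i in range(0, len(tokens), max_token_limit)]
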
def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
    import time, os, re
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency


    # <-------- Read the LaTeX files and strip all comments ---------->
    pfg = PaperFileGroup()

def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, glob, os, re
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8') as f:
            file_content = f.read()
            # Regular expression that matches LaTeX comments
            comment_pattern = r'%.*'
            # Find the comments with the regex and replace them with the empty string
            clean_tex_content = re.sub(comment_pattern, '', file_content)
            # Record the text with comments removed
            pfg.file_paths.append(fp)
            pfg.file_contents.append(clean_tex_content)

            prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
            i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
            i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
            chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
            yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
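    # Editor's note (illustration, not in the commit): the pattern r'%.*' above also
    # swallows escaped percent signs, e.g. re.sub(r'%.*', '', r"50\% done % TODO")
    # leaves "50\", because "\%" is not distinguished from a real comment. A stricter
    # pattern such as r'(?<!\\)%.*' would keep "\%" intact.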
    # <-------- Split the LaTeX files that are too long ---------->
    pfg.run_file_split(max_token_limit=1024)
    n_split = len(pfg.sp_file_contents)

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, llm_kwargs, plugin_kwargs, history=[]) # with countdown on timeout

        # <-------- Extract the abstract ---------->
        # if language == 'en':
        #     abs_extract_inputs = f"Please write an abstract for this paper"

        chatbot[-1] = (i_say_show_user, gpt_say)
        history.append(i_say_show_user); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
        if not fast_debug: time.sleep(2)
    # # Single-threaded: fetch the paper's meta information
    # paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #     inputs=abs_extract_inputs,
    #     inputs_show_user=f"正在抽取摘要信息。",
    #     llm_kwargs=llm_kwargs,
    #     chatbot=chatbot, history=[],
    #     sys_prompt="Your job is to collect information from materials.",
    # )

    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    # <-------- Multi-threaded polishing begins (first pass) ---------->
    if language == 'en':
        inputs_array = [f"Below is an academic paper, polish the writing to meet the academic style, " +
                        f"improve the spelling, grammar, clarity, concision and overall readability. " +
                        f"The paper begins now: \n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"Polish {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
    elif language == 'zh':
        inputs_array = [f"这里有一篇使用Latex格式的学术论文,请按照学术风格进行润色,改进拼写、语法、清晰度、简洁度和整体可读性。" +
                        f"论文现在开始:\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"润色 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["你是一位专业的学术论文作家。润色以下论文。输出中保留Latex格式。" for _ in range(n_split)]

    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=inputs_array,
        inputs_show_user_array=inputs_show_user_array,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[""] for _ in range(n_split)],  # each fragment starts from an empty history
        sys_prompt_array=sys_prompt_array,
        max_workers=10,  # the maximum number of parallel requests OpenAI allows
        scroller_max_len=80
    )

    create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
    history = gpt_response_collection
    chatbot.append((f"{fp}完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, llm_kwargs, plugin_kwargs, history=history) # with countdown on timeout

        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI


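# Editor's illustration (an assumption-laden sketch, not the repo's implementation):
# the multi-thread helper used above fans one query per fragment out to a worker pool
# and returns the replies in input order. The gist, minus the live UI streaming and
# retry logic the real helper adds; ask_gpt is a hypothetical callable (prompt -> reply).
from concurrent.futures import ThreadPoolExecutor

def _fan_out(inputs_array, ask_gpt, max_workers=10):
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        return list(pool.map(ask_gpt, inputs_array))  # pool.map preserves input order
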
@CatchException
def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Basic info: what the plugin does, and its contributors
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

    # Try to import the dependencies; if one is missing, suggest how to install it
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
@@ -56,11 +130,47 @@ def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
    #                [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
    #                [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
    yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en')

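# Editor's illustration (hypothetical sketch, not the repo's CatchException): the
# decorator below traps any exception raised by a plugin generator and surfaces it in
# the chat instead of crashing the UI, which is the role @CatchException plays for the
# entry points in this file.
import functools, traceback

def _catch_and_report(fn):
    @functools.wraps(fn)
    def wrapper(txt, llm_kwargs, plugin_kwargs, chatbot, history, *args, **kwargs):
        try:
            yield from fn(txt, llm_kwargs, plugin_kwargs, chatbot, history, *args, **kwargs)
        except Exception:
            # report the traceback into the chat window rather than raising
            chatbot.append((f"解析项目: {txt}", traceback.format_exc()))
    return wrapper
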
@CatchException
def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Basic info: what the plugin does, and its contributors
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

    # Try to import the dependencies; if one is missing, suggest how to install it
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh')
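
# Editor's illustration (hypothetical driver, not part of the commit): the host UI runs
# these entry points as generators, resuming them once per yield to repaint the chat.
# A bare list stands in for the real chatbot object, and the empty kwargs below are mock
# placeholders that the real host fills in.
def _drive(plugin, txt):
    chatbot, history = [], []
    for _ in plugin(txt, llm_kwargs={}, plugin_kwargs={}, chatbot=chatbot,
                    history=history, system_prompt="", web_port=None):
        print(f'UI repaint: {len(chatbot)} chat entries')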