Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 14:36:48 +00:00
Merge branch 'frontier' into production
@@ -1,6 +1,6 @@
 > **Note**
 >
-> 2023.11.12: 紧急修复了endpoint异常的问题。
+> 2023.11.12: 某些依赖包尚不兼容python 3.12,推荐python 3.11。
 >
 > 2023.11.7: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。
 
@@ -108,7 +108,7 @@ cd gpt_academic
 
 3. 安装依赖
 ```sh
-# (选择I: 如熟悉python, python>=3.9)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
+# (选择I: 如熟悉python, python推荐版本 3.9 ~ 3.11)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
 python -m pip install -r requirements.txt
 
 # (选择II: 使用Anaconda)步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr):
@@ -205,13 +205,12 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
         retry_op = retry_times_at_unknown_error
         exceeded_cnt = 0
         mutable[index][2] = "执行中"
+        detect_timeout = lambda: len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > watch_dog_patience
         while True:
             # watchdog error
-            if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > watch_dog_patience:
-                raise RuntimeError("检测到程序终止。")
+            if detect_timeout(): raise RuntimeError("检测到程序终止。")
             try:
                 # 【第一种情况】:顺利完成
-                # time.sleep(10); raise RuntimeError("测试")
                 gpt_say = predict_no_ui_long_connection(
                     inputs=inputs, llm_kwargs=llm_kwargs, history=history,
                     sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
@@ -219,7 +218,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                 mutable[index][2] = "已成功"
                 return gpt_say
             except ConnectionAbortedError as token_exceeded_error:
-                # 【第二种情况】:Token溢出,
+                # 【第二种情况】:Token溢出
                 if handle_token_exceed:
                     exceeded_cnt += 1
                     # 【选择处理】 尝试计算比例,尽可能多地保留文本
@@ -240,6 +239,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                     return gpt_say # 放弃
             except:
                 # 【第三种情况】:其他错误
+                if detect_timeout(): raise RuntimeError("检测到程序终止。")
                 tb_str = '```\n' + trimmed_format_exc() + '```'
                 print(tb_str)
                 gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
@@ -256,6 +256,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                     for i in range(wait):
                         mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
                     # 开始重试
+                    if detect_timeout(): raise RuntimeError("检测到程序终止。")
                     mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
                     continue # 返回重试
                 else:
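The four hunks above factor the watchdog check into a single `detect_timeout` lambda and re-test it on every failure and retry path, so a worker thread whose observation window has gone stale aborts promptly instead of lingering in the retry loop. Below is a minimal, self-contained sketch of the same pattern; `observe_window`, `watch_dog_patience` and `do_request` are hypothetical stand-ins for the plugin's `mutable[index]` bookkeeping and LLM call.

```python
import time

def do_request():
    # hypothetical long-running call standing in for predict_no_ui_long_connection
    return "ok"

def worker(observe_window, watch_dog_patience=5):
    # observe_window[1] is refreshed by the UI side while the task should keep running;
    # once it goes stale for longer than watch_dog_patience, the worker must abort.
    detect_timeout = lambda: len(observe_window) >= 2 and (time.time() - observe_window[1]) > watch_dog_patience

    while True:
        if detect_timeout(): raise RuntimeError("watchdog timeout, terminating worker")
        try:
            return do_request()
        except Exception:
            # re-check the watchdog before every retry, mirroring the hunks above
            if detect_timeout(): raise RuntimeError("watchdog timeout, terminating worker")
            time.sleep(1)

print(worker(["", time.time()]))  # prints "ok"
```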
@@ -1,9 +1,10 @@
 from toolbox import update_ui, update_ui_lastest_msg, get_log_folder
-from toolbox import zip_folder, objdump, objload, promote_file_to_downloadzone
+from toolbox import get_conf, objdump, objload, promote_file_to_downloadzone
 from .latex_toolbox import PRESERVE, TRANSFORM
 from .latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
 from .latex_toolbox import reverse_forbidden_text_careful_brace, reverse_forbidden_text, convert_to_linklist, post_process
 from .latex_toolbox import fix_content, find_main_tex_file, merge_tex_files, compile_latex_with_timeout
+from .latex_toolbox import find_title_and_abs
 
 import os, shutil
 import re
@@ -90,7 +91,18 @@ class LatexPaperSplit():
             "项目Github地址 \url{https://github.com/binary-husky/gpt_academic/}。"
         # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加REAME中的QQ联系开发者)
         self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\"
+        self.title = "unknown"
+        self.abstract = "unknown"
 
+    def read_title_and_abstract(self, txt):
+        try:
+            title, abstract = find_title_and_abs(txt)
+            if title is not None:
+                self.title = title.replace('\n', ' ').replace('  ', ' ').replace('  ', ' ')
+            if abstract is not None:
+                self.abstract = abstract.replace('\n', ' ').replace('  ', ' ').replace('  ', ' ')
+        except:
+            pass
 
     def merge_result(self, arr, mode, msg, buggy_lines=[], buggy_line_surgery_n_lines=10):
         """
@@ -234,8 +246,8 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
     chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。'))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
     lps = LatexPaperSplit()
+    lps.read_title_and_abstract(merged_content)
     res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数
-
     # <-------- 拆分过长的latex片段 ---------->
     pfg = LatexPaperFileGroup()
     for index, r in enumerate(res):
@@ -256,12 +268,19 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
 
     else:
         # <-------- gpt 多线程请求 ---------->
+        history_array = [[""] for _ in range(n_split)]
+        # LATEX_EXPERIMENTAL, = get_conf('LATEX_EXPERIMENTAL')
+        # if LATEX_EXPERIMENTAL:
+        #     paper_meta = f"The paper you processing is `{lps.title}`, a part of the abstraction is `{lps.abstract}`"
+        #     paper_meta_max_len = 888
+        #     history_array = [[ paper_meta[:paper_meta_max_len] + '...', "Understand, what should I do?"] for _ in range(n_split)]
+
         gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
             inputs_array=inputs_array,
             inputs_show_user_array=inputs_show_user_array,
             llm_kwargs=llm_kwargs,
             chatbot=chatbot,
-            history_array=[[""] for _ in range(n_split)],
+            history_array=history_array,
             sys_prompt_array=sys_prompt_array,
             # max_workers=5, # 并行任务数量限制, 最多同时执行5个, 其他的排队等待
             scroller_max_len = 40
@@ -318,6 +318,41 @@ def merge_tex_files_(project_foler, main_file, mode):
         main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
     return main_file
 
+
+def find_title_and_abs(main_file):
+
+    def extract_abstract_1(text):
+        pattern = r"\\abstract\{(.*?)\}"
+        match = re.search(pattern, text, re.DOTALL)
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    def extract_abstract_2(text):
+        pattern = r"\\begin\{abstract\}(.*?)\\end\{abstract\}"
+        match = re.search(pattern, text, re.DOTALL)
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    def extract_title(string):
+        pattern = r"\\title\{(.*?)\}"
+        match = re.search(pattern, string, re.DOTALL)
+
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    abstract = extract_abstract_1(main_file)
+    if abstract is None:
+        abstract = extract_abstract_2(main_file)
+    title = extract_title(main_file)
+    return title, abstract
+
+
 def merge_tex_files(project_foler, main_file, mode):
     """
     Merge Tex project recrusively
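The new `find_title_and_abs` helper probes the merged TeX source with three regular expressions, `\abstract{...}`, the `abstract` environment, and `\title{...}`, returning `None` for whatever it cannot find. A condensed, standalone sketch of the same strategy on a toy input (not the plugin's own code path):

```python
import re

def find_title_and_abs(main_file):
    # same regexes as the hunk above, collapsed into one helper
    def first_group(pattern, text):
        match = re.search(pattern, text, re.DOTALL)
        return match.group(1) if match else None

    abstract = first_group(r"\\abstract\{(.*?)\}", main_file)
    if abstract is None:
        abstract = first_group(r"\\begin\{abstract\}(.*?)\\end\{abstract\}", main_file)
    title = first_group(r"\\title\{(.*?)\}", main_file)
    return title, abstract

tex = r"\title{A Toy Paper}\begin{abstract}We study nothing.\end{abstract}"
print(find_title_and_abs(tex))  # ('A Toy Paper', 'We study nothing.')
```

As in the original, the non-greedy `(.*?)` stops at the first closing brace, so titles containing nested braces are only partially captured.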
@@ -1,7 +1,8 @@
-from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder, get_user
 import re
 
+f_prefix = 'GPT-Academic对话存档'
+
 def write_chat_to_file(chatbot, history=None, file_name=None):
     """
     将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
@@ -9,8 +10,8 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
     import os
     import time
     if file_name is None:
-        file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
+        file_name = f_prefix + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
-    fp = os.path.join(get_log_folder(), file_name)
+    fp = os.path.join(get_log_folder(get_user(chatbot), plugin_name='chat_history'), file_name)
     with open(fp, 'w', encoding='utf8') as f:
         from themes.theme import advanced_css
         f.write(f'<!DOCTYPE html><head><meta charset="utf-8"><title>对话历史</title><style>{advanced_css}</style></head>')
@@ -80,7 +81,7 @@ def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     """
 
     chatbot.append(("保存当前对话",
-        f"[Local Message] {write_chat_to_file(chatbot, history)},您可以调用“载入对话历史存档”还原当下的对话。\n警告!被保存的对话历史可以被使用该系统的任何人查阅。"))
+        f"[Local Message] {write_chat_to_file(chatbot, history)},您可以调用下拉菜单中的“载入对话历史存档”还原当下的对话。"))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
 
 def hide_cwd(str):
@@ -106,7 +107,12 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     if not success:
         if txt == "": txt = '空空如也的输入栏'
         import glob
-        local_history = "<br/>".join(["`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True)])
+        local_history = "<br/>".join([
+            "`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`"
+            for f in glob.glob(
+                f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html',
+                recursive=True
+            )])
         chatbot.append([f"正在查找对话历史文件(html格式): {txt}", f"找不到任何html文件: {txt}。但本地存储了以下历史文件,您可以将任意一个文件路径粘贴到输入区,然后重试:<br/>{local_history}"])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -132,8 +138,12 @@ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot
     """
 
     import glob, os
-    local_history = "<br/>".join(["`"+hide_cwd(f)+"`" for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True)])
-    for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True):
+    local_history = "<br/>".join([
+        "`"+hide_cwd(f)+"`"
+        for f in glob.glob(
+            f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', recursive=True
+        )])
+    for f in glob.glob(f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', recursive=True):
         os.remove(f)
     chatbot.append([f"删除所有历史对话文件", f"已删除<br/>{local_history}"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
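The archive plugin now writes, lists and deletes history files under a per-user `chat_history` log folder and derives every glob pattern from the shared `f_prefix` constant, so the save and load paths cannot drift apart. A rough sketch of the resulting path layout (assuming the default `PATH_LOGGING = "gpt_log"`; `alice` is a hypothetical user):

```python
import os, time

PATH_LOGGING = "gpt_log"                 # assumed value of the PATH_LOGGING config option
f_prefix = 'GPT-Academic对话存档'

def chat_history_file(user):
    # mirrors get_log_folder(user, plugin_name='chat_history') plus the f_prefix file name
    folder = os.path.join(PATH_LOGGING, user, "chat_history")
    name = f_prefix + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
    return os.path.join(folder, name)

def chat_history_glob(user):
    # the pattern used when loading or deleting archives
    return f'{os.path.join(PATH_LOGGING, user, "chat_history")}/**/{f_prefix}*.html'

print(chat_history_file("alice"))
print(chat_history_glob("alice"))
```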
@@ -2,9 +2,12 @@
 # @Time : 2023/4/19
 # @Author : Spike
 # @Descr :
-from toolbox import update_ui, get_conf
+from toolbox import update_ui, get_conf, get_user
 from toolbox import CatchException
+from toolbox import default_user_name
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+import shutil
+import os
 
 
 @CatchException
@@ -33,10 +36,19 @@ def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     chatbot.append(['清除本地缓存数据', '执行中. 删除数据'])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
-    import shutil, os
-    PATH_PRIVATE_UPLOAD, PATH_LOGGING = get_conf('PATH_PRIVATE_UPLOAD', 'PATH_LOGGING')
-    shutil.rmtree(PATH_LOGGING, ignore_errors=True)
-    shutil.rmtree(PATH_PRIVATE_UPLOAD, ignore_errors=True)
+    def _get_log_folder(user=default_user_name):
+        PATH_LOGGING = get_conf('PATH_LOGGING')
+        _dir = os.path.join(PATH_LOGGING, user)
+        if not os.path.exists(_dir): os.makedirs(_dir)
+        return _dir
+
+    def _get_upload_folder(user=default_user_name):
+        PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD')
+        _dir = os.path.join(PATH_PRIVATE_UPLOAD, user)
+        return _dir
+
+    shutil.rmtree(_get_log_folder(get_user(chatbot)), ignore_errors=True)
+    shutil.rmtree(_get_upload_folder(get_user(chatbot)), ignore_errors=True)
 
     chatbot.append(['清除本地缓存数据', '执行完成'])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -326,25 +326,26 @@ You can change the theme by modifying the `THEME` option (config.py).
 1. `master` branch: Main branch, stable version
 2. `frontier` branch: Development branch, test version
 
+***
+
 ### V: References and Learning
 
-```
 The code references the designs of many other excellent projects, in no particular order:
 
-# THU ChatGLM2-6B:
-https://github.com/THUDM/ChatGLM2-6B
+[THU ChatGLM2-6B](https://github.com/THUDM/ChatGLM2-6B)
 
-# THU JittorLLMs:
-https://github.com/Jittor/JittorLLMs
+[THU JittorLLMs](https://github.com/Jittor/JittorLLMs)
 
-# ChatPaper:
-https://github.com/kaixindelele/ChatPaper
+[ChatPaper](https://github.com/kaixindelele/ChatPaper)
 
-# Edge-GPT:
-https://github.com/acheong08/EdgeGPT
+[Edge-GPT](https://github.com/acheong08/EdgeGPT)
 
-# ChuanhuChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
+[ChuanhuChatGPT](https://github.com/GaiZhenbiao/ChuanhuChatGPT)
 
@@ -11,7 +11,9 @@ if __name__ == "__main__":
     from tests.test_utils import plugin_test
     # plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
 
-    plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2307.07522")
+    # plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2307.07522")
+
+    plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix")
 
     # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep')
 
toolbox.py: 101 changed lines
@@ -11,7 +11,7 @@ import math
 from latex2mathml.converter import convert as tex2mathml
 from functools import wraps, lru_cache
 pj = os.path.join
-
+default_user_name = 'default_user'
 """
 ========================================================================
 第一部分
@@ -61,11 +61,16 @@ def ArgsGeneralWrapper(f):
         txt_passon = txt
         if txt == "" and txt2 != "": txt_passon = txt2
         # 引入一个有cookie的chatbot
+        if request.username is not None:
+            user_name = request.username
+        else:
+            user_name = default_user_name
         cookies.update({
             'top_p':top_p,
             'api_key': cookies['api_key'],
             'llm_model': llm_model,
             'temperature':temperature,
+            'user_name': user_name,
         })
         llm_kwargs = {
             'api_key': cookies['api_key'],
@@ -537,40 +542,60 @@ def find_recent_files(directory):
 
     return recent_files
 
 
+def file_already_in_downloadzone(file, user_path):
+    try:
+        parent_path = os.path.abspath(user_path)
+        child_path = os.path.abspath(file)
+        if os.path.samefile(os.path.commonpath([parent_path, child_path]), parent_path):
+            return True
+        else:
+            return False
+    except:
+        return False
+
 def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
     # 将文件复制一份到下载区
     import shutil
-    if rename_file is None: rename_file = f'{gen_time_str()}-{os.path.basename(file)}'
-    new_path = pj(get_log_folder(), rename_file)
-    # 如果已经存在,先删除
-    if os.path.exists(new_path) and not os.path.samefile(new_path, file): os.remove(new_path)
-    # 把文件复制过去
-    if not os.path.exists(new_path): shutil.copyfile(file, new_path)
-    # 将文件添加到chatbot cookie中,避免多用户干扰
+    if chatbot is not None:
+        user_name = get_user(chatbot)
+    else:
+        user_name = default_user_name
+
+    user_path = get_log_folder(user_name, plugin_name=None)
+    if file_already_in_downloadzone(file, user_path):
+        new_path = file
+    else:
+        user_path = get_log_folder(user_name, plugin_name='downloadzone')
+        if rename_file is None: rename_file = f'{gen_time_str()}-{os.path.basename(file)}'
+        new_path = pj(user_path, rename_file)
+        # 如果已经存在,先删除
+        if os.path.exists(new_path) and not os.path.samefile(new_path, file): os.remove(new_path)
+        # 把文件复制过去
+        if not os.path.exists(new_path): shutil.copyfile(file, new_path)
+    # 将文件添加到chatbot cookie中
     if chatbot is not None:
         if 'files_to_promote' in chatbot._cookies: current = chatbot._cookies['files_to_promote']
         else: current = []
         chatbot._cookies.update({'files_to_promote': [new_path] + current})
     return new_path
 
 
 def disable_auto_promotion(chatbot):
     chatbot._cookies.update({'files_to_promote': []})
     return
 
-def is_the_upload_folder(string):
-    PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD')
-    pattern = r'^PATH_PRIVATE_UPLOAD/[A-Za-z0-9_-]+/\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}$'
-    pattern = pattern.replace('PATH_PRIVATE_UPLOAD', PATH_PRIVATE_UPLOAD)
-    if re.match(pattern, string): return True
-    else: return False
 
-def del_outdated_uploads(outdate_time_seconds):
-    PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD')
+def del_outdated_uploads(outdate_time_seconds, target_path_base=None):
+    if target_path_base is None:
+        user_upload_dir = get_conf('PATH_PRIVATE_UPLOAD')
+    else:
+        user_upload_dir = target_path_base
     current_time = time.time()
     one_hour_ago = current_time - outdate_time_seconds
-    # Get a list of all subdirectories in the PATH_PRIVATE_UPLOAD folder
+    # Get a list of all subdirectories in the user_upload_dir folder
     # Remove subdirectories that are older than one hour
-    for subdirectory in glob.glob(f'{PATH_PRIVATE_UPLOAD}/*/*'):
+    for subdirectory in glob.glob(f'{user_upload_dir}/*'):
         subdirectory_time = os.path.getmtime(subdirectory)
         if subdirectory_time < one_hour_ago:
             try: shutil.rmtree(subdirectory)
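`file_already_in_downloadzone` is the guard that keeps `promote_file_to_downloadzone` from copying a file onto itself: both paths are resolved to absolute form, and the file counts as already promoted when the user's folder is an ancestor of it. A standalone sketch of the same containment test; it compares strings with `os.path.commonpath` instead of `os.path.samefile`, so the paths do not have to exist (example paths are hypothetical):

```python
import os

def is_inside(child, parent):
    # same idea as file_already_in_downloadzone: is `parent` an ancestor of `child`?
    try:
        parent = os.path.abspath(parent)
        child = os.path.abspath(child)
        return os.path.commonpath([parent, child]) == parent
    except ValueError:  # e.g. paths on different drives under Windows
        return False

print(is_inside("gpt_log/alice/downloadzone/report.pdf", "gpt_log/alice"))  # True
print(is_inside("/tmp/report.pdf", "gpt_log/alice"))                        # False
```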
@@ -583,17 +608,16 @@ def on_file_uploaded(request: gradio.Request, files, chatbot, txt, txt2, checkbo
     """
     if len(files) == 0:
         return chatbot, txt
 
-    # 移除过时的旧文件从而节省空间&保护隐私
-    outdate_time_seconds = 60
-    del_outdated_uploads(outdate_time_seconds)
-
     # 创建工作路径
-    user_name = "default" if not request.username else request.username
+    user_name = default_user_name if not request.username else request.username
     time_tag = gen_time_str()
-    PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD')
-    target_path_base = pj(PATH_PRIVATE_UPLOAD, user_name, time_tag)
+    target_path_base = get_upload_folder(user_name, tag=time_tag)
     os.makedirs(target_path_base, exist_ok=True)
+
+    # 移除过时的旧文件从而节省空间&保护隐私
+    outdate_time_seconds = 3600 # 一小时
+    del_outdated_uploads(outdate_time_seconds, get_upload_folder(user_name))
 
     # 逐个文件转移到目标路径
     upload_msg = ''
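`on_file_uploaded` now creates the per-user upload folder first and only then prunes old uploads, with the retention window raised from 60 seconds to an hour and the cleanup scoped to that user's own directory. A small sketch of the pruning step under those assumptions (the path is hypothetical):

```python
import glob, os, shutil, time

def del_outdated_uploads(outdate_time_seconds, target_path_base):
    # remove upload subfolders older than the retention window, for one user only
    cutoff = time.time() - outdate_time_seconds
    for subdirectory in glob.glob(f'{target_path_base}/*'):
        if os.path.getmtime(subdirectory) < cutoff:
            try: shutil.rmtree(subdirectory)
            except Exception: pass

# keep only the last hour of uploads for a hypothetical user folder
del_outdated_uploads(3600, "private_upload/alice")
```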
@@ -1003,12 +1027,35 @@ def gen_time_str():
     import time
     return time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
 
-def get_log_folder(user='default', plugin_name='shared'):
+def get_log_folder(user=default_user_name, plugin_name='shared'):
+    if user is None: user = default_user_name
     PATH_LOGGING = get_conf('PATH_LOGGING')
-    _dir = pj(PATH_LOGGING, user, plugin_name)
+    if plugin_name is None:
+        _dir = pj(PATH_LOGGING, user)
+    else:
+        _dir = pj(PATH_LOGGING, user, plugin_name)
     if not os.path.exists(_dir): os.makedirs(_dir)
     return _dir
+
+def get_upload_folder(user=default_user_name, tag=None):
+    PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD')
+    if user is None: user = default_user_name
+    if tag is None or len(tag)==0:
+        target_path_base = pj(PATH_PRIVATE_UPLOAD, user)
+    else:
+        target_path_base = pj(PATH_PRIVATE_UPLOAD, user, tag)
+    return target_path_base
+
+def is_the_upload_folder(string):
+    PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD')
+    pattern = r'^PATH_PRIVATE_UPLOAD[\\/][A-Za-z0-9_-]+[\\/]\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}$'
+    pattern = pattern.replace('PATH_PRIVATE_UPLOAD', PATH_PRIVATE_UPLOAD)
+    if re.match(pattern, string): return True
+    else: return False
+
+def get_user(chatbotwithcookies):
+    return chatbotwithcookies._cookies.get('user_name', default_user_name)
 
 class ProxyNetworkActivate():
     """
     这段代码定义了一个名为TempProxy的空上下文管理器, 用于给一小段代码上代理
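Taken together, the toolbox changes carry a `user_name` cookie from the Gradio request (set in `ArgsGeneralWrapper`) down to every path helper: `get_user` reads it back from the chatbot's cookies, and `get_log_folder` / `get_upload_folder` build `PATH_LOGGING/<user>/<plugin>` and `PATH_PRIVATE_UPLOAD/<user>/<timestamp>` layouts from it. A compressed sketch of that flow, with a plain dict and assumed config defaults (`gpt_log`, `private_upload`) standing in for the real cookie object and `get_conf`:

```python
import os
pj = os.path.join

PATH_LOGGING = "gpt_log"                # assumed default of get_conf('PATH_LOGGING')
PATH_PRIVATE_UPLOAD = "private_upload"  # assumed default of get_conf('PATH_PRIVATE_UPLOAD')
default_user_name = 'default_user'

def get_user(cookies):
    # the real helper reads chatbot._cookies; a plain dict stands in here
    return cookies.get('user_name', default_user_name)

def get_log_folder(user, plugin_name='shared'):
    return pj(PATH_LOGGING, user) if plugin_name is None else pj(PATH_LOGGING, user, plugin_name)

def get_upload_folder(user, tag=None):
    return pj(PATH_PRIVATE_UPLOAD, user) if not tag else pj(PATH_PRIVATE_UPLOAD, user, tag)

cookies = {'user_name': 'alice'}        # written by ArgsGeneralWrapper from request.username
print(get_log_folder(get_user(cookies), plugin_name='downloadzone'))    # gpt_log/alice/downloadzone
print(get_upload_folder(get_user(cookies), tag='2023-11-12-10-00-00'))  # private_upload/alice/2023-11-12-10-00-00
```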
version: 2 changed lines
@@ -1,5 +1,5 @@
 {
   "version": 3.60,
   "show_feature": true,
-  "new_feature": "11月12日紧急BUG修复 <-> AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
+  "new_feature": "修复多个BUG <-> AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮 <-> 新汇报PDF汇总页面 <-> 重新编译Gradio优化使用体验"
 }