typo: Fix typos and rename functions across multiple files (#2130)

* typo: Fix typos and rename functions across multiple files

This commit addresses several minor issues:
- Corrected spelling of function names (e.g., `update_ui_lastest_msg` to `update_ui_latest_msg`)
- Fixed typos in comments and variable names
- Corrected capitalization in some strings (e.g., "ArXiv" instead of "Arixv")
- Renamed some variables for consistency
- Corrected some console-related parameter names (e.g., `console_slience` to `console_silence`)

The changes span multiple files across the project, including request LLM bridges, crazy functions, and utility modules.
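
As an aside, a minimal sketch (not code from this repository, and the function body below is hypothetical) of how such a spelling rename could be staged without breaking callers that still import the old name:

```python
import warnings

def update_ui_latest_msg(lastmsg, chatbot, history, delay=0):
    """Corrected name: overwrite the latest chat entry (hypothetical body)."""
    if chatbot:
        chatbot[-1] = [chatbot[-1][0], lastmsg]

def update_ui_lastest_msg(*args, **kwargs):
    """Deprecated alias under the old misspelled name, kept only during migration."""
    warnings.warn(
        "update_ui_lastest_msg is deprecated; use update_ui_latest_msg",
        DeprecationWarning, stacklevel=2,
    )
    return update_ui_latest_msg(*args, **kwargs)
```

This commit takes the simpler route and renames every call site directly, which works because the whole codebase moves at once.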

* fix: f-string expression part cannot include a backslash (#2139) (see the first sketch after this message)

* raise error when the uploaded tar contains hard/soft links (#2136) (see the second sketch after this message)

* minor bug fix

* fine tune reasoning css

* upgrade internet gpt plugin

* Update README.md

* fix GHSA-gqp5-wm97-qxcv

* typo fix

* update readme

---------

Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com>
Co-authored-by: binary-husky <qingxu.fu@outlook.com>
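
As context for the f-string fix above (#2139): before Python 3.12, the expression part of an f-string may not contain a backslash, so something like `f"{p.replace('\\', '/')}"` is a SyntaxError on older interpreters. The usual workaround, sketched below (not the actual patched code), is to hoist the backslash into a variable:

```python
# On Python < 3.12 this line is a SyntaxError:
#   print(f"path: {p.replace('\\', '/')}")
backslash = "\\"
p = "C:\\tmp\\file.txt"
# Moving the backslash out of the expression part compiles on all versions.
print(f"path: {p.replace(backslash, '/')}")  # path: C:/tmp/file.txt
```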
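
And for the tar hardening above (#2136): hard or symbolic links inside an uploaded archive can point outside the extraction directory, so such members should be rejected up front. A sketch of one possible check using the standard tarfile API (the helper name here is made up, not the project's actual function):

```python
import tarfile

def assert_no_links_in_tar(tar_path: str) -> None:
    """Raise if any member of the archive is a hard or symbolic link."""
    with tarfile.open(tar_path) as tar:
        for member in tar.getmembers():
            if member.islnk() or member.issym():
                raise ValueError(
                    f"refusing to extract {tar_path!r}: member "
                    f"{member.name!r} is a hard/soft link"
                )
```
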
This commit is contained in:
Steven Moder
2025-03-02 02:16:10 +08:00
Committed by GitHub
Parent 5dffe8627f
Commit 4a79aa6a93
74 changed files with 325 additions and 325 deletions

View file

@@ -7,7 +7,7 @@ from bs4 import BeautifulSoup
from functools import lru_cache
from itertools import zip_longest
from check_proxy import check_proxy
-from toolbox import CatchException, update_ui, get_conf, update_ui_lastest_msg
+from toolbox import CatchException, update_ui, get_conf, update_ui_latest_msg
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
from request_llms.bridge_all import model_info
from request_llms.bridge_all import predict_no_ui_long_connection
@@ -49,7 +49,7 @@ def search_optimizer(
mutable = ["", time.time(), ""]
llm_kwargs["temperature"] = 0.8
try:
-querys_json = predict_no_ui_long_connection(
+query_json = predict_no_ui_long_connection(
inputs=query,
llm_kwargs=llm_kwargs,
history=[],
@@ -57,31 +57,31 @@ def search_optimizer(
observe_window=mutable,
)
except Exception:
-querys_json = "1234"
+query_json = "null"
#* 尝试解码优化后的搜索结果
-querys_json = re.sub(r"```json|```", "", querys_json)
+query_json = re.sub(r"```json|```", "", query_json)
try:
-querys = json.loads(querys_json)
+queries = json.loads(query_json)
except Exception:
#* 如果解码失败,降低温度再试一次
try:
llm_kwargs["temperature"] = 0.4
-querys_json = predict_no_ui_long_connection(
+query_json = predict_no_ui_long_connection(
inputs=query,
llm_kwargs=llm_kwargs,
history=[],
sys_prompt=sys_prompt,
observe_window=mutable,
)
-querys_json = re.sub(r"```json|```", "", querys_json)
-querys = json.loads(querys_json)
+query_json = re.sub(r"```json|```", "", query_json)
+queries = json.loads(query_json)
except Exception:
#* 如果再次失败,直接返回原始问题
-querys = [query]
+queries = [query]
links = []
success = 0
Exceptions = ""
-for q in querys:
+for q in queries:
try:
link = searxng_request(q, proxies, categories, searxng_url, engines=engines)
if len(link) > 0:
@@ -224,15 +224,15 @@ def internet_search_with_analysis_prompt(prompt, analysis_prompt, llm_kwargs, ch
categories = 'general'
searxng_url = None # 使用默认的searxng_url
engines = None # 使用默认的搜索引擎
-yield from update_ui_lastest_msg(lastmsg=f"检索中: {prompt} ...", chatbot=chatbot, history=[], delay=1)
+yield from update_ui_latest_msg(lastmsg=f"检索中: {prompt} ...", chatbot=chatbot, history=[], delay=1)
urls = searxng_request(prompt, proxies, categories, searxng_url, engines=engines)
-yield from update_ui_lastest_msg(lastmsg=f"依次访问搜索到的网站 ...", chatbot=chatbot, history=[], delay=1)
+yield from update_ui_latest_msg(lastmsg=f"依次访问搜索到的网站 ...", chatbot=chatbot, history=[], delay=1)
if len(urls) == 0:
return None
max_search_result = 5 # 最多收纳多少个网页的结果
history = []
for index, url in enumerate(urls[:max_search_result]):
-yield from update_ui_lastest_msg(lastmsg=f"依次访问搜索到的网站: {url['link']} ...", chatbot=chatbot, history=[], delay=1)
+yield from update_ui_latest_msg(lastmsg=f"依次访问搜索到的网站: {url['link']} ...", chatbot=chatbot, history=[], delay=1)
res = scrape_text(url['link'], proxies)
prefix = f"{index}份搜索结果 [源自{url['source'][0]}搜索] {url['title'][:25]}"
history.extend([prefix, res])
@@ -247,7 +247,7 @@ def internet_search_with_analysis_prompt(prompt, analysis_prompt, llm_kwargs, ch
llm_kwargs=llm_kwargs,
history=history,
sys_prompt="请从搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。",
-console_slience=False,
+console_silence=False,
)
return gpt_say
@@ -304,7 +304,7 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
# 开始
prefix = f"正在加载 第{index+1}份搜索结果 [源自{url['source'][0]}搜索] {url['title'][:25]}"
string_structure = template.format(TITLE=prefix, URL=url['link'], CONTENT="正在加载,请稍后 ......")
-yield from update_ui_lastest_msg(lastmsg=(buffer + string_structure), chatbot=chatbot, history=history, delay=0.1) # 刷新界面
+yield from update_ui_latest_msg(lastmsg=(buffer + string_structure), chatbot=chatbot, history=history, delay=0.1) # 刷新界面
# 获取结果
res = future.result()
@@ -316,7 +316,7 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
# 更新历史
history.extend([prefix, res])
-yield from update_ui_lastest_msg(lastmsg=buffer, chatbot=chatbot, history=history, delay=0.1) # 刷新界面
+yield from update_ui_latest_msg(lastmsg=buffer, chatbot=chatbot, history=history, delay=0.1) # 刷新界面
# ------------- < 第3步ChatGPT综合 > -------------
if (optimizer != "开启(增强)"):

View file

@@ -1,5 +1,5 @@
from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone, check_repeat_upload, map_file_to_sha256
-from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
+from toolbox import CatchException, report_exception, update_ui_latest_msg, zip_result, gen_time_str
from functools import partial
from loguru import logger
@@ -41,7 +41,7 @@ def switch_prompt(pfg, mode, more_requirement):
return inputs_array, sys_prompt_array
-def desend_to_extracted_folder_if_exist(project_folder):
+def descend_to_extracted_folder_if_exist(project_folder):
"""
Descend into the extracted folder if it exists, otherwise return the original folder.
@@ -130,7 +130,7 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
if not txt.startswith('https://arxiv.org/abs/'):
msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}"
-yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面
+yield from update_ui_latest_msg(msg, chatbot=chatbot, history=history) # 刷新界面
return msg, None
# <-------------- set format ------------->
arxiv_id = url_.split('/abs/')[-1]
@@ -156,16 +156,16 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
return False
if os.path.exists(dst) and allow_cache:
-yield from update_ui_lastest_msg(f"调用缓存 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面
+yield from update_ui_latest_msg(f"调用缓存 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面
success = True
else:
-yield from update_ui_lastest_msg(f"开始下载 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面
+yield from update_ui_latest_msg(f"开始下载 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面
success = fix_url_and_download()
-yield from update_ui_lastest_msg(f"下载完成 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面
+yield from update_ui_latest_msg(f"下载完成 {arxiv_id}", chatbot=chatbot, history=history) # 刷新界面
if not success:
-yield from update_ui_lastest_msg(f"下载失败 {arxiv_id}", chatbot=chatbot, history=history)
+yield from update_ui_latest_msg(f"下载失败 {arxiv_id}", chatbot=chatbot, history=history)
raise tarfile.ReadError(f"论文下载失败 {arxiv_id}")
# <-------------- extract file ------------->
@@ -288,7 +288,7 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
return
# <-------------- if is a zip/tar file ------------->
-project_folder = desend_to_extracted_folder_if_exist(project_folder)
+project_folder = descend_to_extracted_folder_if_exist(project_folder)
# <-------------- move latex project away from temp folder ------------->
from shared_utils.fastapi_server import validate_path_safety
@@ -365,7 +365,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
try:
txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
except tarfile.ReadError as e:
-yield from update_ui_lastest_msg(
+yield from update_ui_latest_msg(
"无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。",
chatbot=chatbot, history=history)
return
@@ -404,7 +404,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
return
# <-------------- if is a zip/tar file ------------->
-project_folder = desend_to_extracted_folder_if_exist(project_folder)
+project_folder = descend_to_extracted_folder_if_exist(project_folder)
# <-------------- move latex project away from temp folder ------------->
from shared_utils.fastapi_server import validate_path_safety
@@ -518,7 +518,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
# repeat, project_folder = check_repeat_upload(file_manifest[0], hash_tag)
# if repeat:
-# yield from update_ui_lastest_msg(f"发现重复上传,请查收结果(压缩包)...", chatbot=chatbot, history=history)
+# yield from update_ui_latest_msg(f"发现重复上传,请查收结果(压缩包)...", chatbot=chatbot, history=history)
# try:
# translate_pdf = [f for f in glob.glob(f'{project_folder}/**/merge_translate_zh.pdf', recursive=True)][0]
# promote_file_to_downloadzone(translate_pdf, rename_file=None, chatbot=chatbot)
@@ -531,7 +531,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
# report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"发现重复上传,但是无法找到相关文件")
# yield from update_ui(chatbot=chatbot, history=history)
# else:
-# yield from update_ui_lastest_msg(f"未发现重复上传", chatbot=chatbot, history=history)
+# yield from update_ui_latest_msg(f"未发现重复上传", chatbot=chatbot, history=history)
# <-------------- convert pdf into tex ------------->
chatbot.append([f"解析项目: {txt}", "正在将PDF转换为tex项目,请耐心等待..."])
@@ -543,7 +543,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
return False
# <-------------- translate latex file into Chinese ------------->
-yield from update_ui_lastest_msg("正在tex项目将翻译为中文...", chatbot=chatbot, history=history)
+yield from update_ui_latest_msg("正在tex项目将翻译为中文...", chatbot=chatbot, history=history)
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
if len(file_manifest) == 0:
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
@@ -551,7 +551,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
return
# <-------------- if is a zip/tar file ------------->
-project_folder = desend_to_extracted_folder_if_exist(project_folder)
+project_folder = descend_to_extracted_folder_if_exist(project_folder)
# <-------------- move latex project away from temp folder ------------->
from shared_utils.fastapi_server import validate_path_safety
@@ -571,7 +571,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
switch_prompt=_switch_prompt_)
# <-------------- compile PDF ------------->
-yield from update_ui_lastest_msg("正在将翻译好的项目tex项目编译为PDF...", chatbot=chatbot, history=history)
+yield from update_ui_latest_msg("正在将翻译好的项目tex项目编译为PDF...", chatbot=chatbot, history=history)
success = yield from 编译Latex(chatbot, history, main_file_original='merge',
main_file_modified='merge_translate_zh', mode='translate_zh',
work_folder_original=project_folder, work_folder_modified=project_folder,

View file

@@ -1,5 +1,5 @@
from toolbox import CatchException, check_packages, get_conf
-from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion
+from toolbox import update_ui, update_ui_latest_msg, disable_auto_promotion
from toolbox import trimmed_format_exc_markdown
from crazy_functions.crazy_utils import get_files_from_everything
from crazy_functions.pdf_fns.parse_pdf import get_avail_grobid_url
@@ -57,9 +57,9 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
yield from 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url)
return
-if method == "ClASSIC":
+if method == "Classic":
# ------- 第三种方法,早期代码,效果不理想 -------
-yield from update_ui_lastest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
+yield from update_ui_latest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
yield from 解析PDF_简单拆解(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
return
@@ -77,7 +77,7 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
if grobid_url is not None:
yield from 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url)
return
-yield from update_ui_lastest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
+yield from update_ui_latest_msg("GROBID服务不可用,请检查config中的GROBID_URL。作为替代,现在将执行效果稍差的旧版代码。", chatbot, history, delay=3)
yield from 解析PDF_简单拆解(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
return

View file

@@ -19,7 +19,7 @@ class PDF_Tran(GptAcademicPluginTemplate):
"additional_prompt":
ArgProperty(title="额外提示词", description="例如:对专有名词、翻译语气等方面的要求", default_value="", type="string").model_dump_json(), # 高级参数输入区,自动同步
"pdf_parse_method":
-ArgProperty(title="PDF解析方法", options=["DOC2X", "GROBID", "ClASSIC"], description="", default_value="GROBID", type="dropdown").model_dump_json(),
+ArgProperty(title="PDF解析方法", options=["DOC2X", "GROBID", "Classic"], description="", default_value="GROBID", type="dropdown").model_dump_json(),
}
return gui_definition

View file

@@ -4,7 +4,7 @@ from typing import List
from shared_utils.fastapi_server import validate_path_safety
from toolbox import report_exception
-from toolbox import CatchException, update_ui, get_conf, get_log_folder, update_ui_lastest_msg
+from toolbox import CatchException, update_ui, get_conf, get_log_folder, update_ui_latest_msg
from shared_utils.fastapi_server import validate_path_safety
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@@ -92,7 +92,7 @@ def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, u
chatbot.append([txt, f'正在清空 ({current_context}) ...'])
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
rag_worker.purge_vector_store()
-yield from update_ui_lastest_msg('已清空', chatbot, history, delay=0) # 刷新界面
+yield from update_ui_latest_msg('已清空', chatbot, history, delay=0) # 刷新界面
return
# 3. Normal Q&A processing
@@ -109,10 +109,10 @@ def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, u
# 5. If input is clipped, add input to vector store before retrieve
if input_is_clipped_flag:
-yield from update_ui_lastest_msg('检测到长输入, 正在向量化 ...', chatbot, history, delay=0) # 刷新界面
+yield from update_ui_latest_msg('检测到长输入, 正在向量化 ...', chatbot, history, delay=0) # 刷新界面
# Save input to vector store
rag_worker.add_text_to_vector_store(txt_origin)
-yield from update_ui_lastest_msg('向量化完成 ...', chatbot, history, delay=0) # 刷新界面
+yield from update_ui_latest_msg('向量化完成 ...', chatbot, history, delay=0) # 刷新界面
if len(txt_origin) > REMEMBER_PREVIEW:
HALF = REMEMBER_PREVIEW // 2
@@ -142,7 +142,7 @@ def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, u
)
# 8. Remember Q&A
-yield from update_ui_lastest_msg(
+yield from update_ui_latest_msg(
model_say + '</br></br>' + f'对话记忆中, 请稍等 ({current_context}) ...',
chatbot, history, delay=0.5
)
@@ -150,4 +150,4 @@ def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, u
history.extend([i_say, model_say])
# 9. Final UI Update
-yield from update_ui_lastest_msg(model_say, chatbot, history, delay=0, msg=tip)
+yield from update_ui_latest_msg(model_say, chatbot, history, delay=0, msg=tip)

View file

@@ -1,5 +1,5 @@
import pickle, os, random
-from toolbox import CatchException, update_ui, get_conf, get_log_folder, update_ui_lastest_msg
+from toolbox import CatchException, update_ui, get_conf, get_log_folder, update_ui_latest_msg
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
@@ -9,7 +9,7 @@ from loguru import logger
from typing import List
-SOCIAL_NETWOK_WORKER_REGISTER = {}
+SOCIAL_NETWORK_WORKER_REGISTER = {}
class SocialNetwork():
def __init__(self):
@@ -78,7 +78,7 @@ class SocialNetworkWorker(SaveAndLoad):
for f in friend.friends_list:
self.add_friend(f)
msg = f"成功添加{len(friend.friends_list)}个联系人: {str(friend.friends_list)}"
-yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=0)
+yield from update_ui_latest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=0)
def run(self, txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
@@ -104,12 +104,12 @@ class SocialNetworkWorker(SaveAndLoad):
}
try:
-Explaination = '\n'.join([f'{k}: {v["explain_to_llm"]}' for k, v in self.tools_to_select.items()])
+Explanation = '\n'.join([f'{k}: {v["explain_to_llm"]}' for k, v in self.tools_to_select.items()])
class UserSociaIntention(BaseModel):
intention_type: str = Field(
description=
f"The type of user intention. You must choose from {self.tools_to_select.keys()}.\n\n"
-f"Explaination:\n{Explaination}",
+f"Explanation:\n{Explanation}",
default="SocialAdvice"
)
pydantic_cls_instance, err_msg = select_tool(
@@ -118,7 +118,7 @@ class SocialNetworkWorker(SaveAndLoad):
pydantic_cls=UserSociaIntention
)
except Exception as e:
-yield from update_ui_lastest_msg(
+yield from update_ui_latest_msg(
lastmsg=f"无法理解用户意图 {err_msg}",
chatbot=chatbot,
history=history,
@@ -150,10 +150,10 @@ def I人助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
# 1. we retrieve worker from global context
user_name = chatbot.get_user()
checkpoint_dir=get_log_folder(user_name, plugin_name='experimental_rag')
-if user_name in SOCIAL_NETWOK_WORKER_REGISTER:
-social_network_worker = SOCIAL_NETWOK_WORKER_REGISTER[user_name]
+if user_name in SOCIAL_NETWORK_WORKER_REGISTER:
+social_network_worker = SOCIAL_NETWORK_WORKER_REGISTER[user_name]
else:
-social_network_worker = SOCIAL_NETWOK_WORKER_REGISTER[user_name] = SocialNetworkWorker(
+social_network_worker = SOCIAL_NETWORK_WORKER_REGISTER[user_name] = SocialNetworkWorker(
user_name,
llm_kwargs,
checkpoint_dir=checkpoint_dir,

View file

@@ -1,5 +1,5 @@
import os, copy, time
-from toolbox import CatchException, report_exception, update_ui, zip_result, promote_file_to_downloadzone, update_ui_lastest_msg, get_conf, generate_file_link
+from toolbox import CatchException, report_exception, update_ui, zip_result, promote_file_to_downloadzone, update_ui_latest_msg, get_conf, generate_file_link
from shared_utils.fastapi_server import validate_path_safety
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
@@ -117,7 +117,7 @@ def 注释源代码(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
logger.error(f"文件: {fp} 的注释结果未能成功")
file_links = generate_file_link(preview_html_list)
-yield from update_ui_lastest_msg(
+yield from update_ui_latest_msg(
f"当前任务: <br/>{'<br/>'.join(tasks)}.<br/>" +
f"剩余源文件数量: {remain}.<br/>" +
f"已完成的文件: {sum(worker_done)}.<br/>" +

View file

@@ -7,7 +7,7 @@ from bs4 import BeautifulSoup
from functools import lru_cache
from itertools import zip_longest
from check_proxy import check_proxy
-from toolbox import CatchException, update_ui, get_conf, promote_file_to_downloadzone, update_ui_lastest_msg, generate_file_link
+from toolbox import CatchException, update_ui, get_conf, promote_file_to_downloadzone, update_ui_latest_msg, generate_file_link
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
from request_llms.bridge_all import model_info
from request_llms.bridge_all import predict_no_ui_long_connection
@@ -46,7 +46,7 @@ def download_video(bvid, user_name, chatbot, history):
# pause a while
tic_time = 8
for i in range(tic_time):
-yield from update_ui_lastest_msg(
+yield from update_ui_latest_msg(
lastmsg=f"即将下载音频。等待{tic_time-i}秒后自动继续, 点击“停止”键取消此操作。",
chatbot=chatbot, history=[], delay=1)
@@ -61,13 +61,13 @@ def download_video(bvid, user_name, chatbot, history):
# preview
preview_list = [promote_file_to_downloadzone(fp) for fp in downloaded_files]
file_links = generate_file_link(preview_list)
-yield from update_ui_lastest_msg(f"已完成的文件: <br/>" + file_links, chatbot=chatbot, history=history, delay=0)
+yield from update_ui_latest_msg(f"已完成的文件: <br/>" + file_links, chatbot=chatbot, history=history, delay=0)
chatbot.append((None, f"即将下载视频。"))
# pause a while
tic_time = 16
for i in range(tic_time):
-yield from update_ui_lastest_msg(
+yield from update_ui_latest_msg(
lastmsg=f"即将下载视频。等待{tic_time-i}秒后自动继续, 点击“停止”键取消此操作。",
chatbot=chatbot, history=[], delay=1)
@@ -78,7 +78,7 @@ def download_video(bvid, user_name, chatbot, history):
# preview
preview_list = [promote_file_to_downloadzone(fp) for fp in downloaded_files_part2]
file_links = generate_file_link(preview_list)
-yield from update_ui_lastest_msg(f"已完成的文件: <br/>" + file_links, chatbot=chatbot, history=history, delay=0)
+yield from update_ui_latest_msg(f"已完成的文件: <br/>" + file_links, chatbot=chatbot, history=history, delay=0)
# return
return downloaded_files + downloaded_files_part2
@@ -110,7 +110,7 @@ def 多媒体任务(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
# 结构化生成
internet_search_keyword = user_wish
-yield from update_ui_lastest_msg(lastmsg=f"发起互联网检索: {internet_search_keyword} ...", chatbot=chatbot, history=[], delay=1)
+yield from update_ui_latest_msg(lastmsg=f"发起互联网检索: {internet_search_keyword} ...", chatbot=chatbot, history=[], delay=1)
from crazy_functions.Internet_GPT import internet_search_with_analysis_prompt
result = yield from internet_search_with_analysis_prompt(
prompt=internet_search_keyword,
@@ -119,7 +119,7 @@ def 多媒体任务(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
chatbot=chatbot
)
-yield from update_ui_lastest_msg(lastmsg=f"互联网检索结论: {result} \n\n 正在生成进一步检索方案 ...", chatbot=chatbot, history=[], delay=1)
+yield from update_ui_latest_msg(lastmsg=f"互联网检索结论: {result} \n\n 正在生成进一步检索方案 ...", chatbot=chatbot, history=[], delay=1)
rf_req = dedent(f"""
The user wish to get the following resource:
{user_wish}
@@ -132,7 +132,7 @@ def 多媒体任务(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
rf_req = dedent(f"""
The user wish to get the following resource:
{user_wish}
-Generate reseach keywords (less than 5 keywords) accordingly.
+Generate research keywords (less than 5 keywords) accordingly.
""")
gpt_json_io = GptJsonIO(Query)
inputs = rf_req + gpt_json_io.format_instructions
@@ -146,12 +146,12 @@ def 多媒体任务(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# 获取候选资源
-candadate_dictionary: dict = get_video_resource(video_engine_keywords)
-candadate_dictionary_as_str = json.dumps(candadate_dictionary, ensure_ascii=False, indent=4)
+candidate_dictionary: dict = get_video_resource(video_engine_keywords)
+candidate_dictionary_as_str = json.dumps(candidate_dictionary, ensure_ascii=False, indent=4)
# 展示候选资源
-candadate_display = "\n".join([f"{i+1}. {it['title']}" for i, it in enumerate(candadate_dictionary)])
-chatbot.append((None, f"候选:\n\n{candadate_display}"))
+candidate_display = "\n".join([f"{i+1}. {it['title']}" for i, it in enumerate(candidate_dictionary)])
+chatbot.append((None, f"候选:\n\n{candidate_display}"))
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# 结构化生成
@@ -160,7 +160,7 @@ def 多媒体任务(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
{user_wish}
Select the most relevant and suitable video resource from the following search results:
-{candadate_dictionary_as_str}
+{candidate_dictionary_as_str}
Note:
1. The first several search video results are more likely to satisfy the user's wish.

View file

@@ -1,5 +1,5 @@
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
-from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton
+from toolbox import report_exception, get_log_folder, update_ui_latest_msg, Singleton
from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
from crazy_functions.agent_fns.general import AutoGenGeneral

View file

@@ -8,7 +8,7 @@ class EchoDemo(PluginMultiprocessManager):
while True:
msg = self.child_conn.recv() # PipeCom
if msg.cmd == "user_input":
-# wait futher user input
+# wait father user input
self.child_conn.send(PipeCom("show", msg.content))
wait_success = self.subprocess_worker_wait_user_feedback(wait_msg="我准备好处理下一个问题了.")
if not wait_success:

View file

@@ -27,7 +27,7 @@ def gpt_academic_generate_oai_reply(
llm_kwargs=llm_config,
history=history,
sys_prompt=self._oai_system_message[0]['content'],
-console_slience=True
+console_silence=True
)
assumed_done = reply.endswith('\nTERMINATE')
return True, reply

View file

@@ -10,7 +10,7 @@ from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_
# TODO: 解决缩进问题
find_function_end_prompt = '''
-Below is a page of code that you need to read. This page may not yet complete, you job is to split this page to sperate functions, class functions etc.
+Below is a page of code that you need to read. This page may not yet complete, you job is to split this page to separate functions, class functions etc.
- Provide the line number where the first visible function ends.
- Provide the line number where the next visible function begins.
- If there are no other functions in this page, you should simply return the line number of the last line.
@@ -59,7 +59,7 @@ OUTPUT:
-revise_funtion_prompt = '''
+revise_function_prompt = '''
You need to read the following code, and revise the source code ({FILE_BASENAME}) according to following instructions:
1. You should analyze the purpose of the functions (if there are any).
2. You need to add docstring for the provided functions (if there are any).
@@ -117,7 +117,7 @@ def zip_result(folder):
'''
-revise_funtion_prompt_chinese = '''
+revise_function_prompt_chinese = '''
您需要阅读以下代码,并根据以下说明修订源代码({FILE_BASENAME}):
1. 如果源代码中包含函数的话, 你应该分析给定函数实现了什么功能
2. 如果源代码中包含函数的话, 你需要为函数添加docstring, docstring必须使用中文
@@ -188,9 +188,9 @@ class PythonCodeComment():
self.language = language
self.observe_window_update = observe_window_update
if self.language == "chinese":
-self.core_prompt = revise_funtion_prompt_chinese
+self.core_prompt = revise_function_prompt_chinese
else:
-self.core_prompt = revise_funtion_prompt
+self.core_prompt = revise_function_prompt
self.path = None
self.file_basename = None
self.file_brief = ""
@@ -222,7 +222,7 @@ class PythonCodeComment():
history=[],
sys_prompt="",
observe_window=[],
-console_slience=True
+console_silence=True
)
def extract_number(text):
@@ -316,7 +316,7 @@ class PythonCodeComment():
def tag_code(self, fn, hint):
code = fn
_, n_indent = self.dedent(code)
-indent_reminder = "" if n_indent == 0 else "(Reminder: as you can see, this piece of code has indent made up with {n_indent} whitespace, please preseve them in the OUTPUT.)"
+indent_reminder = "" if n_indent == 0 else "(Reminder: as you can see, this piece of code has indent made up with {n_indent} whitespace, please preserve them in the OUTPUT.)"
brief_reminder = "" if self.file_brief == "" else f"({self.file_basename} abstract: {self.file_brief})"
hint_reminder = "" if hint is None else f"(Reminder: do not ignore or modify code such as `{hint}`, provide complete code in the OUTPUT.)"
self.llm_kwargs['temperature'] = 0
@@ -333,7 +333,7 @@ class PythonCodeComment():
history=[],
sys_prompt="",
observe_window=[],
-console_slience=True
+console_silence=True
)
def get_code_block(reply):
@@ -400,7 +400,7 @@ class PythonCodeComment():
return revised
def begin_comment_source_code(self, chatbot=None, history=None):
-# from toolbox import update_ui_lastest_msg
+# from toolbox import update_ui_latest_msg
assert self.path is not None
assert '.py' in self.path # must be python source code
# write_target = self.path + '.revised.py'
@@ -409,10 +409,10 @@ class PythonCodeComment():
# with open(self.path + '.revised.py', 'w+', encoding='utf8') as f:
while True:
try:
-# yield from update_ui_lastest_msg(f"({self.file_basename}) 正在读取下一段代码片段:\n", chatbot=chatbot, history=history, delay=0)
+# yield from update_ui_latest_msg(f"({self.file_basename}) 正在读取下一段代码片段:\n", chatbot=chatbot, history=history, delay=0)
next_batch, line_no_start, line_no_end = self.get_next_batch()
self.observe_window_update(f"正在处理{self.file_basename} - {line_no_start}/{len(self.full_context)}\n")
-# yield from update_ui_lastest_msg(f"({self.file_basename}) 处理代码片段:\n\n{next_batch}", chatbot=chatbot, history=history, delay=0)
+# yield from update_ui_latest_msg(f"({self.file_basename}) 处理代码片段:\n\n{next_batch}", chatbot=chatbot, history=history, delay=0)
hint = None
MAX_ATTEMPT = 2

View file

@@ -1,7 +1,7 @@
import os
import threading
from loguru import logger
-from shared_utils.char_visual_effect import scolling_visual_effect
+from shared_utils.char_visual_effect import scrolling_visual_effect
from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token, Singleton
def input_clipping(inputs, history, max_token_limit, return_clip_flags=False):
@@ -256,7 +256,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
# 【第一种情况】:顺利完成
gpt_say = predict_no_ui_long_connection(
inputs=inputs, llm_kwargs=llm_kwargs, history=history,
-sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
+sys_prompt=sys_prompt, observe_window=mutable[index], console_silence=True
)
mutable[index][2] = "已成功"
return gpt_say
@@ -326,7 +326,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
mutable[thread_index][1] = time.time()
# 在前端打印些好玩的东西
for thread_index, _ in enumerate(worker_done):
-print_something_really_funny = f"[ ...`{scolling_visual_effect(mutable[thread_index][0], scroller_max_len)}`... ]"
+print_something_really_funny = f"[ ...`{scrolling_visual_effect(mutable[thread_index][0], scroller_max_len)}`... ]"
observe_win.append(print_something_really_funny)
# 在前端打印些好玩的东西
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
@@ -389,11 +389,11 @@ def read_and_clean_pdf_text(fp):
"""
提取文本块主字体
"""
-fsize_statiscs = {}
+fsize_statistics = {}
for wtf in l['spans']:
-if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
-fsize_statiscs[wtf['size']] += len(wtf['text'])
-return max(fsize_statiscs, key=fsize_statiscs.get)
+if wtf['size'] not in fsize_statistics: fsize_statistics[wtf['size']] = 0
+fsize_statistics[wtf['size']] += len(wtf['text'])
+return max(fsize_statistics, key=fsize_statistics.get)
def ffsize_same(a,b):
"""
@@ -433,11 +433,11 @@ def read_and_clean_pdf_text(fp):
############################## <第 2 步,获取正文主字体> ##################################
try:
-fsize_statiscs = {}
+fsize_statistics = {}
for span in meta_span:
-if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
-fsize_statiscs[span[1]] += span[2]
-main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
+if span[1] not in fsize_statistics: fsize_statistics[span[1]] = 0
+fsize_statistics[span[1]] += span[2]
+main_fsize = max(fsize_statistics, key=fsize_statistics.get)
if REMOVE_FOOT_NOTE:
give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT
except:
@@ -610,9 +610,9 @@ class nougat_interface():
def NOUGAT_parse_pdf(self, fp, chatbot, history):
-from toolbox import update_ui_lastest_msg
+from toolbox import update_ui_latest_msg
-yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...",
+yield from update_ui_latest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...",
chatbot=chatbot, history=history, delay=0)
self.threadLock.acquire()
import glob, threading, os
@@ -620,7 +620,7 @@ class nougat_interface():
dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str())
os.makedirs(dst)
-yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度正在加载NOUGAT... 提示首次运行需要花费较长时间下载NOUGAT参数",
+yield from update_ui_latest_msg("正在解析论文, 请稍候。进度正在加载NOUGAT... 提示首次运行需要花费较长时间下载NOUGAT参数",
chatbot=chatbot, history=history, delay=0)
command = ['nougat', '--out', os.path.abspath(dst), os.path.abspath(fp)]
self.nougat_with_timeout(command, cwd=os.getcwd(), timeout=3600)

View file

@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui, update_ui_lastest_msg
+from toolbox import CatchException, update_ui, update_ui_latest_msg
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
@@ -13,7 +13,7 @@ class MiniGame_ASCII_Art(GptAcademicGameBaseState):
else:
if prompt.strip() == 'exit':
self.delete_game = True
-yield from update_ui_lastest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.)
+yield from update_ui_latest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.)
return
chatbot.append([prompt, ""])
yield from update_ui(chatbot=chatbot, history=history)
@@ -31,12 +31,12 @@ class MiniGame_ASCII_Art(GptAcademicGameBaseState):
self.cur_task = 'identify user guess'
res = get_code_block(raw_res)
history += ['', f'the answer is {self.obj}', inputs, res]
-yield from update_ui_lastest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.)
+yield from update_ui_latest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.)
elif self.cur_task == 'identify user guess':
if is_same_thing(self.obj, prompt, self.llm_kwargs):
self.delete_game = True
-yield from update_ui_lastest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.)
+yield from update_ui_latest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.)
else:
self.cur_task = 'identify user guess'
-yield from update_ui_lastest_msg(lastmsg="猜错了,再试试,输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.)
+yield from update_ui_latest_msg(lastmsg="猜错了,再试试,输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.)

View file

@@ -63,7 +63,7 @@ prompts_terminate = """小说的前文回顾:
"""
-from toolbox import CatchException, update_ui, update_ui_lastest_msg
+from toolbox import CatchException, update_ui, update_ui_latest_msg
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
@@ -112,7 +112,7 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState):
if prompt.strip() == 'exit' or prompt.strip() == '结束剧情':
# should we terminate game here?
self.delete_game = True
-yield from update_ui_lastest_msg(lastmsg=f"游戏结束。", chatbot=chatbot, history=history, delay=0.)
+yield from update_ui_latest_msg(lastmsg=f"游戏结束。", chatbot=chatbot, history=history, delay=0.)
return
if '剧情收尾' in prompt:
self.cur_task = 'story_terminate'
@@ -137,8 +137,8 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState):
)
self.story.append(story_paragraph)
# # 配图
-yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
-yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
+yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
+yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
# # 构建后续剧情引导
previously_on_story = ""
@@ -171,8 +171,8 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState):
)
self.story.append(story_paragraph)
# # 配图
-yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
-yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
+yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
+yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
# # 构建后续剧情引导
previously_on_story = ""
@@ -204,8 +204,8 @@ class MiniGame_ResumeStory(GptAcademicGameBaseState):
chatbot, history_, self.sys_prompt_
)
# # 配图
-yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
-yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
+yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
+yield from update_ui_latest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)
# terminate game
self.delete_game = True

View file

@@ -2,7 +2,7 @@ import time
import importlib
from toolbox import trimmed_format_exc, gen_time_str, get_log_folder
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder
-from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg
+from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_latest_msg
import multiprocessing
def get_class_name(class_string):

View file

@@ -102,10 +102,10 @@ class GptJsonIO():
logging.info(f'Repairing json{response}')
repair_prompt = self.generate_repair_prompt(broken_json = response, error=repr(e))
result = self.generate_output(gpt_gen_fn(repair_prompt, self.format_instructions))
-logging.info('Repaire json success.')
+logging.info('Repair json success.')
except Exception as e:
# 没辙了,放弃治疗
-logging.info('Repaire json fail.')
+logging.info('Repair json fail.')
raise JsonStringError('Cannot repair json.', str(e))
return result

View file

@@ -3,7 +3,7 @@ import re
import shutil
import numpy as np
from loguru import logger
-from toolbox import update_ui, update_ui_lastest_msg, get_log_folder, gen_time_str
+from toolbox import update_ui, update_ui_latest_msg, get_log_folder, gen_time_str
from toolbox import get_conf, promote_file_to_downloadzone
from crazy_functions.latex_fns.latex_toolbox import PRESERVE, TRANSFORM
from crazy_functions.latex_fns.latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
@@ -20,7 +20,7 @@ def split_subprocess(txt, project_folder, return_dict, opts):
"""
break down latex file to a linked list,
each node use a preserve flag to indicate whether it should
-be proccessed by GPT.
+be processed by GPT.
"""
text = txt
mask = np.zeros(len(txt), dtype=np.uint8) + TRANSFORM
@@ -85,14 +85,14 @@ class LatexPaperSplit():
"""
break down latex file to a linked list,
each node use a preserve flag to indicate whether it should
-be proccessed by GPT.
+be processed by GPT.
"""
def __init__(self) -> None:
self.nodes = None
self.msg = "*{\\scriptsize\\textbf{警告该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \
"版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \
"项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
-# 请您不要删除或修改这行警告,除非您是论文的原作者如果您是论文原作者,欢迎加REAME中的QQ联系开发者
+# 请您不要删除或修改这行警告,除非您是论文的原作者如果您是论文原作者,欢迎加README中的QQ联系开发者
self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"
self.title = "unknown"
self.abstract = "unknown"
@@ -151,7 +151,7 @@ class LatexPaperSplit():
"""
break down latex file to a linked list,
each node use a preserve flag to indicate whether it should
-be proccessed by GPT.
+be processed by GPT.
P.S. use multiprocessing to avoid timeout error
"""
import multiprocessing
@@ -351,7 +351,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
max_try = 32
chatbot.append([f"正在编译PDF文档", f'编译已经开始。当前工作路径为{work_folder},如果程序停顿5分钟以上,请直接去该路径下取回翻译结果,或者重启之后再度尝试 ...']); yield from update_ui(chatbot=chatbot, history=history)
chatbot.append([f"正在编译PDF文档", '...']); yield from update_ui(chatbot=chatbot, history=history); time.sleep(1); chatbot[-1] = list(chatbot[-1]) # 刷新界面
-yield from update_ui_lastest_msg('编译已经开始...', chatbot, history) # 刷新Gradio前端界面
+yield from update_ui_latest_msg('编译已经开始...', chatbot, history) # 刷新Gradio前端界面
# 检查是否需要使用xelatex
def check_if_need_xelatex(tex_path):
try:
@@ -396,32 +396,32 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
shutil.copyfile(may_exist_bbl, target_bbl)
# https://stackoverflow.com/questions/738755/dont-make-me-manually-abort-a-latex-compile-when-theres-an-error
-yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译原始PDF ...', chatbot, history) # 刷新Gradio前端界面
+yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译原始PDF ...', chatbot, history) # 刷新Gradio前端界面
ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_original), work_folder_original)
-yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # 刷新Gradio前端界面
+yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # 刷新Gradio前端界面
ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_modified), work_folder_modified)
if ok and os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')):
# 只有第二步成功,才能继续下面的步骤
-yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # 刷新Gradio前端界面
+yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # 刷新Gradio前端界面
if not os.path.exists(pj(work_folder_original, f'{main_file_original}.bbl')):
ok = compile_latex_with_timeout(f'bibtex {main_file_original}.aux', work_folder_original)
if not os.path.exists(pj(work_folder_modified, f'{main_file_modified}.bbl')):
ok = compile_latex_with_timeout(f'bibtex {main_file_modified}.aux', work_folder_modified)
-yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history) # 刷新Gradio前端界面
+yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history) # 刷新Gradio前端界面
ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_original), work_folder_original)
ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_modified), work_folder_modified)
ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_original), work_folder_original)
ok = compile_latex_with_timeout(get_compile_command(compiler, main_file_modified), work_folder_modified)
if mode!='translate_zh':
-yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面
+yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面
logger.info( f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex', os.getcwd())
-yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面
+yield from update_ui_latest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面
ok = compile_latex_with_timeout(get_compile_command(compiler, 'merge_diff'), work_folder)
ok = compile_latex_with_timeout(f'bibtex merge_diff.aux', work_folder)
ok = compile_latex_with_timeout(get_compile_command(compiler, 'merge_diff'), work_folder)
@@ -435,13 +435,13 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
results_ += f"原始PDF编译是否成功: {original_pdf_success};"
results_ += f"转化PDF编译是否成功: {modified_pdf_success};"
results_ += f"对比PDF编译是否成功: {diff_pdf_success};"
-yield from update_ui_lastest_msg(f'{n_fix}编译结束:<br/>{results_}...', chatbot, history) # 刷新Gradio前端界面
+yield from update_ui_latest_msg(f'{n_fix}编译结束:<br/>{results_}...', chatbot, history) # 刷新Gradio前端界面
if diff_pdf_success:
result_pdf = pj(work_folder_modified, f'merge_diff.pdf') # get pdf path
promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
if modified_pdf_success:
-yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 正在尝试生成对比PDF, 请稍候 ...', chatbot, history) # 刷新Gradio前端界面
+yield from update_ui_latest_msg(f'转化PDF编译已经成功, 正在尝试生成对比PDF, 请稍候 ...', chatbot, history) # 刷新Gradio前端界面
result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path
origin_pdf = pj(work_folder_original, f'{main_file_original}.pdf') # get pdf path
if os.path.exists(pj(work_folder, '..', 'translation')):
@@ -472,7 +472,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
work_folder_modified=work_folder_modified,
fixed_line=fixed_line
)
-yield from update_ui_lastest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history) # 刷新Gradio前端界面
+yield from update_ui_latest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history) # 刷新Gradio前端界面
if not can_retry: break
return False # 失败啦

View file

@@ -168,7 +168,7 @@ def set_forbidden_text(text, mask, pattern, flags=0):
def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
"""
Move area out of preserve area (make text editable for GPT)
-count the number of the braces so as to catch compelete text area.
+count the number of the braces so as to catch complete text area.
e.g.
\begin{abstract} blablablablablabla. \end{abstract}
"""
@@ -188,7 +188,7 @@ def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
"""
Add a preserve text area in this paper (text become untouchable for GPT).
-count the number of the braces so as to catch compelete text area.
+count the number of the braces so as to catch complete text area.
e.g.
\caption{blablablablabla\texbf{blablabla}blablabla.}
"""
@@ -214,7 +214,7 @@ def reverse_forbidden_text_careful_brace(
):
"""
Move area out of preserve area (make text editable for GPT)
-count the number of the braces so as to catch compelete text area.
+count the number of the braces so as to catch complete text area.
e.g.
\caption{blablablablabla\texbf{blablabla}blablabla.}
"""
@@ -287,23 +287,23 @@ def find_main_tex_file(file_manifest, mode):
在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。
P.S. 但愿没人把latex模板放在里面传进来 (6.25 加入判定latex模板的代码)
"""
-canidates = []
+candidates = []
for texf in file_manifest:
if os.path.basename(texf).startswith("merge"):
continue
with open(texf, "r", encoding="utf8", errors="ignore") as f:
file_content = f.read()
if r"\documentclass" in file_content:
-canidates.append(texf)
+candidates.append(texf)
else:
continue
-if len(canidates) == 0:
+if len(candidates) == 0:
raise RuntimeError("无法找到一个主Tex文件包含documentclass关键字")
-elif len(canidates) == 1:
-return canidates[0]
-else: # if len(canidates) >= 2 通过一些Latex模板中常见但通常不会出现在正文的单词,对不同latex源文件扣分,取评分最高者返回
-canidates_score = []
+elif len(candidates) == 1:
+return candidates[0]
+else: # if len(candidates) >= 2 通过一些Latex模板中常见但通常不会出现在正文的单词,对不同latex源文件扣分,取评分最高者返回
+candidates_score = []
# 给出一些判定模板文档的词作为扣分项
unexpected_words = [
"\\LaTeX",
@@ -316,19 +316,19 @@ def find_main_tex_file(file_manifest, mode):
"reviewers",
]
expected_words = ["\\input", "\\ref", "\\cite"]
-for texf in canidates:
-canidates_score.append(0)
+for texf in candidates:
+candidates_score.append(0)
with open(texf, "r", encoding="utf8", errors="ignore") as f:
file_content = f.read()
file_content = rm_comments(file_content)
for uw in unexpected_words:
if uw in file_content:
-canidates_score[-1] -= 1
+candidates_score[-1] -= 1
for uw in expected_words:
if uw in file_content:
-canidates_score[-1] += 1
-select = np.argmax(canidates_score) # 取评分最高者返回
-return canidates[select]
+candidates_score[-1] += 1
+select = np.argmax(candidates_score) # 取评分最高者返回
+return candidates[select]
def rm_comments(main_file):
@@ -374,7 +374,7 @@ def find_tex_file_ignore_case(fp):
def merge_tex_files_(project_foler, main_file, mode):
"""
-Merge Tex project recrusively
+Merge Tex project recursively
"""
main_file = rm_comments(main_file)
for s in reversed([q for q in re.finditer(r"\\input\{(.*?)\}", main_file, re.M)]):
@@ -429,7 +429,7 @@ def find_title_and_abs(main_file):
def merge_tex_files(project_foler, main_file, mode):
"""
-Merge Tex project recrusively
+Merge Tex project recursively
P.S. 顺便把CTEX塞进去以支持中文
P.S. 顺便把Latex的注释去除
"""

View file

@@ -1,4 +1,4 @@
-from toolbox import update_ui, get_conf, promote_file_to_downloadzone, update_ui_lastest_msg, generate_file_link
+from toolbox import update_ui, get_conf, promote_file_to_downloadzone, update_ui_latest_msg, generate_file_link
from shared_utils.docker_as_service_api import stream_daas
from shared_utils.docker_as_service_api import DockerServiceApiComModel
import random
@@ -25,7 +25,7 @@ def download_video(video_id, only_audio, user_name, chatbot, history):
status_buf += "\n\n"
status_buf += "DaaS file attach: \n\n"
status_buf += str(output_manifest['server_file_attach'])
-yield from update_ui_lastest_msg(status_buf, chatbot, history)
+yield from update_ui_latest_msg(status_buf, chatbot, history)
return output_manifest['server_file_attach']

View file

@@ -1,6 +1,6 @@
from pydantic import BaseModel, Field
from typing import List
-from toolbox import update_ui_lastest_msg, disable_auto_promotion
+from toolbox import update_ui_latest_msg, disable_auto_promotion
from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError

View file

@@ -113,7 +113,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi
return [txt]
else:
# raw_token_num > TOKEN_LIMIT_PER_FRAGMENT
-# find a smooth token limit to achieve even seperation
+# find a smooth token limit to achieve even separation
count = int(math.ceil(raw_token_num / TOKEN_LIMIT_PER_FRAGMENT))
token_limit_smooth = raw_token_num // count + count
return breakdown_text_to_satisfy_token_limit(txt, limit=token_limit_smooth, llm_model=llm_kwargs['llm_model'])

View file

@@ -1,6 +1,6 @@
import os
from toolbox import CatchException, report_exception, get_log_folder, gen_time_str, check_packages
-from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
+from toolbox import update_ui, promote_file_to_downloadzone, update_ui_latest_msg, disable_auto_promotion
from toolbox import write_history_to_file, promote_file_to_downloadzone, get_conf, extract_archive
from crazy_functions.pdf_fns.parse_pdf import parse_pdf, translate_pdf

View file

@@ -14,17 +14,17 @@ def extract_text_from_files(txt, chatbot, history):
final_result(list):文本内容
page_one(list):第一页内容/摘要
file_manifest(list):文件路径
-excption(string):需要用户手动处理的信息,如没出错则保持为空
+exception(string):需要用户手动处理的信息,如没出错则保持为空
"""
final_result = []
page_one = []
file_manifest = []
-excption = ""
+exception = ""
if txt == "":
final_result.append(txt)
-return False, final_result, page_one, file_manifest, excption #如输入区内容不是文件则直接返回输入区内容
+return False, final_result, page_one, file_manifest, exception #如输入区内容不是文件则直接返回输入区内容
#查找输入区内容中的文件
file_pdf,pdf_manifest,folder_pdf = get_files_from_everything(txt, '.pdf')
@@ -33,20 +33,20 @@ def extract_text_from_files(txt, chatbot, history):
file_doc,doc_manifest,folder_doc = get_files_from_everything(txt, '.doc')
if file_doc:
-excption = "word"
-return False, final_result, page_one, file_manifest, excption
+exception = "word"
+return False, final_result, page_one, file_manifest, exception
file_num = len(pdf_manifest) + len(md_manifest) + len(word_manifest)
if file_num == 0:
final_result.append(txt)
-return False, final_result, page_one, file_manifest, excption #如输入区内容不是文件则直接返回输入区内容
+return False, final_result, page_one, file_manifest, exception #如输入区内容不是文件则直接返回输入区内容
if file_pdf:
try: # 尝试导入依赖,如果缺少依赖,则给出安装建议
import fitz
except:
-excption = "pdf"
-return False, final_result, page_one, file_manifest, excption
+exception = "pdf"
+return False, final_result, page_one, file_manifest, exception
for index, fp in enumerate(pdf_manifest):
file_content, pdf_one = read_and_clean_pdf_text(fp) # 尝试按照章节切割PDF
file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
@@ -72,8 +72,8 @@ def extract_text_from_files(txt, chatbot, history):
try: # 尝试导入依赖,如果缺少依赖,则给出安装建议
from docx import Document
except:
-excption = "word_pip"
-return False, final_result, page_one, file_manifest, excption
+exception = "word_pip"
+return False, final_result, page_one, file_manifest, exception
for index, fp in enumerate(word_manifest):
doc = Document(fp)
file_content = '\n'.join([p.text for p in doc.paragraphs])
@@ -82,4 +82,4 @@ def extract_text_from_files(txt, chatbot, history):
final_result.append(file_content)
file_manifest.append(os.path.relpath(fp, folder_word))
-return True, final_result, page_one, file_manifest, excption
+return True, final_result, page_one, file_manifest, exception

View file

@@ -60,7 +60,7 @@ def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4
) -> List[Tuple[Document, float]]:
-def seperate_list(ls: List[int]) -> List[List[int]]:
+def separate_list(ls: List[int]) -> List[List[int]]:
lists = []
ls1 = [ls[0]]
for i in range(1, len(ls)):
@@ -82,7 +82,7 @@ def similarity_search_with_score_by_vector(
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
-if not self.chunk_conent:
+if not self.chunk_content:
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
doc.metadata["score"] = int(scores[0][j])
@@ -104,12 +104,12 @@ def similarity_search_with_score_by_vector(
id_set.add(l)
if break_flag:
break
-if not self.chunk_conent:
+if not self.chunk_content:
return docs
if len(id_set) == 0 and self.score_threshold > 0:
return []
id_list = sorted(list(id_set))
-id_lists = seperate_list(id_list)
+id_lists = separate_list(id_list)
for id_seq in id_lists:
for id in id_seq:
if id == id_seq[0]:
@@ -132,7 +132,7 @@ class LocalDocQA:
embeddings: object = None
top_k: int = VECTOR_SEARCH_TOP_K
chunk_size: int = CHUNK_SIZE
-chunk_conent: bool = True
+chunk_content: bool = True
score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
def init_cfg(self,
@@ -209,16 +209,16 @@ class LocalDocQA:
# query 查询内容
# vs_path 知识库路径
-# chunk_conent 是否启用上下文关联
+# chunk_content 是否启用上下文关联
# score_threshold 搜索匹配score阈值
# vector_search_top_k 搜索知识库内容条数,默认搜索5条结果
# chunk_sizes 匹配单段内容的连接上下文长度
-def get_knowledge_based_conent_test(self, query, vs_path, chunk_conent,
+def get_knowledge_based_content_test(self, query, vs_path, chunk_content,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_size=CHUNK_SIZE,
text2vec=None):
self.vector_store = FAISS.load_local(vs_path, text2vec)
-self.vector_store.chunk_conent = chunk_conent
+self.vector_store.chunk_content = chunk_content
self.vector_store.score_threshold = score_threshold
self.vector_store.chunk_size = chunk_size
@@ -241,7 +241,7 @@ class LocalDocQA:
-def construct_vector_store(vs_id, vs_path, files, sentence_size, history, one_conent, one_content_segmentation, text2vec):
+def construct_vector_store(vs_id, vs_path, files, sentence_size, history, one_content, one_content_segmentation, text2vec):
for file in files:
assert os.path.exists(file), "输入文件不存在:" + file
import nltk
@@ -297,7 +297,7 @@ class knowledge_archive_interface():
files=file_manifest,
sentence_size=100,
history=[],
-one_conent="",
+one_content="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
@@ -319,19 +319,19 @@ class knowledge_archive_interface():
files=[],
sentence_size=100,
history=[],
-one_conent="",
+one_content="",
one_content_segmentation="",
text2vec = self.get_chinese_text2vec(),
)
VECTOR_SEARCH_SCORE_THRESHOLD = 0
VECTOR_SEARCH_TOP_K = 4
CHUNK_SIZE = 512
-resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
+resp, prompt = self.qa_handle.get_knowledge_based_content_test(
query = txt,
vs_path = self.kai_path,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K,
-chunk_conent=True,
+chunk_content=True,
chunk_size=CHUNK_SIZE,
text2vec = self.get_chinese_text2vec(),
)

View file

@@ -1,6 +1,6 @@
from pydantic import BaseModel, Field
from typing import List
-from toolbox import update_ui_lastest_msg, disable_auto_promotion
+from toolbox import update_ui_latest_msg, disable_auto_promotion
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
import copy, json, pickle, os, sys, time
@@ -9,14 +9,14 @@ import copy, json, pickle, os, sys, time
def read_avail_plugin_enum():
from crazy_functional import get_crazy_functions
plugin_arr = get_crazy_functions()
# remove plugins with out explaination
# remove plugins without explanation
plugin_arr = {k:v for k, v in plugin_arr.items() if ('Info' in v) and ('Function' in v)}
plugin_arr_info = {"F_{:04d}".format(i):v["Info"] for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_dict = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_dict_parse = {"F_{:04d}".format(i):v for i, v in enumerate(plugin_arr.values(), start=1)}
plugin_arr_dict_parse.update({f"F_{i}":v for i, v in enumerate(plugin_arr.values(), start=1)})
prompt = json.dumps(plugin_arr_info, ensure_ascii=False, indent=2)
prompt = "\n\nThe defination of PluginEnum:\nPluginEnum=" + prompt
prompt = "\n\nThe definition of PluginEnum:\nPluginEnum=" + prompt
return prompt, plugin_arr_dict, plugin_arr_dict_parse
def wrap_code(txt):
@@ -55,7 +55,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
plugin_selection: str = Field(description="The most related plugin from one of the PluginEnum.", default="F_0000")
reason_of_selection: str = Field(description="The reason why you should select this plugin.", default="This plugin satisfies the user requirement most")
# ⭐ ⭐ ⭐ 选择插件
yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n查找可用插件中...", chatbot=chatbot, history=history, delay=0)
yield from update_ui_latest_msg(lastmsg=f"正在执行任务: {txt}\n\n查找可用插件中...", chatbot=chatbot, history=history, delay=0)
gpt_json_io = GptJsonIO(Plugin)
gpt_json_io.format_instructions = "The format of your output should be a json that can be parsed by json.loads.\n"
gpt_json_io.format_instructions += """Output example: {"plugin_selection":"F_1234", "reason_of_selection":"F_1234 plugin satisfy user requirement most"}\n"""
@@ -74,13 +74,13 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
msg += "请求的Prompt为\n" + wrap_code(get_inputs_show_user(inputs, plugin_arr_enum_prompt))
msg += "语言模型回复为:\n" + wrap_code(gpt_reply)
msg += "\n但您可以尝试再试一次\n"
yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
yield from update_ui_latest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
return
if plugin_sel.plugin_selection not in plugin_arr_dict_parse:
msg = f"抱歉, 找不到合适插件执行该任务, 或者{llm_kwargs['llm_model']}无法理解您的需求。"
msg += f"语言模型{llm_kwargs['llm_model']}选择了不存在的插件:\n" + wrap_code(gpt_reply)
msg += "\n但您可以尝试再试一次\n"
yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
yield from update_ui_latest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
return
# ⭐ ⭐ ⭐ 确认插件参数
@@ -90,7 +90,7 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
appendix_info = get_recent_file_prompt_support(chatbot)
plugin = plugin_arr_dict_parse[plugin_sel.plugin_selection]
yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n提取插件参数...", chatbot=chatbot, history=history, delay=0)
yield from update_ui_latest_msg(lastmsg=f"正在执行任务: {txt}\n\n提取插件参数...", chatbot=chatbot, history=history, delay=0)
class PluginExplicit(BaseModel):
plugin_selection: str = plugin_sel.plugin_selection
plugin_arg: str = Field(description="The argument of the plugin.", default="")
@@ -109,6 +109,6 @@ def execute_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
fn = plugin['Function']
fn_name = fn.__name__
msg = f'{llm_kwargs["llm_model"]}为您选择了插件: `{fn_name}`\n\n插件说明:{plugin["Info"]}\n\n插件参数:{plugin_sel.plugin_arg}\n\n假如偏离了您的要求,按停止键终止。'
yield from update_ui_lastest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
yield from update_ui_latest_msg(lastmsg=msg, chatbot=chatbot, history=history, delay=2)
yield from fn(plugin_sel.plugin_arg, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, -1)
return
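
For context, the plugin-selection step above asks the model for JSON and validates it against the pydantic `Plugin` schema. A self-contained sketch of that round-trip, using a plain `json.loads` helper as a stand-in for the project's `GptJsonIO`; the fence-stripping regex and the example reply are illustrative assumptions:

```python
import json
import re
from pydantic import BaseModel, Field

class Plugin(BaseModel):
    plugin_selection: str = Field(
        description="The most related plugin from one of the PluginEnum.",
        default="F_0000",
    )
    reason_of_selection: str = Field(
        description="The reason why you should select this plugin.",
        default="This plugin satisfies the user requirement most",
    )

def parse_plugin_reply(gpt_reply: str) -> Plugin:
    # The model sometimes wraps its JSON in markdown fences; strip them first.
    cleaned = re.sub(r"```json|```", "", gpt_reply).strip()
    return Plugin(**json.loads(cleaned))

reply = '{"plugin_selection": "F_1234", "reason_of_selection": "closest match"}'
print(parse_plugin_reply(reply).plugin_selection)  # -> F_1234
```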


@@ -1,6 +1,6 @@
from pydantic import BaseModel, Field
from typing import List
from toolbox import update_ui_lastest_msg, get_conf
from toolbox import update_ui_latest_msg, get_conf
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.json_fns.pydantic_io import GptJsonIO
import copy, json, pickle, os, sys
@@ -9,7 +9,7 @@ import copy, json, pickle, os, sys
def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG')
if not ALLOW_RESET_CONFIG:
yield from update_ui_lastest_msg(
yield from update_ui_latest_msg(
lastmsg=f"当前配置不允许被修改如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。",
chatbot=chatbot, history=history, delay=2
)
@@ -30,7 +30,7 @@ def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
new_option_value: str = Field(description="the new value of the option", default=None)
# ⭐ ⭐ ⭐ 分析用户意图
yield from update_ui_lastest_msg(lastmsg=f"正在执行任务: {txt}\n\n读取新配置中", chatbot=chatbot, history=history, delay=0)
yield from update_ui_latest_msg(lastmsg=f"正在执行任务: {txt}\n\n读取新配置中", chatbot=chatbot, history=history, delay=0)
gpt_json_io = GptJsonIO(ModifyConfigurationIntention)
inputs = "Analyze how to change configuration according to following user input, answer me with json: \n\n" + \
">> " + txt.rstrip('\n').replace('\n','\n>> ') + '\n\n' + \
@@ -44,11 +44,11 @@ def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
ok = (explicit_conf in txt)
if ok:
yield from update_ui_lastest_msg(
yield from update_ui_latest_msg(
lastmsg=f"正在执行任务: {txt}\n\n新配置{explicit_conf}={user_intention.new_option_value}",
chatbot=chatbot, history=history, delay=1
)
yield from update_ui_lastest_msg(
yield from update_ui_latest_msg(
lastmsg=f"正在执行任务: {txt}\n\n新配置{explicit_conf}={user_intention.new_option_value}\n\n正在修改配置中",
chatbot=chatbot, history=history, delay=2
)
@@ -57,25 +57,25 @@ def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
from toolbox import set_conf
set_conf(explicit_conf, user_intention.new_option_value)
yield from update_ui_lastest_msg(
yield from update_ui_latest_msg(
lastmsg=f"正在执行任务: {txt}\n\n配置修改完成,重新页面即可生效。", chatbot=chatbot, history=history, delay=1
)
else:
yield from update_ui_lastest_msg(
yield from update_ui_latest_msg(
lastmsg=f"失败,如果需要配置{explicit_conf},您需要明确说明并在指令中提到它。", chatbot=chatbot, history=history, delay=5
)
def modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG')
if not ALLOW_RESET_CONFIG:
yield from update_ui_lastest_msg(
yield from update_ui_latest_msg(
lastmsg=f"当前配置不允许被修改如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。",
chatbot=chatbot, history=history, delay=2
)
return
yield from modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention)
yield from update_ui_lastest_msg(
yield from update_ui_latest_msg(
lastmsg=f"正在执行任务: {txt}\n\n配置修改完成,五秒后即将重启!若出现报错请无视即可。", chatbot=chatbot, history=history, delay=5
)
os.execl(sys.executable, sys.executable, *sys.argv)
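
The reboot path ends in `os.execl(sys.executable, sys.executable, *sys.argv)`, which replaces the running process with a fresh interpreter launched on the same command line, so the new configuration is loaded without spawning a child. A minimal standalone illustration of the idiom; the delay and helper name are ours, not the plugin's:

```python
import os
import sys
import time

def restart_self(delay_sec: float = 5.0) -> None:
    """Re-exec the current script in place after a short delay.

    On success os.execl never returns: the process image is replaced,
    keeping the same PID and command-line arguments.
    """
    time.sleep(delay_sec)
    sys.stdout.flush()  # flush buffered output before the image is replaced
    os.execl(sys.executable, sys.executable, *sys.argv)
```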


@@ -5,7 +5,7 @@ class VoidTerminalState():
self.reset_state()
def reset_state(self):
self.has_provided_explaination = False
self.has_provided_explanation = False
def lock_plugin(self, chatbot):
chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->虚空终端'


@@ -1,4 +1,4 @@
from toolbox import CatchException, update_ui, update_ui_lastest_msg
from toolbox import CatchException, update_ui, update_ui_latest_msg
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection


@@ -15,7 +15,7 @@ Testing:
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder
from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg
from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_latest_msg
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
from crazy_functions.crazy_utils import input_clipping, try_install_deps
from crazy_functions.gen_fns.gen_fns_shared import is_function_successfully_generated
@@ -27,7 +27,7 @@ import time
import glob
import multiprocessing
templete = """
template = """
```python
import ... # Put dependencies here, e.g. import numpy as np.
@@ -77,10 +77,10 @@ def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):
# 第二步
prompt_compose = [
"If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n",
templete
"If previous stage is successful, rewrite the function you have just written to satisfy following template: \n",
template
]
i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. "
i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable template. "
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=i_say, inputs_show_user=inputs_show_user,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
@@ -164,18 +164,18 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
if get_plugin_arg(plugin_kwargs, key="file_path_arg", default=False):
file_path = get_plugin_arg(plugin_kwargs, key="file_path_arg", default=None)
file_list.append(file_path)
yield from update_ui_lastest_msg(f"当前文件: {file_path}", chatbot, history, 1)
yield from update_ui_latest_msg(f"当前文件: {file_path}", chatbot, history, 1)
elif have_any_recent_upload_files(chatbot):
file_dir = get_recent_file_prompt_support(chatbot)
file_list = glob.glob(os.path.join(file_dir, '**/*'), recursive=True)
yield from update_ui_lastest_msg(f"当前文件处理列表: {file_list}", chatbot, history, 1)
yield from update_ui_latest_msg(f"当前文件处理列表: {file_list}", chatbot, history, 1)
else:
chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
yield from update_ui_latest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
return # 2. 如果没有文件
if len(file_list) == 0:
chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
yield from update_ui_latest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
return # 2. 如果没有文件
# 读取文件
@@ -183,7 +183,7 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
# 粗心检查
if is_the_upload_folder(txt):
yield from update_ui_lastest_msg(f"请在输入框内填写需求, 然后再次点击该插件! 至于您的文件,不用担心, 文件路径 {txt} 已经被记忆. ", chatbot, history, 1)
yield from update_ui_latest_msg(f"请在输入框内填写需求, 然后再次点击该插件! 至于您的文件,不用担心, 文件路径 {txt} 已经被记忆. ", chatbot, history, 1)
return
# 开始干正事
@@ -195,7 +195,7 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \
yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history)
chatbot.append(["代码生成阶段结束", ""])
yield from update_ui_lastest_msg(f"正在验证上述代码的有效性 ...", chatbot, history, 1)
yield from update_ui_latest_msg(f"正在验证上述代码的有效性 ...", chatbot, history, 1)
# ⭐ 分离代码块
code = get_code_block(code)
# ⭐ 检查模块
@@ -206,11 +206,11 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
if not traceback: traceback = trimmed_format_exc()
# 处理异常
if not traceback: traceback = trimmed_format_exc()
yield from update_ui_lastest_msg(f"{j+1}/{MAX_TRY} 次代码生成尝试, 失败了~ 别担心, 我们5秒后再试一次... \n\n此次我们的错误追踪是\n```\n{traceback}\n```\n", chatbot, history, 5)
yield from update_ui_latest_msg(f"{j+1}/{MAX_TRY} 次代码生成尝试, 失败了~ 别担心, 我们5秒后再试一次... \n\n此次我们的错误追踪是\n```\n{traceback}\n```\n", chatbot, history, 5)
# 代码生成结束, 开始执行
TIME_LIMIT = 15
yield from update_ui_lastest_msg(f"开始创建新进程并执行代码! 时间限制 {TIME_LIMIT} 秒. 请等待任务完成... ", chatbot, history, 1)
yield from update_ui_latest_msg(f"开始创建新进程并执行代码! 时间限制 {TIME_LIMIT} 秒. 请等待任务完成... ", chatbot, history, 1)
manager = multiprocessing.Manager()
return_dict = manager.dict()
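
The execution stage hands the generated code to a child process and enforces `TIME_LIMIT`. A hedged sketch of that pattern — the result is handed back through a `Manager().dict()` and the process is terminated on overrun; the `payload` function is a placeholder for the generated code, not the plugin's actual runner:

```python
import multiprocessing
import time

def payload(return_dict):
    # Stand-in for the dynamically generated function under test.
    time.sleep(1)
    return_dict["result"] = 42

if __name__ == "__main__":
    TIME_LIMIT = 15  # seconds, mirroring the value in the hunk above
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    p = multiprocessing.Process(target=payload, args=(return_dict,))
    p.start()
    p.join(TIME_LIMIT)        # wait at most TIME_LIMIT seconds
    if p.is_alive():          # overran the budget: kill it
        p.terminate()
        p.join()
        print("execution timed out")
    else:
        print("result:", return_dict.get("result"))
```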


@@ -8,7 +8,7 @@
import time
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
from toolbox import get_conf, select_api_key, update_ui_lastest_msg, Singleton
from toolbox import get_conf, select_api_key, update_ui_latest_msg, Singleton
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
from crazy_functions.crazy_utils import input_clipping, try_install_deps
from crazy_functions.agent_fns.persistent import GradioMultiuserManagerForPersistentClasses


@@ -1,5 +1,5 @@
from toolbox import CatchException, report_exception, get_log_folder, gen_time_str
from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
from toolbox import update_ui, promote_file_to_downloadzone, update_ui_latest_msg, disable_auto_promotion
from toolbox import write_history_to_file, promote_file_to_downloadzone
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency


@@ -166,7 +166,7 @@ class PointWithTrace(Scene):
```
# do not use get_graph, this funciton is deprecated
# do not use get_graph, this function is deprecated
class ExampleFunctionGraph(Scene):
def construct(self):


@@ -324,16 +324,16 @@ def 生成多种Mermaid图表(
if os.path.exists(txt): # 如输入区无内容则直接解析历史记录
from crazy_functions.pdf_fns.parse_word import extract_text_from_files
file_exist, final_result, page_one, file_manifest, excption = (
file_exist, final_result, page_one, file_manifest, exception = (
extract_text_from_files(txt, chatbot, history)
)
else:
file_exist = False
excption = ""
exception = ""
file_manifest = []
if excption != "":
if excption == "word":
if exception != "":
if exception == "word":
report_exception(
chatbot,
history,
@@ -341,7 +341,7 @@ def 生成多种Mermaid图表(
b=f"找到了.doc文件,但是该文件格式不被支持,请先转化为.docx格式。",
)
elif excption == "pdf":
elif exception == "pdf":
report_exception(
chatbot,
history,
@@ -349,7 +349,7 @@ def 生成多种Mermaid图表(
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。",
)
elif excption == "word_pip":
elif exception == "word_pip":
report_exception(
chatbot,
history,


@@ -1,4 +1,4 @@
from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_lastest_msg, get_log_folder, get_user
from toolbox import CatchException, update_ui, ProxyNetworkActivate, update_ui_latest_msg, get_log_folder, get_user
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_files_from_everything
from loguru import logger
install_msg ="""
@@ -42,7 +42,7 @@ def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# from crazy_functions.crazy_utils import try_install_deps
# try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
# yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
# yield from update_ui_latest_msg("安装完成,您可以再次重试。", chatbot, history)
return
# < --------------------读取文件--------------- >
@@ -95,7 +95,7 @@ def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
# from crazy_functions.crazy_utils import try_install_deps
# try_install_deps(['zh_langchain==0.2.1', 'pypinyin'], reload_m=['pypinyin', 'zh_langchain'])
# yield from update_ui_lastest_msg("安装完成,您可以再次重试。", chatbot, history)
# yield from update_ui_latest_msg("安装完成,您可以再次重试。", chatbot, history)
return
# < ------------------- --------------- >


@@ -47,7 +47,7 @@ explain_msg = """
from pydantic import BaseModel, Field
from typing import List
from toolbox import CatchException, update_ui, is_the_upload_folder
from toolbox import update_ui_lastest_msg, disable_auto_promotion
from toolbox import update_ui_latest_msg, disable_auto_promotion
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from crazy_functions.crazy_utils import input_clipping
@@ -113,19 +113,19 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
# 用简单的关键词检测用户意图
is_certain, _ = analyze_intention_with_simple_rules(txt)
if is_the_upload_folder(txt):
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=False)
state.set_state(chatbot=chatbot, key='has_provided_explanation', value=False)
appendix_msg = "\n\n**很好,您已经上传了文件**,现在请您描述您的需求。"
if is_certain or (state.has_provided_explaination):
if is_certain or (state.has_provided_explanation):
# 如果意图明确,跳过提示环节
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True)
state.set_state(chatbot=chatbot, key='has_provided_explanation', value=True)
state.unlock_plugin(chatbot=chatbot)
yield from update_ui(chatbot=chatbot, history=history)
yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
return
else:
# 如果意图模糊,提示
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True)
state.set_state(chatbot=chatbot, key='has_provided_explanation', value=True)
state.lock_plugin(chatbot=chatbot)
chatbot.append(("虚空终端状态:", explain_msg+appendix_msg))
yield from update_ui(chatbot=chatbot, history=history)
@@ -141,7 +141,7 @@ def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
# ⭐ ⭐ ⭐ 分析用户意图
is_certain, user_intention = analyze_intention_with_simple_rules(txt)
if not is_certain:
yield from update_ui_lastest_msg(
yield from update_ui_latest_msg(
lastmsg=f"正在执行任务: {txt}\n\n分析用户意图中", chatbot=chatbot, history=history, delay=0)
gpt_json_io = GptJsonIO(UserIntention)
rf_req = "\nchoose from ['ModifyConfiguration', 'ExecutePlugin', 'Chat']"
@@ -154,13 +154,13 @@ def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
user_intention = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn)
lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}",
except JsonStringError as e:
yield from update_ui_lastest_msg(
yield from update_ui_latest_msg(
lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 失败 当前语言模型({llm_kwargs['llm_model']})不能理解您的意图", chatbot=chatbot, history=history, delay=0)
return
else:
pass
yield from update_ui_lastest_msg(
yield from update_ui_latest_msg(
lastmsg=f"正在执行任务: {txt}\n\n用户意图理解: 意图={explain_intention_to_user[user_intention.intention_type]}",
chatbot=chatbot, history=history, delay=0)
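
`analyze_intention_with_simple_rules` lets cheap keyword checks short-circuit the LLM call when the intent is already unambiguous. A hypothetical sketch of such a rule layer; the keywords and return convention here are illustrative guesses, not the plugin's real rules:

```python
from typing import Optional, Tuple

def analyze_intention_with_simple_rules(txt: str) -> Tuple[bool, Optional[str]]:
    """Return (is_certain, intention_type). Illustrative rules only."""
    lowered = txt.lower()
    if "config" in lowered or "设置" in txt:
        return True, "ModifyConfiguration"
    if any(k in lowered for k in ("translate", "arxiv", "pdf")):
        return True, "ExecutePlugin"
    return False, None  # ambiguous: fall through to the LLM-based analysis
```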


@@ -42,7 +42,7 @@ class AsyncGptTask():
MAX_TOKEN_ALLO = 2560
i_say, history = input_clipping(i_say, history, max_token_limit=MAX_TOKEN_ALLO)
gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt,
observe_window=observe_window[index], console_slience=True)
observe_window=observe_window[index], console_silence=True)
except ConnectionAbortedError as token_exceed_err:
logger.error('至少一个线程任务Token溢出而失败', token_exceed_err)
except Exception as e:
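
`observe_window` is the shared mutable list through which a caller watches a worker: by convention in this codebase, slot 0 accumulates the partial reply and slot 1 holds a watchdog timestamp. A simplified sketch of that producer/consumer handshake; the timings and the 5-second watchdog budget are assumptions, not the values used by `predict_no_ui_long_connection`:

```python
import threading
import time

def worker(observe_window):
    # Producer: stream partial output into slot 0 and abort
    # if the watchdog timestamp in slot 1 goes stale.
    for chunk in ("Hello", ", ", "world"):
        if time.time() - observe_window[1] > 5:
            raise TimeoutError("observer stopped watching")
        observe_window[0] += chunk
        time.sleep(0.1)

observe_window = ["", time.time()]
t = threading.Thread(target=worker, args=(observe_window,))
t.start()
while t.is_alive():
    observe_window[1] = time.time()  # keep the watchdog fresh
    time.sleep(0.2)
t.join()
print("final reply:", observe_window[0])
```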


@@ -1,6 +1,6 @@
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from toolbox import CatchException, report_exception, promote_file_to_downloadzone
from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file
from toolbox import update_ui, update_ui_latest_msg, disable_auto_promotion, write_history_to_file
import logging
import requests
import time
@@ -156,7 +156,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
history = []
meta_paper_info_list = yield from get_meta_information(txt, chatbot, history)
if len(meta_paper_info_list) == 0:
yield from update_ui_lastest_msg(lastmsg='获取文献失败,可能触发了google反爬虫机制。',chatbot=chatbot, history=history, delay=0)
yield from update_ui_latest_msg(lastmsg='获取文献失败,可能触发了google反爬虫机制。',chatbot=chatbot, history=history, delay=0)
return
batchsize = 5
for batch in range(math.ceil(len(meta_paper_info_list)/batchsize)):