Mirrored from https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-09 07:56:48 +00:00
Compare commits
37 commits
GHSA-3jrq-
...
bold_front
| Author | SHA1 | Commit date |
|---|---|---|
| | f945a7bd19 | |
| | 379dcb2fa7 | |
| | 30c905917a | |
| | 0c6c357e9c | |
| | 6cd2d80dfd | |
| | 18d3245fc9 | |
| | 194e665a3b | |
| | 7e201c5028 | |
| | 00e5a31b50 | |
| | d8b9686eeb | |
| | 25e06de1b6 | |
| | 0ad571e6b5 | |
| | ddad5247fc | |
| | ececfb9b6e | |
| | 9f13c5cedf | |
| | 68b36042ce | |
| | cac6c50d2f | |
| | f884eb43cf | |
| | d37383dd4e | |
| | dfae4e8081 | |
| | 15cc08505f | |
| | c5a82f6ab7 | |
| | 768ed4514a | |
| | 9dfbff7fd0 | |
| | 1e16485087 | |
| | f3660d669f | |
| | e6d1cb09cb | |
| | cdadd38cf7 | |
| | ba484c55a0 | |
| | 737101b81d | |
| | 612caa2f5f | |
| | 85dbe4a4bf | |
| | 2262a4d80a | |
| | b456ff02ab | |
| | 24a21ae320 | |
| | 3d5790cc2c | |
| | 7de6015800 | |
.gitignore (vendored, 6 changes)
@@ -153,6 +153,6 @@ media
flagged
request_llms/ChatGLM-6b-onnx-u8s8
.pre-commit-config.yaml
themes/common.js.min.*.js
test*
objdump*
test.html
objdump*
*.min.*.js
check_proxy.py

@@ -1,33 +1,44 @@
def check_proxy(proxies):
def check_proxy(proxies, return_ip=False):
    import requests
    proxies_https = proxies['https'] if proxies is not None else '无'
    ip = None
    try:
        response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
        data = response.json()
        if 'country_name' in data:
            country = data['country_name']
            result = f"代理配置 {proxies_https}, 代理所在地:{country}"
            if 'ip' in data: ip = data['ip']
        elif 'error' in data:
            alternative = _check_with_backup_source(proxies)
            alternative, ip = _check_with_backup_source(proxies)
            if alternative is None:
                result = f"代理配置 {proxies_https}, 代理所在地:未知,IP查询频率受限"
            else:
                result = f"代理配置 {proxies_https}, 代理所在地:{alternative}"
        else:
            result = f"代理配置 {proxies_https}, 代理数据解析失败:{data}"
        print(result)
        return result
        if not return_ip:
            print(result)
            return result
        else:
            return ip
    except:
        result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效"
        print(result)
        return result
        if not return_ip:
            print(result)
            return result
        else:
            return ip

def _check_with_backup_source(proxies):
    import random, string, requests
    random_string = ''.join(random.choices(string.ascii_letters + string.digits, k=32))
    try: return requests.get(f"http://{random_string}.edns.ip-api.com/json", proxies=proxies, timeout=4).json()['dns']['geo']
    except: return None
    try:
        res_json = requests.get(f"http://{random_string}.edns.ip-api.com/json", proxies=proxies, timeout=4).json()
        return res_json['dns']['geo'], res_json['dns']['ip']
    except:
        return None, None

def backup_and_download(current_version, remote_version):
    """
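The `return_ip` switch added above lets callers reuse the proxy probe as a plain IP lookup. A minimal usage sketch, assuming the file is importable from the project root and a requests-style proxies dict (the proxy address below is a placeholder, not project config):

```python
from check_proxy import check_proxy

proxies = {"http": "http://127.0.0.1:7890", "https": "http://127.0.0.1:7890"}

summary = check_proxy(proxies)             # prints and returns a location string
ip = check_proxy(proxies, return_ip=True)  # returns only the outbound IP, or None
print(ip)
```

This is the path `crazy_functions/Internet_GPT.py` takes below when it fills the `X-Real-IP` header.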
config.py (29 changes)
@@ -43,7 +43,7 @@ AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-p
# AVAIL_LLM_MODELS = [
#   "glm-4-0520", "glm-4-air", "glm-4-airx", "glm-4-flash",
#   "qianfan", "deepseekcoder",
#   "spark", "sparkv2", "sparkv3", "sparkv3.5",
#   "spark", "sparkv2", "sparkv3", "sparkv3.5", "sparkv4",
#   "qwen-turbo", "qwen-plus", "qwen-max", "qwen-local",
#   "moonshot-v1-128k", "moonshot-v1-32k", "moonshot-v1-8k",
#   "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0125", "gpt-4o-2024-05-13"

@@ -230,9 +230,15 @@ MOONSHOT_API_KEY = ""
# 零一万物(Yi Model) API KEY
YIMODEL_API_KEY = ""


# 深度求索(DeepSeek) API KEY,默认请求地址为"https://api.deepseek.com/v1/chat/completions"
DEEPSEEK_API_KEY = ""


# 紫东太初大模型 https://ai-maas.wair.ac.cn
TAICHU_API_KEY = ""


# Mathpix 拥有执行PDF的OCR功能,但是需要注册账号
MATHPIX_APPID = ""
MATHPIX_APPKEY = ""

@@ -263,6 +269,10 @@ GROBID_URLS = [
]


# Searxng互联网检索服务
SEARXNG_URL = "https://cloud-1.agent-matrix.com/"


# 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭
ALLOW_RESET_CONFIG = False

@@ -271,23 +281,23 @@ ALLOW_RESET_CONFIG = False
AUTOGEN_USE_DOCKER = False


# 临时的上传文件夹位置,请勿修改
# 临时的上传文件夹位置,请尽量不要修改
PATH_PRIVATE_UPLOAD = "private_upload"


# 日志文件夹的位置,请勿修改
# 日志文件夹的位置,请尽量不要修改
PATH_LOGGING = "gpt_log"


# 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改
# 存储翻译好的arxiv论文的路径,请尽量不要修改
ARXIV_CACHE_DIR = "gpt_log/arxiv_cache"


# 除了连接OpenAI之外,还有哪些场合允许使用代理,请尽量不要修改
WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
                     "Warmup_Modules", "Nougat_Download", "AutoGen"]


# *实验性功能*: 自动检测并屏蔽失效的KEY,请勿使用
BLOCK_INVALID_APIKEY = False


# 启用插件热加载
PLUGIN_HOT_RELOAD = False

@@ -384,6 +394,9 @@ NUM_CUSTOM_BASIC_BTN = 4

插件在线服务配置依赖关系示意图
│
├── 互联网检索
│   └── SEARXNG_URL
│
├── 语音功能
│   ├── ENABLE_AUDIO
│   ├── ALIYUN_TOKEN
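The new keys (DEEPSEEK_API_KEY, TAICHU_API_KEY, SEARXNG_URL, ARXIV_CACHE_DIR) follow the project's convention that a git-ignored `config_private.py` overrides `config.py`. A simplified sketch of that lookup, given for illustration only and not the project's actual `toolbox.get_conf`:

```python
import importlib

def read_single_conf(name: str):
    # config_private.py (if present) wins over config.py
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, name):
            return getattr(private, name)
    except ModuleNotFoundError:
        pass
    return getattr(importlib.import_module("config"), name)

# read_single_conf("SEARXNG_URL") -> "https://cloud-1.agent-matrix.com/"
```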
crazy_functional.py

@@ -43,13 +43,15 @@ def get_crazy_functions():
    from crazy_functions.Latex_Function import PDF翻译中文并重新编译PDF
    from crazy_functions.Latex_Function_Wrap import Arxiv_Localize
    from crazy_functions.Latex_Function_Wrap import PDF_Localize

    from crazy_functions.Internet_GPT import 连接网络回答问题
    from crazy_functions.Internet_GPT_Wrap import NetworkGPT_Wrap

    function_plugins = {
        "虚空终端": {
            "Group": "对话|编程|学术|智能体",
            "Color": "stop",
            "AsButton": True,
            "Info": "使用自然语言实现您的想法",
            "Function": HotReload(虚空终端),
        },
        "解析整个Python项目": {

@@ -87,10 +89,18 @@ def get_crazy_functions():
            "Function": None,
            "Class": Mermaid_Gen
        },
        "批量总结Word文档": {
        "Arxiv论文翻译": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": True,
            "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
            "Function": HotReload(Latex翻译中文并重新编译PDF), # 当注册Class后,Function旧接口仅会在“虚空终端”中起作用
            "Class": Arxiv_Localize, # 新一代插件需要注册Class
        },
        "批量总结Word文档": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": False,
            "Info": "批量总结word文档 | 输入参数为路径",
            "Function": HotReload(总结word文档),
        },

@@ -196,6 +206,7 @@ def get_crazy_functions():
        },
        "保存当前的对话": {
            "Group": "对话",
            "Color": "stop",
            "AsButton": True,
            "Info": "保存当前的对话 | 不需要输入参数",
            "Function": HotReload(对话历史存档), # 当注册Class后,Function旧接口仅会在“虚空终端”中起作用

@@ -203,13 +214,23 @@ def get_crazy_functions():
        },
        "[多线程Demo]解析此项目本身(源码自译解)": {
            "Group": "对话|编程",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "多线程解析并翻译此项目的源码 | 不需要输入参数",
            "Function": HotReload(解析项目本身),
        },
        "查互联网后回答": {
            "Group": "对话",
            "Color": "stop",
            "AsButton": True, # 加入下拉菜单中
            # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
            "Function": HotReload(连接网络回答问题),
            "Class": NetworkGPT_Wrap # 新一代插件需要注册Class
        },
        "历史上的今天": {
            "Group": "对话",
            "AsButton": True,
            "Color": "stop",
            "AsButton": False,
            "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数",
            "Function": None,
            "Class": Demo_Wrap, # 新一代插件需要注册Class

@@ -360,36 +381,36 @@ def get_crazy_functions():
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.联网的ChatGPT import 连接网络回答问题
    # try:
    #     from crazy_functions.联网的ChatGPT import 连接网络回答问题

        function_plugins.update(
            {
                "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False, # 加入下拉菜单中
                    # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
                    "Function": HotReload(连接网络回答问题),
                }
            }
        )
        from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
    #     function_plugins.update(
    #         {
    #             "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
    #                 "Group": "对话",
    #                 "Color": "stop",
    #                 "AsButton": False, # 加入下拉菜单中
    #                 # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
    #                 "Function": HotReload(连接网络回答问题),
    #             }
    #         }
    #     )
    #     from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题

        function_plugins.update(
            {
                "连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False, # 加入下拉菜单中
                    "Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题",
                    "Function": HotReload(连接bing搜索回答问题),
                }
            }
        )
    except:
        print(trimmed_format_exc())
        print("Load function plugin failed")
    #     function_plugins.update(
    #         {
    #             "连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
    #                 "Group": "对话",
    #                 "Color": "stop",
    #                 "AsButton": False, # 加入下拉菜单中
    #                 "Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题",
    #                 "Function": HotReload(连接bing搜索回答问题),
    #             }
    #         }
    #     )
    # except:
    #     print(trimmed_format_exc())
    #     print("Load function plugin failed")

    try:
        from crazy_functions.解析项目源代码 import 解析任意code项目

@@ -418,7 +439,7 @@ def get_crazy_functions():
        "询问多个GPT模型(手动指定询问哪些模型)": {
            "Group": "对话",
            "Color": "stop",
            "AsButton": False,
            "AsButton": True,
            "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
            "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&gpt-4", # 高级参数输入区的显示提示
            "Function": HotReload(同时问询_指定模型),
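For orientation, the shape of a single registration entry, distilled from the dictionaries above into a hypothetical example (the plugin name is invented; `HotReload` is assumed to be the same helper this file already uses):

```python
from toolbox import HotReload  # assumption: same helper the file imports elsewhere

def 示例插件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    yield  # a real plugin would stream UI updates here

function_plugins = {
    "示例插件": {
        "Group": "对话",      # plugin group(s) the entry appears under
        "Color": "stop",
        "AsButton": False,    # False -> dropdown menu only
        "Info": "示例插件 | 输入参数为一段文本",
        "Function": HotReload(示例插件),
        # "Class": ExampleWrap,  # new-generation plugins also register a Class
    },
}
```

When both are present, the diff's comments note that the legacy `Function` interface only remains active inside the 虚空终端 plugin.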
crazy_functions/对话历史存档.py

@@ -217,6 +217,4 @@ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot
        os.remove(f)
    chatbot.append([f"删除所有历史对话文件", f"已删除<br/>{local_history}"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    return


    return
crazy_functions/Internet_GPT.py

@@ -1,27 +1,46 @@
from toolbox import CatchException, update_ui
from toolbox import CatchException, update_ui, get_conf
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
import requests
from bs4 import BeautifulSoup
from request_llms.bridge_all import model_info
import urllib.request
import random
from functools import lru_cache

from check_proxy import check_proxy

@lru_cache
def get_auth_ip():
    try:
        external_ip = urllib.request.urlopen('https://v4.ident.me/').read().decode('utf8')
        return external_ip
    except:
        return '114.114.114.114'
    ip = check_proxy(None, return_ip=True)
    if ip is None:
        return '114.114.114.' + str(random.randint(1, 10))
    return ip

def searxng_request(query, proxies, categories='general', searxng_url=None, engines=None):
    if searxng_url is None:
        url = get_conf("SEARXNG_URL")
    else:
        url = searxng_url

    if engines is None:
        engines = 'bing'

    if categories == 'general':
        params = {
            'q': query, # 搜索查询
            'format': 'json', # 输出格式为JSON
            'language': 'zh', # 搜索语言
            'engines': engines,
        }
    elif categories == 'science':
        params = {
            'q': query, # 搜索查询
            'format': 'json', # 输出格式为JSON
            'language': 'zh', # 搜索语言
            'categories': 'science'
        }
    else:
        raise ValueError('不支持的检索类型')

def searxng_request(query, proxies):
    url = 'https://cloud-1.agent-matrix.com/' # 请替换为实际的API URL
    params = {
        'q': query, # 搜索查询
        'format': 'json', # 输出格式为JSON
        'language': 'zh', # 搜索语言
    }
    headers = {
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',

@@ -29,19 +48,23 @@ def searxng_request(query, proxies):
        'X-Real-IP': get_auth_ip()
    }
    results = []
    response = requests.post(url, params=params, headers=headers, proxies=proxies)
    response = requests.post(url, params=params, headers=headers, proxies=proxies, timeout=30)
    if response.status_code == 200:
        json_result = response.json()
        for result in json_result['results']:
            item = {
                "title": result["title"],
                "content": result["content"],
                "title": result.get("title", ""),
                "source": result.get("engines", "unknown"),
                "content": result.get("content", ""),
                "link": result["url"],
            }
            results.append(item)
        return results
    else:
        raise ValueError("搜索失败,状态码: " + str(response.status_code) + '\t' + response.content.decode('utf-8'))
        if response.status_code == 429:
            raise ValueError("Searxng(在线搜索服务)当前使用人数太多,请稍后。")
        else:
            raise ValueError("在线搜索失败,状态码: " + str(response.status_code) + '\t' + response.content.decode('utf-8'))

def scrape_text(url, proxies) -> str:
    """Scrape text from a webpage

@@ -72,44 +95,41 @@ def scrape_text(url, proxies) -> str:

@CatchException
def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    """
    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
    llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
    plugin_kwargs   插件模型的参数,暂时没有用武之地
    chatbot         聊天显示框的句柄,用于显示给用户
    history         聊天历史,前情提要
    system_prompt   给gpt的静默提醒
    user_request    当前用户的请求信息(IP地址等)
    """
    history = [] # 清空历史,以免输入溢出
    chatbot.append((f"请结合互联网信息回答以下问题:{txt}",
                    "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该模板可以实现ChatGPT联网信息综合。该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板。您若希望分享新的功能模组,请不吝PR!"))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
    chatbot.append((f"请结合互联网信息回答以下问题:{txt}", "检索中..."))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # ------------- < 第1步:爬取搜索引擎的结果 > -------------
    from toolbox import get_conf
    proxies = get_conf('proxies')
    urls = searxng_request(txt, proxies)
    categories = plugin_kwargs.get('categories', 'general')
    searxng_url = plugin_kwargs.get('searxng_url', None)
    engines = plugin_kwargs.get('engine', None)
    urls = searxng_request(txt, proxies, categories, searxng_url, engines=engines)
    history = []
    if len(urls) == 0:
        chatbot.append((f"结论:{txt}",
                        "[Local Message] 受到google限制,无法从google获取信息!"))
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
                        "[Local Message] 受到限制,无法从searxng获取信息!请尝试更换搜索引擎。"))
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    # ------------- < 第2步:依次访问网页 > -------------
    max_search_result = 5 # 最多收纳多少个网页的结果
    chatbot.append([f"联网检索中 ...", None])
    for index, url in enumerate(urls[:max_search_result]):
        res = scrape_text(url['link'], proxies)
        history.extend([f"第{index}份搜索结果:", res])
        chatbot.append([f"第{index}份搜索结果:", res[:500]+"......"])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
        prefix = f"第{index}份搜索结果 [源自{url['source'][0]}搜索] ({url['title'][:25]}):"
        history.extend([prefix, res])
        res_squeeze = res.replace('\n', '...')
        chatbot[-1] = [prefix + "\n\n" + res_squeeze[:500] + "......", None]
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # ------------- < 第3步:ChatGPT综合 > -------------
    i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}"
    i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token
        inputs=i_say,
        history=history,
        max_token_limit=model_info[llm_kwargs['llm_model']]['max_token']*3//4
        max_token_limit=min(model_info[llm_kwargs['llm_model']]['max_token']*3//4, 8192)
    )
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say, inputs_show_user=i_say,
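A usage sketch for the refactored `searxng_request`, assuming it runs inside the project so `get_conf("SEARXNG_URL")` resolves; the query string is an arbitrary example:

```python
from crazy_functions.Internet_GPT import searxng_request

results = searxng_request(
    "transformer positional encoding",
    proxies=None,          # or a requests-style proxies dict
    categories="general",  # "science" switches to the scholarly category
    engines="bing",        # passed straight through to SearXNG
)
for r in results[:3]:
    print(r["title"], r["source"], r["link"])
```

Note the new error path: HTTP 429 now yields a dedicated "too many users" message instead of the generic failure string.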
crazy_functions/Internet_GPT_Wrap.py (new file)

@@ -0,0 +1,44 @@

from toolbox import get_conf
from crazy_functions.Internet_GPT import 连接网络回答问题
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty


class NetworkGPT_Wrap(GptAcademicPluginTemplate):
    def __init__(self):
        """
        请注意`execute`会执行在不同的线程中,因此您在定义和使用类变量时,应当慎之又慎!
        """
        pass

    def define_arg_selection_menu(self):
        """
        定义插件的二级选项菜单

        第一个参数,名称`main_input`,参数`type`声明这是一个文本框,文本框上方显示`title`,文本框内部显示`description`,`default_value`为默认值;
        第二个参数,名称`advanced_arg`,参数`type`声明这是一个文本框,文本框上方显示`title`,文本框内部显示`description`,`default_value`为默认值;
        第三个参数,名称`allow_cache`,参数`type`声明这是一个下拉菜单,下拉菜单上方显示`title`+`description`,下拉菜单的选项为`options`,`default_value`为下拉菜单默认值;

        """
        gui_definition = {
            "main_input":
                ArgProperty(title="输入问题", description="待通过互联网检索的问题", default_value="", type="string").model_dump_json(), # 主输入,自动从输入框同步
            "categories":
                ArgProperty(title="搜索分类", options=["网页", "学术论文"], default_value="网页", description="无", type="dropdown").model_dump_json(),
            "engine":
                ArgProperty(title="选择搜索引擎", options=["bing", "google", "duckduckgo"], default_value="bing", description="无", type="dropdown").model_dump_json(),
            "searxng_url":
                ArgProperty(title="Searxng服务地址", description="输入Searxng的地址", default_value=get_conf("SEARXNG_URL"), type="string").model_dump_json(), # 主输入,自动从输入框同步
        }
        return gui_definition

    def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
        """
        执行插件
        """
        if plugin_kwargs["categories"] == "网页": plugin_kwargs["categories"] = "general"
        if plugin_kwargs["categories"] == "学术论文": plugin_kwargs["categories"] = "science"

        yield from 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
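The wrapper above is the new-generation plugin pattern: `define_arg_selection_menu` declares the secondary options menu, and `execute` maps the menu values onto `plugin_kwargs` before delegating. A pared-down sketch of the same pattern with invented names (the unusual `execute` signature without `self` mirrors the diff):

```python
from crazy_functions.plugin_template.plugin_class_template import (
    GptAcademicPluginTemplate, ArgProperty)

class Example_Wrap(GptAcademicPluginTemplate):
    def define_arg_selection_menu(self):
        return {
            "main_input": ArgProperty(
                title="输入", description="待处理文本",
                default_value="", type="string").model_dump_json(),
            "mode": ArgProperty(
                title="模式", options=["快速", "精细"], default_value="快速",
                description="无", type="dropdown").model_dump_json(),
        }

    def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
        # plugin_kwargs["mode"] carries the dropdown selection at call time
        yield
```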
crazy_functions/Latex_Function.py

@@ -4,7 +4,7 @@ from functools import partial
import glob, os, requests, time, json, tarfile

pj = os.path.join
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
ARXIV_CACHE_DIR = get_conf("ARXIV_CACHE_DIR")


# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

@@ -233,7 +233,7 @@ def pdf2tex_project(pdf_file_path, plugin_kwargs):
def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    # <-------------- information about this plugin ------------->
    chatbot.append(["函数插件功能?",
        "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
        "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # <-------------- more requirements ------------->

@@ -312,7 +312,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
    # <-------------- information about this plugin ------------->
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
        "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # <-------------- more requirements ------------->

@@ -408,7 +408,7 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
    # <-------------- information about this plugin ------------->
    chatbot.append([
        "函数插件功能?",
        "将PDF转换为Latex项目,翻译为中文后重新编译为PDF。函数插件贡献者: Marroh。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
        "将PDF转换为Latex项目,翻译为中文后重新编译为PDF。函数插件贡献者: Marroh。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # <-------------- more requirements ------------->

@@ -545,4 +545,4 @@ def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, h
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)

    # <-------------- we are done ------------->
    return success
    return success
crazy_functions/crazy_utils.py

@@ -1,9 +1,20 @@
from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token, Singleton
from shared_utils.char_visual_effect import scolling_visual_effect
import threading
import os
import logging

def input_clipping(inputs, history, max_token_limit):
    """
    当输入文本 + 历史文本超出最大限制时,采取措施丢弃一部分文本。
    输入:
    - inputs 本次请求
    - history 历史上下文
    - max_token_limit 最大token限制
    输出:
    - inputs 本次请求(经过clip)
    - history 历史上下文(经过clip)
    """
    import numpy as np
    from request_llms.bridge_all import model_info
    enc = model_info["gpt-3.5-turbo"]['tokenizer']

@@ -158,7 +169,7 @@ def can_multi_process(llm) -> bool:
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array, inputs_show_user_array, llm_kwargs,
        chatbot, history_array, sys_prompt_array,
        refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
        refresh_interval=0.2, max_workers=-1, scroller_max_len=75,
        handle_token_exceed=True, show_user_at_complete=False,
        retry_times_at_unknown_error=2,
        ):

@@ -283,6 +294,8 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
    futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
        range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
    cnt = 0


    while True:
        # yield一次以刷新前端页面
        time.sleep(refresh_interval)

@@ -295,8 +308,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
            mutable[thread_index][1] = time.time()
        # 在前端打印些好玩的东西
        for thread_index, _ in enumerate(worker_done):
            print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
                replace('\n', '').replace('`', '.').replace(' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
            print_something_really_funny = f"[ ...`{scolling_visual_effect(mutable[thread_index][0], scroller_max_len)}`... ]"
            observe_win.append(print_something_really_funny)
        # 在前端打印些好玩的东西
        stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
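The docstring added to `input_clipping` describes its contract only; the body is elided in this hunk. A simplified sketch of the idea, trimming the longest history entries first until inputs plus history fit the token budget (illustrative, not the project's exact algorithm; `count` stands in for the gpt-3.5-turbo tokenizer):

```python
def clip_history_sketch(inputs: str, history: list, max_token_limit: int, count=len):
    while history and count(inputs) + sum(count(h) for h in history) > max_token_limit:
        longest = max(range(len(history)), key=lambda i: count(history[i]))
        history[longest] = history[longest][: len(history[longest]) // 2]  # halve it
        if count(history[longest]) < 16:
            history.pop(longest)  # drop entries that became useless
    return inputs, history
```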
@@ -4,20 +4,28 @@ import pickle
class SafeUnpickler(pickle.Unpickler):

    def get_safe_classes(self):
        from .latex_actions import LatexPaperFileGroup, LatexPaperSplit
        from crazy_functions.latex_fns.latex_actions import LatexPaperFileGroup, LatexPaperSplit
        from crazy_functions.latex_fns.latex_toolbox import LinkedListNode
        # 定义允许的安全类
        safe_classes = {
            # 在这里添加其他安全的类
            'LatexPaperFileGroup': LatexPaperFileGroup,
            'LatexPaperSplit' : LatexPaperSplit,
            'LatexPaperSplit': LatexPaperSplit,
            'LinkedListNode': LinkedListNode,
        }
        return safe_classes

    def find_class(self, module, name):
        # 只允许特定的类进行反序列化
        self.safe_classes = self.get_safe_classes()
        if f'{module}.{name}' in self.safe_classes:
            return self.safe_classes[f'{module}.{name}']
        match_class_name = None
        for class_name in self.safe_classes.keys():
            if (class_name in f'{module}.{name}'):
                match_class_name = class_name
        if module == 'numpy' or module.startswith('numpy.'):
            return super().find_class(module, name)
        if match_class_name is not None:
            return self.safe_classes[match_class_name]
        # 如果尝试加载未授权的类,则抛出异常
        raise pickle.UnpicklingError(f"Attempted to deserialize unauthorized class '{name}' from module '{module}'")
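Usage sketch for the hardened unpickler above: it uses standard `pickle.Unpickler` mechanics, so every load goes through `find_class`, which now also allows anything from `numpy` while rejecting every other foreign class (the cache path below is hypothetical):

```python
import pickle

def safe_load(path: str):
    with open(path, "rb") as f:
        return SafeUnpickler(f).load()  # raises pickle.UnpicklingError on foreign classes

# obj = safe_load("gpt_log/arxiv_cache/2403.00001/paper_split.pkl")  # hypothetical path
```

Matching by substring (`class_name in f'{module}.{name}'`) keeps older pickles loadable after the whitelisted classes moved from relative to absolute module paths.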
@@ -104,6 +104,8 @@ def 解析PDF_DOC2X_单文件(fp, project_folder, llm_kwargs, plugin_kwargs, cha
            z_decoded = z_decoded[len("data: "):]
            decoded_json = json.loads(z_decoded)
            res_json.append(decoded_json)
            if 'limit exceeded' in decoded_json.get('status', ''):
                raise RuntimeError("Doc2x API 页数受限,请联系 Doc2x 方面,并更换新的 API 秘钥。")
    else:
        raise RuntimeError(format("[ERROR] status code: %d, body: %s" % (res.status_code, res.text)))
    uuid = res_json[0]['uuid']

@@ -161,8 +163,8 @@ def 解析PDF_DOC2X_单文件(fp, project_folder, llm_kwargs, plugin_kwargs, cha
        from shared_utils.advanced_markdown_format import markdown_convertion_for_file
        with open(generated_fp, "r", encoding="utf-8") as f:
            md = f.read()
        # Markdown中使用不标准的表格,需要在表格前加上一个emoji,以便公式渲染
        md = re.sub(r'^<table>', r'😃<table>', md, flags=re.MULTILINE)
        # # Markdown中使用不标准的表格,需要在表格前加上一个emoji,以便公式渲染
        # md = re.sub(r'^<table>', r'.<table>', md, flags=re.MULTILINE)
        html = markdown_convertion_for_file(md)
        with open(preview_fp, "w", encoding="utf-8") as f: f.write(html)
        chatbot.append([None, f"生成在线预览:{generate_file_link([preview_fp])}"])

@@ -182,7 +184,7 @@ def 解析PDF_DOC2X_单文件(fp, project_folder, llm_kwargs, plugin_kwargs, cha
        with open(generated_fp, 'r', encoding='utf8') as f: content = f.read()
        content = content.replace('```markdown', '\n').replace('```', '\n')
        # Markdown中使用不标准的表格,需要在表格前加上一个emoji,以便公式渲染
        content = re.sub(r'^<table>', r'😃<table>', content, flags=re.MULTILINE)
        # content = re.sub(r'^<table>', r'.<table>', content, flags=re.MULTILINE)
        with open(generated_fp, 'w', encoding='utf8') as f: f.write(content)
        # 生成在线预览html
        file_name = '在线预览翻译' + gen_time_str() + '.html'
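The emoji sentinel trick above is easy to check in isolation; this mirrors the exact substitution in the diff:

```python
import re

md = "before\n<table><tr><td>1</td></tr></table>"
md = re.sub(r'^<table>', r'😃<table>', md, flags=re.MULTILINE)
print(md)  # the table line now starts with 😃<table>, which keeps formula rendering intact
```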
@@ -3,6 +3,9 @@
# 从NVIDIA源,从而支持显卡(检查宿主的nvidia-smi中的cuda版本必须>=11.3)
FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest

# edge-tts需要的依赖,某些pip包所需的依赖
RUN apt update && apt install ffmpeg build-essential -y

# use python3 as the system default python
WORKDIR /gpt
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8

@@ -28,8 +31,6 @@ RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
RUN python3 -m pip install -r request_llms/requirements_newbing.txt
RUN python3 -m pip install nougat-ocr

# edge-tts需要的依赖
RUN apt update && apt install ffmpeg -y

# 预热Tiktoken模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

@@ -5,6 +5,9 @@
# 从NVIDIA源,从而支持显卡(检查宿主的nvidia-smi中的cuda版本必须>=11.3)
FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest

# edge-tts需要的依赖,某些pip包所需的依赖
RUN apt update && apt install ffmpeg build-essential -y

# use python3 as the system default python
WORKDIR /gpt
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8

@@ -36,8 +39,6 @@ RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
RUN python3 -m pip install -r request_llms/requirements_newbing.txt
RUN python3 -m pip install nougat-ocr

# edge-tts需要的依赖
RUN apt update && apt install ffmpeg -y

# 预热Tiktoken模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

@@ -5,6 +5,8 @@ RUN apt-get update
RUN apt-get install -y curl proxychains curl gcc
RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing

# edge-tts需要的依赖,某些pip包所需的依赖
RUN apt update && apt install ffmpeg build-essential -y

# use python3 as the system default python
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8

@@ -21,8 +23,6 @@ RUN python3 -m pip install -r request_llms/requirements_qwen.txt
RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
RUN python3 -m pip install -r request_llms/requirements_newbing.txt

# edge-tts需要的依赖
RUN apt update && apt install ffmpeg -y

# 预热Tiktoken模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
main.py (90 changes)
@@ -26,11 +26,12 @@ def enable_log(PATH_LOGGING):

def main():
    import gradio as gr
    if gr.__version__ not in ['3.32.9', '3.32.10']:
    if gr.__version__ not in ['3.32.9', '3.32.10', '3.32.11']:
        raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
    from request_llms.bridge_all import predict
    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
    # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址

    # 读取配置
    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
    CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
    ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU')

@@ -42,7 +43,7 @@ def main():
    PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
    from check_proxy import get_current_version
    from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_reset, js_code_show_or_hide, js_code_show_or_hide_group2
    from themes.theme import js_code_for_css_changing, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
    from themes.theme import js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
    from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid
    title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"

@@ -70,6 +71,7 @@ def main():
    from check_proxy import check_proxy, auto_update, warm_up_modules
    proxy_info = check_proxy(proxies)

    # 切换布局
    gr_L1 = lambda: gr.Row().style()
    gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id, min_width=400)
    if LAYOUT == "TOP-DOWN":

@@ -84,7 +86,7 @@ def main():
    with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as app_block:
        gr.HTML(title_html)
        secret_css = gr.Textbox(visible=False, elem_id="secret_css")
        register_advanced_plugin_init_code_arr = ""
        register_advanced_plugin_init_arr = ""

        cookies, web_cookie_cache = make_cookie_cache() # 定义 后端state(cookies)、前端(web_cookie_cache)两兄弟
        with gr_L1():

@@ -106,7 +108,7 @@ def main():
            with gr.Row():
                audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
            with gr.Row():
                status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
                status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。支持将文件直接粘贴到输入区。", elem_id="state-panel")

        with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
            with gr.Row():

@@ -127,13 +129,15 @@ def main():
            plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS,
                multiselect=True, interactive=True, elem_classes='normal_mut_select').style(container=False)
            with gr.Row():
                for k, plugin in plugins.items():
                for index, (k, plugin) in enumerate(plugins.items()):
                    if not plugin.get("AsButton", True): continue
                    visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False
                    variant = plugins[k]["Color"] if "Color" in plugin else "secondary"
                    info = plugins[k].get("Info", k)
                    btn_elem_id = f"plugin_btn_{index}"
                    plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant,
                        visible=visible, info_str=f'函数插件区: {info}').style(size="sm")
                        visible=visible, info_str=f'函数插件区: {info}', elem_id=btn_elem_id).style(size="sm")
                    plugin['ButtonElemId'] = btn_elem_id
            with gr.Row():
                with gr.Accordion("更多函数插件", open=True):
                    dropdown_fn_list = []

@@ -147,19 +151,22 @@ def main():
                    plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False, elem_id="advance_arg_input_legacy",
                        placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
                with gr.Row():
                    switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
                    switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary", elem_id="elem_switchy_bt").style(size="sm")
            with gr.Row():
                with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
                    file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")

        # 左上角工具栏定义
        from themes.gui_toolbar import define_gui_toolbar
        checkboxes, checkboxes_2, max_length_sl, theme_dropdown, system_prompt, file_upload_2, md_dropdown, top_p, temperature = \
            define_gui_toolbar(AVAIL_LLM_MODELS, LLM_MODEL, INIT_SYS_PROMPT, THEME, AVAIL_THEMES, ADD_WAIFU, help_menu_description, js_code_for_toggle_darkmode)

        # 浮动菜单定义
        from themes.gui_floating_menu import define_gui_floating_menu
        area_input_secondary, txt2, area_customize, submitBtn2, resetBtn2, clearBtn2, stopBtn2 = \
            define_gui_floating_menu(customize_btns, functional, predefined_btns, cookies, web_cookie_cache)

        # 插件二级菜单的实现
        from themes.gui_advanced_plugin_class import define_gui_advanced_plugin_class
        new_plugin_callback, route_switchy_bt_with_arg, usr_confirmed_arg = \
            define_gui_advanced_plugin_class(plugins)

@@ -217,35 +224,42 @@ def main():
        file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
        file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
        # 函数插件-固定按钮区
        def encode_plugin_info(k, plugin)->str:
            import copy
            from themes.theme import to_cookie_str
            plugin_ = copy.copy(plugin)
            plugin_.pop("Function", None)
            plugin_.pop("Class", None)
            plugin_.pop("Button", None)
            plugin_["Info"] = plugin.get("Info", k)
            if plugin.get("AdvancedArgs", False):
                plugin_["Label"] = f"插件[{k}]的高级参数说明:" + plugin.get("ArgsReminder", f"没有提供高级参数功能说明")
            else:
                plugin_["Label"] = f"插件[{k}]不需要高级参数。"
            return to_cookie_str(plugin_)

        # 插件的注册(前端代码注册)
        for k in plugins:
            register_advanced_plugin_init_arr += f"""register_plugin_init("{k}","{encode_plugin_info(k, plugins[k])}");"""
            if plugins[k].get("Class", None):
                plugins[k]["JsMenu"] = plugins[k]["Class"]().get_js_code_for_generating_menu(k)
                register_advanced_plugin_init_code_arr += """register_advanced_plugin_init_code("{k}","{gui_js}");""".format(k=k, gui_js=plugins[k]["JsMenu"])
                register_advanced_plugin_init_arr += """register_advanced_plugin_init_code("{k}","{gui_js}");""".format(k=k, gui_js=plugins[k]["JsMenu"])
            if not plugins[k].get("AsButton", True): continue
            if plugins[k].get("Class", None) is None:
                assert plugins[k].get("Function", None) is not None
                click_handle = plugins[k]["Button"].click(ArgsGeneralWrapper(plugins[k]["Function"]), [*input_combo], output_combo)
                click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]).then(None, [plugins[k]["Button"]], None, _js=r"(fn)=>on_plugin_exe_complete(fn)")
                cancel_handles.append(click_handle)
                click_handle = plugins[k]["Button"].click(None, inputs=[], outputs=None, _js=f"""()=>run_classic_plugin_via_id("{plugins[k]["ButtonElemId"]}")""")
            else:
                click_handle = plugins[k]["Button"].click(None, inputs=[], outputs=None, _js=f"""()=>run_advanced_plugin_launch_code("{k}")""")

        # 函数插件-下拉菜单与随变按钮的互动
        def on_dropdown_changed(k):
            variant = plugins[k]["Color"] if "Color" in plugins[k] else "secondary"
            info = plugins[k].get("Info", k)
            ret = {switchy_bt: gr.update(value=k, variant=variant, info_str=f'函数插件区: {info}')}
            if plugins[k].get("AdvancedArgs", False): # 是否唤起高级插件参数区
                ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + plugins[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))})
            else:
                ret.update({plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")})
            return ret
        dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt, plugin_advanced_arg] )
        # 函数插件-下拉菜单与随变按钮的互动(新版-更流畅)
        dropdown.select(None, [dropdown], None, _js=f"""(dropdown)=>run_dropdown_shift(dropdown)""")

        # 模型切换时的回调
        def on_md_dropdown_changed(k):
            return {chatbot: gr.update(label="当前模型:"+k)}
        md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] )
        md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot])

        # 主题修改
        def on_theme_dropdown_changed(theme, secret_css):
            adjust_theme, css_part1, _, adjust_dynamic_theme = load_dynamic_theme(theme)
            if adjust_dynamic_theme:

@@ -253,15 +267,8 @@ def main():
            else:
                css_part2 = adjust_theme()._get_theme_css()
            return css_part2 + css_part1

        theme_handle = theme_dropdown.select(on_theme_dropdown_changed, [theme_dropdown, secret_css], [secret_css])
        theme_handle.then(
            None,
            [secret_css],
            None,
            _js=js_code_for_css_changing
        )

        theme_handle = theme_dropdown.select(on_theme_dropdown_changed, [theme_dropdown, secret_css], [secret_css]) # , _js="""change_theme_prepare""")
        theme_handle.then(None, [theme_dropdown, secret_css], None, _js="""change_theme""")

        switchy_bt.click(None, [switchy_bt], None, _js="(switchy_bt)=>on_flex_button_click(switchy_bt)")
        # 随变按钮的回调函数注册

@@ -276,9 +283,10 @@ def main():
        click_handle_ng.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]).then(None, [switchy_bt], None, _js=r"(fn)=>on_plugin_exe_complete(fn)")
        cancel_handles.append(click_handle_ng)
        # 新一代插件的高级参数区确认按钮(隐藏)
        click_handle_ng = new_plugin_callback.click(route_switchy_bt_with_arg, [
            gr.State(["new_plugin_callback", "usr_confirmed_arg"] + input_combo_order),
            new_plugin_callback, usr_confirmed_arg, *input_combo
        click_handle_ng = new_plugin_callback.click(route_switchy_bt_with_arg,
            [
                gr.State(["new_plugin_callback", "usr_confirmed_arg"] + input_combo_order), # 第一个参数: 指定了后续参数的名称
                new_plugin_callback, usr_confirmed_arg, *input_combo # 后续参数: 真正的参数
            ], output_combo)
        click_handle_ng.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]).then(None, [switchy_bt], None, _js=r"(fn)=>on_plugin_exe_complete(fn)")
        cancel_handles.append(click_handle_ng)

@@ -298,6 +306,8 @@ def main():
            elif match_group(plugin['Group'], group_list): fns_list.append(k) # 刷新下拉列表
            return [*btn_list, gr.Dropdown.update(choices=fns_list)]
        plugin_group_sel.select(fn=on_group_change, inputs=[plugin_group_sel], outputs=[*[plugin['Button'] for name, plugin in plugins_as_btn.items()], dropdown])

        # 是否启动语音输入功能
        if ENABLE_AUDIO:
            from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution
            rad = RealtimeAudioDistribution()

@@ -305,18 +315,18 @@ def main():
            rad.feed(cookies['uuid'].hex, audio)
        audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])


        # 生成当前浏览器窗口的uuid(刷新失效)
        app_block.load(assign_user_uuid, inputs=[cookies], outputs=[cookies])

        # 初始化(前端)
        from shared_utils.cookie_manager import load_web_cookie_cache__fn_builder
        load_web_cookie_cache = load_web_cookie_cache__fn_builder(customize_btns, cookies, predefined_btns)
        app_block.load(load_web_cookie_cache, inputs = [web_cookie_cache, cookies],
            outputs = [web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init)

        app_block.load(None, inputs=[], outputs=None, _js=f"""()=>GptAcademicJavaScriptInit("{DARK_MODE}","{INIT_SYS_PROMPT}","{ADD_WAIFU}","{LAYOUT}","{TTS_TYPE}")""") # 配置暗色主题或亮色主题
        app_block.load(None, inputs=[], outputs=None, _js="""()=>{REP}""".replace("REP", register_advanced_plugin_init_code_arr))
        app_block.load(None, inputs=[], outputs=None, _js="""()=>{REP}""".replace("REP", register_advanced_plugin_init_arr))

    # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
    # Gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
    def run_delayed_tasks():
        import threading, webbrowser, time
        print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
request_llms/bridge_all.py

@@ -34,6 +34,9 @@ from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
from .bridge_zhipu import predict as zhipu_ui

from .bridge_taichu import predict_no_ui_long_connection as taichu_noui
from .bridge_taichu import predict as taichu_ui

from .bridge_cohere import predict as cohere_ui
from .bridge_cohere import predict_no_ui_long_connection as cohere_noui

@@ -116,6 +119,15 @@ model_info = {
        "token_cnt": get_token_num_gpt35,
    },

    "taichu": {
        "fn_with_ui": taichu_ui,
        "fn_without_ui": taichu_noui,
        "endpoint": openai_endpoint,
        "max_token": 4096,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },

    "gpt-3.5-turbo-16k": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,

@@ -183,6 +195,7 @@ model_info = {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "has_multimodal_capacity": True,
        "max_token": 128000,
        "tokenizer": tokenizer_gpt4,
        "token_cnt": get_token_num_gpt4,

@@ -191,6 +204,7 @@ model_info = {
    "gpt-4o-2024-05-13": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "has_multimodal_capacity": True,
        "endpoint": openai_endpoint,
        "max_token": 128000,
        "tokenizer": tokenizer_gpt4,

@@ -227,6 +241,7 @@ model_info = {
    "gpt-4-turbo": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "has_multimodal_capacity": True,
        "endpoint": openai_endpoint,
        "max_token": 128000,
        "tokenizer": tokenizer_gpt4,

@@ -236,6 +251,7 @@ model_info = {
    "gpt-4-turbo-2024-04-09": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "has_multimodal_capacity": True,
        "endpoint": openai_endpoint,
        "max_token": 128000,
        "tokenizer": tokenizer_gpt4,

@@ -844,6 +860,15 @@ if "sparkv3" in AVAIL_LLM_MODELS or "sparkv3.5" in AVAIL_LLM_MODELS: # 讯飞
            "max_token": 4096,
            "tokenizer": tokenizer_gpt35,
            "token_cnt": get_token_num_gpt35,
        },
        "sparkv4":{
            "fn_with_ui": spark_ui,
            "fn_without_ui": spark_noui,
            "can_multi_thread": True,
            "endpoint": None,
            "max_token": 4096,
            "tokenizer": tokenizer_gpt35,
            "token_cnt": get_token_num_gpt35,
        }
    })
except:

@@ -932,21 +957,31 @@ for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]:
    # "mixtral-8x7b" 是模型名(必要)
    # "(max_token=6666)" 是配置(非必要)
    try:
        _, max_token_tmp = read_one_api_model_name(model)
        origin_model_name, max_token_tmp = read_one_api_model_name(model)
        # 如果是已知模型,则尝试获取其信息
        original_model_info = model_info.get(origin_model_name.replace("one-api-", "", 1), None)
    except:
        print(f"one-api模型 {model} 的 max_token 配置不是整数,请检查配置文件。")
        continue
    model_info.update({
        model: {
            "fn_with_ui": chatgpt_ui,
            "fn_without_ui": chatgpt_noui,
            "can_multi_thread": True,
            "endpoint": openai_endpoint,
            "max_token": max_token_tmp,
            "tokenizer": tokenizer_gpt35,
            "token_cnt": get_token_num_gpt35,
        },
    })
    this_model_info = {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "can_multi_thread": True,
        "endpoint": openai_endpoint,
        "max_token": max_token_tmp,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    }

    # 同步已知模型的其他信息
    attribute = "has_multimodal_capacity"
    if original_model_info is not None and original_model_info.get(attribute, None) is not None: this_model_info.update({attribute: original_model_info.get(attribute, None)})
    # attribute = "attribute2"
    # if original_model_info is not None and original_model_info.get(attribute, None) is not None: this_model_info.update({attribute: original_model_info.get(attribute, None)})
    # attribute = "attribute3"
    # if original_model_info is not None and original_model_info.get(attribute, None) is not None: this_model_info.update({attribute: original_model_info.get(attribute, None)})
    model_info.update({model: this_model_info})

# -=-=-=-=-=-=- vllm 对齐支持 -=-=-=-=-=-=-
for model in [m for m in AVAIL_LLM_MODELS if m.startswith("vllm-")]:
    # 为了更灵活地接入vllm多模型管理界面,设计了此接口,例子:AVAIL_LLM_MODELS = ["vllm-/home/hmp/llm/cache/Qwen1___5-32B-Chat(max_token=6666)"]
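The one-api loop relies on `toolbox.read_one_api_model_name` to split the optional `(max_token=N)` suffix off the model name. A hedged re-implementation of that naming convention for illustration (the real helper may differ in details such as the default token budget):

```python
import re

def parse_model_name(model: str, default_max_token: int = 4096):
    m = re.match(r"^(.*?)(?:\(max_token=(\d+)\))?$", model)
    return m.group(1), int(m.group(2) or default_max_token)

print(parse_model_name("one-api-mixtral-8x7b(max_token=6666)"))
# -> ('one-api-mixtral-8x7b', 6666)
```

With `origin_model_name` now captured, the loop can copy attributes such as `has_multimodal_capacity` from the known base model into the one-api alias.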
@@ -1,5 +1,3 @@
|
||||
# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目
|
||||
|
||||
"""
|
||||
该文件中主要包含三个函数
|
||||
|
||||
@@ -11,19 +9,19 @@
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import gradio as gr
|
||||
import logging
|
||||
import traceback
|
||||
import requests
|
||||
import importlib
|
||||
import random
|
||||
|
||||
# config_private.py放自己的秘密如API和代理网址
|
||||
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
|
||||
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history
|
||||
from toolbox import trimmed_format_exc, is_the_upload_folder, read_one_api_model_name, log_chat
|
||||
from toolbox import ChatBotWithCookies
|
||||
from toolbox import ChatBotWithCookies, have_any_recent_upload_image_files, encode_image
|
||||
proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
|
||||
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')
|
||||
|
||||
@@ -41,6 +39,57 @@ def get_full_error(chunk, stream_response):
|
||||
break
|
||||
return chunk
|
||||
|
||||
def make_multimodal_input(inputs, image_paths):
|
||||
image_base64_array = []
|
||||
for image_path in image_paths:
|
||||
path = os.path.abspath(image_path)
|
||||
base64 = encode_image(path)
|
||||
inputs = inputs + f'<br/><br/><div align="center"><img src="file={path}" base64="{base64}"></div>'
|
||||
image_base64_array.append(base64)
|
||||
return inputs, image_base64_array
|
||||
|
||||
def reverse_base64_from_input(inputs):
|
||||
# 定义一个正则表达式来匹配 Base64 字符串(假设格式为 base64="<Base64编码>")
|
||||
# pattern = re.compile(r'base64="([^"]+)"></div>')
|
||||
pattern = re.compile(r'<br/><br/><div align="center"><img[^<>]+base64="([^"]+)"></div>')
|
||||
# 使用 findall 方法查找所有匹配的 Base64 字符串
|
||||
base64_strings = pattern.findall(inputs)
|
||||
# 返回反转后的 Base64 字符串列表
|
||||
return base64_strings
|
||||
|
||||
def contain_base64(inputs):
|
||||
base64_strings = reverse_base64_from_input(inputs)
|
||||
return len(base64_strings) > 0
|
||||
|
||||
def append_image_if_contain_base64(inputs):
|
||||
if not contain_base64(inputs):
|
||||
return inputs
|
||||
else:
|
||||
image_base64_array = reverse_base64_from_input(inputs)
|
||||
pattern = re.compile(r'<br/><br/><div align="center"><img[^><]+></div>')
|
||||
inputs = re.sub(pattern, '', inputs)
|
||||
res = []
|
||||
res.append({
|
||||
"type": "text",
|
||||
"text": inputs
|
||||
})
|
||||
for image_base64 in image_base64_array:
|
||||
res.append({
|
||||
"type": "image_url",
|
||||
"image_url": {
|
||||
"url": f"data:image/jpeg;base64,{image_base64}"
|
||||
}
|
||||
})
|
||||
return res
|
||||
|
||||
def remove_image_if_contain_base64(inputs):
|
||||
if not contain_base64(inputs):
|
||||
return inputs
|
||||
else:
|
||||
pattern = re.compile(r'<br/><br/><div align="center"><img[^><]+></div>')
|
||||
inputs = re.sub(pattern, '', inputs)
|
||||
return inputs
|
||||
|
||||
def decode_chunk(chunk):
|
||||
# 提前读取一些信息 (用于判断异常)
|
||||
chunk_decoded = chunk.decode()
|
||||
@@ -159,6 +208,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
|
||||
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
|
||||
additional_fn代表点击的哪个按钮,按钮见functional.py
|
||||
"""
|
||||
from .bridge_all import model_info
|
||||
if is_any_api_key(inputs):
|
||||
chatbot._cookies['api_key'] = inputs
|
||||
chatbot.append(("输入已识别为openai的api_key", what_keys(inputs)))
|
||||
@@ -174,7 +224,17 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
|
||||
from core_functional import handle_core_functionality
|
||||
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
|
||||
|
||||
chatbot.append((inputs, ""))
|
||||
# 多模态模型
|
||||
has_multimodal_capacity = model_info[llm_kwargs['llm_model']].get('has_multimodal_capacity', False)
|
||||
if has_multimodal_capacity:
|
||||
has_recent_image_upload, image_paths = have_any_recent_upload_image_files(chatbot, pop=True)
|
||||
else:
|
||||
has_recent_image_upload, image_paths = False, []
|
||||
if has_recent_image_upload:
|
||||
_inputs, image_base64_array = make_multimodal_input(inputs, image_paths)
|
||||
else:
|
||||
_inputs, image_base64_array = inputs, []
|
||||
chatbot.append((_inputs, ""))
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
||||
|
||||
# check mis-behavior
|
||||
@@ -184,7 +244,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
|
||||
time.sleep(2)
|
||||
|
||||
try:
|
||||
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
|
||||
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, image_base64_array, has_multimodal_capacity, stream)
|
||||
except RuntimeError as e:
|
||||
chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
|
||||
@@ -192,7 +252,6 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
|
||||
|
||||
# 检查endpoint是否合法
|
||||
try:
|
||||
from .bridge_all import model_info
|
||||
endpoint = verify_endpoint(model_info[llm_kwargs['llm_model']]['endpoint'])
|
||||
except:
|
||||
tb_str = '```\n' + trimmed_format_exc() + '```'
|
||||
@@ -200,7 +259,11 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
|
||||
yield from update_ui(chatbot=chatbot, history=history, msg="Endpoint不满足要求") # 刷新界面
|
||||
return
|
||||
|
||||
history.append(inputs); history.append("")
|
||||
# 加入历史
|
||||
if has_recent_image_upload:
|
||||
history.extend([_inputs, ""])
|
||||
else:
|
||||
history.extend([inputs, ""])
|
||||
|
||||
retry = 0
|
||||
while True:
|
||||
@@ -314,7 +377,7 @@ def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
|
||||
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
|
||||
return chatbot, history
|
||||
|
||||
def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
|
||||
def generate_payload(inputs:str, llm_kwargs:dict, history:list, system_prompt:str, image_base64_array:list=[], has_multimodal_capacity:bool=False, stream:bool=True):
    """
    Integrate all information, select the LLM model, and generate the HTTP request, preparing it to be sent
    """
@@ -337,29 +400,74 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
        azure_api_key_unshared = AZURE_CFG_ARRAY[llm_kwargs['llm_model']]["AZURE_API_KEY"]
        headers.update({"api-key": azure_api_key_unshared})

    conversation_cnt = len(history) // 2
    if has_multimodal_capacity:
        # Multimodal capability is enabled when the following conditions hold:
        # 1. the model itself is multimodal (has_multimodal_capacity), and
        # 2. the input contains images (len(image_base64_array) > 0), or
        # 3. the history contains images ( any([contain_base64(h) for h in history]) )
        enable_multimodal_capacity = (len(image_base64_array) > 0) or any([contain_base64(h) for h in history])
    else:
        enable_multimodal_capacity = False

    if not enable_multimodal_capacity:
        # do not use multimodal capability
        conversation_cnt = len(history) // 2
        messages = [{"role": "system", "content": system_prompt}]
        if conversation_cnt:
            for index in range(0, 2*conversation_cnt, 2):
                what_i_have_asked = {}
                what_i_have_asked["role"] = "user"
                what_i_have_asked["content"] = remove_image_if_contain_base64(history[index])
                what_gpt_answer = {}
                what_gpt_answer["role"] = "assistant"
                what_gpt_answer["content"] = remove_image_if_contain_base64(history[index+1])
                if what_i_have_asked["content"] != "":
                    if what_gpt_answer["content"] == "": continue
                    if what_gpt_answer["content"] == timeout_bot_msg: continue
                    messages.append(what_i_have_asked)
                    messages.append(what_gpt_answer)
                else:
                    messages[-1]['content'] = what_gpt_answer['content']
        what_i_ask_now = {}
        what_i_ask_now["role"] = "user"
        what_i_ask_now["content"] = inputs
        messages.append(what_i_ask_now)
    else:
        # use multimodal capability
        conversation_cnt = len(history) // 2
        messages = [{"role": "system", "content": system_prompt}]
        if conversation_cnt:
            for index in range(0, 2*conversation_cnt, 2):
                what_i_have_asked = {}
                what_i_have_asked["role"] = "user"
                what_i_have_asked["content"] = append_image_if_contain_base64(history[index])
                what_gpt_answer = {}
                what_gpt_answer["role"] = "assistant"
                what_gpt_answer["content"] = append_image_if_contain_base64(history[index+1])
                if what_i_have_asked["content"] != "":
                    if what_gpt_answer["content"] == "": continue
                    if what_gpt_answer["content"] == timeout_bot_msg: continue
                    messages.append(what_i_have_asked)
                    messages.append(what_gpt_answer)
                else:
                    messages[-1]['content'] = what_gpt_answer['content']
        what_i_ask_now = {}
        what_i_ask_now["role"] = "user"
        what_i_ask_now["content"] = []
        what_i_ask_now["content"].append({
            "type": "text",
            "text": inputs
        })
        for image_base64 in image_base64_array:
            what_i_ask_now["content"].append({
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{image_base64}"
                }
            })
        messages.append(what_i_ask_now)

    messages = [{"role": "system", "content": system_prompt}]
    if conversation_cnt:
        for index in range(0, 2*conversation_cnt, 2):
            what_i_have_asked = {}
            what_i_have_asked["role"] = "user"
            what_i_have_asked["content"] = history[index]
            what_gpt_answer = {}
            what_gpt_answer["role"] = "assistant"
            what_gpt_answer["content"] = history[index+1]
            if what_i_have_asked["content"] != "":
                if what_gpt_answer["content"] == "": continue
                if what_gpt_answer["content"] == timeout_bot_msg: continue
                messages.append(what_i_have_asked)
                messages.append(what_gpt_answer)
            else:
                messages[-1]['content'] = what_gpt_answer['content']

    what_i_ask_now = {}
    what_i_ask_now["role"] = "user"
    what_i_ask_now["content"] = inputs
    messages.append(what_i_ask_now)
    model = llm_kwargs['llm_model']
    if llm_kwargs['llm_model'].startswith('api2d-'):
        model = llm_kwargs['llm_model'][len('api2d-'):]
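
For reference, a minimal sketch (not part of the diff) of the OpenAI-style message list the multimodal branch above assembles; the base64 string is a placeholder, not real image data:

# Illustrative sketch of the payload layout produced by the multimodal branch.
image_base64 = "iVBORw0KGgo..."  # placeholder base64 string
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": [
        {"type": "text", "text": "What is in this picture?"},
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"}},
    ]},
]
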
@@ -27,10 +27,8 @@ timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check


def report_invalid_key(key):
    if get_conf("BLOCK_INVALID_APIKEY"):
        # experimental feature: automatically detect and block invalid API keys; do not use
        from request_llms.key_manager import ApiKeyManager
        api_key = ApiKeyManager().add_key_to_blacklist(key)
    # deprecated feature
    return

def get_full_error(chunk, stream_response):
    """

@@ -1,7 +1,7 @@
import time
import os
from toolbox import update_ui, get_conf, update_ui_lastest_msg
from toolbox import check_packages, report_exception
from toolbox import check_packages, report_exception, log_chat

model_name = 'Qwen'

@@ -59,6 +59,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
        chatbot[-1] = (inputs, response)
        yield from update_ui(chatbot=chatbot, history=history)

    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
    # summarize the output
    if response == f"[Local Message] 等待{model_name}响应中 ...":
        response = f"[Local Message] {model_name}响应异常 ..."

72 request_llms/bridge_taichu.py Normal file
@@ -0,0 +1,72 @@
import time
import os
from toolbox import update_ui, get_conf, update_ui_lastest_msg, log_chat
from toolbox import check_packages, report_exception, have_any_recent_upload_image_files
from toolbox import ChatBotWithCookies

# model_name = 'Taichu-2.0'
# taichu_default_model = 'taichu_llm'

def validate_key():
    TAICHU_API_KEY = get_conf("TAICHU_API_KEY")
    if TAICHU_API_KEY == '': return False
    return True

def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
                                  observe_window:list=[], console_slience:bool=False):
    """
    ⭐ Multithreaded entry point
    See request_llms/bridge_all.py for documentation of this function
    """
    watch_dog_patience = 5
    response = ""

    # if llm_kwargs["llm_model"] == "taichu":
    #     llm_kwargs["llm_model"] = "taichu"

    if validate_key() is False:
        raise RuntimeError('请配置 TAICHU_API_KEY')

    # start receiving the reply
    from .com_taichu import TaichuChatInit
    zhipu_bro_init = TaichuChatInit()
    for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, sys_prompt):
        if len(observe_window) >= 1:
            observe_window[0] = response
        if len(observe_window) >= 2:
            if (time.time() - observe_window[1]) > watch_dog_patience:
                raise RuntimeError("程序终止。")
    return response


def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
    """
    ⭐ Single-threaded entry point
    See request_llms/bridge_all.py for documentation of this function
    """
    chatbot.append([inputs, ""])
    yield from update_ui(chatbot=chatbot, history=history)

    if validate_key() is False:
        yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置 TAICHU_API_KEY", chatbot=chatbot, history=history, delay=0)
        return

    if additional_fn is not None:
        from core_functional import handle_core_functionality
        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
        chatbot[-1] = [inputs, ""]
        yield from update_ui(chatbot=chatbot, history=history)

    # if llm_kwargs["llm_model"] == "taichu":
    #     llm_kwargs["llm_model"] = taichu_default_model

    # start receiving the reply
    from .com_taichu import TaichuChatInit
    zhipu_bro_init = TaichuChatInit()
    for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, system_prompt):
        chatbot[-1] = [inputs, response]
        yield from update_ui(chatbot=chatbot, history=history)
    history.extend([inputs, response])
    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
    yield from update_ui(chatbot=chatbot, history=history)
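
The observe_window convention used above (slot 0 carries the partial reply, slot 1 the last keep-alive timestamp fed by the caller) can be exercised with a self-contained sketch; the fake stream below stands in for TaichuChatInit().generate_chat(...):

import time

watch_dog_patience = 5
observe_window = ["", time.time()]

def consume(gen):
    for chunk, response in gen:
        observe_window[0] = response                    # expose the partial reply
        if time.time() - observe_window[1] > watch_dog_patience:
            raise RuntimeError("watchdog timeout")      # the caller stopped feeding slot 1

def fake_stream():
    # stand-in generator: yields (delta, accumulated_response) pairs like the bridge does
    for partial in ["hello", "hello world"]:
        yield partial, partial

consume(fake_stream())
print(observe_window[0])  # -> "hello world"
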
@@ -65,8 +65,12 @@ class QwenRequestInstance():
                self.result_buf += f"[Local Message] 请求错误:状态码:{response.status_code},错误码:{response.code},消息:{response.message}"
                yield self.result_buf
                break
        logging.info(f'[raw_input] {inputs}')
        logging.info(f'[response] {self.result_buf}')

        # exhaust the generator to avoid errors
        while True:
            try: next(responses)
            except: break

        return self.result_buf


@@ -67,6 +67,7 @@ class SparkRequestInstance():
        self.gpt_url_v3 = "ws://spark-api.xf-yun.com/v3.1/chat"
        self.gpt_url_v35 = "wss://spark-api.xf-yun.com/v3.5/chat"
        self.gpt_url_img = "wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image"
        self.gpt_url_v4 = "wss://spark-api.xf-yun.com/v4.0/chat"

        self.time_to_yield_event = threading.Event()
        self.time_to_exit_event = threading.Event()
@@ -94,6 +95,8 @@ class SparkRequestInstance():
            gpt_url = self.gpt_url_v3
        elif llm_kwargs['llm_model'] == 'sparkv3.5':
            gpt_url = self.gpt_url_v35
        elif llm_kwargs['llm_model'] == 'sparkv4':
            gpt_url = self.gpt_url_v4
        else:
            gpt_url = self.gpt_url
        file_manifest = []
@@ -194,6 +197,7 @@ def gen_params(appid, inputs, llm_kwargs, history, system_prompt, file_manifest)
        "sparkv2": "generalv2",
        "sparkv3": "generalv3",
        "sparkv3.5": "generalv3.5",
        "sparkv4": "4.0Ultra"
    }
    domains_select = domains[llm_kwargs['llm_model']]
    if file_manifest: domains_select = 'image'
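
Taken together, the two hunks above mean a Spark model name resolves to both a websocket endpoint and an API "domain". A small sketch with the values copied from the diff (the helper itself is illustrative, not project code):

SPARK_URLS = {
    "sparkv3.5": "wss://spark-api.xf-yun.com/v3.5/chat",
    "sparkv4": "wss://spark-api.xf-yun.com/v4.0/chat",
}
SPARK_DOMAINS = {
    "sparkv3.5": "generalv3.5",
    "sparkv4": "4.0Ultra",
}

def resolve_spark_target(llm_model: str):
    # map a model name to its (websocket endpoint, domain) pair
    return SPARK_URLS[llm_model], SPARK_DOMAINS[llm_model]

print(resolve_spark_target("sparkv4"))
# -> ('wss://spark-api.xf-yun.com/v4.0/chat', '4.0Ultra')
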
55 request_llms/com_taichu.py Normal file
@@ -0,0 +1,55 @@
# encoding: utf-8
# @Time   : 2024/1/22
# @Author : Kilig947 & binary husky
# @Descr  : Compatible with the latest Zhipu AI
from toolbox import get_conf
from toolbox import get_conf, encode_image, get_pictures_list
import logging, os, requests
import json

class TaichuChatInit:
    def __init__(self): ...

    def __conversation_user(self, user_input: str, llm_kwargs:dict):
        return {"role": "user", "content": user_input}

    def __conversation_history(self, history:list, llm_kwargs:dict):
        messages = []
        conversation_cnt = len(history) // 2
        if conversation_cnt:
            for index in range(0, 2 * conversation_cnt, 2):
                what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
                what_gpt_answer = {
                    "role": "assistant",
                    "content": history[index + 1]
                }
                messages.append(what_i_have_asked)
                messages.append(what_gpt_answer)
        return messages

    def generate_chat(self, inputs:str, llm_kwargs:dict, history:list, system_prompt:str):
        TAICHU_API_KEY = get_conf("TAICHU_API_KEY")
        params = {
            'api_key': TAICHU_API_KEY,
            'model_code': 'taichu_llm',
            'question': '\n\n'.join(history) + inputs,
            'prefix': system_prompt,
            'temperature': llm_kwargs.get('temperature', 0.95),
            'stream_format': 'json'
        }

        api = 'https://ai-maas.wair.ac.cn/maas/v1/model_api/invoke'
        response = requests.post(api, json=params, stream=True)
        results = ""
        if response.status_code == 200:
            response.encoding = 'utf-8'
            for line in response.iter_lines(decode_unicode=True):
                delta = json.loads(line)['choices'][0]['text']
                results += delta
                yield delta, results
        else:
            raise ValueError


if __name__ == '__main__':
    zhipu = TaichuChatInit()
    # iterate, since generate_chat is a generator and does nothing until consumed
    for chunk, response in zhipu.generate_chat('你好', {'llm_model': 'glm-4'}, [], '你是WPSAi'):
        print(response)
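
A usage sketch for the streaming interface above (requires TAICHU_API_KEY to be configured and network access; the generator yields (delta, accumulated_response) pairs, so the last `partial` is the full reply):

from request_llms.com_taichu import TaichuChatInit

bot = TaichuChatInit()
final = ""
for delta, partial in bot.generate_chat("你好", {"llm_model": "taichu"}, [], "You are a helpful assistant."):
    final = partial  # `partial` already accumulates all deltas so far
print(final)
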
@@ -44,7 +44,8 @@ def decode_chunk(chunk):
    try:
        chunk = json.loads(chunk[6:])
    except:
        finish_reason = "JSON_ERROR"
        respose = "API_ERROR"
        finish_reason = chunk
    # error handling section
    if "error" in chunk:
        respose = "API_ERROR"

@@ -46,6 +46,16 @@ code_highlight_configs_block_mermaid = {
    },
}


mathpatterns = {
    r"(?<!\\|\$)(\$)([^\$]+)(\$)": {"allow_multi_lines": False},  # $...$
    r"(?<!\\)(\$\$)([^\$]+)(\$\$)": {"allow_multi_lines": True},  # $$...$$
    r"(?<!\\)(\\\[)(.+?)(\\\])": {"allow_multi_lines": False},  # \[...\]
    r'(?<!\\)(\\\()(.+?)(\\\))': {'allow_multi_lines': False},  # \(...\)
    # r'(?<!\\)(\\begin{([a-z]+?\*?)})(.+?)(\\end{\2})': {'allow_multi_lines': True},  # \begin...\end
    # r'(?<!\\)(\$`)([^`]+)(`\$)': {'allow_multi_lines': False},  # $`...`$
}

def tex2mathml_catch_exception(content, *args, **kwargs):
    try:
        content = tex2mathml(content, *args, **kwargs)
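
A quick check (not part of the diff) of the inline-dollar pattern from mathpatterns above; group 2 captures the TeX between the delimiters:

import re

pattern = r"(?<!\\|\$)(\$)([^\$]+)(\$)"   # the $...$ pattern from mathpatterns
sample = r"Euler's identity $e^{i\pi}+1=0$ is inline math."
m = re.search(pattern, sample, flags=re.ASCII)
print(m.group(2))  # -> e^{i\pi}+1=0
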
@@ -96,14 +106,7 @@ def is_equation(txt):
        return False
    if "$" not in txt and "\\[" not in txt:
        return False
    mathpatterns = {
        r"(?<!\\|\$)(\$)([^\$]+)(\$)": {"allow_multi_lines": False},  # $...$
        r"(?<!\\)(\$\$)([^\$]+)(\$\$)": {"allow_multi_lines": True},  # $$...$$
        r"(?<!\\)(\\\[)(.+?)(\\\])": {"allow_multi_lines": False},  # \[...\]
        # r'(?<!\\)(\\\()(.+?)(\\\))': {'allow_multi_lines': False},  # \(...\)
        # r'(?<!\\)(\\begin{([a-z]+?\*?)})(.+?)(\\end{\2})': {'allow_multi_lines': True},  # \begin...\end
        # r'(?<!\\)(\$`)([^`]+)(`\$)': {'allow_multi_lines': False},  # $`...`$
    }

    matches = []
    for pattern, property in mathpatterns.items():
        flags = re.ASCII | re.DOTALL if property["allow_multi_lines"] else re.ASCII
@@ -207,6 +210,61 @@ def fix_code_segment_indent(txt):
    return txt


def fix_dollar_sticking_bug(txt):
    """
    Fix non-standard dollar math delimiters
    """
    txt_result = ""
    single_stack_height = 0
    double_stack_height = 0
    while True:
        while True:
            index = txt.find('$')

            if index == -1:
                txt_result += txt
                return txt_result

            if single_stack_height > 0:
                if txt[:(index+1)].find('\n') > 0 or txt[:(index+1)].find('<td>') > 0 or txt[:(index+1)].find('</td>') > 0:
                    print('公式之中出现了异常 (Unexpect element in equation)')
                    single_stack_height = 0
                    txt_result += ' $'
                    continue

            if double_stack_height > 0:
                if txt[:(index+1)].find('\n\n') > 0:
                    print('公式之中出现了异常 (Unexpect element in equation)')
                    double_stack_height = 0
                    txt_result += '$$'
                    continue

            is_double = (txt[index+1] == '$')
            if is_double:
                if single_stack_height != 0:
                    # add a padding
                    txt = txt[:(index+1)] + " " + txt[(index+1):]
                    continue
                if double_stack_height == 0:
                    double_stack_height = 1
                else:
                    double_stack_height = 0
                txt_result += txt[:(index+2)]
                txt = txt[(index+2):]
            else:
                if double_stack_height != 0:
                    # print(txt[:(index)])
                    print('发现异常嵌套公式')
                if single_stack_height == 0:
                    single_stack_height = 1
                else:
                    single_stack_height = 0
                # print(txt[:(index)])
                txt_result += txt[:(index+1)]
                txt = txt[(index+1):]
            break

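
A smoke test for the helper above (the import path is an assumption; adjust to wherever this hunk lands in the tree). Well-formed dollar spans should pass through unchanged:

from shared_utils.advanced_markdown_format import fix_dollar_sticking_bug  # assumed path

print(fix_dollar_sticking_bug("inline $a+b$ and display $$c$$"))
# -> inline $a+b$ and display $$c$$   (well-formed input is left untouched)
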
def markdown_convertion_for_file(txt):
    """
    Convert Markdown-formatted text to HTML. If the text contains math formulas, convert the formulas to HTML first.
@@ -232,10 +290,10 @@ def markdown_convertion_for_file(txt):
    find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
    txt = fix_markdown_indent(txt)
    convert_stage_1 = fix_dollar_sticking_bug(txt)
    # convert everything to html format
    split = markdown.markdown(text="---")
    convert_stage_1 = markdown.markdown(
        text=txt,
    convert_stage_2 = markdown.markdown(
        text=convert_stage_1,
        extensions=[
            "sane_lists",
            "tables",
@@ -245,14 +303,24 @@ def markdown_convertion_for_file(txt):
        ],
        extension_configs={**markdown_extension_configs, **code_highlight_configs},
    )
    convert_stage_1 = markdown_bug_hunt(convert_stage_1)

    def repl_fn(match):
        content = match.group(2)
        return f'<script type="math/tex">{content}</script>'

    pattern = "|".join([pattern for pattern, property in mathpatterns.items() if not property["allow_multi_lines"]])
    pattern = re.compile(pattern, flags=re.ASCII)
    convert_stage_3 = pattern.sub(repl_fn, convert_stage_2)

    convert_stage_4 = markdown_bug_hunt(convert_stage_3)

    # 2. convert to rendered equation
    convert_stage_2_2, n = re.subn(
        find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL
    convert_stage_5, n = re.subn(
        find_equation_pattern, replace_math_render, convert_stage_4, flags=re.DOTALL
    )
    # cat them together
    return pre + convert_stage_2_2 + suf
    return pre + convert_stage_5 + suf

@lru_cache(maxsize=128)  # use an LRU cache to speed up conversion
def markdown_convertion(txt):
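
A sketch of the stage-3 substitution above: single-line TeX spans are re-wrapped in <script type="math/tex"> tags so the math renderer can pick them up later. For simplicity this sketch uses only the inline-dollar pattern (joining several alternatives as the real code does would shift the group indices):

import re

inline_dollar = re.compile(r"(?<!\\|\$)(\$)([^\$]+)(\$)", flags=re.ASCII)

def repl_fn(match):
    # group 2 is the TeX content between the delimiters
    return f'<script type="math/tex">{match.group(2)}</script>'

print(inline_dollar.sub(repl_fn, r"so $x^2$ holds"))
# -> so <script type="math/tex">x^2</script> holds
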
@@ -405,4 +473,4 @@ def format_io(self, y):
        # output section
        None if gpt_reply is None else markdown_convertion(gpt_reply),
    )
    return y
    return y
@@ -0,0 +1,25 @@
def is_full_width_char(ch):
    """Determine whether a single given character is full-width"""
    if '\u4e00' <= ch <= '\u9fff':
        return True  # Chinese characters
    if '\uff01' <= ch <= '\uff5e':
        return True  # full-width symbols
    if '\u3000' <= ch <= '\u303f':
        return True  # CJK punctuation
    return False

def scolling_visual_effect(text, scroller_max_len):
    text = text.\
        replace('\n', '').replace('`', '.').replace(' ', '.').replace('<br/>', '.....').replace('$', '.')
    place_take_cnt = 0
    pointer = len(text) - 1

    if len(text) < scroller_max_len:
        return text

    while place_take_cnt < scroller_max_len and pointer > 0:
        if is_full_width_char(text[pointer]): place_take_cnt += 2
        else: place_take_cnt += 1
        pointer -= 1

    return text[pointer:]
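
A usage sketch, assuming the two helpers above are in scope: the function keeps only the tail of the stream that fits the display width, with full-width (CJK) characters counted as two cells.

print(scolling_visual_effect("abcdefghijklmn", scroller_max_len=10))
# -> the last ~10 single-width characters of the ASCII string

print(scolling_visual_effect("中文字符中文字符中文", scroller_max_len=10))
# -> roughly the last 5 CJK characters (each takes 2 cells)
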
@@ -62,7 +62,7 @@ def validate_path_safety(path_or_url, user):
    else:
        raise FriendlyException(f"输入文件的路径 ({path_or_url}) 存在,但位置非法。请将文件上传后再执行该任务。") # return False
    if sensitive_path:
        allowed_users = [user, 'autogen', default_user_name] # three user paths that can be accessed
        allowed_users = [user, 'autogen', 'arxiv_cache', default_user_name] # four user paths that can be accessed
        for user_allowed in allowed_users:
            if f"{os.sep}".join(path_or_url.split(os.sep)[:2]) == os.path.join(sensitive_path, user_allowed):
                return True
@@ -81,7 +81,7 @@ def _authorize_user(path_or_url, request, gradio_app):
    if sensitive_path:
        token = request.cookies.get("access-token") or request.cookies.get("access-token-unsecure")
        user = gradio_app.tokens.get(token) # get user
        allowed_users = [user, 'autogen', default_user_name] # three user paths that can be accessed
        allowed_users = [user, 'autogen', 'arxiv_cache', default_user_name] # four user paths that can be accessed
        for user_allowed in allowed_users:
            # exact match
            if f"{os.sep}".join(path_or_url.split(os.sep)[:2]) == os.path.join(sensitive_path, user_allowed):
@@ -99,7 +99,7 @@ class Server(uvicorn.Server):
        self.thread = threading.Thread(target=self.run, daemon=True)
        self.thread.start()
        while not self.started:
            time.sleep(1e-3)
            time.sleep(5e-2)

    def close(self):
        self.should_exit = True
@@ -159,6 +159,16 @@ def start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SS
            return "越权访问!"
        return await endpoint(path_or_url, request)

    from fastapi import Request, status
    from fastapi.responses import FileResponse, RedirectResponse
    @gradio_app.get("/academic_logout")
    async def logout():
        response = RedirectResponse(url=CUSTOM_PATH, status_code=status.HTTP_302_FOUND)
        response.delete_cookie('access-token')
        response.delete_cookie('access-token-unsecure')
        return response

    # --- --- enable TTS (text-to-speech) functionality --- ---
    TTS_TYPE = get_conf("TTS_TYPE")
    if TTS_TYPE != "DISABLE":
        # audio generation functionality
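
A minimal standalone reproduction of the logout route above (illustrative, not the app's code; assumes a recent Starlette where TestClient is httpx-based):

from fastapi import FastAPI, status
from fastapi.responses import RedirectResponse
from fastapi.testclient import TestClient

app = FastAPI()

@app.get("/academic_logout")
async def logout():
    # redirect home and clear both auth cookies, mirroring the hunk above
    response = RedirectResponse(url="/", status_code=status.HTTP_302_FOUND)
    response.delete_cookie('access-token')
    response.delete_cookie('access-token-unsecure')
    return response

client = TestClient(app)
r = client.get("/academic_logout", follow_redirects=False)
print(r.status_code)                          # 302
print(r.headers.get_list("set-cookie"))       # cookies expired via Set-Cookie
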
@@ -220,13 +230,22 @@ def start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SS
    fastapi_app = FastAPI(lifespan=app_lifespan)
    fastapi_app.mount(CUSTOM_PATH, gradio_app)

    # --- --- favicon --- ---
    # --- --- favicon and block fastapi api reference routes --- ---
    from starlette.responses import JSONResponse
    if CUSTOM_PATH != '/':
        from fastapi.responses import FileResponse
        @fastapi_app.get("/favicon.ico")
        async def favicon():
            return FileResponse(app_block.favicon_path)

    @fastapi_app.middleware("http")
    async def middleware(request: Request, call_next):
        if request.scope['path'] in ["/docs", "/redoc", "/openapi.json"]:
            return JSONResponse(status_code=404, content={"message": "Not Found"})
        response = await call_next(request)
        return response


    # --- --- uvicorn.Config --- ---
    ssl_keyfile = None if SSL_KEYFILE == "" else SSL_KEYFILE
    ssl_certfile = None if SSL_CERTFILE == "" else SSL_CERTFILE
@@ -274,4 +293,4 @@ def start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SS
    }
    requests.get(f"{app_block.local_url}startup-events", verify=app_block.ssl_verify, proxies=forbid_proxies)
    app_block.is_running = True
    app_block.block_thread()
    app_block.block_thread()
22 tests/test_latex_auto_correct.py Normal file
@@ -0,0 +1,22 @@
"""
|
||||
对项目中的各个插件进行测试。运行方法:直接运行 python tests/test_plugins.py
|
||||
"""
|
||||
|
||||
|
||||
import os, sys, importlib
|
||||
|
||||
|
||||
def validate_path():
|
||||
dir_name = os.path.dirname(__file__)
|
||||
root_dir_assume = os.path.abspath(dir_name + "/..")
|
||||
os.chdir(root_dir_assume)
|
||||
sys.path.append(root_dir_assume)
|
||||
|
||||
|
||||
validate_path() # 返回项目根路径
|
||||
|
||||
if __name__ == "__main__":
|
||||
plugin_test = importlib.import_module('test_utils').plugin_test
|
||||
|
||||
|
||||
plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2203.01927")
|
||||
@@ -14,12 +14,13 @@ validate_path() # validate path so you can run from base directory

if "在线模型":
if __name__ == "__main__":
    from request_llms.bridge_cohere import predict_no_ui_long_connection
    from request_llms.bridge_taichu import predict_no_ui_long_connection
    # from request_llms.bridge_cohere import predict_no_ui_long_connection
    # from request_llms.bridge_spark import predict_no_ui_long_connection
    # from request_llms.bridge_zhipu import predict_no_ui_long_connection
    # from request_llms.bridge_chatglm3 import predict_no_ui_long_connection
    llm_kwargs = {
        "llm_model": "command-r-plus",
        "llm_model": "taichu",
        "max_length": 4096,
        "top_p": 1,
        "temperature": 1,
17 tests/test_safe_pickle.py Normal file
@@ -0,0 +1,17 @@
def validate_path():
    import os, sys
    os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)
validate_path()  # validate path so you can run from base directory

from crazy_functions.latex_fns.latex_pickle_io import objdump, objload
from crazy_functions.latex_fns.latex_actions import LatexPaperFileGroup, LatexPaperSplit
pfg = LatexPaperFileGroup()
pfg.get_token_num = None
pfg.target = "target_elem"
x = objdump(pfg)
t = objload()

print(t.target)
102 tests/test_save_chat_to_html.py Normal file
@@ -0,0 +1,102 @@
def validate_path():
    import os, sys
    os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)
validate_path()  # validate path so you can run from base directory

def write_chat_to_file(chatbot, history=None, file_name=None):
    """
    Write the conversation history to a file in Markdown format. If no file name is given, generate one from the current time.
    """
    import os
    import time
    from themes.theme import advanced_css
    # debug
    import pickle
    # def objdump(obj, file="objdump.tmp"):
    #     with open(file, "wb+") as f:
    #         pickle.dump(obj, f)
    #     return

    def objload(file="objdump.tmp"):
        import os
        if not os.path.exists(file):
            return
        with open(file, "rb") as f:
            return pickle.load(f)
    # objdump((chatbot, history))
    chatbot, history = objload()

    with open("test.html", 'w', encoding='utf8') as f:
        from textwrap import dedent
        form = dedent("""
            <!DOCTYPE html><head><meta charset="utf-8"><title>对话存档</title><style>{CSS}</style></head>
            <body>
            <div class="test_temp1" style="width:10%; height: 500px; float:left;"></div>
            <div class="test_temp2" style="width:80%;padding: 40px;float:left;padding-left: 20px;padding-right: 20px;box-shadow: rgba(0, 0, 0, 0.2) 0px 0px 8px 8px;border-radius: 10px;">
                <div class="chat-body" style="display: flex;justify-content: center;flex-direction: column;align-items: center;flex-wrap: nowrap;">
                    {CHAT_PREVIEW}
                    <div></div>
                    <div></div>
                    <div style="text-align: center;width:80%;padding: 0px;float:left;padding-left:20px;padding-right:20px;box-shadow: rgba(0, 0, 0, 0.05) 0px 0px 1px 2px;border-radius: 1px;">对话(原始数据)</div>
                    {HISTORY_PREVIEW}
                </div>
            </div>
            <div class="test_temp3" style="width:10%; height: 500px; float:left;"></div>
            </body>
        """)

        qa_from = dedent("""
            <div class="QaBox" style="width:80%;padding: 20px;margin-bottom: 20px;box-shadow: rgb(0 255 159 / 50%) 0px 0px 1px 2px;border-radius: 4px;">
                <div class="Question" style="border-radius: 2px;">{QUESTION}</div>
                <hr color="blue" style="border-top: dotted 2px #ccc;">
                <div class="Answer" style="border-radius: 2px;">{ANSWER}</div>
            </div>
        """)

        history_from = dedent("""
            <div class="historyBox" style="width:80%;padding: 0px;float:left;padding-left:20px;padding-right:20px;box-shadow: rgba(0, 0, 0, 0.05) 0px 0px 1px 2px;border-radius: 1px;">
                <div class="entry" style="border-radius: 2px;">{ENTRY}</div>
            </div>
        """)
        CHAT_PREVIEW_BUF = ""
        for i, contents in enumerate(chatbot):
            question, answer = contents[0], contents[1]
            if question is None: question = ""
            try: question = str(question)
            except: question = ""
            if answer is None: answer = ""
            try: answer = str(answer)
            except: answer = ""
            CHAT_PREVIEW_BUF += qa_from.format(QUESTION=question, ANSWER=answer)

        HISTORY_PREVIEW_BUF = ""
        for h in history:
            HISTORY_PREVIEW_BUF += history_from.format(ENTRY=h)
        html_content = form.format(CHAT_PREVIEW=CHAT_PREVIEW_BUF, HISTORY_PREVIEW=HISTORY_PREVIEW_BUF, CSS=advanced_css)


        from bs4 import BeautifulSoup
        soup = BeautifulSoup(html_content, 'lxml')

        # extract QaBox info
        qa_box_list = []
        qa_boxes = soup.find_all("div", class_="QaBox")
        for box in qa_boxes:
            question = box.find("div", class_="Question").get_text(strip=False)
            answer = box.find("div", class_="Answer").get_text(strip=False)
            qa_box_list.append({"Question": question, "Answer": answer})

        # extract historyBox info
        history_box_list = []
        history_boxes = soup.find_all("div", class_="historyBox")
        for box in history_boxes:
            entry = box.find("div", class_="entry").get_text(strip=False)
            history_box_list.append(entry)

        print('')


write_chat_to_file(None, None, None)
58 tests/test_searxng.py Normal file
@@ -0,0 +1,58 @@
def validate_path():
    import os, sys
    os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)
validate_path()  # validate path so you can run from base directory

from toolbox import get_conf
import requests

def searxng_request(query, proxies, categories='general', searxng_url=None, engines=None):
    url = 'http://localhost:50001/'

    if engines is None:
        engine = 'bing,'
    else:
        engine = engines  # bug fix: `engine` was otherwise undefined when `engines` was given
    if categories == 'general':
        params = {
            'q': query,            # search query
            'format': 'json',      # output format: JSON
            'language': 'zh',      # search language
            'engines': engine,
        }
    elif categories == 'science':
        params = {
            'q': query,            # search query
            'format': 'json',      # output format: JSON
            'language': 'zh',      # search language
            'categories': 'science'
        }
    else:
        raise ValueError('不支持的检索类型')
    headers = {
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
        'X-Forwarded-For': '112.112.112.112',
        'X-Real-IP': '112.112.112.112'
    }
    results = []
    response = requests.post(url, params=params, headers=headers, proxies=proxies, timeout=30)
    if response.status_code == 200:
        json_result = response.json()
        for result in json_result['results']:
            item = {
                "title": result.get("title", ""),
                "content": result.get("content", ""),
                "link": result["url"],
            }
            print(result['engines'])
            results.append(item)
        return results
    else:
        if response.status_code == 429:
            raise ValueError("Searxng(在线搜索服务)当前使用人数太多,请稍后。")
        else:
            raise ValueError("在线搜索失败,状态码: " + str(response.status_code) + '\t' + response.content.decode('utf-8'))

res = searxng_request("vr environment", None, categories='science', searxng_url=None, engines=None)
print(res)
215 themes/common.js
@@ -1,3 +1,6 @@
// flags
enable_tts = false;

// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 1: utility functions
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
@@ -914,131 +917,6 @@ function gpt_academic_gradio_saveload(
    }
}

enable_tts = false;
async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
    // Part 1: layout initialization
    audio_fn_init();
    minor_ui_adjustment();
    chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
    var chatbotObserver = new MutationObserver(() => {
        chatbotContentChanged(1);
    });
    chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
    if (layout === "LEFT-RIGHT") { chatbotAutoHeight(); }
    if (layout === "LEFT-RIGHT") { limit_scroll_position(); }

    // Part 2: read cookies and initialize the UI
    let searchString = "";
    let bool_value = "";
    // darkmode
    if (getCookie("js_darkmode_cookie")) {
        dark = getCookie("js_darkmode_cookie")
    }
    dark = dark == "True";
    if (document.querySelectorAll('.dark').length) {
        if (!dark) {
            document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
        }
    } else {
        if (dark) {
            document.querySelector('body').classList.add('dark');
        }
    }

    // auto read-aloud (TTS)
    if (tts != "DISABLE"){
        enable_tts = true;
        if (getCookie("js_auto_read_cookie")) {
            auto_read_tts = getCookie("js_auto_read_cookie")
            auto_read_tts = auto_read_tts == "True";
            if (auto_read_tts) {
                allow_auto_read_tts_flag = true;
            }
        }
    }

    // SysPrompt: the silent system prompt
    gpt_academic_gradio_saveload("load", "elem_prompt", "js_system_prompt_cookie", null, "str");
    // Temperature: the LLM temperature parameter
    gpt_academic_gradio_saveload("load", "elem_temperature", "js_temperature_cookie", null, "float");
    // md_dropdown: LLM model selection
    if (getCookie("js_md_dropdown_cookie")) {
        const cached_model = getCookie("js_md_dropdown_cookie");
        var model_sel = await get_gradio_component("elem_model_sel");
        // determine whether the cached model is in the choices
        if (model_sel.props.choices.includes(cached_model)){
            // change dropdown
            gpt_academic_gradio_saveload("load", "elem_model_sel", "js_md_dropdown_cookie", null, "str");
            // also update the chatbot label accordingly
            push_data_to_gradio_component({
                label: '当前模型:' + getCookie("js_md_dropdown_cookie"),
                __type__: 'update'
            }, "gpt-chatbot", "obj")
        }
    }


    // clearButton: the auto-clear button
    if (getCookie("js_clearbtn_show_cookie")) {
        // have cookie
        bool_value = getCookie("js_clearbtn_show_cookie")
        bool_value = bool_value == "True";
        searchString = "输入清除键";

        if (bool_value) {
            // make btns appear
            let clearButton = document.getElementById("elem_clear"); clearButton.style.display = "block";
            let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "block";
            // deal with checkboxes
            let arr_with_clear_btn = update_array(
                await get_data_from_gradio_component('cbs'), "输入清除键", "add"
            )
            push_data_to_gradio_component(arr_with_clear_btn, "cbs", "no_conversion");
        } else {
            // make btns disappear
            let clearButton = document.getElementById("elem_clear"); clearButton.style.display = "none";
            let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "none";
            // deal with checkboxes
            let arr_without_clear_btn = update_array(
                await get_data_from_gradio_component('cbs'), "输入清除键", "remove"
            )
            push_data_to_gradio_component(arr_without_clear_btn, "cbs", "no_conversion");
        }
    }

    // live2d display
    if (getCookie("js_live2d_show_cookie")) {
        // have cookie
        searchString = "添加Live2D形象";
        bool_value = getCookie("js_live2d_show_cookie");
        bool_value = bool_value == "True";
        if (bool_value) {
            loadLive2D();
            let arr_with_live2d = update_array(
                await get_data_from_gradio_component('cbsc'), "添加Live2D形象", "add"
            )
            push_data_to_gradio_component(arr_with_live2d, "cbsc", "no_conversion");
        } else {
            try {
                $('.waifu').hide();
                let arr_without_live2d = update_array(
                    await get_data_from_gradio_component('cbsc'), "添加Live2D形象", "remove"
                )
                push_data_to_gradio_component(arr_without_live2d, "cbsc", "no_conversion");
            } catch (error) {
            }
        }
    } else {
        // do not have cookie
        if (live2d) {
            loadLive2D();
        } else {
        }
    }

}

function reset_conversation(a, b) {
    // console.log("js_code_reset");
@@ -1690,24 +1568,99 @@ function close_current_pop_up_plugin(){
    hide_all_elem();
}


// build the selection menu for advanced plugins
advanced_plugin_init_code_lib = {}
plugin_init_info_lib = {}
function register_plugin_init(key, base64String){
    // console.log('x')
    const stringData = atob(base64String);
    let guiJsonData = JSON.parse(stringData);
    if (key in plugin_init_info_lib)
    {
    }
    else
    {
        plugin_init_info_lib[key] = {};
    }
    plugin_init_info_lib[key].info = guiJsonData.Info;
    plugin_init_info_lib[key].color = guiJsonData.Color;
    plugin_init_info_lib[key].elem_id = guiJsonData.ButtonElemId;
    plugin_init_info_lib[key].label = guiJsonData.Label
    plugin_init_info_lib[key].enable_advanced_arg = guiJsonData.AdvancedArgs;
    plugin_init_info_lib[key].arg_reminder = guiJsonData.ArgsReminder;
}
function register_advanced_plugin_init_code(key, code){
    advanced_plugin_init_code_lib[key] = code;
    if (key in plugin_init_info_lib)
    {
    }
    else
    {
        plugin_init_info_lib[key] = {};
    }
    plugin_init_info_lib[key].secondary_menu_code = code;
}
function run_advanced_plugin_launch_code(key){
    // convert js code string to function
    generate_menu(advanced_plugin_init_code_lib[key], key);
    generate_menu(plugin_init_info_lib[key].secondary_menu_code, key);
}
function on_flex_button_click(key){
    if (advanced_plugin_init_code_lib.hasOwnProperty(key)){
    if (plugin_init_info_lib.hasOwnProperty(key) && plugin_init_info_lib[key].hasOwnProperty('secondary_menu_code')){
        run_advanced_plugin_launch_code(key);
    }else{
        document.getElementById("old_callback_btn_for_plugin_exe").click();
    }
}
async function run_dropdown_shift(dropdown){
    let key = dropdown;
    push_data_to_gradio_component({
        value: key,
        variant: plugin_init_info_lib[key].color,
        info_str: plugin_init_info_lib[key].info,
        __type__: 'update'
    }, "elem_switchy_bt", "obj");

    if (plugin_init_info_lib[key].enable_advanced_arg){
        push_data_to_gradio_component({
            visible: true,
            label: plugin_init_info_lib[key].label,
            __type__: 'update'
        }, "advance_arg_input_legacy", "obj");
    } else {
        push_data_to_gradio_component({
            visible: false,
            label: plugin_init_info_lib[key].label,
            __type__: 'update'
        }, "advance_arg_input_legacy", "obj");
    }
}

async function run_classic_plugin_via_id(plugin_elem_id){
    // find elementid
    for (key in plugin_init_info_lib){
        if (plugin_init_info_lib[key].elem_id == plugin_elem_id){
            let current_btn_name = await get_data_from_gradio_component(plugin_elem_id);
            console.log(current_btn_name);

            gui_args = {}
            // close the menu (if it is open)
            push_data_to_gradio_component({
                visible: false,
                __type__: 'update'
            }, "plugin_arg_menu", "obj");
            hide_all_elem();
            // for compatibility with old plugins, auto-load the value of the legacy advanced-argument input when generating the menu
            let advance_arg_input_legacy = await get_data_from_gradio_component('advance_arg_input_legacy');
            if (advance_arg_input_legacy.length != 0){
                gui_args["advanced_arg"] = {};
                gui_args["advanced_arg"].user_confirmed_value = advance_arg_input_legacy;
            }
            // execute the plugin
            push_data_to_gradio_component(JSON.stringify(gui_args), "invisible_current_pop_up_plugin_arg_final", "string");
            push_data_to_gradio_component(current_btn_name, "invisible_callback_btn_for_plugin_exe", "string");
            document.getElementById("invisible_callback_btn_for_plugin_exe").click();
            return;
        }
    }
    // console.log('unable to find function');
    return;
}

@@ -1,3 +1,4 @@
from functools import cache
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")

@@ -23,22 +24,30 @@ def minimize_js(common_js_path):
    except:
        return common_js_path

@cache
def get_common_html_javascript_code():
    js = "\n"
    common_js_path = "themes/common.js"
    minimized_js_path = minimize_js(common_js_path)
    for jsf in [
        f"file={minimized_js_path}",
    ]:
        js += f"""<script src="{jsf}"></script>\n"""
    common_js_path_list = [
        "themes/common.js",
        "themes/theme.js",
        "themes/init.js",
    ]

    # add Live2D
    if ADD_WAIFU:
    if ADD_WAIFU:  # add Live2D
        common_js_path_list += [
            "themes/waifu_plugin/jquery.min.js",
            "themes/waifu_plugin/jquery-ui.min.js",
        ]

    for common_js_path in common_js_path_list:
        if '.min.' not in common_js_path:
            minimized_js_path = minimize_js(common_js_path)
        for jsf in [
            "file=themes/waifu_plugin/jquery.min.js",
            "file=themes/waifu_plugin/jquery-ui.min.js",
            f"file={minimized_js_path}",
        ]:
            js += f"""<script src="{jsf}"></script>\n"""
        else:

    if not ADD_WAIFU:
        js += """<script>window.loadLive2D = function(){};</script>\n"""

    return js

@@ -24,8 +24,8 @@
/* small buttons */
#basic-panel .sm {
    font-family: "Microsoft YaHei UI", "Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui";
    --button-small-text-weight: 600;
    --button-small-text-size: 16px;
    --button-small-text-weight: 400;
    --button-small-text-size: 14px;
    border-bottom-right-radius: 6px;
    border-bottom-left-radius: 6px;
    border-top-right-radius: 6px;
@@ -31,18 +31,27 @@ def define_gui_advanced_plugin_class(plugins):
        invisible_callback_btn_for_plugin_exe = gr.Button(r"未选定任何插件", variant="secondary", visible=False, elem_id="invisible_callback_btn_for_plugin_exe").style(size="sm")
        # register the callback of the shape-shifting button
        def route_switchy_bt_with_arg(request: gr.Request, input_order, *arg):
            arguments = {k:v for k,v in zip(input_order, arg)}
            which_plugin = arguments.pop('new_plugin_callback')
            arguments = {k:v for k,v in zip(input_order, arg)} # reorganize the positional inputs into a kwargs dict
            which_plugin = arguments.pop('new_plugin_callback') # the name of the plugin to execute
            if which_plugin in [r"未选定任何插件"]: return
            usr_confirmed_arg = arguments.pop('usr_confirmed_arg')
            usr_confirmed_arg = arguments.pop('usr_confirmed_arg') # the plugin arguments
            arg_confirm: dict = {}
            usr_confirmed_arg_dict = json.loads(usr_confirmed_arg)
            usr_confirmed_arg_dict = json.loads(usr_confirmed_arg) # parse the plugin arguments
            for arg_name in usr_confirmed_arg_dict:
                arg_confirm.update({arg_name: str(usr_confirmed_arg_dict[arg_name]['user_confirmed_value'])})
            plugin_obj = plugins[which_plugin]["Class"]
            arguments['plugin_advanced_arg'] = arg_confirm
            if arg_confirm.get('main_input', None) is not None:

            if plugins[which_plugin].get("Class", None) is not None: # get the plugin's execute function
                plugin_obj = plugins[which_plugin]["Class"]
                plugin_exe = plugin_obj.execute
            else:
                plugin_exe = plugins[which_plugin]["Function"]

            arguments['plugin_advanced_arg'] = arg_confirm # update the advanced-argument input value
            if arg_confirm.get('main_input', None) is not None: # update the main input value
                arguments['txt'] = arg_confirm['main_input']
            yield from ArgsGeneralWrapper(plugin_obj.execute)(request, *arguments.values())

            # everything is ready; start execution
            yield from ArgsGeneralWrapper(plugin_exe)(request, *arguments.values())

        return invisible_callback_btn_for_plugin_exe, route_switchy_bt_with_arg, usr_confirmed_arg

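
The positional-to-kwargs reassembly that route_switchy_bt_with_arg relies on can be shown in isolation (component names and values below are illustrative, not taken from the app):

input_order = ["new_plugin_callback", "usr_confirmed_arg", "txt", "chatbot"]
args = ("某插件", '{"advanced_arg": {"user_confirmed_value": "--fast"}}', "hello", [])

# Gradio passes component values positionally; input_order restores their names.
arguments = {k: v for k, v in zip(input_order, args)}
which_plugin = arguments.pop("new_plugin_callback")
usr_confirmed_arg = arguments.pop("usr_confirmed_arg")
print(which_plugin, list(arguments))  # 某插件 ['txt', 'chatbot']
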
125 themes/init.js Normal file
@@ -0,0 +1,125 @@
async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
    // Part 1: layout initialization
    audio_fn_init();
    minor_ui_adjustment();
    chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
    var chatbotObserver = new MutationObserver(() => {
        chatbotContentChanged(1);
    });
    chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
    if (layout === "LEFT-RIGHT") { chatbotAutoHeight(); }
    if (layout === "LEFT-RIGHT") { limit_scroll_position(); }

    // Part 2: read cookies and initialize the UI
    let searchString = "";
    let bool_value = "";
    // darkmode
    if (getCookie("js_darkmode_cookie")) {
        dark = getCookie("js_darkmode_cookie")
    }
    dark = dark == "True";
    if (document.querySelectorAll('.dark').length) {
        if (!dark) {
            document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
        }
    } else {
        if (dark) {
            document.querySelector('body').classList.add('dark');
        }
    }

    // auto read-aloud (TTS)
    if (tts != "DISABLE"){
        enable_tts = true;
        if (getCookie("js_auto_read_cookie")) {
            auto_read_tts = getCookie("js_auto_read_cookie")
            auto_read_tts = auto_read_tts == "True";
            if (auto_read_tts) {
                allow_auto_read_tts_flag = true;
            }
        }
    }

    // SysPrompt: the silent system prompt
    gpt_academic_gradio_saveload("load", "elem_prompt", "js_system_prompt_cookie", null, "str");
    // Temperature: the LLM temperature parameter
    gpt_academic_gradio_saveload("load", "elem_temperature", "js_temperature_cookie", null, "float");
    // md_dropdown: LLM model selection
    if (getCookie("js_md_dropdown_cookie")) {
        const cached_model = getCookie("js_md_dropdown_cookie");
        var model_sel = await get_gradio_component("elem_model_sel");
        // determine whether the cached model is in the choices
        if (model_sel.props.choices.includes(cached_model)){
            // change dropdown
            gpt_academic_gradio_saveload("load", "elem_model_sel", "js_md_dropdown_cookie", null, "str");
            // also update the chatbot label accordingly
            push_data_to_gradio_component({
                label: '当前模型:' + getCookie("js_md_dropdown_cookie"),
                __type__: 'update'
            }, "gpt-chatbot", "obj")
        }
    }


    // clearButton: the auto-clear button
    if (getCookie("js_clearbtn_show_cookie")) {
        // have cookie
        bool_value = getCookie("js_clearbtn_show_cookie")
        bool_value = bool_value == "True";
        searchString = "输入清除键";

        if (bool_value) {
            // make btns appear
            let clearButton = document.getElementById("elem_clear"); clearButton.style.display = "block";
            let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "block";
            // deal with checkboxes
            let arr_with_clear_btn = update_array(
                await get_data_from_gradio_component('cbs'), "输入清除键", "add"
            )
            push_data_to_gradio_component(arr_with_clear_btn, "cbs", "no_conversion");
        } else {
            // make btns disappear
            let clearButton = document.getElementById("elem_clear"); clearButton.style.display = "none";
            let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "none";
            // deal with checkboxes
            let arr_without_clear_btn = update_array(
                await get_data_from_gradio_component('cbs'), "输入清除键", "remove"
            )
            push_data_to_gradio_component(arr_without_clear_btn, "cbs", "no_conversion");
        }
    }

    // live2d display
    if (getCookie("js_live2d_show_cookie")) {
        // have cookie
        searchString = "添加Live2D形象";
        bool_value = getCookie("js_live2d_show_cookie");
        bool_value = bool_value == "True";
        if (bool_value) {
            loadLive2D();
            let arr_with_live2d = update_array(
                await get_data_from_gradio_component('cbsc'), "添加Live2D形象", "add"
            )
            push_data_to_gradio_component(arr_with_live2d, "cbsc", "no_conversion");
        } else {
            try {
                $('.waifu').hide();
                let arr_without_live2d = update_array(
                    await get_data_from_gradio_component('cbsc'), "添加Live2D形象", "remove"
                )
                push_data_to_gradio_component(arr_without_live2d, "cbsc", "no_conversion");
            } catch (error) {
            }
        }
    } else {
        // do not have cookie
        if (live2d) {
            loadLive2D();
        } else {
        }
    }

    // theme loading (restore the last theme)
    change_theme("", "")
}
41 themes/theme.js Normal file
@@ -0,0 +1,41 @@
async function try_load_previous_theme(){
    if (getCookie("js_theme_selection_cookie")) {
        theme_selection = getCookie("js_theme_selection_cookie");
        let css = localStorage.getItem('theme-' + theme_selection);
        if (css) {
            change_theme(theme_selection, css);
        }
    }
}

async function change_theme(theme_selection, css) {
    if (theme_selection.length==0) {
        try_load_previous_theme();
        return;
    }

    var existingStyles = document.querySelectorAll("body > gradio-app > div > style")
    for (var i = 0; i < existingStyles.length; i++) {
        var style = existingStyles[i];
        style.parentNode.removeChild(style);
    }
    var existingStyles = document.querySelectorAll("style[data-loaded-css]");
    for (var i = 0; i < existingStyles.length; i++) {
        var style = existingStyles[i];
        style.parentNode.removeChild(style);
    }

    setCookie("js_theme_selection_cookie", theme_selection, 3);
    localStorage.setItem('theme-' + theme_selection, css);

    var styleElement = document.createElement('style');
    styleElement.setAttribute('data-loaded-css', 'placeholder');
    styleElement.innerHTML = css;
    document.body.appendChild(styleElement);
}


// // record this theme switch
// async function change_theme_prepare(theme_selection, secret_css) {
//     setCookie("js_theme_selection_cookie", theme_selection, 3);
// }
@@ -71,29 +71,10 @@ def from_cookie_str(c):
"""
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
Part 3
Embedded JavaScript code
Embedded JavaScript code (this part will gradually move into common.js)
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""

js_code_for_css_changing = """(css) => {
    var existingStyles = document.querySelectorAll("body > gradio-app > div > style")
    for (var i = 0; i < existingStyles.length; i++) {
        var style = existingStyles[i];
        style.parentNode.removeChild(style);
    }
    var existingStyles = document.querySelectorAll("style[data-loaded-css]");
    for (var i = 0; i < existingStyles.length; i++) {
        var style = existingStyles[i];
        style.parentNode.removeChild(style);
    }
    var styleElement = document.createElement('style');
    styleElement.setAttribute('data-loaded-css', 'placeholder');
    styleElement.innerHTML = css;
    document.body.appendChild(styleElement);
}
"""


js_code_for_toggle_darkmode = """() => {
    if (document.querySelectorAll('.dark').length) {
        setCookie("js_darkmode_cookie", "False", 365);
@@ -114,6 +95,8 @@ js_code_for_persistent_cookie_init = """(web_cookie_cache, cookie) => {
# see themes/common.js for details
js_code_reset = """
(a,b,c)=>{
    let stopButton = document.getElementById("elem_stop");
    stopButton.click();
    return reset_conversation(a,b);
}
"""

10 toolbox.py
@@ -1,3 +1,4 @@

import importlib
import time
import inspect
@@ -920,15 +921,18 @@ def get_pictures_list(path):
    return file_manifest

return file_manifest
|
||||
|
||||
|
||||
def have_any_recent_upload_image_files(chatbot:ChatBotWithCookies):
|
||||
def have_any_recent_upload_image_files(chatbot:ChatBotWithCookies, pop:bool=False):
|
||||
_5min = 5 * 60
|
||||
if chatbot is None:
|
||||
return False, None # chatbot is None
|
||||
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
|
||||
if pop:
|
||||
most_recent_uploaded = chatbot._cookies.pop("most_recent_uploaded", None)
|
||||
else:
|
||||
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
|
||||
# most_recent_uploaded 是一个放置最新上传图像的路径
|
||||
if not most_recent_uploaded:
|
||||
return False, None # most_recent_uploaded is None
|
||||
if time.time() - most_recent_uploaded["time"] < _5min:
|
||||
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
|
||||
path = most_recent_uploaded["path"]
|
||||
file_manifest = get_pictures_list(path)
|
||||
if len(file_manifest) == 0:
|
||||
|
||||
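
A usage sketch for the amended helper (the diff is truncated above; this assumes the untruncated tail returns (True, file_manifest) when recent images exist). With pop=True the cookie is consumed, so the same upload is not reused by a later call:

have_images, image_paths = have_any_recent_upload_image_files(chatbot, pop=True)
if have_images:
    print(f"found {len(image_paths)} recently uploaded image(s)")
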