镜像自地址
https://github.com/binary-husky/gpt_academic.git
已同步 2025-12-06 22:46:48 +00:00
比较提交
37 次代码提交
binary-hus
...
version3.7
| 作者 | SHA1 | 提交日期 | |
|---|---|---|---|
|
|
163f12c533 | ||
|
|
bdd46c5dd1 | ||
|
|
ae51a0e686 | ||
|
|
f2582ea137 | ||
|
|
ddd2fd84da | ||
|
|
6c90ff80ea | ||
|
|
cb7c0703be | ||
|
|
5181cd441d | ||
|
|
216d4374e7 | ||
|
|
8af6c0cab6 | ||
|
|
67ad041372 | ||
|
|
725c72229c | ||
|
|
e42ede512b | ||
|
|
84ccc9e64c | ||
|
|
c172847e19 | ||
|
|
d166d25eb4 | ||
|
|
516bbb1331 | ||
|
|
c3140ce344 | ||
|
|
cd18663800 | ||
|
|
dbf1322836 | ||
|
|
98dd3ae1c0 | ||
|
|
3036709496 | ||
|
|
8e9c07644f | ||
|
|
90d96b77e6 | ||
|
|
66c876a9ca | ||
|
|
0665eb75ed | ||
|
|
6b784035fa | ||
|
|
8bb3d84912 | ||
|
|
a0193cf227 | ||
|
|
b72289bfb0 | ||
|
|
bdfe3862eb | ||
|
|
dae180b9ea | ||
|
|
e359fff040 | ||
|
|
2e9b4a5770 | ||
|
|
e0c5859cf9 | ||
|
|
b9b1e12dc9 | ||
|
|
8814026ec3 |
@@ -1,7 +1,6 @@
|
|||||||
> [!IMPORTANT]
|
> [!IMPORTANT]
|
||||||
|
> 2024.3.11: 恭迎Claude3和Moonshot,全力支持Qwen、GLM、DeepseekCoder等中文大语言模型!
|
||||||
> 2024.1.18: 更新3.70版本,支持Mermaid绘图库(让大模型绘制脑图)
|
> 2024.1.18: 更新3.70版本,支持Mermaid绘图库(让大模型绘制脑图)
|
||||||
> 2024.1.17: 恭迎GLM4,全力支持Qwen、GLM、DeepseekCoder等国内中文大语言基座模型!
|
|
||||||
> 2024.1.17: 某些依赖包尚不兼容python 3.12,推荐python 3.11。
|
|
||||||
> 2024.1.17: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。
|
> 2024.1.17: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。
|
||||||
|
|
||||||
<br>
|
<br>
|
||||||
@@ -253,8 +252,7 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以
|
|||||||
# Advanced Usage
|
# Advanced Usage
|
||||||
### I:自定义新的便捷按钮(学术快捷键)
|
### I:自定义新的便捷按钮(学术快捷键)
|
||||||
|
|
||||||
任意文本编辑器打开`core_functional.py`,添加如下条目,然后重启程序。(如果按钮已存在,那么可以直接修改(前缀、后缀都已支持热修改),无需重启程序即可生效。)
|
现在已可以通过UI中的`界面外观`菜单中的`自定义菜单`添加新的便捷按钮。如果需要在代码中定义,请使用任意文本编辑器打开`core_functional.py`,添加如下条目即可:
|
||||||
例如
|
|
||||||
|
|
||||||
```python
|
```python
|
||||||
"超级英译中": {
|
"超级英译中": {
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ def backup_and_download(current_version, remote_version):
|
|||||||
shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
|
shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
|
||||||
proxies = get_conf('proxies')
|
proxies = get_conf('proxies')
|
||||||
try: r = requests.get('https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
|
try: r = requests.get('https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
|
||||||
except: r = requests.get('https://public.gpt-academic.top/publish/master.zip', proxies=proxies, stream=True)
|
except: r = requests.get('https://public.agent-matrix.com/publish/master.zip', proxies=proxies, stream=True)
|
||||||
zip_file_path = backup_dir+'/master.zip'
|
zip_file_path = backup_dir+'/master.zip'
|
||||||
with open(zip_file_path, 'wb+') as f:
|
with open(zip_file_path, 'wb+') as f:
|
||||||
f.write(r.content)
|
f.write(r.content)
|
||||||
@@ -113,7 +113,7 @@ def auto_update(raise_error=False):
|
|||||||
import json
|
import json
|
||||||
proxies = get_conf('proxies')
|
proxies = get_conf('proxies')
|
||||||
try: response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
|
try: response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
|
||||||
except: response = requests.get("https://public.gpt-academic.top/publish/version", proxies=proxies, timeout=5)
|
except: response = requests.get("https://public.agent-matrix.com/publish/version", proxies=proxies, timeout=5)
|
||||||
remote_json_data = json.loads(response.text)
|
remote_json_data = json.loads(response.text)
|
||||||
remote_version = remote_json_data['version']
|
remote_version = remote_json_data['version']
|
||||||
if remote_json_data["show_feature"]:
|
if remote_json_data["show_feature"]:
|
||||||
|
|||||||
103
config.py
103
config.py
@@ -30,7 +30,33 @@ if USE_PROXY:
|
|||||||
else:
|
else:
|
||||||
proxies = None
|
proxies = None
|
||||||
|
|
||||||
# ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------
|
# [step 3]>> 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
|
||||||
|
LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
|
||||||
|
AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
|
||||||
|
"gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
|
||||||
|
"gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-3-turbo",
|
||||||
|
"gemini-pro", "chatglm3"
|
||||||
|
]
|
||||||
|
# --- --- --- ---
|
||||||
|
# P.S. 其他可用的模型还包括
|
||||||
|
# AVAIL_LLM_MODELS = [
|
||||||
|
# "qianfan", "deepseekcoder",
|
||||||
|
# "spark", "sparkv2", "sparkv3", "sparkv3.5",
|
||||||
|
# "qwen-turbo", "qwen-plus", "qwen-max", "qwen-local",
|
||||||
|
# "moonshot-v1-128k", "moonshot-v1-32k", "moonshot-v1-8k",
|
||||||
|
# "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0125"
|
||||||
|
# "claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229", "claude-2.1", "claude-instant-1.2",
|
||||||
|
# "moss", "llama2", "chatglm_onnx", "internlm", "jittorllms_pangualpha", "jittorllms_llama",
|
||||||
|
# "yi-34b-chat-0205", "yi-34b-chat-200k"
|
||||||
|
# ]
|
||||||
|
# --- --- --- ---
|
||||||
|
# 此外,为了更灵活地接入one-api多模型管理界面,您还可以在接入one-api时,
|
||||||
|
# 使用"one-api-*"前缀直接使用非标准方式接入的模型,例如
|
||||||
|
# AVAIL_LLM_MODELS = ["one-api-claude-3-sonnet-20240229(max_token=100000)"]
|
||||||
|
# --- --- --- ---
|
||||||
|
|
||||||
|
|
||||||
|
# --------------- 以下配置可以优化体验 ---------------
|
||||||
|
|
||||||
# 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
|
# 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
|
||||||
# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
|
# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
|
||||||
@@ -85,20 +111,6 @@ MAX_RETRY = 2
|
|||||||
DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
|
DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
|
||||||
|
|
||||||
|
|
||||||
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
|
|
||||||
LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
|
|
||||||
AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
|
|
||||||
"gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
|
|
||||||
"gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
|
|
||||||
"gemini-pro", "chatglm3", "claude-2", "zhipuai"]
|
|
||||||
# P.S. 其他可用的模型还包括 [
|
|
||||||
# "moss", "qwen-turbo", "qwen-plus", "qwen-max"
|
|
||||||
# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
|
|
||||||
# "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
|
|
||||||
# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"
|
|
||||||
# ]
|
|
||||||
|
|
||||||
|
|
||||||
# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
|
# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
|
||||||
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
|
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
|
||||||
|
|
||||||
@@ -127,6 +139,7 @@ CHATGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b
|
|||||||
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
|
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
|
||||||
LOCAL_MODEL_QUANT = "FP16" # 默认 "FP16" "INT4" 启用量化INT4版本 "INT8" 启用量化INT8版本
|
LOCAL_MODEL_QUANT = "FP16" # 默认 "FP16" "INT4" 启用量化INT4版本 "INT8" 启用量化INT8版本
|
||||||
|
|
||||||
|
|
||||||
# 设置gradio的并行线程数(不需要修改)
|
# 设置gradio的并行线程数(不需要修改)
|
||||||
CONCURRENT_COUNT = 100
|
CONCURRENT_COUNT = 100
|
||||||
|
|
||||||
@@ -144,7 +157,8 @@ ADD_WAIFU = False
|
|||||||
AUTHENTICATION = []
|
AUTHENTICATION = []
|
||||||
|
|
||||||
|
|
||||||
# 如果需要在二级路径下运行(常规情况下,不要修改!!)(需要配合修改main.py才能生效!)
|
# 如果需要在二级路径下运行(常规情况下,不要修改!!)
|
||||||
|
# (举例 CUSTOM_PATH = "/gpt_academic",可以让软件运行在 http://ip:port/gpt_academic/ 下。)
|
||||||
CUSTOM_PATH = "/"
|
CUSTOM_PATH = "/"
|
||||||
|
|
||||||
|
|
||||||
@@ -172,14 +186,8 @@ AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.
|
|||||||
AZURE_CFG_ARRAY = {}
|
AZURE_CFG_ARRAY = {}
|
||||||
|
|
||||||
|
|
||||||
# 使用Newbing (不推荐使用,未来将删除)
|
# 阿里云实时语音识别 配置难度较高
|
||||||
NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
|
# 参考 https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md
|
||||||
NEWBING_COOKIES = """
|
|
||||||
put your new bing cookies here
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
# 阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md
|
|
||||||
ENABLE_AUDIO = False
|
ENABLE_AUDIO = False
|
||||||
ALIYUN_TOKEN="" # 例如 f37f30e0f9934c34a992f6f64f7eba4f
|
ALIYUN_TOKEN="" # 例如 f37f30e0f9934c34a992f6f64f7eba4f
|
||||||
ALIYUN_APPKEY="" # 例如 RoPlZrM88DnAFkZK
|
ALIYUN_APPKEY="" # 例如 RoPlZrM88DnAFkZK
|
||||||
@@ -195,19 +203,26 @@ XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
|
|||||||
|
|
||||||
# 接入智谱大模型
|
# 接入智谱大模型
|
||||||
ZHIPUAI_API_KEY = ""
|
ZHIPUAI_API_KEY = ""
|
||||||
ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4"
|
ZHIPUAI_MODEL = "" # 此选项已废弃,不再需要填写
|
||||||
|
|
||||||
|
|
||||||
# # 火山引擎YUNQUE大模型
|
|
||||||
# YUNQUE_SECRET_KEY = ""
|
|
||||||
# YUNQUE_ACCESS_KEY = ""
|
|
||||||
# YUNQUE_MODEL = ""
|
|
||||||
|
|
||||||
|
|
||||||
# Claude API KEY
|
# Claude API KEY
|
||||||
ANTHROPIC_API_KEY = ""
|
ANTHROPIC_API_KEY = ""
|
||||||
|
|
||||||
|
|
||||||
|
# 月之暗面 API KEY
|
||||||
|
MOONSHOT_API_KEY = ""
|
||||||
|
|
||||||
|
|
||||||
|
# 零一万物(Yi Model) API KEY
|
||||||
|
YIMODEL_API_KEY = ""
|
||||||
|
|
||||||
|
|
||||||
|
# Mathpix 拥有执行PDF的OCR功能,但是需要注册账号
|
||||||
|
MATHPIX_APPID = ""
|
||||||
|
MATHPIX_APPKEY = ""
|
||||||
|
|
||||||
|
|
||||||
# 自定义API KEY格式
|
# 自定义API KEY格式
|
||||||
CUSTOM_API_KEY_PATTERN = ""
|
CUSTOM_API_KEY_PATTERN = ""
|
||||||
|
|
||||||
@@ -261,7 +276,11 @@ PLUGIN_HOT_RELOAD = False
|
|||||||
# 自定义按钮的最大数量限制
|
# 自定义按钮的最大数量限制
|
||||||
NUM_CUSTOM_BASIC_BTN = 4
|
NUM_CUSTOM_BASIC_BTN = 4
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
--------------- 配置关联关系说明 ---------------
|
||||||
|
|
||||||
在线大模型配置关联关系示意图
|
在线大模型配置关联关系示意图
|
||||||
│
|
│
|
||||||
├── "gpt-3.5-turbo" 等openai模型
|
├── "gpt-3.5-turbo" 等openai模型
|
||||||
@@ -285,7 +304,7 @@ NUM_CUSTOM_BASIC_BTN = 4
|
|||||||
│ ├── XFYUN_API_SECRET
|
│ ├── XFYUN_API_SECRET
|
||||||
│ └── XFYUN_API_KEY
|
│ └── XFYUN_API_KEY
|
||||||
│
|
│
|
||||||
├── "claude-1-100k" 等claude模型
|
├── "claude-3-opus-20240229" 等claude模型
|
||||||
│ └── ANTHROPIC_API_KEY
|
│ └── ANTHROPIC_API_KEY
|
||||||
│
|
│
|
||||||
├── "stack-claude"
|
├── "stack-claude"
|
||||||
@@ -297,9 +316,11 @@ NUM_CUSTOM_BASIC_BTN = 4
|
|||||||
│ ├── BAIDU_CLOUD_API_KEY
|
│ ├── BAIDU_CLOUD_API_KEY
|
||||||
│ └── BAIDU_CLOUD_SECRET_KEY
|
│ └── BAIDU_CLOUD_SECRET_KEY
|
||||||
│
|
│
|
||||||
├── "zhipuai" 智谱AI大模型chatglm_turbo
|
├── "glm-4", "glm-3-turbo", "zhipuai" 智谱AI大模型
|
||||||
│ ├── ZHIPUAI_API_KEY
|
│ └── ZHIPUAI_API_KEY
|
||||||
│ └── ZHIPUAI_MODEL
|
│
|
||||||
|
├── "yi-34b-chat-0205", "yi-34b-chat-200k" 等零一万物(Yi Model)大模型
|
||||||
|
│ └── YIMODEL_API_KEY
|
||||||
│
|
│
|
||||||
├── "qwen-turbo" 等通义千问大模型
|
├── "qwen-turbo" 等通义千问大模型
|
||||||
│ └── DASHSCOPE_API_KEY
|
│ └── DASHSCOPE_API_KEY
|
||||||
@@ -307,9 +328,10 @@ NUM_CUSTOM_BASIC_BTN = 4
|
|||||||
├── "Gemini"
|
├── "Gemini"
|
||||||
│ └── GEMINI_API_KEY
|
│ └── GEMINI_API_KEY
|
||||||
│
|
│
|
||||||
└── "newbing" Newbing接口不再稳定,不推荐使用
|
└── "one-api-...(max_token=...)" 用一种更方便的方式接入one-api多模型管理界面
|
||||||
├── NEWBING_STYLE
|
├── AVAIL_LLM_MODELS
|
||||||
└── NEWBING_COOKIES
|
├── API_KEY
|
||||||
|
└── API_URL_REDIRECT
|
||||||
|
|
||||||
|
|
||||||
本地大模型示意图
|
本地大模型示意图
|
||||||
@@ -351,6 +373,9 @@ NUM_CUSTOM_BASIC_BTN = 4
|
|||||||
│ └── ALIYUN_SECRET
|
│ └── ALIYUN_SECRET
|
||||||
│
|
│
|
||||||
└── PDF文档精准解析
|
└── PDF文档精准解析
|
||||||
└── GROBID_URLS
|
├── GROBID_URLS
|
||||||
|
├── MATHPIX_APPID
|
||||||
|
└── MATHPIX_APPKEY
|
||||||
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -38,12 +38,12 @@ def get_core_functions():
|
|||||||
|
|
||||||
"总结绘制脑图": {
|
"总结绘制脑图": {
|
||||||
# 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
|
# 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
|
||||||
"Prefix": r"",
|
"Prefix": '''"""\n\n''',
|
||||||
# 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
|
# 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
|
||||||
"Suffix":
|
"Suffix":
|
||||||
# dedent() 函数用于去除多行字符串的缩进
|
# dedent() 函数用于去除多行字符串的缩进
|
||||||
dedent("\n"+r'''
|
dedent("\n\n"+r'''
|
||||||
==============================
|
"""
|
||||||
|
|
||||||
使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如:
|
使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如:
|
||||||
|
|
||||||
@@ -57,7 +57,7 @@ def get_core_functions():
|
|||||||
C --> |"箭头名2"| F["节点名6"]
|
C --> |"箭头名2"| F["节点名6"]
|
||||||
```
|
```
|
||||||
|
|
||||||
警告:
|
注意:
|
||||||
(1)使用中文
|
(1)使用中文
|
||||||
(2)节点名字使用引号包裹,如["Laptop"]
|
(2)节点名字使用引号包裹,如["Laptop"]
|
||||||
(3)`|` 和 `"`之间不要存在空格
|
(3)`|` 和 `"`之间不要存在空格
|
||||||
|
|||||||
@@ -70,11 +70,11 @@ def get_crazy_functions():
|
|||||||
"Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数",
|
"Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数",
|
||||||
"Function": HotReload(清除缓存),
|
"Function": HotReload(清除缓存),
|
||||||
},
|
},
|
||||||
"生成多种Mermaid图表(从当前对话或文件(.pdf/.md)中生产图表)": {
|
"生成多种Mermaid图表(从当前对话或路径(.pdf/.md/.docx)中生产图表)": {
|
||||||
"Group": "对话",
|
"Group": "对话",
|
||||||
"Color": "stop",
|
"Color": "stop",
|
||||||
"AsButton": False,
|
"AsButton": False,
|
||||||
"Info" : "基于当前对话或PDF生成多种Mermaid图表,图表类型由模型判断",
|
"Info" : "基于当前对话或文件生成多种Mermaid图表,图表类型由模型判断",
|
||||||
"Function": HotReload(生成多种Mermaid图表),
|
"Function": HotReload(生成多种Mermaid图表),
|
||||||
"AdvancedArgs": True,
|
"AdvancedArgs": True,
|
||||||
"ArgsReminder": "请输入图类型对应的数字,不输入则为模型自行判断:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图,9-思维导图",
|
"ArgsReminder": "请输入图类型对应的数字,不输入则为模型自行判断:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图,9-思维导图",
|
||||||
@@ -532,8 +532,9 @@ def get_crazy_functions():
|
|||||||
print("Load function plugin failed")
|
print("Load function plugin failed")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
|
from crazy_functions.Latex输出PDF import Latex英文纠错加PDF对比
|
||||||
from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
|
from crazy_functions.Latex输出PDF import Latex翻译中文并重新编译PDF
|
||||||
|
from crazy_functions.Latex输出PDF import PDF翻译中文并重新编译PDF
|
||||||
|
|
||||||
function_plugins.update(
|
function_plugins.update(
|
||||||
{
|
{
|
||||||
@@ -550,9 +551,9 @@ def get_crazy_functions():
|
|||||||
"Color": "stop",
|
"Color": "stop",
|
||||||
"AsButton": False,
|
"AsButton": False,
|
||||||
"AdvancedArgs": True,
|
"AdvancedArgs": True,
|
||||||
"ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
|
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
|
||||||
+ "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
|
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
|
||||||
+ 'If the term "agent" is used in this section, it should be translated to "智能体". ',
|
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
|
||||||
"Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
|
"Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
|
||||||
"Function": HotReload(Latex翻译中文并重新编译PDF),
|
"Function": HotReload(Latex翻译中文并重新编译PDF),
|
||||||
},
|
},
|
||||||
@@ -561,11 +562,22 @@ def get_crazy_functions():
|
|||||||
"Color": "stop",
|
"Color": "stop",
|
||||||
"AsButton": False,
|
"AsButton": False,
|
||||||
"AdvancedArgs": True,
|
"AdvancedArgs": True,
|
||||||
"ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
|
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
|
||||||
+ "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
|
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
|
||||||
+ 'If the term "agent" is used in this section, it should be translated to "智能体". ',
|
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
|
||||||
"Info": "本地Latex论文精细翻译 | 输入参数是路径",
|
"Info": "本地Latex论文精细翻译 | 输入参数是路径",
|
||||||
"Function": HotReload(Latex翻译中文并重新编译PDF),
|
"Function": HotReload(Latex翻译中文并重新编译PDF),
|
||||||
|
},
|
||||||
|
"PDF翻译中文并重新编译PDF(上传PDF)[需Latex]": {
|
||||||
|
"Group": "学术",
|
||||||
|
"Color": "stop",
|
||||||
|
"AsButton": False,
|
||||||
|
"AdvancedArgs": True,
|
||||||
|
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
|
||||||
|
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
|
||||||
|
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
|
||||||
|
"Info": "PDF翻译中文,并重新编译PDF | 输入参数为路径",
|
||||||
|
"Function": HotReload(PDF翻译中文并重新编译PDF)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,232 +0,0 @@
|
|||||||
from collections.abc import Callable, Iterable, Mapping
|
|
||||||
from typing import Any
|
|
||||||
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc
|
|
||||||
from toolbox import promote_file_to_downloadzone, get_log_folder
|
|
||||||
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
|
||||||
from .crazy_utils import input_clipping, try_install_deps
|
|
||||||
from multiprocessing import Process, Pipe
|
|
||||||
import os
|
|
||||||
import time
|
|
||||||
|
|
||||||
templete = """
|
|
||||||
```python
|
|
||||||
import ... # Put dependencies here, e.g. import numpy as np
|
|
||||||
|
|
||||||
class TerminalFunction(object): # Do not change the name of the class, The name of the class must be `TerminalFunction`
|
|
||||||
|
|
||||||
def run(self, path): # The name of the function must be `run`, it takes only a positional argument.
|
|
||||||
# rewrite the function you have just written here
|
|
||||||
...
|
|
||||||
return generated_file_path
|
|
||||||
```
|
|
||||||
"""
|
|
||||||
|
|
||||||
def inspect_dependency(chatbot, history):
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
||||||
return True
|
|
||||||
|
|
||||||
def get_code_block(reply):
|
|
||||||
import re
|
|
||||||
pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
|
|
||||||
matches = re.findall(pattern, reply) # find all code blocks in text
|
|
||||||
if len(matches) == 1:
|
|
||||||
return matches[0].strip('python') # code block
|
|
||||||
for match in matches:
|
|
||||||
if 'class TerminalFunction' in match:
|
|
||||||
return match.strip('python') # code block
|
|
||||||
raise RuntimeError("GPT is not generating proper code.")
|
|
||||||
|
|
||||||
def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):
|
|
||||||
# 输入
|
|
||||||
prompt_compose = [
|
|
||||||
f'Your job:\n'
|
|
||||||
f'1. write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n',
|
|
||||||
f"2. You should write this function to perform following task: " + txt + "\n",
|
|
||||||
f"3. Wrap the output python function with markdown codeblock."
|
|
||||||
]
|
|
||||||
i_say = "".join(prompt_compose)
|
|
||||||
demo = []
|
|
||||||
|
|
||||||
# 第一步
|
|
||||||
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
|
||||||
inputs=i_say, inputs_show_user=i_say,
|
|
||||||
llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo,
|
|
||||||
sys_prompt= r"You are a programmer."
|
|
||||||
)
|
|
||||||
history.extend([i_say, gpt_say])
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
|
|
||||||
|
|
||||||
# 第二步
|
|
||||||
prompt_compose = [
|
|
||||||
"If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n",
|
|
||||||
templete
|
|
||||||
]
|
|
||||||
i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. "
|
|
||||||
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
|
||||||
inputs=i_say, inputs_show_user=inputs_show_user,
|
|
||||||
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
|
|
||||||
sys_prompt= r"You are a programmer."
|
|
||||||
)
|
|
||||||
code_to_return = gpt_say
|
|
||||||
history.extend([i_say, gpt_say])
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
|
|
||||||
|
|
||||||
# # 第三步
|
|
||||||
# i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them."
|
|
||||||
# i_say += 'For instance. `try_install_deps(["opencv-python", "scipy", "numpy"])`'
|
|
||||||
# installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
|
||||||
# inputs=i_say, inputs_show_user=inputs_show_user,
|
|
||||||
# llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
|
|
||||||
# sys_prompt= r"You are a programmer."
|
|
||||||
# )
|
|
||||||
# # # 第三步
|
|
||||||
# i_say = "Show me how to use `pip` to install packages to run the code above. "
|
|
||||||
# i_say += 'For instance. `pip install -r opencv-python scipy numpy`'
|
|
||||||
# installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
|
||||||
# inputs=i_say, inputs_show_user=i_say,
|
|
||||||
# llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
|
|
||||||
# sys_prompt= r"You are a programmer."
|
|
||||||
# )
|
|
||||||
installation_advance = ""
|
|
||||||
|
|
||||||
return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history
|
|
||||||
|
|
||||||
def make_module(code):
|
|
||||||
module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
|
|
||||||
with open(f'{get_log_folder()}/{module_file}.py', 'w', encoding='utf8') as f:
|
|
||||||
f.write(code)
|
|
||||||
|
|
||||||
def get_class_name(class_string):
|
|
||||||
import re
|
|
||||||
# Use regex to extract the class name
|
|
||||||
class_name = re.search(r'class (\w+)\(', class_string).group(1)
|
|
||||||
return class_name
|
|
||||||
|
|
||||||
class_name = get_class_name(code)
|
|
||||||
return f"{get_log_folder().replace('/', '.')}.{module_file}->{class_name}"
|
|
||||||
|
|
||||||
def init_module_instance(module):
|
|
||||||
import importlib
|
|
||||||
module_, class_ = module.split('->')
|
|
||||||
init_f = getattr(importlib.import_module(module_), class_)
|
|
||||||
return init_f()
|
|
||||||
|
|
||||||
def for_immediate_show_off_when_possible(file_type, fp, chatbot):
|
|
||||||
if file_type in ['png', 'jpg']:
|
|
||||||
image_path = os.path.abspath(fp)
|
|
||||||
chatbot.append(['这是一张图片, 展示如下:',
|
|
||||||
f'本地文件地址: <br/>`{image_path}`<br/>'+
|
|
||||||
f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
|
|
||||||
])
|
|
||||||
return chatbot
|
|
||||||
|
|
||||||
def subprocess_worker(instance, file_path, return_dict):
|
|
||||||
return_dict['result'] = instance.run(file_path)
|
|
||||||
|
|
||||||
def have_any_recent_upload_files(chatbot):
|
|
||||||
_5min = 5 * 60
|
|
||||||
if not chatbot: return False # chatbot is None
|
|
||||||
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
|
|
||||||
if not most_recent_uploaded: return False # most_recent_uploaded is None
|
|
||||||
if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new
|
|
||||||
else: return False # most_recent_uploaded is too old
|
|
||||||
|
|
||||||
def get_recent_file_prompt_support(chatbot):
|
|
||||||
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
|
|
||||||
path = most_recent_uploaded['path']
|
|
||||||
return path
|
|
||||||
|
|
||||||
@CatchException
|
|
||||||
def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
|
||||||
"""
|
|
||||||
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
|
||||||
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
||||||
plugin_kwargs 插件模型的参数,暂时没有用武之地
|
|
||||||
chatbot 聊天显示框的句柄,用于显示给用户
|
|
||||||
history 聊天历史,前情提要
|
|
||||||
system_prompt 给gpt的静默提醒
|
|
||||||
user_request 当前用户的请求信息(IP地址等)
|
|
||||||
"""
|
|
||||||
raise NotImplementedError
|
|
||||||
|
|
||||||
# 清空历史,以免输入溢出
|
|
||||||
history = []; clear_file_downloadzone(chatbot)
|
|
||||||
|
|
||||||
# 基本信息:功能、贡献者
|
|
||||||
chatbot.append([
|
|
||||||
"函数插件功能?",
|
|
||||||
"CodeInterpreter开源版, 此插件处于开发阶段, 建议暂时不要使用, 插件初始化中 ..."
|
|
||||||
])
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
||||||
|
|
||||||
if have_any_recent_upload_files(chatbot):
|
|
||||||
file_path = get_recent_file_prompt_support(chatbot)
|
|
||||||
else:
|
|
||||||
chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
||||||
|
|
||||||
# 读取文件
|
|
||||||
if ("recently_uploaded_files" in plugin_kwargs) and (plugin_kwargs["recently_uploaded_files"] == ""): plugin_kwargs.pop("recently_uploaded_files")
|
|
||||||
recently_uploaded_files = plugin_kwargs.get("recently_uploaded_files", None)
|
|
||||||
file_path = recently_uploaded_files[-1]
|
|
||||||
file_type = file_path.split('.')[-1]
|
|
||||||
|
|
||||||
# 粗心检查
|
|
||||||
if is_the_upload_folder(txt):
|
|
||||||
chatbot.append([
|
|
||||||
"...",
|
|
||||||
f"请在输入框内填写需求,然后再次点击该插件(文件路径 {file_path} 已经被记忆)"
|
|
||||||
])
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
||||||
return
|
|
||||||
|
|
||||||
# 开始干正事
|
|
||||||
for j in range(5): # 最多重试5次
|
|
||||||
try:
|
|
||||||
code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \
|
|
||||||
yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history)
|
|
||||||
code = get_code_block(code)
|
|
||||||
res = make_module(code)
|
|
||||||
instance = init_module_instance(res)
|
|
||||||
break
|
|
||||||
except Exception as e:
|
|
||||||
chatbot.append([f"第{j}次代码生成尝试,失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"])
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
||||||
|
|
||||||
# 代码生成结束, 开始执行
|
|
||||||
try:
|
|
||||||
import multiprocessing
|
|
||||||
manager = multiprocessing.Manager()
|
|
||||||
return_dict = manager.dict()
|
|
||||||
|
|
||||||
p = multiprocessing.Process(target=subprocess_worker, args=(instance, file_path, return_dict))
|
|
||||||
# only has 10 seconds to run
|
|
||||||
p.start(); p.join(timeout=10)
|
|
||||||
if p.is_alive(): p.terminate(); p.join()
|
|
||||||
p.close()
|
|
||||||
res = return_dict['result']
|
|
||||||
# res = instance.run(file_path)
|
|
||||||
except Exception as e:
|
|
||||||
chatbot.append(["执行失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"])
|
|
||||||
# chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance])
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
||||||
return
|
|
||||||
|
|
||||||
# 顺利完成,收尾
|
|
||||||
res = str(res)
|
|
||||||
if os.path.exists(res):
|
|
||||||
chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res])
|
|
||||||
new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot)
|
|
||||||
chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot)
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
|
|
||||||
else:
|
|
||||||
chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res])
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
|
|
||||||
|
|
||||||
"""
|
|
||||||
测试:
|
|
||||||
裁剪图像,保留下半部分
|
|
||||||
交换图像的蓝色通道和红色通道
|
|
||||||
将图像转为灰度图像
|
|
||||||
将csv文件转excel表格
|
|
||||||
"""
|
|
||||||
@@ -81,8 +81,8 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
|
|||||||
# <-------- 多线程润色开始 ---------->
|
# <-------- 多线程润色开始 ---------->
|
||||||
if language == 'en':
|
if language == 'en':
|
||||||
if mode == 'polish':
|
if mode == 'polish':
|
||||||
inputs_array = ["Below is a section from an academic paper, polish this section to meet the academic standard, " +
|
inputs_array = [r"Below is a section from an academic paper, polish this section to meet the academic standard, " +
|
||||||
"improve the grammar, clarity and overall readability, do not modify any latex command such as \section, \cite and equations:" +
|
r"improve the grammar, clarity and overall readability, do not modify any latex command such as \section, \cite and equations:" +
|
||||||
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
||||||
else:
|
else:
|
||||||
inputs_array = [r"Below is a section from an academic paper, proofread this section." +
|
inputs_array = [r"Below is a section from an academic paper, proofread this section." +
|
||||||
@@ -93,10 +93,10 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
|
|||||||
sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
|
sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
|
||||||
elif language == 'zh':
|
elif language == 'zh':
|
||||||
if mode == 'polish':
|
if mode == 'polish':
|
||||||
inputs_array = [f"以下是一篇学术论文中的一段内容,请将此部分润色以满足学术标准,提高语法、清晰度和整体可读性,不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
|
inputs_array = [r"以下是一篇学术论文中的一段内容,请将此部分润色以满足学术标准,提高语法、清晰度和整体可读性,不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
|
||||||
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
||||||
else:
|
else:
|
||||||
inputs_array = [f"以下是一篇学术论文中的一段内容,请对这部分内容进行语法矫正。不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
|
inputs_array = [r"以下是一篇学术论文中的一段内容,请对这部分内容进行语法矫正。不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
|
||||||
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
||||||
inputs_show_user_array = [f"润色 {f}" for f in pfg.sp_file_tag]
|
inputs_show_user_array = [f"润色 {f}" for f in pfg.sp_file_tag]
|
||||||
sys_prompt_array=["你是一位专业的中文学术论文作家。" for _ in range(n_split)]
|
sys_prompt_array=["你是一位专业的中文学术论文作家。" for _ in range(n_split)]
|
||||||
|
|||||||
538
crazy_functions/Latex输出PDF.py
普通文件
538
crazy_functions/Latex输出PDF.py
普通文件
@@ -0,0 +1,538 @@
|
|||||||
|
from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone, check_repeat_upload, map_file_to_sha256
|
||||||
|
from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
|
||||||
|
from functools import partial
|
||||||
|
import glob, os, requests, time, json, tarfile
|
||||||
|
|
||||||
|
pj = os.path.join
|
||||||
|
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
|
||||||
|
|
||||||
|
|
||||||
|
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
||||||
|
# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
|
||||||
|
def switch_prompt(pfg, mode, more_requirement):
|
||||||
|
"""
|
||||||
|
Generate prompts and system prompts based on the mode for proofreading or translating.
|
||||||
|
Args:
|
||||||
|
- pfg: Proofreader or Translator instance.
|
||||||
|
- mode: A string specifying the mode, either 'proofread' or 'translate_zh'.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- inputs_array: A list of strings containing prompts for users to respond to.
|
||||||
|
- sys_prompt_array: A list of strings containing prompts for system prompts.
|
||||||
|
"""
|
||||||
|
n_split = len(pfg.sp_file_contents)
|
||||||
|
if mode == 'proofread_en':
|
||||||
|
inputs_array = [r"Below is a section from an academic paper, proofread this section." +
|
||||||
|
r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + more_requirement +
|
||||||
|
r"Answer me only with the revised text:" +
|
||||||
|
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
||||||
|
sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
|
||||||
|
elif mode == 'translate_zh':
|
||||||
|
inputs_array = [
|
||||||
|
r"Below is a section from an English academic paper, translate it into Chinese. " + more_requirement +
|
||||||
|
r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " +
|
||||||
|
r"Answer me only with the translated text:" +
|
||||||
|
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
||||||
|
sys_prompt_array = ["You are a professional translator." for _ in range(n_split)]
|
||||||
|
else:
|
||||||
|
assert False, "未知指令"
|
||||||
|
return inputs_array, sys_prompt_array
|
||||||
|
|
||||||
|
|
||||||
|
def desend_to_extracted_folder_if_exist(project_folder):
|
||||||
|
"""
|
||||||
|
Descend into the extracted folder if it exists, otherwise return the original folder.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
- project_folder: A string specifying the folder path.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- A string specifying the path to the extracted folder, or the original folder if there is no extracted folder.
|
||||||
|
"""
|
||||||
|
maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)]
|
||||||
|
if len(maybe_dir) == 0: return project_folder
|
||||||
|
if maybe_dir[0].endswith('.extract'): return maybe_dir[0]
|
||||||
|
return project_folder
|
||||||
|
|
||||||
|
|
||||||
|
def move_project(project_folder, arxiv_id=None):
|
||||||
|
"""
|
||||||
|
Create a new work folder and copy the project folder to it.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
- project_folder: A string specifying the folder path of the project.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- A string specifying the path to the new work folder.
|
||||||
|
"""
|
||||||
|
import shutil, time
|
||||||
|
time.sleep(2) # avoid time string conflict
|
||||||
|
if arxiv_id is not None:
|
||||||
|
new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder')
|
||||||
|
else:
|
||||||
|
new_workfolder = f'{get_log_folder()}/{gen_time_str()}'
|
||||||
|
try:
|
||||||
|
shutil.rmtree(new_workfolder)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# align subfolder if there is a folder wrapper
|
||||||
|
items = glob.glob(pj(project_folder, '*'))
|
||||||
|
items = [item for item in items if os.path.basename(item) != '__MACOSX']
|
||||||
|
if len(glob.glob(pj(project_folder, '*.tex'))) == 0 and len(items) == 1:
|
||||||
|
if os.path.isdir(items[0]): project_folder = items[0]
|
||||||
|
|
||||||
|
shutil.copytree(src=project_folder, dst=new_workfolder)
|
||||||
|
return new_workfolder
|
||||||
|
|
||||||
|
|
||||||
|
def arxiv_download(chatbot, history, txt, allow_cache=True):
|
||||||
|
def check_cached_translation_pdf(arxiv_id):
|
||||||
|
translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation')
|
||||||
|
if not os.path.exists(translation_dir):
|
||||||
|
os.makedirs(translation_dir)
|
||||||
|
target_file = pj(translation_dir, 'translate_zh.pdf')
|
||||||
|
if os.path.exists(target_file):
|
||||||
|
promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot)
|
||||||
|
target_file_compare = pj(translation_dir, 'comparison.pdf')
|
||||||
|
if os.path.exists(target_file_compare):
|
||||||
|
promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot)
|
||||||
|
return target_file
|
||||||
|
return False
|
||||||
|
|
||||||
|
def is_float(s):
|
||||||
|
try:
|
||||||
|
float(s)
|
||||||
|
return True
|
||||||
|
except ValueError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if ('.' in txt) and ('/' not in txt) and is_float(txt): # is arxiv ID
|
||||||
|
txt = 'https://arxiv.org/abs/' + txt.strip()
|
||||||
|
if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID
|
||||||
|
txt = 'https://arxiv.org/abs/' + txt[:10]
|
||||||
|
|
||||||
|
if not txt.startswith('https://arxiv.org'):
|
||||||
|
return txt, None # 是本地文件,跳过下载
|
||||||
|
|
||||||
|
# <-------------- inspect format ------------->
|
||||||
|
chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...'])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
time.sleep(1) # 刷新界面
|
||||||
|
|
||||||
|
url_ = txt # https://arxiv.org/abs/1707.06690
|
||||||
|
if not txt.startswith('https://arxiv.org/abs/'):
|
||||||
|
msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。"
|
||||||
|
yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return msg, None
|
||||||
|
# <-------------- set format ------------->
|
||||||
|
arxiv_id = url_.split('/abs/')[-1]
|
||||||
|
if 'v' in arxiv_id: arxiv_id = arxiv_id[:10]
|
||||||
|
cached_translation_pdf = check_cached_translation_pdf(arxiv_id)
|
||||||
|
if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id
|
||||||
|
|
||||||
|
url_tar = url_.replace('/abs/', '/e-print/')
|
||||||
|
translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print')
|
||||||
|
extract_dst = pj(ARXIV_CACHE_DIR, arxiv_id, 'extract')
|
||||||
|
os.makedirs(translation_dir, exist_ok=True)
|
||||||
|
|
||||||
|
# <-------------- download arxiv source file ------------->
|
||||||
|
dst = pj(translation_dir, arxiv_id + '.tar')
|
||||||
|
if os.path.exists(dst):
|
||||||
|
yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
else:
|
||||||
|
yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
proxies = get_conf('proxies')
|
||||||
|
r = requests.get(url_tar, proxies=proxies)
|
||||||
|
with open(dst, 'wb+') as f:
|
||||||
|
f.write(r.content)
|
||||||
|
# <-------------- extract file ------------->
|
||||||
|
yield from update_ui_lastest_msg("下载完成", chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
from toolbox import extract_archive
|
||||||
|
extract_archive(file_path=dst, dest_dir=extract_dst)
|
||||||
|
return extract_dst, arxiv_id
|
||||||
|
|
||||||
|
|
||||||
|
def pdf2tex_project(pdf_file_path):
|
||||||
|
# Mathpix API credentials
|
||||||
|
app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY')
|
||||||
|
headers = {"app_id": app_id, "app_key": app_key}
|
||||||
|
|
||||||
|
# Step 1: Send PDF file for processing
|
||||||
|
options = {
|
||||||
|
"conversion_formats": {"tex.zip": True},
|
||||||
|
"math_inline_delimiters": ["$", "$"],
|
||||||
|
"rm_spaces": True
|
||||||
|
}
|
||||||
|
|
||||||
|
response = requests.post(url="https://api.mathpix.com/v3/pdf",
|
||||||
|
headers=headers,
|
||||||
|
data={"options_json": json.dumps(options)},
|
||||||
|
files={"file": open(pdf_file_path, "rb")})
|
||||||
|
|
||||||
|
if response.ok:
|
||||||
|
pdf_id = response.json()["pdf_id"]
|
||||||
|
print(f"PDF processing initiated. PDF ID: {pdf_id}")
|
||||||
|
|
||||||
|
# Step 2: Check processing status
|
||||||
|
while True:
|
||||||
|
conversion_response = requests.get(f"https://api.mathpix.com/v3/pdf/{pdf_id}", headers=headers)
|
||||||
|
conversion_data = conversion_response.json()
|
||||||
|
|
||||||
|
if conversion_data["status"] == "completed":
|
||||||
|
print("PDF processing completed.")
|
||||||
|
break
|
||||||
|
elif conversion_data["status"] == "error":
|
||||||
|
print("Error occurred during processing.")
|
||||||
|
else:
|
||||||
|
print(f"Processing status: {conversion_data['status']}")
|
||||||
|
time.sleep(5) # wait for a few seconds before checking again
|
||||||
|
|
||||||
|
# Step 3: Save results to local files
|
||||||
|
output_dir = os.path.join(os.path.dirname(pdf_file_path), 'mathpix_output')
|
||||||
|
if not os.path.exists(output_dir):
|
||||||
|
os.makedirs(output_dir)
|
||||||
|
|
||||||
|
url = f"https://api.mathpix.com/v3/pdf/{pdf_id}.tex"
|
||||||
|
response = requests.get(url, headers=headers)
|
||||||
|
file_name_wo_dot = '_'.join(os.path.basename(pdf_file_path).split('.')[:-1])
|
||||||
|
output_name = f"{file_name_wo_dot}.tex.zip"
|
||||||
|
output_path = os.path.join(output_dir, output_name)
|
||||||
|
with open(output_path, "wb") as output_file:
|
||||||
|
output_file.write(response.content)
|
||||||
|
print(f"tex.zip file saved at: {output_path}")
|
||||||
|
|
||||||
|
import zipfile
|
||||||
|
unzip_dir = os.path.join(output_dir, file_name_wo_dot)
|
||||||
|
with zipfile.ZipFile(output_path, 'r') as zip_ref:
|
||||||
|
zip_ref.extractall(unzip_dir)
|
||||||
|
|
||||||
|
return unzip_dir
|
||||||
|
|
||||||
|
else:
|
||||||
|
print(f"Error sending PDF for processing. Status code: {response.status_code}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||||
|
|
||||||
|
|
||||||
|
@CatchException
|
||||||
|
def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
||||||
|
# <-------------- information about this plugin ------------->
|
||||||
|
chatbot.append(["函数插件功能?",
|
||||||
|
"对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
|
||||||
|
# <-------------- more requirements ------------->
|
||||||
|
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
|
||||||
|
more_req = plugin_kwargs.get("advanced_arg", "")
|
||||||
|
_switch_prompt_ = partial(switch_prompt, more_requirement=more_req)
|
||||||
|
|
||||||
|
# <-------------- check deps ------------->
|
||||||
|
try:
|
||||||
|
import glob, os, time, subprocess
|
||||||
|
subprocess.Popen(['pdflatex', '-version'])
|
||||||
|
from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
|
||||||
|
except Exception as e:
|
||||||
|
chatbot.append([f"解析项目: {txt}",
|
||||||
|
f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
|
||||||
|
# <-------------- clear history and read input ------------->
|
||||||
|
history = []
|
||||||
|
if os.path.exists(txt):
|
||||||
|
project_folder = txt
|
||||||
|
else:
|
||||||
|
if txt == "": txt = '空空如也的输入栏'
|
||||||
|
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
|
||||||
|
if len(file_manifest) == 0:
|
||||||
|
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
|
||||||
|
# <-------------- if is a zip/tar file ------------->
|
||||||
|
project_folder = desend_to_extracted_folder_if_exist(project_folder)
|
||||||
|
|
||||||
|
# <-------------- move latex project away from temp folder ------------->
|
||||||
|
project_folder = move_project(project_folder, arxiv_id=None)
|
||||||
|
|
||||||
|
# <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
|
||||||
|
if not os.path.exists(project_folder + '/merge_proofread_en.tex'):
|
||||||
|
yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
|
||||||
|
chatbot, history, system_prompt, mode='proofread_en',
|
||||||
|
switch_prompt=_switch_prompt_)
|
||||||
|
|
||||||
|
# <-------------- compile PDF ------------->
|
||||||
|
success = yield from 编译Latex(chatbot, history, main_file_original='merge',
|
||||||
|
main_file_modified='merge_proofread_en',
|
||||||
|
work_folder_original=project_folder, work_folder_modified=project_folder,
|
||||||
|
work_folder=project_folder)
|
||||||
|
|
||||||
|
# <-------------- zip PDF ------------->
|
||||||
|
zip_res = zip_result(project_folder)
|
||||||
|
if success:
|
||||||
|
chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history);
|
||||||
|
time.sleep(1) # 刷新界面
|
||||||
|
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
|
||||||
|
else:
|
||||||
|
chatbot.append((f"失败了",
|
||||||
|
'虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...'))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history);
|
||||||
|
time.sleep(1) # 刷新界面
|
||||||
|
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
|
||||||
|
|
||||||
|
# <-------------- we are done ------------->
|
||||||
|
return success
|
||||||
|
|
||||||
|
|
||||||
|
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||||
|
|
||||||
|
@CatchException
|
||||||
|
def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
||||||
|
# <-------------- information about this plugin ------------->
|
||||||
|
chatbot.append([
|
||||||
|
"函数插件功能?",
|
||||||
|
"对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
|
||||||
|
# <-------------- more requirements ------------->
|
||||||
|
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
|
||||||
|
more_req = plugin_kwargs.get("advanced_arg", "")
|
||||||
|
no_cache = more_req.startswith("--no-cache")
|
||||||
|
if no_cache: more_req.lstrip("--no-cache")
|
||||||
|
allow_cache = not no_cache
|
||||||
|
_switch_prompt_ = partial(switch_prompt, more_requirement=more_req)
|
||||||
|
|
||||||
|
# <-------------- check deps ------------->
|
||||||
|
try:
|
||||||
|
import glob, os, time, subprocess
|
||||||
|
subprocess.Popen(['pdflatex', '-version'])
|
||||||
|
from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
|
||||||
|
except Exception as e:
|
||||||
|
chatbot.append([f"解析项目: {txt}",
|
||||||
|
f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
|
||||||
|
# <-------------- clear history and read input ------------->
|
||||||
|
history = []
|
||||||
|
try:
|
||||||
|
txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
|
||||||
|
except tarfile.ReadError as e:
|
||||||
|
yield from update_ui_lastest_msg(
|
||||||
|
"无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。",
|
||||||
|
chatbot=chatbot, history=history)
|
||||||
|
return
|
||||||
|
|
||||||
|
if txt.endswith('.pdf'):
|
||||||
|
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"发现已经存在翻译好的PDF文档")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
|
||||||
|
if os.path.exists(txt):
|
||||||
|
project_folder = txt
|
||||||
|
else:
|
||||||
|
if txt == "": txt = '空空如也的输入栏'
|
||||||
|
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无法处理: {txt}")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
|
||||||
|
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
|
||||||
|
if len(file_manifest) == 0:
|
||||||
|
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
|
||||||
|
# <-------------- if is a zip/tar file ------------->
|
||||||
|
project_folder = desend_to_extracted_folder_if_exist(project_folder)
|
||||||
|
|
||||||
|
# <-------------- move latex project away from temp folder ------------->
|
||||||
|
project_folder = move_project(project_folder, arxiv_id)
|
||||||
|
|
||||||
|
# <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
|
||||||
|
if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
|
||||||
|
yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
|
||||||
|
chatbot, history, system_prompt, mode='translate_zh',
|
||||||
|
switch_prompt=_switch_prompt_)
|
||||||
|
|
||||||
|
# <-------------- compile PDF ------------->
|
||||||
|
success = yield from 编译Latex(chatbot, history, main_file_original='merge',
|
||||||
|
main_file_modified='merge_translate_zh', mode='translate_zh',
|
||||||
|
work_folder_original=project_folder, work_folder_modified=project_folder,
|
||||||
|
work_folder=project_folder)
|
||||||
|
|
||||||
|
# <-------------- zip PDF ------------->
|
||||||
|
zip_res = zip_result(project_folder)
|
||||||
|
if success:
|
||||||
|
chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history);
|
||||||
|
time.sleep(1) # 刷新界面
|
||||||
|
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
|
||||||
|
else:
|
||||||
|
chatbot.append((f"失败了",
|
||||||
|
'虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...'))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history);
|
||||||
|
time.sleep(1) # 刷新界面
|
||||||
|
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
|
||||||
|
|
||||||
|
# <-------------- we are done ------------->
|
||||||
|
return success
|
||||||
|
|
||||||
|
|
||||||
|
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 插件主程序3 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||||
|
|
||||||
|
@CatchException
|
||||||
|
def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
||||||
|
# <-------------- information about this plugin ------------->
|
||||||
|
chatbot.append([
|
||||||
|
"函数插件功能?",
|
||||||
|
"将PDF转换为Latex项目,翻译为中文后重新编译为PDF。函数插件贡献者: Marroh。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
|
||||||
|
# <-------------- more requirements ------------->
|
||||||
|
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
|
||||||
|
more_req = plugin_kwargs.get("advanced_arg", "")
|
||||||
|
no_cache = more_req.startswith("--no-cache")
|
||||||
|
if no_cache: more_req.lstrip("--no-cache")
|
||||||
|
allow_cache = not no_cache
|
||||||
|
_switch_prompt_ = partial(switch_prompt, more_requirement=more_req)
|
||||||
|
|
||||||
|
# <-------------- check deps ------------->
|
||||||
|
try:
|
||||||
|
import glob, os, time, subprocess
|
||||||
|
subprocess.Popen(['pdflatex', '-version'])
|
||||||
|
from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
|
||||||
|
except Exception as e:
|
||||||
|
chatbot.append([f"解析项目: {txt}",
|
||||||
|
f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
|
||||||
|
# <-------------- clear history and read input ------------->
|
||||||
|
if os.path.exists(txt):
|
||||||
|
project_folder = txt
|
||||||
|
else:
|
||||||
|
if txt == "": txt = '空空如也的输入栏'
|
||||||
|
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无法处理: {txt}")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
|
||||||
|
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
|
||||||
|
if len(file_manifest) == 0:
|
||||||
|
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf文件: {txt}")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
if len(file_manifest) != 1:
|
||||||
|
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"不支持同时处理多个pdf文件: {txt}")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY')
|
||||||
|
if len(app_id) == 0 or len(app_key) == 0:
|
||||||
|
report_exception(chatbot, history, a="缺失 MATHPIX_APPID 和 MATHPIX_APPKEY。", b=f"请配置 MATHPIX_APPID 和 MATHPIX_APPKEY")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
|
||||||
|
hash_tag = map_file_to_sha256(file_manifest[0])
|
||||||
|
|
||||||
|
# <-------------- check repeated pdf ------------->
|
||||||
|
chatbot.append([f"检查PDF是否被重复上传", "正在检查..."])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
repeat, project_folder = check_repeat_upload(file_manifest[0], hash_tag)
|
||||||
|
|
||||||
|
except_flag = False
|
||||||
|
|
||||||
|
if repeat:
|
||||||
|
yield from update_ui_lastest_msg(f"发现重复上传,请查收结果(压缩包)...", chatbot=chatbot, history=history)
|
||||||
|
|
||||||
|
try:
|
||||||
|
trans_html_file = [f for f in glob.glob(f'{project_folder}/**/*.trans.html', recursive=True)][0]
|
||||||
|
promote_file_to_downloadzone(trans_html_file, rename_file=None, chatbot=chatbot)
|
||||||
|
|
||||||
|
translate_pdf = [f for f in glob.glob(f'{project_folder}/**/merge_translate_zh.pdf', recursive=True)][0]
|
||||||
|
promote_file_to_downloadzone(translate_pdf, rename_file=None, chatbot=chatbot)
|
||||||
|
|
||||||
|
comparison_pdf = [f for f in glob.glob(f'{project_folder}/**/comparison.pdf', recursive=True)][0]
|
||||||
|
promote_file_to_downloadzone(comparison_pdf, rename_file=None, chatbot=chatbot)
|
||||||
|
|
||||||
|
zip_res = zip_result(project_folder)
|
||||||
|
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
except:
|
||||||
|
report_exception(chatbot, history, b=f"发现重复上传,但是无法找到相关文件")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
|
||||||
|
chatbot.append([f"没有相关文件", '尝试重新翻译PDF...'])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
|
||||||
|
except_flag = True
|
||||||
|
|
||||||
|
|
||||||
|
elif not repeat or except_flag:
|
||||||
|
yield from update_ui_lastest_msg(f"未发现重复上传", chatbot=chatbot, history=history)
|
||||||
|
|
||||||
|
# <-------------- convert pdf into tex ------------->
|
||||||
|
chatbot.append([f"解析项目: {txt}", "正在将PDF转换为tex项目,请耐心等待..."])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
project_folder = pdf2tex_project(file_manifest[0])
|
||||||
|
if project_folder is None:
|
||||||
|
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"PDF转换为tex项目失败")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
return False
|
||||||
|
|
||||||
|
# <-------------- translate latex file into Chinese ------------->
|
||||||
|
yield from update_ui_lastest_msg("正在tex项目将翻译为中文...", chatbot=chatbot, history=history)
|
||||||
|
file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
|
||||||
|
if len(file_manifest) == 0:
|
||||||
|
report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
return
|
||||||
|
|
||||||
|
# <-------------- if is a zip/tar file ------------->
|
||||||
|
project_folder = desend_to_extracted_folder_if_exist(project_folder)
|
||||||
|
|
||||||
|
# <-------------- move latex project away from temp folder ------------->
|
||||||
|
project_folder = move_project(project_folder)
|
||||||
|
|
||||||
|
# <-------------- set a hash tag for repeat-checking ------------->
|
||||||
|
with open(pj(project_folder, hash_tag + '.tag'), 'w') as f:
|
||||||
|
f.write(hash_tag)
|
||||||
|
f.close()
|
||||||
|
|
||||||
|
|
||||||
|
# <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
|
||||||
|
if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
|
||||||
|
yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
|
||||||
|
chatbot, history, system_prompt, mode='translate_zh',
|
||||||
|
switch_prompt=_switch_prompt_)
|
||||||
|
|
||||||
|
# <-------------- compile PDF ------------->
|
||||||
|
yield from update_ui_lastest_msg("正在将翻译好的项目tex项目编译为PDF...", chatbot=chatbot, history=history)
|
||||||
|
success = yield from 编译Latex(chatbot, history, main_file_original='merge',
|
||||||
|
main_file_modified='merge_translate_zh', mode='translate_zh',
|
||||||
|
work_folder_original=project_folder, work_folder_modified=project_folder,
|
||||||
|
work_folder=project_folder)
|
||||||
|
|
||||||
|
# <-------------- zip PDF ------------->
|
||||||
|
zip_res = zip_result(project_folder)
|
||||||
|
if success:
|
||||||
|
chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history);
|
||||||
|
time.sleep(1) # 刷新界面
|
||||||
|
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
|
||||||
|
else:
|
||||||
|
chatbot.append((f"失败了",
|
||||||
|
'虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...'))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history);
|
||||||
|
time.sleep(1) # 刷新界面
|
||||||
|
promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
|
||||||
|
|
||||||
|
# <-------------- we are done ------------->
|
||||||
|
return success
|
||||||
@@ -1,313 +0,0 @@
|
|||||||
from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
|
|
||||||
from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
|
|
||||||
from functools import partial
|
|
||||||
import glob, os, requests, time, tarfile
|
|
||||||
pj = os.path.join
|
|
||||||
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
|
|
||||||
|
|
||||||
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
|
||||||
# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
|
|
||||||
def switch_prompt(pfg, mode, more_requirement):
|
|
||||||
"""
|
|
||||||
Generate prompts and system prompts based on the mode for proofreading or translating.
|
|
||||||
Args:
|
|
||||||
- pfg: Proofreader or Translator instance.
|
|
||||||
- mode: A string specifying the mode, either 'proofread' or 'translate_zh'.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
- inputs_array: A list of strings containing prompts for users to respond to.
|
|
||||||
- sys_prompt_array: A list of strings containing prompts for system prompts.
|
|
||||||
"""
|
|
||||||
n_split = len(pfg.sp_file_contents)
|
|
||||||
if mode == 'proofread_en':
|
|
||||||
inputs_array = [r"Below is a section from an academic paper, proofread this section." +
|
|
||||||
r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + more_requirement +
|
|
||||||
r"Answer me only with the revised text:" +
|
|
||||||
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
|
||||||
sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
|
|
||||||
elif mode == 'translate_zh':
|
|
||||||
inputs_array = [r"Below is a section from an English academic paper, translate it into Chinese. " + more_requirement +
|
|
||||||
r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " +
|
|
||||||
r"Answer me only with the translated text:" +
|
|
||||||
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
|
||||||
sys_prompt_array = ["You are a professional translator." for _ in range(n_split)]
|
|
||||||
else:
|
|
||||||
assert False, "未知指令"
|
|
||||||
return inputs_array, sys_prompt_array
|
|
||||||
|
|
||||||
def desend_to_extracted_folder_if_exist(project_folder):
|
|
||||||
"""
|
|
||||||
Descend into the extracted folder if it exists, otherwise return the original folder.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
- project_folder: A string specifying the folder path.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
- A string specifying the path to the extracted folder, or the original folder if there is no extracted folder.
|
|
||||||
"""
|
|
||||||
maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)]
|
|
||||||
if len(maybe_dir) == 0: return project_folder
|
|
||||||
if maybe_dir[0].endswith('.extract'): return maybe_dir[0]
|
|
||||||
return project_folder
|
|
||||||
|
|
||||||
def move_project(project_folder, arxiv_id=None):
|
|
||||||
"""
|
|
||||||
Create a new work folder and copy the project folder to it.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
- project_folder: A string specifying the folder path of the project.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
- A string specifying the path to the new work folder.
|
|
||||||
"""
|
|
||||||
import shutil, time
|
|
||||||
time.sleep(2) # avoid time string conflict
|
|
||||||
if arxiv_id is not None:
|
|
||||||
new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder')
|
|
||||||
else:
|
|
||||||
new_workfolder = f'{get_log_folder()}/{gen_time_str()}'
|
|
||||||
try:
|
|
||||||
shutil.rmtree(new_workfolder)
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
# align subfolder if there is a folder wrapper
|
|
||||||
items = glob.glob(pj(project_folder,'*'))
|
|
||||||
items = [item for item in items if os.path.basename(item)!='__MACOSX']
|
|
||||||
if len(glob.glob(pj(project_folder,'*.tex'))) == 0 and len(items) == 1:
|
|
||||||
if os.path.isdir(items[0]): project_folder = items[0]
|
|
||||||
|
|
||||||
shutil.copytree(src=project_folder, dst=new_workfolder)
|
|
||||||
return new_workfolder
|
|
||||||
|
|
||||||
def arxiv_download(chatbot, history, txt, allow_cache=True):
    """
    Resolve user input to an arxiv paper and download + extract its Latex source.

    Args:
    - chatbot: UI chat handle (used for progress messages and download links).
    - history: chat history list (used only for UI refresh).
    - txt: user input; either a local path, an arxiv ID (e.g. "1707.06690"),
      or an arxiv abstract URL.
    - allow_cache: when True, a previously translated PDF is returned directly.

    Returns:
    - (path, arxiv_id): path is the extraction folder (or a cached translated
      PDF, or the original txt for local files); arxiv_id is None when the
      input is not an arxiv reference.
    """
    def check_cached_translation_pdf(arxiv_id):
        # Look for a previously generated translation; if found, promote it
        # (and the side-by-side comparison PDF, if present) to the download zone.
        translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation')
        if not os.path.exists(translation_dir):
            os.makedirs(translation_dir)
        target_file = pj(translation_dir, 'translate_zh.pdf')
        if os.path.exists(target_file):
            promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot)
            target_file_compare = pj(translation_dir, 'comparison.pdf')
            if os.path.exists(target_file_compare):
                promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot)
            return target_file
        return False

    def is_float(s):
        try:
            float(s)
            return True
        except ValueError:
            return False

    # Normalize a bare arxiv ID (e.g. "1707.06690" or "1707.06690v2") to a URL.
    if ('.' in txt) and ('/' not in txt) and is_float(txt):  # is arxiv ID
        txt = 'https://arxiv.org/abs/' + txt.strip()
    if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]):  # is arxiv ID
        txt = 'https://arxiv.org/abs/' + txt[:10]
    if not txt.startswith('https://arxiv.org'):
        return txt, None  # 是本地文件,跳过下载

    # <-------------- inspect format ------------->
    chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...'])
    yield from update_ui(chatbot=chatbot, history=history)
    time.sleep(1)  # 刷新界面

    url_ = txt  # https://arxiv.org/abs/1707.06690
    if not txt.startswith('https://arxiv.org/abs/'):
        msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。"
        yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history)  # 刷新界面
        return msg, None
    # <-------------- set format ------------->
    arxiv_id = url_.split('/abs/')[-1]
    if 'v' in arxiv_id: arxiv_id = arxiv_id[:10]  # strip version suffix, e.g. "1707.06690v2"
    cached_translation_pdf = check_cached_translation_pdf(arxiv_id)
    if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id

    url_tar = url_.replace('/abs/', '/e-print/')
    translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print')
    extract_dst = pj(ARXIV_CACHE_DIR, arxiv_id, 'extract')
    os.makedirs(translation_dir, exist_ok=True)

    # <-------------- download arxiv source file ------------->
    dst = pj(translation_dir, arxiv_id + '.tar')
    if os.path.exists(dst):
        yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history)  # 刷新界面
    else:
        yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history)  # 刷新界面
        proxies = get_conf('proxies')
        r = requests.get(url_tar, proxies=proxies)
        with open(dst, 'wb') as f:  # write-only is sufficient ('wb+' read mode was unused)
            f.write(r.content)
    # <-------------- extract file ------------->
    # NOTE(review): a failed download leaves an HTML/error payload in dst; the
    # resulting tarfile.ReadError is caught by the caller — do not raise here.
    yield from update_ui_lastest_msg("下载完成", chatbot=chatbot, history=history)  # 刷新界面
    from toolbox import extract_archive
    extract_archive(file_path=dst, dest_dir=extract_dst)
    return extract_dst, arxiv_id
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
|
||||||
|
|
||||||
|
|
||||||
@CatchException
def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    """
    Plugin entry: proofread an English Latex project and compile a PDF that
    highlights the corrections (original vs. proofread comparison).

    Args mirror the standard plugin interface (txt is a local project path).
    Returns the compile success flag; intermediate progress is streamed to the UI.
    """
    # <-------------- information about this plugin ------------->
    chatbot.append([ "函数插件功能?",
        "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # <-------------- more requirements ------------->
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    more_req = plugin_kwargs.get("advanced_arg", "")
    _switch_prompt_ = partial(switch_prompt, more_requirement=more_req)

    # <-------------- check deps ------------->
    try:
        import glob, os, time, subprocess
        subprocess.Popen(['pdflatex', '-version'])  # probe that pdflatex is on PATH
        from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
    except Exception as e:
        chatbot.append([ f"解析项目: {txt}",
            f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # <-------------- clear history and read input ------------->
    history = []
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # <-------------- if is a zip/tar file ------------->
    project_folder = desend_to_extracted_folder_if_exist(project_folder)

    # <-------------- move latex project away from temp folder ------------->
    project_folder = move_project(project_folder, arxiv_id=None)

    # <-------------- if merge_proofread_en is already generated, skip gpt req ------------->
    if not os.path.exists(project_folder + '/merge_proofread_en.tex'):
        yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
                                    chatbot, history, system_prompt, mode='proofread_en', switch_prompt=_switch_prompt_)

    # <-------------- compile PDF ------------->
    success = yield from 编译Latex(chatbot, history, main_file_original='merge', main_file_modified='merge_proofread_en',
                                 work_folder_original=project_folder, work_folder_modified=project_folder, work_folder=project_folder)

    # <-------------- zip PDF ------------->
    # The zip is delivered whether or not compilation succeeded; only the
    # user-facing message differs.
    zip_res = zip_result(project_folder)
    if success:
        chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
    else:
        chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...'))
    yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
    promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)

    # <-------------- we are done ------------->
    return success
|
|
||||||
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
|
||||||
|
|
||||||
@CatchException
def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    """
    Plugin entry: translate a Latex project (local path, arxiv ID or arxiv URL)
    into Chinese and recompile it into a PDF.

    Args mirror the standard plugin interface. Supports the advanced argument
    flag "--no-cache" to bypass a previously cached translation.
    Returns the compile success flag; intermediate progress is streamed to the UI.
    """
    # <-------------- information about this plugin ------------->
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # <-------------- more requirements ------------->
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    more_req = plugin_kwargs.get("advanced_arg", "")
    no_cache = more_req.startswith("--no-cache")
    # bugfix: the original `more_req.lstrip("--no-cache")` discarded its result
    # (str.lstrip strips a character set, not a prefix, and returns a new string);
    # actually remove the flag so it is not forwarded as a prompt requirement.
    if no_cache: more_req = more_req[len("--no-cache"):].lstrip()
    allow_cache = not no_cache
    _switch_prompt_ = partial(switch_prompt, more_requirement=more_req)

    # <-------------- check deps ------------->
    try:
        import glob, os, time, subprocess
        subprocess.Popen(['pdflatex', '-version'])  # probe that pdflatex is on PATH
        from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
    except Exception as e:
        chatbot.append([ f"解析项目: {txt}",
            f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # <-------------- clear history and read input ------------->
    history = []
    try:
        # may return a cached translated PDF, a local extraction folder, or txt unchanged
        txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
    except tarfile.ReadError as e:
        yield from update_ui_lastest_msg(
            "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。",
            chatbot=chatbot, history=history)
        return

    if txt.endswith('.pdf'):
        # cached translation was found and already promoted to the download zone
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # <-------------- if is a zip/tar file ------------->
    project_folder = desend_to_extracted_folder_if_exist(project_folder)

    # <-------------- move latex project away from temp folder ------------->
    project_folder = move_project(project_folder, arxiv_id)

    # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
    if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
        yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
                                    chatbot, history, system_prompt, mode='translate_zh', switch_prompt=_switch_prompt_)

    # <-------------- compile PDF ------------->
    success = yield from 编译Latex(chatbot, history, main_file_original='merge', main_file_modified='merge_translate_zh', mode='translate_zh',
                                 work_folder_original=project_folder, work_folder_modified=project_folder, work_folder=project_folder)

    # <-------------- zip PDF ------------->
    # The zip is delivered whether or not compilation succeeded; only the
    # user-facing message differs.
    zip_res = zip_result(project_folder)
    if success:
        chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
    else:
        chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...'))
    yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面
    promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)

    # <-------------- we are done ------------->
    return success
@@ -135,13 +135,25 @@ def request_gpt_model_in_new_thread_with_ui_alive(
|
|||||||
yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息
|
yield from update_ui(chatbot=chatbot, history=[]) # 如果最后成功了,则删除报错信息
|
||||||
return final_result
|
return final_result
|
||||||
|
|
||||||
def can_multi_process(llm) -> bool:
    """
    Return True if the given llm model name supports concurrent requests.

    The per-model registry (model_info[llm]['can_multi_thread']) takes
    precedence; models absent from the registry, or registered without that
    key, fall back to the legacy prefix whitelist.
    """
    from request_llms.bridge_all import model_info

    def default_condition(llm) -> bool:
        # legacy condition: known multi-thread-capable model name prefixes
        return llm.startswith(('gpt-', 'api2d-', 'azure-', 'spark', 'zhipuai', 'glm-'))

    if llm in model_info and 'can_multi_thread' in model_info[llm]:
        return model_info[llm]['can_multi_thread']
    return default_condition(llm)
|
|
||||||
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
||||||
inputs_array, inputs_show_user_array, llm_kwargs,
|
inputs_array, inputs_show_user_array, llm_kwargs,
|
||||||
|
|||||||
@@ -0,0 +1,85 @@
|
|||||||
|
from crazy_functions.crazy_utils import read_and_clean_pdf_text, get_files_from_everything
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
def extract_text_from_files(txt, chatbot, history):
    """
    Search the input for pdf/md/word files and return their text content.

    Args:
        txt: user input; either plain text or a path pointing at files.
        chatbot: chatbot inputs and outputs (UI handle, kept for interface parity).
        history (list): chat history list (kept for interface parity).

    Returns (5-tuple):
        file_exist (bool): whether any supported file was found.
        final_result (list): extracted text of each file (or [txt] when no file).
        page_one (list): first page / abstract of each file.
        file_manifest (list): path of each file, relative to its source folder.
        exception_hint (str): non-empty when the user must act: "word" = legacy
            .doc found (unsupported), "pdf" / "word_pip" = missing python
            dependency (install hint is reported by the caller).
    """
    final_result = []
    page_one = []
    file_manifest = []
    exception_hint = ""

    if txt == "":
        # input box is empty: hand the raw text back, no file to process
        final_result.append(txt)
        return False, final_result, page_one, file_manifest, exception_hint

    # locate files referenced by the input
    file_pdf, pdf_manifest, folder_pdf = get_files_from_everything(txt, '.pdf')
    file_md, md_manifest, folder_md = get_files_from_everything(txt, '.md')
    file_word, word_manifest, folder_word = get_files_from_everything(txt, '.docx')
    file_doc, doc_manifest, folder_doc = get_files_from_everything(txt, '.doc')

    if file_doc:
        # legacy .doc is not supported; ask the user to convert to .docx
        exception_hint = "word"
        return False, final_result, page_one, file_manifest, exception_hint

    file_num = len(pdf_manifest) + len(md_manifest) + len(word_manifest)
    if file_num == 0:
        # input is not a file: hand the raw text back
        final_result.append(txt)
        return False, final_result, page_one, file_manifest, exception_hint

    if file_pdf:
        try:  # check optional dependency; report install hint if missing
            import fitz
        except ImportError:
            exception_hint = "pdf"
            return False, final_result, page_one, file_manifest, exception_hint
        for fp in pdf_manifest:
            file_content, pdf_one = read_and_clean_pdf_text(fp)  # split the PDF by section (best effort)
            file_content = file_content.encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars
            pdf_one = str(pdf_one).encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars
            final_result.append(file_content)
            page_one.append(pdf_one)
            file_manifest.append(os.path.relpath(fp, folder_pdf))

    if file_md:
        for fp in md_manifest:
            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
                file_content = f.read()
            file_content = file_content.encode('utf-8', 'ignore').decode()
            # use the top-level markdown headers ("# ...") as the abstract,
            # joined with newlines; empty string when the file has none
            headers = re.findall(r'^#\s(.*)$', file_content, re.MULTILINE)
            page_one.append("\n".join(headers) if headers else "")
            final_result.append(file_content)
            file_manifest.append(os.path.relpath(fp, folder_md))

    if file_word:
        try:  # check optional dependency; report install hint if missing
            from docx import Document
        except ImportError:
            exception_hint = "word_pip"
            return False, final_result, page_one, file_manifest, exception_hint
        for fp in word_manifest:
            doc = Document(fp)
            file_content = '\n'.join([p.text for p in doc.paragraphs])
            file_content = file_content.encode('utf-8', 'ignore').decode()
            page_one.append(file_content[:200])  # first 200 chars stand in for the abstract
            final_result.append(file_content)
            file_manifest.append(os.path.relpath(fp, folder_word))

    return True, final_result, page_one, file_manifest, exception_hint
@@ -1,6 +1,5 @@
|
|||||||
from toolbox import CatchException, update_ui, report_exception
|
from toolbox import CatchException, update_ui, report_exception
|
||||||
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
||||||
from .crazy_utils import read_and_clean_pdf_text
|
|
||||||
import datetime
|
import datetime
|
||||||
|
|
||||||
#以下是每类图表的PROMPT
|
#以下是每类图表的PROMPT
|
||||||
@@ -162,7 +161,7 @@ mindmap
|
|||||||
```
|
```
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs):
|
def 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs):
|
||||||
############################## <第 0 步,切割输入> ##################################
|
############################## <第 0 步,切割输入> ##################################
|
||||||
# 借用PDF切割中的函数对文本进行切割
|
# 借用PDF切割中的函数对文本进行切割
|
||||||
TOKEN_LIMIT_PER_FRAGMENT = 2500
|
TOKEN_LIMIT_PER_FRAGMENT = 2500
|
||||||
@@ -170,8 +169,6 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs):
|
|||||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||||
txt = breakdown_text_to_satisfy_token_limit(txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
|
txt = breakdown_text_to_satisfy_token_limit(txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
|
||||||
############################## <第 1 步,迭代地历遍整个文章,提取精炼信息> ##################################
|
############################## <第 1 步,迭代地历遍整个文章,提取精炼信息> ##################################
|
||||||
i_say_show_user = f'首先你从历史记录或文件中提取摘要。'; gpt_say = "[Local Message] 收到。" # 用户提示
|
|
||||||
chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI
|
|
||||||
results = []
|
results = []
|
||||||
MAX_WORD_TOTAL = 4096
|
MAX_WORD_TOTAL = 4096
|
||||||
n_txt = len(txt)
|
n_txt = len(txt)
|
||||||
@@ -179,7 +176,7 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs):
|
|||||||
if n_txt >= 20: print('文章极长,不能达到预期效果')
|
if n_txt >= 20: print('文章极长,不能达到预期效果')
|
||||||
for i in range(n_txt):
|
for i in range(n_txt):
|
||||||
NUM_OF_WORD = MAX_WORD_TOTAL // n_txt
|
NUM_OF_WORD = MAX_WORD_TOTAL // n_txt
|
||||||
i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i]}"
|
i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words in Chinese: {txt[i]}"
|
||||||
i_say_show_user = f"[{i+1}/{n_txt}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i][:200]} ...."
|
i_say_show_user = f"[{i+1}/{n_txt}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i][:200]} ...."
|
||||||
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问
|
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问
|
||||||
llm_kwargs, chatbot,
|
llm_kwargs, chatbot,
|
||||||
@@ -232,35 +229,11 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs):
|
|||||||
inputs=i_say,
|
inputs=i_say,
|
||||||
inputs_show_user=i_say_show_user,
|
inputs_show_user=i_say_show_user,
|
||||||
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
|
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
|
||||||
sys_prompt="你精通使用mermaid语法来绘制图表,首先确保语法正确,其次避免在mermaid语法中使用不允许的字符,此外也应当分考虑图表的可读性。"
|
sys_prompt=""
|
||||||
)
|
)
|
||||||
history.append(gpt_say)
|
history.append(gpt_say)
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
|
||||||
|
|
||||||
def 输入区文件处理(txt):
|
|
||||||
if txt == "": return False, txt
|
|
||||||
success = True
|
|
||||||
import glob
|
|
||||||
from .crazy_utils import get_files_from_everything
|
|
||||||
file_pdf,pdf_manifest,folder_pdf = get_files_from_everything(txt, '.pdf')
|
|
||||||
file_md,md_manifest,folder_md = get_files_from_everything(txt, '.md')
|
|
||||||
if len(pdf_manifest) == 0 and len(md_manifest) == 0:
|
|
||||||
return False, txt #如输入区内容不是文件则直接返回输入区内容
|
|
||||||
|
|
||||||
final_result = ""
|
|
||||||
if file_pdf:
|
|
||||||
for index, fp in enumerate(pdf_manifest):
|
|
||||||
file_content, page_one = read_and_clean_pdf_text(fp) # (尝试)按照章节切割PDF
|
|
||||||
file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars
|
|
||||||
final_result += "\n" + file_content
|
|
||||||
if file_md:
|
|
||||||
for index, fp in enumerate(md_manifest):
|
|
||||||
with open(fp, 'r', encoding='utf-8', errors='replace') as f:
|
|
||||||
file_content = f.read()
|
|
||||||
file_content = file_content.encode('utf-8', 'ignore').decode()
|
|
||||||
final_result += "\n" + file_content
|
|
||||||
return True, final_result
|
|
||||||
|
|
||||||
@CatchException
|
@CatchException
|
||||||
def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
||||||
"""
|
"""
|
||||||
@@ -277,26 +250,47 @@ def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history,
|
|||||||
# 基本信息:功能、贡献者
|
# 基本信息:功能、贡献者
|
||||||
chatbot.append([
|
chatbot.append([
|
||||||
"函数插件功能?",
|
"函数插件功能?",
|
||||||
"根据当前聊天历史或文件中(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\
|
"根据当前聊天历史或指定的路径文件(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\
|
||||||
\n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"])
|
\n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"])
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
|
||||||
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
|
||||||
try:
|
|
||||||
import fitz
|
|
||||||
except:
|
|
||||||
report_exception(chatbot, history,
|
|
||||||
a = f"解析项目: {txt}",
|
|
||||||
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
||||||
return
|
|
||||||
|
|
||||||
if os.path.exists(txt): #如输入区无内容则直接解析历史记录
|
if os.path.exists(txt): #如输入区无内容则直接解析历史记录
|
||||||
file_exist, txt = 输入区文件处理(txt)
|
from crazy_functions.pdf_fns.parse_word import extract_text_from_files
|
||||||
|
file_exist, final_result, page_one, file_manifest, excption = extract_text_from_files(txt, chatbot, history)
|
||||||
else:
|
else:
|
||||||
file_exist = False
|
file_exist = False
|
||||||
|
excption = ""
|
||||||
|
file_manifest = []
|
||||||
|
|
||||||
if file_exist : history = [] #如输入区内容为文件则清空历史记录
|
if excption != "":
|
||||||
history.append(txt) #将解析后的txt传递加入到历史中
|
if excption == "word":
|
||||||
|
report_exception(chatbot, history,
|
||||||
|
a = f"解析项目: {txt}",
|
||||||
|
b = f"找到了.doc文件,但是该文件格式不被支持,请先转化为.docx格式。")
|
||||||
|
|
||||||
yield from 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs)
|
elif excption == "pdf":
|
||||||
|
report_exception(chatbot, history,
|
||||||
|
a = f"解析项目: {txt}",
|
||||||
|
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
|
||||||
|
|
||||||
|
elif excption == "word_pip":
|
||||||
|
report_exception(chatbot, history,
|
||||||
|
a=f"解析项目: {txt}",
|
||||||
|
b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。")
|
||||||
|
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
|
|
||||||
|
else:
|
||||||
|
if not file_exist:
|
||||||
|
history.append(txt) #如输入区不是文件则将输入区内容加入历史记录
|
||||||
|
i_say_show_user = f'首先你从历史记录中提取摘要。'; gpt_say = "[Local Message] 收到。" # 用户提示
|
||||||
|
chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI
|
||||||
|
yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs)
|
||||||
|
else:
|
||||||
|
file_num = len(file_manifest)
|
||||||
|
for i in range(file_num): #依次处理文件
|
||||||
|
i_say_show_user = f"[{i+1}/{file_num}]处理文件{file_manifest[i]}"; gpt_say = "[Local Message] 收到。" # 用户提示
|
||||||
|
chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI
|
||||||
|
history = [] #如输入区内容为文件则清空历史记录
|
||||||
|
history.append(final_result[i])
|
||||||
|
yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs)
|
||||||
@@ -12,6 +12,12 @@ class PaperFileGroup():
|
|||||||
self.sp_file_index = []
|
self.sp_file_index = []
|
||||||
self.sp_file_tag = []
|
self.sp_file_tag = []
|
||||||
|
|
||||||
|
# count_token
|
||||||
|
from request_llms.bridge_all import model_info
|
||||||
|
enc = model_info["gpt-3.5-turbo"]['tokenizer']
|
||||||
|
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
|
||||||
|
self.get_token_num = get_token_num
|
||||||
|
|
||||||
def run_file_split(self, max_token_limit=1900):
|
def run_file_split(self, max_token_limit=1900):
|
||||||
"""
|
"""
|
||||||
将长文本分离开来
|
将长文本分离开来
|
||||||
|
|||||||
@@ -345,9 +345,12 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
|
|||||||
pattern_except_suffix = [_.lstrip(" ^*.,").rstrip(" ,") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^*.")]
|
pattern_except_suffix = [_.lstrip(" ^*.,").rstrip(" ,") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^*.")]
|
||||||
pattern_except_suffix += ['zip', 'rar', '7z', 'tar', 'gz'] # 避免解析压缩文件
|
pattern_except_suffix += ['zip', 'rar', '7z', 'tar', 'gz'] # 避免解析压缩文件
|
||||||
# 将要忽略匹配的文件名(例如: ^README.md)
|
# 将要忽略匹配的文件名(例如: ^README.md)
|
||||||
pattern_except_name = [_.lstrip(" ^*,").rstrip(" ,").replace(".", "\.") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^") and not _.strip().startswith("^*.")]
|
pattern_except_name = [_.lstrip(" ^*,").rstrip(" ,").replace(".", r"\.") # 移除左边通配符,移除右侧逗号,转义点号
|
||||||
|
for _ in txt_pattern.split(" ") # 以空格分割
|
||||||
|
if (_ != "" and _.strip().startswith("^") and not _.strip().startswith("^*.")) # ^开始,但不是^*.开始
|
||||||
|
]
|
||||||
# 生成正则表达式
|
# 生成正则表达式
|
||||||
pattern_except = '/[^/]+\.(' + "|".join(pattern_except_suffix) + ')$'
|
pattern_except = r'/[^/]+\.(' + "|".join(pattern_except_suffix) + ')$'
|
||||||
pattern_except += '|/(' + "|".join(pattern_except_name) + ')$' if pattern_except_name != [] else ''
|
pattern_except += '|/(' + "|".join(pattern_except_name) + ')$' if pattern_except_name != [] else ''
|
||||||
|
|
||||||
history.clear()
|
history.clear()
|
||||||
|
|||||||
@@ -1,12 +1,12 @@
|
|||||||
## ===================================================
|
## ===================================================
|
||||||
# docker-compose.yml
|
# docker-compose.yml
|
||||||
## ===================================================
|
## ===================================================
|
||||||
# 1. 请在以下方案中选择任意一种,然后删除其他的方案
|
# 1. 请在以下方案中选择任意一种,然后删除其他的方案
|
||||||
# 2. 修改你选择的方案中的environment环境变量,详情请见github wiki或者config.py
|
# 2. 修改你选择的方案中的environment环境变量,详情请见github wiki或者config.py
|
||||||
# 3. 选择一种暴露服务端口的方法,并对相应的配置做出修改:
|
# 3. 选择一种暴露服务端口的方法,并对相应的配置做出修改:
|
||||||
# 【方法1: 适用于Linux,很方便,可惜windows不支持】与宿主的网络融合为一体,这个是默认配置
|
# 「方法1: 适用于Linux,很方便,可惜windows不支持」与宿主的网络融合为一体,这个是默认配置
|
||||||
# network_mode: "host"
|
# network_mode: "host"
|
||||||
# 【方法2: 适用于所有系统包括Windows和MacOS】端口映射,把容器的端口映射到宿主的端口(注意您需要先删除network_mode: "host",再追加以下内容)
|
# 「方法2: 适用于所有系统包括Windows和MacOS」端口映射,把容器的端口映射到宿主的端口(注意您需要先删除network_mode: "host",再追加以下内容)
|
||||||
# ports:
|
# ports:
|
||||||
# - "12345:12345" # 注意!12345必须与WEB_PORT环境变量相互对应
|
# - "12345:12345" # 注意!12345必须与WEB_PORT环境变量相互对应
|
||||||
# 4. 最后`docker-compose up`运行
|
# 4. 最后`docker-compose up`运行
|
||||||
@@ -25,7 +25,7 @@
|
|||||||
## ===================================================
|
## ===================================================
|
||||||
|
|
||||||
## ===================================================
|
## ===================================================
|
||||||
## 【方案零】 部署项目的全部能力(这个是包含cuda和latex的大型镜像。如果您网速慢、硬盘小或没有显卡,则不推荐使用这个)
|
## 「方案零」 部署项目的全部能力(这个是包含cuda和latex的大型镜像。如果您网速慢、硬盘小或没有显卡,则不推荐使用这个)
|
||||||
## ===================================================
|
## ===================================================
|
||||||
version: '3'
|
version: '3'
|
||||||
services:
|
services:
|
||||||
@@ -63,10 +63,10 @@ services:
|
|||||||
# count: 1
|
# count: 1
|
||||||
# capabilities: [gpu]
|
# capabilities: [gpu]
|
||||||
|
|
||||||
# 【WEB_PORT暴露方法1: 适用于Linux】与宿主的网络融合
|
# 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
|
|
||||||
# 【WEB_PORT暴露方法2: 适用于所有系统】端口映射
|
# 「WEB_PORT暴露方法2: 适用于所有系统」端口映射
|
||||||
# ports:
|
# ports:
|
||||||
# - "12345:12345" # 12345必须与WEB_PORT相互对应
|
# - "12345:12345" # 12345必须与WEB_PORT相互对应
|
||||||
|
|
||||||
@@ -75,10 +75,8 @@ services:
|
|||||||
bash -c "python3 -u main.py"
|
bash -c "python3 -u main.py"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## ===================================================
|
## ===================================================
|
||||||
## 【方案一】 如果不需要运行本地模型(仅 chatgpt, azure, 星火, 千帆, claude 等在线大模型服务)
|
## 「方案一」 如果不需要运行本地模型(仅 chatgpt, azure, 星火, 千帆, claude 等在线大模型服务)
|
||||||
## ===================================================
|
## ===================================================
|
||||||
version: '3'
|
version: '3'
|
||||||
services:
|
services:
|
||||||
@@ -97,16 +95,16 @@ services:
|
|||||||
# DEFAULT_WORKER_NUM: ' 10 '
|
# DEFAULT_WORKER_NUM: ' 10 '
|
||||||
# AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '
|
# AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] '
|
||||||
|
|
||||||
# 与宿主的网络融合
|
# 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
|
|
||||||
# 不使用代理网络拉取最新代码
|
# 启动命令
|
||||||
command: >
|
command: >
|
||||||
bash -c "python3 -u main.py"
|
bash -c "python3 -u main.py"
|
||||||
|
|
||||||
|
|
||||||
### ===================================================
|
### ===================================================
|
||||||
### 【方案二】 如果需要运行ChatGLM + Qwen + MOSS等本地模型
|
### 「方案二」 如果需要运行ChatGLM + Qwen + MOSS等本地模型
|
||||||
### ===================================================
|
### ===================================================
|
||||||
version: '3'
|
version: '3'
|
||||||
services:
|
services:
|
||||||
@@ -130,8 +128,10 @@ services:
|
|||||||
devices:
|
devices:
|
||||||
- /dev/nvidia0:/dev/nvidia0
|
- /dev/nvidia0:/dev/nvidia0
|
||||||
|
|
||||||
# 与宿主的网络融合
|
# 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
|
|
||||||
|
# 启动命令
|
||||||
command: >
|
command: >
|
||||||
bash -c "python3 -u main.py"
|
bash -c "python3 -u main.py"
|
||||||
|
|
||||||
@@ -139,8 +139,9 @@ services:
|
|||||||
# command: >
|
# command: >
|
||||||
# bash -c "pip install -r request_llms/requirements_qwen.txt && python3 -u main.py"
|
# bash -c "pip install -r request_llms/requirements_qwen.txt && python3 -u main.py"
|
||||||
|
|
||||||
|
|
||||||
### ===================================================
|
### ===================================================
|
||||||
### 【方案三】 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型
|
### 「方案三」 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型
|
||||||
### ===================================================
|
### ===================================================
|
||||||
version: '3'
|
version: '3'
|
||||||
services:
|
services:
|
||||||
@@ -164,16 +165,16 @@ services:
|
|||||||
devices:
|
devices:
|
||||||
- /dev/nvidia0:/dev/nvidia0
|
- /dev/nvidia0:/dev/nvidia0
|
||||||
|
|
||||||
# 与宿主的网络融合
|
# 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
|
|
||||||
# 不使用代理网络拉取最新代码
|
# 启动命令
|
||||||
command: >
|
command: >
|
||||||
python3 -u main.py
|
python3 -u main.py
|
||||||
|
|
||||||
|
|
||||||
## ===================================================
|
## ===================================================
|
||||||
## 【方案四】 ChatGPT + Latex
|
## 「方案四」 ChatGPT + Latex
|
||||||
## ===================================================
|
## ===================================================
|
||||||
version: '3'
|
version: '3'
|
||||||
services:
|
services:
|
||||||
@@ -190,16 +191,16 @@ services:
|
|||||||
DEFAULT_WORKER_NUM: ' 10 '
|
DEFAULT_WORKER_NUM: ' 10 '
|
||||||
WEB_PORT: ' 12303 '
|
WEB_PORT: ' 12303 '
|
||||||
|
|
||||||
# 与宿主的网络融合
|
# 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
|
|
||||||
# 不使用代理网络拉取最新代码
|
# 启动命令
|
||||||
command: >
|
command: >
|
||||||
bash -c "python3 -u main.py"
|
bash -c "python3 -u main.py"
|
||||||
|
|
||||||
|
|
||||||
## ===================================================
|
## ===================================================
|
||||||
## 【方案五】 ChatGPT + 语音助手 (请先阅读 docs/use_audio.md)
|
## 「方案五」 ChatGPT + 语音助手 (请先阅读 docs/use_audio.md)
|
||||||
## ===================================================
|
## ===================================================
|
||||||
version: '3'
|
version: '3'
|
||||||
services:
|
services:
|
||||||
@@ -223,9 +224,9 @@ services:
|
|||||||
# (无需填写) ALIYUN_ACCESSKEY: ' LTAI5q6BrFUzoRXVGUWnekh1 '
|
# (无需填写) ALIYUN_ACCESSKEY: ' LTAI5q6BrFUzoRXVGUWnekh1 '
|
||||||
# (无需填写) ALIYUN_SECRET: ' eHmI20AVWIaQZ0CiTD2bGQVsaP9i68 '
|
# (无需填写) ALIYUN_SECRET: ' eHmI20AVWIaQZ0CiTD2bGQVsaP9i68 '
|
||||||
|
|
||||||
# 与宿主的网络融合
|
# 「WEB_PORT暴露方法1: 适用于Linux」与宿主的网络融合
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
|
|
||||||
# 不使用代理网络拉取最新代码
|
# 启动命令
|
||||||
command: >
|
command: >
|
||||||
bash -c "python3 -u main.py"
|
bash -c "python3 -u main.py"
|
||||||
|
|||||||
@@ -1668,7 +1668,7 @@
|
|||||||
"Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage",
|
"Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage",
|
||||||
"Langchain知识库": "LangchainKnowledgeBase",
|
"Langchain知识库": "LangchainKnowledgeBase",
|
||||||
"Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison",
|
"Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison",
|
||||||
"Latex输出PDF结果": "OutputPDFFromLatex",
|
"Latex输出PDF": "OutputPDFFromLatex",
|
||||||
"Latex翻译中文并重新编译PDF": "TranslateChineseToEnglishInLatexAndRecompilePDF",
|
"Latex翻译中文并重新编译PDF": "TranslateChineseToEnglishInLatexAndRecompilePDF",
|
||||||
"sprint亮靛": "SprintIndigo",
|
"sprint亮靛": "SprintIndigo",
|
||||||
"寻找Latex主文件": "FindLatexMainFile",
|
"寻找Latex主文件": "FindLatexMainFile",
|
||||||
@@ -3004,5 +3004,7 @@
|
|||||||
"1. 上传图片": "TranslatedText",
|
"1. 上传图片": "TranslatedText",
|
||||||
"保存状态": "TranslatedText",
|
"保存状态": "TranslatedText",
|
||||||
"GPT-Academic对话存档": "TranslatedText",
|
"GPT-Academic对话存档": "TranslatedText",
|
||||||
"Arxiv论文精细翻译": "TranslatedText"
|
"Arxiv论文精细翻译": "TranslatedText",
|
||||||
|
"from crazy_functions.AdvancedFunctionTemplate import 测试图表渲染": "from crazy_functions.AdvancedFunctionTemplate import test_chart_rendering",
|
||||||
|
"测试图表渲染": "test_chart_rendering"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1492,7 +1492,7 @@
|
|||||||
"交互功能模板函数": "InteractiveFunctionTemplateFunction",
|
"交互功能模板函数": "InteractiveFunctionTemplateFunction",
|
||||||
"交互功能函数模板": "InteractiveFunctionFunctionTemplate",
|
"交互功能函数模板": "InteractiveFunctionFunctionTemplate",
|
||||||
"Latex英文纠错加PDF对比": "LatexEnglishErrorCorrectionWithPDFComparison",
|
"Latex英文纠错加PDF对比": "LatexEnglishErrorCorrectionWithPDFComparison",
|
||||||
"Latex输出PDF结果": "LatexOutputPDFResult",
|
"Latex输出PDF": "LatexOutputPDFResult",
|
||||||
"Latex翻译中文并重新编译PDF": "TranslateChineseAndRecompilePDF",
|
"Latex翻译中文并重新编译PDF": "TranslateChineseAndRecompilePDF",
|
||||||
"语音助手": "VoiceAssistant",
|
"语音助手": "VoiceAssistant",
|
||||||
"微调数据集生成": "FineTuneDatasetGeneration",
|
"微调数据集生成": "FineTuneDatasetGeneration",
|
||||||
|
|||||||
@@ -16,7 +16,7 @@
|
|||||||
"批量Markdown翻译": "BatchTranslateMarkdown",
|
"批量Markdown翻译": "BatchTranslateMarkdown",
|
||||||
"连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion",
|
"连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion",
|
||||||
"Langchain知识库": "LangchainKnowledgeBase",
|
"Langchain知识库": "LangchainKnowledgeBase",
|
||||||
"Latex输出PDF结果": "OutputPDFFromLatex",
|
"Latex输出PDF": "OutputPDFFromLatex",
|
||||||
"把字符太少的块清除为回车": "ClearBlocksWithTooFewCharactersToNewline",
|
"把字符太少的块清除为回车": "ClearBlocksWithTooFewCharactersToNewline",
|
||||||
"Latex精细分解与转化": "DecomposeAndConvertLatex",
|
"Latex精细分解与转化": "DecomposeAndConvertLatex",
|
||||||
"解析一个C项目的头文件": "ParseCProjectHeaderFiles",
|
"解析一个C项目的头文件": "ParseCProjectHeaderFiles",
|
||||||
@@ -97,5 +97,12 @@
|
|||||||
"多智能体": "MultiAgent",
|
"多智能体": "MultiAgent",
|
||||||
"图片生成_DALLE2": "ImageGeneration_DALLE2",
|
"图片生成_DALLE2": "ImageGeneration_DALLE2",
|
||||||
"图片生成_DALLE3": "ImageGeneration_DALLE3",
|
"图片生成_DALLE3": "ImageGeneration_DALLE3",
|
||||||
"图片修改_DALLE2": "ImageModification_DALLE2"
|
"图片修改_DALLE2": "ImageModification_DALLE2",
|
||||||
|
"生成多种Mermaid图表": "GenerateMultipleMermaidCharts",
|
||||||
|
"知识库文件注入": "InjectKnowledgeBaseFiles",
|
||||||
|
"PDF翻译中文并重新编译PDF": "TranslatePDFToChineseAndRecompilePDF",
|
||||||
|
"随机小游戏": "RandomMiniGame",
|
||||||
|
"互动小游戏": "InteractiveMiniGame",
|
||||||
|
"解析历史输入": "ParseHistoricalInput",
|
||||||
|
"高阶功能模板函数示意图": "HighOrderFunctionTemplateDiagram"
|
||||||
}
|
}
|
||||||
@@ -1468,7 +1468,7 @@
|
|||||||
"交互功能模板函数": "InteractiveFunctionTemplateFunctions",
|
"交互功能模板函数": "InteractiveFunctionTemplateFunctions",
|
||||||
"交互功能函数模板": "InteractiveFunctionFunctionTemplates",
|
"交互功能函数模板": "InteractiveFunctionFunctionTemplates",
|
||||||
"Latex英文纠错加PDF对比": "LatexEnglishCorrectionWithPDFComparison",
|
"Latex英文纠错加PDF对比": "LatexEnglishCorrectionWithPDFComparison",
|
||||||
"Latex输出PDF结果": "OutputPDFFromLatex",
|
"Latex输出PDF": "OutputPDFFromLatex",
|
||||||
"Latex翻译中文并重新编译PDF": "TranslateLatexToChineseAndRecompilePDF",
|
"Latex翻译中文并重新编译PDF": "TranslateLatexToChineseAndRecompilePDF",
|
||||||
"语音助手": "VoiceAssistant",
|
"语音助手": "VoiceAssistant",
|
||||||
"微调数据集生成": "FineTuneDatasetGeneration",
|
"微调数据集生成": "FineTuneDatasetGeneration",
|
||||||
|
|||||||
@@ -1,30 +0,0 @@
|
|||||||
try {
|
|
||||||
$("<link>").attr({href: "file=docs/waifu_plugin/waifu.css", rel: "stylesheet", type: "text/css"}).appendTo('head');
|
|
||||||
$('body').append('<div class="waifu"><div class="waifu-tips"></div><canvas id="live2d" class="live2d"></canvas><div class="waifu-tool"><span class="fui-home"></span> <span class="fui-chat"></span> <span class="fui-eye"></span> <span class="fui-user"></span> <span class="fui-photo"></span> <span class="fui-info-circle"></span> <span class="fui-cross"></span></div></div>');
|
|
||||||
$.ajax({url: "file=docs/waifu_plugin/waifu-tips.js", dataType:"script", cache: true, success: function() {
|
|
||||||
$.ajax({url: "file=docs/waifu_plugin/live2d.js", dataType:"script", cache: true, success: function() {
|
|
||||||
/* 可直接修改部分参数 */
|
|
||||||
live2d_settings['hitokotoAPI'] = "hitokoto.cn"; // 一言 API
|
|
||||||
live2d_settings['modelId'] = 5; // 默认模型 ID
|
|
||||||
live2d_settings['modelTexturesId'] = 1; // 默认材质 ID
|
|
||||||
live2d_settings['modelStorage'] = false; // 不储存模型 ID
|
|
||||||
live2d_settings['waifuSize'] = '210x187';
|
|
||||||
live2d_settings['waifuTipsSize'] = '187x52';
|
|
||||||
live2d_settings['canSwitchModel'] = true;
|
|
||||||
live2d_settings['canSwitchTextures'] = true;
|
|
||||||
live2d_settings['canSwitchHitokoto'] = false;
|
|
||||||
live2d_settings['canTakeScreenshot'] = false;
|
|
||||||
live2d_settings['canTurnToHomePage'] = false;
|
|
||||||
live2d_settings['canTurnToAboutPage'] = false;
|
|
||||||
live2d_settings['showHitokoto'] = false; // 显示一言
|
|
||||||
live2d_settings['showF12Status'] = false; // 显示加载状态
|
|
||||||
live2d_settings['showF12Message'] = false; // 显示看板娘消息
|
|
||||||
live2d_settings['showF12OpenMsg'] = false; // 显示控制台打开提示
|
|
||||||
live2d_settings['showCopyMessage'] = false; // 显示 复制内容 提示
|
|
||||||
live2d_settings['showWelcomeMessage'] = true; // 显示进入面页欢迎词
|
|
||||||
|
|
||||||
/* 在 initModel 前添加 */
|
|
||||||
initModel("file=docs/waifu_plugin/waifu-tips.json");
|
|
||||||
}});
|
|
||||||
}});
|
|
||||||
} catch(err) { console.log("[Error] JQuery is not defined.") }
|
|
||||||
183
main.py
183
main.py
@@ -13,35 +13,40 @@ help_menu_description = \
|
|||||||
</br></br>如何语音对话: 请阅读Wiki
|
</br></br>如何语音对话: 请阅读Wiki
|
||||||
</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""
|
</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""
|
||||||
|
|
||||||
|
def enable_log(PATH_LOGGING):
|
||||||
|
import logging, uuid
|
||||||
|
admin_log_path = os.path.join(PATH_LOGGING, "admin")
|
||||||
|
os.makedirs(admin_log_path, exist_ok=True)
|
||||||
|
log_dir = os.path.join(admin_log_path, "chat_secrets.log")
|
||||||
|
try:logging.basicConfig(filename=log_dir, level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
||||||
|
except:logging.basicConfig(filename=log_dir, level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
||||||
|
# Disable logging output from the 'httpx' logger
|
||||||
|
logging.getLogger("httpx").setLevel(logging.WARNING)
|
||||||
|
print(f"所有对话记录将自动保存在本地目录{log_dir}, 请注意自我隐私保护哦!")
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
import gradio as gr
|
import gradio as gr
|
||||||
if gr.__version__ not in ['3.32.6', '3.32.7', '3.32.8']:
|
if gr.__version__ not in ['3.32.9']:
|
||||||
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
|
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
|
||||||
from request_llms.bridge_all import predict
|
from request_llms.bridge_all import predict
|
||||||
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
|
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
|
||||||
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
|
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
|
||||||
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
|
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
|
||||||
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
|
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
|
||||||
ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME')
|
ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU')
|
||||||
DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
|
NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
|
||||||
INIT_SYS_PROMPT = get_conf('INIT_SYS_PROMPT')
|
DARK_MODE, INIT_SYS_PROMPT, ADD_WAIFU = get_conf('DARK_MODE', 'INIT_SYS_PROMPT', 'ADD_WAIFU')
|
||||||
|
|
||||||
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
||||||
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
||||||
from check_proxy import get_current_version
|
from check_proxy import get_current_version
|
||||||
from themes.theme import adjust_theme, advanced_css, theme_declaration
|
from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_reset, js_code_show_or_hide, js_code_show_or_hide_group2
|
||||||
from themes.theme import js_code_for_css_changing, js_code_for_darkmode_init, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
|
from themes.theme import js_code_for_css_changing, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
|
||||||
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie
|
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid
|
||||||
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
|
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
|
||||||
|
|
||||||
# 问询记录, python 版本建议3.9+(越新越好)
|
# 对话、日志记录
|
||||||
import logging, uuid
|
enable_log(PATH_LOGGING)
|
||||||
os.makedirs(PATH_LOGGING, exist_ok=True)
|
|
||||||
try:logging.basicConfig(filename=f"{PATH_LOGGING}/chat_secrets.log", level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
|
||||||
except:logging.basicConfig(filename=f"{PATH_LOGGING}/chat_secrets.log", level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
|
|
||||||
# Disable logging output from the 'httpx' logger
|
|
||||||
logging.getLogger("httpx").setLevel(logging.WARNING)
|
|
||||||
print(f"所有问询记录将自动保存在本地目录./{PATH_LOGGING}/chat_secrets.log, 请注意自我隐私保护哦!")
|
|
||||||
|
|
||||||
# 一些普通功能模块
|
# 一些普通功能模块
|
||||||
from core_functional import get_core_functions
|
from core_functional import get_core_functions
|
||||||
@@ -65,7 +70,7 @@ def main():
|
|||||||
proxy_info = check_proxy(proxies)
|
proxy_info = check_proxy(proxies)
|
||||||
|
|
||||||
gr_L1 = lambda: gr.Row().style()
|
gr_L1 = lambda: gr.Row().style()
|
||||||
gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id)
|
gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id, min_width=400)
|
||||||
if LAYOUT == "TOP-DOWN":
|
if LAYOUT == "TOP-DOWN":
|
||||||
gr_L1 = lambda: DummyWith()
|
gr_L1 = lambda: DummyWith()
|
||||||
gr_L2 = lambda scale, elem_id: gr.Row()
|
gr_L2 = lambda scale, elem_id: gr.Row()
|
||||||
@@ -74,9 +79,9 @@ def main():
|
|||||||
cancel_handles = []
|
cancel_handles = []
|
||||||
customize_btns = {}
|
customize_btns = {}
|
||||||
predefined_btns = {}
|
predefined_btns = {}
|
||||||
with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
|
with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as app_block:
|
||||||
gr.HTML(title_html)
|
gr.HTML(title_html)
|
||||||
secret_css, dark_mode, persistent_cookie = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False), gr.Textbox(visible=False)
|
secret_css, web_cookie_cache = gr.Textbox(visible=False), gr.Textbox(visible=False)
|
||||||
cookies = gr.State(load_chat_cookies())
|
cookies = gr.State(load_chat_cookies())
|
||||||
with gr_L1():
|
with gr_L1():
|
||||||
with gr_L2(scale=2, elem_id="gpt-chat"):
|
with gr_L2(scale=2, elem_id="gpt-chat"):
|
||||||
@@ -98,6 +103,7 @@ def main():
|
|||||||
audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
|
audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
|
||||||
with gr.Row():
|
with gr.Row():
|
||||||
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
|
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
|
||||||
|
|
||||||
with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
|
with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
|
||||||
with gr.Row():
|
with gr.Row():
|
||||||
for k in range(NUM_CUSTOM_BASIC_BTN):
|
for k in range(NUM_CUSTOM_BASIC_BTN):
|
||||||
@@ -142,7 +148,6 @@ def main():
|
|||||||
with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
|
with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
|
||||||
file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")
|
file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")
|
||||||
|
|
||||||
|
|
||||||
with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"):
|
with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"):
|
||||||
with gr.Row():
|
with gr.Row():
|
||||||
with gr.Tab("上传文件", elem_id="interact-panel"):
|
with gr.Tab("上传文件", elem_id="interact-panel"):
|
||||||
@@ -152,16 +157,21 @@ def main():
|
|||||||
with gr.Tab("更换模型", elem_id="interact-panel"):
|
with gr.Tab("更换模型", elem_id="interact-panel"):
|
||||||
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
|
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
|
||||||
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
||||||
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
|
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature", elem_id="elem_temperature")
|
||||||
max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",)
|
max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",)
|
||||||
system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=INIT_SYS_PROMPT)
|
system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=INIT_SYS_PROMPT, elem_id="elem_prompt")
|
||||||
|
temperature.change(None, inputs=[temperature], outputs=None,
|
||||||
|
_js="""(temperature)=>gpt_academic_gradio_saveload("save", "elem_prompt", "js_temperature_cookie", temperature)""")
|
||||||
|
system_prompt.change(None, inputs=[system_prompt], outputs=None,
|
||||||
|
_js="""(system_prompt)=>gpt_academic_gradio_saveload("save", "elem_prompt", "js_system_prompt_cookie", system_prompt)""")
|
||||||
|
|
||||||
with gr.Tab("界面外观", elem_id="interact-panel"):
|
with gr.Tab("界面外观", elem_id="interact-panel"):
|
||||||
theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
|
theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
|
||||||
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
|
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
|
||||||
value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
|
opt = ["自定义菜单"]
|
||||||
checkboxes_2 = gr.CheckboxGroup(["自定义菜单"],
|
value=[]
|
||||||
value=[], label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
|
if ADD_WAIFU: opt += ["添加Live2D形象"]; value += ["添加Live2D形象"]
|
||||||
|
checkboxes_2 = gr.CheckboxGroup(opt, value=value, label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
|
||||||
dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
|
dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
|
||||||
dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
|
dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
|
||||||
with gr.Tab("帮助", elem_id="interact-panel"):
|
with gr.Tab("帮助", elem_id="interact-panel"):
|
||||||
@@ -178,7 +188,7 @@ def main():
|
|||||||
submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
|
submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
|
||||||
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
|
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
|
||||||
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
|
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
|
||||||
clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
|
clearBtn2 = gr.Button("清除", elem_id="elem_clear2", variant="secondary", visible=False); clearBtn2.style(size="sm")
|
||||||
|
|
||||||
|
|
||||||
with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
|
with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
|
||||||
@@ -192,69 +202,31 @@ def main():
|
|||||||
basic_fn_suffix = gr.Textbox(show_label=False, placeholder="输入新提示后缀", lines=4).style(container=False)
|
basic_fn_suffix = gr.Textbox(show_label=False, placeholder="输入新提示后缀", lines=4).style(container=False)
|
||||||
with gr.Column(scale=1, min_width=70):
|
with gr.Column(scale=1, min_width=70):
|
||||||
basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm")
|
basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm")
|
||||||
basic_fn_load = gr.Button("加载已保存", variant="primary"); basic_fn_load.style(size="sm")
|
basic_fn_clean = gr.Button("恢复默认", variant="primary"); basic_fn_clean.style(size="sm")
|
||||||
def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix):
|
|
||||||
ret = {}
|
|
||||||
customize_fn_overwrite_ = cookies_['customize_fn_overwrite']
|
|
||||||
customize_fn_overwrite_.update({
|
|
||||||
basic_btn_dropdown_:
|
|
||||||
{
|
|
||||||
"Title":basic_fn_title,
|
|
||||||
"Prefix":basic_fn_prefix,
|
|
||||||
"Suffix":basic_fn_suffix,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
cookies_.update(customize_fn_overwrite_)
|
|
||||||
if basic_btn_dropdown_ in customize_btns:
|
|
||||||
ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
|
|
||||||
else:
|
|
||||||
ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
|
|
||||||
ret.update({cookies: cookies_})
|
|
||||||
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
|
||||||
except: persistent_cookie_ = {}
|
|
||||||
persistent_cookie_["custom_bnt"] = customize_fn_overwrite_ # dict update new value
|
|
||||||
persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict
|
|
||||||
ret.update({persistent_cookie: persistent_cookie_}) # write persistent cookie
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def reflesh_btn(persistent_cookie_, cookies_):
|
from shared_utils.cookie_manager import assign_btn__fn_builder
|
||||||
ret = {}
|
assign_btn = assign_btn__fn_builder(customize_btns, predefined_btns, cookies, web_cookie_cache)
|
||||||
for k in customize_btns:
|
# update btn
|
||||||
ret.update({customize_btns[k]: gr.update(visible=False, value="")})
|
h = basic_fn_confirm.click(assign_btn, [web_cookie_cache, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
|
||||||
|
[web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()])
|
||||||
|
h.then(None, [web_cookie_cache], None, _js="""(web_cookie_cache)=>{setCookie("web_cookie_cache", web_cookie_cache, 365);}""")
|
||||||
|
# clean up btn
|
||||||
|
h2 = basic_fn_clean.click(assign_btn, [web_cookie_cache, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix, gr.State(True)],
|
||||||
|
[web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()])
|
||||||
|
h2.then(None, [web_cookie_cache], None, _js="""(web_cookie_cache)=>{setCookie("web_cookie_cache", web_cookie_cache, 365);}""")
|
||||||
|
|
||||||
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
|
||||||
except: return ret
|
|
||||||
|
|
||||||
customize_fn_overwrite_ = persistent_cookie_.get("custom_bnt", {})
|
|
||||||
cookies_['customize_fn_overwrite'] = customize_fn_overwrite_
|
|
||||||
ret.update({cookies: cookies_})
|
|
||||||
|
|
||||||
for k,v in persistent_cookie_["custom_bnt"].items():
|
|
||||||
if v['Title'] == "": continue
|
|
||||||
if k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=True, value=v['Title'])})
|
|
||||||
else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
|
|
||||||
return ret
|
|
||||||
|
|
||||||
basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()])
|
|
||||||
h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
|
|
||||||
[persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
|
|
||||||
# save persistent cookie
|
|
||||||
h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""")
|
|
||||||
|
|
||||||
# 功能区显示开关与功能区的互动
|
# 功能区显示开关与功能区的互动
|
||||||
def fn_area_visibility(a):
|
def fn_area_visibility(a):
|
||||||
ret = {}
|
ret = {}
|
||||||
ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
|
|
||||||
ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
|
|
||||||
ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))})
|
ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))})
|
||||||
ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))})
|
ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))})
|
||||||
ret.update({clearBtn: gr.update(visible=("输入清除键" in a))})
|
|
||||||
ret.update({clearBtn2: gr.update(visible=("输入清除键" in a))})
|
|
||||||
ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
|
ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
|
||||||
if "浮动输入区" in a: ret.update({txt: gr.update(value="")})
|
if "浮动输入区" in a: ret.update({txt: gr.update(value="")})
|
||||||
return ret
|
return ret
|
||||||
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] )
|
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, plugin_advanced_arg] )
|
||||||
|
checkboxes.select(None, [checkboxes], None, _js=js_code_show_or_hide)
|
||||||
|
|
||||||
# 功能区显示开关与功能区的互动
|
# 功能区显示开关与功能区的互动
|
||||||
def fn_area_visibility_2(a):
|
def fn_area_visibility_2(a):
|
||||||
@@ -262,6 +234,7 @@ def main():
|
|||||||
ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
|
ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
|
||||||
return ret
|
return ret
|
||||||
checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
|
checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
|
||||||
|
checkboxes_2.select(None, [checkboxes_2], None, _js=js_code_show_or_hide_group2)
|
||||||
|
|
||||||
# 整理反复出现的控件句柄组合
|
# 整理反复出现的控件句柄组合
|
||||||
input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
|
input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
|
||||||
@@ -272,15 +245,17 @@ def main():
|
|||||||
cancel_handles.append(txt2.submit(**predict_args))
|
cancel_handles.append(txt2.submit(**predict_args))
|
||||||
cancel_handles.append(submitBtn.click(**predict_args))
|
cancel_handles.append(submitBtn.click(**predict_args))
|
||||||
cancel_handles.append(submitBtn2.click(**predict_args))
|
cancel_handles.append(submitBtn2.click(**predict_args))
|
||||||
resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
|
resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
|
||||||
resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
|
resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
|
||||||
clearBtn.click(lambda: ("",""), None, [txt, txt2])
|
resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history
|
||||||
clearBtn2.click(lambda: ("",""), None, [txt, txt2])
|
resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history
|
||||||
|
clearBtn.click(None, None, [txt, txt2], _js=js_code_clear)
|
||||||
|
clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
|
||||||
if AUTO_CLEAR_TXT:
|
if AUTO_CLEAR_TXT:
|
||||||
submitBtn.click(lambda: ("",""), None, [txt, txt2])
|
submitBtn.click(None, None, [txt, txt2], _js=js_code_clear)
|
||||||
submitBtn2.click(lambda: ("",""), None, [txt, txt2])
|
submitBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
|
||||||
txt.submit(lambda: ("",""), None, [txt, txt2])
|
txt.submit(None, None, [txt, txt2], _js=js_code_clear)
|
||||||
txt2.submit(lambda: ("",""), None, [txt, txt2])
|
txt2.submit(None, None, [txt, txt2], _js=js_code_clear)
|
||||||
# 基础功能区的回调函数注册
|
# 基础功能区的回调函数注册
|
||||||
for k in functional:
|
for k in functional:
|
||||||
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
|
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
|
||||||
@@ -360,11 +335,14 @@ def main():
|
|||||||
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
|
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
|
||||||
|
|
||||||
|
|
||||||
demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
|
app_block.load(assign_user_uuid, inputs=[cookies], outputs=[cookies])
|
||||||
darkmode_js = js_code_for_darkmode_init
|
|
||||||
demo.load(None, inputs=None, outputs=[persistent_cookie], _js=js_code_for_persistent_cookie_init)
|
from shared_utils.cookie_manager import load_web_cookie_cache__fn_builder
|
||||||
demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js) # 配置暗色主题或亮色主题
|
load_web_cookie_cache = load_web_cookie_cache__fn_builder(customize_btns, cookies, predefined_btns)
|
||||||
demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')
|
app_block.load(load_web_cookie_cache, inputs = [web_cookie_cache, cookies],
|
||||||
|
outputs = [web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init)
|
||||||
|
|
||||||
|
app_block.load(None, inputs=[], outputs=None, _js=f"""()=>GptAcademicJavaScriptInit("{DARK_MODE}","{INIT_SYS_PROMPT}","{ADD_WAIFU}","{LAYOUT}")""") # 配置暗色主题或亮色主题
|
||||||
|
|
||||||
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
|
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
|
||||||
def run_delayed_tasks():
|
def run_delayed_tasks():
|
||||||
@@ -379,28 +357,15 @@ def main():
|
|||||||
|
|
||||||
threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新
|
threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新
|
||||||
threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
|
threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
|
||||||
threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块
|
threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块
|
||||||
|
|
||||||
|
# 运行一些异步任务:自动更新、打开浏览器页面、预热tiktoken模块
|
||||||
run_delayed_tasks()
|
run_delayed_tasks()
|
||||||
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
|
|
||||||
quiet=True,
|
|
||||||
server_name="0.0.0.0",
|
|
||||||
ssl_keyfile=None if SSL_KEYFILE == "" else SSL_KEYFILE,
|
|
||||||
ssl_certfile=None if SSL_CERTFILE == "" else SSL_CERTFILE,
|
|
||||||
ssl_verify=False,
|
|
||||||
server_port=PORT,
|
|
||||||
favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"),
|
|
||||||
auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None,
|
|
||||||
blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
|
|
||||||
|
|
||||||
# 如果需要在二级路径下运行
|
# 最后,正式开始服务
|
||||||
# CUSTOM_PATH = get_conf('CUSTOM_PATH')
|
from shared_utils.fastapi_server import start_app
|
||||||
# if CUSTOM_PATH != "/":
|
start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SSL_CERTFILE)
|
||||||
# from toolbox import run_gradio_in_subpath
|
|
||||||
# run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
|
|
||||||
# else:
|
|
||||||
# demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png",
|
|
||||||
# blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
|
|||||||
@@ -8,10 +8,10 @@
|
|||||||
具备多线程调用能力的函数:在函数插件中被调用,灵活而简洁
|
具备多线程调用能力的函数:在函数插件中被调用,灵活而简洁
|
||||||
2. predict_no_ui_long_connection(...)
|
2. predict_no_ui_long_connection(...)
|
||||||
"""
|
"""
|
||||||
import tiktoken, copy
|
import tiktoken, copy, re
|
||||||
from functools import lru_cache
|
from functools import lru_cache
|
||||||
from concurrent.futures import ThreadPoolExecutor
|
from concurrent.futures import ThreadPoolExecutor
|
||||||
from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask
|
from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask, read_one_api_model_name
|
||||||
|
|
||||||
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
|
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
|
||||||
from .bridge_chatgpt import predict as chatgpt_ui
|
from .bridge_chatgpt import predict as chatgpt_ui
|
||||||
@@ -31,6 +31,12 @@ from .bridge_qianfan import predict as qianfan_ui
|
|||||||
from .bridge_google_gemini import predict as genai_ui
|
from .bridge_google_gemini import predict as genai_ui
|
||||||
from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
|
from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
|
||||||
|
|
||||||
|
from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
|
||||||
|
from .bridge_zhipu import predict as zhipu_ui
|
||||||
|
|
||||||
|
from .bridge_cohere import predict as cohere_ui
|
||||||
|
from .bridge_cohere import predict_no_ui_long_connection as cohere_noui
|
||||||
|
|
||||||
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
|
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
|
||||||
|
|
||||||
class LazyloadTiktoken(object):
|
class LazyloadTiktoken(object):
|
||||||
@@ -58,6 +64,11 @@ API_URL_REDIRECT, AZURE_ENDPOINT, AZURE_ENGINE = get_conf("API_URL_REDIRECT", "A
|
|||||||
openai_endpoint = "https://api.openai.com/v1/chat/completions"
|
openai_endpoint = "https://api.openai.com/v1/chat/completions"
|
||||||
api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
|
api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
|
||||||
newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
|
newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
|
||||||
|
gemini_endpoint = "https://generativelanguage.googleapis.com/v1beta/models"
|
||||||
|
claude_endpoint = "https://api.anthropic.com/v1/messages"
|
||||||
|
yimodel_endpoint = "https://api.lingyiwanwu.com/v1/chat/completions"
|
||||||
|
cohere_endpoint = 'https://api.cohere.ai/v1/chat'
|
||||||
|
|
||||||
if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
|
if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
|
||||||
azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
|
azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
|
||||||
# 兼容旧版的配置
|
# 兼容旧版的配置
|
||||||
@@ -72,7 +83,10 @@ except:
|
|||||||
if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint]
|
if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint]
|
||||||
if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint]
|
if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint]
|
||||||
if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint]
|
if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint]
|
||||||
|
if gemini_endpoint in API_URL_REDIRECT: gemini_endpoint = API_URL_REDIRECT[gemini_endpoint]
|
||||||
|
if claude_endpoint in API_URL_REDIRECT: claude_endpoint = API_URL_REDIRECT[claude_endpoint]
|
||||||
|
if yimodel_endpoint in API_URL_REDIRECT: yimodel_endpoint = API_URL_REDIRECT[yimodel_endpoint]
|
||||||
|
if cohere_endpoint in API_URL_REDIRECT: cohere_endpoint = API_URL_REDIRECT[cohere_endpoint]
|
||||||
|
|
||||||
# 获取tokenizer
|
# 获取tokenizer
|
||||||
tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
|
tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
|
||||||
@@ -91,7 +105,7 @@ model_info = {
|
|||||||
"fn_with_ui": chatgpt_ui,
|
"fn_with_ui": chatgpt_ui,
|
||||||
"fn_without_ui": chatgpt_noui,
|
"fn_without_ui": chatgpt_noui,
|
||||||
"endpoint": openai_endpoint,
|
"endpoint": openai_endpoint,
|
||||||
"max_token": 4096,
|
"max_token": 16385,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
"token_cnt": get_token_num_gpt35,
|
"token_cnt": get_token_num_gpt35,
|
||||||
},
|
},
|
||||||
@@ -123,7 +137,16 @@ model_info = {
|
|||||||
"token_cnt": get_token_num_gpt35,
|
"token_cnt": get_token_num_gpt35,
|
||||||
},
|
},
|
||||||
|
|
||||||
"gpt-3.5-turbo-1106": {#16k
|
"gpt-3.5-turbo-1106": { #16k
|
||||||
|
"fn_with_ui": chatgpt_ui,
|
||||||
|
"fn_without_ui": chatgpt_noui,
|
||||||
|
"endpoint": openai_endpoint,
|
||||||
|
"max_token": 16385,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
|
||||||
|
"gpt-3.5-turbo-0125": { #16k
|
||||||
"fn_with_ui": chatgpt_ui,
|
"fn_with_ui": chatgpt_ui,
|
||||||
"fn_without_ui": chatgpt_noui,
|
"fn_without_ui": chatgpt_noui,
|
||||||
"endpoint": openai_endpoint,
|
"endpoint": openai_endpoint,
|
||||||
@@ -215,16 +238,25 @@ model_info = {
|
|||||||
"token_cnt": get_token_num_gpt4,
|
"token_cnt": get_token_num_gpt4,
|
||||||
},
|
},
|
||||||
|
|
||||||
# api_2d (此后不需要在此处添加api2d的接口了,因为下面的代码会自动添加)
|
# 智谱AI
|
||||||
"api2d-gpt-3.5-turbo": {
|
"glm-4": {
|
||||||
"fn_with_ui": chatgpt_ui,
|
"fn_with_ui": zhipu_ui,
|
||||||
"fn_without_ui": chatgpt_noui,
|
"fn_without_ui": zhipu_noui,
|
||||||
"endpoint": api2d_endpoint,
|
"endpoint": None,
|
||||||
"max_token": 4096,
|
"max_token": 10124 * 8,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
"glm-3-turbo": {
|
||||||
|
"fn_with_ui": zhipu_ui,
|
||||||
|
"fn_without_ui": zhipu_noui,
|
||||||
|
"endpoint": None,
|
||||||
|
"max_token": 10124 * 4,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
"token_cnt": get_token_num_gpt35,
|
"token_cnt": get_token_num_gpt35,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
# api_2d (此后不需要在此处添加api2d的接口了,因为下面的代码会自动添加)
|
||||||
"api2d-gpt-4": {
|
"api2d-gpt-4": {
|
||||||
"fn_with_ui": chatgpt_ui,
|
"fn_with_ui": chatgpt_ui,
|
||||||
"fn_without_ui": chatgpt_noui,
|
"fn_without_ui": chatgpt_noui,
|
||||||
@@ -270,7 +302,7 @@ model_info = {
|
|||||||
"gemini-pro": {
|
"gemini-pro": {
|
||||||
"fn_with_ui": genai_ui,
|
"fn_with_ui": genai_ui,
|
||||||
"fn_without_ui": genai_noui,
|
"fn_without_ui": genai_noui,
|
||||||
"endpoint": None,
|
"endpoint": gemini_endpoint,
|
||||||
"max_token": 1024 * 32,
|
"max_token": 1024 * 32,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
"token_cnt": get_token_num_gpt35,
|
"token_cnt": get_token_num_gpt35,
|
||||||
@@ -278,13 +310,56 @@ model_info = {
|
|||||||
"gemini-pro-vision": {
|
"gemini-pro-vision": {
|
||||||
"fn_with_ui": genai_ui,
|
"fn_with_ui": genai_ui,
|
||||||
"fn_without_ui": genai_noui,
|
"fn_without_ui": genai_noui,
|
||||||
|
"endpoint": gemini_endpoint,
|
||||||
|
"max_token": 1024 * 32,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
|
||||||
|
# cohere
|
||||||
|
"cohere-command-r-plus": {
|
||||||
|
"fn_with_ui": cohere_ui,
|
||||||
|
"fn_without_ui": cohere_noui,
|
||||||
|
"can_multi_thread": True,
|
||||||
|
"endpoint": cohere_endpoint,
|
||||||
|
"max_token": 1024 * 4,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
|
||||||
|
}
|
||||||
|
# -=-=-=-=-=-=- 月之暗面 -=-=-=-=-=-=-
|
||||||
|
from request_llms.bridge_moonshot import predict as moonshot_ui
|
||||||
|
from request_llms.bridge_moonshot import predict_no_ui_long_connection as moonshot_no_ui
|
||||||
|
model_info.update({
|
||||||
|
"moonshot-v1-8k": {
|
||||||
|
"fn_with_ui": moonshot_ui,
|
||||||
|
"fn_without_ui": moonshot_no_ui,
|
||||||
|
"can_multi_thread": True,
|
||||||
|
"endpoint": None,
|
||||||
|
"max_token": 1024 * 8,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
"moonshot-v1-32k": {
|
||||||
|
"fn_with_ui": moonshot_ui,
|
||||||
|
"fn_without_ui": moonshot_no_ui,
|
||||||
|
"can_multi_thread": True,
|
||||||
"endpoint": None,
|
"endpoint": None,
|
||||||
"max_token": 1024 * 32,
|
"max_token": 1024 * 32,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
"token_cnt": get_token_num_gpt35,
|
"token_cnt": get_token_num_gpt35,
|
||||||
},
|
},
|
||||||
}
|
"moonshot-v1-128k": {
|
||||||
|
"fn_with_ui": moonshot_ui,
|
||||||
|
"fn_without_ui": moonshot_no_ui,
|
||||||
|
"can_multi_thread": True,
|
||||||
|
"endpoint": None,
|
||||||
|
"max_token": 1024 * 128,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
}
|
||||||
|
})
|
||||||
# -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=-
|
# -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=-
|
||||||
for model in AVAIL_LLM_MODELS:
|
for model in AVAIL_LLM_MODELS:
|
||||||
if model.startswith('api2d-') and (model.replace('api2d-','') in model_info.keys()):
|
if model.startswith('api2d-') and (model.replace('api2d-','') in model_info.keys()):
|
||||||
@@ -300,25 +375,67 @@ for model in AVAIL_LLM_MODELS:
|
|||||||
model_info.update({model: mi})
|
model_info.update({model: mi})
|
||||||
|
|
||||||
# -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=-
|
# -=-=-=-=-=-=- 以下部分是新加入的模型,可能附带额外依赖 -=-=-=-=-=-=-
|
||||||
if "claude-1-100k" in AVAIL_LLM_MODELS or "claude-2" in AVAIL_LLM_MODELS:
|
# claude家族
|
||||||
|
claude_models = ["claude-instant-1.2","claude-2.0","claude-2.1","claude-3-haiku-20240307","claude-3-sonnet-20240229","claude-3-opus-20240229"]
|
||||||
|
if any(item in claude_models for item in AVAIL_LLM_MODELS):
|
||||||
from .bridge_claude import predict_no_ui_long_connection as claude_noui
|
from .bridge_claude import predict_no_ui_long_connection as claude_noui
|
||||||
from .bridge_claude import predict as claude_ui
|
from .bridge_claude import predict as claude_ui
|
||||||
model_info.update({
|
model_info.update({
|
||||||
"claude-1-100k": {
|
"claude-instant-1.2": {
|
||||||
"fn_with_ui": claude_ui,
|
"fn_with_ui": claude_ui,
|
||||||
"fn_without_ui": claude_noui,
|
"fn_without_ui": claude_noui,
|
||||||
"endpoint": None,
|
"endpoint": claude_endpoint,
|
||||||
"max_token": 8196,
|
"max_token": 100000,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
"token_cnt": get_token_num_gpt35,
|
"token_cnt": get_token_num_gpt35,
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
model_info.update({
|
model_info.update({
|
||||||
"claude-2": {
|
"claude-2.0": {
|
||||||
"fn_with_ui": claude_ui,
|
"fn_with_ui": claude_ui,
|
||||||
"fn_without_ui": claude_noui,
|
"fn_without_ui": claude_noui,
|
||||||
"endpoint": None,
|
"endpoint": claude_endpoint,
|
||||||
"max_token": 8196,
|
"max_token": 100000,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
model_info.update({
|
||||||
|
"claude-2.1": {
|
||||||
|
"fn_with_ui": claude_ui,
|
||||||
|
"fn_without_ui": claude_noui,
|
||||||
|
"endpoint": claude_endpoint,
|
||||||
|
"max_token": 200000,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
model_info.update({
|
||||||
|
"claude-3-haiku-20240307": {
|
||||||
|
"fn_with_ui": claude_ui,
|
||||||
|
"fn_without_ui": claude_noui,
|
||||||
|
"endpoint": claude_endpoint,
|
||||||
|
"max_token": 200000,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
model_info.update({
|
||||||
|
"claude-3-sonnet-20240229": {
|
||||||
|
"fn_with_ui": claude_ui,
|
||||||
|
"fn_without_ui": claude_noui,
|
||||||
|
"endpoint": claude_endpoint,
|
||||||
|
"max_token": 200000,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
model_info.update({
|
||||||
|
"claude-3-opus-20240229": {
|
||||||
|
"fn_with_ui": claude_ui,
|
||||||
|
"fn_without_ui": claude_noui,
|
||||||
|
"endpoint": claude_endpoint,
|
||||||
|
"max_token": 200000,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
"token_cnt": get_token_num_gpt35,
|
"token_cnt": get_token_num_gpt35,
|
||||||
},
|
},
|
||||||
@@ -388,22 +505,6 @@ if "stack-claude" in AVAIL_LLM_MODELS:
|
|||||||
"token_cnt": get_token_num_gpt35,
|
"token_cnt": get_token_num_gpt35,
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
if "newbing-free" in AVAIL_LLM_MODELS:
|
|
||||||
try:
|
|
||||||
from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
|
|
||||||
from .bridge_newbingfree import predict as newbingfree_ui
|
|
||||||
model_info.update({
|
|
||||||
"newbing-free": {
|
|
||||||
"fn_with_ui": newbingfree_ui,
|
|
||||||
"fn_without_ui": newbingfree_noui,
|
|
||||||
"endpoint": newbing_endpoint,
|
|
||||||
"max_token": 4096,
|
|
||||||
"tokenizer": tokenizer_gpt35,
|
|
||||||
"token_cnt": get_token_num_gpt35,
|
|
||||||
}
|
|
||||||
})
|
|
||||||
except:
|
|
||||||
print(trimmed_format_exc())
|
|
||||||
if "newbing" in AVAIL_LLM_MODELS: # same with newbing-free
|
if "newbing" in AVAIL_LLM_MODELS: # same with newbing-free
|
||||||
try:
|
try:
|
||||||
from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
|
from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
|
||||||
@@ -436,6 +537,7 @@ if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free
|
|||||||
})
|
})
|
||||||
except:
|
except:
|
||||||
print(trimmed_format_exc())
|
print(trimmed_format_exc())
|
||||||
|
# -=-=-=-=-=-=- 上海AI-LAB书生大模型 -=-=-=-=-=-=-
|
||||||
if "internlm" in AVAIL_LLM_MODELS:
|
if "internlm" in AVAIL_LLM_MODELS:
|
||||||
try:
|
try:
|
||||||
from .bridge_internlm import predict_no_ui_long_connection as internlm_noui
|
from .bridge_internlm import predict_no_ui_long_connection as internlm_noui
|
||||||
@@ -468,6 +570,7 @@ if "chatglm_onnx" in AVAIL_LLM_MODELS:
|
|||||||
})
|
})
|
||||||
except:
|
except:
|
||||||
print(trimmed_format_exc())
|
print(trimmed_format_exc())
|
||||||
|
# -=-=-=-=-=-=- 通义-本地模型 -=-=-=-=-=-=-
|
||||||
if "qwen-local" in AVAIL_LLM_MODELS:
|
if "qwen-local" in AVAIL_LLM_MODELS:
|
||||||
try:
|
try:
|
||||||
from .bridge_qwen_local import predict_no_ui_long_connection as qwen_local_noui
|
from .bridge_qwen_local import predict_no_ui_long_connection as qwen_local_noui
|
||||||
@@ -476,6 +579,7 @@ if "qwen-local" in AVAIL_LLM_MODELS:
|
|||||||
"qwen-local": {
|
"qwen-local": {
|
||||||
"fn_with_ui": qwen_local_ui,
|
"fn_with_ui": qwen_local_ui,
|
||||||
"fn_without_ui": qwen_local_noui,
|
"fn_without_ui": qwen_local_noui,
|
||||||
|
"can_multi_thread": False,
|
||||||
"endpoint": None,
|
"endpoint": None,
|
||||||
"max_token": 4096,
|
"max_token": 4096,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
@@ -484,6 +588,7 @@ if "qwen-local" in AVAIL_LLM_MODELS:
|
|||||||
})
|
})
|
||||||
except:
|
except:
|
||||||
print(trimmed_format_exc())
|
print(trimmed_format_exc())
|
||||||
|
# -=-=-=-=-=-=- 通义-在线模型 -=-=-=-=-=-=-
|
||||||
if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai
|
if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai
|
||||||
try:
|
try:
|
||||||
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
|
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
|
||||||
@@ -492,6 +597,7 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
|
|||||||
"qwen-turbo": {
|
"qwen-turbo": {
|
||||||
"fn_with_ui": qwen_ui,
|
"fn_with_ui": qwen_ui,
|
||||||
"fn_without_ui": qwen_noui,
|
"fn_without_ui": qwen_noui,
|
||||||
|
"can_multi_thread": True,
|
||||||
"endpoint": None,
|
"endpoint": None,
|
||||||
"max_token": 6144,
|
"max_token": 6144,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
@@ -500,6 +606,7 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
|
|||||||
"qwen-plus": {
|
"qwen-plus": {
|
||||||
"fn_with_ui": qwen_ui,
|
"fn_with_ui": qwen_ui,
|
||||||
"fn_without_ui": qwen_noui,
|
"fn_without_ui": qwen_noui,
|
||||||
|
"can_multi_thread": True,
|
||||||
"endpoint": None,
|
"endpoint": None,
|
||||||
"max_token": 30720,
|
"max_token": 30720,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
@@ -508,6 +615,7 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
|
|||||||
"qwen-max": {
|
"qwen-max": {
|
||||||
"fn_with_ui": qwen_ui,
|
"fn_with_ui": qwen_ui,
|
||||||
"fn_without_ui": qwen_noui,
|
"fn_without_ui": qwen_noui,
|
||||||
|
"can_multi_thread": True,
|
||||||
"endpoint": None,
|
"endpoint": None,
|
||||||
"max_token": 28672,
|
"max_token": 28672,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
@@ -516,7 +624,35 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
|
|||||||
})
|
})
|
||||||
except:
|
except:
|
||||||
print(trimmed_format_exc())
|
print(trimmed_format_exc())
|
||||||
if "spark" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
|
# -=-=-=-=-=-=- 零一万物模型 -=-=-=-=-=-=-
|
||||||
|
if "yi-34b-chat-0205" in AVAIL_LLM_MODELS or "yi-34b-chat-200k" in AVAIL_LLM_MODELS: # zhipuai
|
||||||
|
try:
|
||||||
|
from .bridge_yimodel import predict_no_ui_long_connection as yimodel_noui
|
||||||
|
from .bridge_yimodel import predict as yimodel_ui
|
||||||
|
model_info.update({
|
||||||
|
"yi-34b-chat-0205": {
|
||||||
|
"fn_with_ui": yimodel_ui,
|
||||||
|
"fn_without_ui": yimodel_noui,
|
||||||
|
"can_multi_thread": False, # 目前来说,默认情况下并发量极低,因此禁用
|
||||||
|
"endpoint": yimodel_endpoint,
|
||||||
|
"max_token": 4000,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
"yi-34b-chat-200k": {
|
||||||
|
"fn_with_ui": yimodel_ui,
|
||||||
|
"fn_without_ui": yimodel_noui,
|
||||||
|
"can_multi_thread": False, # 目前来说,默认情况下并发量极低,因此禁用
|
||||||
|
"endpoint": yimodel_endpoint,
|
||||||
|
"max_token": 200000,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
except:
|
||||||
|
print(trimmed_format_exc())
|
||||||
|
# -=-=-=-=-=-=- 讯飞星火认知大模型 -=-=-=-=-=-=-
|
||||||
|
if "spark" in AVAIL_LLM_MODELS:
|
||||||
try:
|
try:
|
||||||
from .bridge_spark import predict_no_ui_long_connection as spark_noui
|
from .bridge_spark import predict_no_ui_long_connection as spark_noui
|
||||||
from .bridge_spark import predict as spark_ui
|
from .bridge_spark import predict as spark_ui
|
||||||
@@ -524,6 +660,7 @@ if "spark" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
|
|||||||
"spark": {
|
"spark": {
|
||||||
"fn_with_ui": spark_ui,
|
"fn_with_ui": spark_ui,
|
||||||
"fn_without_ui": spark_noui,
|
"fn_without_ui": spark_noui,
|
||||||
|
"can_multi_thread": True,
|
||||||
"endpoint": None,
|
"endpoint": None,
|
||||||
"max_token": 4096,
|
"max_token": 4096,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
@@ -540,6 +677,7 @@ if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
|
|||||||
"sparkv2": {
|
"sparkv2": {
|
||||||
"fn_with_ui": spark_ui,
|
"fn_with_ui": spark_ui,
|
||||||
"fn_without_ui": spark_noui,
|
"fn_without_ui": spark_noui,
|
||||||
|
"can_multi_thread": True,
|
||||||
"endpoint": None,
|
"endpoint": None,
|
||||||
"max_token": 4096,
|
"max_token": 4096,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
@@ -548,7 +686,7 @@ if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
|
|||||||
})
|
})
|
||||||
except:
|
except:
|
||||||
print(trimmed_format_exc())
|
print(trimmed_format_exc())
|
||||||
if "sparkv3" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
|
if "sparkv3" in AVAIL_LLM_MODELS or "sparkv3.5" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
|
||||||
try:
|
try:
|
||||||
from .bridge_spark import predict_no_ui_long_connection as spark_noui
|
from .bridge_spark import predict_no_ui_long_connection as spark_noui
|
||||||
from .bridge_spark import predict as spark_ui
|
from .bridge_spark import predict as spark_ui
|
||||||
@@ -556,6 +694,16 @@ if "sparkv3" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
|
|||||||
"sparkv3": {
|
"sparkv3": {
|
||||||
"fn_with_ui": spark_ui,
|
"fn_with_ui": spark_ui,
|
||||||
"fn_without_ui": spark_noui,
|
"fn_without_ui": spark_noui,
|
||||||
|
"can_multi_thread": True,
|
||||||
|
"endpoint": None,
|
||||||
|
"max_token": 4096,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
"sparkv3.5": {
|
||||||
|
"fn_with_ui": spark_ui,
|
||||||
|
"fn_without_ui": spark_noui,
|
||||||
|
"can_multi_thread": True,
|
||||||
"endpoint": None,
|
"endpoint": None,
|
||||||
"max_token": 4096,
|
"max_token": 4096,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
@@ -580,22 +728,22 @@ if "llama2" in AVAIL_LLM_MODELS: # llama2
|
|||||||
})
|
})
|
||||||
except:
|
except:
|
||||||
print(trimmed_format_exc())
|
print(trimmed_format_exc())
|
||||||
if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai
|
# -=-=-=-=-=-=- 智谱 -=-=-=-=-=-=-
|
||||||
|
if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容配置
|
||||||
try:
|
try:
|
||||||
from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
|
|
||||||
from .bridge_zhipu import predict as zhipu_ui
|
|
||||||
model_info.update({
|
model_info.update({
|
||||||
"zhipuai": {
|
"zhipuai": {
|
||||||
"fn_with_ui": zhipu_ui,
|
"fn_with_ui": zhipu_ui,
|
||||||
"fn_without_ui": zhipu_noui,
|
"fn_without_ui": zhipu_noui,
|
||||||
"endpoint": None,
|
"endpoint": None,
|
||||||
"max_token": 4096,
|
"max_token": 10124 * 8,
|
||||||
"tokenizer": tokenizer_gpt35,
|
"tokenizer": tokenizer_gpt35,
|
||||||
"token_cnt": get_token_num_gpt35,
|
"token_cnt": get_token_num_gpt35,
|
||||||
}
|
},
|
||||||
})
|
})
|
||||||
except:
|
except:
|
||||||
print(trimmed_format_exc())
|
print(trimmed_format_exc())
|
||||||
|
# -=-=-=-=-=-=- 幻方-深度求索大模型 -=-=-=-=-=-=-
|
||||||
if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
|
if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
|
||||||
try:
|
try:
|
||||||
from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui
|
from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui
|
||||||
@@ -612,26 +760,34 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
|
|||||||
})
|
})
|
||||||
except:
|
except:
|
||||||
print(trimmed_format_exc())
|
print(trimmed_format_exc())
|
||||||
# if "skylark" in AVAIL_LLM_MODELS:
|
|
||||||
# try:
|
|
||||||
# from .bridge_skylark2 import predict_no_ui_long_connection as skylark_noui
|
|
||||||
# from .bridge_skylark2 import predict as skylark_ui
|
|
||||||
# model_info.update({
|
|
||||||
# "skylark": {
|
|
||||||
# "fn_with_ui": skylark_ui,
|
|
||||||
# "fn_without_ui": skylark_noui,
|
|
||||||
# "endpoint": None,
|
|
||||||
# "max_token": 4096,
|
|
||||||
# "tokenizer": tokenizer_gpt35,
|
|
||||||
# "token_cnt": get_token_num_gpt35,
|
|
||||||
# }
|
|
||||||
# })
|
|
||||||
# except:
|
|
||||||
# print(trimmed_format_exc())
|
|
||||||
|
|
||||||
|
|
||||||
# <-- 用于定义和切换多个azure模型 -->
|
# -=-=-=-=-=-=- one-api 对齐支持 -=-=-=-=-=-=-
|
||||||
AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")
|
for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]:
|
||||||
|
# 为了更灵活地接入one-api多模型管理界面,设计了此接口,例子:AVAIL_LLM_MODELS = ["one-api-mixtral-8x7b(max_token=6666)"]
|
||||||
|
# 其中
|
||||||
|
# "one-api-" 是前缀(必要)
|
||||||
|
# "mixtral-8x7b" 是模型名(必要)
|
||||||
|
# "(max_token=6666)" 是配置(非必要)
|
||||||
|
try:
|
||||||
|
_, max_token_tmp = read_one_api_model_name(model)
|
||||||
|
except:
|
||||||
|
print(f"one-api模型 {model} 的 max_token 配置不是整数,请检查配置文件。")
|
||||||
|
continue
|
||||||
|
model_info.update({
|
||||||
|
model: {
|
||||||
|
"fn_with_ui": chatgpt_ui,
|
||||||
|
"fn_without_ui": chatgpt_noui,
|
||||||
|
"endpoint": openai_endpoint,
|
||||||
|
"max_token": max_token_tmp,
|
||||||
|
"tokenizer": tokenizer_gpt35,
|
||||||
|
"token_cnt": get_token_num_gpt35,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
# -=-=-=-=-=-=- azure模型对齐支持 -=-=-=-=-=-=-
|
||||||
|
AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY") # <-- 用于定义和切换多个azure模型 -->
|
||||||
if len(AZURE_CFG_ARRAY) > 0:
|
if len(AZURE_CFG_ARRAY) > 0:
|
||||||
for azure_model_name, azure_cfg_dict in AZURE_CFG_ARRAY.items():
|
for azure_model_name, azure_cfg_dict in AZURE_CFG_ARRAY.items():
|
||||||
# 可能会覆盖之前的配置,但这是意料之中的
|
# 可能会覆盖之前的配置,但这是意料之中的
|
||||||
@@ -660,7 +816,7 @@ def LLM_CATCH_EXCEPTION(f):
|
|||||||
"""
|
"""
|
||||||
装饰器函数,将错误显示出来
|
装饰器函数,将错误显示出来
|
||||||
"""
|
"""
|
||||||
def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
|
def decorated(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list, console_slience:bool):
|
||||||
try:
|
try:
|
||||||
return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
|
return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -670,9 +826,9 @@ def LLM_CATCH_EXCEPTION(f):
|
|||||||
return decorated
|
return decorated
|
||||||
|
|
||||||
|
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window=[], console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list, sys_prompt:str, observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
|
发送至LLM,等待回复,一次性完成,不显示中间过程。但内部(尽可能地)用stream的方法避免中途网线被掐。
|
||||||
inputs:
|
inputs:
|
||||||
是本次问询的输入
|
是本次问询的输入
|
||||||
sys_prompt:
|
sys_prompt:
|
||||||
@@ -690,7 +846,6 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
|
|||||||
model = llm_kwargs['llm_model']
|
model = llm_kwargs['llm_model']
|
||||||
n_model = 1
|
n_model = 1
|
||||||
if '&' not in model:
|
if '&' not in model:
|
||||||
assert not model.startswith("tgui"), "TGUI不支持函数插件的实现"
|
|
||||||
|
|
||||||
# 如果只询问1个大语言模型:
|
# 如果只询问1个大语言模型:
|
||||||
method = model_info[model]["fn_without_ui"]
|
method = model_info[model]["fn_without_ui"]
|
||||||
@@ -725,7 +880,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
|
|||||||
# 观察窗(window)
|
# 观察窗(window)
|
||||||
chat_string = []
|
chat_string = []
|
||||||
for i in range(n_model):
|
for i in range(n_model):
|
||||||
chat_string.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>" )
|
color = colors[i%len(colors)]
|
||||||
|
chat_string.append( f"【{str(models[i])} 说】: <font color=\"{color}\"> {window_mutex[i][0]} </font>" )
|
||||||
res = '<br/><br/>\n\n---\n\n'.join(chat_string)
|
res = '<br/><br/>\n\n---\n\n'.join(chat_string)
|
||||||
# # # # # # # # # # #
|
# # # # # # # # # # #
|
||||||
observe_window[0] = res
|
observe_window[0] = res
|
||||||
@@ -742,22 +898,30 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
|
|||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
|
|
||||||
for i, future in enumerate(futures): # wait and get
|
for i, future in enumerate(futures): # wait and get
|
||||||
return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {future.result()} </font>" )
|
color = colors[i%len(colors)]
|
||||||
|
return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{color}\"> {future.result()} </font>" )
|
||||||
|
|
||||||
window_mutex[-1] = False # stop mutex thread
|
window_mutex[-1] = False # stop mutex thread
|
||||||
res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
|
res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
|
||||||
return res
|
return res
|
||||||
|
|
||||||
|
|
||||||
def predict(inputs, llm_kwargs, *args, **kwargs):
|
def predict(inputs:str, llm_kwargs:dict, *args, **kwargs):
|
||||||
"""
|
"""
|
||||||
发送至LLM,流式获取输出。
|
发送至LLM,流式获取输出。
|
||||||
用于基础的对话功能。
|
用于基础的对话功能。
|
||||||
inputs 是本次问询的输入
|
|
||||||
top_p, temperature是LLM的内部调优参数
|
完整参数列表:
|
||||||
history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
|
predict(
|
||||||
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
|
inputs:str, # 是本次问询的输入
|
||||||
additional_fn代表点击的哪个按钮,按钮见functional.py
|
llm_kwargs:dict, # 是LLM的内部调优参数
|
||||||
|
plugin_kwargs:dict, # 是插件的内部参数
|
||||||
|
chatbot:ChatBotWithCookies, # 原样传递,负责向用户前端展示对话,兼顾前端状态的功能
|
||||||
|
history:list=[], # 是之前的对话列表
|
||||||
|
system_prompt:str='', # 系统静默prompt
|
||||||
|
stream:bool=True, # 是否流式输出(已弃用)
|
||||||
|
additional_fn:str=None # 基础功能区按钮的附加功能
|
||||||
|
):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
|
inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
|
||||||
|
|||||||
@@ -137,7 +137,8 @@ class GetGLMFTHandle(Process):
|
|||||||
global glmft_handle
|
global glmft_handle
|
||||||
glmft_handle = None
|
glmft_handle = None
|
||||||
#################################################################################
|
#################################################################################
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
|
||||||
|
observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
多线程方法
|
多线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
|
|||||||
@@ -21,7 +21,9 @@ import random
|
|||||||
|
|
||||||
# config_private.py放自己的秘密如API和代理网址
|
# config_private.py放自己的秘密如API和代理网址
|
||||||
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
|
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
|
||||||
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder
|
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history
|
||||||
|
from toolbox import trimmed_format_exc, is_the_upload_folder, read_one_api_model_name, log_chat
|
||||||
|
from toolbox import ChatBotWithCookies
|
||||||
proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
|
proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
|
||||||
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')
|
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')
|
||||||
|
|
||||||
@@ -68,7 +70,7 @@ def verify_endpoint(endpoint):
|
|||||||
raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
|
raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
|
||||||
return endpoint
|
return endpoint
|
||||||
|
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
|
发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
|
||||||
inputs:
|
inputs:
|
||||||
@@ -113,6 +115,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
|||||||
error_msg = get_full_error(chunk, stream_response).decode()
|
error_msg = get_full_error(chunk, stream_response).decode()
|
||||||
if "reduce the length" in error_msg:
|
if "reduce the length" in error_msg:
|
||||||
raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
|
raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
|
||||||
|
elif """type":"upstream_error","param":"307""" in error_msg:
|
||||||
|
raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
|
||||||
else:
|
else:
|
||||||
raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
|
raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
|
||||||
if ('data: [DONE]' in chunk_decoded): break # api2d 正常完成
|
if ('data: [DONE]' in chunk_decoded): break # api2d 正常完成
|
||||||
@@ -123,8 +127,9 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
|||||||
json_data = chunkjson['choices'][0]
|
json_data = chunkjson['choices'][0]
|
||||||
delta = json_data["delta"]
|
delta = json_data["delta"]
|
||||||
if len(delta) == 0: break
|
if len(delta) == 0: break
|
||||||
if "role" in delta: continue
|
if (not has_content) and has_role: continue
|
||||||
if "content" in delta:
|
if (not has_content) and (not has_role): continue # raise RuntimeError("发现不标准的第三方接口:"+delta)
|
||||||
|
if has_content: # has_role = True/False
|
||||||
result += delta["content"]
|
result += delta["content"]
|
||||||
if not console_slience: print(delta["content"], end='')
|
if not console_slience: print(delta["content"], end='')
|
||||||
if observe_window is not None:
|
if observe_window is not None:
|
||||||
@@ -143,7 +148,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
|
def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
|
||||||
|
history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
|
||||||
"""
|
"""
|
||||||
发送至chatGPT,流式获取输出。
|
发送至chatGPT,流式获取输出。
|
||||||
用于基础的对话功能。
|
用于基础的对话功能。
|
||||||
@@ -169,7 +175,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
|
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
|
||||||
|
|
||||||
raw_input = inputs
|
raw_input = inputs
|
||||||
logging.info(f'[raw_input] {raw_input}')
|
# logging.info(f'[raw_input] {raw_input}')
|
||||||
chatbot.append((inputs, ""))
|
chatbot.append((inputs, ""))
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
||||||
|
|
||||||
@@ -250,7 +256,8 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
# 前者是API2D的结束条件,后者是OPENAI的结束条件
|
# 前者是API2D的结束条件,后者是OPENAI的结束条件
|
||||||
if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
|
if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
|
||||||
# 判定为数据流的结束,gpt_replying_buffer也写完了
|
# 判定为数据流的结束,gpt_replying_buffer也写完了
|
||||||
logging.info(f'[response] {gpt_replying_buffer}')
|
# logging.info(f'[response] {gpt_replying_buffer}')
|
||||||
|
log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
|
||||||
break
|
break
|
||||||
# 处理数据流的主体
|
# 处理数据流的主体
|
||||||
status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
|
status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
|
||||||
@@ -262,7 +269,8 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
# 一些第三方接口的出现这样的错误,兼容一下吧
|
# 一些第三方接口的出现这样的错误,兼容一下吧
|
||||||
continue
|
continue
|
||||||
else:
|
else:
|
||||||
# 一些垃圾第三方接口的出现这样的错误
|
# 至此已经超出了正常接口应该进入的范围,一些垃圾第三方接口会出现这样的错误
|
||||||
|
if chunkjson['choices'][0]["delta"]["content"] is None: continue # 一些垃圾第三方接口出现这样的错误,兼容一下吧
|
||||||
gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
|
gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
|
||||||
|
|
||||||
history[-1] = gpt_replying_buffer
|
history[-1] = gpt_replying_buffer
|
||||||
@@ -354,6 +362,9 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
|
|||||||
model = llm_kwargs['llm_model']
|
model = llm_kwargs['llm_model']
|
||||||
if llm_kwargs['llm_model'].startswith('api2d-'):
|
if llm_kwargs['llm_model'].startswith('api2d-'):
|
||||||
model = llm_kwargs['llm_model'][len('api2d-'):]
|
model = llm_kwargs['llm_model'][len('api2d-'):]
|
||||||
|
if llm_kwargs['llm_model'].startswith('one-api-'):
|
||||||
|
model = llm_kwargs['llm_model'][len('one-api-'):]
|
||||||
|
model, _ = read_one_api_model_name(model)
|
||||||
|
|
||||||
if model == "gpt-3.5-random": # 随机选择, 绕过openai访问频率限制
|
if model == "gpt-3.5-random": # 随机选择, 绕过openai访问频率限制
|
||||||
model = random.choice([
|
model = random.choice([
|
||||||
|
|||||||
@@ -9,15 +9,15 @@
|
|||||||
具备多线程调用能力的函数
|
具备多线程调用能力的函数
|
||||||
2. predict_no_ui_long_connection:支持多线程
|
2. predict_no_ui_long_connection:支持多线程
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import os
|
|
||||||
import json
|
|
||||||
import time
|
|
||||||
import gradio as gr
|
|
||||||
import logging
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
import traceback
|
import traceback
|
||||||
|
import json
|
||||||
import requests
|
import requests
|
||||||
import importlib
|
from toolbox import get_conf, update_ui, trimmed_format_exc, encode_image, every_image_file_in_path, log_chat
|
||||||
|
picture_system_prompt = "\n当回复图像时,必须说明正在回复哪张图像。所有图像仅在最后一个问题中提供,即使它们在历史记录中被提及。请使用'这是第X张图像:'的格式来指明您正在描述的是哪张图像。"
|
||||||
|
Claude_3_Models = ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229"]
|
||||||
|
|
||||||
# config_private.py放自己的秘密如API和代理网址
|
# config_private.py放自己的秘密如API和代理网址
|
||||||
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
|
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
|
||||||
@@ -39,6 +39,34 @@ def get_full_error(chunk, stream_response):
|
|||||||
break
|
break
|
||||||
return chunk
|
return chunk
|
||||||
|
|
||||||
|
def decode_chunk(chunk):
|
||||||
|
# 提前读取一些信息(用于判断异常)
|
||||||
|
chunk_decoded = chunk.decode()
|
||||||
|
chunkjson = None
|
||||||
|
is_last_chunk = False
|
||||||
|
need_to_pass = False
|
||||||
|
if chunk_decoded.startswith('data:'):
|
||||||
|
try:
|
||||||
|
chunkjson = json.loads(chunk_decoded[6:])
|
||||||
|
except:
|
||||||
|
need_to_pass = True
|
||||||
|
pass
|
||||||
|
elif chunk_decoded.startswith('event:'):
|
||||||
|
try:
|
||||||
|
event_type = chunk_decoded.split(':')[1].strip()
|
||||||
|
if event_type == 'content_block_stop' or event_type == 'message_stop':
|
||||||
|
is_last_chunk = True
|
||||||
|
elif event_type == 'content_block_start' or event_type == 'message_start':
|
||||||
|
need_to_pass = True
|
||||||
|
pass
|
||||||
|
except:
|
||||||
|
need_to_pass = True
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
need_to_pass = True
|
||||||
|
pass
|
||||||
|
return need_to_pass, chunkjson, is_last_chunk
|
||||||
|
|
||||||
|
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
|
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
|
||||||
"""
|
"""
|
||||||
@@ -54,50 +82,67 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
|||||||
observe_window = None:
|
observe_window = None:
|
||||||
用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
|
用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
|
||||||
"""
|
"""
|
||||||
from anthropic import Anthropic
|
|
||||||
watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
|
watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
|
||||||
prompt = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
|
|
||||||
retry = 0
|
|
||||||
if len(ANTHROPIC_API_KEY) == 0:
|
if len(ANTHROPIC_API_KEY) == 0:
|
||||||
raise RuntimeError("没有设置ANTHROPIC_API_KEY选项")
|
raise RuntimeError("没有设置ANTHROPIC_API_KEY选项")
|
||||||
|
if inputs == "": inputs = "空空如也的输入栏"
|
||||||
|
headers, message = generate_payload(inputs, llm_kwargs, history, sys_prompt, image_paths=None)
|
||||||
|
retry = 0
|
||||||
|
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
# make a POST request to the API endpoint, stream=False
|
# make a POST request to the API endpoint, stream=False
|
||||||
from .bridge_all import model_info
|
from .bridge_all import model_info
|
||||||
anthropic = Anthropic(api_key=ANTHROPIC_API_KEY)
|
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
|
||||||
# endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
|
response = requests.post(endpoint, headers=headers, json=message,
|
||||||
# with ProxyNetworkActivate()
|
proxies=proxies, stream=True, timeout=TIMEOUT_SECONDS);break
|
||||||
stream = anthropic.completions.create(
|
except requests.exceptions.ReadTimeout as e:
|
||||||
prompt=prompt,
|
|
||||||
max_tokens_to_sample=4096, # The maximum number of tokens to generate before stopping.
|
|
||||||
model=llm_kwargs['llm_model'],
|
|
||||||
stream=True,
|
|
||||||
temperature = llm_kwargs['temperature']
|
|
||||||
)
|
|
||||||
break
|
|
||||||
except Exception as e:
|
|
||||||
retry += 1
|
retry += 1
|
||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
if retry > MAX_RETRY: raise TimeoutError
|
if retry > MAX_RETRY: raise TimeoutError
|
||||||
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
|
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
|
||||||
|
stream_response = response.iter_lines()
|
||||||
result = ''
|
result = ''
|
||||||
try:
|
while True:
|
||||||
for completion in stream:
|
try: chunk = next(stream_response)
|
||||||
result += completion.completion
|
except StopIteration:
|
||||||
if not console_slience: print(completion.completion, end='')
|
break
|
||||||
if observe_window is not None:
|
except requests.exceptions.ConnectionError:
|
||||||
# 观测窗,把已经获取的数据显示出去
|
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
|
||||||
if len(observe_window) >= 1: observe_window[0] += completion.completion
|
need_to_pass, chunkjson, is_last_chunk = decode_chunk(chunk)
|
||||||
# 看门狗,如果超过期限没有喂狗,则终止
|
if chunk:
|
||||||
if len(observe_window) >= 2:
|
try:
|
||||||
if (time.time()-observe_window[1]) > watch_dog_patience:
|
if need_to_pass:
|
||||||
raise RuntimeError("用户取消了程序。")
|
pass
|
||||||
except Exception as e:
|
elif is_last_chunk:
|
||||||
traceback.print_exc()
|
# logging.info(f'[response] {result}')
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
if chunkjson and chunkjson['type'] == 'content_block_delta':
|
||||||
|
result += chunkjson['delta']['text']
|
||||||
|
print(chunkjson['delta']['text'], end='')
|
||||||
|
if observe_window is not None:
|
||||||
|
# 观测窗,把已经获取的数据显示出去
|
||||||
|
if len(observe_window) >= 1:
|
||||||
|
observe_window[0] += chunkjson['delta']['text']
|
||||||
|
# 看门狗,如果超过期限没有喂狗,则终止
|
||||||
|
if len(observe_window) >= 2:
|
||||||
|
if (time.time()-observe_window[1]) > watch_dog_patience:
|
||||||
|
raise RuntimeError("用户取消了程序。")
|
||||||
|
except Exception as e:
|
||||||
|
chunk = get_full_error(chunk, stream_response)
|
||||||
|
chunk_decoded = chunk.decode()
|
||||||
|
error_msg = chunk_decoded
|
||||||
|
print(error_msg)
|
||||||
|
raise RuntimeError("Json解析不合常规")
|
||||||
|
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
def make_media_input(history,inputs,image_paths):
|
||||||
|
for image_path in image_paths:
|
||||||
|
inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
|
||||||
|
return inputs
|
||||||
|
|
||||||
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
|
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
|
||||||
"""
|
"""
|
||||||
@@ -109,7 +154,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
|
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
|
||||||
additional_fn代表点击的哪个按钮,按钮见functional.py
|
additional_fn代表点击的哪个按钮,按钮见functional.py
|
||||||
"""
|
"""
|
||||||
from anthropic import Anthropic
|
if inputs == "": inputs = "空空如也的输入栏"
|
||||||
if len(ANTHROPIC_API_KEY) == 0:
|
if len(ANTHROPIC_API_KEY) == 0:
|
||||||
chatbot.append((inputs, "没有设置ANTHROPIC_API_KEY"))
|
chatbot.append((inputs, "没有设置ANTHROPIC_API_KEY"))
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
||||||
@@ -119,13 +164,23 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
from core_functional import handle_core_functionality
|
from core_functional import handle_core_functionality
|
||||||
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
|
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
|
||||||
|
|
||||||
raw_input = inputs
|
have_recent_file, image_paths = every_image_file_in_path(chatbot)
|
||||||
logging.info(f'[raw_input] {raw_input}')
|
if len(image_paths) > 20:
|
||||||
chatbot.append((inputs, ""))
|
chatbot.append((inputs, "图片数量超过api上限(20张)"))
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应")
|
||||||
|
return
|
||||||
|
|
||||||
|
if any([llm_kwargs['llm_model'] == model for model in Claude_3_Models]) and have_recent_file:
|
||||||
|
if inputs == "" or inputs == "空空如也的输入栏": inputs = "请描述给出的图片"
|
||||||
|
system_prompt += picture_system_prompt # 由于没有单独的参数保存包含图片的历史,所以只能通过提示词对第几张图片进行定位
|
||||||
|
chatbot.append((make_media_input(history,inputs, image_paths), ""))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
||||||
|
else:
|
||||||
|
chatbot.append((inputs, ""))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
||||||
|
|
||||||
try:
|
try:
|
||||||
prompt = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
|
headers, message = generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths)
|
||||||
except RuntimeError as e:
|
except RuntimeError as e:
|
||||||
chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
|
chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
|
||||||
@@ -138,91 +193,117 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
try:
|
try:
|
||||||
# make a POST request to the API endpoint, stream=True
|
# make a POST request to the API endpoint, stream=True
|
||||||
from .bridge_all import model_info
|
from .bridge_all import model_info
|
||||||
anthropic = Anthropic(api_key=ANTHROPIC_API_KEY)
|
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
|
||||||
# endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
|
response = requests.post(endpoint, headers=headers, json=message,
|
||||||
# with ProxyNetworkActivate()
|
proxies=proxies, stream=True, timeout=TIMEOUT_SECONDS);break
|
||||||
stream = anthropic.completions.create(
|
except requests.exceptions.ReadTimeout as e:
|
||||||
prompt=prompt,
|
|
||||||
max_tokens_to_sample=4096, # The maximum number of tokens to generate before stopping.
|
|
||||||
model=llm_kwargs['llm_model'],
|
|
||||||
stream=True,
|
|
||||||
temperature = llm_kwargs['temperature']
|
|
||||||
)
|
|
||||||
|
|
||||||
break
|
|
||||||
except:
|
|
||||||
retry += 1
|
retry += 1
|
||||||
chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
|
traceback.print_exc()
|
||||||
retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
|
|
||||||
if retry > MAX_RETRY: raise TimeoutError
|
if retry > MAX_RETRY: raise TimeoutError
|
||||||
|
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
|
||||||
|
stream_response = response.iter_lines()
|
||||||
gpt_replying_buffer = ""
|
gpt_replying_buffer = ""
|
||||||
|
|
||||||
for completion in stream:
|
while True:
|
||||||
try:
|
try: chunk = next(stream_response)
|
||||||
gpt_replying_buffer = gpt_replying_buffer + completion.completion
|
except StopIteration:
|
||||||
history[-1] = gpt_replying_buffer
|
break
|
||||||
chatbot[-1] = (history[-2], history[-1])
|
except requests.exceptions.ConnectionError:
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg='正常') # 刷新界面
|
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
|
||||||
|
need_to_pass, chunkjson, is_last_chunk = decode_chunk(chunk)
|
||||||
|
if chunk:
|
||||||
|
try:
|
||||||
|
if need_to_pass:
|
||||||
|
pass
|
||||||
|
elif is_last_chunk:
|
||||||
|
log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
|
||||||
|
# logging.info(f'[response] {gpt_replying_buffer}')
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
if chunkjson and chunkjson['type'] == 'content_block_delta':
|
||||||
|
gpt_replying_buffer += chunkjson['delta']['text']
|
||||||
|
history[-1] = gpt_replying_buffer
|
||||||
|
chatbot[-1] = (history[-2], history[-1])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg='正常') # 刷新界面
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
from toolbox import regular_txt_to_markdown
|
chunk = get_full_error(chunk, stream_response)
|
||||||
tb_str = '```\n' + trimmed_format_exc() + '```'
|
chunk_decoded = chunk.decode()
|
||||||
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str}")
|
error_msg = chunk_decoded
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + tb_str) # 刷新界面
|
print(error_msg)
|
||||||
return
|
raise RuntimeError("Json解析不合常规")
|
||||||
|
|
||||||
|
def multiple_picture_types(image_paths):
|
||||||
|
"""
|
||||||
|
根据图片类型返回image/jpeg, image/png, image/gif, image/webp,无法判断则返回image/jpeg
|
||||||
|
"""
|
||||||
|
for image_path in image_paths:
|
||||||
|
if image_path.endswith('.jpeg') or image_path.endswith('.jpg'):
|
||||||
|
return 'image/jpeg'
|
||||||
|
elif image_path.endswith('.png'):
|
||||||
|
return 'image/png'
|
||||||
|
elif image_path.endswith('.gif'):
|
||||||
|
return 'image/gif'
|
||||||
|
elif image_path.endswith('.webp'):
|
||||||
|
return 'image/webp'
|
||||||
|
return 'image/jpeg'
|
||||||
|
|
||||||
|
def generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths):
|
||||||
|
|
||||||
# https://github.com/jtsang4/claude-to-chatgpt/blob/main/claude_to_chatgpt/adapter.py
|
|
||||||
def convert_messages_to_prompt(messages):
|
|
||||||
prompt = ""
|
|
||||||
role_map = {
|
|
||||||
"system": "Human",
|
|
||||||
"user": "Human",
|
|
||||||
"assistant": "Assistant",
|
|
||||||
}
|
|
||||||
for message in messages:
|
|
||||||
role = message["role"]
|
|
||||||
content = message["content"]
|
|
||||||
transformed_role = role_map[role]
|
|
||||||
prompt += f"\n\n{transformed_role.capitalize()}: {content}"
|
|
||||||
prompt += "\n\nAssistant: "
|
|
||||||
return prompt
|
|
||||||
|
|
||||||
def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
|
|
||||||
"""
|
"""
|
||||||
整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
|
整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
|
||||||
"""
|
"""
|
||||||
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
|
|
||||||
|
|
||||||
conversation_cnt = len(history) // 2
|
conversation_cnt = len(history) // 2
|
||||||
|
|
||||||
messages = [{"role": "system", "content": system_prompt}]
|
messages = []
|
||||||
|
|
||||||
if conversation_cnt:
|
if conversation_cnt:
|
||||||
for index in range(0, 2*conversation_cnt, 2):
|
for index in range(0, 2*conversation_cnt, 2):
|
||||||
what_i_have_asked = {}
|
what_i_have_asked = {}
|
||||||
what_i_have_asked["role"] = "user"
|
what_i_have_asked["role"] = "user"
|
||||||
what_i_have_asked["content"] = history[index]
|
what_i_have_asked["content"] = [{"type": "text", "text": history[index]}]
|
||||||
what_gpt_answer = {}
|
what_gpt_answer = {}
|
||||||
what_gpt_answer["role"] = "assistant"
|
what_gpt_answer["role"] = "assistant"
|
||||||
what_gpt_answer["content"] = history[index+1]
|
what_gpt_answer["content"] = [{"type": "text", "text": history[index+1]}]
|
||||||
if what_i_have_asked["content"] != "":
|
if what_i_have_asked["content"][0]["text"] != "":
|
||||||
if what_gpt_answer["content"] == "": continue
|
if what_i_have_asked["content"][0]["text"] == "": continue
|
||||||
if what_gpt_answer["content"] == timeout_bot_msg: continue
|
if what_i_have_asked["content"][0]["text"] == timeout_bot_msg: continue
|
||||||
messages.append(what_i_have_asked)
|
messages.append(what_i_have_asked)
|
||||||
messages.append(what_gpt_answer)
|
messages.append(what_gpt_answer)
|
||||||
else:
|
else:
|
||||||
messages[-1]['content'] = what_gpt_answer['content']
|
messages[-1]['content'][0]['text'] = what_gpt_answer['content'][0]['text']
|
||||||
|
|
||||||
what_i_ask_now = {}
|
if any([llm_kwargs['llm_model'] == model for model in Claude_3_Models]) and image_paths:
|
||||||
what_i_ask_now["role"] = "user"
|
what_i_ask_now = {}
|
||||||
what_i_ask_now["content"] = inputs
|
what_i_ask_now["role"] = "user"
|
||||||
|
what_i_ask_now["content"] = []
|
||||||
|
for image_path in image_paths:
|
||||||
|
what_i_ask_now["content"].append({
|
||||||
|
"type": "image",
|
||||||
|
"source": {
|
||||||
|
"type": "base64",
|
||||||
|
"media_type": multiple_picture_types(image_paths),
|
||||||
|
"data": encode_image(image_path),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
what_i_ask_now["content"].append({"type": "text", "text": inputs})
|
||||||
|
else:
|
||||||
|
what_i_ask_now = {}
|
||||||
|
what_i_ask_now["role"] = "user"
|
||||||
|
what_i_ask_now["content"] = [{"type": "text", "text": inputs}]
|
||||||
messages.append(what_i_ask_now)
|
messages.append(what_i_ask_now)
|
||||||
prompt = convert_messages_to_prompt(messages)
|
# 开始整理headers与message
|
||||||
|
headers = {
|
||||||
return prompt
|
'x-api-key': ANTHROPIC_API_KEY,
|
||||||
|
'anthropic-version': '2023-06-01',
|
||||||
|
'content-type': 'application/json'
|
||||||
|
}
|
||||||
|
payload = {
|
||||||
|
'model': llm_kwargs['llm_model'],
|
||||||
|
'max_tokens': 4096,
|
||||||
|
'messages': messages,
|
||||||
|
'temperature': llm_kwargs['temperature'],
|
||||||
|
'stream': True,
|
||||||
|
'system': system_prompt
|
||||||
|
}
|
||||||
|
return headers, payload
|
||||||
|
|||||||
328
request_llms/bridge_cohere.py
普通文件
328
request_llms/bridge_cohere.py
普通文件
@@ -0,0 +1,328 @@
|
|||||||
|
# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目
|
||||||
|
|
||||||
|
"""
|
||||||
|
该文件中主要包含三个函数
|
||||||
|
|
||||||
|
不具备多线程能力的函数:
|
||||||
|
1. predict: 正常对话时使用,具备完备的交互功能,不可多线程
|
||||||
|
|
||||||
|
具备多线程调用能力的函数
|
||||||
|
2. predict_no_ui_long_connection:支持多线程
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
import gradio as gr
|
||||||
|
import logging
|
||||||
|
import traceback
|
||||||
|
import requests
|
||||||
|
import importlib
|
||||||
|
import random
|
||||||
|
|
||||||
|
# config_private.py放自己的秘密如API和代理网址
|
||||||
|
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
|
||||||
|
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history
|
||||||
|
from toolbox import trimmed_format_exc, is_the_upload_folder, read_one_api_model_name, log_chat
|
||||||
|
from toolbox import ChatBotWithCookies
|
||||||
|
proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
|
||||||
|
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')
|
||||||
|
|
||||||
|
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
|
||||||
|
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
|
||||||
|
|
||||||
|
def get_full_error(chunk, stream_response):
|
||||||
|
"""
|
||||||
|
获取完整的从Cohere返回的报错
|
||||||
|
"""
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
chunk += next(stream_response)
|
||||||
|
except:
|
||||||
|
break
|
||||||
|
return chunk
|
||||||
|
|
||||||
|
def decode_chunk(chunk):
|
||||||
|
# 提前读取一些信息 (用于判断异常)
|
||||||
|
chunk_decoded = chunk.decode()
|
||||||
|
chunkjson = None
|
||||||
|
has_choices = False
|
||||||
|
choice_valid = False
|
||||||
|
has_content = False
|
||||||
|
has_role = False
|
||||||
|
try:
|
||||||
|
chunkjson = json.loads(chunk_decoded)
|
||||||
|
has_choices = 'choices' in chunkjson
|
||||||
|
if has_choices: choice_valid = (len(chunkjson['choices']) > 0)
|
||||||
|
if has_choices and choice_valid: has_content = ("content" in chunkjson['choices'][0]["delta"])
|
||||||
|
if has_content: has_content = (chunkjson['choices'][0]["delta"]["content"] is not None)
|
||||||
|
if has_choices and choice_valid: has_role = "role" in chunkjson['choices'][0]["delta"]
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
return chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role
|
||||||
|
|
||||||
|
from functools import lru_cache
|
||||||
|
@lru_cache(maxsize=32)
|
||||||
|
def verify_endpoint(endpoint):
|
||||||
|
"""
|
||||||
|
检查endpoint是否可用
|
||||||
|
"""
|
||||||
|
if "你亲手写的api名称" in endpoint:
|
||||||
|
raise ValueError("Endpoint不正确, 请检查AZURE_ENDPOINT的配置! 当前的Endpoint为:" + endpoint)
|
||||||
|
return endpoint
|
||||||
|
|
||||||
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=None, console_slience:bool=False):
|
||||||
|
"""
|
||||||
|
发送,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
|
||||||
|
inputs:
|
||||||
|
是本次问询的输入
|
||||||
|
sys_prompt:
|
||||||
|
系统静默prompt
|
||||||
|
llm_kwargs:
|
||||||
|
内部调优参数
|
||||||
|
history:
|
||||||
|
是之前的对话列表
|
||||||
|
observe_window = None:
|
||||||
|
用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
|
||||||
|
"""
|
||||||
|
watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
|
||||||
|
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
|
||||||
|
retry = 0
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
# make a POST request to the API endpoint, stream=False
|
||||||
|
from .bridge_all import model_info
|
||||||
|
endpoint = verify_endpoint(model_info[llm_kwargs['llm_model']]['endpoint'])
|
||||||
|
response = requests.post(endpoint, headers=headers, proxies=proxies,
|
||||||
|
json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
|
||||||
|
except requests.exceptions.ReadTimeout as e:
|
||||||
|
retry += 1
|
||||||
|
traceback.print_exc()
|
||||||
|
if retry > MAX_RETRY: raise TimeoutError
|
||||||
|
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
|
||||||
|
|
||||||
|
stream_response = response.iter_lines()
|
||||||
|
result = ''
|
||||||
|
json_data = None
|
||||||
|
while True:
|
||||||
|
try: chunk = next(stream_response)
|
||||||
|
except StopIteration:
|
||||||
|
break
|
||||||
|
except requests.exceptions.ConnectionError:
|
||||||
|
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
|
||||||
|
chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)
|
||||||
|
if chunkjson['event_type'] == 'stream-start': continue
|
||||||
|
if chunkjson['event_type'] == 'text-generation':
|
||||||
|
result += chunkjson["text"]
|
||||||
|
if not console_slience: print(chunkjson["text"], end='')
|
||||||
|
if observe_window is not None:
|
||||||
|
# 观测窗,把已经获取的数据显示出去
|
||||||
|
if len(observe_window) >= 1:
|
||||||
|
observe_window[0] += chunkjson["text"]
|
||||||
|
# 看门狗,如果超过期限没有喂狗,则终止
|
||||||
|
if len(observe_window) >= 2:
|
||||||
|
if (time.time()-observe_window[1]) > watch_dog_patience:
|
||||||
|
raise RuntimeError("用户取消了程序。")
|
||||||
|
if chunkjson['event_type'] == 'stream-end': break
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
    """
    Send one chat round to the Cohere endpoint and stream the reply into the WebUI.

    Used for the basic conversation feature.
        inputs          — the user's question for this round
        llm_kwargs      — model tuning parameters (model name, top_p, temperature, ...)
        plugin_kwargs   — plugin parameters (unused here)
        chatbot         — the conversation list shown in the WebUI; mutate it and
                          yield to update the interface in place
        history         — prior conversation turns (too-long inputs/history can
                          overflow the token limit)
        system_prompt   — the system prompt, sent silently
        additional_fn   — which toolbar button was clicked, see functional.py
    """
    user_input = inputs
    if additional_fn is not None:
        # Apply the prefix/suffix of the clicked "core function" button.
        from core_functional import handle_core_functionality
        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    raw_input = inputs
    chatbot.append((inputs, ""))
    yield from update_ui(chatbot=chatbot, history=history, msg="等待响应")  # refresh UI

    # Guard against a common user mistake: submitting the upload-folder path
    # through the plain chat box instead of a plugin button.
    if is_the_upload_folder(user_input):
        chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需点击“**函数插件区**”按钮进行处理,请勿点击“提交”按钮或者“基础功能区”按钮。")
        yield from update_ui(chatbot=chatbot, history=history, msg="正常")  # refresh UI
        time.sleep(2)

    try:
        headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
    except RuntimeError as e:
        chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
        yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求")  # refresh UI
        return

    # Validate the endpoint before making the request.
    try:
        from .bridge_all import model_info
        endpoint = verify_endpoint(model_info[llm_kwargs['llm_model']]['endpoint'])
    except:
        tb_str = '```\n' + trimmed_format_exc() + '```'
        chatbot[-1] = (inputs, tb_str)
        yield from update_ui(chatbot=chatbot, history=history, msg="Endpoint不满足要求")  # refresh UI
        return

    # Reserve a slot for the reply: history[-2] is this question, history[-1]
    # will accumulate the streamed answer.
    history.append(inputs); history.append("")

    retry = 0
    while True:
        try:
            # make a POST request to the API endpoint, stream=True
            response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
        except:
            retry += 1
            chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
            retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
            yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg)  # refresh UI
            if retry > MAX_RETRY: raise TimeoutError

    gpt_replying_buffer = ""  # accumulated reply text

    is_head_of_the_stream = True
    if stream:
        stream_response = response.iter_lines()
        while True:
            try:
                chunk = next(stream_response)
            except StopIteration:
                # Non-official Cohere relays can end the stream abnormally;
                # official Cohere/API2D streams never reach this branch.
                # NOTE(review): if the stream yields nothing at all, `chunk` is
                # unbound here and this raises NameError — confirm with callers.
                chunk_decoded = chunk.decode()
                error_msg = chunk_decoded
                # Report the raw error and stop.
                chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
                yield from update_ui(chatbot=chatbot, history=history, msg="非Cohere官方接口返回了错误:" + chunk.decode())  # refresh UI
                return

            # Pre-decode the chunk so anomalies can be detected early.
            chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)

            if chunkjson:
                try:
                    # Cohere stream events: stream-start / text-generation / stream-end.
                    if chunkjson['event_type'] == 'stream-start':
                        continue
                    if chunkjson['event_type'] == 'text-generation':
                        gpt_replying_buffer = gpt_replying_buffer + chunkjson["text"]
                        history[-1] = gpt_replying_buffer
                        chatbot[-1] = (history[-2], history[-1])
                        yield from update_ui(chatbot=chatbot, history=history, msg="正常")  # refresh UI
                    if chunkjson['event_type'] == 'stream-end':
                        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
                        history[-1] = gpt_replying_buffer
                        chatbot[-1] = (history[-2], history[-1])
                        yield from update_ui(chatbot=chatbot, history=history, msg="正常")  # refresh UI
                        break
                except Exception as e:
                    # The chunk was JSON but not a well-formed stream event:
                    # collect the full error body and surface it to the user.
                    yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规")  # refresh UI
                    chunk = get_full_error(chunk, stream_response)
                    chunk_decoded = chunk.decode()
                    error_msg = chunk_decoded
                    chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
                    yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg)  # refresh UI
                    print(error_msg)
                    return
|
||||||
|
|
||||||
|
def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg):
    """Map a raw API error message to a user-facing hint on the chatbot.

    The "reduce the length" case additionally clears the current round and
    clips older history to free token budget. Returns the (possibly mutated)
    chatbot and history.
    """
    from .bridge_all import model_info
    Cohere_website = ' 请登录Cohere查看详情 https://platform.Cohere.com/signup'
    if "reduce the length" in error_msg:
        # Token overflow: wipe this round (history[-2] question, history[-1]
        # answer), then clip the rest of the history.
        if len(history) >= 2:
            history[-1] = ""
            history[-2] = ""
        history = clip_history(inputs=inputs, history=history,
                               tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
                               max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token']))
        hint = "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)"
    else:
        # Known failure signatures, checked in order; first match wins.
        known_errors = (
            ("does not exist", f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格."),
            ("Incorrect API key", "[Local Message] Incorrect API key. Cohere以提供了不正确的API_KEY为由, 拒绝服务. " + Cohere_website),
            ("exceeded your current quota", "[Local Message] You exceeded your current quota. Cohere以账户额度不足为由, 拒绝服务." + Cohere_website),
            ("account is not active", "[Local Message] Your account is not active. Cohere以账户失效为由, 拒绝服务." + Cohere_website),
            ("associated with a deactivated account", "[Local Message] You are associated with a deactivated account. Cohere以账户失效为由, 拒绝服务." + Cohere_website),
            ("API key has been deactivated", "[Local Message] API key has been deactivated. Cohere以账户失效为由, 拒绝服务." + Cohere_website),
            ("bad forward key", "[Local Message] Bad forward key. API2D账户额度不足."),
            ("Not enough point", "[Local Message] Not enough point. API2D账户点数不足."),
        )
        for needle, message in known_errors:
            if needle in error_msg:
                hint = message
                break
        else:
            # Unrecognized error: show the traceback plus the raw chunk.
            from toolbox import regular_txt_to_markdown
            tb_str = '```\n' + trimmed_format_exc() + '```'
            hint = f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}"
    chatbot[-1] = (chatbot[-1][0], hint)
    return chatbot, history
|
||||||
|
|
||||||
|
def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
    """Assemble the HTTP headers and Cohere chat payload for one request."""
    api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    if API_ORG.startswith('org-'):
        headers["Cohere-Organization"] = API_ORG
    if llm_kwargs['llm_model'].startswith('azure-'):
        headers["api-key"] = api_key
        # A per-model Azure key (if configured) overrides the shared one.
        if llm_kwargs['llm_model'] in AZURE_CFG_ARRAY.keys():
            headers["api-key"] = AZURE_CFG_ARRAY[llm_kwargs['llm_model']]["AZURE_API_KEY"]

    # Rebuild the chat history as Cohere (role, message) records, starting with
    # the system prompt. History is stored as a flat [q, a, q, a, ...] list.
    messages = [{"role": "SYSTEM", "message": system_prompt}]
    paired_len = (len(history) // 2) * 2
    for idx in range(0, paired_len, 2):
        asked = {"role": "USER", "message": history[idx]}
        answered = {"role": "CHATBOT", "message": history[idx + 1]}
        if asked["message"] == "":
            # Question was cleared (e.g. after an overflow): fold the answer
            # into the previous record instead of appending an empty turn.
            messages[-1]['message'] = answered['message']
            continue
        # Skip rounds whose answer is empty or a timeout placeholder.
        if answered["message"] == "" or answered["message"] == timeout_bot_msg:
            continue
        messages.append(asked)
        messages.append(answered)

    # Strip the project-internal "cohere-" prefix to get the API model name.
    model = llm_kwargs['llm_model']
    if model.startswith('cohere-'):
        model = model[len('cohere-'):]
    payload = {
        "model": model,
        "message": inputs,
        "chat_history": messages,
        "temperature": llm_kwargs['temperature'],  # 1.0,
        "top_p": llm_kwargs['top_p'],  # 1.0,
        "n": 1,
        "stream": stream,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    return headers, payload
|
||||||
|
|
||||||
|
|
||||||
@@ -7,6 +7,7 @@ import re
|
|||||||
import os
|
import os
|
||||||
import time
|
import time
|
||||||
from request_llms.com_google import GoogleChatInit
|
from request_llms.com_google import GoogleChatInit
|
||||||
|
from toolbox import ChatBotWithCookies
|
||||||
from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc
|
from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc
|
||||||
|
|
||||||
proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')
|
proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')
|
||||||
@@ -20,7 +21,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
|||||||
if get_conf("GEMINI_API_KEY") == "":
|
if get_conf("GEMINI_API_KEY") == "":
|
||||||
raise ValueError(f"请配置 GEMINI_API_KEY。")
|
raise ValueError(f"请配置 GEMINI_API_KEY。")
|
||||||
|
|
||||||
genai = GoogleChatInit()
|
genai = GoogleChatInit(llm_kwargs)
|
||||||
watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
|
watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
|
||||||
gpt_replying_buffer = ''
|
gpt_replying_buffer = ''
|
||||||
stream_response = genai.generate_chat(inputs, llm_kwargs, history, sys_prompt)
|
stream_response = genai.generate_chat(inputs, llm_kwargs, history, sys_prompt)
|
||||||
@@ -44,7 +45,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
|||||||
return gpt_replying_buffer
|
return gpt_replying_buffer
|
||||||
|
|
||||||
|
|
||||||
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
|
def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
|
||||||
|
history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
|
||||||
# 检查API_KEY
|
# 检查API_KEY
|
||||||
if get_conf("GEMINI_API_KEY") == "":
|
if get_conf("GEMINI_API_KEY") == "":
|
||||||
yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
|
yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
|
||||||
@@ -57,6 +59,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
|
|
||||||
if "vision" in llm_kwargs["llm_model"]:
|
if "vision" in llm_kwargs["llm_model"]:
|
||||||
have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
|
have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
|
||||||
|
if not have_recent_file:
|
||||||
|
chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写"))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg="等待图片") # 刷新界面
|
||||||
|
return
|
||||||
def make_media_input(inputs, image_paths):
|
def make_media_input(inputs, image_paths):
|
||||||
for image_path in image_paths:
|
for image_path in image_paths:
|
||||||
inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
|
inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
|
||||||
@@ -66,7 +72,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
|
|
||||||
chatbot.append((inputs, ""))
|
chatbot.append((inputs, ""))
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
genai = GoogleChatInit()
|
genai = GoogleChatInit(llm_kwargs)
|
||||||
retry = 0
|
retry = 0
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -1,10 +1,10 @@
|
|||||||
|
|
||||||
from transformers import AutoModel, AutoTokenizer
|
|
||||||
import time
|
import time
|
||||||
import threading
|
import threading
|
||||||
import importlib
|
import importlib
|
||||||
from toolbox import update_ui, get_conf
|
from toolbox import update_ui, get_conf
|
||||||
from multiprocessing import Process, Pipe
|
from multiprocessing import Process, Pipe
|
||||||
|
from transformers import AutoModel, AutoTokenizer
|
||||||
|
|
||||||
load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
|
load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
|
||||||
|
|
||||||
@@ -106,7 +106,8 @@ class GetGLMHandle(Process):
|
|||||||
global llama_glm_handle
|
global llama_glm_handle
|
||||||
llama_glm_handle = None
|
llama_glm_handle = None
|
||||||
#################################################################################
|
#################################################################################
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
|
||||||
|
observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
多线程方法
|
多线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
|
|||||||
@@ -1,10 +1,10 @@
|
|||||||
|
|
||||||
from transformers import AutoModel, AutoTokenizer
|
|
||||||
import time
|
import time
|
||||||
import threading
|
import threading
|
||||||
import importlib
|
import importlib
|
||||||
from toolbox import update_ui, get_conf
|
from toolbox import update_ui, get_conf
|
||||||
from multiprocessing import Process, Pipe
|
from multiprocessing import Process, Pipe
|
||||||
|
from transformers import AutoModel, AutoTokenizer
|
||||||
|
|
||||||
load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
|
load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
|
||||||
|
|
||||||
@@ -106,7 +106,8 @@ class GetGLMHandle(Process):
|
|||||||
global pangu_glm_handle
|
global pangu_glm_handle
|
||||||
pangu_glm_handle = None
|
pangu_glm_handle = None
|
||||||
#################################################################################
|
#################################################################################
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
|
||||||
|
observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
多线程方法
|
多线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
|
|||||||
@@ -106,7 +106,8 @@ class GetGLMHandle(Process):
|
|||||||
global rwkv_glm_handle
|
global rwkv_glm_handle
|
||||||
rwkv_glm_handle = None
|
rwkv_glm_handle = None
|
||||||
#################################################################################
|
#################################################################################
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
|
||||||
|
observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
多线程方法
|
多线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
|
|||||||
197
request_llms/bridge_moonshot.py
普通文件
197
request_llms/bridge_moonshot.py
普通文件
@@ -0,0 +1,197 @@
|
|||||||
|
# encoding: utf-8
|
||||||
|
# @Time : 2024/3/3
|
||||||
|
# @Author : Spike
|
||||||
|
# @Descr :
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from toolbox import get_conf, update_ui, log_chat
|
||||||
|
from toolbox import ChatBotWithCookies
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
|
|
||||||
|
class MoonShotInit:
    """Client for the Moonshot (Kimi) OpenAI-compatible chat-completions API."""

    def __init__(self):
        # llm_model is filled in lazily by generate_payload from llm_kwargs.
        self.llm_model = None
        self.url = 'https://api.moonshot.cn/v1/chat/completions'
        self.api_key = get_conf('MOONSHOT_API_KEY')

    def __converter_file(self, user_input: str):
        """Turn file/directory paths found in the input (one per line) into
        system messages carrying the extracted PDF text. Only .pdf is handled."""
        what_ask = []
        for f in user_input.splitlines():
            if os.path.exists(f):
                files = []
                if os.path.isdir(f):
                    file_list = os.listdir(f)
                    files.extend([os.path.join(f, file) for file in file_list])
                else:
                    files.append(f)
                for file in files:
                    if file.split('.')[-1] in ['pdf']:
                        # NOTE(review): opens a PDF in text mode and passes the
                        # handle to read_and_clean_pdf_text — confirm it expects
                        # a file object rather than a path, and binary vs text.
                        with open(file, 'r') as fp:
                            from crazy_functions.crazy_utils import read_and_clean_pdf_text
                            file_content, _ = read_and_clean_pdf_text(fp)
                            what_ask.append({"role": "system", "content": file_content})
        return what_ask

    def __converter_user(self, user_input: str):
        """Wrap the user's question as an OpenAI-style user message."""
        what_i_ask_now = {"role": "user", "content": user_input}
        return what_i_ask_now

    def __conversation_history(self, history):
        """Convert the flat [q, a, q, a, ...] history into message dicts,
        skipping rounds with an empty answer and folding rounds whose question
        was cleared into the previous record."""
        conversation_cnt = len(history) // 2
        messages = []
        if conversation_cnt:
            for index in range(0, 2 * conversation_cnt, 2):
                what_i_have_asked = {
                    "role": "user",
                    "content": str(history[index])
                }
                what_gpt_answer = {
                    "role": "assistant",
                    "content": str(history[index + 1])
                }
                if what_i_have_asked["content"] != "":
                    if what_gpt_answer["content"] == "": continue
                    messages.append(what_i_have_asked)
                    messages.append(what_gpt_answer)
                else:
                    messages[-1]['content'] = what_gpt_answer['content']
        return messages

    def _analysis_content(self, chuck):
        """Decode one SSE line; return (decoded_text, parsed_json, delta_text).

        chunk_decoded[6:] strips the leading 'data: ' prefix. Non-data lines
        (keep-alives, '[DONE]', errors) fail the parse and yield empty results.
        """
        chunk_decoded = chuck.decode("utf-8")
        chunk_json = {}
        content = ""
        try:
            chunk_json = json.loads(chunk_decoded[6:])
            content = chunk_json['choices'][0]["delta"].get("content", "")
        except:
            pass
        return chunk_decoded, chunk_json, content

    def generate_payload(self, inputs, llm_kwargs, history, system_prompt, stream):
        """Build the request headers and JSON body for one chat round."""
        self.llm_model = llm_kwargs['llm_model']
        # Stash the key into llm_kwargs so msg_handle_error can mask/report it.
        llm_kwargs.update({'use-key': self.api_key})
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.extend(self.__converter_file(inputs))
        for i in history[0::2]:  # re-attach files referenced in earlier questions
            messages.extend(self.__converter_file(i))
        messages.extend(self.__conversation_history(history))
        messages.append(self.__converter_user(inputs))
        header = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }
        payload = {
            "model": self.llm_model,
            "messages": messages,
            "temperature": llm_kwargs.get('temperature', 0.3),  # 1.0,
            "top_p": llm_kwargs.get('top_p', 1.0),  # 1.0,
            "n": llm_kwargs.get('n_choices', 1),
            "stream": stream
        }
        return payload, header

    def generate_messages(self, inputs, llm_kwargs, history, system_prompt, stream):
        """POST the request and yield (delta, accumulated_text, error_msg)
        triples as the reply streams in; error_msg is '' on normal chunks and
        the generator stops after the first recognized error."""
        payload, headers = self.generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
        response = requests.post(self.url, headers=headers, json=payload, stream=stream)

        chunk_content = ""
        gpt_bro_result = ""
        for chuck in response.iter_lines():
            chunk_decoded, check_json, content = self._analysis_content(chuck)
            chunk_content += chunk_decoded
            if content:
                gpt_bro_result += content
                yield content, gpt_bro_result, ''
            else:
                # Chunk carried no delta: check whether it is a known error.
                error_msg = msg_handle_error(llm_kwargs, chunk_decoded)
                if error_msg:
                    yield error_msg, gpt_bro_result, error_msg
                    break
|
||||||
|
|
||||||
|
|
||||||
|
def msg_handle_error(llm_kwargs, chunk_decoded):
    """Translate a raw (decoded) response chunk into a human-readable error hint.

    Args:
        llm_kwargs: per-request options; reads 'use-key' (the API key, stashed
            by MoonShotInit.generate_payload) and 'llm_model'.
        chunk_decoded: the decoded text of one response chunk.

    Returns:
        A non-empty error string when the chunk matches a known failure
        pattern, otherwise '' (the chunk is normal content).
    """
    use_key = llm_kwargs.get('use-key', '')
    # Mask the key so it never appears in full in logs or the UI.
    api_key_encryption = use_key[:8] + '****' + use_key[-5:]
    openai_website = f' 请登录OpenAI查看详情 https://platform.openai.com/signup api-key: `{api_key_encryption}`'
    error_msg = ''
    if "does not exist" in chunk_decoded:
        error_msg = f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格."
    elif "Incorrect API key" in chunk_decoded:
        error_msg = "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务." + openai_website
    elif "exceeded your current quota" in chunk_decoded:
        error_msg = "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务." + openai_website
    elif "account is not active" in chunk_decoded:
        error_msg = "[Local Message] Your account is not active. OpenAI以账户失效为由, 拒绝服务." + openai_website
    elif "associated with a deactivated account" in chunk_decoded:
        error_msg = "[Local Message] You are associated with a deactivated account. OpenAI以账户失效为由, 拒绝服务." + openai_website
    elif "API key has been deactivated" in chunk_decoded:
        error_msg = "[Local Message] API key has been deactivated. OpenAI以账户失效为由, 拒绝服务." + openai_website
    elif "bad forward key" in chunk_decoded:
        error_msg = "[Local Message] Bad forward key. API2D账户额度不足."
    elif "Not enough point" in chunk_decoded:
        error_msg = "[Local Message] Not enough point. API2D账户点数不足."
    elif 'error' in str(chunk_decoded).lower():
        # Pretty-print the error JSON if the chunk parses, stripping a leading
        # SSE "data: " prefix first. (Bug fix: the old code parsed
        # chunk_decoded[:6] — the first six characters — so json.loads could
        # never succeed and the raw chunk was always returned.)
        try:
            body = chunk_decoded[6:] if chunk_decoded.startswith('data: ') else chunk_decoded
            error_msg = json.dumps(json.loads(body), indent=4, ensure_ascii=False)
        except:
            error_msg = chunk_decoded
    return error_msg
|
||||||
|
|
||||||
|
|
||||||
|
def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
            history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
    """Stream one Moonshot chat round into the WebUI.

    Mutates chatbot/history in place and yields UI refreshes. On a recognized
    API error the current round is rolled back out of history.
    """
    chatbot.append([inputs, ""])

    if additional_fn is not None:
        # Apply the prefix/suffix of the clicked "core function" button.
        from core_functional import handle_core_functionality
        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
    yield from update_ui(chatbot=chatbot, history=history, msg="等待响应")  # refresh UI
    gpt_bro_init = MoonShotInit()
    # Reserve this round's slot: history[-2] question, history[-1] answer.
    # (The trailing empty-answer pair is skipped when the payload is built.)
    history.extend([inputs, ''])
    stream_response = gpt_bro_init.generate_messages(inputs, llm_kwargs, history, system_prompt, stream)
    for content, gpt_bro_result, error_bro_meg in stream_response:
        chatbot[-1] = [inputs, gpt_bro_result]
        history[-1] = gpt_bro_result
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        if error_bro_meg:
            # Show the error and roll the failed round back out of history.
            chatbot[-1] = [inputs, error_bro_meg]
            history = history[:-2]
            yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
            break
    # NOTE(review): if the stream yields nothing, gpt_bro_result is unbound
    # here and this raises NameError — confirm the generator always yields.
    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result)
|
||||||
|
|
||||||
|
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None,
                                  console_slience=False):
    """Multi-threaded (no-UI) Moonshot entry point; returns the full reply string.

    observe_window is an optional list shared with the caller's thread:
        [0] accumulates partial output, [1] holds the last watchdog feed time,
        [2] (if present) receives an error message.
    Raises RuntimeError when the watchdog is not fed within the patience window.
    """
    gpt_bro_init = MoonShotInit()
    watch_dog_patience = 60  # watchdog patience, in seconds
    stream_response = gpt_bro_init.generate_messages(inputs, llm_kwargs, history, sys_prompt, True)
    # Bug fix: observe_window defaults to None, but the loop below calls
    # len(observe_window) on every chunk — normalize to an empty list.
    if observe_window is None:
        observe_window = []
    moonshot_bro_result = ''
    for content, moonshot_bro_result, error_bro_meg in stream_response:
        if error_bro_meg:
            if len(observe_window) >= 3:
                observe_window[2] = error_bro_meg
            return f'{moonshot_bro_result} 对话错误'
        # Observation window: publish the partial result to the caller.
        if len(observe_window) >= 1:
            observe_window[0] = moonshot_bro_result
        # Watchdog: abort if the caller stopped feeding observe_window[1].
        if len(observe_window) >= 2:
            if (time.time() - observe_window[1]) > watch_dog_patience:
                if len(observe_window) >= 3:  # guard: slot 2 may not exist
                    observe_window[2] = "请求超时,程序终止。"
                raise RuntimeError(f"{moonshot_bro_result} 程序终止。")
    return moonshot_bro_result
|
||||||
|
|
||||||
|
# Manual smoke test: stream a "hello" completion from Moonshot and print each
# (delta, accumulated, error) tuple. Requires MOONSHOT_API_KEY and network access.
if __name__ == '__main__':
    moon_ai = MoonShotInit()
    for g in moon_ai.generate_messages('hello', {'llm_model': 'moonshot-v1-8k'},
                                       [], '', True):
        print(g)
|
||||||
@@ -171,7 +171,8 @@ class GetGLMHandle(Process):
|
|||||||
global moss_handle
|
global moss_handle
|
||||||
moss_handle = None
|
moss_handle = None
|
||||||
#################################################################################
|
#################################################################################
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
|
||||||
|
observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
多线程方法
|
多线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
|
|||||||
@@ -117,7 +117,8 @@ def generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
|
|||||||
raise RuntimeError(dec['error_msg'])
|
raise RuntimeError(dec['error_msg'])
|
||||||
|
|
||||||
|
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
|
||||||
|
observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
⭐多线程方法
|
⭐多线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
@@ -146,9 +147,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
yield from update_ui(chatbot=chatbot, history=history)
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
# 开始接收回复
|
# 开始接收回复
|
||||||
try:
|
try:
|
||||||
|
response = f"[Local Message] 等待{model_name}响应中 ..."
|
||||||
for response in generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
|
for response in generate_from_baidu_qianfan(inputs, llm_kwargs, history, system_prompt):
|
||||||
chatbot[-1] = (inputs, response)
|
chatbot[-1] = (inputs, response)
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
history.extend([inputs, response])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
except ConnectionAbortedError as e:
|
except ConnectionAbortedError as e:
|
||||||
from .bridge_all import model_info
|
from .bridge_all import model_info
|
||||||
if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出
|
if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出
|
||||||
@@ -157,10 +161,8 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
|
chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
|
||||||
yield from update_ui(chatbot=chatbot, history=history, msg="异常") # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history, msg="异常") # 刷新界面
|
||||||
return
|
return
|
||||||
|
except RuntimeError as e:
|
||||||
# 总结输出
|
tb_str = '```\n' + trimmed_format_exc() + '```'
|
||||||
response = f"[Local Message] {model_name}响应异常 ..."
|
chatbot[-1] = (chatbot[-1][0], tb_str)
|
||||||
if response == f"[Local Message] 等待{model_name}响应中 ...":
|
yield from update_ui(chatbot=chatbot, history=history, msg="异常") # 刷新界面
|
||||||
response = f"[Local Message] {model_name}响应异常 ..."
|
return
|
||||||
history.extend([inputs, response])
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
|
||||||
@@ -5,7 +5,8 @@ from toolbox import check_packages, report_exception
|
|||||||
|
|
||||||
model_name = 'Qwen'
|
model_name = 'Qwen'
|
||||||
|
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
|
||||||
|
observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
⭐多线程方法
|
⭐多线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
@@ -47,10 +48,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
if additional_fn is not None:
|
if additional_fn is not None:
|
||||||
from core_functional import handle_core_functionality
|
from core_functional import handle_core_functionality
|
||||||
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
|
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
|
||||||
|
chatbot[-1] = (inputs, "")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
|
||||||
# 开始接收回复
|
# 开始接收回复
|
||||||
from .com_qwenapi import QwenRequestInstance
|
from .com_qwenapi import QwenRequestInstance
|
||||||
sri = QwenRequestInstance()
|
sri = QwenRequestInstance()
|
||||||
|
response = f"[Local Message] 等待{model_name}响应中 ..."
|
||||||
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
|
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
|
||||||
chatbot[-1] = (inputs, response)
|
chatbot[-1] = (inputs, response)
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
|||||||
@@ -9,7 +9,8 @@ def validate_key():
|
|||||||
if YUNQUE_SECRET_KEY == '': return False
|
if YUNQUE_SECRET_KEY == '': return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
|
||||||
|
observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
⭐ 多线程方法
|
⭐ 多线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
@@ -56,6 +57,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
# 开始接收回复
|
# 开始接收回复
|
||||||
from .com_skylark2api import YUNQUERequestInstance
|
from .com_skylark2api import YUNQUERequestInstance
|
||||||
sri = YUNQUERequestInstance()
|
sri = YUNQUERequestInstance()
|
||||||
|
response = f"[Local Message] 等待{model_name}响应中 ..."
|
||||||
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
|
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
|
||||||
chatbot[-1] = (inputs, response)
|
chatbot[-1] = (inputs, response)
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
|||||||
@@ -13,7 +13,8 @@ def validate_key():
|
|||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
|
||||||
|
observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
⭐多线程方法
|
⭐多线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
@@ -52,6 +53,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
# 开始接收回复
|
# 开始接收回复
|
||||||
from .com_sparkapi import SparkRequestInstance
|
from .com_sparkapi import SparkRequestInstance
|
||||||
sri = SparkRequestInstance()
|
sri = SparkRequestInstance()
|
||||||
|
response = f"[Local Message] 等待{model_name}响应中 ..."
|
||||||
for response in sri.generate(inputs, llm_kwargs, history, system_prompt, use_image_api=True):
|
for response in sri.generate(inputs, llm_kwargs, history, system_prompt, use_image_api=True):
|
||||||
chatbot[-1] = (inputs, response)
|
chatbot[-1] = (inputs, response)
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
|||||||
283
request_llms/bridge_yimodel.py
普通文件
283
request_llms/bridge_yimodel.py
普通文件
@@ -0,0 +1,283 @@
|
|||||||
|
# 借鉴自同目录下的bridge_chatgpt.py
|
||||||
|
|
||||||
|
"""
|
||||||
|
该文件中主要包含三个函数
|
||||||
|
|
||||||
|
不具备多线程能力的函数:
|
||||||
|
1. predict: 正常对话时使用,具备完备的交互功能,不可多线程
|
||||||
|
|
||||||
|
具备多线程调用能力的函数
|
||||||
|
2. predict_no_ui_long_connection:支持多线程
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
import gradio as gr
|
||||||
|
import logging
|
||||||
|
import traceback
|
||||||
|
import requests
|
||||||
|
import importlib
|
||||||
|
import random
|
||||||
|
|
||||||
|
# config_private.py放自己的秘密如API和代理网址
|
||||||
|
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
|
||||||
|
from toolbox import get_conf, update_ui, trimmed_format_exc, is_the_upload_folder, read_one_api_model_name
|
||||||
|
proxies, TIMEOUT_SECONDS, MAX_RETRY, YIMODEL_API_KEY = \
|
||||||
|
get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'YIMODEL_API_KEY')
|
||||||
|
|
||||||
|
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
|
||||||
|
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
|
||||||
|
|
||||||
|
def get_full_error(chunk, stream_response):
|
||||||
|
"""
|
||||||
|
获取完整的从Openai返回的报错
|
||||||
|
"""
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
chunk += next(stream_response)
|
||||||
|
except:
|
||||||
|
break
|
||||||
|
return chunk
|
||||||
|
|
||||||
|
def decode_chunk(chunk):
|
||||||
|
# 提前读取一些信息(用于判断异常)
|
||||||
|
chunk_decoded = chunk.decode()
|
||||||
|
chunkjson = None
|
||||||
|
is_last_chunk = False
|
||||||
|
try:
|
||||||
|
chunkjson = json.loads(chunk_decoded[6:])
|
||||||
|
is_last_chunk = chunkjson.get("lastOne", False)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
return chunk_decoded, chunkjson, is_last_chunk
|
||||||
|
|
||||||
|
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
|
||||||
|
"""
|
||||||
|
发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
|
||||||
|
inputs:
|
||||||
|
是本次问询的输入
|
||||||
|
sys_prompt:
|
||||||
|
系统静默prompt
|
||||||
|
llm_kwargs:
|
||||||
|
chatGPT的内部调优参数
|
||||||
|
history:
|
||||||
|
是之前的对话列表
|
||||||
|
observe_window = None:
|
||||||
|
用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
|
||||||
|
"""
|
||||||
|
watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
|
||||||
|
if inputs == "": inputs = "空空如也的输入栏"
|
||||||
|
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
|
||||||
|
retry = 0
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
# make a POST request to the API endpoint, stream=False
|
||||||
|
from .bridge_all import model_info
|
||||||
|
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
|
||||||
|
response = requests.post(endpoint, headers=headers, proxies=proxies,
|
||||||
|
json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
|
||||||
|
except requests.exceptions.ReadTimeout as e:
|
||||||
|
retry += 1
|
||||||
|
traceback.print_exc()
|
||||||
|
if retry > MAX_RETRY: raise TimeoutError
|
||||||
|
if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
|
||||||
|
|
||||||
|
stream_response = response.iter_lines()
|
||||||
|
result = ''
|
||||||
|
is_head_of_the_stream = True
|
||||||
|
while True:
|
||||||
|
try: chunk = next(stream_response)
|
||||||
|
except StopIteration:
|
||||||
|
break
|
||||||
|
except requests.exceptions.ConnectionError:
|
||||||
|
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
|
||||||
|
chunk_decoded, chunkjson, is_last_chunk = decode_chunk(chunk)
|
||||||
|
if is_head_of_the_stream and (r'"object":"error"' not in chunk_decoded) and (r'"role":"assistant"' in chunk_decoded):
|
||||||
|
# 数据流的第一帧不携带content
|
||||||
|
is_head_of_the_stream = False; continue
|
||||||
|
if chunk:
|
||||||
|
try:
|
||||||
|
if is_last_chunk:
|
||||||
|
# 判定为数据流的结束,gpt_replying_buffer也写完了
|
||||||
|
logging.info(f'[response] {result}')
|
||||||
|
break
|
||||||
|
result += chunkjson['choices'][0]["delta"]["content"]
|
||||||
|
if not console_slience: print(chunkjson['choices'][0]["delta"]["content"], end='')
|
||||||
|
if observe_window is not None:
|
||||||
|
# 观测窗,把已经获取的数据显示出去
|
||||||
|
if len(observe_window) >= 1:
|
||||||
|
observe_window[0] += chunkjson['choices'][0]["delta"]["content"]
|
||||||
|
# 看门狗,如果超过期限没有喂狗,则终止
|
||||||
|
if len(observe_window) >= 2:
|
||||||
|
if (time.time()-observe_window[1]) > watch_dog_patience:
|
||||||
|
raise RuntimeError("用户取消了程序。")
|
||||||
|
except Exception as e:
|
||||||
|
chunk = get_full_error(chunk, stream_response)
|
||||||
|
chunk_decoded = chunk.decode()
|
||||||
|
error_msg = chunk_decoded
|
||||||
|
print(error_msg)
|
||||||
|
raise RuntimeError("Json解析不合常规")
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
|
||||||
|
"""
|
||||||
|
发送至chatGPT,流式获取输出。
|
||||||
|
用于基础的对话功能。
|
||||||
|
inputs 是本次问询的输入
|
||||||
|
top_p, temperature是chatGPT的内部调优参数
|
||||||
|
history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
|
||||||
|
chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
|
||||||
|
additional_fn代表点击的哪个按钮,按钮见functional.py
|
||||||
|
"""
|
||||||
|
if len(YIMODEL_API_KEY) == 0:
|
||||||
|
raise RuntimeError("没有设置YIMODEL_API_KEY选项")
|
||||||
|
if inputs == "": inputs = "空空如也的输入栏"
|
||||||
|
user_input = inputs
|
||||||
|
if additional_fn is not None:
|
||||||
|
from core_functional import handle_core_functionality
|
||||||
|
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
|
||||||
|
|
||||||
|
raw_input = inputs
|
||||||
|
logging.info(f'[raw_input] {raw_input}')
|
||||||
|
chatbot.append((inputs, ""))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
|
||||||
|
|
||||||
|
# check mis-behavior
|
||||||
|
if is_the_upload_folder(user_input):
|
||||||
|
chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需点击“**函数插件区**”按钮进行处理,请勿点击“提交”按钮或者“基础功能区”按钮。")
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
|
||||||
|
|
||||||
|
from .bridge_all import model_info
|
||||||
|
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
|
||||||
|
|
||||||
|
history.append(inputs); history.append("")
|
||||||
|
|
||||||
|
retry = 0
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
# make a POST request to the API endpoint, stream=True
|
||||||
|
response = requests.post(endpoint, headers=headers, proxies=proxies,
|
||||||
|
json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
|
||||||
|
except:
|
||||||
|
retry += 1
|
||||||
|
chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
|
||||||
|
retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
|
||||||
|
if retry > MAX_RETRY: raise TimeoutError
|
||||||
|
|
||||||
|
gpt_replying_buffer = ""
|
||||||
|
|
||||||
|
is_head_of_the_stream = True
|
||||||
|
if stream:
|
||||||
|
stream_response = response.iter_lines()
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
chunk = next(stream_response)
|
||||||
|
except StopIteration:
|
||||||
|
break
|
||||||
|
except requests.exceptions.ConnectionError:
|
||||||
|
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
|
||||||
|
|
||||||
|
# 提前读取一些信息 (用于判断异常)
|
||||||
|
chunk_decoded, chunkjson, is_last_chunk = decode_chunk(chunk)
|
||||||
|
|
||||||
|
if is_head_of_the_stream and (r'"object":"error"' not in chunk_decoded) and (r'"role":"assistant"' in chunk_decoded):
|
||||||
|
# 数据流的第一帧不携带content
|
||||||
|
is_head_of_the_stream = False; continue
|
||||||
|
|
||||||
|
if chunk:
|
||||||
|
try:
|
||||||
|
if is_last_chunk:
|
||||||
|
# 判定为数据流的结束,gpt_replying_buffer也写完了
|
||||||
|
logging.info(f'[response] {gpt_replying_buffer}')
|
||||||
|
break
|
||||||
|
# 处理数据流的主体
|
||||||
|
status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
|
||||||
|
gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
|
||||||
|
# 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出
|
||||||
|
history[-1] = gpt_replying_buffer
|
||||||
|
chatbot[-1] = (history[-2], history[-1])
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
|
||||||
|
except Exception as e:
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
|
||||||
|
chunk = get_full_error(chunk, stream_response)
|
||||||
|
chunk_decoded = chunk.decode()
|
||||||
|
error_msg = chunk_decoded
|
||||||
|
chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
|
||||||
|
print(error_msg)
|
||||||
|
return
|
||||||
|
|
||||||
|
def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg):
|
||||||
|
from .bridge_all import model_info
|
||||||
|
if "bad_request" in error_msg:
|
||||||
|
chatbot[-1] = (chatbot[-1][0], "[Local Message] 已经超过了模型的最大上下文或是模型格式错误,请尝试削减单次输入的文本量。")
|
||||||
|
elif "authentication_error" in error_msg:
|
||||||
|
chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. 请确保API key有效。")
|
||||||
|
elif "not_found" in error_msg:
|
||||||
|
chatbot[-1] = (chatbot[-1][0], f"[Local Message] {llm_kwargs['llm_model']} 无效,请确保使用小写的模型名称。")
|
||||||
|
elif "rate_limit" in error_msg:
|
||||||
|
chatbot[-1] = (chatbot[-1][0], "[Local Message] 遇到了控制请求速率限制,请一分钟后重试。")
|
||||||
|
elif "system_busy" in error_msg:
|
||||||
|
chatbot[-1] = (chatbot[-1][0], "[Local Message] 系统繁忙,请一分钟后重试。")
|
||||||
|
else:
|
||||||
|
from toolbox import regular_txt_to_markdown
|
||||||
|
tb_str = '```\n' + trimmed_format_exc() + '```'
|
||||||
|
chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
|
||||||
|
return chatbot, history
|
||||||
|
|
||||||
|
def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
|
||||||
|
"""
|
||||||
|
整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
|
||||||
|
"""
|
||||||
|
api_key = f"Bearer {YIMODEL_API_KEY}"
|
||||||
|
|
||||||
|
headers = {
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
"Authorization": api_key
|
||||||
|
}
|
||||||
|
|
||||||
|
conversation_cnt = len(history) // 2
|
||||||
|
|
||||||
|
messages = [{"role": "system", "content": system_prompt}]
|
||||||
|
if conversation_cnt:
|
||||||
|
for index in range(0, 2*conversation_cnt, 2):
|
||||||
|
what_i_have_asked = {}
|
||||||
|
what_i_have_asked["role"] = "user"
|
||||||
|
what_i_have_asked["content"] = history[index]
|
||||||
|
what_gpt_answer = {}
|
||||||
|
what_gpt_answer["role"] = "assistant"
|
||||||
|
what_gpt_answer["content"] = history[index+1]
|
||||||
|
if what_i_have_asked["content"] != "":
|
||||||
|
if what_gpt_answer["content"] == "": continue
|
||||||
|
if what_gpt_answer["content"] == timeout_bot_msg: continue
|
||||||
|
messages.append(what_i_have_asked)
|
||||||
|
messages.append(what_gpt_answer)
|
||||||
|
else:
|
||||||
|
messages[-1]['content'] = what_gpt_answer['content']
|
||||||
|
|
||||||
|
what_i_ask_now = {}
|
||||||
|
what_i_ask_now["role"] = "user"
|
||||||
|
what_i_ask_now["content"] = inputs
|
||||||
|
messages.append(what_i_ask_now)
|
||||||
|
model = llm_kwargs['llm_model']
|
||||||
|
if llm_kwargs['llm_model'].startswith('one-api-'):
|
||||||
|
model = llm_kwargs['llm_model'][len('one-api-'):]
|
||||||
|
model, _ = read_one_api_model_name(model)
|
||||||
|
tokens = 600 if llm_kwargs['llm_model'] == 'yi-34b-chat-0205' else 4096 #yi-34b-chat-0205只有4k上下文...
|
||||||
|
payload = {
|
||||||
|
"model": model,
|
||||||
|
"messages": messages,
|
||||||
|
"temperature": llm_kwargs['temperature'], # 1.0,
|
||||||
|
"stream": stream,
|
||||||
|
"max_tokens": tokens
|
||||||
|
}
|
||||||
|
try:
|
||||||
|
print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
|
||||||
|
except:
|
||||||
|
print('输入中可能存在乱码。')
|
||||||
|
return headers,payload
|
||||||
@@ -1,16 +1,24 @@
|
|||||||
|
|
||||||
import time
|
import time
|
||||||
from toolbox import update_ui, get_conf, update_ui_lastest_msg
|
import os
|
||||||
from toolbox import check_packages, report_exception
|
from toolbox import update_ui, get_conf, update_ui_lastest_msg, log_chat
|
||||||
|
from toolbox import check_packages, report_exception, have_any_recent_upload_image_files
|
||||||
|
from toolbox import ChatBotWithCookies
|
||||||
|
|
||||||
model_name = '智谱AI大模型'
|
model_name = '智谱AI大模型'
|
||||||
|
zhipuai_default_model = 'glm-4'
|
||||||
|
|
||||||
def validate_key():
|
def validate_key():
|
||||||
ZHIPUAI_API_KEY = get_conf("ZHIPUAI_API_KEY")
|
ZHIPUAI_API_KEY = get_conf("ZHIPUAI_API_KEY")
|
||||||
if ZHIPUAI_API_KEY == '': return False
|
if ZHIPUAI_API_KEY == '': return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
def make_media_input(inputs, image_paths):
|
||||||
|
for image_path in image_paths:
|
||||||
|
inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
|
||||||
|
return inputs
|
||||||
|
|
||||||
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
|
||||||
|
observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
⭐多线程方法
|
⭐多线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
@@ -18,32 +26,39 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
|
|||||||
watch_dog_patience = 5
|
watch_dog_patience = 5
|
||||||
response = ""
|
response = ""
|
||||||
|
|
||||||
|
if llm_kwargs["llm_model"] == "zhipuai":
|
||||||
|
llm_kwargs["llm_model"] = zhipuai_default_model
|
||||||
|
|
||||||
if validate_key() is False:
|
if validate_key() is False:
|
||||||
raise RuntimeError('请配置ZHIPUAI_API_KEY')
|
raise RuntimeError('请配置ZHIPUAI_API_KEY')
|
||||||
|
|
||||||
from .com_zhipuapi import ZhipuRequestInstance
|
# 开始接收回复
|
||||||
sri = ZhipuRequestInstance()
|
from .com_zhipuglm import ZhipuChatInit
|
||||||
for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
|
zhipu_bro_init = ZhipuChatInit()
|
||||||
|
for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, sys_prompt):
|
||||||
if len(observe_window) >= 1:
|
if len(observe_window) >= 1:
|
||||||
observe_window[0] = response
|
observe_window[0] = response
|
||||||
if len(observe_window) >= 2:
|
if len(observe_window) >= 2:
|
||||||
if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
|
if (time.time() - observe_window[1]) > watch_dog_patience:
|
||||||
|
raise RuntimeError("程序终止。")
|
||||||
return response
|
return response
|
||||||
|
|
||||||
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
|
|
||||||
|
def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
|
||||||
|
history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
|
||||||
"""
|
"""
|
||||||
⭐单线程方法
|
⭐单线程方法
|
||||||
函数的说明请见 request_llms/bridge_all.py
|
函数的说明请见 request_llms/bridge_all.py
|
||||||
"""
|
"""
|
||||||
chatbot.append((inputs, ""))
|
chatbot.append([inputs, ""])
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
|
||||||
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
||||||
try:
|
try:
|
||||||
check_packages(["zhipuai"])
|
check_packages(["zhipuai"])
|
||||||
except:
|
except:
|
||||||
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install zhipuai==1.0.7```。",
|
yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
|
||||||
chatbot=chatbot, history=history, delay=0)
|
chatbot=chatbot, history=history, delay=0)
|
||||||
return
|
return
|
||||||
|
|
||||||
if validate_key() is False:
|
if validate_key() is False:
|
||||||
@@ -53,16 +68,30 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
|
|||||||
if additional_fn is not None:
|
if additional_fn is not None:
|
||||||
from core_functional import handle_core_functionality
|
from core_functional import handle_core_functionality
|
||||||
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
|
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
|
||||||
|
chatbot[-1] = [inputs, ""]
|
||||||
# 开始接收回复
|
|
||||||
from .com_zhipuapi import ZhipuRequestInstance
|
|
||||||
sri = ZhipuRequestInstance()
|
|
||||||
for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
|
|
||||||
chatbot[-1] = (inputs, response)
|
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
|
||||||
# 总结输出
|
if llm_kwargs["llm_model"] == "zhipuai":
|
||||||
if response == f"[Local Message] 等待{model_name}响应中 ...":
|
llm_kwargs["llm_model"] = zhipuai_default_model
|
||||||
response = f"[Local Message] {model_name}响应异常 ..."
|
|
||||||
|
if llm_kwargs["llm_model"] in ["glm-4v"]:
|
||||||
|
have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
|
||||||
|
if not have_recent_file:
|
||||||
|
chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写"))
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history, msg="等待图片") # 刷新界面
|
||||||
|
return
|
||||||
|
if have_recent_file:
|
||||||
|
inputs = make_media_input(inputs, image_paths)
|
||||||
|
chatbot[-1] = [inputs, ""]
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
|
|
||||||
|
|
||||||
|
# 开始接收回复
|
||||||
|
from .com_zhipuglm import ZhipuChatInit
|
||||||
|
zhipu_bro_init = ZhipuChatInit()
|
||||||
|
for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, system_prompt):
|
||||||
|
chatbot[-1] = [inputs, response]
|
||||||
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
history.extend([inputs, response])
|
history.extend([inputs, response])
|
||||||
|
log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
|
||||||
yield from update_ui(chatbot=chatbot, history=history)
|
yield from update_ui(chatbot=chatbot, history=history)
|
||||||
@@ -7,7 +7,7 @@ import os
|
|||||||
import re
|
import re
|
||||||
import requests
|
import requests
|
||||||
from typing import List, Dict, Tuple
|
from typing import List, Dict, Tuple
|
||||||
from toolbox import get_conf, encode_image, get_pictures_list
|
from toolbox import get_conf, encode_image, get_pictures_list, to_markdown_tabs
|
||||||
|
|
||||||
proxies, TIMEOUT_SECONDS = get_conf("proxies", "TIMEOUT_SECONDS")
|
proxies, TIMEOUT_SECONDS = get_conf("proxies", "TIMEOUT_SECONDS")
|
||||||
|
|
||||||
@@ -112,38 +112,12 @@ def html_local_img(__file, layout="left", max_width=None, max_height=None, md=Tr
|
|||||||
return a
|
return a
|
||||||
|
|
||||||
|
|
||||||
def to_markdown_tabs(head: list, tabs: list, alignment=":---:", column=False):
|
|
||||||
"""
|
|
||||||
Args:
|
|
||||||
head: 表头:[]
|
|
||||||
tabs: 表值:[[列1], [列2], [列3], [列4]]
|
|
||||||
alignment: :--- 左对齐, :---: 居中对齐, ---: 右对齐
|
|
||||||
column: True to keep data in columns, False to keep data in rows (default).
|
|
||||||
Returns:
|
|
||||||
A string representation of the markdown table.
|
|
||||||
"""
|
|
||||||
if column:
|
|
||||||
transposed_tabs = list(map(list, zip(*tabs)))
|
|
||||||
else:
|
|
||||||
transposed_tabs = tabs
|
|
||||||
# Find the maximum length among the columns
|
|
||||||
max_len = max(len(column) for column in transposed_tabs)
|
|
||||||
|
|
||||||
tab_format = "| %s "
|
|
||||||
tabs_list = "".join([tab_format % i for i in head]) + "|\n"
|
|
||||||
tabs_list += "".join([tab_format % alignment for i in head]) + "|\n"
|
|
||||||
|
|
||||||
for i in range(max_len):
|
|
||||||
row_data = [tab[i] if i < len(tab) else "" for tab in transposed_tabs]
|
|
||||||
row_data = file_manifest_filter_html(row_data, filter_=None)
|
|
||||||
tabs_list += "".join([tab_format % i for i in row_data]) + "|\n"
|
|
||||||
|
|
||||||
return tabs_list
|
|
||||||
|
|
||||||
|
|
||||||
class GoogleChatInit:
|
class GoogleChatInit:
|
||||||
def __init__(self):
|
def __init__(self, llm_kwargs):
|
||||||
self.url_gemini = "https://generativelanguage.googleapis.com/v1beta/models/%m:streamGenerateContent?key=%k"
|
from .bridge_all import model_info
|
||||||
|
endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
|
||||||
|
self.url_gemini = endpoint + "/%m:streamGenerateContent?key=%k"
|
||||||
|
|
||||||
def generate_chat(self, inputs, llm_kwargs, history, system_prompt):
|
def generate_chat(self, inputs, llm_kwargs, history, system_prompt):
|
||||||
headers, payload = self.generate_message_payload(
|
headers, payload = self.generate_message_payload(
|
||||||
|
|||||||
@@ -48,6 +48,10 @@ class QwenRequestInstance():
|
|||||||
for response in responses:
|
for response in responses:
|
||||||
if response.status_code == HTTPStatus.OK:
|
if response.status_code == HTTPStatus.OK:
|
||||||
if response.output.choices[0].finish_reason == 'stop':
|
if response.output.choices[0].finish_reason == 'stop':
|
||||||
|
try:
|
||||||
|
self.result_buf += response.output.choices[0].message.content
|
||||||
|
except:
|
||||||
|
pass
|
||||||
yield self.result_buf
|
yield self.result_buf
|
||||||
break
|
break
|
||||||
elif response.output.choices[0].finish_reason == 'length':
|
elif response.output.choices[0].finish_reason == 'length':
|
||||||
|
|||||||
@@ -65,6 +65,7 @@ class SparkRequestInstance():
|
|||||||
self.gpt_url = "ws://spark-api.xf-yun.com/v1.1/chat"
|
self.gpt_url = "ws://spark-api.xf-yun.com/v1.1/chat"
|
||||||
self.gpt_url_v2 = "ws://spark-api.xf-yun.com/v2.1/chat"
|
self.gpt_url_v2 = "ws://spark-api.xf-yun.com/v2.1/chat"
|
||||||
self.gpt_url_v3 = "ws://spark-api.xf-yun.com/v3.1/chat"
|
self.gpt_url_v3 = "ws://spark-api.xf-yun.com/v3.1/chat"
|
||||||
|
self.gpt_url_v35 = "wss://spark-api.xf-yun.com/v3.5/chat"
|
||||||
self.gpt_url_img = "wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image"
|
self.gpt_url_img = "wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image"
|
||||||
|
|
||||||
self.time_to_yield_event = threading.Event()
|
self.time_to_yield_event = threading.Event()
|
||||||
@@ -91,6 +92,8 @@ class SparkRequestInstance():
|
|||||||
gpt_url = self.gpt_url_v2
|
gpt_url = self.gpt_url_v2
|
||||||
elif llm_kwargs['llm_model'] == 'sparkv3':
|
elif llm_kwargs['llm_model'] == 'sparkv3':
|
||||||
gpt_url = self.gpt_url_v3
|
gpt_url = self.gpt_url_v3
|
||||||
|
elif llm_kwargs['llm_model'] == 'sparkv3.5':
|
||||||
|
gpt_url = self.gpt_url_v35
|
||||||
else:
|
else:
|
||||||
gpt_url = self.gpt_url
|
gpt_url = self.gpt_url
|
||||||
file_manifest = []
|
file_manifest = []
|
||||||
@@ -190,6 +193,7 @@ def gen_params(appid, inputs, llm_kwargs, history, system_prompt, file_manifest)
|
|||||||
"spark": "general",
|
"spark": "general",
|
||||||
"sparkv2": "generalv2",
|
"sparkv2": "generalv2",
|
||||||
"sparkv3": "generalv3",
|
"sparkv3": "generalv3",
|
||||||
|
"sparkv3.5": "generalv3.5",
|
||||||
}
|
}
|
||||||
domains_select = domains[llm_kwargs['llm_model']]
|
domains_select = domains[llm_kwargs['llm_model']]
|
||||||
if file_manifest: domains_select = 'image'
|
if file_manifest: domains_select = 'image'
|
||||||
|
|||||||
@@ -1,70 +0,0 @@
|
|||||||
from toolbox import get_conf
|
|
||||||
import threading
|
|
||||||
import logging
|
|
||||||
|
|
||||||
timeout_bot_msg = '[Local Message] Request timeout. Network error.'
|
|
||||||
|
|
||||||
class ZhipuRequestInstance():
|
|
||||||
def __init__(self):
|
|
||||||
|
|
||||||
self.time_to_yield_event = threading.Event()
|
|
||||||
self.time_to_exit_event = threading.Event()
|
|
||||||
|
|
||||||
self.result_buf = ""
|
|
||||||
|
|
||||||
def generate(self, inputs, llm_kwargs, history, system_prompt):
|
|
||||||
# import _thread as thread
|
|
||||||
import zhipuai
|
|
||||||
ZHIPUAI_API_KEY, ZHIPUAI_MODEL = get_conf("ZHIPUAI_API_KEY", "ZHIPUAI_MODEL")
|
|
||||||
zhipuai.api_key = ZHIPUAI_API_KEY
|
|
||||||
self.result_buf = ""
|
|
||||||
response = zhipuai.model_api.sse_invoke(
|
|
||||||
model=ZHIPUAI_MODEL,
|
|
||||||
prompt=generate_message_payload(inputs, llm_kwargs, history, system_prompt),
|
|
||||||
top_p=llm_kwargs['top_p']*0.7, # 智谱的API抽风,手动*0.7给做个线性变换
|
|
||||||
temperature=llm_kwargs['temperature']*0.95, # 智谱的API抽风,手动*0.7给做个线性变换
|
|
||||||
)
|
|
||||||
for event in response.events():
|
|
||||||
if event.event == "add":
|
|
||||||
# if self.result_buf == "" and event.data.startswith(" "):
|
|
||||||
# event.data = event.data.lstrip(" ") # 每次智谱为啥都要带个空格开头呢?
|
|
||||||
self.result_buf += event.data
|
|
||||||
yield self.result_buf
|
|
||||||
elif event.event == "error" or event.event == "interrupted":
|
|
||||||
raise RuntimeError("Unknown error:" + event.data)
|
|
||||||
elif event.event == "finish":
|
|
||||||
yield self.result_buf
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
raise RuntimeError("Unknown error:" + str(event))
|
|
||||||
if self.result_buf == "":
|
|
||||||
yield "智谱没有返回任何数据, 请检查ZHIPUAI_API_KEY和ZHIPUAI_MODEL是否填写正确."
|
|
||||||
logging.info(f'[raw_input] {inputs}')
|
|
||||||
logging.info(f'[response] {self.result_buf}')
|
|
||||||
return self.result_buf
|
|
||||||
|
|
||||||
def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
|
|
||||||
conversation_cnt = len(history) // 2
|
|
||||||
messages = [{"role": "user", "content": system_prompt}, {"role": "assistant", "content": "Certainly!"}]
|
|
||||||
if conversation_cnt:
|
|
||||||
for index in range(0, 2*conversation_cnt, 2):
|
|
||||||
what_i_have_asked = {}
|
|
||||||
what_i_have_asked["role"] = "user"
|
|
||||||
what_i_have_asked["content"] = history[index]
|
|
||||||
what_gpt_answer = {}
|
|
||||||
what_gpt_answer["role"] = "assistant"
|
|
||||||
what_gpt_answer["content"] = history[index+1]
|
|
||||||
if what_i_have_asked["content"] != "":
|
|
||||||
if what_gpt_answer["content"] == "":
|
|
||||||
continue
|
|
||||||
if what_gpt_answer["content"] == timeout_bot_msg:
|
|
||||||
continue
|
|
||||||
messages.append(what_i_have_asked)
|
|
||||||
messages.append(what_gpt_answer)
|
|
||||||
else:
|
|
||||||
messages[-1]['content'] = what_gpt_answer['content']
|
|
||||||
what_i_ask_now = {}
|
|
||||||
what_i_ask_now["role"] = "user"
|
|
||||||
what_i_ask_now["content"] = inputs
|
|
||||||
messages.append(what_i_ask_now)
|
|
||||||
return messages
|
|
||||||
129
request_llms/com_zhipuglm.py
普通文件
129
request_llms/com_zhipuglm.py
普通文件
@@ -0,0 +1,129 @@
|
|||||||
|
# encoding: utf-8
|
||||||
|
# @Time : 2024/1/22
|
||||||
|
# @Author : Kilig947 & binary husky
|
||||||
|
# @Descr : 兼容最新的智谱Ai
|
||||||
|
from toolbox import get_conf
|
||||||
|
from zhipuai import ZhipuAI
|
||||||
|
from toolbox import get_conf, encode_image, get_pictures_list
|
||||||
|
import logging, os
|
||||||
|
|
||||||
|
|
||||||
|
def input_encode_handler(inputs:str, llm_kwargs:dict):
|
||||||
|
if llm_kwargs["most_recent_uploaded"].get("path"):
|
||||||
|
image_paths = get_pictures_list(llm_kwargs["most_recent_uploaded"]["path"])
|
||||||
|
md_encode = []
|
||||||
|
for md_path in image_paths:
|
||||||
|
type_ = os.path.splitext(md_path)[1].replace(".", "")
|
||||||
|
type_ = "jpeg" if type_ == "jpg" else type_
|
||||||
|
md_encode.append({"data": encode_image(md_path), "type": type_})
|
||||||
|
return inputs, md_encode
|
||||||
|
|
||||||
|
|
||||||
|
class ZhipuChatInit:
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
ZHIPUAI_API_KEY, ZHIPUAI_MODEL = get_conf("ZHIPUAI_API_KEY", "ZHIPUAI_MODEL")
|
||||||
|
if len(ZHIPUAI_MODEL) > 0:
|
||||||
|
logging.error('ZHIPUAI_MODEL 配置项选项已经弃用,请在LLM_MODEL中配置')
|
||||||
|
self.zhipu_bro = ZhipuAI(api_key=ZHIPUAI_API_KEY)
|
||||||
|
self.model = ''
|
||||||
|
|
||||||
|
def __conversation_user(self, user_input: str, llm_kwargs:dict):
|
||||||
|
if self.model not in ["glm-4v"]:
|
||||||
|
return {"role": "user", "content": user_input}
|
||||||
|
else:
|
||||||
|
input_, encode_img = input_encode_handler(user_input, llm_kwargs=llm_kwargs)
|
||||||
|
what_i_have_asked = {"role": "user", "content": []}
|
||||||
|
what_i_have_asked['content'].append({"type": 'text', "text": user_input})
|
||||||
|
if encode_img:
|
||||||
|
img_d = {"type": "image_url",
|
||||||
|
"image_url": {'url': encode_img}}
|
||||||
|
what_i_have_asked['content'].append(img_d)
|
||||||
|
return what_i_have_asked
|
||||||
|
|
||||||
|
def __conversation_history(self, history:list, llm_kwargs:dict):
|
||||||
|
messages = []
|
||||||
|
conversation_cnt = len(history) // 2
|
||||||
|
if conversation_cnt:
|
||||||
|
for index in range(0, 2 * conversation_cnt, 2):
|
||||||
|
what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
|
||||||
|
what_gpt_answer = {
|
||||||
|
"role": "assistant",
|
||||||
|
"content": history[index + 1]
|
||||||
|
}
|
||||||
|
messages.append(what_i_have_asked)
|
||||||
|
messages.append(what_gpt_answer)
|
||||||
|
return messages
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def preprocess_param(param, default=0.95, min_val=0.01, max_val=0.99):
|
||||||
|
"""预处理参数,保证其在允许范围内,并处理精度问题"""
|
||||||
|
try:
|
||||||
|
param = float(param)
|
||||||
|
except ValueError:
|
||||||
|
return default
|
||||||
|
|
||||||
|
if param <= min_val:
|
||||||
|
return min_val
|
||||||
|
elif param >= max_val:
|
||||||
|
return max_val
|
||||||
|
else:
|
||||||
|
return round(param, 2) # 可挑选精度,目前是两位小数
|
||||||
|
|
||||||
|
def __conversation_message_payload(self, inputs:str, llm_kwargs:dict, history:list, system_prompt:str):
|
||||||
|
messages = []
|
||||||
|
if system_prompt:
|
||||||
|
messages.append({"role": "system", "content": system_prompt})
|
||||||
|
self.model = llm_kwargs['llm_model']
|
||||||
|
messages.extend(self.__conversation_history(history, llm_kwargs)) # 处理 history
|
||||||
|
if inputs.strip() == "": # 处理空输入导致报错的问题 https://github.com/binary-husky/gpt_academic/issues/1640 提示 {"error":{"code":"1214","message":"messages[1]:content和tool_calls 字段不能同时为空"}
|
||||||
|
inputs = "." # 空格、换行、空字符串都会报错,所以用最没有意义的一个点代替
|
||||||
|
messages.append(self.__conversation_user(inputs, llm_kwargs)) # 处理用户对话
|
||||||
|
"""
|
||||||
|
采样温度,控制输出的随机性,必须为正数
|
||||||
|
取值范围是:(0.0, 1.0),不能等于 0,默认值为 0.95,
|
||||||
|
值越大,会使输出更随机,更具创造性;
|
||||||
|
值越小,输出会更加稳定或确定
|
||||||
|
建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数
|
||||||
|
"""
|
||||||
|
temperature = self.preprocess_param(
|
||||||
|
param=llm_kwargs.get('temperature', 0.95),
|
||||||
|
default=0.95,
|
||||||
|
min_val=0.01,
|
||||||
|
max_val=0.99
|
||||||
|
)
|
||||||
|
"""
|
||||||
|
用温度取样的另一种方法,称为核取样
|
||||||
|
取值范围是:(0.0, 1.0) 开区间,
|
||||||
|
不能等于 0 或 1,默认值为 0.7
|
||||||
|
模型考虑具有 top_p 概率质量 tokens 的结果
|
||||||
|
例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens
|
||||||
|
建议您根据应用场景调整 top_p 或 temperature 参数,
|
||||||
|
但不要同时调整两个参数
|
||||||
|
"""
|
||||||
|
top_p = self.preprocess_param(
|
||||||
|
param=llm_kwargs.get('top_p', 0.70),
|
||||||
|
default=0.70,
|
||||||
|
min_val=0.01,
|
||||||
|
max_val=0.99
|
||||||
|
)
|
||||||
|
response = self.zhipu_bro.chat.completions.create(
|
||||||
|
model=self.model, messages=messages, stream=True,
|
||||||
|
temperature=temperature,
|
||||||
|
top_p=top_p,
|
||||||
|
max_tokens=llm_kwargs.get('max_tokens', 1024 * 4),
|
||||||
|
)
|
||||||
|
return response
|
||||||
|
|
||||||
|
def generate_chat(self, inputs:str, llm_kwargs:dict, history:list, system_prompt:str):
|
||||||
|
self.model = llm_kwargs['llm_model']
|
||||||
|
response = self.__conversation_message_payload(inputs, llm_kwargs, history, system_prompt)
|
||||||
|
bro_results = ''
|
||||||
|
for chunk in response:
|
||||||
|
bro_results += chunk.choices[0].delta.content
|
||||||
|
yield chunk.choices[0].delta.content, bro_results
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
zhipu = ZhipuChatInit()
|
||||||
|
zhipu.generate_chat('你好', {'llm_model': 'glm-4'}, [], '你是WPSAi')
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
import time
|
import time
|
||||||
import threading
|
import threading
|
||||||
from toolbox import update_ui, Singleton
|
from toolbox import update_ui, Singleton
|
||||||
|
from toolbox import ChatBotWithCookies
|
||||||
from multiprocessing import Process, Pipe
|
from multiprocessing import Process, Pipe
|
||||||
from contextlib import redirect_stdout
|
from contextlib import redirect_stdout
|
||||||
from request_llms.queued_pipe import create_queue_pipe
|
from request_llms.queued_pipe import create_queue_pipe
|
||||||
@@ -214,7 +215,7 @@ class LocalLLMHandle(Process):
|
|||||||
def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='classic'):
|
def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='classic'):
|
||||||
load_message = f"{model_name}尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,{model_name}消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
|
load_message = f"{model_name}尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,{model_name}消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
|
||||||
|
|
||||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="", observe_window:list=[], console_slience:bool=False):
|
||||||
"""
|
"""
|
||||||
refer to request_llms/bridge_all.py
|
refer to request_llms/bridge_all.py
|
||||||
"""
|
"""
|
||||||
@@ -260,7 +261,8 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
|
|||||||
raise RuntimeError("程序终止。")
|
raise RuntimeError("程序终止。")
|
||||||
return response
|
return response
|
||||||
|
|
||||||
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
|
def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
|
||||||
|
history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
|
||||||
"""
|
"""
|
||||||
refer to request_llms/bridge_all.py
|
refer to request_llms/bridge_all.py
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -1,12 +1,14 @@
|
|||||||
https://public.gpt-academic.top/publish/gradio-3.32.7-py3-none-any.whl
|
https://public.agent-matrix.com/publish/gradio-3.32.9-py3-none-any.whl
|
||||||
|
gradio-client==0.8
|
||||||
pypdf2==2.12.1
|
pypdf2==2.12.1
|
||||||
zhipuai<2
|
zhipuai>=2
|
||||||
tiktoken>=0.3.3
|
tiktoken>=0.3.3
|
||||||
requests[socks]
|
requests[socks]
|
||||||
pydantic==1.10.11
|
pydantic==2.5.2
|
||||||
protobuf==3.18
|
protobuf==3.18
|
||||||
transformers>=4.27.1
|
transformers>=4.27.1
|
||||||
scipdf_parser>=0.52
|
scipdf_parser>=0.52
|
||||||
|
anthropic>=0.18.1
|
||||||
python-markdown-math
|
python-markdown-math
|
||||||
pymdown-extensions
|
pymdown-extensions
|
||||||
websocket-client
|
websocket-client
|
||||||
@@ -15,7 +17,7 @@ prompt_toolkit
|
|||||||
latex2mathml
|
latex2mathml
|
||||||
python-docx
|
python-docx
|
||||||
mdtex2html
|
mdtex2html
|
||||||
anthropic
|
dashscope
|
||||||
pyautogen
|
pyautogen
|
||||||
colorama
|
colorama
|
||||||
Markdown
|
Markdown
|
||||||
|
|||||||
61
shared_utils/cookie_manager.py
普通文件
61
shared_utils/cookie_manager.py
普通文件
@@ -0,0 +1,61 @@
|
|||||||
|
from typing import Callable
|
||||||
|
def load_web_cookie_cache__fn_builder(customize_btns, cookies, predefined_btns)->Callable:
|
||||||
|
def load_web_cookie_cache(persistent_cookie_, cookies_):
|
||||||
|
import gradio as gr
|
||||||
|
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid
|
||||||
|
|
||||||
|
ret = {}
|
||||||
|
for k in customize_btns:
|
||||||
|
ret.update({customize_btns[k]: gr.update(visible=False, value="")})
|
||||||
|
|
||||||
|
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
||||||
|
except: return ret
|
||||||
|
|
||||||
|
customize_fn_overwrite_ = persistent_cookie_.get("custom_bnt", {})
|
||||||
|
cookies_['customize_fn_overwrite'] = customize_fn_overwrite_
|
||||||
|
ret.update({cookies: cookies_})
|
||||||
|
|
||||||
|
for k,v in persistent_cookie_["custom_bnt"].items():
|
||||||
|
if v['Title'] == "": continue
|
||||||
|
if k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=True, value=v['Title'])})
|
||||||
|
else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
|
||||||
|
return ret
|
||||||
|
return load_web_cookie_cache
|
||||||
|
|
||||||
|
|
||||||
|
def assign_btn__fn_builder(customize_btns, predefined_btns, cookies, web_cookie_cache)->Callable:
|
||||||
|
def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix, clean_up=False):
|
||||||
|
import gradio as gr
|
||||||
|
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid
|
||||||
|
ret = {}
|
||||||
|
# 读取之前的自定义按钮
|
||||||
|
customize_fn_overwrite_ = cookies_['customize_fn_overwrite']
|
||||||
|
# 更新新的自定义按钮
|
||||||
|
customize_fn_overwrite_.update({
|
||||||
|
basic_btn_dropdown_:
|
||||||
|
{
|
||||||
|
"Title":basic_fn_title,
|
||||||
|
"Prefix":basic_fn_prefix,
|
||||||
|
"Suffix":basic_fn_suffix,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)
|
||||||
|
if clean_up:
|
||||||
|
customize_fn_overwrite_ = {}
|
||||||
|
cookies_.update(customize_fn_overwrite_) # 更新cookie
|
||||||
|
visible = (not clean_up) and (basic_fn_title != "")
|
||||||
|
if basic_btn_dropdown_ in customize_btns:
|
||||||
|
# 是自定义按钮,不是预定义按钮
|
||||||
|
ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)})
|
||||||
|
else:
|
||||||
|
# 是预定义按钮
|
||||||
|
ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)})
|
||||||
|
ret.update({cookies: cookies_})
|
||||||
|
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
||||||
|
except: persistent_cookie_ = {}
|
||||||
|
persistent_cookie_["custom_bnt"] = customize_fn_overwrite_ # dict update new value
|
||||||
|
persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict
|
||||||
|
ret.update({web_cookie_cache: persistent_cookie_}) # write persistent cookie
|
||||||
|
return ret
|
||||||
|
return assign_btn
|
||||||
|
|
||||||
211
shared_utils/fastapi_server.py
普通文件
211
shared_utils/fastapi_server.py
普通文件
@@ -0,0 +1,211 @@
|
|||||||
|
"""
|
||||||
|
Tests:
|
||||||
|
|
||||||
|
- custom_path false / no user auth:
|
||||||
|
-- upload file(yes)
|
||||||
|
-- download file(yes)
|
||||||
|
-- websocket(yes)
|
||||||
|
-- block __pycache__ access(yes)
|
||||||
|
-- rel (yes)
|
||||||
|
-- abs (yes)
|
||||||
|
-- block user access(fail) http://localhost:45013/file=gpt_log/admin/chat_secrets.log
|
||||||
|
-- fix(commit f6bf05048c08f5cd84593f7fdc01e64dec1f584a)-> block successful
|
||||||
|
|
||||||
|
- custom_path yes("/cc/gptac") / no user auth:
|
||||||
|
-- upload file(yes)
|
||||||
|
-- download file(yes)
|
||||||
|
-- websocket(yes)
|
||||||
|
-- block __pycache__ access(yes)
|
||||||
|
-- block user access(yes)
|
||||||
|
|
||||||
|
- custom_path yes("/cc/gptac/") / no user auth:
|
||||||
|
-- upload file(yes)
|
||||||
|
-- download file(yes)
|
||||||
|
-- websocket(yes)
|
||||||
|
-- block user access(yes)
|
||||||
|
|
||||||
|
- custom_path yes("/cc/gptac/") / + user auth:
|
||||||
|
-- upload file(yes)
|
||||||
|
-- download file(yes)
|
||||||
|
-- websocket(yes)
|
||||||
|
-- block user access(yes)
|
||||||
|
-- block user-wise access (yes)
|
||||||
|
|
||||||
|
- custom_path no + user auth:
|
||||||
|
-- upload file(yes)
|
||||||
|
-- download file(yes)
|
||||||
|
-- websocket(yes)
|
||||||
|
-- block user access(yes)
|
||||||
|
-- block user-wise access (yes)
|
||||||
|
|
||||||
|
queue cocurrent effectiveness
|
||||||
|
-- upload file(yes)
|
||||||
|
-- download file(yes)
|
||||||
|
-- websocket(yes)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os, requests, threading, time
|
||||||
|
import uvicorn
|
||||||
|
|
||||||
|
def _authorize_user(path_or_url, request, gradio_app):
|
||||||
|
from toolbox import get_conf, default_user_name
|
||||||
|
PATH_PRIVATE_UPLOAD, PATH_LOGGING = get_conf('PATH_PRIVATE_UPLOAD', 'PATH_LOGGING')
|
||||||
|
sensitive_path = None
|
||||||
|
path_or_url = os.path.relpath(path_or_url)
|
||||||
|
if path_or_url.startswith(PATH_LOGGING):
|
||||||
|
sensitive_path = PATH_LOGGING
|
||||||
|
if path_or_url.startswith(PATH_PRIVATE_UPLOAD):
|
||||||
|
sensitive_path = PATH_PRIVATE_UPLOAD
|
||||||
|
if sensitive_path:
|
||||||
|
token = request.cookies.get("access-token") or request.cookies.get("access-token-unsecure")
|
||||||
|
user = gradio_app.tokens.get(token) # get user
|
||||||
|
allowed_users = [user, 'autogen', default_user_name] # three user path that can be accessed
|
||||||
|
for user_allowed in allowed_users:
|
||||||
|
# exact match
|
||||||
|
if f"{os.sep}".join(path_or_url.split(os.sep)[:2]) == os.path.join(sensitive_path, user_allowed):
|
||||||
|
return True
|
||||||
|
return False # "越权访问!"
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
class Server(uvicorn.Server):
|
||||||
|
# A server that runs in a separate thread
|
||||||
|
def install_signal_handlers(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def run_in_thread(self):
|
||||||
|
self.thread = threading.Thread(target=self.run, daemon=True)
|
||||||
|
self.thread.start()
|
||||||
|
while not self.started:
|
||||||
|
time.sleep(1e-3)
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
self.should_exit = True
|
||||||
|
self.thread.join()
|
||||||
|
|
||||||
|
|
||||||
|
def start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SSL_CERTFILE):
|
||||||
|
import uvicorn
|
||||||
|
import fastapi
|
||||||
|
import gradio as gr
|
||||||
|
from fastapi import FastAPI
|
||||||
|
from gradio.routes import App
|
||||||
|
from toolbox import get_conf
|
||||||
|
CUSTOM_PATH, PATH_LOGGING = get_conf('CUSTOM_PATH', 'PATH_LOGGING')
|
||||||
|
|
||||||
|
# --- --- configurate gradio app block --- ---
|
||||||
|
app_block:gr.Blocks
|
||||||
|
app_block.ssl_verify = False
|
||||||
|
app_block.auth_message = '请登录'
|
||||||
|
app_block.favicon_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "docs/logo.png")
|
||||||
|
app_block.auth = AUTHENTICATION if len(AUTHENTICATION) != 0 else None
|
||||||
|
app_block.blocked_paths = ["config.py", "__pycache__", "config_private.py", "docker-compose.yml", "Dockerfile", f"{PATH_LOGGING}/admin"]
|
||||||
|
app_block.dev_mode = False
|
||||||
|
app_block.config = app_block.get_config_file()
|
||||||
|
app_block.enable_queue = True
|
||||||
|
app_block.queue(concurrency_count=CONCURRENT_COUNT)
|
||||||
|
app_block.validate_queue_settings()
|
||||||
|
app_block.show_api = False
|
||||||
|
app_block.config = app_block.get_config_file()
|
||||||
|
max_threads = 40
|
||||||
|
app_block.max_threads = max(
|
||||||
|
app_block._queue.max_thread_count if app_block.enable_queue else 0, max_threads
|
||||||
|
)
|
||||||
|
app_block.is_colab = False
|
||||||
|
app_block.is_kaggle = False
|
||||||
|
app_block.is_sagemaker = False
|
||||||
|
|
||||||
|
gradio_app = App.create_app(app_block)
|
||||||
|
|
||||||
|
# --- --- replace gradio endpoint to forbid access to sensitive files --- ---
|
||||||
|
if len(AUTHENTICATION) > 0:
|
||||||
|
dependencies = []
|
||||||
|
endpoint = None
|
||||||
|
for route in list(gradio_app.router.routes):
|
||||||
|
if route.path == "/file/{path:path}":
|
||||||
|
gradio_app.router.routes.remove(route)
|
||||||
|
if route.path == "/file={path_or_url:path}":
|
||||||
|
dependencies = route.dependencies
|
||||||
|
endpoint = route.endpoint
|
||||||
|
gradio_app.router.routes.remove(route)
|
||||||
|
@gradio_app.get("/file/{path:path}", dependencies=dependencies)
|
||||||
|
@gradio_app.head("/file={path_or_url:path}", dependencies=dependencies)
|
||||||
|
@gradio_app.get("/file={path_or_url:path}", dependencies=dependencies)
|
||||||
|
async def file(path_or_url: str, request: fastapi.Request):
|
||||||
|
if len(AUTHENTICATION) > 0:
|
||||||
|
if not _authorize_user(path_or_url, request, gradio_app):
|
||||||
|
return "越权访问!"
|
||||||
|
return await endpoint(path_or_url, request)
|
||||||
|
|
||||||
|
# --- --- app_lifespan --- ---
|
||||||
|
from contextlib import asynccontextmanager
|
||||||
|
@asynccontextmanager
|
||||||
|
async def app_lifespan(app):
|
||||||
|
async def startup_gradio_app():
|
||||||
|
if gradio_app.get_blocks().enable_queue:
|
||||||
|
gradio_app.get_blocks().startup_events()
|
||||||
|
async def shutdown_gradio_app():
|
||||||
|
pass
|
||||||
|
await startup_gradio_app() # startup logic here
|
||||||
|
yield # The application will serve requests after this point
|
||||||
|
await shutdown_gradio_app() # cleanup/shutdown logic here
|
||||||
|
|
||||||
|
# --- --- FastAPI --- ---
|
||||||
|
fastapi_app = FastAPI(lifespan=app_lifespan)
|
||||||
|
fastapi_app.mount(CUSTOM_PATH, gradio_app)
|
||||||
|
|
||||||
|
# --- --- favicon --- ---
|
||||||
|
if CUSTOM_PATH != '/':
|
||||||
|
from fastapi.responses import FileResponse
|
||||||
|
@fastapi_app.get("/favicon.ico")
|
||||||
|
async def favicon():
|
||||||
|
return FileResponse(app_block.favicon_path)
|
||||||
|
|
||||||
|
# --- --- uvicorn.Config --- ---
|
||||||
|
ssl_keyfile = None if SSL_KEYFILE == "" else SSL_KEYFILE
|
||||||
|
ssl_certfile = None if SSL_CERTFILE == "" else SSL_CERTFILE
|
||||||
|
server_name = "0.0.0.0"
|
||||||
|
config = uvicorn.Config(
|
||||||
|
fastapi_app,
|
||||||
|
host=server_name,
|
||||||
|
port=PORT,
|
||||||
|
reload=False,
|
||||||
|
log_level="warning",
|
||||||
|
ssl_keyfile=ssl_keyfile,
|
||||||
|
ssl_certfile=ssl_certfile,
|
||||||
|
)
|
||||||
|
server = Server(config)
|
||||||
|
url_host_name = "localhost" if server_name == "0.0.0.0" else server_name
|
||||||
|
if ssl_keyfile is not None:
|
||||||
|
if ssl_certfile is None:
|
||||||
|
raise ValueError(
|
||||||
|
"ssl_certfile must be provided if ssl_keyfile is provided."
|
||||||
|
)
|
||||||
|
path_to_local_server = f"https://{url_host_name}:{PORT}/"
|
||||||
|
else:
|
||||||
|
path_to_local_server = f"http://{url_host_name}:{PORT}/"
|
||||||
|
if CUSTOM_PATH != '/':
|
||||||
|
path_to_local_server += CUSTOM_PATH.lstrip('/').rstrip('/') + '/'
|
||||||
|
# --- --- begin --- ---
|
||||||
|
server.run_in_thread()
|
||||||
|
|
||||||
|
# --- --- after server launch --- ---
|
||||||
|
app_block.server = server
|
||||||
|
app_block.server_name = server_name
|
||||||
|
app_block.local_url = path_to_local_server
|
||||||
|
app_block.protocol = (
|
||||||
|
"https"
|
||||||
|
if app_block.local_url.startswith("https") or app_block.is_colab
|
||||||
|
else "http"
|
||||||
|
)
|
||||||
|
|
||||||
|
if app_block.enable_queue:
|
||||||
|
app_block._queue.set_url(path_to_local_server)
|
||||||
|
|
||||||
|
forbid_proxies = {
|
||||||
|
"http": "",
|
||||||
|
"https": "",
|
||||||
|
}
|
||||||
|
requests.get(f"{app_block.local_url}startup-events", verify=app_block.ssl_verify, proxies=forbid_proxies)
|
||||||
|
app_block.is_running = True
|
||||||
|
app_block.block_thread()
|
||||||
137
shared_utils/handle_upload.py
普通文件
137
shared_utils/handle_upload.py
普通文件
@@ -0,0 +1,137 @@
|
|||||||
|
import importlib
|
||||||
|
import time
|
||||||
|
import inspect
|
||||||
|
import re
|
||||||
|
import os
|
||||||
|
import base64
|
||||||
|
import gradio
|
||||||
|
import shutil
|
||||||
|
import glob
|
||||||
|
from shared_utils.config_loader import get_conf
|
||||||
|
|
||||||
|
def html_local_file(file):
|
||||||
|
base_path = os.path.dirname(__file__) # 项目目录
|
||||||
|
if os.path.exists(str(file)):
|
||||||
|
file = f'file={file.replace(base_path, ".")}'
|
||||||
|
return file
|
||||||
|
|
||||||
|
|
||||||
|
def html_local_img(__file, layout="left", max_width=None, max_height=None, md=True):
|
||||||
|
style = ""
|
||||||
|
if max_width is not None:
|
||||||
|
style += f"max-width: {max_width};"
|
||||||
|
if max_height is not None:
|
||||||
|
style += f"max-height: {max_height};"
|
||||||
|
__file = html_local_file(__file)
|
||||||
|
a = f'<div align="{layout}"><img src="{__file}" style="{style}"></div>'
|
||||||
|
if md:
|
||||||
|
a = f""
|
||||||
|
return a
|
||||||
|
|
||||||
|
|
||||||
|
def file_manifest_filter_type(file_list, filter_: list = None):
|
||||||
|
new_list = []
|
||||||
|
if not filter_:
|
||||||
|
filter_ = ["png", "jpg", "jpeg"]
|
||||||
|
for file in file_list:
|
||||||
|
if str(os.path.basename(file)).split(".")[-1] in filter_:
|
||||||
|
new_list.append(html_local_img(file, md=False))
|
||||||
|
else:
|
||||||
|
new_list.append(file)
|
||||||
|
return new_list
|
||||||
|
|
||||||
|
|
||||||
|
def zip_extract_member_new(self, member, targetpath, pwd):
|
||||||
|
# 修复中文乱码的问题
|
||||||
|
"""Extract the ZipInfo object 'member' to a physical
|
||||||
|
file on the path targetpath.
|
||||||
|
"""
|
||||||
|
import zipfile
|
||||||
|
if not isinstance(member, zipfile.ZipInfo):
|
||||||
|
member = self.getinfo(member)
|
||||||
|
|
||||||
|
# build the destination pathname, replacing
|
||||||
|
# forward slashes to platform specific separators.
|
||||||
|
arcname = member.filename.replace('/', os.path.sep)
|
||||||
|
arcname = arcname.encode('cp437', errors='replace').decode('gbk', errors='replace')
|
||||||
|
|
||||||
|
if os.path.altsep:
|
||||||
|
arcname = arcname.replace(os.path.altsep, os.path.sep)
|
||||||
|
# interpret absolute pathname as relative, remove drive letter or
|
||||||
|
# UNC path, redundant separators, "." and ".." components.
|
||||||
|
arcname = os.path.splitdrive(arcname)[1]
|
||||||
|
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
|
||||||
|
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
|
||||||
|
if x not in invalid_path_parts)
|
||||||
|
if os.path.sep == '\\':
|
||||||
|
# filter illegal characters on Windows
|
||||||
|
arcname = self._sanitize_windows_name(arcname, os.path.sep)
|
||||||
|
|
||||||
|
targetpath = os.path.join(targetpath, arcname)
|
||||||
|
targetpath = os.path.normpath(targetpath)
|
||||||
|
|
||||||
|
# Create all upper directories if necessary.
|
||||||
|
upperdirs = os.path.dirname(targetpath)
|
||||||
|
if upperdirs and not os.path.exists(upperdirs):
|
||||||
|
os.makedirs(upperdirs)
|
||||||
|
|
||||||
|
if member.is_dir():
|
||||||
|
if not os.path.isdir(targetpath):
|
||||||
|
os.mkdir(targetpath)
|
||||||
|
return targetpath
|
||||||
|
|
||||||
|
with self.open(member, pwd=pwd) as source, \
|
||||||
|
open(targetpath, "wb") as target:
|
||||||
|
shutil.copyfileobj(source, target)
|
||||||
|
|
||||||
|
return targetpath
|
||||||
|
|
||||||
|
|
||||||
|
def extract_archive(file_path, dest_dir):
|
||||||
|
import zipfile
|
||||||
|
import tarfile
|
||||||
|
import os
|
||||||
|
|
||||||
|
# Get the file extension of the input file
|
||||||
|
file_extension = os.path.splitext(file_path)[1]
|
||||||
|
|
||||||
|
# Extract the archive based on its extension
|
||||||
|
if file_extension == ".zip":
|
||||||
|
with zipfile.ZipFile(file_path, "r") as zipobj:
|
||||||
|
zipobj._extract_member = lambda a,b,c: zip_extract_member_new(zipobj, a,b,c) # 修复中文乱码的问题
|
||||||
|
zipobj.extractall(path=dest_dir)
|
||||||
|
print("Successfully extracted zip archive to {}".format(dest_dir))
|
||||||
|
|
||||||
|
elif file_extension in [".tar", ".gz", ".bz2"]:
|
||||||
|
with tarfile.open(file_path, "r:*") as tarobj:
|
||||||
|
tarobj.extractall(path=dest_dir)
|
||||||
|
print("Successfully extracted tar archive to {}".format(dest_dir))
|
||||||
|
|
||||||
|
# 第三方库,需要预先pip install rarfile
|
||||||
|
# 此外,Windows上还需要安装winrar软件,配置其Path环境变量,如"C:\Program Files\WinRAR"才可以
|
||||||
|
elif file_extension == ".rar":
|
||||||
|
try:
|
||||||
|
import rarfile
|
||||||
|
|
||||||
|
with rarfile.RarFile(file_path) as rf:
|
||||||
|
rf.extractall(path=dest_dir)
|
||||||
|
print("Successfully extracted rar archive to {}".format(dest_dir))
|
||||||
|
except:
|
||||||
|
print("Rar format requires additional dependencies to install")
|
||||||
|
return "\n\n解压失败! 需要安装pip install rarfile来解压rar文件。建议:使用zip压缩格式。"
|
||||||
|
|
||||||
|
# 第三方库,需要预先pip install py7zr
|
||||||
|
elif file_extension == ".7z":
|
||||||
|
try:
|
||||||
|
import py7zr
|
||||||
|
|
||||||
|
with py7zr.SevenZipFile(file_path, mode="r") as f:
|
||||||
|
f.extractall(path=dest_dir)
|
||||||
|
print("Successfully extracted 7z archive to {}".format(dest_dir))
|
||||||
|
except:
|
||||||
|
print("7z format requires additional dependencies to install")
|
||||||
|
return "\n\n解压失败! 需要安装pip install py7zr来解压7z文件"
|
||||||
|
else:
|
||||||
|
return ""
|
||||||
|
return ""
|
||||||
|
|
||||||
@@ -28,6 +28,11 @@ def is_api2d_key(key):
|
|||||||
return bool(API_MATCH_API2D)
|
return bool(API_MATCH_API2D)
|
||||||
|
|
||||||
|
|
||||||
|
def is_cohere_api_key(key):
|
||||||
|
API_MATCH_AZURE = re.match(r"[a-zA-Z0-9]{40}$", key)
|
||||||
|
return bool(API_MATCH_AZURE)
|
||||||
|
|
||||||
|
|
||||||
def is_any_api_key(key):
|
def is_any_api_key(key):
|
||||||
if ',' in key:
|
if ',' in key:
|
||||||
keys = key.split(',')
|
keys = key.split(',')
|
||||||
@@ -35,7 +40,7 @@ def is_any_api_key(key):
|
|||||||
if is_any_api_key(k): return True
|
if is_any_api_key(k): return True
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
return is_openai_api_key(key) or is_api2d_key(key) or is_azure_api_key(key)
|
return is_openai_api_key(key) or is_api2d_key(key) or is_azure_api_key(key) or is_cohere_api_key(key)
|
||||||
|
|
||||||
|
|
||||||
def what_keys(keys):
|
def what_keys(keys):
|
||||||
@@ -62,7 +67,7 @@ def select_api_key(keys, llm_model):
|
|||||||
avail_key_list = []
|
avail_key_list = []
|
||||||
key_list = keys.split(',')
|
key_list = keys.split(',')
|
||||||
|
|
||||||
if llm_model.startswith('gpt-'):
|
if llm_model.startswith('gpt-') or llm_model.startswith('one-api-'):
|
||||||
for k in key_list:
|
for k in key_list:
|
||||||
if is_openai_api_key(k): avail_key_list.append(k)
|
if is_openai_api_key(k): avail_key_list.append(k)
|
||||||
|
|
||||||
@@ -74,8 +79,12 @@ def select_api_key(keys, llm_model):
|
|||||||
for k in key_list:
|
for k in key_list:
|
||||||
if is_azure_api_key(k): avail_key_list.append(k)
|
if is_azure_api_key(k): avail_key_list.append(k)
|
||||||
|
|
||||||
|
if llm_model.startswith('cohere-'):
|
||||||
|
for k in key_list:
|
||||||
|
if is_cohere_api_key(k): avail_key_list.append(k)
|
||||||
|
|
||||||
if len(avail_key_list) == 0:
|
if len(avail_key_list) == 0:
|
||||||
raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源(右下角更换模型菜单中可切换openai,azure,claude,api2d等请求源)。")
|
raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源(左上角更换模型菜单中可切换openai,azure,claude,cohere等请求源)。")
|
||||||
|
|
||||||
api_key = random.choice(avail_key_list) # 随机负载均衡
|
api_key = random.choice(avail_key_list) # 随机负载均衡
|
||||||
return api_key
|
return api_key
|
||||||
|
|||||||
34
shared_utils/map_names.py
普通文件
34
shared_utils/map_names.py
普通文件
@@ -0,0 +1,34 @@
|
|||||||
|
import re
|
||||||
|
mapping_dic = {
|
||||||
|
# "qianfan": "qianfan(文心一言大模型)",
|
||||||
|
# "zhipuai": "zhipuai(智谱GLM4超级模型🔥)",
|
||||||
|
# "gpt-4-1106-preview": "gpt-4-1106-preview(新调优版本GPT-4🔥)",
|
||||||
|
# "gpt-4-vision-preview": "gpt-4-vision-preview(识图模型GPT-4V)",
|
||||||
|
}
|
||||||
|
|
||||||
|
rev_mapping_dic = {}
|
||||||
|
for k, v in mapping_dic.items():
|
||||||
|
rev_mapping_dic[v] = k
|
||||||
|
|
||||||
|
def map_model_to_friendly_names(m):
|
||||||
|
if m in mapping_dic:
|
||||||
|
return mapping_dic[m]
|
||||||
|
return m
|
||||||
|
|
||||||
|
def map_friendly_names_to_model(m):
|
||||||
|
if m in rev_mapping_dic:
|
||||||
|
return rev_mapping_dic[m]
|
||||||
|
return m
|
||||||
|
|
||||||
|
def read_one_api_model_name(model: str):
|
||||||
|
"""return real model name and max_token.
|
||||||
|
"""
|
||||||
|
max_token_pattern = r"\(max_token=(\d+)\)"
|
||||||
|
match = re.search(max_token_pattern, model)
|
||||||
|
if match:
|
||||||
|
max_token_tmp = match.group(1) # 获取 max_token 的值
|
||||||
|
max_token_tmp = int(max_token_tmp)
|
||||||
|
model = re.sub(max_token_pattern, "", model) # 从原字符串中删除 "(max_token=...)"
|
||||||
|
else:
|
||||||
|
max_token_tmp = 4096
|
||||||
|
return model, max_token_tmp
|
||||||
@@ -11,28 +11,45 @@ def validate_path():
|
|||||||
|
|
||||||
|
|
||||||
validate_path() # validate path so you can run from base directory
|
validate_path() # validate path so you can run from base directory
|
||||||
if __name__ == "__main__":
|
|
||||||
# from request_llms.bridge_newbingfree import predict_no_ui_long_connection
|
|
||||||
# from request_llms.bridge_moss import predict_no_ui_long_connection
|
|
||||||
# from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
|
|
||||||
# from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection
|
|
||||||
# from request_llms.bridge_claude import predict_no_ui_long_connection
|
|
||||||
# from request_llms.bridge_internlm import predict_no_ui_long_connection
|
|
||||||
# from request_llms.bridge_deepseekcoder import predict_no_ui_long_connection
|
|
||||||
# from request_llms.bridge_qwen_7B import predict_no_ui_long_connection
|
|
||||||
from request_llms.bridge_qwen_local import predict_no_ui_long_connection
|
|
||||||
|
|
||||||
# from request_llms.bridge_spark import predict_no_ui_long_connection
|
if "在线模型":
|
||||||
# from request_llms.bridge_zhipu import predict_no_ui_long_connection
|
if __name__ == "__main__":
|
||||||
# from request_llms.bridge_chatglm3 import predict_no_ui_long_connection
|
from request_llms.bridge_cohere import predict_no_ui_long_connection
|
||||||
|
# from request_llms.bridge_spark import predict_no_ui_long_connection
|
||||||
|
# from request_llms.bridge_zhipu import predict_no_ui_long_connection
|
||||||
|
# from request_llms.bridge_chatglm3 import predict_no_ui_long_connection
|
||||||
|
llm_kwargs = {
|
||||||
|
"llm_model": "command-r-plus",
|
||||||
|
"max_length": 4096,
|
||||||
|
"top_p": 1,
|
||||||
|
"temperature": 1,
|
||||||
|
}
|
||||||
|
|
||||||
llm_kwargs = {
|
result = predict_no_ui_long_connection(
|
||||||
"max_length": 4096,
|
inputs="请问什么是质子?", llm_kwargs=llm_kwargs, history=["你好", "我好!"], sys_prompt="系统"
|
||||||
"top_p": 1,
|
)
|
||||||
"temperature": 1,
|
print("final result:", result)
|
||||||
}
|
print("final result:", result)
|
||||||
|
|
||||||
|
|
||||||
|
if "本地模型":
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# from request_llms.bridge_newbingfree import predict_no_ui_long_connection
|
||||||
|
# from request_llms.bridge_moss import predict_no_ui_long_connection
|
||||||
|
# from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
|
||||||
|
# from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection
|
||||||
|
# from request_llms.bridge_claude import predict_no_ui_long_connection
|
||||||
|
# from request_llms.bridge_internlm import predict_no_ui_long_connection
|
||||||
|
# from request_llms.bridge_deepseekcoder import predict_no_ui_long_connection
|
||||||
|
# from request_llms.bridge_qwen_7B import predict_no_ui_long_connection
|
||||||
|
# from request_llms.bridge_qwen_local import predict_no_ui_long_connection
|
||||||
|
llm_kwargs = {
|
||||||
|
"max_length": 4096,
|
||||||
|
"top_p": 1,
|
||||||
|
"temperature": 1,
|
||||||
|
}
|
||||||
|
result = predict_no_ui_long_connection(
|
||||||
|
inputs="请问什么是质子?", llm_kwargs=llm_kwargs, history=["你好", "我好!"], sys_prompt=""
|
||||||
|
)
|
||||||
|
print("final result:", result)
|
||||||
|
|
||||||
result = predict_no_ui_long_connection(
|
|
||||||
inputs="请问什么是质子?", llm_kwargs=llm_kwargs, history=["你好", "我好!"], sys_prompt=""
|
|
||||||
)
|
|
||||||
print("final result:", result)
|
|
||||||
|
|||||||
@@ -20,10 +20,10 @@ if __name__ == "__main__":
|
|||||||
|
|
||||||
# plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
|
# plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
|
||||||
|
|
||||||
# plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2307.07522")
|
# plugin_test(plugin='crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF', main_input="2307.07522")
|
||||||
|
|
||||||
plugin_test(
|
plugin_test(
|
||||||
plugin="crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF",
|
plugin="crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF",
|
||||||
main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix",
|
main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix",
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -66,7 +66,7 @@ if __name__ == "__main__":
|
|||||||
|
|
||||||
# plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="远程云服务器部署?")
|
# plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="远程云服务器部署?")
|
||||||
|
|
||||||
# plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2210.03629")
|
# plugin_test(plugin='crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF', main_input="2210.03629")
|
||||||
|
|
||||||
# advanced_arg = {"advanced_arg":"--llm_to_learn=gpt-3.5-turbo --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、人设进行描写。要求:100字以内,用第二人称。' --system_prompt=''" }
|
# advanced_arg = {"advanced_arg":"--llm_to_learn=gpt-3.5-turbo --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、人设进行描写。要求:100字以内,用第二人称。' --system_prompt=''" }
|
||||||
# plugin_test(plugin='crazy_functions.chatglm微调工具->微调数据集生成', main_input='build/dev.json', advanced_arg=advanced_arg)
|
# plugin_test(plugin='crazy_functions.chatglm微调工具->微调数据集生成', main_input='build/dev.json', advanced_arg=advanced_arg)
|
||||||
|
|||||||
@@ -1,296 +1 @@
|
|||||||
/**
|
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
|
||||||
* base64.ts
|
|
||||||
*
|
|
||||||
* Licensed under the BSD 3-Clause License.
|
|
||||||
* http://opensource.org/licenses/BSD-3-Clause
|
|
||||||
*
|
|
||||||
* References:
|
|
||||||
* http://en.wikipedia.org/wiki/Base64
|
|
||||||
*
|
|
||||||
* @author Dan Kogai (https://github.com/dankogai)
|
|
||||||
*/
|
|
||||||
const version = '3.7.2';
|
|
||||||
/**
|
|
||||||
* @deprecated use lowercase `version`.
|
|
||||||
*/
|
|
||||||
const VERSION = version;
|
|
||||||
const _hasatob = typeof atob === 'function';
|
|
||||||
const _hasbtoa = typeof btoa === 'function';
|
|
||||||
const _hasBuffer = typeof Buffer === 'function';
|
|
||||||
const _TD = typeof TextDecoder === 'function' ? new TextDecoder() : undefined;
|
|
||||||
const _TE = typeof TextEncoder === 'function' ? new TextEncoder() : undefined;
|
|
||||||
const b64ch = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=';
|
|
||||||
const b64chs = Array.prototype.slice.call(b64ch);
|
|
||||||
const b64tab = ((a) => {
|
|
||||||
let tab = {};
|
|
||||||
a.forEach((c, i) => tab[c] = i);
|
|
||||||
return tab;
|
|
||||||
})(b64chs);
|
|
||||||
const b64re = /^(?:[A-Za-z\d+\/]{4})*?(?:[A-Za-z\d+\/]{2}(?:==)?|[A-Za-z\d+\/]{3}=?)?$/;
|
|
||||||
const _fromCC = String.fromCharCode.bind(String);
|
|
||||||
const _U8Afrom = typeof Uint8Array.from === 'function'
|
|
||||||
? Uint8Array.from.bind(Uint8Array)
|
|
||||||
: (it, fn = (x) => x) => new Uint8Array(Array.prototype.slice.call(it, 0).map(fn));
|
|
||||||
const _mkUriSafe = (src) => src
|
|
||||||
.replace(/=/g, '').replace(/[+\/]/g, (m0) => m0 == '+' ? '-' : '_');
|
|
||||||
const _tidyB64 = (s) => s.replace(/[^A-Za-z0-9\+\/]/g, '');
|
|
||||||
/**
|
|
||||||
* polyfill version of `btoa`
|
|
||||||
*/
|
|
||||||
const btoaPolyfill = (bin) => {
|
|
||||||
// console.log('polyfilled');
|
|
||||||
let u32, c0, c1, c2, asc = '';
|
|
||||||
const pad = bin.length % 3;
|
|
||||||
for (let i = 0; i < bin.length;) {
|
|
||||||
if ((c0 = bin.charCodeAt(i++)) > 255 ||
|
|
||||||
(c1 = bin.charCodeAt(i++)) > 255 ||
|
|
||||||
(c2 = bin.charCodeAt(i++)) > 255)
|
|
||||||
throw new TypeError('invalid character found');
|
|
||||||
u32 = (c0 << 16) | (c1 << 8) | c2;
|
|
||||||
asc += b64chs[u32 >> 18 & 63]
|
|
||||||
+ b64chs[u32 >> 12 & 63]
|
|
||||||
+ b64chs[u32 >> 6 & 63]
|
|
||||||
+ b64chs[u32 & 63];
|
|
||||||
}
|
|
||||||
return pad ? asc.slice(0, pad - 3) + "===".substring(pad) : asc;
|
|
||||||
};
|
|
||||||
/**
|
|
||||||
* does what `window.btoa` of web browsers do.
|
|
||||||
* @param {String} bin binary string
|
|
||||||
* @returns {string} Base64-encoded string
|
|
||||||
*/
|
|
||||||
const _btoa = _hasbtoa ? (bin) => btoa(bin)
|
|
||||||
: _hasBuffer ? (bin) => Buffer.from(bin, 'binary').toString('base64')
|
|
||||||
: btoaPolyfill;
|
|
||||||
const _fromUint8Array = _hasBuffer
|
|
||||||
? (u8a) => Buffer.from(u8a).toString('base64')
|
|
||||||
: (u8a) => {
|
|
||||||
// cf. https://stackoverflow.com/questions/12710001/how-to-convert-uint8-array-to-base64-encoded-string/12713326#12713326
|
|
||||||
const maxargs = 0x1000;
|
|
||||||
let strs = [];
|
|
||||||
for (let i = 0, l = u8a.length; i < l; i += maxargs) {
|
|
||||||
strs.push(_fromCC.apply(null, u8a.subarray(i, i + maxargs)));
|
|
||||||
}
|
|
||||||
return _btoa(strs.join(''));
|
|
||||||
};
|
|
||||||
/**
|
|
||||||
* converts a Uint8Array to a Base64 string.
|
|
||||||
* @param {boolean} [urlsafe] URL-and-filename-safe a la RFC4648 §5
|
|
||||||
* @returns {string} Base64 string
|
|
||||||
*/
|
|
||||||
const fromUint8Array = (u8a, urlsafe = false) => urlsafe ? _mkUriSafe(_fromUint8Array(u8a)) : _fromUint8Array(u8a);
|
|
||||||
// This trick is found broken https://github.com/dankogai/js-base64/issues/130
|
|
||||||
// const utob = (src: string) => unescape(encodeURIComponent(src));
|
|
||||||
// reverting good old fationed regexp
|
|
||||||
const cb_utob = (c) => {
|
|
||||||
if (c.length < 2) {
|
|
||||||
var cc = c.charCodeAt(0);
|
|
||||||
return cc < 0x80 ? c
|
|
||||||
: cc < 0x800 ? (_fromCC(0xc0 | (cc >>> 6))
|
|
||||||
+ _fromCC(0x80 | (cc & 0x3f)))
|
|
||||||
: (_fromCC(0xe0 | ((cc >>> 12) & 0x0f))
|
|
||||||
+ _fromCC(0x80 | ((cc >>> 6) & 0x3f))
|
|
||||||
+ _fromCC(0x80 | (cc & 0x3f)));
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
var cc = 0x10000
|
|
||||||
+ (c.charCodeAt(0) - 0xD800) * 0x400
|
|
||||||
+ (c.charCodeAt(1) - 0xDC00);
|
|
||||||
return (_fromCC(0xf0 | ((cc >>> 18) & 0x07))
|
|
||||||
+ _fromCC(0x80 | ((cc >>> 12) & 0x3f))
|
|
||||||
+ _fromCC(0x80 | ((cc >>> 6) & 0x3f))
|
|
||||||
+ _fromCC(0x80 | (cc & 0x3f)));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
const re_utob = /[\uD800-\uDBFF][\uDC00-\uDFFFF]|[^\x00-\x7F]/g;
|
|
||||||
/**
|
|
||||||
* @deprecated should have been internal use only.
|
|
||||||
* @param {string} src UTF-8 string
|
|
||||||
* @returns {string} UTF-16 string
|
|
||||||
*/
|
|
||||||
const utob = (u) => u.replace(re_utob, cb_utob);
|
|
||||||
//
|
|
||||||
const _encode = _hasBuffer
|
|
||||||
? (s) => Buffer.from(s, 'utf8').toString('base64')
|
|
||||||
: _TE
|
|
||||||
? (s) => _fromUint8Array(_TE.encode(s))
|
|
||||||
: (s) => _btoa(utob(s));
|
|
||||||
/**
|
|
||||||
* converts a UTF-8-encoded string to a Base64 string.
|
|
||||||
* @param {boolean} [urlsafe] if `true` make the result URL-safe
|
|
||||||
* @returns {string} Base64 string
|
|
||||||
*/
|
|
||||||
const encode = (src, urlsafe = false) => urlsafe
|
|
||||||
? _mkUriSafe(_encode(src))
|
|
||||||
: _encode(src);
|
|
||||||
/**
|
|
||||||
* converts a UTF-8-encoded string to URL-safe Base64 RFC4648 §5.
|
|
||||||
* @returns {string} Base64 string
|
|
||||||
*/
|
|
||||||
const encodeURI = (src) => encode(src, true);
|
|
||||||
// This trick is found broken https://github.com/dankogai/js-base64/issues/130
|
|
||||||
// const btou = (src: string) => decodeURIComponent(escape(src));
|
|
||||||
// reverting good old fationed regexp
|
|
||||||
const re_btou = /[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF]{2}|[\xF0-\xF7][\x80-\xBF]{3}/g;
|
|
||||||
const cb_btou = (cccc) => {
|
|
||||||
switch (cccc.length) {
|
|
||||||
case 4:
|
|
||||||
var cp = ((0x07 & cccc.charCodeAt(0)) << 18)
|
|
||||||
| ((0x3f & cccc.charCodeAt(1)) << 12)
|
|
||||||
| ((0x3f & cccc.charCodeAt(2)) << 6)
|
|
||||||
| (0x3f & cccc.charCodeAt(3)), offset = cp - 0x10000;
|
|
||||||
return (_fromCC((offset >>> 10) + 0xD800)
|
|
||||||
+ _fromCC((offset & 0x3FF) + 0xDC00));
|
|
||||||
case 3:
|
|
||||||
return _fromCC(((0x0f & cccc.charCodeAt(0)) << 12)
|
|
||||||
| ((0x3f & cccc.charCodeAt(1)) << 6)
|
|
||||||
| (0x3f & cccc.charCodeAt(2)));
|
|
||||||
default:
|
|
||||||
return _fromCC(((0x1f & cccc.charCodeAt(0)) << 6)
|
|
||||||
| (0x3f & cccc.charCodeAt(1)));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
/**
|
|
||||||
* @deprecated should have been internal use only.
|
|
||||||
* @param {string} src UTF-16 string
|
|
||||||
* @returns {string} UTF-8 string
|
|
||||||
*/
|
|
||||||
const btou = (b) => b.replace(re_btou, cb_btou);
|
|
||||||
/**
|
|
||||||
* polyfill version of `atob`
|
|
||||||
*/
|
|
||||||
const atobPolyfill = (asc) => {
|
|
||||||
// console.log('polyfilled');
|
|
||||||
asc = asc.replace(/\s+/g, '');
|
|
||||||
if (!b64re.test(asc))
|
|
||||||
throw new TypeError('malformed base64.');
|
|
||||||
asc += '=='.slice(2 - (asc.length & 3));
|
|
||||||
let u24, bin = '', r1, r2;
|
|
||||||
for (let i = 0; i < asc.length;) {
|
|
||||||
u24 = b64tab[asc.charAt(i++)] << 18
|
|
||||||
| b64tab[asc.charAt(i++)] << 12
|
|
||||||
| (r1 = b64tab[asc.charAt(i++)]) << 6
|
|
||||||
| (r2 = b64tab[asc.charAt(i++)]);
|
|
||||||
bin += r1 === 64 ? _fromCC(u24 >> 16 & 255)
|
|
||||||
: r2 === 64 ? _fromCC(u24 >> 16 & 255, u24 >> 8 & 255)
|
|
||||||
: _fromCC(u24 >> 16 & 255, u24 >> 8 & 255, u24 & 255);
|
|
||||||
}
|
|
||||||
return bin;
|
|
||||||
};
|
|
||||||
/**
|
|
||||||
* does what `window.atob` of web browsers do.
|
|
||||||
* @param {String} asc Base64-encoded string
|
|
||||||
* @returns {string} binary string
|
|
||||||
*/
|
|
||||||
const _atob = _hasatob ? (asc) => atob(_tidyB64(asc))
|
|
||||||
: _hasBuffer ? (asc) => Buffer.from(asc, 'base64').toString('binary')
|
|
||||||
: atobPolyfill;
|
|
||||||
//
|
|
||||||
const _toUint8Array = _hasBuffer
|
|
||||||
? (a) => _U8Afrom(Buffer.from(a, 'base64'))
|
|
||||||
: (a) => _U8Afrom(_atob(a), c => c.charCodeAt(0));
|
|
||||||
/**
|
|
||||||
* converts a Base64 string to a Uint8Array.
|
|
||||||
*/
|
|
||||||
const toUint8Array = (a) => _toUint8Array(_unURI(a));
|
|
||||||
//
|
|
||||||
const _decode = _hasBuffer
|
|
||||||
? (a) => Buffer.from(a, 'base64').toString('utf8')
|
|
||||||
: _TD
|
|
||||||
? (a) => _TD.decode(_toUint8Array(a))
|
|
||||||
: (a) => btou(_atob(a));
|
|
||||||
const _unURI = (a) => _tidyB64(a.replace(/[-_]/g, (m0) => m0 == '-' ? '+' : '/'));
|
|
||||||
/**
|
|
||||||
* converts a Base64 string to a UTF-8 string.
|
|
||||||
* @param {String} src Base64 string. Both normal and URL-safe are supported
|
|
||||||
* @returns {string} UTF-8 string
|
|
||||||
*/
|
|
||||||
const decode = (src) => _decode(_unURI(src));
|
|
||||||
/**
|
|
||||||
* check if a value is a valid Base64 string
|
|
||||||
* @param {String} src a value to check
|
|
||||||
*/
|
|
||||||
const isValid = (src) => {
|
|
||||||
if (typeof src !== 'string')
|
|
||||||
return false;
|
|
||||||
const s = src.replace(/\s+/g, '').replace(/={0,2}$/, '');
|
|
||||||
return !/[^\s0-9a-zA-Z\+/]/.test(s) || !/[^\s0-9a-zA-Z\-_]/.test(s);
|
|
||||||
};
|
|
||||||
//
|
|
||||||
const _noEnum = (v) => {
|
|
||||||
return {
|
|
||||||
value: v, enumerable: false, writable: true, configurable: true
|
|
||||||
};
|
|
||||||
};
|
|
||||||
/**
|
|
||||||
* extend String.prototype with relevant methods
|
|
||||||
*/
|
|
||||||
const extendString = function () {
|
|
||||||
const _add = (name, body) => Object.defineProperty(String.prototype, name, _noEnum(body));
|
|
||||||
_add('fromBase64', function () { return decode(this); });
|
|
||||||
_add('toBase64', function (urlsafe) { return encode(this, urlsafe); });
|
|
||||||
_add('toBase64URI', function () { return encode(this, true); });
|
|
||||||
_add('toBase64URL', function () { return encode(this, true); });
|
|
||||||
_add('toUint8Array', function () { return toUint8Array(this); });
|
|
||||||
};
|
|
||||||
/**
|
|
||||||
* extend Uint8Array.prototype with relevant methods
|
|
||||||
*/
|
|
||||||
const extendUint8Array = function () {
|
|
||||||
const _add = (name, body) => Object.defineProperty(Uint8Array.prototype, name, _noEnum(body));
|
|
||||||
_add('toBase64', function (urlsafe) { return fromUint8Array(this, urlsafe); });
|
|
||||||
_add('toBase64URI', function () { return fromUint8Array(this, true); });
|
|
||||||
_add('toBase64URL', function () { return fromUint8Array(this, true); });
|
|
||||||
};
|
|
||||||
/**
|
|
||||||
* extend Builtin prototypes with relevant methods
|
|
||||||
*/
|
|
||||||
const extendBuiltins = () => {
|
|
||||||
extendString();
|
|
||||||
extendUint8Array();
|
|
||||||
};
|
|
||||||
const gBase64 = {
|
|
||||||
version: version,
|
|
||||||
VERSION: VERSION,
|
|
||||||
atob: _atob,
|
|
||||||
atobPolyfill: atobPolyfill,
|
|
||||||
btoa: _btoa,
|
|
||||||
btoaPolyfill: btoaPolyfill,
|
|
||||||
fromBase64: decode,
|
|
||||||
toBase64: encode,
|
|
||||||
encode: encode,
|
|
||||||
encodeURI: encodeURI,
|
|
||||||
encodeURL: encodeURI,
|
|
||||||
utob: utob,
|
|
||||||
btou: btou,
|
|
||||||
decode: decode,
|
|
||||||
isValid: isValid,
|
|
||||||
fromUint8Array: fromUint8Array,
|
|
||||||
toUint8Array: toUint8Array,
|
|
||||||
extendString: extendString,
|
|
||||||
extendUint8Array: extendUint8Array,
|
|
||||||
extendBuiltins: extendBuiltins,
|
|
||||||
};
|
|
||||||
// makecjs:CUT //
|
|
||||||
export { version };
|
|
||||||
export { VERSION };
|
|
||||||
export { _atob as atob };
|
|
||||||
export { atobPolyfill };
|
|
||||||
export { _btoa as btoa };
|
|
||||||
export { btoaPolyfill };
|
|
||||||
export { decode as fromBase64 };
|
|
||||||
export { encode as toBase64 };
|
|
||||||
export { utob };
|
|
||||||
export { encode };
|
|
||||||
export { encodeURI };
|
|
||||||
export { encodeURI as encodeURL };
|
|
||||||
export { btou };
|
|
||||||
export { decode };
|
|
||||||
export { isValid };
|
|
||||||
export { fromUint8Array };
|
|
||||||
export { toUint8Array };
|
|
||||||
export { extendString };
|
|
||||||
export { extendUint8Array };
|
|
||||||
export { extendBuiltins };
|
|
||||||
// and finally,
|
|
||||||
export { gBase64 as Base64 };
|
|
||||||
|
|||||||
@@ -59,6 +59,7 @@
|
|||||||
|
|
||||||
/* Scrollbar Width */
|
/* Scrollbar Width */
|
||||||
::-webkit-scrollbar {
|
::-webkit-scrollbar {
|
||||||
|
height: 12px;
|
||||||
width: 12px;
|
width: 12px;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
273
themes/common.js
273
themes/common.js
@@ -2,6 +2,76 @@
|
|||||||
// 第 1 部分: 工具函数
|
// 第 1 部分: 工具函数
|
||||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||||
|
|
||||||
|
function push_data_to_gradio_component(DAT, ELEM_ID, TYPE) {
|
||||||
|
// type, // type==="str" / type==="float"
|
||||||
|
if (TYPE == "str") {
|
||||||
|
// convert dat to string: do nothing
|
||||||
|
}
|
||||||
|
else if (TYPE == "no_conversion") {
|
||||||
|
// no nothing
|
||||||
|
}
|
||||||
|
else if (TYPE == "float") {
|
||||||
|
// convert dat to float
|
||||||
|
DAT = parseFloat(DAT);
|
||||||
|
}
|
||||||
|
const myEvent = new CustomEvent('gpt_academic_update_gradio_component', {
|
||||||
|
detail: {
|
||||||
|
data: DAT,
|
||||||
|
elem_id: ELEM_ID,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
window.dispatchEvent(myEvent);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
async function get_gradio_component(ELEM_ID) {
|
||||||
|
function waitFor(ELEM_ID) {
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
const myEvent = new CustomEvent('gpt_academic_get_gradio_component_value', {
|
||||||
|
detail: {
|
||||||
|
elem_id: ELEM_ID,
|
||||||
|
resolve,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
window.dispatchEvent(myEvent);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
result = await waitFor(ELEM_ID);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
async function get_data_from_gradio_component(ELEM_ID) {
|
||||||
|
let comp = await get_gradio_component(ELEM_ID);
|
||||||
|
return comp.props.value;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function update_array(arr, item, mode) {
|
||||||
|
// // Remove "输入清除键"
|
||||||
|
// p = updateArray(p, "输入清除键", "remove");
|
||||||
|
// console.log(p); // Should log: ["基础功能区", "函数插件区"]
|
||||||
|
|
||||||
|
// // Add "输入清除键"
|
||||||
|
// p = updateArray(p, "输入清除键", "add");
|
||||||
|
// console.log(p); // Should log: ["基础功能区", "函数插件区", "输入清除键"]
|
||||||
|
|
||||||
|
const index = arr.indexOf(item);
|
||||||
|
if (mode === "remove") {
|
||||||
|
if (index !== -1) {
|
||||||
|
// Item found, remove it
|
||||||
|
arr.splice(index, 1);
|
||||||
|
}
|
||||||
|
} else if (mode === "add") {
|
||||||
|
if (index === -1) {
|
||||||
|
// Item not found, add it
|
||||||
|
arr.push(item);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return arr;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
function gradioApp() {
|
function gradioApp() {
|
||||||
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
|
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
|
||||||
const elems = document.getElementsByTagName('gradio-app');
|
const elems = document.getElementsByTagName('gradio-app');
|
||||||
@@ -14,6 +84,7 @@ function gradioApp() {
|
|||||||
return elem.shadowRoot ? elem.shadowRoot : elem;
|
return elem.shadowRoot ? elem.shadowRoot : elem;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function setCookie(name, value, days) {
|
function setCookie(name, value, days) {
|
||||||
var expires = "";
|
var expires = "";
|
||||||
|
|
||||||
@@ -26,6 +97,7 @@ function setCookie(name, value, days) {
|
|||||||
document.cookie = name + "=" + value + expires + "; path=/";
|
document.cookie = name + "=" + value + expires + "; path=/";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function getCookie(name) {
|
function getCookie(name) {
|
||||||
var decodedCookie = decodeURIComponent(document.cookie);
|
var decodedCookie = decodeURIComponent(document.cookie);
|
||||||
var cookies = decodedCookie.split(';');
|
var cookies = decodedCookie.split(';');
|
||||||
@@ -41,6 +113,7 @@ function getCookie(name) {
|
|||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
let toastCount = 0;
|
let toastCount = 0;
|
||||||
function toast_push(msg, duration) {
|
function toast_push(msg, duration) {
|
||||||
duration = isNaN(duration) ? 3000 : duration;
|
duration = isNaN(duration) ? 3000 : duration;
|
||||||
@@ -63,6 +136,7 @@ function toast_push(msg, duration) {
|
|||||||
toastCount++;
|
toastCount++;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function toast_up(msg) {
|
function toast_up(msg) {
|
||||||
var m = document.getElementById('toast_up');
|
var m = document.getElementById('toast_up');
|
||||||
if (m) {
|
if (m) {
|
||||||
@@ -75,6 +149,7 @@ function toast_up(msg) {
|
|||||||
document.body.appendChild(m);
|
document.body.appendChild(m);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function toast_down() {
|
function toast_down() {
|
||||||
var m = document.getElementById('toast_up');
|
var m = document.getElementById('toast_up');
|
||||||
if (m) {
|
if (m) {
|
||||||
@@ -82,6 +157,7 @@ function toast_down() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function begin_loading_status() {
|
function begin_loading_status() {
|
||||||
// Create the loader div and add styling
|
// Create the loader div and add styling
|
||||||
var loader = document.createElement('div');
|
var loader = document.createElement('div');
|
||||||
@@ -234,7 +310,7 @@ let timeoutID = null;
|
|||||||
let lastInvocationTime = 0;
|
let lastInvocationTime = 0;
|
||||||
let lastArgs = null;
|
let lastArgs = null;
|
||||||
function do_something_but_not_too_frequently(min_interval, func) {
|
function do_something_but_not_too_frequently(min_interval, func) {
|
||||||
return function(...args) {
|
return function (...args) {
|
||||||
lastArgs = args;
|
lastArgs = args;
|
||||||
const now = Date.now();
|
const now = Date.now();
|
||||||
if (!lastInvocationTime || (now - lastInvocationTime) >= min_interval) {
|
if (!lastInvocationTime || (now - lastInvocationTime) >= min_interval) {
|
||||||
@@ -256,6 +332,7 @@ function do_something_but_not_too_frequently(min_interval, func) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function chatbotContentChanged(attempt = 1, force = false) {
|
function chatbotContentChanged(attempt = 1, force = false) {
|
||||||
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
|
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
|
||||||
for (var i = 0; i < attempt; i++) {
|
for (var i = 0; i < attempt; i++) {
|
||||||
@@ -263,13 +340,8 @@ function chatbotContentChanged(attempt = 1, force = false) {
|
|||||||
gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton);
|
gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton);
|
||||||
}, i === 0 ? 0 : 200);
|
}, i === 0 ? 0 : 200);
|
||||||
}
|
}
|
||||||
|
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
|
||||||
|
|
||||||
const run_mermaid_render = do_something_but_not_too_frequently(1000, function () {
|
|
||||||
const blocks = document.querySelectorAll(`pre.mermaid, diagram-div`);
|
|
||||||
if (blocks.length == 0) { return; }
|
|
||||||
uml("mermaid");
|
|
||||||
});
|
|
||||||
run_mermaid_render();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -277,7 +349,6 @@ function chatbotContentChanged(attempt = 1, force = false) {
|
|||||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||||
// 第 3 部分: chatbot动态高度调整
|
// 第 3 部分: chatbot动态高度调整
|
||||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||||
|
|
||||||
function chatbotAutoHeight() {
|
function chatbotAutoHeight() {
|
||||||
// 自动调整高度:立即
|
// 自动调整高度:立即
|
||||||
function update_height() {
|
function update_height() {
|
||||||
@@ -309,6 +380,7 @@ function chatbotAutoHeight() {
|
|||||||
setInterval(function () { update_height_slow() }, 50); // 每50毫秒执行一次
|
setInterval(function () { update_height_slow() }, 50); // 每50毫秒执行一次
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
swapped = false;
|
swapped = false;
|
||||||
function swap_input_area() {
|
function swap_input_area() {
|
||||||
// Get the elements to be swapped
|
// Get the elements to be swapped
|
||||||
@@ -328,6 +400,7 @@ function swap_input_area() {
|
|||||||
else { swapped = true; }
|
else { swapped = true; }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function get_elements(consider_state_panel = false) {
|
function get_elements(consider_state_panel = false) {
|
||||||
var chatbot = document.querySelector('#gpt-chatbot > div.wrap.svelte-18telvq');
|
var chatbot = document.querySelector('#gpt-chatbot > div.wrap.svelte-18telvq');
|
||||||
if (!chatbot) {
|
if (!chatbot) {
|
||||||
@@ -425,6 +498,7 @@ async function upload_files(files) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function register_func_paste(input) {
|
function register_func_paste(input) {
|
||||||
let paste_files = [];
|
let paste_files = [];
|
||||||
if (input) {
|
if (input) {
|
||||||
@@ -451,6 +525,7 @@ function register_func_paste(input) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function register_func_drag(elem) {
|
function register_func_drag(elem) {
|
||||||
if (elem) {
|
if (elem) {
|
||||||
const dragEvents = ["dragover"];
|
const dragEvents = ["dragover"];
|
||||||
@@ -487,6 +562,7 @@ function register_func_drag(elem) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function elem_upload_component_pop_message(elem) {
|
function elem_upload_component_pop_message(elem) {
|
||||||
if (elem) {
|
if (elem) {
|
||||||
const dragEvents = ["dragover"];
|
const dragEvents = ["dragover"];
|
||||||
@@ -516,6 +592,7 @@ function elem_upload_component_pop_message(elem) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function register_upload_event() {
|
function register_upload_event() {
|
||||||
locate_upload_elems();
|
locate_upload_elems();
|
||||||
if (elem_upload_float) {
|
if (elem_upload_float) {
|
||||||
@@ -538,6 +615,7 @@ function register_upload_event() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function monitoring_input_box() {
|
function monitoring_input_box() {
|
||||||
register_upload_event();
|
register_upload_event();
|
||||||
|
|
||||||
@@ -571,7 +649,6 @@ window.addEventListener("DOMContentLoaded", function () {
|
|||||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||||
// 第 5 部分: 音频按钮样式变化
|
// 第 5 部分: 音频按钮样式变化
|
||||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||||
|
|
||||||
function audio_fn_init() {
|
function audio_fn_init() {
|
||||||
let audio_component = document.getElementById('elem_audio');
|
let audio_component = document.getElementById('elem_audio');
|
||||||
if (audio_component) {
|
if (audio_component) {
|
||||||
@@ -608,6 +685,7 @@ function audio_fn_init() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
function minor_ui_adjustment() {
|
function minor_ui_adjustment() {
|
||||||
let cbsc_area = document.getElementById('cbsc');
|
let cbsc_area = document.getElementById('cbsc');
|
||||||
cbsc_area.style.paddingTop = '15px';
|
cbsc_area.style.paddingTop = '15px';
|
||||||
@@ -672,9 +750,9 @@ function limit_scroll_position() {
|
|||||||
let scrollableDiv = document.querySelector('#gpt-chatbot > div.wrap');
|
let scrollableDiv = document.querySelector('#gpt-chatbot > div.wrap');
|
||||||
scrollableDiv.addEventListener('wheel', function (e) {
|
scrollableDiv.addEventListener('wheel', function (e) {
|
||||||
let preventScroll = false;
|
let preventScroll = false;
|
||||||
if (e.deltaX != 0) { prevented_offset = 0; return;}
|
if (e.deltaX != 0) { prevented_offset = 0; return; }
|
||||||
if (this.scrollHeight == this.clientHeight) { prevented_offset = 0; return;}
|
if (this.scrollHeight == this.clientHeight) { prevented_offset = 0; return; }
|
||||||
if (e.deltaY < 0) { prevented_offset = 0; return;}
|
if (e.deltaY < 0) { prevented_offset = 0; return; }
|
||||||
if (e.deltaY > 0 && this.scrollHeight - this.clientHeight - this.scrollTop <= 1) { preventScroll = true; }
|
if (e.deltaY > 0 && this.scrollHeight - this.clientHeight - this.scrollTop <= 1) { preventScroll = true; }
|
||||||
|
|
||||||
if (preventScroll) {
|
if (preventScroll) {
|
||||||
@@ -700,7 +778,88 @@ function limit_scroll_position() {
|
|||||||
// 第 7 部分: JS初始化函数
|
// 第 7 部分: JS初始化函数
|
||||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||||
|
|
||||||
function GptAcademicJavaScriptInit(LAYOUT = "LEFT-RIGHT") {
|
function loadLive2D() {
|
||||||
|
try {
|
||||||
|
$("<link>").attr({ href: "file=themes/waifu_plugin/waifu.css", rel: "stylesheet", type: "text/css" }).appendTo('head');
|
||||||
|
$('body').append('<div class="waifu"><div class="waifu-tips"></div><canvas id="live2d" class="live2d"></canvas><div class="waifu-tool"><span class="fui-home"></span> <span class="fui-chat"></span> <span class="fui-eye"></span> <span class="fui-user"></span> <span class="fui-photo"></span> <span class="fui-info-circle"></span> <span class="fui-cross"></span></div></div>');
|
||||||
|
$.ajax({
|
||||||
|
url: "file=themes/waifu_plugin/waifu-tips.js", dataType: "script", cache: true, success: function () {
|
||||||
|
$.ajax({
|
||||||
|
url: "file=themes/waifu_plugin/live2d.js", dataType: "script", cache: true, success: function () {
|
||||||
|
/* 可直接修改部分参数 */
|
||||||
|
live2d_settings['hitokotoAPI'] = "hitokoto.cn"; // 一言 API
|
||||||
|
live2d_settings['modelId'] = 3; // 默认模型 ID
|
||||||
|
live2d_settings['modelTexturesId'] = 44; // 默认材质 ID
|
||||||
|
live2d_settings['modelStorage'] = false; // 不储存模型 ID
|
||||||
|
live2d_settings['waifuSize'] = '210x187';
|
||||||
|
live2d_settings['waifuTipsSize'] = '187x52';
|
||||||
|
live2d_settings['canSwitchModel'] = true;
|
||||||
|
live2d_settings['canSwitchTextures'] = true;
|
||||||
|
live2d_settings['canSwitchHitokoto'] = false;
|
||||||
|
live2d_settings['canTakeScreenshot'] = false;
|
||||||
|
live2d_settings['canTurnToHomePage'] = false;
|
||||||
|
live2d_settings['canTurnToAboutPage'] = false;
|
||||||
|
live2d_settings['showHitokoto'] = false; // 显示一言
|
||||||
|
live2d_settings['showF12Status'] = false; // 显示加载状态
|
||||||
|
live2d_settings['showF12Message'] = false; // 显示看板娘消息
|
||||||
|
live2d_settings['showF12OpenMsg'] = false; // 显示控制台打开提示
|
||||||
|
live2d_settings['showCopyMessage'] = false; // 显示 复制内容 提示
|
||||||
|
live2d_settings['showWelcomeMessage'] = true; // 显示进入面页欢迎词
|
||||||
|
/* 在 initModel 前添加 */
|
||||||
|
initModel("file=themes/waifu_plugin/waifu-tips.json");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
} catch (err) { console.log("[Error] JQuery is not defined.") }
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function get_checkbox_selected_items(elem_id) {
|
||||||
|
display_panel_arr = [];
|
||||||
|
document.getElementById(elem_id).querySelector('[data-testid="checkbox-group"]').querySelectorAll('label').forEach(label => {
|
||||||
|
// Get the span text
|
||||||
|
const spanText = label.querySelector('span').textContent;
|
||||||
|
// Get the input value
|
||||||
|
const checked = label.querySelector('input').checked;
|
||||||
|
if (checked) {
|
||||||
|
display_panel_arr.push(spanText)
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return display_panel_arr;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function gpt_academic_gradio_saveload(
|
||||||
|
save_or_load, // save_or_load==="save" / save_or_load==="load"
|
||||||
|
elem_id, // element id
|
||||||
|
cookie_key, // cookie key
|
||||||
|
save_value = "", // save value
|
||||||
|
load_type = "str", // type==="str" / type==="float"
|
||||||
|
load_default = false, // load default value
|
||||||
|
load_default_value = ""
|
||||||
|
) {
|
||||||
|
if (save_or_load === "load") {
|
||||||
|
let value = getCookie(cookie_key);
|
||||||
|
if (value) {
|
||||||
|
console.log('加载cookie', elem_id, value)
|
||||||
|
push_data_to_gradio_component(value, elem_id, load_type);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
if (load_default) {
|
||||||
|
console.log('加载cookie的默认值', elem_id, load_default_value)
|
||||||
|
push_data_to_gradio_component(load_default_value, elem_id, load_type);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (save_or_load === "save") {
|
||||||
|
setCookie(cookie_key, save_value, 365);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout) {
|
||||||
|
// 第一部分,布局初始化
|
||||||
audio_fn_init();
|
audio_fn_init();
|
||||||
minor_ui_adjustment();
|
minor_ui_adjustment();
|
||||||
chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
|
chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
|
||||||
@@ -708,8 +867,90 @@ function GptAcademicJavaScriptInit(LAYOUT = "LEFT-RIGHT") {
|
|||||||
chatbotContentChanged(1);
|
chatbotContentChanged(1);
|
||||||
});
|
});
|
||||||
chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
|
chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
|
||||||
if (LAYOUT === "LEFT-RIGHT") { chatbotAutoHeight(); }
|
if (layout === "LEFT-RIGHT") { chatbotAutoHeight(); }
|
||||||
if (LAYOUT === "LEFT-RIGHT") { limit_scroll_position(); }
|
if (layout === "LEFT-RIGHT") { limit_scroll_position(); }
|
||||||
// setInterval(function () { uml("mermaid") }, 5000); // 每50毫秒执行一次
|
|
||||||
|
// 第二部分,读取Cookie,初始话界面
|
||||||
|
let searchString = "";
|
||||||
|
let bool_value = "";
|
||||||
|
|
||||||
|
// darkmode 深色模式
|
||||||
|
if (getCookie("js_darkmode_cookie")) {
|
||||||
|
dark = getCookie("js_darkmode_cookie")
|
||||||
|
}
|
||||||
|
dark = dark == "True";
|
||||||
|
if (document.querySelectorAll('.dark').length) {
|
||||||
|
if (!dark) {
|
||||||
|
document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (dark) {
|
||||||
|
document.querySelector('body').classList.add('dark');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SysPrompt 系统静默提示词
|
||||||
|
gpt_academic_gradio_saveload("load", "elem_prompt", "js_system_prompt_cookie", null, "str");
|
||||||
|
|
||||||
|
// Temperature 大模型温度参数
|
||||||
|
gpt_academic_gradio_saveload("load", "elem_temperature", "js_temperature_cookie", null, "float");
|
||||||
|
|
||||||
|
// clearButton 自动清除按钮
|
||||||
|
if (getCookie("js_clearbtn_show_cookie")) {
|
||||||
|
// have cookie
|
||||||
|
bool_value = getCookie("js_clearbtn_show_cookie")
|
||||||
|
bool_value = bool_value == "True";
|
||||||
|
searchString = "输入清除键";
|
||||||
|
|
||||||
|
if (bool_value) {
|
||||||
|
// make btns appear
|
||||||
|
let clearButton = document.getElementById("elem_clear"); clearButton.style.display = "block";
|
||||||
|
let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "block";
|
||||||
|
// deal with checkboxes
|
||||||
|
let arr_with_clear_btn = update_array(
|
||||||
|
await get_data_from_gradio_component('cbs'), "输入清除键", "add"
|
||||||
|
)
|
||||||
|
push_data_to_gradio_component(arr_with_clear_btn, "cbs", "no_conversion");
|
||||||
|
} else {
|
||||||
|
// make btns disappear
|
||||||
|
let clearButton = document.getElementById("elem_clear"); clearButton.style.display = "none";
|
||||||
|
let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "none";
|
||||||
|
// deal with checkboxes
|
||||||
|
let arr_without_clear_btn = update_array(
|
||||||
|
await get_data_from_gradio_component('cbs'), "输入清除键", "remove"
|
||||||
|
)
|
||||||
|
push_data_to_gradio_component(arr_without_clear_btn, "cbs", "no_conversion");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// live2d 显示
|
||||||
|
if (getCookie("js_live2d_show_cookie")) {
|
||||||
|
// have cookie
|
||||||
|
searchString = "添加Live2D形象";
|
||||||
|
bool_value = getCookie("js_live2d_show_cookie");
|
||||||
|
bool_value = bool_value == "True";
|
||||||
|
if (bool_value) {
|
||||||
|
loadLive2D();
|
||||||
|
let arr_with_live2d = update_array(
|
||||||
|
await get_data_from_gradio_component('cbsc'), "添加Live2D形象", "add"
|
||||||
|
)
|
||||||
|
push_data_to_gradio_component(arr_with_live2d, "cbsc", "no_conversion");
|
||||||
|
} else {
|
||||||
|
try {
|
||||||
|
$('.waifu').hide();
|
||||||
|
let arr_without_live2d = update_array(
|
||||||
|
await get_data_from_gradio_component('cbsc'), "添加Live2D形象", "remove"
|
||||||
|
)
|
||||||
|
push_data_to_gradio_component(arr_without_live2d, "cbsc", "no_conversion");
|
||||||
|
} catch (error) {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// do not have cookie
|
||||||
|
if (live2d) {
|
||||||
|
loadLive2D();
|
||||||
|
} else {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -5,17 +5,14 @@ def get_common_html_javascript_code():
|
|||||||
js = "\n"
|
js = "\n"
|
||||||
for jsf in [
|
for jsf in [
|
||||||
"file=themes/common.js",
|
"file=themes/common.js",
|
||||||
"file=themes/mermaid.min.js",
|
|
||||||
"file=themes/mermaid_loader.js",
|
|
||||||
]:
|
]:
|
||||||
js += f"""<script src="{jsf}"></script>\n"""
|
js += f"""<script src="{jsf}"></script>\n"""
|
||||||
|
|
||||||
# 添加Live2D
|
# 添加Live2D
|
||||||
if ADD_WAIFU:
|
if ADD_WAIFU:
|
||||||
for jsf in [
|
for jsf in [
|
||||||
"file=docs/waifu_plugin/jquery.min.js",
|
"file=themes/waifu_plugin/jquery.min.js",
|
||||||
"file=docs/waifu_plugin/jquery-ui.min.js",
|
"file=themes/waifu_plugin/jquery-ui.min.js",
|
||||||
"file=docs/waifu_plugin/autoload.js",
|
|
||||||
]:
|
]:
|
||||||
js += f"""<script src="{jsf}"></script>\n"""
|
js += f"""<script src="{jsf}"></script>\n"""
|
||||||
return js
|
return js
|
||||||
1590
themes/mermaid.min.js
vendored
1590
themes/mermaid.min.js
vendored
文件差异因一行或多行过长而隐藏
@@ -1,55 +1 @@
|
|||||||
import { deflate, inflate } from '/file=themes/pako.esm.mjs';
|
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
|
||||||
import { toUint8Array, fromUint8Array, toBase64, fromBase64 } from '/file=themes/base64.mjs';
|
|
||||||
|
|
||||||
const base64Serde = {
|
|
||||||
serialize: (state) => {
|
|
||||||
return toBase64(state, true);
|
|
||||||
},
|
|
||||||
deserialize: (state) => {
|
|
||||||
return fromBase64(state);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const pakoSerde = {
|
|
||||||
serialize: (state) => {
|
|
||||||
const data = new TextEncoder().encode(state);
|
|
||||||
const compressed = deflate(data, { level: 9 });
|
|
||||||
return fromUint8Array(compressed, true);
|
|
||||||
},
|
|
||||||
deserialize: (state) => {
|
|
||||||
const data = toUint8Array(state);
|
|
||||||
return inflate(data, { to: 'string' });
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const serdes = {
|
|
||||||
base64: base64Serde,
|
|
||||||
pako: pakoSerde
|
|
||||||
};
|
|
||||||
|
|
||||||
export const serializeState = (state, serde = 'pako') => {
|
|
||||||
if (!(serde in serdes)) {
|
|
||||||
throw new Error(`Unknown serde type: ${serde}`);
|
|
||||||
}
|
|
||||||
const json = JSON.stringify(state);
|
|
||||||
const serialized = serdes[serde].serialize(json);
|
|
||||||
return `${serde}:${serialized}`;
|
|
||||||
};
|
|
||||||
|
|
||||||
const deserializeState = (state) => {
|
|
||||||
let type, serialized;
|
|
||||||
if (state.includes(':')) {
|
|
||||||
let tempType;
|
|
||||||
[tempType, serialized] = state.split(':');
|
|
||||||
if (tempType in serdes) {
|
|
||||||
type = tempType;
|
|
||||||
} else {
|
|
||||||
throw new Error(`Unknown serde type: ${tempType}`);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
type = 'base64';
|
|
||||||
serialized = state;
|
|
||||||
}
|
|
||||||
const json = serdes[type].deserialize(serialized);
|
|
||||||
return JSON.parse(json);
|
|
||||||
};
|
|
||||||
|
|||||||
@@ -1,197 +1 @@
|
|||||||
const uml = async className => {
|
// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
|
||||||
|
|
||||||
// Custom element to encapsulate Mermaid content.
|
|
||||||
class MermaidDiv extends HTMLElement {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a special Mermaid div shadow DOM.
|
|
||||||
* Works around issues of shared IDs.
|
|
||||||
* @return {void}
|
|
||||||
*/
|
|
||||||
constructor() {
|
|
||||||
super()
|
|
||||||
|
|
||||||
// Create the Shadow DOM and attach style
|
|
||||||
const shadow = this.attachShadow({ mode: "open" })
|
|
||||||
const style = document.createElement("style")
|
|
||||||
style.textContent = `
|
|
||||||
:host {
|
|
||||||
display: block;
|
|
||||||
line-height: initial;
|
|
||||||
font-size: 16px;
|
|
||||||
}
|
|
||||||
div.diagram {
|
|
||||||
margin: 0;
|
|
||||||
overflow: visible;
|
|
||||||
}`
|
|
||||||
shadow.appendChild(style)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (typeof customElements.get("diagram-div") === "undefined") {
|
|
||||||
customElements.define("diagram-div", MermaidDiv)
|
|
||||||
}
|
|
||||||
|
|
||||||
const getFromCode = parent => {
|
|
||||||
// Handles <pre><code> text extraction.
|
|
||||||
let text = ""
|
|
||||||
for (let j = 0; j < parent.childNodes.length; j++) {
|
|
||||||
const subEl = parent.childNodes[j]
|
|
||||||
if (subEl.tagName.toLowerCase() === "code") {
|
|
||||||
for (let k = 0; k < subEl.childNodes.length; k++) {
|
|
||||||
const child = subEl.childNodes[k]
|
|
||||||
const whitespace = /^\s*$/
|
|
||||||
if (child.nodeName === "#text" && !(whitespace.test(child.nodeValue))) {
|
|
||||||
text = child.nodeValue
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return text
|
|
||||||
}
|
|
||||||
|
|
||||||
function createOrUpdateHyperlink(parentElement, linkText, linkHref) {
|
|
||||||
// Search for an existing anchor element within the parentElement
|
|
||||||
let existingAnchor = parentElement.querySelector("a");
|
|
||||||
|
|
||||||
// Check if an anchor element already exists
|
|
||||||
if (existingAnchor) {
|
|
||||||
// Update the hyperlink reference if it's different from the current one
|
|
||||||
if (existingAnchor.href !== linkHref) {
|
|
||||||
existingAnchor.href = linkHref;
|
|
||||||
}
|
|
||||||
// Update the target attribute to ensure it opens in a new tab
|
|
||||||
existingAnchor.target = '_blank';
|
|
||||||
|
|
||||||
// If the text must be dynamic, uncomment and use the following line:
|
|
||||||
// existingAnchor.textContent = linkText;
|
|
||||||
} else {
|
|
||||||
// If no anchor exists, create one and append it to the parentElement
|
|
||||||
let anchorElement = document.createElement("a");
|
|
||||||
anchorElement.href = linkHref; // Set hyperlink reference
|
|
||||||
anchorElement.textContent = linkText; // Set text displayed
|
|
||||||
anchorElement.target = '_blank'; // Ensure it opens in a new tab
|
|
||||||
parentElement.appendChild(anchorElement); // Append the new anchor element to the parent
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function removeLastLine(str) {
|
|
||||||
// 将字符串按换行符分割成数组
|
|
||||||
var lines = str.split('\n');
|
|
||||||
lines.pop();
|
|
||||||
// 将数组重新连接成字符串,并按换行符连接
|
|
||||||
var result = lines.join('\n');
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
// 给出配置 Provide a default config in case one is not specified
|
|
||||||
const defaultConfig = {
|
|
||||||
startOnLoad: false,
|
|
||||||
theme: "default",
|
|
||||||
flowchart: {
|
|
||||||
htmlLabels: false
|
|
||||||
},
|
|
||||||
er: {
|
|
||||||
useMaxWidth: false
|
|
||||||
},
|
|
||||||
sequence: {
|
|
||||||
useMaxWidth: false,
|
|
||||||
noteFontWeight: "14px",
|
|
||||||
actorFontSize: "14px",
|
|
||||||
messageFontSize: "16px"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (document.body.classList.contains("dark")) {
|
|
||||||
defaultConfig.theme = "dark"
|
|
||||||
}
|
|
||||||
|
|
||||||
const Module = await import('/file=themes/mermaid_editor.js');
|
|
||||||
|
|
||||||
function do_render(block, code, codeContent, cnt) {
|
|
||||||
var rendered_content = mermaid.render(`_diagram_${cnt}`, code);
|
|
||||||
////////////////////////////// 记录有哪些代码已经被渲染了 ///////////////////////////////////
|
|
||||||
let codeFinishRenderElement = block.querySelector("code_finish_render"); // 如果block下已存在code_already_rendered元素,则获取它
|
|
||||||
if (codeFinishRenderElement) { // 如果block下已存在code_already_rendered元素
|
|
||||||
codeFinishRenderElement.style.display = "none";
|
|
||||||
} else {
|
|
||||||
// 如果不存在code_finish_render元素,则将code元素中的内容添加到新创建的code_finish_render元素中
|
|
||||||
let codeFinishRenderElementNew = document.createElement("code_finish_render"); // 创建一个新的code_already_rendered元素
|
|
||||||
codeFinishRenderElementNew.style.display = "none";
|
|
||||||
codeFinishRenderElementNew.textContent = "";
|
|
||||||
block.appendChild(codeFinishRenderElementNew); // 将新创建的code_already_rendered元素添加到block中
|
|
||||||
codeFinishRenderElement = codeFinishRenderElementNew;
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////// 创建一个用于渲染的容器 ///////////////////////////////////
|
|
||||||
let mermaidRender = block.querySelector(".mermaid_render"); // 尝试获取已存在的<div class='mermaid_render'>
|
|
||||||
if (!mermaidRender) {
|
|
||||||
mermaidRender = document.createElement("div"); // 不存在,创建新的<div class='mermaid_render'>
|
|
||||||
mermaidRender.classList.add("mermaid_render");
|
|
||||||
block.appendChild(mermaidRender); // 将新创建的元素附加到block
|
|
||||||
}
|
|
||||||
mermaidRender.innerHTML = rendered_content
|
|
||||||
codeFinishRenderElement.textContent = code // 标记已经渲染的部分
|
|
||||||
|
|
||||||
////////////////////////////// 创建一个“点击这里编辑脑图” ///////////////////////////////
|
|
||||||
let pako_encode = Module.serializeState({
|
|
||||||
"code": codeContent,
|
|
||||||
"mermaid": "{\n \"theme\": \"default\"\n}",
|
|
||||||
"autoSync": true,
|
|
||||||
"updateDiagram": false
|
|
||||||
});
|
|
||||||
createOrUpdateHyperlink(block, "点击这里编辑脑图", "https://mermaid.live/edit#" + pako_encode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// 加载配置 Load up the config
|
|
||||||
mermaid.mermaidAPI.globalReset() // 全局复位
|
|
||||||
const config = (typeof mermaidConfig === "undefined") ? defaultConfig : mermaidConfig
|
|
||||||
mermaid.initialize(config)
|
|
||||||
// 查找需要渲染的元素 Find all of our Mermaid sources and render them.
|
|
||||||
const blocks = document.querySelectorAll(`pre.mermaid`);
|
|
||||||
|
|
||||||
for (let i = 0; i < blocks.length; i++) {
|
|
||||||
var block = blocks[i]
|
|
||||||
////////////////////////////// 如果代码没有发生变化,就不渲染了 ///////////////////////////////////
|
|
||||||
var code = getFromCode(block);
|
|
||||||
let code_elem = block.querySelector("code");
|
|
||||||
let codeContent = code_elem.textContent; // 获取code元素中的文本内容
|
|
||||||
|
|
||||||
// 判断codeContent是否包含'<gpt_academic_hide_mermaid_code>',如果是,则使code_elem隐藏
|
|
||||||
if (codeContent.indexOf('<gpt_academic_hide_mermaid_code>') !== -1) {
|
|
||||||
code_elem.style.display = "none";
|
|
||||||
}
|
|
||||||
|
|
||||||
// 如果block下已存在code_already_rendered元素,则获取它
|
|
||||||
let codePendingRenderElement = block.querySelector("code_pending_render");
|
|
||||||
if (codePendingRenderElement) { // 如果block下已存在code_pending_render元素
|
|
||||||
codePendingRenderElement.style.display = "none";
|
|
||||||
if (codePendingRenderElement.textContent !== codeContent) {
|
|
||||||
codePendingRenderElement.textContent = codeContent; // 如果现有的code_pending_render元素中的内容与code元素中的内容不同,更新code_pending_render元素中的内容
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
continue; // 如果相同,就不处理了
|
|
||||||
}
|
|
||||||
} else { // 如果不存在code_pending_render元素,则将code元素中的内容添加到新创建的code_pending_render元素中
|
|
||||||
let codePendingRenderElementNew = document.createElement("code_pending_render"); // 创建一个新的code_already_rendered元素
|
|
||||||
codePendingRenderElementNew.style.display = "none";
|
|
||||||
codePendingRenderElementNew.textContent = codeContent;
|
|
||||||
block.appendChild(codePendingRenderElementNew); // 将新创建的code_pending_render元素添加到block中
|
|
||||||
codePendingRenderElement = codePendingRenderElementNew;
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////// 在这里才真正开始渲染 ///////////////////////////////////
|
|
||||||
try {
|
|
||||||
do_render(block, code, codeContent, i);
|
|
||||||
// console.log("渲染", codeContent);
|
|
||||||
} catch (err) {
|
|
||||||
try {
|
|
||||||
var lines = code.split('\n'); if (lines.length < 2) { continue; }
|
|
||||||
do_render(block, removeLastLine(code), codeContent, i);
|
|
||||||
// console.log("渲染", codeContent);
|
|
||||||
} catch (err) {
|
|
||||||
console.log("以下代码不能渲染", code, removeLastLine(code), err);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
6878
themes/pako.esm.mjs
6878
themes/pako.esm.mjs
文件差异内容过多而无法显示
加载差异
122
themes/theme.py
122
themes/theme.py
@@ -1,7 +1,10 @@
|
|||||||
import pickle
|
import pickle
|
||||||
import base64
|
import base64
|
||||||
import uuid
|
import uuid
|
||||||
|
import json
|
||||||
from toolbox import get_conf
|
from toolbox import get_conf
|
||||||
|
import json
|
||||||
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
||||||
@@ -45,25 +48,24 @@ adjust_theme, advanced_css, theme_declaration, _ = load_dynamic_theme(get_conf("
|
|||||||
cookie相关工具函数
|
cookie相关工具函数
|
||||||
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
||||||
"""
|
"""
|
||||||
|
def assign_user_uuid(cookies):
|
||||||
|
|
||||||
def init_cookie(cookies, chatbot):
|
|
||||||
# 为每一位访问的用户赋予一个独一无二的uuid编码
|
# 为每一位访问的用户赋予一个独一无二的uuid编码
|
||||||
cookies.update({"uuid": uuid.uuid4()})
|
cookies.update({"uuid": uuid.uuid4()})
|
||||||
return cookies
|
return cookies
|
||||||
|
|
||||||
|
|
||||||
def to_cookie_str(d):
|
def to_cookie_str(d):
|
||||||
# Pickle the dictionary and encode it as a string
|
# serialize the dictionary and encode it as a string
|
||||||
pickled_dict = pickle.dumps(d)
|
serialized_dict = json.dumps(d)
|
||||||
cookie_value = base64.b64encode(pickled_dict).decode("utf-8")
|
cookie_value = base64.b64encode(serialized_dict.encode('utf8')).decode("utf-8")
|
||||||
return cookie_value
|
return cookie_value
|
||||||
|
|
||||||
|
|
||||||
def from_cookie_str(c):
|
def from_cookie_str(c):
|
||||||
# Decode the base64-encoded string and unpickle it into a dictionary
|
# Decode the base64-encoded string and unserialize it into a dictionary
|
||||||
pickled_dict = base64.b64decode(c.encode("utf-8"))
|
serialized_dict = base64.b64decode(c.encode("utf-8"))
|
||||||
return pickle.loads(pickled_dict)
|
serialized_dict.decode("utf-8")
|
||||||
|
return json.loads(serialized_dict)
|
||||||
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
@@ -91,31 +93,103 @@ js_code_for_css_changing = """(css) => {
|
|||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
js_code_for_darkmode_init = """(dark) => {
|
|
||||||
dark = dark == "True";
|
|
||||||
if (document.querySelectorAll('.dark').length) {
|
|
||||||
if (!dark){
|
|
||||||
document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if (dark){
|
|
||||||
document.querySelector('body').classList.add('dark');
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
"""
|
|
||||||
|
|
||||||
js_code_for_toggle_darkmode = """() => {
|
js_code_for_toggle_darkmode = """() => {
|
||||||
if (document.querySelectorAll('.dark').length) {
|
if (document.querySelectorAll('.dark').length) {
|
||||||
|
setCookie("js_darkmode_cookie", "False", 365);
|
||||||
document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
|
document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
|
||||||
} else {
|
} else {
|
||||||
|
setCookie("js_darkmode_cookie", "True", 365);
|
||||||
document.querySelector('body').classList.add('dark');
|
document.querySelector('body').classList.add('dark');
|
||||||
}
|
}
|
||||||
document.querySelectorAll('code_pending_render').forEach(code => {code.remove();})
|
document.querySelectorAll('code_pending_render').forEach(code => {code.remove();})
|
||||||
}"""
|
}"""
|
||||||
|
|
||||||
|
|
||||||
js_code_for_persistent_cookie_init = """(persistent_cookie) => {
|
js_code_for_persistent_cookie_init = """(web_cookie_cache, cookie) => {
|
||||||
return getCookie("persistent_cookie");
|
return [getCookie("web_cookie_cache"), cookie];
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
js_code_reset = """
|
||||||
|
(a,b,c)=>{
|
||||||
|
return [[], [], "已重置"];
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
js_code_clear = """
|
||||||
|
(a,b)=>{
|
||||||
|
return ["", ""];
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
js_code_show_or_hide = """
|
||||||
|
(display_panel_arr)=>{
|
||||||
|
setTimeout(() => {
|
||||||
|
// get conf
|
||||||
|
display_panel_arr = get_checkbox_selected_items("cbs");
|
||||||
|
|
||||||
|
////////////////////// 输入清除键 ///////////////////////////
|
||||||
|
let searchString = "输入清除键";
|
||||||
|
let ele = "none";
|
||||||
|
if (display_panel_arr.includes(searchString)) {
|
||||||
|
let clearButton = document.getElementById("elem_clear");
|
||||||
|
let clearButton2 = document.getElementById("elem_clear2");
|
||||||
|
clearButton.style.display = "block";
|
||||||
|
clearButton2.style.display = "block";
|
||||||
|
setCookie("js_clearbtn_show_cookie", "True", 365);
|
||||||
|
} else {
|
||||||
|
let clearButton = document.getElementById("elem_clear");
|
||||||
|
let clearButton2 = document.getElementById("elem_clear2");
|
||||||
|
clearButton.style.display = "none";
|
||||||
|
clearButton2.style.display = "none";
|
||||||
|
setCookie("js_clearbtn_show_cookie", "False", 365);
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////// 基础功能区 ///////////////////////////
|
||||||
|
searchString = "基础功能区";
|
||||||
|
if (display_panel_arr.includes(searchString)) {
|
||||||
|
ele = document.getElementById("basic-panel");
|
||||||
|
ele.style.display = "block";
|
||||||
|
} else {
|
||||||
|
ele = document.getElementById("basic-panel");
|
||||||
|
ele.style.display = "none";
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////// 函数插件区 ///////////////////////////
|
||||||
|
searchString = "函数插件区";
|
||||||
|
if (display_panel_arr.includes(searchString)) {
|
||||||
|
ele = document.getElementById("plugin-panel");
|
||||||
|
ele.style.display = "block";
|
||||||
|
} else {
|
||||||
|
ele = document.getElementById("plugin-panel");
|
||||||
|
ele.style.display = "none";
|
||||||
|
}
|
||||||
|
|
||||||
|
}, 50);
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
js_code_show_or_hide_group2 = """
|
||||||
|
(display_panel_arr)=>{
|
||||||
|
setTimeout(() => {
|
||||||
|
display_panel_arr = get_checkbox_selected_items("cbsc");
|
||||||
|
|
||||||
|
let searchString = "添加Live2D形象";
|
||||||
|
let ele = "none";
|
||||||
|
if (display_panel_arr.includes(searchString)) {
|
||||||
|
setCookie("js_live2d_show_cookie", "True", 365);
|
||||||
|
loadLive2D();
|
||||||
|
} else {
|
||||||
|
setCookie("js_live2d_show_cookie", "False", 365);
|
||||||
|
$('.waifu').hide();
|
||||||
|
}
|
||||||
|
|
||||||
|
}, 50);
|
||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
|
|||||||
|
之前 宽度: | 高度: | 大小: 56 KiB 之后 宽度: | 高度: | 大小: 56 KiB |
@@ -92,7 +92,7 @@ String.prototype.render = function(context) {
|
|||||||
};
|
};
|
||||||
|
|
||||||
var re = /x/;
|
var re = /x/;
|
||||||
console.log(re);
|
// console.log(re);
|
||||||
|
|
||||||
function empty(obj) {return typeof obj=="undefined"||obj==null||obj==""?true:false}
|
function empty(obj) {return typeof obj=="undefined"||obj==null||obj==""?true:false}
|
||||||
function getRandText(text) {return Array.isArray(text) ? text[Math.floor(Math.random() * text.length + 1)-1] : text}
|
function getRandText(text) {return Array.isArray(text) ? text[Math.floor(Math.random() * text.length + 1)-1] : text}
|
||||||
@@ -120,7 +120,7 @@ function hideMessage(timeout) {
|
|||||||
|
|
||||||
function initModel(waifuPath, type) {
|
function initModel(waifuPath, type) {
|
||||||
/* console welcome message */
|
/* console welcome message */
|
||||||
eval(function(p,a,c,k,e,r){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('8.d(" ");8.d("\\U,.\\y\\5.\\1\\1\\1\\1/\\1,\\u\\2 \\H\\n\\1\\1\\1\\1\\1\\b \', !-\\r\\j-i\\1/\\1/\\g\\n\\1\\1\\1 \\1 \\a\\4\\f\'\\1\\1\\1 L/\\a\\4\\5\\2\\n\\1\\1 \\1 /\\1 \\a,\\1 /|\\1 ,\\1 ,\\1\\1\\1 \',\\n\\1\\1\\1\\q \\1/ /-\\j/\\1\\h\\E \\9 \\5!\\1 i\\n\\1\\1\\1 \\3 \\6 7\\q\\4\\c\\1 \\3\'\\s-\\c\\2!\\t|\\1 |\\n\\1\\1\\1\\1 !,/7 \'0\'\\1\\1 \\X\\w| \\1 |\\1\\1\\1\\n\\1\\1\\1\\1 |.\\x\\"\\1\\l\\1\\1 ,,,, / |./ \\1 |\\n\\1\\1\\1\\1 \\3\'| i\\z.\\2,,A\\l,.\\B / \\1.i \\1|\\n\\1\\1\\1\\1\\1 \\3\'| | / C\\D/\\3\'\\5,\\1\\9.\\1|\\n\\1\\1\\1\\1\\1\\1 | |/i \\m|/\\1 i\\1,.\\6 |\\F\\1|\\n\\1\\1\\1\\1\\1\\1.|/ /\\1\\h\\G \\1 \\6!\\1\\1\\b\\1|\\n\\1\\1\\1 \\1 \\1 k\\5>\\2\\9 \\1 o,.\\6\\2 \\1 /\\2!\\n\\1\\1\\1\\1\\1\\1 !\'\\m//\\4\\I\\g\', \\b \\4\'7\'\\J\'\\n\\1\\1\\1\\1\\1\\1 \\3\'\\K|M,p,\\O\\3|\\P\\n\\1\\1\\1\\1\\1 \\1\\1\\1\\c-,/\\1|p./\\n\\1\\1\\1\\1\\1 \\1\\1\\1\'\\f\'\\1\\1!o,.:\\Q \\R\\S\\T v"+e.V+" / W "+e.N);8.d(" ");',60,60,'|u3000|uff64|uff9a|uff40|u30fd|uff8d||console|uff8a|uff0f|uff3c|uff84|log|live2d_settings|uff70|u00b4|uff49||u2010||u3000_|u3008||_|___|uff72|u2500|uff67|u30cf|u30fc||u30bd|u4ece|u30d8|uff1e|__|u30a4|k_|uff17_|u3000L_|u3000i|uff1a|u3009|uff34|uff70r|u30fdL__||___i|l2dVerDate|u30f3|u30ce|nLive2D|u770b|u677f|u5a18|u304f__|l2dVersion|FGHRSH|u00b40i'.split('|'),0,{}));
|
// eval(function(p,a,c,k,e,r){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('8.d(" ");8.d("\\U,.\\y\\5.\\1\\1\\1\\1/\\1,\\u\\2 \\H\\n\\1\\1\\1\\1\\1\\b \', !-\\r\\j-i\\1/\\1/\\g\\n\\1\\1\\1 \\1 \\a\\4\\f\'\\1\\1\\1 L/\\a\\4\\5\\2\\n\\1\\1 \\1 /\\1 \\a,\\1 /|\\1 ,\\1 ,\\1\\1\\1 \',\\n\\1\\1\\1\\q \\1/ /-\\j/\\1\\h\\E \\9 \\5!\\1 i\\n\\1\\1\\1 \\3 \\6 7\\q\\4\\c\\1 \\3\'\\s-\\c\\2!\\t|\\1 |\\n\\1\\1\\1\\1 !,/7 \'0\'\\1\\1 \\X\\w| \\1 |\\1\\1\\1\\n\\1\\1\\1\\1 |.\\x\\"\\1\\l\\1\\1 ,,,, / |./ \\1 |\\n\\1\\1\\1\\1 \\3\'| i\\z.\\2,,A\\l,.\\B / \\1.i \\1|\\n\\1\\1\\1\\1\\1 \\3\'| | / C\\D/\\3\'\\5,\\1\\9.\\1|\\n\\1\\1\\1\\1\\1\\1 | |/i \\m|/\\1 i\\1,.\\6 |\\F\\1|\\n\\1\\1\\1\\1\\1\\1.|/ /\\1\\h\\G \\1 \\6!\\1\\1\\b\\1|\\n\\1\\1\\1 \\1 \\1 k\\5>\\2\\9 \\1 o,.\\6\\2 \\1 /\\2!\\n\\1\\1\\1\\1\\1\\1 !\'\\m//\\4\\I\\g\', \\b \\4\'7\'\\J\'\\n\\1\\1\\1\\1\\1\\1 \\3\'\\K|M,p,\\O\\3|\\P\\n\\1\\1\\1\\1\\1 \\1\\1\\1\\c-,/\\1|p./\\n\\1\\1\\1\\1\\1 \\1\\1\\1\'\\f\'\\1\\1!o,.:\\Q \\R\\S\\T v"+e.V+" / W "+e.N);8.d(" ");',60,60,'|u3000|uff64|uff9a|uff40|u30fd|uff8d||console|uff8a|uff0f|uff3c|uff84|log|live2d_settings|uff70|u00b4|uff49||u2010||u3000_|u3008||_|___|uff72|u2500|uff67|u30cf|u30fc||u30bd|u4ece|u30d8|uff1e|__|u30a4|k_|uff17_|u3000L_|u3000i|uff1a|u3009|uff34|uff70r|u30fdL__||___i|l2dVerDate|u30f3|u30ce|nLive2D|u770b|u677f|u5a18|u304f__|l2dVersion|FGHRSH|u00b40i'.split('|'),0,{}));
|
||||||
|
|
||||||
/* 判断 JQuery */
|
/* 判断 JQuery */
|
||||||
if (typeof($.ajax) != 'function') typeof(jQuery.ajax) == 'function' ? window.$ = jQuery : console.log('[Error] JQuery is not defined.');
|
if (typeof($.ajax) != 'function') typeof(jQuery.ajax) == 'function' ? window.$ = jQuery : console.log('[Error] JQuery is not defined.');
|
||||||
@@ -44,8 +44,8 @@
|
|||||||
{ "selector": ".container a[href^='http']", "text": ["要看看 <span style=\"color:#0099cc;\">{text}</span> 么?"] },
|
{ "selector": ".container a[href^='http']", "text": ["要看看 <span style=\"color:#0099cc;\">{text}</span> 么?"] },
|
||||||
{ "selector": ".fui-home", "text": ["点击前往首页,想回到上一页可以使用浏览器的后退功能哦"] },
|
{ "selector": ".fui-home", "text": ["点击前往首页,想回到上一页可以使用浏览器的后退功能哦"] },
|
||||||
{ "selector": ".fui-chat", "text": ["一言一语,一颦一笑。一字一句,一颗赛艇。"] },
|
{ "selector": ".fui-chat", "text": ["一言一语,一颦一笑。一字一句,一颗赛艇。"] },
|
||||||
{ "selector": ".fui-eye", "text": ["嗯··· 要切换 看板娘 吗?"] },
|
{ "selector": ".fui-eye", "text": ["嗯··· 要切换 Live2D形象 吗?"] },
|
||||||
{ "selector": ".fui-user", "text": ["喜欢换装 Play 吗?"] },
|
{ "selector": ".fui-user", "text": ["喜欢换装吗?"] },
|
||||||
{ "selector": ".fui-photo", "text": ["要拍张纪念照片吗?"] },
|
{ "selector": ".fui-photo", "text": ["要拍张纪念照片吗?"] },
|
||||||
{ "selector": ".fui-info-circle", "text": ["这里有关于我的信息呢"] },
|
{ "selector": ".fui-info-circle", "text": ["这里有关于我的信息呢"] },
|
||||||
{ "selector": ".fui-cross", "text": ["你不喜欢我了吗..."] },
|
{ "selector": ".fui-cross", "text": ["你不喜欢我了吗..."] },
|
||||||
@@ -77,14 +77,28 @@
|
|||||||
"看什么看(*^▽^*)",
|
"看什么看(*^▽^*)",
|
||||||
"焦虑时,吃顿大餐心情就好啦^_^",
|
"焦虑时,吃顿大餐心情就好啦^_^",
|
||||||
"你这个年纪,怎么睡得着觉的你^_^",
|
"你这个年纪,怎么睡得着觉的你^_^",
|
||||||
"修改ADD_WAIFU=False,我就不再打扰你了~",
|
"打开“界面外观”菜单,可选择关闭Live2D形象",
|
||||||
"经常去github看看我们的更新吧,也许有好玩的新功能呢。",
|
"经常去Github看看我们的更新吧,也许有好玩的新功能呢。",
|
||||||
"试试本地大模型吧,有的也很强大的哦。",
|
"试试本地大模型吧,有的也很强大的哦。",
|
||||||
"很多强大的函数插件隐藏在下拉菜单中呢。",
|
"很多强大的函数插件隐藏在下拉菜单中呢。",
|
||||||
"红色的插件,使用之前需要把文件上传进去哦。",
|
"插件使用之前,需要把文件上传进去哦。",
|
||||||
"想添加功能按钮吗?读读readme很容易就学会啦。",
|
"上传文件时,可以把文件直接拖进对话中的哦。",
|
||||||
|
"上传文件时,可以文件或图片粘贴到输入区哦。",
|
||||||
|
"想添加基础功能按钮吗?打开“界面外观”菜单进行自定义吧!",
|
||||||
"敏感或机密的信息,不可以问AI的哦!",
|
"敏感或机密的信息,不可以问AI的哦!",
|
||||||
"LLM究竟是划时代的创新,还是扼杀创造力的毒药呢?"
|
"LLM究竟是划时代的创新,还是扼杀创造力的毒药呢?",
|
||||||
|
"休息一下,起来走动走动吧!",
|
||||||
|
"今天的阳光也很不错哦,不妨外出晒晒。",
|
||||||
|
"笑一笑,生活更美好!",
|
||||||
|
"遇到难题,深呼吸就能解决一半。",
|
||||||
|
"偶尔换换环境,灵感也许就来了。",
|
||||||
|
"小憩片刻,醒来便是满血复活。",
|
||||||
|
"技术改变生活,让我们共同进步。",
|
||||||
|
"保持好奇心,探索未知的世界。",
|
||||||
|
"遇到困难,记得还有朋友和AI陪在你身边。",
|
||||||
|
"劳逸结合,方能长久。",
|
||||||
|
"偶尔给自己放个假,放松心情。",
|
||||||
|
"不要害怕失败,勇敢尝试才能成功。"
|
||||||
] }
|
] }
|
||||||
],
|
],
|
||||||
"click": [
|
"click": [
|
||||||
236
toolbox.py
236
toolbox.py
@@ -7,6 +7,8 @@ import base64
|
|||||||
import gradio
|
import gradio
|
||||||
import shutil
|
import shutil
|
||||||
import glob
|
import glob
|
||||||
|
import logging
|
||||||
|
import uuid
|
||||||
from functools import wraps
|
from functools import wraps
|
||||||
from shared_utils.config_loader import get_conf
|
from shared_utils.config_loader import get_conf
|
||||||
from shared_utils.config_loader import set_conf
|
from shared_utils.config_loader import set_conf
|
||||||
@@ -25,7 +27,14 @@ from shared_utils.text_mask import apply_gpt_academic_string_mask
|
|||||||
from shared_utils.text_mask import build_gpt_academic_masked_string
|
from shared_utils.text_mask import build_gpt_academic_masked_string
|
||||||
from shared_utils.text_mask import apply_gpt_academic_string_mask_langbased
|
from shared_utils.text_mask import apply_gpt_academic_string_mask_langbased
|
||||||
from shared_utils.text_mask import build_gpt_academic_masked_string_langbased
|
from shared_utils.text_mask import build_gpt_academic_masked_string_langbased
|
||||||
|
from shared_utils.map_names import map_friendly_names_to_model
|
||||||
|
from shared_utils.map_names import map_model_to_friendly_names
|
||||||
|
from shared_utils.map_names import read_one_api_model_name
|
||||||
|
from shared_utils.handle_upload import html_local_file
|
||||||
|
from shared_utils.handle_upload import html_local_img
|
||||||
|
from shared_utils.handle_upload import file_manifest_filter_type
|
||||||
|
from shared_utils.handle_upload import extract_archive
|
||||||
|
from typing import List
|
||||||
pj = os.path.join
|
pj = os.path.join
|
||||||
default_user_name = "default_user"
|
default_user_name = "default_user"
|
||||||
|
|
||||||
@@ -77,7 +86,9 @@ def ArgsGeneralWrapper(f):
|
|||||||
该装饰器是大多数功能调用的入口。
|
该装饰器是大多数功能调用的入口。
|
||||||
函数示意图:https://mermaid.live/edit#pako:eNqNVFtPGkEY_StkntoEDQtLoTw0sWqapjQxVWPabmOm7AiEZZcsQ9QiiW012qixqdeqqIn10geBh6ZR8PJnmAWe-hc6l3VhrWnLEzNzzvnO953ZyYOYoSIQAWOaMR5LQBN7hvoU3UN_g5iu7imAXEyT4wUF3Pd0dT3y9KGYYUJsmK8V0GPGs0-QjkyojZgwk0Fm82C2dVghX08U8EaoOHjOfoEMU0XmADRhOksVWnNLjdpM82qFzB6S5Q_WWsUhuqCc3JtAsVR_OoMnhyZwXgHWwbS1d4gnsLVZJp-P6mfVxveqAgqC70Jz_pQCOGDKM5xFdNNPDdilF6uSU_hOYqu4a3MHYDZLDzq5fodrC3PWcEaFGPUaRiqJWK_W9g9rvRITa4dhy_0nw67SiePMp3oSR6PPn41DGgllkvkizYwsrmtaejTFd8V4yekGmT1zqrt4XGlAy8WTuiPULF01LksZvukSajfQQRAxmYi5S0D81sDcyzapVdn6sYFHkjhhGyel3frVQnvsnbR23lEjlhIlaOJiFPWzU5G4tfNJo8ejwp47-TbvJkKKZvmxA6SKo16oaazJysfG6klr9T0pbTW2ZqzlL_XaT8fYbQLXe4mSmvoCZXMaa7FePW6s7jVqK9bujvse3WFjY5_Z4KfsA4oiPY4T7Drvn1tLJTbG1to1qR79ulgk89-oJbvZzbIwJty6u20LOReWa9BvwserUd9s9MIKc3x5TUWEoAhUyJK5y85w_yG-dFu_R9waoU7K581y8W_qLle35-rG9Nxcrz8QHRsc0K-r9NViYRT36KsFvCCNzDRMqvSVyzOKAnACpZECIvSvCs2UAhS9QHEwh43BST0GItjMIS_I8e-sLwnj9A262cxA_ZVh0OUY1LJiDSJ5MAEiUijYLUtBORR6KElyQPaCSRDpksNSd8AfluSgHPaFC17wjrOlbgbzyyFf4IFPDvoD_sJvnkdK-g
|
函数示意图:https://mermaid.live/edit#pako:eNqNVFtPGkEY_StkntoEDQtLoTw0sWqapjQxVWPabmOm7AiEZZcsQ9QiiW012qixqdeqqIn10geBh6ZR8PJnmAWe-hc6l3VhrWnLEzNzzvnO953ZyYOYoSIQAWOaMR5LQBN7hvoU3UN_g5iu7imAXEyT4wUF3Pd0dT3y9KGYYUJsmK8V0GPGs0-QjkyojZgwk0Fm82C2dVghX08U8EaoOHjOfoEMU0XmADRhOksVWnNLjdpM82qFzB6S5Q_WWsUhuqCc3JtAsVR_OoMnhyZwXgHWwbS1d4gnsLVZJp-P6mfVxveqAgqC70Jz_pQCOGDKM5xFdNNPDdilF6uSU_hOYqu4a3MHYDZLDzq5fodrC3PWcEaFGPUaRiqJWK_W9g9rvRITa4dhy_0nw67SiePMp3oSR6PPn41DGgllkvkizYwsrmtaejTFd8V4yekGmT1zqrt4XGlAy8WTuiPULF01LksZvukSajfQQRAxmYi5S0D81sDcyzapVdn6sYFHkjhhGyel3frVQnvsnbR23lEjlhIlaOJiFPWzU5G4tfNJo8ejwp47-TbvJkKKZvmxA6SKo16oaazJysfG6klr9T0pbTW2ZqzlL_XaT8fYbQLXe4mSmvoCZXMaa7FePW6s7jVqK9bujvse3WFjY5_Z4KfsA4oiPY4T7Drvn1tLJTbG1to1qR79ulgk89-oJbvZzbIwJty6u20LOReWa9BvwserUd9s9MIKc3x5TUWEoAhUyJK5y85w_yG-dFu_R9waoU7K581y8W_qLle35-rG9Nxcrz8QHRsc0K-r9NViYRT36KsFvCCNzDRMqvSVyzOKAnACpZECIvSvCs2UAhS9QHEwh43BST0GItjMIS_I8e-sLwnj9A262cxA_ZVh0OUY1LJiDSJ5MAEiUijYLUtBORR6KElyQPaCSRDpksNSd8AfluSgHPaFC17wjrOlbgbzyyFf4IFPDvoD_sJvnkdK-g
|
||||||
"""
|
"""
|
||||||
def decorated(request: gradio.Request, cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg, *args):
|
def decorated(request: gradio.Request, cookies:dict, max_length:int, llm_model:str,
|
||||||
|
txt:str, txt2:str, top_p:float, temperature:float, chatbot:list,
|
||||||
|
history:list, system_prompt:str, plugin_advanced_arg:str, *args):
|
||||||
txt_passon = txt
|
txt_passon = txt
|
||||||
if txt == "" and txt2 != "": txt_passon = txt2
|
if txt == "" and txt2 != "": txt_passon = txt2
|
||||||
# 引入一个有cookie的chatbot
|
# 引入一个有cookie的chatbot
|
||||||
@@ -129,7 +140,7 @@ def ArgsGeneralWrapper(f):
|
|||||||
return decorated
|
return decorated
|
||||||
|
|
||||||
|
|
||||||
def update_ui(chatbot, history, msg="正常", **kwargs): # 刷新界面
|
def update_ui(chatbot:ChatBotWithCookies, history, msg="正常", **kwargs): # 刷新界面
|
||||||
"""
|
"""
|
||||||
刷新用户界面
|
刷新用户界面
|
||||||
"""
|
"""
|
||||||
@@ -159,7 +170,7 @@ def update_ui(chatbot, history, msg="正常", **kwargs): # 刷新界面
|
|||||||
yield cookies, chatbot_gr, history, msg
|
yield cookies, chatbot_gr, history, msg
|
||||||
|
|
||||||
|
|
||||||
def update_ui_lastest_msg(lastmsg, chatbot, history, delay=1): # 刷新界面
|
def update_ui_lastest_msg(lastmsg:str, chatbot:ChatBotWithCookies, history:list, delay=1): # 刷新界面
|
||||||
"""
|
"""
|
||||||
刷新用户界面
|
刷新用户界面
|
||||||
"""
|
"""
|
||||||
@@ -186,13 +197,12 @@ def CatchException(f):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
@wraps(f)
|
@wraps(f)
|
||||||
def decorated(main_input, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, *args, **kwargs):
|
def decorated(main_input:str, llm_kwargs:dict, plugin_kwargs:dict,
|
||||||
|
chatbot_with_cookie:ChatBotWithCookies, history:list, *args, **kwargs):
|
||||||
try:
|
try:
|
||||||
yield from f(main_input, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, *args, **kwargs)
|
yield from f(main_input, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, *args, **kwargs)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
from check_proxy import check_proxy
|
|
||||||
from toolbox import get_conf
|
from toolbox import get_conf
|
||||||
proxies = get_conf('proxies')
|
|
||||||
tb_str = '```\n' + trimmed_format_exc() + '```'
|
tb_str = '```\n' + trimmed_format_exc() + '```'
|
||||||
if len(chatbot_with_cookie) == 0:
|
if len(chatbot_with_cookie) == 0:
|
||||||
chatbot_with_cookie.clear()
|
chatbot_with_cookie.clear()
|
||||||
@@ -245,7 +255,7 @@ def HotReload(f):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
|
|
||||||
def get_reduce_token_percent(text):
|
def get_reduce_token_percent(text:str):
|
||||||
"""
|
"""
|
||||||
* 此函数未来将被弃用
|
* 此函数未来将被弃用
|
||||||
"""
|
"""
|
||||||
@@ -264,7 +274,7 @@ def get_reduce_token_percent(text):
|
|||||||
|
|
||||||
|
|
||||||
def write_history_to_file(
|
def write_history_to_file(
|
||||||
history, file_basename=None, file_fullname=None, auto_caption=True
|
history:list, file_basename:str=None, file_fullname:str=None, auto_caption:bool=True
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
|
将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
|
||||||
@@ -298,7 +308,7 @@ def write_history_to_file(
|
|||||||
return res
|
return res
|
||||||
|
|
||||||
|
|
||||||
def regular_txt_to_markdown(text):
|
def regular_txt_to_markdown(text:str):
|
||||||
"""
|
"""
|
||||||
将普通文本转换为Markdown格式的文本。
|
将普通文本转换为Markdown格式的文本。
|
||||||
"""
|
"""
|
||||||
@@ -308,7 +318,7 @@ def regular_txt_to_markdown(text):
|
|||||||
return text
|
return text
|
||||||
|
|
||||||
|
|
||||||
def report_exception(chatbot, history, a, b):
|
def report_exception(chatbot:ChatBotWithCookies, history:list, a:str, b:str):
|
||||||
"""
|
"""
|
||||||
向chatbot中添加错误信息
|
向chatbot中添加错误信息
|
||||||
"""
|
"""
|
||||||
@@ -316,7 +326,7 @@ def report_exception(chatbot, history, a, b):
|
|||||||
history.extend([a, b])
|
history.extend([a, b])
|
||||||
|
|
||||||
|
|
||||||
def find_free_port():
|
def find_free_port()->int:
|
||||||
"""
|
"""
|
||||||
返回当前系统中可用的未使用端口。
|
返回当前系统中可用的未使用端口。
|
||||||
"""
|
"""
|
||||||
@@ -329,58 +339,9 @@ def find_free_port():
|
|||||||
return s.getsockname()[1]
|
return s.getsockname()[1]
|
||||||
|
|
||||||
|
|
||||||
def extract_archive(file_path, dest_dir):
|
def find_recent_files(directory:str)->List[str]:
|
||||||
import zipfile
|
|
||||||
import tarfile
|
|
||||||
import os
|
|
||||||
|
|
||||||
# Get the file extension of the input file
|
|
||||||
file_extension = os.path.splitext(file_path)[1]
|
|
||||||
|
|
||||||
# Extract the archive based on its extension
|
|
||||||
if file_extension == ".zip":
|
|
||||||
with zipfile.ZipFile(file_path, "r") as zipobj:
|
|
||||||
zipobj.extractall(path=dest_dir)
|
|
||||||
print("Successfully extracted zip archive to {}".format(dest_dir))
|
|
||||||
|
|
||||||
elif file_extension in [".tar", ".gz", ".bz2"]:
|
|
||||||
with tarfile.open(file_path, "r:*") as tarobj:
|
|
||||||
tarobj.extractall(path=dest_dir)
|
|
||||||
print("Successfully extracted tar archive to {}".format(dest_dir))
|
|
||||||
|
|
||||||
# 第三方库,需要预先pip install rarfile
|
|
||||||
# 此外,Windows上还需要安装winrar软件,配置其Path环境变量,如"C:\Program Files\WinRAR"才可以
|
|
||||||
elif file_extension == ".rar":
|
|
||||||
try:
|
|
||||||
import rarfile
|
|
||||||
|
|
||||||
with rarfile.RarFile(file_path) as rf:
|
|
||||||
rf.extractall(path=dest_dir)
|
|
||||||
print("Successfully extracted rar archive to {}".format(dest_dir))
|
|
||||||
except:
|
|
||||||
print("Rar format requires additional dependencies to install")
|
|
||||||
return "\n\n解压失败! 需要安装pip install rarfile来解压rar文件。建议:使用zip压缩格式。"
|
|
||||||
|
|
||||||
# 第三方库,需要预先pip install py7zr
|
|
||||||
elif file_extension == ".7z":
|
|
||||||
try:
|
|
||||||
import py7zr
|
|
||||||
|
|
||||||
with py7zr.SevenZipFile(file_path, mode="r") as f:
|
|
||||||
f.extractall(path=dest_dir)
|
|
||||||
print("Successfully extracted 7z archive to {}".format(dest_dir))
|
|
||||||
except:
|
|
||||||
print("7z format requires additional dependencies to install")
|
|
||||||
return "\n\n解压失败! 需要安装pip install py7zr来解压7z文件"
|
|
||||||
else:
|
|
||||||
return ""
|
|
||||||
return ""
|
|
||||||
|
|
||||||
|
|
||||||
def find_recent_files(directory):
|
|
||||||
"""
|
"""
|
||||||
me: find files that is created with in one minutes under a directory with python, write a function
|
Find files that is created with in one minutes under a directory with python, write a function
|
||||||
gpt: here it is!
|
|
||||||
"""
|
"""
|
||||||
import os
|
import os
|
||||||
import time
|
import time
|
||||||
@@ -403,7 +364,7 @@ def find_recent_files(directory):
|
|||||||
return recent_files
|
return recent_files
|
||||||
|
|
||||||
|
|
||||||
def file_already_in_downloadzone(file, user_path):
|
def file_already_in_downloadzone(file:str, user_path:str):
|
||||||
try:
|
try:
|
||||||
parent_path = os.path.abspath(user_path)
|
parent_path = os.path.abspath(user_path)
|
||||||
child_path = os.path.abspath(file)
|
child_path = os.path.abspath(file)
|
||||||
@@ -415,7 +376,7 @@ def file_already_in_downloadzone(file, user_path):
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
|
def promote_file_to_downloadzone(file:str, rename_file:str=None, chatbot:ChatBotWithCookies=None):
|
||||||
# 将文件复制一份到下载区
|
# 将文件复制一份到下载区
|
||||||
import shutil
|
import shutil
|
||||||
|
|
||||||
@@ -450,12 +411,12 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
|
|||||||
return new_path
|
return new_path
|
||||||
|
|
||||||
|
|
||||||
def disable_auto_promotion(chatbot):
|
def disable_auto_promotion(chatbot:ChatBotWithCookies):
|
||||||
chatbot._cookies.update({"files_to_promote": []})
|
chatbot._cookies.update({"files_to_promote": []})
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|
||||||
def del_outdated_uploads(outdate_time_seconds, target_path_base=None):
|
def del_outdated_uploads(outdate_time_seconds:float, target_path_base:str=None):
|
||||||
if target_path_base is None:
|
if target_path_base is None:
|
||||||
user_upload_dir = get_conf("PATH_PRIVATE_UPLOAD")
|
user_upload_dir = get_conf("PATH_PRIVATE_UPLOAD")
|
||||||
else:
|
else:
|
||||||
@@ -474,39 +435,8 @@ def del_outdated_uploads(outdate_time_seconds, target_path_base=None):
|
|||||||
return
|
return
|
||||||
|
|
||||||
|
|
||||||
def html_local_file(file):
|
|
||||||
base_path = os.path.dirname(__file__) # 项目目录
|
|
||||||
if os.path.exists(str(file)):
|
|
||||||
file = f'file={file.replace(base_path, ".")}'
|
|
||||||
return file
|
|
||||||
|
|
||||||
|
def to_markdown_tabs(head: list, tabs: list, alignment=":---:", column=False, omit_path=None):
|
||||||
def html_local_img(__file, layout="left", max_width=None, max_height=None, md=True):
|
|
||||||
style = ""
|
|
||||||
if max_width is not None:
|
|
||||||
style += f"max-width: {max_width};"
|
|
||||||
if max_height is not None:
|
|
||||||
style += f"max-height: {max_height};"
|
|
||||||
__file = html_local_file(__file)
|
|
||||||
a = f'<div align="{layout}"><img src="{__file}" style="{style}"></div>'
|
|
||||||
if md:
|
|
||||||
a = f""
|
|
||||||
return a
|
|
||||||
|
|
||||||
|
|
||||||
def file_manifest_filter_type(file_list, filter_: list = None):
|
|
||||||
new_list = []
|
|
||||||
if not filter_:
|
|
||||||
filter_ = ["png", "jpg", "jpeg"]
|
|
||||||
for file in file_list:
|
|
||||||
if str(os.path.basename(file)).split(".")[-1] in filter_:
|
|
||||||
new_list.append(html_local_img(file, md=False))
|
|
||||||
else:
|
|
||||||
new_list.append(file)
|
|
||||||
return new_list
|
|
||||||
|
|
||||||
|
|
||||||
def to_markdown_tabs(head: list, tabs: list, alignment=":---:", column=False):
|
|
||||||
"""
|
"""
|
||||||
Args:
|
Args:
|
||||||
head: 表头:[]
|
head: 表头:[]
|
||||||
@@ -530,13 +460,17 @@ def to_markdown_tabs(head: list, tabs: list, alignment=":---:", column=False):
|
|||||||
for i in range(max_len):
|
for i in range(max_len):
|
||||||
row_data = [tab[i] if i < len(tab) else "" for tab in transposed_tabs]
|
row_data = [tab[i] if i < len(tab) else "" for tab in transposed_tabs]
|
||||||
row_data = file_manifest_filter_type(row_data, filter_=None)
|
row_data = file_manifest_filter_type(row_data, filter_=None)
|
||||||
|
# for dat in row_data:
|
||||||
|
# if (omit_path is not None) and os.path.exists(dat):
|
||||||
|
# dat = os.path.relpath(dat, omit_path)
|
||||||
tabs_list += "".join([tab_format % i for i in row_data]) + "|\n"
|
tabs_list += "".join([tab_format % i for i in row_data]) + "|\n"
|
||||||
|
|
||||||
return tabs_list
|
return tabs_list
|
||||||
|
|
||||||
|
|
||||||
def on_file_uploaded(
|
def on_file_uploaded(
|
||||||
request: gradio.Request, files, chatbot, txt, txt2, checkboxes, cookies
|
request: gradio.Request, files:List[str], chatbot:ChatBotWithCookies,
|
||||||
|
txt:str, txt2:str, checkboxes:List[str], cookies:dict
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
当文件被上传时的回调函数
|
当文件被上传时的回调函数
|
||||||
@@ -565,15 +499,21 @@ def on_file_uploaded(
|
|||||||
)
|
)
|
||||||
|
|
||||||
# 整理文件集合 输出消息
|
# 整理文件集合 输出消息
|
||||||
moved_files = [fp for fp in glob.glob(f"{target_path_base}/**/*", recursive=True)]
|
files = glob.glob(f"{target_path_base}/**/*", recursive=True)
|
||||||
moved_files_str = to_markdown_tabs(head=["文件"], tabs=[moved_files])
|
moved_files = [fp for fp in files]
|
||||||
|
max_file_to_show = 10
|
||||||
|
if len(moved_files) > max_file_to_show:
|
||||||
|
moved_files = moved_files[:max_file_to_show//2] + [f'... ( 📌省略{len(moved_files) - max_file_to_show}个文件的显示 ) ...'] + \
|
||||||
|
moved_files[-max_file_to_show//2:]
|
||||||
|
moved_files_str = to_markdown_tabs(head=["文件"], tabs=[moved_files], omit_path=target_path_base)
|
||||||
chatbot.append(
|
chatbot.append(
|
||||||
[
|
[
|
||||||
"我上传了文件,请查收",
|
"我上传了文件,请查收",
|
||||||
f"[Local Message] 收到以下文件: \n\n{moved_files_str}"
|
f"[Local Message] 收到以下文件 (上传到路径:{target_path_base}): " +
|
||||||
+ f"\n\n调用路径参数已自动修正到: \n\n{txt}"
|
f"\n\n{moved_files_str}" +
|
||||||
+ f"\n\n现在您点击任意函数插件时,以上文件将被作为输入参数"
|
f"\n\n调用路径参数已自动修正到: \n\n{txt}" +
|
||||||
+ upload_msg,
|
f"\n\n现在您点击任意函数插件时,以上文件将被作为输入参数" +
|
||||||
|
upload_msg,
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -594,18 +534,14 @@ def on_file_uploaded(
|
|||||||
return chatbot, txt, txt2, cookies
|
return chatbot, txt, txt2, cookies
|
||||||
|
|
||||||
|
|
||||||
def on_report_generated(cookies, files, chatbot):
|
def on_report_generated(cookies:dict, files:List[str], chatbot:ChatBotWithCookies):
|
||||||
# from toolbox import find_recent_files
|
|
||||||
# PATH_LOGGING = get_conf('PATH_LOGGING')
|
|
||||||
if "files_to_promote" in cookies:
|
if "files_to_promote" in cookies:
|
||||||
report_files = cookies["files_to_promote"]
|
report_files = cookies["files_to_promote"]
|
||||||
cookies.pop("files_to_promote")
|
cookies.pop("files_to_promote")
|
||||||
else:
|
else:
|
||||||
report_files = []
|
report_files = []
|
||||||
# report_files = find_recent_files(PATH_LOGGING)
|
|
||||||
if len(report_files) == 0:
|
if len(report_files) == 0:
|
||||||
return cookies, None, chatbot
|
return cookies, None, chatbot
|
||||||
# files.extend(report_files)
|
|
||||||
file_links = ""
|
file_links = ""
|
||||||
for f in report_files:
|
for f in report_files:
|
||||||
file_links += (
|
file_links += (
|
||||||
@@ -885,7 +821,7 @@ def is_the_upload_folder(string):
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def get_user(chatbotwithcookies):
|
def get_user(chatbotwithcookies:ChatBotWithCookies):
|
||||||
return chatbotwithcookies._cookies.get("user_name", default_user_name)
|
return chatbotwithcookies._cookies.get("user_name", default_user_name)
|
||||||
|
|
||||||
|
|
||||||
@@ -968,7 +904,7 @@ def get_pictures_list(path):
|
|||||||
return file_manifest
|
return file_manifest
|
||||||
|
|
||||||
|
|
||||||
def have_any_recent_upload_image_files(chatbot):
|
def have_any_recent_upload_image_files(chatbot:ChatBotWithCookies):
|
||||||
_5min = 5 * 60
|
_5min = 5 * 60
|
||||||
if chatbot is None:
|
if chatbot is None:
|
||||||
return False, None # chatbot is None
|
return False, None # chatbot is None
|
||||||
@@ -985,6 +921,18 @@ def have_any_recent_upload_image_files(chatbot):
|
|||||||
else:
|
else:
|
||||||
return False, None # most_recent_uploaded is too old
|
return False, None # most_recent_uploaded is too old
|
||||||
|
|
||||||
|
# Claude3 model supports graphic context dialogue, reads all images
|
||||||
|
def every_image_file_in_path(chatbot:ChatBotWithCookies):
|
||||||
|
if chatbot is None:
|
||||||
|
return False, [] # chatbot is None
|
||||||
|
most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
|
||||||
|
if not most_recent_uploaded:
|
||||||
|
return False, [] # most_recent_uploaded is None
|
||||||
|
path = most_recent_uploaded["path"]
|
||||||
|
file_manifest = get_pictures_list(path)
|
||||||
|
if len(file_manifest) == 0:
|
||||||
|
return False, []
|
||||||
|
return True, file_manifest
|
||||||
|
|
||||||
# Function to encode the image
|
# Function to encode the image
|
||||||
def encode_image(image_path):
|
def encode_image(image_path):
|
||||||
@@ -1005,3 +953,65 @@ def check_packages(packages=[]):
|
|||||||
spam_spec = importlib.util.find_spec(p)
|
spam_spec = importlib.util.find_spec(p)
|
||||||
if spam_spec is None:
|
if spam_spec is None:
|
||||||
raise ModuleNotFoundError
|
raise ModuleNotFoundError
|
||||||
|
|
||||||
|
|
||||||
|
def map_file_to_sha256(file_path):
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
with open(file_path, 'rb') as file:
|
||||||
|
content = file.read()
|
||||||
|
|
||||||
|
# Calculate the SHA-256 hash of the file contents
|
||||||
|
sha_hash = hashlib.sha256(content).hexdigest()
|
||||||
|
|
||||||
|
return sha_hash
|
||||||
|
|
||||||
|
|
||||||
|
def check_repeat_upload(new_pdf_path, pdf_hash):
|
||||||
|
'''
|
||||||
|
检查历史上传的文件是否与新上传的文件相同,如果相同则返回(True, 重复文件路径),否则返回(False,None)
|
||||||
|
'''
|
||||||
|
from toolbox import get_conf
|
||||||
|
import PyPDF2
|
||||||
|
|
||||||
|
user_upload_dir = os.path.dirname(os.path.dirname(new_pdf_path))
|
||||||
|
file_name = os.path.basename(new_pdf_path)
|
||||||
|
|
||||||
|
file_manifest = [f for f in glob.glob(f'{user_upload_dir}/**/{file_name}', recursive=True)]
|
||||||
|
|
||||||
|
for saved_file in file_manifest:
|
||||||
|
with open(new_pdf_path, 'rb') as file1, open(saved_file, 'rb') as file2:
|
||||||
|
reader1 = PyPDF2.PdfFileReader(file1)
|
||||||
|
reader2 = PyPDF2.PdfFileReader(file2)
|
||||||
|
|
||||||
|
# 比较页数是否相同
|
||||||
|
if reader1.getNumPages() != reader2.getNumPages():
|
||||||
|
continue
|
||||||
|
|
||||||
|
# 比较每一页的内容是否相同
|
||||||
|
for page_num in range(reader1.getNumPages()):
|
||||||
|
page1 = reader1.getPage(page_num).extractText()
|
||||||
|
page2 = reader2.getPage(page_num).extractText()
|
||||||
|
if page1 != page2:
|
||||||
|
continue
|
||||||
|
|
||||||
|
maybe_project_dir = glob.glob('{}/**/{}'.format(get_log_folder(), pdf_hash + ".tag"), recursive=True)
|
||||||
|
|
||||||
|
|
||||||
|
if len(maybe_project_dir) > 0:
|
||||||
|
return True, os.path.dirname(maybe_project_dir[0])
|
||||||
|
|
||||||
|
# 如果所有页的内容都相同,返回 True
|
||||||
|
return False, None
|
||||||
|
|
||||||
|
def log_chat(llm_model: str, input_str: str, output_str: str):
|
||||||
|
try:
|
||||||
|
if output_str and input_str and llm_model:
|
||||||
|
uid = str(uuid.uuid4().hex)
|
||||||
|
logging.info(f"[Model({uid})] {llm_model}")
|
||||||
|
input_str = input_str.rstrip('\n')
|
||||||
|
logging.info(f"[Query({uid})]\n{input_str}")
|
||||||
|
output_str = output_str.rstrip('\n')
|
||||||
|
logging.info(f"[Response({uid})]\n{output_str}\n\n")
|
||||||
|
except:
|
||||||
|
print(trimmed_format_exc())
|
||||||
|
|||||||
4
version
4
version
@@ -1,5 +1,5 @@
|
|||||||
{
|
{
|
||||||
"version": 3.71,
|
"version": 3.74,
|
||||||
"show_feature": true,
|
"show_feature": true,
|
||||||
"new_feature": "用绘图功能增强部分插件 <-> 基础功能区支持自动切换中英提示词 <-> 支持Mermaid绘图库(让大模型绘制脑图) <-> 支持Gemini-pro <-> 支持直接拖拽文件到上传区 <-> 支持将图片粘贴到输入区"
|
"new_feature": "增加多用户文件鉴权验证提高安全性 <-> 优化oneapi接入方法 <-> 接入Cohere和月之暗面模型 <-> 简化挂载二级目录的步骤 <-> 支持Mermaid绘图库(让大模型绘制脑图)"
|
||||||
}
|
}
|
||||||
|
|||||||
在新工单中引用
屏蔽一个用户