Mirrored from
https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 14:36:48 +00:00
Compare commits
100 commits
production ... version3.7
Commits in this comparison (SHA1):

8565a35cf7, 72d78eb150, 7aeda537ac, 6cea17d4b7, 20bc51d747, b8ebefa427, dcc9326f0b, 94fc396eb9, e594e1b928, 8fe545d97b,
6f978fa72e, 19be471aa8, 38956934fd, 32439e14b5, 317389bf4b, 2c740fc641, 96832a8228, 361557da3c, 5f18d4a1af, 0d10bc570f,
3ce7d9347d, 8a78d7b89f, 0e43b08837, 74bced2d35, 961a24846f, b7e4744f28, 71adc40901, a2099f1622, c0a697f6c8, bdde1d2fd7,
63373ab3b6, fb6566adde, 9f2ef9ec49, 35c1aa21e4, 627d739720, 37f15185b6, 9643e1c25f, 28eae2f80e, 7ab379688e, 3d4c6f54f1,
1714116a89, 2bc65a99ca, 0a2805513e, d698b96209, 6b1c6f0bf7, c22867b74c, 2abe665521, b0e6c4d365, d883c7f34b, aba871342f,
37744a9cb1, 480516380d, 60ba712131, a7c960dcb0, a96f842b3a, 417ca91e23, ef8fadfa18, 865c4ca993, 31304f481a, 1bd3637d32,
160a683667, 49ca03ca06, c625348ce1, 6d4a74893a, 5c7499cada, f522691529, ca85573ec1, 2c7bba5c63, e22f0226d5, 0f250305b4,
7606f5c130, 4f0dcc431c, 6ca0dd2f9e, e3e9921f6b, 867ddd355e, bb431db7d3, 43568b83e1, 2b90302851, f7588d4776, a0bfa7ba1c,
c60a7452bf, 68a49d3758, ac3d4cf073, 9479dd984c, 3c271302cc, 6e9936531d, 439147e4b7, 8d13821099, 49fe06ed69, 7882ce7304,
dc68e601a5, d169fb4b16, 36e19d5202, c5f1e4e392, d3f7267a63, f4127a9c9c, c181ad38b4, 107944f5b7, 8c7569b689, fa374bf1fc
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 6 lines changed)

```
@@ -69,9 +69,3 @@ body:
    attributes:
      label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
      description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
```
.github/ISSUE_TEMPLATE/feature_request.yml (vendored, 5 lines changed)

```
@@ -21,8 +21,3 @@ body:
    attributes:
      label: Feature Request | 功能请求
      description: Feature Request | 功能请求
```
.github/workflows/build-with-all-capacity-beta.yml (vendored, new file, 44 lines added)

```yaml
@@ -0,0 +1,44 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: build-with-all-capacity-beta

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_with_all_capacity_beta

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+AllCapacityBeta
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
```
.gitignore (vendored, 1 line changed)

```
@@ -152,3 +152,4 @@ request_llms/moss
media
flagged
request_llms/ChatGLM-6b-onnx-u8s8
.pre-commit-config.yaml
```
Dockerfile

```
@@ -18,7 +18,6 @@ WORKDIR /gpt

# 安装大部分依赖,利用Docker缓存加速以后的构建 (以下三行,可以删除)
COPY requirements.txt ./
COPY ./docs/gradio-3.32.6-py3-none-any.whl ./docs/gradio-3.32.6-py3-none-any.whl
RUN pip3 install -r requirements.txt
```
README.md (45 lines changed)

````
@@ -1,8 +1,8 @@
> **Caution**
>
> 2023.11.12: 某些依赖包尚不兼容python 3.12,推荐python 3.11。
>
> 2023.11.7: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目开源免费,近期发现有人蔑视开源协议并利用本项目违规圈钱,请提高警惕,谨防上当受骗。
> [!IMPORTANT]
> 2024.1.18: 更新3.70版本,支持Mermaid绘图库(让大模型绘制脑图)
> 2024.1.17: 恭迎GLM4,全力支持Qwen、GLM、DeepseekCoder等国内中文大语言基座模型!
> 2024.1.17: 某些依赖包尚不兼容python 3.12,推荐python 3.11。
> 2024.1.17: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。

<br>

@@ -42,13 +42,11 @@ If you like this project, please give it a Star.
Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanese.md) | [한국어](docs/README.Korean.md) | [Русский](docs/README.Russian.md) | [Français](docs/README.French.md). All translations have been provided by the project itself. To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
<br>

> 1.请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。
>
> 2.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。
> [!NOTE]
> 1.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。
> [](#installation) [](https://github.com/binary-husky/gpt_academic/releases) [](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) []([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki))
>
> 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交即可生效。
> 2.本项目兼容并鼓励尝试国内中文大语言基座模型如通义千问,智谱GLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交即可生效。

<br><br>

@@ -56,7 +54,7 @@ Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanes

功能(⭐= 近期新增功能) | 描述
--- | ---
⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),[智谱API](https://open.bigmodel.cn/),DALLE3, [DeepseekCoder](https://coder.deepseek.com/)
⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),[智谱GLM4](https://open.bigmodel.cn/),DALLE3, [DeepseekCoder](https://coder.deepseek.com/)
润色、翻译、代码解释 | 一键润色、翻译、查找论文语法错误、解释代码
[自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键
模块化设计 | 支持自定义强大的[插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
@@ -65,7 +63,7 @@ Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanes
Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [插件] 一键翻译或润色latex论文
批量注释生成 | [插件] 一键批量生成函数注释
Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?就是出自他的手笔
chat分析报告生成 | [插件] 运行后自动生成总结汇报
⭐支持mermaid图像渲染 | 支持让GPT生成[流程图](https://www.bilibili.com/video/BV18c41147H9/)、状态转移图、甘特图、饼状图、GitGraph等等(3.7版本)
[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [插件] PDF论文提取题目&摘要+翻译全文(多线程)
[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼写纠错+输出对照PDF
@@ -77,7 +75,6 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen,探索多Agent的智能涌现可能!
启动暗色[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题
[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)伺候的感觉一定会很不错吧?
⭐ChatGLM2微调模型 | 支持加载ChatGLM2微调模型,提供ChatGLM2微调辅助插件
更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/)
⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI,在Python中直接调用本项目的所有函数插件(开发中)
⭐虚空终端插件 | [插件] 能够使用自然语言直接调度本项目其他插件
@@ -111,7 +108,7 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- 多种大语言模型混合调用(ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
- 多种大语言模型混合调用(ChatGLM + OpenAI-GPT3.5 + GPT4)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>
@@ -152,10 +149,10 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
<details><summary>如果需要支持清华ChatGLM2/复旦MOSS/RWKV作为后端,请点击展开此处</summary>
<p>

【可选步骤】如果需要支持清华ChatGLM2/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
【可选步骤】如果需要支持清华ChatGLM3/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):

```sh
# 【可选步骤I】支持清华ChatGLM2。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
# 【可选步骤I】支持清华ChatGLM3。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt

# 【可选步骤II】支持复旦MOSS
@@ -197,7 +194,7 @@ pip install peft
docker-compose up
```

1. 仅ChatGPT+文心一言+spark等在线模型(推荐大多数人选择)
1. 仅ChatGPT + GLM4 + 文心一言+spark等在线模型(推荐大多数人选择)
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
@@ -209,7 +206,7 @@ pip install peft

P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用方案4或者方案0获取Latex功能。

2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时)
2. ChatGPT + GLM3 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时)
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)

``` sh
@@ -308,9 +305,9 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
</div>

8. OpenAI音频解析与总结
8. 基于mermaid的流图、脑图绘制
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/c518b82f-bd53-46e2-baf5-ad1b081c1da4" width="500" >
</div>

9. Latex全文校对纠错
@@ -327,8 +324,8 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h


### II:版本:

- version 3.70(todo): 优化AutoGen插件主题并设计一系列衍生插件
- version 3.80(TODO): 优化AutoGen插件主题并设计一系列衍生插件
- version 3.70: 引入Mermaid绘图,实现GPT画脑图等功能
- version 3.60: 引入AutoGen作为新一代插件的基石
- version 3.57: 支持GLM3,星火v3,文心一言v4,修复本地模型的并发BUG
- version 3.56: 支持动态追加基础功能按钮,新汇报PDF汇总页面
@@ -370,8 +367,8 @@ GPT Academic开发者QQ群:`610599535`

1. `master` 分支: 主分支,稳定版
2. `frontier` 分支: 开发分支,测试版
3. 如何接入其他大模型:[接入其他大模型](request_llms/README.md)

3. 如何[接入其他大模型](request_llms/README.md)
4. 访问GPT-Academic的[在线服务并支持我们](https://github.com/binary-husky/gpt_academic/wiki/online)

### V:参考与学习
````
config.py (37 lines changed)

```
@@ -89,11 +89,14 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-4-vision-preview",
                    "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
                    "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
                    "chatglm3", "moss", "claude-2"]
# P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
                    "gemini-pro", "chatglm3", "claude-2", "zhipuai"]
# P.S. 其他可用的模型还包括 [
# "moss", "qwen-turbo", "qwen-plus", "qwen-max"
# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
# "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"
# ]


# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
@@ -103,7 +106,11 @@ MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
# 选择本地模型变体(只有当AVAIL_LLM_MODELS包含了对应本地模型时,才会起作用)
# 如果你选择Qwen系列的模型,那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型
# 也可以是具体的模型路径
QWEN_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"
QWEN_LOCAL_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"


# 接入通义千问在线大模型 https://dashscope.console.aliyun.com/
DASHSCOPE_API_KEY = "" # 阿里灵积云API_KEY


# 百度千帆(LLM_MODEL="qianfan")
@@ -188,7 +195,13 @@ XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

# 接入智谱大模型
ZHIPUAI_API_KEY = ""
ZHIPUAI_MODEL = "chatglm_turbo"
ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4"


# # 火山引擎YUNQUE大模型
# YUNQUE_SECRET_KEY = ""
# YUNQUE_ACCESS_KEY = ""
# YUNQUE_MODEL = ""


# Claude API KEY
@@ -199,6 +212,10 @@ ANTHROPIC_API_KEY = ""
CUSTOM_API_KEY_PATTERN = ""


# Google Gemini API-Key
GEMINI_API_KEY = ''


# HUGGINGFACE的TOKEN,下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens
HUGGINGFACE_ACCESS_TOKEN = "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"

@@ -284,6 +301,12 @@ NUM_CUSTOM_BASIC_BTN = 4
│    ├── ZHIPUAI_API_KEY
│    └── ZHIPUAI_MODEL
│
├── "qwen-turbo" 等通义千问大模型
│    └── DASHSCOPE_API_KEY
│
├── "Gemini"
│    └── GEMINI_API_KEY
│
└── "newbing" Newbing接口不再稳定,不推荐使用
     ├── NEWBING_STYLE
     └── NEWBING_COOKIES
@@ -300,7 +323,7 @@ NUM_CUSTOM_BASIC_BTN = 4
├── "jittorllms_pangualpha"
├── "jittorllms_llama"
├── "deepseekcoder"
├── "qwen"
├── "qwen-local"
├── RWKV的支持见Wiki
└── "llama2"
```
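The keys above are consumed elsewhere in the project through `toolbox.get_conf`, the same accessor the crazy_functional.py diff further down uses for `ENABLE_AUDIO`. A minimal sketch of reading the new keys; the key names come from the config.py diff, while the script around them is illustrative only:

```python
# Minimal sketch: reading the updated config keys via toolbox.get_conf.
# Key names (AVAIL_LLM_MODELS, ZHIPUAI_MODEL, QWEN_LOCAL_MODEL_SELECTION)
# are taken from the config.py diff; the surrounding script is illustrative.
from toolbox import get_conf

avail_llm_models = get_conf("AVAIL_LLM_MODELS")        # list of enabled model tags
zhipuai_model = get_conf("ZHIPUAI_MODEL")              # "glm-4" after this change
qwen_local = get_conf("QWEN_LOCAL_MODEL_SELECTION")    # renamed from QWEN_MODEL_SELECTION

if "zhipuai" in avail_llm_models:
    print(f"ZhipuAI enabled, model: {zhipuai_model}")
```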
core_functional.py

````
@@ -3,30 +3,58 @@
# 'stop' 颜色对应 theme.py 中的 color_er
import importlib
from toolbox import clear_line_break

from textwrap import dedent

def get_core_functions():
    return {

        "英语学术润色": {
            # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
            "Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
                      r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
            # [1*] 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
            "Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, "
                      r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. "
                      r"Firstly, you should provide the polished paragraph. "
                      r"Secondly, you should list all your modification and explain the reasons to do so in markdown table." + "\n\n",
            # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
            # [2*] 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
            "Suffix": r"",
            # 按钮颜色 (默认 secondary)
            # [3] 按钮颜色 (可选参数,默认 secondary)
            "Color": r"secondary",
            # 按钮是否可见 (默认 True,即可见)
            # [4] 按钮是否可见 (可选参数,默认 True,即可见)
            "Visible": True,
            # 是否在触发时清除历史 (默认 False,即不处理之前的对话历史)
            "AutoClearHistory": False
            # [5] 是否在触发时清除历史 (可选参数,默认 False,即不处理之前的对话历史)
            "AutoClearHistory": False,
            # [6] 文本预处理 (可选参数,默认 None,举例:写个函数移除所有的换行符)
            "PreProcess": None,
        },
        "中文学术润色": {
            "Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
                      r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
            "Suffix": r"",

        "总结绘制脑图": {
            # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
            "Prefix": r"",
            # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
            "Suffix":
                dedent("\n"+r'''
                    ==============================
                    使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如:

                    以下是对以上文本的总结,以mermaid flowchart的形式展示:
                    ```mermaid
                    flowchart LR
                        A["节点名1"] --> B("节点名2")
                        B --> C{"节点名3"}
                        C --> D["节点名4"]
                        C --> |"箭头名1"| E["节点名5"]
                        C --> |"箭头名2"| F["节点名6"]
                    ```

                    警告:
                    (1)使用中文
                    (2)节点名字使用引号包裹,如["Laptop"]
                    (3)`|` 和 `"`之间不要存在空格
                    (4)根据情况选择flowchart LR(从左到右)或者flowchart TD(从上到下)
                '''),
        },

        "查找语法错误": {
            "Prefix": r"Help me ensure that the grammar and the spelling is correct. "
                      r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good. "
@@ -46,11 +74,15 @@ def get_core_functions():
            "Suffix": r"",
            "PreProcess": clear_line_break, # 预处理:清除换行符
        },

        "中译英": {
            "Prefix": r"Please translate following sentence to English:" + "\n\n",
            "Suffix": r"",
        },
        "学术中英互译": {

        "学术英中互译": {
            "Prefix": r"I want you to act as a scientific English-Chinese translator, " +
                      r"I will provide you with some paragraphs in one language " +
                      r"and your task is to accurately and academically translate the paragraphs only into the other language. " +
@@ -59,28 +91,35 @@ def get_core_functions():
                      r"such as natural language processing, and rhetorical knowledge " +
                      r"and experience about effective writing techniques to reply. " +
                      r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:" + "\n\n",
            "Suffix": "",
            "Color": "secondary",
            "Suffix": r"",
        },

        "英译中": {
            "Prefix": r"翻译成地道的中文:" + "\n\n",
            "Suffix": r"",
            "Visible": False,
        },

        "找图片": {
            "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," +
            "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL,"
                      r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n",
            "Suffix": r"",
            "Visible": False,
        },

        "解释代码": {
            "Prefix": r"请解释以下代码:" + "\n```\n",
            "Suffix": "\n```\n",
        },

        "参考文献转Bib": {
            "Prefix": r"Here are some bibliography items, please transform them into bibtex style." +
                      r"Note that, reference styles maybe more than one kind, you should transform each item correctly." +
                      r"Items need to be transformed:",
            "Prefix": r"Here are some bibliography items, please transform them into bibtex style."
                      r"Note that, reference styles maybe more than one kind, you should transform each item correctly."
                      r"Items need to be transformed:" + "\n\n",
            "Visible": False,
            "Suffix": r"",
        }
@@ -98,8 +137,14 @@ def handle_core_functionality(additional_fn, inputs, history, chatbot):
        return inputs, history
    else:
        # 预制功能
        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # 获取预处理函数(如果有的话)
        if "PreProcess" in core_functional[additional_fn]:
            if core_functional[additional_fn]["PreProcess"] is not None:
                inputs = core_functional[additional_fn]["PreProcess"](inputs)  # 获取预处理函数(如果有的话)
        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
        if core_functional[additional_fn].get("AutoClearHistory", False):
            history = []
        return inputs, history

if __name__ == "__main__":
    t = get_core_functions()["总结绘制脑图"]
    print(t["Prefix"] + t["Suffix"])
````
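The numbered comments ([1*] through [6]) that this diff adds effectively document the schema of a core-function entry, and the new `is not None` check in `handle_core_functionality` shows how the optional preprocessor is applied. A sketch of that assembly logic under the documented schema; the "一句话总结" entry is made up for illustration, only the field names and the `clear_line_break` helper come from the file:

```python
# Sketch: how handle_core_functionality assembles a prompt from one entry.
# Field names ([1*] Prefix ... [6] PreProcess) follow the schema documented
# in the diff above; the "一句话总结" entry itself is hypothetical.
from toolbox import clear_line_break  # real helper, imported at the top of core_functional.py

entry = {
    "Prefix": "Summarize the following text in one sentence:\n\n",  # [1*]
    "Suffix": "",                     # [2*]
    "Color": "secondary",             # [3] optional
    "Visible": True,                  # [4] optional
    "AutoClearHistory": True,         # [5] optional
    "PreProcess": clear_line_break,   # [6] optional callable, may be None
}

inputs, history = "line one\nline two", ["previous turn"]
if entry.get("PreProcess") is not None:   # same None check the diff introduces
    inputs = entry["PreProcess"](inputs)
inputs = entry["Prefix"] + inputs + entry["Suffix"]
if entry.get("AutoClearHistory", False):
    history = []
print(inputs, history)
```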
crazy_functional.py

```
@@ -37,110 +37,109 @@ def get_crazy_functions():
    from crazy_functions.批量Markdown翻译 import Markdown中译英
    from crazy_functions.虚空终端 import 虚空终端


    function_plugins = {
        "虚空终端": {
            "Group": "对话|编程|学术|智能体",
            "Color": "stop",
            "AsButton": True,
            "Function": HotReload(虚空终端)
            "Function": HotReload(虚空终端),
        },
        "解析整个Python项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": True,
            "Info": "解析一个Python项目的所有源文件(.py) | 输入参数为路径",
            "Function": HotReload(解析一个Python项目)
            "Function": HotReload(解析一个Python项目),
        },
        "载入对话历史存档(先上传存档或输入路径)": {
            "Group": "对话",
            "Color": "stop",
            "AsButton": False,
            "Info": "载入对话历史存档 | 输入参数为路径",
            "Function": HotReload(载入对话历史存档)
            "Function": HotReload(载入对话历史存档),
        },
        "删除所有本地对话历史记录(谨慎操作)": {
            "Group": "对话",
            "AsButton": False,
            "Info": "删除所有本地对话历史记录,谨慎操作 | 不需要输入参数",
            "Function": HotReload(删除所有本地对话历史记录)
            "Function": HotReload(删除所有本地对话历史记录),
        },
        "清除所有缓存文件(谨慎操作)": {
            "Group": "对话",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数",
            "Function": HotReload(清除缓存)
            "Function": HotReload(清除缓存),
        },
        "批量总结Word文档": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": True,
            "Info": "批量总结word文档 | 输入参数为路径",
            "Function": HotReload(总结word文档)
            "Function": HotReload(总结word文档),
        },
        "解析整个Matlab项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,
            "Info": "解析一个Matlab项目的所有源文件(.m) | 输入参数为路径",
            "Function": HotReload(解析一个Matlab项目)
            "Function": HotReload(解析一个Matlab项目),
        },
        "解析整个C++项目头文件": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "解析一个C++项目的所有头文件(.h/.hpp) | 输入参数为路径",
            "Function": HotReload(解析一个C项目的头文件)
            "Function": HotReload(解析一个C项目的头文件),
        },
        "解析整个C++项目(.cpp/.hpp/.c/.h)": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "解析一个C++项目的所有源文件(.cpp/.hpp/.c/.h)| 输入参数为路径",
            "Function": HotReload(解析一个C项目)
            "Function": HotReload(解析一个C项目),
        },
        "解析整个Go项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "解析一个Go项目的所有源文件 | 输入参数为路径",
            "Function": HotReload(解析一个Golang项目)
            "Function": HotReload(解析一个Golang项目),
        },
        "解析整个Rust项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "解析一个Rust项目的所有源文件 | 输入参数为路径",
            "Function": HotReload(解析一个Rust项目)
            "Function": HotReload(解析一个Rust项目),
        },
        "解析整个Java项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "解析一个Java项目的所有源文件 | 输入参数为路径",
            "Function": HotReload(解析一个Java项目)
            "Function": HotReload(解析一个Java项目),
        },
        "解析整个前端项目(js,ts,css等)": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "解析一个前端项目的所有源文件(js,ts,css等) | 输入参数为路径",
            "Function": HotReload(解析一个前端项目)
            "Function": HotReload(解析一个前端项目),
        },
        "解析整个Lua项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "解析一个Lua项目的所有源文件 | 输入参数为路径",
            "Function": HotReload(解析一个Lua项目)
            "Function": HotReload(解析一个Lua项目),
        },
        "解析整个CSharp项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "解析一个CSharp项目的所有源文件 | 输入参数为路径",
            "Function": HotReload(解析一个CSharp项目)
            "Function": HotReload(解析一个CSharp项目),
        },
        "解析Jupyter Notebook文件": {
            "Group": "编程",
@@ -156,103 +155,102 @@ def get_crazy_functions():
            "Color": "stop",
            "AsButton": False,
            "Info": "读取Tex论文并写摘要 | 输入参数为路径",
            "Function": HotReload(读文章写摘要)
            "Function": HotReload(读文章写摘要),
        },
        "翻译README或MD": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": True,
            "Info": "将Markdown翻译为中文 | 输入参数为路径或URL",
            "Function": HotReload(Markdown英译中)
            "Function": HotReload(Markdown英译中),
        },
        "翻译Markdown或README(支持Github链接)": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,
            "Info": "将Markdown或README翻译为中文 | 输入参数为路径或URL",
            "Function": HotReload(Markdown英译中)
            "Function": HotReload(Markdown英译中),
        },
        "批量生成函数注释": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "批量生成函数的注释 | 输入参数为路径",
            "Function": HotReload(批量生成函数注释)
            "Function": HotReload(批量生成函数注释),
        },
        "保存当前的对话": {
            "Group": "对话",
            "AsButton": True,
            "Info": "保存当前的对话 | 不需要输入参数",
            "Function": HotReload(对话历史存档)
            "Function": HotReload(对话历史存档),
        },
        "[多线程Demo]解析此项目本身(源码自译解)": {
            "Group": "对话|编程",
            "AsButton": False, # 加入下拉菜单中
            "Info": "多线程解析并翻译此项目的源码 | 不需要输入参数",
            "Function": HotReload(解析项目本身)
            "Function": HotReload(解析项目本身),
        },
        "历史上的今天": {
            "Group": "对话",
            "AsButton": True,
            "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数",
            "Function": HotReload(高阶功能模板函数)
            "Function": HotReload(高阶功能模板函数),
        },
        "精准翻译PDF论文": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": True,
            "Info": "精准翻译PDF论文为中文 | 输入参数为路径",
            "Function": HotReload(批量翻译PDF文档)
            "Function": HotReload(批量翻译PDF文档),
        },
        "询问多个GPT模型": {
            "Group": "对话",
            "Color": "stop",
            "AsButton": True,
            "Function": HotReload(同时问询)
            "Function": HotReload(同时问询),
        },
        "批量总结PDF文档": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "批量总结PDF文档的内容 | 输入参数为路径",
            "Function": HotReload(批量总结PDF文档)
            "Function": HotReload(批量总结PDF文档),
        },
        "谷歌学术检索助手(输入谷歌学术搜索页url)": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "使用谷歌学术检索助手搜索指定URL的结果 | 输入参数为谷歌学术搜索页的URL",
            "Function": HotReload(谷歌检索小助手)
            "Function": HotReload(谷歌检索小助手),
        },
        "理解PDF文档内容 (模仿ChatPDF)": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "理解PDF文档的内容并进行回答 | 输入参数为路径",
            "Function": HotReload(理解PDF文档内容标准文件输入)
            "Function": HotReload(理解PDF文档内容标准文件输入),
        },
        "英文Latex项目全文润色(输入路径或上传压缩包)": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
            "Function": HotReload(Latex英文润色)
            "Function": HotReload(Latex英文润色),
        },
        "英文Latex项目全文纠错(输入路径或上传压缩包)": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包",
            "Function": HotReload(Latex英文纠错)
            "Function": HotReload(Latex英文纠错),
        },
        "中文Latex项目全文润色(输入路径或上传压缩包)": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
            "Function": HotReload(Latex中文润色)
            "Function": HotReload(Latex中文润色),
        },

        # 已经被新插件取代
        # "Latex项目全文中译英(输入路径或上传压缩包)": {
        #     "Group": "学术",
@@ -261,7 +259,6 @@ def get_crazy_functions():
        #     "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
        #     "Function": HotReload(Latex中译英)
        # },

        # 已经被新插件取代
        # "Latex项目全文英译中(输入路径或上传压缩包)": {
        #     "Group": "学术",
@@ -270,130 +267,153 @@ def get_crazy_functions():
        #     "Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包",
        #     "Function": HotReload(Latex英译中)
        # },

        "批量Markdown中译英(输入路径或上传压缩包)": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False, # 加入下拉菜单中
            "Info": "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包",
            "Function": HotReload(Markdown中译英)
            "Function": HotReload(Markdown中译英),
        },
    }

    # -=--=- 尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=-
    try:
        from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
        function_plugins.update({

        function_plugins.update(
            {
                "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": {
                    "Group": "学术",
                    "Color": "stop",
                    "AsButton": False, # 加入下拉菜单中
                    # "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695",
                    "Function": HotReload(下载arxiv论文并翻译摘要)
                    "Function": HotReload(下载arxiv论文并翻译摘要),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.联网的ChatGPT import 连接网络回答问题
        function_plugins.update({

        function_plugins.update(
            {
                "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False, # 加入下拉菜单中
                    # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
                    "Function": HotReload(连接网络回答问题)
                    "Function": HotReload(连接网络回答问题),
                }
        })
            }
        )
        from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
        function_plugins.update({

        function_plugins.update(
            {
                "连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False, # 加入下拉菜单中
                    "Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题",
                    "Function": HotReload(连接bing搜索回答问题)
                    "Function": HotReload(连接bing搜索回答问题),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.解析项目源代码 import 解析任意code项目
        function_plugins.update({

        function_plugins.update(
            {
                "解析项目源代码(手动指定和筛选源代码文件类型)": {
                    "Group": "编程",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
                    "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
                    "Function": HotReload(解析任意code项目)
                    "ArgsReminder": '输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: "*.c, ^*.cpp, config.toml, ^*.toml"', # 高级参数输入区的显示提示
                    "Function": HotReload(解析任意code项目),
                },
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
        function_plugins.update({

        function_plugins.update(
            {
                "询问多个GPT模型(手动指定询问哪些模型)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
                    "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4", # 高级参数输入区的显示提示
                    "Function": HotReload(同时问询_指定模型)
                    "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&gpt-4", # 高级参数输入区的显示提示
                    "Function": HotReload(同时问询_指定模型),
                },
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
        function_plugins.update({
            "图片生成_DALLE2 (先切换模型到openai或api2d)": {

        function_plugins.update(
            {
                "图片生成_DALLE2 (先切换模型到gpt-*)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
                    "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示
                    "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容",
                    "Function": HotReload(图片生成_DALLE2)
                    "Function": HotReload(图片生成_DALLE2),
                },
        })
        function_plugins.update({
            "图片生成_DALLE3 (先切换模型到openai或api2d)": {
            }
        )
        function_plugins.update(
            {
                "图片生成_DALLE3 (先切换模型到gpt-*)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
                    "ArgsReminder": "在这里输入自定义参数「分辨率-质量(可选)-风格(可选)」, 参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」(默认) /「1792x1024」/「1024x1792」 || 质量支持 「-standard」(默认) /「-hd」 || 风格支持 「-vivid」(默认) /「-natural」", # 高级参数输入区的显示提示
                    "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
                    "Function": HotReload(图片生成_DALLE3)
                    "Function": HotReload(图片生成_DALLE3),
                },
        })
        function_plugins.update({
            "图片修改_DALLE2 (先切换模型到openai或api2d)": {
            }
        )
        function_plugins.update(
            {
                "图片修改_DALLE2 (先切换模型到gpt-*)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": False, # 调用时,唤起高级参数输入区(默认False)
                    # "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
                    "Function": HotReload(图片修改_DALLE2)
                    "Function": HotReload(图片修改_DALLE2),
                },
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.总结音视频 import 总结音视频
        function_plugins.update({

        function_plugins.update(
            {
                "批量总结音视频(输入路径或上传压缩包)": {
                    "Group": "对话",
                    "Color": "stop",
@@ -401,203 +421,255 @@ def get_crazy_functions():
                    "AdvancedArgs": True,
                    "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
                    "Info": "批量总结音频或视频 | 输入参数为路径",
                    "Function": HotReload(总结音视频)
                    "Function": HotReload(总结音视频),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.数学动画生成manim import 动画生成
        function_plugins.update({

        function_plugins.update(
            {
                "数学动画生成(Manim)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "Info": "按照自然语言描述生成一个动画 | 输入参数是一段话",
                    "Function": HotReload(动画生成)
                    "Function": HotReload(动画生成),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
        function_plugins.update({

        function_plugins.update(
            {
                "Markdown翻译(指定翻译成何种语言)": {
                    "Group": "编程",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。",
                    "Function": HotReload(Markdown翻译指定语言)
                    "Function": HotReload(Markdown翻译指定语言),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.知识库问答 import 知识库文件注入
        function_plugins.update({

        function_plugins.update(
            {
                "构建知识库(先上传文件素材,再运行此插件)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式,向知识库追加更多文档。",
                    "Function": HotReload(知识库文件注入)
                    "Function": HotReload(知识库文件注入),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.知识库问答 import 读取知识库作答
        function_plugins.update({

        function_plugins.update(
            {
                "知识库文件注入(构建知识库后,再运行此插件)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要构建知识库后再运行此插件。",
                    "Function": HotReload(读取知识库作答)
                    "Function": HotReload(读取知识库作答),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.交互功能函数模板 import 交互功能模板函数
        function_plugins.update({

        function_plugins.update(
            {
                "交互功能模板Demo函数(查找wallhaven.cc的壁纸)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "Function": HotReload(交互功能模板函数)
                    "Function": HotReload(交互功能模板函数),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
        function_plugins.update({

        function_plugins.update(
            {
                "Latex英文纠错+高亮修正位置 [需Latex]": {
                    "Group": "学术",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
                    "Function": HotReload(Latex英文纠错加PDF对比)
                    "Function": HotReload(Latex英文纠错加PDF对比),
                }
        })
            }
        )
        from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
        function_plugins.update({

        function_plugins.update(
            {
                "Arxiv论文精细翻译(输入arxivID)[需Latex]": {
                    "Group": "学术",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder":
                        "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " +
                        "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " +
                        'If the term "agent" is used in this section, it should be translated to "智能体". ',
                    "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
                    + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
                    + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
                    "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
                    "Function": HotReload(Latex翻译中文并重新编译PDF)
                    "Function": HotReload(Latex翻译中文并重新编译PDF),
                }
        })
        function_plugins.update({
            }
        )
        function_plugins.update(
            {
                "本地Latex论文精细翻译(上传Latex项目)[需Latex]": {
                    "Group": "学术",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder":
                        "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " +
                        "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " +
                        'If the term "agent" is used in this section, it should be translated to "智能体". ',
                    "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
                    + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
                    + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
                    "Info": "本地Latex论文精细翻译 | 输入参数是路径",
                    "Function": HotReload(Latex翻译中文并重新编译PDF)
                    "Function": HotReload(Latex翻译中文并重新编译PDF),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from toolbox import get_conf
        ENABLE_AUDIO = get_conf('ENABLE_AUDIO')

        ENABLE_AUDIO = get_conf("ENABLE_AUDIO")
        if ENABLE_AUDIO:
            from crazy_functions.语音助手 import 语音助手
            function_plugins.update({

            function_plugins.update(
                {
                    "实时语音对话": {
                        "Group": "对话",
                        "Color": "stop",
                        "AsButton": True,
                        "Info": "这是一个时刻聆听着的语音对话助手 | 没有输入参数",
                        "Function": HotReload(语音助手)
                        "Function": HotReload(语音助手),
                    }
            })
                }
            )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.批量翻译PDF文档_NOUGAT import 批量翻译PDF文档
        function_plugins.update({

        function_plugins.update(
            {
                "精准翻译PDF文档(NOUGAT)": {
                    "Group": "学术",
                    "Color": "stop",
                    "AsButton": False,
                    "Function": HotReload(批量翻译PDF文档)
                    "Function": HotReload(批量翻译PDF文档),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.函数动态生成 import 函数动态生成
        function_plugins.update({

        function_plugins.update(
            {
                "动态代码解释器(CodeInterpreter)": {
                    "Group": "智能体",
                    "Color": "stop",
                    "AsButton": False,
                    "Function": HotReload(函数动态生成)
                    "Function": HotReload(函数动态生成),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.多智能体 import 多智能体终端
        function_plugins.update({

        function_plugins.update(
            {
                "AutoGen多智能体终端(仅供测试)": {
                    "Group": "智能体",
                    "Color": "stop",
                    "AsButton": False,
                    "Function": HotReload(多智能体终端)
                    "Function": HotReload(多智能体终端),
                }
        })
            }
        )
    except:
        print(trimmed_format_exc())
        print('Load function plugin failed')
        print("Load function plugin failed")

    try:
        from crazy_functions.互动小游戏 import 随机小游戏

        function_plugins.update(
            {
                "随机互动小游戏(仅供测试)": {
                    "Group": "智能体",
                    "Color": "stop",
                    "AsButton": False,
                    "Function": HotReload(随机小游戏),
                }
            }
        )
    except:
        print(trimmed_format_exc())
        print("Load function plugin failed")

    # try:
    #     from crazy_functions.互动小游戏 import 随机小游戏
    #     from crazy_functions.高级功能函数模板 import 测试图表渲染
    #     function_plugins.update({
    #         "随机小游戏": {
    #         "绘制逻辑关系(测试图表渲染)": {
    #             "Group": "智能体",
    #             "Color": "stop",
    #             "AsButton": True,
    #             "Function": HotReload(随机小游戏)
    #             "Function": HotReload(测试图表渲染)
    #         }
    #     })
    # except:
@@ -618,8 +690,6 @@ def get_crazy_functions():
    # except:
    #     print('Load function plugin failed')


    """
    设置默认值:
    - 默认 Group = 对话
@@ -629,12 +699,12 @@ def get_crazy_functions():
    """
    for name, function_meta in function_plugins.items():
        if "Group" not in function_meta:
            function_plugins[name]["Group"] = '对话'
            function_plugins[name]["Group"] = "对话"
        if "AsButton" not in function_meta:
            function_plugins[name]["AsButton"] = True
        if "AdvancedArgs" not in function_meta:
            function_plugins[name]["AdvancedArgs"] = False
        if "Color" not in function_meta:
            function_plugins[name]["Color"] = 'secondary'
            function_plugins[name]["Color"] = "secondary"

    return function_plugins
```
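Every entry registered above follows the same shape, and the defaults loop at the end fills in `Group`, `AsButton`, `AdvancedArgs`, and `Color` when a plugin omits them. A sketch of registering one more plugin inside `get_crazy_functions`; the plugin function is hypothetical, while the field names, the generator signature, and the `HotReload` wrapper follow the code above:

```python
# Sketch: one more entry for function_plugins inside get_crazy_functions().
# The 统计字数 plugin is hypothetical; the field names, the plugin signature,
# and HotReload follow the surrounding file.

def 统计字数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Plugins in this project are generators; real ones yield from update_ui(...).
    chatbot.append([txt, f"字数:{len(txt)}"])
    yield

function_plugins.update(
    {
        "统计字数(示例)": {
            "Group": "对话",        # the defaults loop would fill this in anyway
            "Color": "stop",
            "AsButton": False,      # dropdown menu instead of a button
            "Info": "统计输入文本的字数 | 输入参数为一段文本",
            "Function": HotReload(统计字数),
        }
    }
)
```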
Two files containing class PaperFileGroup receive the same change:

```
@@ -26,8 +26,8 @@ class PaperFileGroup():
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)

@@ -26,8 +26,8 @@ class PaperFileGroup():
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
```
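Both hunks swap the old splitter in `.crazy_utils` for the new helper in `crazy_functions.pdf_fns.breakdown_txt`, whose signature no longer takes a token-counting function. A usage sketch grounded in the call shown above; the sample text and limit are illustrative:

```python
# Sketch of the new call shown in both hunks: the replacement helper takes
# only the text and a token limit (token counting is handled internally,
# unlike the old breakdown_txt_to_satisfy_token_limit_for_pdf).
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit

file_content = "First paragraph.\n\nSecond paragraph.\n\nThird paragraph."  # illustrative
max_token_limit = 1024                                                      # illustrative

segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
for j, segment in enumerate(segments):
    print(j, len(segment))
```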
crazy_functions/Latex输出PDF结果.py

```
@@ -5,7 +5,7 @@ import glob, os, requests, time
pj = os.path.join
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")

# =================================== 工具函数 ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# 专业词汇声明  = 'If the term "agent" is used in this section, it should be translated to "智能体". '
def switch_prompt(pfg, mode, more_requirement):
    """
@@ -142,7 +142,7 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
        from toolbox import extract_archive
        extract_archive(file_path=dst, dest_dir=extract_dst)
    return extract_dst, arxiv_id
# ========================================= 插件主程序1 =====================================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=


@CatchException
@@ -218,7 +218,7 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
    # <-------------- we are done ------------->
    return success

# ========================================= 插件主程序2 =====================================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

@CatchException
def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
```
crazy_functions/crazy_utils.py

```
@@ -139,6 +139,8 @@ def can_multi_process(llm):
    if llm.startswith('gpt-'): return True
    if llm.startswith('api2d-'): return True
    if llm.startswith('azure-'): return True
    if llm.startswith('spark'): return True
    if llm.startswith('zhipuai'): return True
    return False
```
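With the two new prefixes, spark and zhipuai requests are now allowed into the multi-threaded request path. A quick check of the updated gate, assuming the function is importable from this hunk's file, crazy_functions/crazy_utils.py:

```python
# Quick check of the updated gate; the import path assumes this hunk's file,
# crazy_functions/crazy_utils.py.
from crazy_functions.crazy_utils import can_multi_process

assert can_multi_process("gpt-3.5-turbo")    # 'gpt-' prefix
assert can_multi_process("sparkv3")          # 'spark' prefix
assert can_multi_process("zhipuai")          # newly allowed by this diff
assert not can_multi_process("chatglm3")     # local models stay single-threaded
```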
```
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
@@ -312,95 +314,6 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
    return gpt_response_collection


def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
    def cut(txt_tocut, must_break_at_empty_line):  # 递归
        if get_token_fn(txt_tocut) <= limit:
            return [txt_tocut]
        else:
            lines = txt_tocut.split('\n')
            estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
            estimated_line_cut = int(estimated_line_cut)
            for cnt in reversed(range(estimated_line_cut)):
                if must_break_at_empty_line:
                    if lines[cnt] != "":
                        continue
                print(cnt)
                prev = "\n".join(lines[:cnt])
                post = "\n".join(lines[cnt:])
                if get_token_fn(prev) < limit:
                    break
            if cnt == 0:
                raise RuntimeError("存在一行极长的文本!")
            # print(len(post))
            # 列表递归接龙
            result = [prev]
            result.extend(cut(post, must_break_at_empty_line))
            return result
    try:
        return cut(txt, must_break_at_empty_line=True)
    except RuntimeError:
        return cut(txt, must_break_at_empty_line=False)


def force_breakdown(txt, limit, get_token_fn):
    """
    当无法用标点、空行分割时,我们用最暴力的方法切割
    """
    for i in reversed(range(len(txt))):
        if get_token_fn(txt[:i]) < limit:
            return txt[:i], txt[i:]
    return "Tiktoken未知错误", "Tiktoken未知错误"

def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
    # 递归
    def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
        if get_token_fn(txt_tocut) <= limit:
            return [txt_tocut]
        else:
            lines = txt_tocut.split('\n')
            estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
            estimated_line_cut = int(estimated_line_cut)
            cnt = 0
            for cnt in reversed(range(estimated_line_cut)):
                if must_break_at_empty_line:
                    if lines[cnt] != "":
                        continue
                prev = "\n".join(lines[:cnt])
                post = "\n".join(lines[cnt:])
                if get_token_fn(prev) < limit:
                    break
            if cnt == 0:
                if break_anyway:
                    prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
                else:
                    raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
            # print(len(post))
            # 列表递归接龙
            result = [prev]
            result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
            return result
    try:
        # 第1次尝试,将双空行(\n\n)作为切分点
        return cut(txt, must_break_at_empty_line=True)
    except RuntimeError:
        try:
            # 第2次尝试,将单空行(\n)作为切分点
            return cut(txt, must_break_at_empty_line=False)
        except RuntimeError:
            try:
                # 第3次尝试,将英文句号(.)作为切分点
                res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False)  # 这个中文的句号是故意的,作为一个标识而存在
                return [r.replace('。\n', '.') for r in res]
            except RuntimeError as e:
                try:
                    # 第4次尝试,将中文句号(。)作为切分点
                    res = cut(txt.replace('。', '。。\n'), must_break_at_empty_line=False)
                    return [r.replace('。。\n', '。') for r in res]
                except RuntimeError as e:
                    # 第5次尝试,没办法了,随便切一下敷衍吧
                    return cut(txt, must_break_at_empty_line=False, break_anyway=True)
```
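The deleted `breakdown_txt_to_satisfy_token_limit_for_pdf` escalates through five strategies (double blank line, single line, English period, Chinese period, forced character cut) until every segment fits the token budget. How it used to be invoked, with `len` standing in for a real tokenizer count; illustrative only, and it assumes the removed definitions above are still in scope:

```python
# How the deleted splitter was invoked (signature taken from the removed code
# above); len() stands in for a tiktoken-based counter and the sample text is
# illustrative. Assumes the definitions shown above are in scope.
def get_token_fn(s):
    return len(s)

sample = "paragraph one\n\nparagraph two\n\n" + "x" * 120  # the long line forces the fallbacks

segments = breakdown_txt_to_satisfy_token_limit_for_pdf(sample, get_token_fn, limit=60)
# Try 1 splits on blank lines; failures fall back to single lines, '.', '。',
# and finally force_breakdown(), which cuts mid-line.
print([get_token_fn(seg) for seg in segments])  # every segment fits the limit
```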
```
def read_and_clean_pdf_text(fp):
    """
@@ -553,6 +466,9 @@ def read_and_clean_pdf_text(fp):
            return True
        else:
            return False
    # 对于某些PDF会有第一个段落就以小写字母开头,为了避免索引错误将其更改为大写
    if starts_with_lowercase_word(meta_txt[0]):
        meta_txt[0] = meta_txt[0].capitalize()
    for _ in range(100):
        for index, block_txt in enumerate(meta_txt):
            if starts_with_lowercase_word(block_txt):
@@ -631,7 +547,6 @@ def get_files_from_everything(txt, type): # type='.md'



@Singleton
class nougat_interface():
    def __init__(self):
```
New file (42 lines added): MiniGame_ASCII_Art

````
@@ -0,0 +1,42 @@
from toolbox import CatchException, update_ui, update_ui_lastest_msg
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
import random


class MiniGame_ASCII_Art(GptAcademicGameBaseState):
    def step(self, prompt, chatbot, history):
        if self.step_cnt == 0:
            chatbot.append(["我画你猜(动物)", "请稍等..."])
        else:
            if prompt.strip() == 'exit':
                self.delete_game = True
                yield from update_ui_lastest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.)
                return
            chatbot.append([prompt, ""])
        yield from update_ui(chatbot=chatbot, history=history)

        if self.step_cnt == 0:
            self.lock_plugin(chatbot)
            self.cur_task = 'draw'

        if self.cur_task == 'draw':
            avail_obj = ["狗","猫","鸟","鱼","老鼠","蛇"]
            self.obj = random.choice(avail_obj)
            inputs = "I want to play a game called Guess the ASCII art. You can draw the ASCII art and I will try to guess it. " + \
                f"This time you draw a {self.obj}. Note that you must not indicate what you have draw in the text, and you should only produce the ASCII art wrapped by ```. "
            raw_res = predict_no_ui_long_connection(inputs=inputs, llm_kwargs=self.llm_kwargs, history=[], sys_prompt="")
            self.cur_task = 'identify user guess'
            res = get_code_block(raw_res)
            history += ['', f'the answer is {self.obj}', inputs, res]
            yield from update_ui_lastest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.)

        elif self.cur_task == 'identify user guess':
            if is_same_thing(self.obj, prompt, self.llm_kwargs):
                self.delete_game = True
                yield from update_ui_lastest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.)
            else:
                self.cur_task = 'identify user guess'
                yield from update_ui_lastest_msg(lastmsg="猜错了,再试试,输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.)
````
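The game leans on `game_utils.get_code_block` to pull the fenced ASCII drawing out of the model reply; its implementation is not part of this diff. A minimal version consistent with how it is used here (a sketch only, not the project's actual code):

````python
import re

def get_code_block(reply: str) -> str:
    # Minimal sketch only -- the real implementation lives in
    # crazy_functions/game_fns/game_utils.py and is not shown in this diff.
    # Return the contents of the first ```-fenced block in the reply.
    match = re.search(r"```(?:\w+\n)?(.*?)```", reply, flags=re.DOTALL)
    if match is None:
        raise RuntimeError("no fenced code block in model reply")
    return match.group(1)

print(get_code_block("Here you go:\n```\n /\\_/\\\n( o.o )\n```"))
````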
@@ -0,0 +1,212 @@
prompts_hs = """ 请以“{headstart}”为开头,编写一个小说的第一幕。

- 尽量短,不要包含太多情节,因为你接下来将会与用户互动续写下面的情节,要留出足够的互动空间。
- 出现人物时,给出人物的名字。
- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
- 字数要求:第一幕的字数少于300字,且少于2个段落。
"""

prompts_interact = """ 小说的前文回顾:
「
{previously_on_story}
」

你是一个作家,根据以上的情节,给出4种不同的后续剧情发展方向,每个发展方向都精明扼要地用一句话说明。稍后,我将在这4个选择中,挑选一种剧情发展。

输出格式例如:
1. 后续剧情发展1
2. 后续剧情发展2
3. 后续剧情发展3
4. 后续剧情发展4
"""


prompts_resume = """小说的前文回顾:
「
{previously_on_story}
」

你是一个作家,我们正在互相讨论,确定后续剧情的发展。
在以下的剧情发展中,
「
{choice}
」
我认为更合理的是:{user_choice}。
请在前文的基础上(不要重复前文),围绕我选定的剧情情节,编写小说的下一幕。

- 禁止杜撰不符合我选择的剧情。
- 尽量短,不要包含太多情节,因为你接下来将会与用户互动续写下面的情节,要留出足够的互动空间。
- 不要重复前文。
- 出现人物时,给出人物的名字。
- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
- 小说的下一幕字数少于300字,且少于2个段落。
"""


prompts_terminate = """小说的前文回顾:
「
{previously_on_story}
」

你是一个作家,我们正在互相讨论,确定后续剧情的发展。
现在,故事该结束了,我认为最合理的故事结局是:{user_choice}。

请在前文的基础上(不要重复前文),编写小说的最后一幕。

- 不要重复前文。
- 出现人物时,给出人物的名字。
- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
- 字数要求:最后一幕的字数少于1000字。
"""


from toolbox import CatchException, update_ui, update_ui_lastest_msg
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
import random


class MiniGame_ResumeStory(GptAcademicGameBaseState):
    story_headstart = [
        '先行者知道,他现在是全宇宙中唯一的一个人了。',
        '深夜,一个年轻人穿过天安门广场向纪念堂走去。在二十二世纪编年史中,计算机把他的代号定为M102。',
        '他知道,这最后一课要提前讲了。又一阵剧痛从肝部袭来,几乎使他晕厥过去。',
        '在距地球五万光年的远方,在银河系的中心,一场延续了两万年的星际战争已接近尾声。那里的太空中渐渐隐现出一个方形区域,仿佛灿烂的群星的背景被剪出一个方口。',
        '伊依一行三人乘坐一艘游艇在南太平洋上做吟诗航行,他们的目的地是南极,如果几天后能顺利到达那里,他们将钻出地壳去看诗云。',
        '很多人生来就会莫名其妙地迷上一样东西,仿佛他的出生就是要和这东西约会似的,正是这样,圆圆迷上了肥皂泡。'
    ]

    def begin_game_step_0(self, prompt, chatbot, history):
        # init game at step 0
        self.headstart = random.choice(self.story_headstart)
        self.story = []
        chatbot.append(["互动写故事", f"这次的故事开头是:{self.headstart}"])
        self.sys_prompt_ = '你是一个想象力丰富的杰出作家。正在与你的朋友互动,一起写故事,因此你每次写的故事段落应少于300字(结局除外)。'

    def generate_story_image(self, story_paragraph):
        try:
            from crazy_functions.图片生成 import gen_image
            prompt_ = predict_no_ui_long_connection(inputs=story_paragraph, llm_kwargs=self.llm_kwargs, history=[], sys_prompt='你需要根据用户给出的小说段落,进行简短的环境描写。要求:80字以内。')
            image_url, image_path = gen_image(self.llm_kwargs, prompt_, '512x512', model="dall-e-2", quality='standard', style='natural')
            return f'<br/><div align="center"><img src="file={image_path}"></div>'
        except:
            return ''

    def step(self, prompt, chatbot, history):

        """
        首先,处理游戏初始化等特殊情况
        """
        if self.step_cnt == 0:
            self.begin_game_step_0(prompt, chatbot, history)
            self.lock_plugin(chatbot)
            self.cur_task = 'head_start'
        else:
            if prompt.strip() == 'exit' or prompt.strip() == '结束剧情':
                # should we terminate game here?
                self.delete_game = True
                yield from update_ui_lastest_msg(lastmsg=f"游戏结束。", chatbot=chatbot, history=history, delay=0.)
                return
            if '剧情收尾' in prompt:
                self.cur_task = 'story_terminate'
            # # well, game resumes
            # chatbot.append([prompt, ""])
            # update ui, don't keep the user waiting
            yield from update_ui(chatbot=chatbot, history=history)

        """
        处理游戏的主体逻辑
        """
        if self.cur_task == 'head_start':
            """
            这是游戏的第一步
            """
            inputs_ = prompts_hs.format(headstart=self.headstart)
            history_ = []
            story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs_, '故事开头', self.llm_kwargs,
                chatbot, history_, self.sys_prompt_
            )
            self.story.append(story_paragraph)
            # # 配图
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)

            # # 构建后续剧情引导
            previously_on_story = ""
            for s in self.story:
                previously_on_story += s + '\n'
            inputs_ = prompts_interact.format(previously_on_story=previously_on_story)
            history_ = []
            self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs_, '请在以下几种故事走向中,选择一种(当然,您也可以选择给出其他故事走向):', self.llm_kwargs,
                chatbot,
                history_,
                self.sys_prompt_
            )
            self.cur_task = 'user_choice'

        elif self.cur_task == 'user_choice':
            """
            根据用户的提示,确定故事的下一步
            """
            if '请在以下几种故事走向中,选择一种' in chatbot[-1][0]: chatbot.pop(-1)
            previously_on_story = ""
            for s in self.story:
                previously_on_story += s + '\n'
            inputs_ = prompts_resume.format(previously_on_story=previously_on_story, choice=self.next_choices, user_choice=prompt)
            history_ = []
            story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs_, f'下一段故事(您的选择是:{prompt})。', self.llm_kwargs,
                chatbot, history_, self.sys_prompt_
            )
            self.story.append(story_paragraph)
            # # 配图
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)

            # # 构建后续剧情引导
            previously_on_story = ""
            for s in self.story:
                previously_on_story += s + '\n'
            inputs_ = prompts_interact.format(previously_on_story=previously_on_story)
            history_ = []
            self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs_,
                '请在以下几种故事走向中,选择一种。当然,您也可以给出您心中的其他故事走向。另外,如果您希望剧情立即收尾,请输入剧情走向,并以“剧情收尾”四个字提示程序。', self.llm_kwargs,
                chatbot,
                history_,
                self.sys_prompt_
            )
            self.cur_task = 'user_choice'

        elif self.cur_task == 'story_terminate':
            """
            根据用户的提示,确定故事的结局
            """
            previously_on_story = ""
            for s in self.story:
                previously_on_story += s + '\n'
            inputs_ = prompts_terminate.format(previously_on_story=previously_on_story, user_choice=prompt)
            history_ = []
            story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs_, f'故事收尾(您的选择是:{prompt})。', self.llm_kwargs,
                chatbot, history_, self.sys_prompt_
            )
            # # 配图
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>'+ self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)

            # terminate game
            self.delete_game = True
            return
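Both minigames above drive the same small state machine inherited from GptAcademicGameBaseState: step_cnt flags the first call, cur_task routes the next user input, and delete_game ends the session. A schematic sketch of that contract (the real base class lives in crazy_functions/multi_stage/multi_stage_utils.py and is not shown in this diff, so anything beyond the three attributes used above is an assumption):

# Schematic only -- not the repository's actual base class.
class GameStateSketch:
    def __init__(self):
        self.step_cnt = 0         # how many times step() has run
        self.cur_task = None      # routes the next user input
        self.delete_game = False  # a step sets this True to end the session

    def continue_game(self, prompt, chatbot, history):
        # delegate one turn to the subclass, then advance the counter
        yield from self.step(prompt, chatbot, history)
        self.step_cnt += 1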
37 changes: crazy_functions/ipc_fns/mp.py (normal file)
@@ -0,0 +1,37 @@
import platform
import pickle
import multiprocessing

def run_in_subprocess_wrapper_func(v_args):
    func, args, kwargs, return_dict, exception_dict = pickle.loads(v_args)
    import sys
    try:
        result = func(*args, **kwargs)
        return_dict['result'] = result
    except Exception as e:
        exc_info = sys.exc_info()
        exception_dict['exception'] = exc_info

def run_in_subprocess_with_timeout(func, timeout=60):
    if platform.system() == 'Linux':
        def wrapper(*args, **kwargs):
            return_dict = multiprocessing.Manager().dict()
            exception_dict = multiprocessing.Manager().dict()
            v_args = pickle.dumps((func, args, kwargs, return_dict, exception_dict))
            process = multiprocessing.Process(target=run_in_subprocess_wrapper_func, args=(v_args,))
            process.start()
            process.join(timeout)
            if process.is_alive():
                process.terminate()
                raise TimeoutError(f'功能单元{str(func)}未能在规定时间内完成任务')
            process.close()
            if 'exception' in exception_dict:
                # ooops, the subprocess ran into an exception
                exc_info = exception_dict['exception']
                raise exc_info[1].with_traceback(exc_info[2])
            if 'result' in return_dict.keys():
                # If the subprocess ran successfully, return the result
                return return_dict['result']
        return wrapper
    else:
        return func
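A minimal usage sketch of the wrapper above (slow_task is a hypothetical module-level function, so it pickles cleanly). On Linux the call runs in a throwaway subprocess and raises TimeoutError if it overruns; on other platforms the function is returned unwrapped and no timeout is enforced:

import time

def slow_task(n):
    time.sleep(n)
    return n * 2

guarded = run_in_subprocess_with_timeout(slow_task, timeout=5)
print(guarded(1))   # 2, completes well within the 5 s budget
# guarded(10)       # would raise TimeoutError on Linux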
@@ -175,7 +175,6 @@ class LatexPaperFileGroup():
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        from request_llms.bridge_all import model_info
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
@@ -192,13 +191,12 @@ class LatexPaperFileGroup():
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from ..crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
                    self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
        print('Segmentation: done')

    def merge_result(self):
        self.file_result = ["" for _ in range(len(self.file_paths))]
@@ -404,7 +402,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
            result_pdf = pj(work_folder_modified, f'merge_diff.pdf') # get pdf path
            promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI
    if modified_pdf_success:
        yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 即将退出 ...', chatbot, history) # 刷新Gradio前端界面
        yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 正在尝试生成对比PDF, 请稍候 ...', chatbot, history) # 刷新Gradio前端界面
        result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path
        origin_pdf = pj(work_folder_original, f'{main_file_original}.pdf') # get pdf path
        if os.path.exists(pj(work_folder, '..', 'translation')):
@@ -1,15 +1,18 @@
import os, shutil
import re
import numpy as np

PRESERVE = 0
TRANSFORM = 1

pj = os.path.join

class LinkedListNode():

class LinkedListNode:
    """
    Linked List Node
    """

    def __init__(self, string, preserve=True) -> None:
        self.string = string
        self.preserve = preserve
@@ -18,12 +21,14 @@ class LinkedListNode():
        # self.begin_line = 0
        # self.begin_char = 0


def convert_to_linklist(text, mask):
    root = LinkedListNode("", preserve=True)
    current_node = root
    for c, m, i in zip(text, mask, range(len(text))):
        if (m==PRESERVE and current_node.preserve) \
            or (m==TRANSFORM and not current_node.preserve):
        if (m == PRESERVE and current_node.preserve) or (
            m == TRANSFORM and not current_node.preserve
        ):
            # add
            current_node.string += c
        else:
@@ -31,6 +36,7 @@ def convert_to_linklist(text, mask):
            current_node = current_node.next
    return root


def post_process(root):
    # 修复括号
    node = root
@@ -38,21 +44,24 @@ def post_process(root):
        string = node.string
        if node.preserve:
            node = node.next
            if node is None: break
            if node is None:
                break
            continue

        def break_check(string):
            str_stack = [""] # (lv, index)
            for i, c in enumerate(string):
                if c == '{':
                    str_stack.append('{')
                elif c == '}':
                if c == "{":
                    str_stack.append("{")
                elif c == "}":
                    if len(str_stack) == 1:
                        print('stack fix')
                        print("stack fix")
                        return i
                    str_stack.pop(-1)
                else:
                    str_stack[-1] += c
            return -1

        bp = break_check(string)

        if bp == -1:
@@ -69,51 +78,66 @@ def post_process(root):
            node.next = q

        node = node.next
        if node is None: break
        if node is None:
            break

    # 屏蔽空行和太短的句子
    node = root
    while True:
        if len(node.string.strip('\n').strip(''))==0: node.preserve = True
        if len(node.string.strip('\n').strip(''))<42: node.preserve = True
        if len(node.string.strip("\n").strip("")) == 0:
            node.preserve = True
        if len(node.string.strip("\n").strip("")) < 42:
            node.preserve = True
        node = node.next
        if node is None: break
        if node is None:
            break
    node = root
    while True:
        if node.next and node.preserve and node.next.preserve:
            node.string += node.next.string
            node.next = node.next.next
        node = node.next
        if node is None: break
        if node is None:
            break

    # 将前后断行符脱离
    node = root
    prev_node = None
    while True:
        if not node.preserve:
            lstriped_ = node.string.lstrip().lstrip('\n')
            if (prev_node is not None) and (prev_node.preserve) and (len(lstriped_)!=len(node.string)):
            lstriped_ = node.string.lstrip().lstrip("\n")
            if (
                (prev_node is not None)
                and (prev_node.preserve)
                and (len(lstriped_) != len(node.string))
            ):
                prev_node.string += node.string[: -len(lstriped_)]
                node.string = lstriped_
            rstriped_ = node.string.rstrip().rstrip('\n')
            if (node.next is not None) and (node.next.preserve) and (len(rstriped_)!=len(node.string)):
            rstriped_ = node.string.rstrip().rstrip("\n")
            if (
                (node.next is not None)
                and (node.next.preserve)
                and (len(rstriped_) != len(node.string))
            ):
                node.next.string = node.string[len(rstriped_) :] + node.next.string
                node.string = rstriped_
        # =====
        # =-=-=
        prev_node = node
        node = node.next
        if node is None: break
        if node is None:
            break

    # 标注节点的行数范围
    node = root
    n_line = 0
    expansion = 2
    while True:
        n_l = node.string.count('\n')
        n_l = node.string.count("\n")
        node.range = [n_line - expansion, n_line + n_l + expansion] # 失败时,扭转的范围
        n_line = n_line + n_l
        node = node.next
        if node is None: break
        if node is None:
            break
    return root
@@ -131,12 +155,14 @@ def set_forbidden_text(text, mask, pattern, flags=0):
    you can mask out (mask = PRESERVE so that text become untouchable for GPT)
    everything between "\begin{equation}" and "\end{equation}"
    """
    if isinstance(pattern, list): pattern = '|'.join(pattern)
    if isinstance(pattern, list):
        pattern = "|".join(pattern)
    pattern_compile = re.compile(pattern, flags)
    for res in pattern_compile.finditer(text):
        mask[res.span()[0] : res.span()[1]] = PRESERVE
    return text, mask


def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
    """
    Move area out of preserve area (make text editable for GPT)
@@ -144,7 +170,8 @@ def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
    e.g.
    \begin{abstract} blablablablablabla. \end{abstract}
    """
    if isinstance(pattern, list): pattern = '|'.join(pattern)
    if isinstance(pattern, list):
        pattern = "|".join(pattern)
    pattern_compile = re.compile(pattern, flags)
    for res in pattern_compile.finditer(text):
        if not forbid_wrapper:
@@ -155,6 +182,7 @@ def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
            mask[res.regs[1][1] : res.regs[0][1]] = PRESERVE # abstract
    return text, mask


def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
    """
    Add a preserve text area in this paper (text become untouchable for GPT).
@@ -167,15 +195,21 @@ def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
        brace_level = -1
        p = begin = end = res.regs[0][0]
        for _ in range(1024 * 16):
            if text[p] == '}' and brace_level == 0: break
            elif text[p] == '}': brace_level -= 1
            elif text[p] == '{': brace_level += 1
            if text[p] == "}" and brace_level == 0:
                break
            elif text[p] == "}":
                brace_level -= 1
            elif text[p] == "{":
                brace_level += 1
            p += 1
        end = p + 1
        mask[begin:end] = PRESERVE
    return text, mask

def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wrapper=True):

def reverse_forbidden_text_careful_brace(
    text, mask, pattern, flags=0, forbid_wrapper=True
):
    """
    Move area out of preserve area (make text editable for GPT)
    count the number of the braces so as to catch the complete text area.
@@ -187,9 +221,12 @@ def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wr
        brace_level = 0
        p = begin = end = res.regs[1][0]
        for _ in range(1024 * 16):
            if text[p] == '}' and brace_level == 0: break
            elif text[p] == '}': brace_level -= 1
            elif text[p] == '{': brace_level += 1
            if text[p] == "}" and brace_level == 0:
                break
            elif text[p] == "}":
                brace_level -= 1
            elif text[p] == "{":
                brace_level += 1
            p += 1
        end = p
        mask[begin:end] = TRANSFORM
@@ -198,27 +235,42 @@ def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wr
            mask[end : res.regs[0][1]] = PRESERVE
    return text, mask


def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42):
    """
    Find all \begin{} ... \end{} text blocks with fewer than limit_n_lines lines.
    Add it to preserve area
    """
    pattern_compile = re.compile(pattern, flags)

    def search_with_line_limit(text, mask):
        for res in pattern_compile.finditer(text):
            cmd = res.group(1) # begin{what}
            this = res.group(2) # content between begin and end
            this_mask = mask[res.regs[2][0] : res.regs[2][1]]
            white_list = ['document', 'abstract', 'lemma', 'definition', 'sproof',
                          'em', 'emph', 'textit', 'textbf', 'itemize', 'enumerate']
            if (cmd in white_list) or this.count('\n') >= limit_n_lines: # use a magical number 42
            white_list = [
                "document",
                "abstract",
                "lemma",
                "definition",
                "sproof",
                "em",
                "emph",
                "textit",
                "textbf",
                "itemize",
                "enumerate",
            ]
            if (cmd in white_list) or this.count(
                "\n"
            ) >= limit_n_lines: # use a magical number 42
                this, this_mask = search_with_line_limit(this, this_mask)
                mask[res.regs[2][0] : res.regs[2][1]] = this_mask
            else:
                mask[res.regs[0][0] : res.regs[0][1]] = PRESERVE
        return text, mask
    return search_with_line_limit(text, mask)

    return search_with_line_limit(text, mask)


"""
@@ -227,6 +279,7 @@ Latex Merge File
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
"""

def find_main_tex_file(file_manifest, mode):
    """
    在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。
@@ -234,27 +287,36 @@ def find_main_tex_file(file_manifest, mode):
    """
    canidates = []
    for texf in file_manifest:
        if os.path.basename(texf).startswith('merge'):
        if os.path.basename(texf).startswith("merge"):
            continue
        with open(texf, 'r', encoding='utf8', errors='ignore') as f:
        with open(texf, "r", encoding="utf8", errors="ignore") as f:
            file_content = f.read()
        if r'\documentclass' in file_content:
        if r"\documentclass" in file_content:
            canidates.append(texf)
        else:
            continue

    if len(canidates) == 0:
        raise RuntimeError('无法找到一个主Tex文件(包含documentclass关键字)')
        raise RuntimeError("无法找到一个主Tex文件(包含documentclass关键字)")
    elif len(canidates) == 1:
        return canidates[0]
    else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回
        canidates_score = []
        # 给出一些判定模板文档的词作为扣分项
        unexpected_words = ['\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers']
        expected_words = ['\input', '\ref', '\cite']
        unexpected_words = [
            "\\LaTeX",
            "manuscript",
            "Guidelines",
            "font",
            "citations",
            "rejected",
            "blind review",
            "reviewers",
        ]
        expected_words = ["\\input", "\\ref", "\\cite"]
        for texf in canidates:
            canidates_score.append(0)
            with open(texf, 'r', encoding='utf8', errors='ignore') as f:
            with open(texf, "r", encoding="utf8", errors="ignore") as f:
                file_content = f.read()
                file_content = rm_comments(file_content)
            for uw in unexpected_words:
@@ -266,6 +328,7 @@ def find_main_tex_file(file_manifest, mode):
        select = np.argmax(canidates_score) # 取评分最高者返回
        return canidates[select]


def rm_comments(main_file):
    new_file_remove_comment_lines = []
    for l in main_file.splitlines():
@@ -274,30 +337,39 @@ def rm_comments(main_file):
            pass
        else:
            new_file_remove_comment_lines.append(l)
    main_file = '\n'.join(new_file_remove_comment_lines)
    main_file = "\n".join(new_file_remove_comment_lines)
    # main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file) # 将 \include 命令转换为 \input 命令
    main_file = re.sub(r'(?<!\\)%.*', '', main_file) # 使用正则表达式查找半行注释, 并替换为空字符串
    main_file = re.sub(r"(?<!\\)%.*", "", main_file) # 使用正则表达式查找半行注释, 并替换为空字符串
    return main_file


def find_tex_file_ignore_case(fp):
    dir_name = os.path.dirname(fp)
    base_name = os.path.basename(fp)
    # 如果输入的文件路径是正确的
    if os.path.isfile(pj(dir_name, base_name)): return pj(dir_name, base_name)
    if os.path.isfile(pj(dir_name, base_name)):
        return pj(dir_name, base_name)
    # 如果不正确,试着加上.tex后缀试试
    if not base_name.endswith('.tex'): base_name+='.tex'
    if os.path.isfile(pj(dir_name, base_name)): return pj(dir_name, base_name)
    if not base_name.endswith(".tex"):
        base_name += ".tex"
    if os.path.isfile(pj(dir_name, base_name)):
        return pj(dir_name, base_name)
    # 如果还找不到,解除大小写限制,再试一次
    import glob
    for f in glob.glob(dir_name+'/*.tex'):

    for f in glob.glob(dir_name + "/*.tex"):
        base_name_s = os.path.basename(fp)
        base_name_f = os.path.basename(f)
        if base_name_s.lower() == base_name_f.lower(): return f
        if base_name_s.lower() == base_name_f.lower():
            return f
        # 试着加上.tex后缀试试
        if not base_name_s.endswith('.tex'): base_name_s+='.tex'
        if base_name_s.lower() == base_name_f.lower(): return f
        if not base_name_s.endswith(".tex"):
            base_name_s += ".tex"
        if base_name_s.lower() == base_name_f.lower():
            return f
    return None

def merge_tex_files_(project_foler, main_file, mode):
    """
    Merge Tex project recursively
@@ -309,18 +381,18 @@ def merge_tex_files_(project_foler, main_file, mode):
        fp_ = find_tex_file_ignore_case(fp)
        if fp_:
            try:
                with open(fp_, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
                with open(fp_, "r", encoding="utf-8", errors="replace") as fx:
                    c = fx.read()
            except:
                c = f"\n\nWarning from GPT-Academic: LaTex source file is missing!\n\n"
        else:
            raise RuntimeError(f'找不到{fp},Tex源文件缺失!')
            raise RuntimeError(f"找不到{fp},Tex源文件缺失!")
        c = merge_tex_files_(project_foler, c, mode)
        main_file = main_file[: s.span()[0]] + c + main_file[s.span()[1] :]
    return main_file


def find_title_and_abs(main_file):

    def extract_abstract_1(text):
        pattern = r"\\abstract\{(.*?)\}"
        match = re.search(pattern, text, re.DOTALL)
@@ -362,21 +434,30 @@ def merge_tex_files(project_foler, main_file, mode):
    main_file = merge_tex_files_(project_foler, main_file, mode)
    main_file = rm_comments(main_file)

    if mode == 'translate_zh':
    if mode == "translate_zh":
        # find paper documentclass
        pattern = re.compile(r'\\documentclass.*\n')
        pattern = re.compile(r"\\documentclass.*\n")
        match = pattern.search(main_file)
        assert match is not None, "Cannot find documentclass statement!"
        position = match.end()
        add_ctex = '\\usepackage{ctex}\n'
        add_url = '\\usepackage{url}\n' if '{url}' not in main_file else ''
        add_ctex = "\\usepackage{ctex}\n"
        add_url = "\\usepackage{url}\n" if "{url}" not in main_file else ""
        main_file = main_file[:position] + add_ctex + add_url + main_file[position:]
        # fontset=windows
        import platform
        main_file = re.sub(r"\\documentclass\[(.*?)\]{(.*?)}", r"\\documentclass[\1,fontset=windows,UTF8]{\2}",main_file)
        main_file = re.sub(r"\\documentclass{(.*?)}", r"\\documentclass[fontset=windows,UTF8]{\1}",main_file)

        main_file = re.sub(
            r"\\documentclass\[(.*?)\]{(.*?)}",
            r"\\documentclass[\1,fontset=windows,UTF8]{\2}",
            main_file,
        )
        main_file = re.sub(
            r"\\documentclass{(.*?)}",
            r"\\documentclass[fontset=windows,UTF8]{\1}",
            main_file,
        )
        # find paper abstract
        pattern_opt1 = re.compile(r'\\begin\{abstract\}.*\n')
        pattern_opt1 = re.compile(r"\\begin\{abstract\}.*\n")
        pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
        match_opt1 = pattern_opt1.search(main_file)
        match_opt2 = pattern_opt2.search(main_file)
@@ -385,7 +466,9 @@ def merge_tex_files(project_foler, main_file, mode):
        main_file = insert_abstract(main_file)
        match_opt1 = pattern_opt1.search(main_file)
        match_opt2 = pattern_opt2.search(main_file)
        assert (match_opt1 is not None) or (match_opt2 is not None), "Cannot find paper abstract section!"
        assert (match_opt1 is not None) or (
            match_opt2 is not None
        ), "Cannot find paper abstract section!"
    return main_file


@@ -395,6 +478,7 @@ The GPT-Academic program cannot find abstract section in this paper.
\end{abstract}
"""

def insert_abstract(tex_content):
    if "\\maketitle" in tex_content:
        # find the position of "\maketitle"
@@ -402,7 +486,13 @@ def insert_abstract(tex_content):
        # find the nearest ending line
        end_line_index = tex_content.find("\n", find_index)
        # insert "abs_str" on the next line
        modified_tex = tex_content[:end_line_index+1] + '\n\n' + insert_missing_abs_str + '\n\n' + tex_content[end_line_index+1:]
        modified_tex = (
            tex_content[: end_line_index + 1]
            + "\n\n"
            + insert_missing_abs_str
            + "\n\n"
            + tex_content[end_line_index + 1 :]
        )
        return modified_tex
    elif r"\begin{document}" in tex_content:
        # find the position of "\begin{document}"
@@ -410,16 +500,25 @@ def insert_abstract(tex_content):
        # find the nearest ending line
        end_line_index = tex_content.find("\n", find_index)
        # insert "abs_str" on the next line
        modified_tex = tex_content[:end_line_index+1] + '\n\n' + insert_missing_abs_str + '\n\n' + tex_content[end_line_index+1:]
        modified_tex = (
            tex_content[: end_line_index + 1]
            + "\n\n"
            + insert_missing_abs_str
            + "\n\n"
            + tex_content[end_line_index + 1 :]
        )
        return modified_tex
    else:
        return tex_content


"""
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Post process
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
"""


def mod_inbraket(match):
    """
    为啥chatgpt会把cite里面的逗号换成中文逗号呀
@@ -428,11 +527,12 @@ def mod_inbraket(match):
    cmd = match.group(1)
    str_to_modify = match.group(2)
    # modify the matched string
    str_to_modify = str_to_modify.replace(':', ':') # 前面是中文冒号,后面是英文冒号
    str_to_modify = str_to_modify.replace(',', ',') # 前面是中文逗号,后面是英文逗号
    str_to_modify = str_to_modify.replace(":", ":") # 前面是中文冒号,后面是英文冒号
    str_to_modify = str_to_modify.replace(",", ",") # 前面是中文逗号,后面是英文逗号
    # str_to_modify = 'BOOM'
    return "\\" + cmd + "{" + str_to_modify + "}"


def fix_content(final_tex, node_string):
    """
    Fix common GPT errors to increase success rate
@@ -444,9 +544,9 @@ def fix_content(final_tex, node_string):

    if "Traceback" in final_tex and "[Local Message]" in final_tex:
        final_tex = node_string # 出问题了,还原原文
    if node_string.count('\\begin') != final_tex.count('\\begin'):
    if node_string.count("\\begin") != final_tex.count("\\begin"):
        final_tex = node_string # 出问题了,还原原文
    if node_string.count('\_') > 0 and node_string.count('\_') > final_tex.count('\_'):
    if node_string.count("\_") > 0 and node_string.count("\_") > final_tex.count("\_"):
        # walk and replace any _ without \
        final_tex = re.sub(r"(?<!\\)_", "\\_", final_tex)

@@ -454,24 +554,32 @@ def fix_content(final_tex, node_string):
        # this function counts the number of { and }
        brace_level = 0
        for c in string:
            if c == "{": brace_level += 1
            elif c == "}": brace_level -= 1
            if c == "{":
                brace_level += 1
            elif c == "}":
                brace_level -= 1
        return brace_level

    def join_most(tex_t, tex_o):
        # this function joins the translated string and the original string when something goes wrong
        p_t = 0
        p_o = 0

        def find_next(string, chars, begin):
            p = begin
            while p < len(string):
                if string[p] in chars: return p, string[p]
                if string[p] in chars:
                    return p, string[p]
                p += 1
            return None, None

        while True:
            res1, char = find_next(tex_o, ['{','}'], p_o)
            if res1 is None: break
            res1, char = find_next(tex_o, ["{", "}"], p_o)
            if res1 is None:
                break
            res2, char = find_next(tex_t, [char], p_t)
            if res2 is None: break
            if res2 is None:
                break
            p_o = res1 + 1
            p_t = res2 + 1
        return tex_t[:p_t] + tex_o[p_o:]
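A quick illustration of the underscore repair used in fix_content above: the negative lookbehind only re-escapes underscores that are not already preceded by a backslash, so previously escaped ones are left untouched.

import re
# the same call fix_content makes, on a toy string
print(re.sub(r"(?<!\\)_", "\\_", r"f_1 + g\_2"))  # prints: f\_1 + g\_2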
@@ -481,9 +589,13 @@ def fix_content(final_tex, node_string):
        final_tex = join_most(final_tex, node_string)
    return final_tex


def compile_latex_with_timeout(command, cwd, timeout=60):
    import subprocess
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)

    process = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
    )
    try:
        stdout, stderr = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
@@ -493,43 +605,52 @@ def compile_latex_with_timeout(command, cwd, timeout=60):
        return False
    return True


def run_in_subprocess_wrapper_func(func, args, kwargs, return_dict, exception_dict):
    import sys

    try:
        result = func(*args, **kwargs)
        return_dict['result'] = result
        return_dict["result"] = result
    except Exception as e:
        exc_info = sys.exc_info()
        exception_dict['exception'] = exc_info
        exception_dict["exception"] = exc_info


def run_in_subprocess(func):
    import multiprocessing

    def wrapper(*args, **kwargs):
        return_dict = multiprocessing.Manager().dict()
        exception_dict = multiprocessing.Manager().dict()
        process = multiprocessing.Process(target=run_in_subprocess_wrapper_func,
                                          args=(func, args, kwargs, return_dict, exception_dict))
        process = multiprocessing.Process(
            target=run_in_subprocess_wrapper_func,
            args=(func, args, kwargs, return_dict, exception_dict),
        )
        process.start()
        process.join()
        process.close()
        if 'exception' in exception_dict:
        if "exception" in exception_dict:
            # ooops, the subprocess ran into an exception
            exc_info = exception_dict['exception']
            exc_info = exception_dict["exception"]
            raise exc_info[1].with_traceback(exc_info[2])
        if 'result' in return_dict.keys():
        if "result" in return_dict.keys():
            # If the subprocess ran successfully, return the result
            return return_dict['result']
            return return_dict["result"]

    return wrapper


def _merge_pdfs(pdf1_path, pdf2_path, output_path):
    import PyPDF2 # PyPDF2这个库有严重的内存泄露问题,把它放到子进程中运行,从而方便内存的释放

    Percent = 0.95
    # raise RuntimeError('PyPDF2 has a serious memory leak problem, please use other tools to merge PDF files.')
    # Open the first PDF file
    with open(pdf1_path, 'rb') as pdf1_file:
    with open(pdf1_path, "rb") as pdf1_file:
        pdf1_reader = PyPDF2.PdfFileReader(pdf1_file)
        # Open the second PDF file
        with open(pdf2_path, 'rb') as pdf2_file:
        with open(pdf2_path, "rb") as pdf2_file:
            pdf2_reader = PyPDF2.PdfFileReader(pdf2_file)
            # Create a new PDF file to store the merged pages
            output_writer = PyPDF2.PdfFileWriter()
@@ -549,14 +670,25 @@ def _merge_pdfs(pdf1_path, pdf2_path, output_path):
                page2 = PyPDF2.PageObject.createBlankPage(pdf1_reader)
            # Create a new empty page with double width
            new_page = PyPDF2.PageObject.createBlankPage(
                width = int(int(page1.mediaBox.getWidth()) + int(page2.mediaBox.getWidth()) * Percent),
                height = max(page1.mediaBox.getHeight(), page2.mediaBox.getHeight())
                width=int(
                    int(page1.mediaBox.getWidth())
                    + int(page2.mediaBox.getWidth()) * Percent
                ),
                height=max(page1.mediaBox.getHeight(), page2.mediaBox.getHeight()),
            )
            new_page.mergeTranslatedPage(page1, 0, 0)
            new_page.mergeTranslatedPage(page2, int(int(page1.mediaBox.getWidth())-int(page2.mediaBox.getWidth())* (1-Percent)), 0)
            new_page.mergeTranslatedPage(
                page2,
                int(
                    int(page1.mediaBox.getWidth())
                    - int(page2.mediaBox.getWidth()) * (1 - Percent)
                ),
                0,
            )
            output_writer.addPage(new_page)
        # Save the merged PDF file
        with open(output_path, 'wb') as output_file:
        with open(output_path, "wb") as output_file:
            output_writer.write(output_file)


merge_pdfs = run_in_subprocess(_merge_pdfs) # PyPDF2这个库有严重的内存泄露问题,把它放到子进程中运行,从而方便内存的释放
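A usage sketch of the subprocess-wrapped merger above (the file paths are hypothetical). Each output page holds the two input pages side by side, overlapped by the ratio that Percent = 0.95 sets, and the PyPDF2 memory leak dies with the worker process:

# hypothetical paths; merge_pdfs runs _merge_pdfs in a disposable subprocess
merge_pdfs('workdir/original.pdf', 'workdir/translated.pdf', 'workdir/comparison.pdf')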
@@ -0,0 +1,125 @@
from crazy_functions.ipc_fns.mp import run_in_subprocess_with_timeout

def force_breakdown(txt, limit, get_token_fn):
    """ 当无法用标点、空行分割时,我们用最暴力的方法切割
    """
    for i in reversed(range(len(txt))):
        if get_token_fn(txt[:i]) < limit:
            return txt[:i], txt[i:]
    return "Tiktoken未知错误", "Tiktoken未知错误"


def maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage):
    """ 为了加速计算,我们采用一个特殊的手段。当 remain_txt_to_cut > `_max` 时, 我们把 _max 后的文字转存至 remain_txt_to_cut_storage
    当 remain_txt_to_cut < `_min` 时,我们再把 remain_txt_to_cut_storage 中的部分文字取出
    """
    _min = int(5e4)
    _max = int(1e5)
    # print(len(remain_txt_to_cut), len(remain_txt_to_cut_storage))
    if len(remain_txt_to_cut) < _min and len(remain_txt_to_cut_storage) > 0:
        remain_txt_to_cut = remain_txt_to_cut + remain_txt_to_cut_storage
        remain_txt_to_cut_storage = ""
    if len(remain_txt_to_cut) > _max:
        remain_txt_to_cut_storage = remain_txt_to_cut[_max:] + remain_txt_to_cut_storage
        remain_txt_to_cut = remain_txt_to_cut[:_max]
    return remain_txt_to_cut, remain_txt_to_cut_storage


def cut(limit, get_token_fn, txt_tocut, must_break_at_empty_line, break_anyway=False):
    """ 文本切分
    """
    res = []
    total_len = len(txt_tocut)
    fin_len = 0
    remain_txt_to_cut = txt_tocut
    remain_txt_to_cut_storage = ""
    # 为了加速计算,我们采用一个特殊的手段。当 remain_txt_to_cut > `_max` 时, 我们把 _max 后的文字转存至 remain_txt_to_cut_storage
    remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage)

    while True:
        if get_token_fn(remain_txt_to_cut) <= limit:
            # 如果剩余文本的token数小于限制,那么就不用切了
            res.append(remain_txt_to_cut); fin_len+=len(remain_txt_to_cut)
            break
        else:
            # 如果剩余文本的token数大于限制,那么就切
            lines = remain_txt_to_cut.split('\n')

            # 估计一个切分点
            estimated_line_cut = limit / get_token_fn(remain_txt_to_cut) * len(lines)
            estimated_line_cut = int(estimated_line_cut)

            # 开始查找合适切分点的偏移(cnt)
            cnt = 0
            for cnt in reversed(range(estimated_line_cut)):
                if must_break_at_empty_line:
                    # 首先尝试用双空行(\n\n)作为切分点
                    if lines[cnt] != "":
                        continue
                prev = "\n".join(lines[:cnt])
                post = "\n".join(lines[cnt:])
                if get_token_fn(prev) < limit:
                    break

            if cnt == 0:
                # 如果没有找到合适的切分点
                if break_anyway:
                    # 是否允许暴力切分
                    prev, post = force_breakdown(remain_txt_to_cut, limit, get_token_fn)
                else:
                    # 不允许,直接报错
                    raise RuntimeError(f"存在一行极长的文本!{remain_txt_to_cut}")

            # 追加列表
            res.append(prev); fin_len+=len(prev)
            # 准备下一次迭代
            remain_txt_to_cut = post
            remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage)
            process = fin_len/total_len
            print(f'正在文本切分 {int(process*100)}%')
            if len(remain_txt_to_cut.strip()) == 0:
                break
    return res


def breakdown_text_to_satisfy_token_limit_(txt, limit, llm_model="gpt-3.5-turbo"):
    """ 使用多种方式尝试切分文本,以满足 token 限制
    """
    from request_llms.bridge_all import model_info
    enc = model_info[llm_model]['tokenizer']
    def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))
    try:
        # 第1次尝试,将双空行(\n\n)作为切分点
        return cut(limit, get_token_fn, txt, must_break_at_empty_line=True)
    except RuntimeError:
        try:
            # 第2次尝试,将单空行(\n)作为切分点
            return cut(limit, get_token_fn, txt, must_break_at_empty_line=False)
        except RuntimeError:
            try:
                # 第3次尝试,将英文句号(.)作为切分点
                res = cut(limit, get_token_fn, txt.replace('.', '。\n'), must_break_at_empty_line=False) # 这个中文的句号是故意的,作为一个标识而存在
                return [r.replace('。\n', '.') for r in res]
            except RuntimeError as e:
                try:
                    # 第4次尝试,将中文句号(。)作为切分点
                    res = cut(limit, get_token_fn, txt.replace('。', '。。\n'), must_break_at_empty_line=False)
                    return [r.replace('。。\n', '。') for r in res]
                except RuntimeError as e:
                    # 第5次尝试,没办法了,随便切一下吧
                    return cut(limit, get_token_fn, txt, must_break_at_empty_line=False, break_anyway=True)

breakdown_text_to_satisfy_token_limit = run_in_subprocess_with_timeout(breakdown_text_to_satisfy_token_limit_, timeout=60)

if __name__ == '__main__':
    from crazy_functions.crazy_utils import read_and_clean_pdf_text
    file_content, page_one = read_and_clean_pdf_text("build/assets/at.pdf")

    from request_llms.bridge_all import model_info
    for i in range(5):
        file_content += file_content

    print(len(file_content))
    TOKEN_LIMIT_PER_FRAGMENT = 2500
    res = breakdown_text_to_satisfy_token_limit(file_content, TOKEN_LIMIT_PER_FRAGMENT)
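The exported name is the subprocess-wrapped variant, so a pathological input cannot stall the caller beyond 60 seconds (on Linux; elsewhere the wrapper is a no-op). A minimal usage sketch, with long_text standing in for any oversized document:

from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit

long_text = "some paragraph.\n\n" * 100000  # placeholder for a real document
fragments = breakdown_text_to_satisfy_token_limit(long_text, limit=2500, llm_model="gpt-3.5-turbo")
# every fragment now fits within 2500 tokens of the chosen model's tokenizer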
@@ -74,7 +74,7 @@ def produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chat

def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG):
    from crazy_functions.pdf_fns.report_gen_html import construct_html
    from crazy_functions.crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
    from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
    from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
    from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

@@ -116,7 +116,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi
        # find a smooth token limit to achieve even separation
        count = int(math.ceil(raw_token_num / TOKEN_LIMIT_PER_FRAGMENT))
        token_limit_smooth = raw_token_num // count + count
        return breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn=get_token_num, limit=token_limit_smooth)
        return breakdown_text_to_satisfy_token_limit(txt, limit=token_limit_smooth, llm_model=llm_kwargs['llm_model'])

    for section in article_dict.get('sections'):
        if len(section['text']) == 0: continue
@@ -3,47 +3,28 @@ from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseSta
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
import random


class MiniGame_ASCII_Art(GptAcademicGameBaseState):

    def step(self, prompt, chatbot, history):
        if self.step_cnt == 0:
            chatbot.append(["我画你猜(动物)", "请稍等..."])
        else:
            if prompt.strip() == 'exit':
                self.delete_game = True
                yield from update_ui_lastest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.)
                return
            chatbot.append([prompt, ""])
        yield from update_ui(chatbot=chatbot, history=history)

        if self.step_cnt == 0:
            self.lock_plugin(chatbot)
            self.cur_task = 'draw'

        if self.cur_task == 'draw':
            avail_obj = ["狗","猫","鸟","鱼","老鼠","蛇"]
            self.obj = random.choice(avail_obj)
            inputs = "I want to play a game called Guess the ASCII art. You can draw the ASCII art and I will try to guess it. " + f"This time you draw a {self.obj}. Note that you must not indicate what you have drawn in the text, and you should only produce the ASCII art wrapped by ```. "
            raw_res = predict_no_ui_long_connection(inputs=inputs, llm_kwargs=self.llm_kwargs, history=[], sys_prompt="")
            self.cur_task = 'identify user guess'
            res = get_code_block(raw_res)
            history += ['', f'the answer is {self.obj}', inputs, res]
            yield from update_ui_lastest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.)

        elif self.cur_task == 'identify user guess':
            if is_same_thing(self.obj, prompt, self.llm_kwargs):
                self.delete_game = True
                yield from update_ui_lastest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.)
            else:
                self.cur_task = 'identify user guess'
                yield from update_ui_lastest_msg(lastmsg="猜错了,再试试,输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.)


@CatchException
def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    from crazy_functions.game_fns.game_interactive_story import MiniGame_ResumeStory
    # 清空历史
    history = []
    # 选择游戏
    cls = MiniGame_ResumeStory
    # 如果之前已经初始化了游戏实例,则继续该实例;否则重新初始化
    state = cls.sync_state(chatbot,
                           llm_kwargs,
                           cls,
                           plugin_name='MiniGame_ResumeStory',
                           callback_fn='crazy_functions.互动小游戏->随机小游戏',
                           lock_plugin=True
                           )
    yield from state.continue_game(prompt, chatbot, history)


@CatchException
def 随机小游戏1(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    from crazy_functions.game_fns.game_ascii_art import MiniGame_ASCII_Art
    # 清空历史
    history = []
    # 选择游戏
@@ -53,7 +34,7 @@ def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_
                           llm_kwargs,
                           cls,
                           plugin_name='MiniGame_ASCII_Art',
                           callback_fn='crazy_functions.互动小游戏->随机小游戏',
                           callback_fn='crazy_functions.互动小游戏->随机小游戏1',
                           lock_plugin=True
                           )
    yield from state.continue_game(prompt, chatbot, history)
@@ -104,7 +104,11 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
        web_port 当前软件运行的端口号
    """
    history = [] # 清空历史,以免输入溢出
    chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
    if prompt.strip() == "":
        chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
        return
    chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    resolution = plugin_kwargs.get("advanced_arg", '1024x1024')
@@ -121,7 +125,11 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
@CatchException
def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # 清空历史,以免输入溢出
    chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
    if prompt.strip() == "":
        chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
        return
    chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    resolution_arg = plugin_kwargs.get("advanced_arg", '1024x1024-standard-vivid').lower()
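The DALLE3 entry point packs three options into one advanced_arg string of the form '1024x1024-standard-vivid'. The code that unpacks it falls outside this hunk; a plausible parse (an assumption, not the repository's exact code) looks like:

# Assumed parsing -- the actual split happens below this hunk.
resolution_arg = '1024x1024-standard-vivid'           # resolution-quality-style
parts = resolution_arg.split('-')
resolution = parts[0]                                 # e.g. '1024x1024'
quality = parts[1] if len(parts) > 1 else 'standard'  # 'standard' or 'hd'
style = parts[2] if len(parts) > 2 else 'vivid'       # 'vivid' or 'natural'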
@@ -29,17 +29,12 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
            except:
                raise RuntimeError('请先将.doc文档转换为.docx文档。')

        print(file_content)
        # private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名
        from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
        from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
        from request_llms.bridge_all import model_info
        max_token = model_info[llm_kwargs['llm_model']]['max_token']
        TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4
        paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
            txt=file_content,
            get_token_fn=model_info[llm_kwargs['llm_model']]['token_cnt'],
            limit=TOKEN_LIMIT_PER_FRAGMENT
        )
        paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
        this_paper_history = []
        for i, paper_frag in enumerate(paper_fragments):
            i_say = f'请对下面的文章片段用中文做概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{paper_frag}```'

@@ -28,8 +28,8 @@ class PaperFileGroup():
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
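The hunks above and below all repeat one migration across several plugins, so the signature change is worth stating once: the removed helper required the caller to wire up a token counter by hand, while its replacement resolves the tokenizer from the model name (falling back to the gpt-3.5-turbo default seen in breakdown_text_to_satisfy_token_limit_). Both lines below are taken verbatim from the hunks:

# old (removed): the caller supplies an explicit token-counting function
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
# new: the tokenizer is looked up internally from llm_model
segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)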
@@ -20,14 +20,9 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,

    TOKEN_LIMIT_PER_FRAGMENT = 2500

    from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
    from request_llms.bridge_all import model_info
    enc = model_info["gpt-3.5-turbo"]['tokenizer']
    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
    paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
        txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
    page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
        txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
    from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
    paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
    page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=str(page_one), limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model'])
    # 为了更好的效果,我们剥离Introduction之后的部分(如果有)
    paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]

@@ -91,14 +91,9 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
    page_one = str(page_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars

    # 递归地切割PDF文件
    from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
    from request_llms.bridge_all import model_info
    enc = model_info["gpt-3.5-turbo"]['tokenizer']
    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
    paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
        txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
    page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
        txt=page_one, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
    from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
    paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
    page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=page_one, limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model'])

    # 为了更好的效果,我们剥离Introduction之后的部分(如果有)
    paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]

@@ -18,14 +18,9 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro

    TOKEN_LIMIT_PER_FRAGMENT = 2500

    from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
    from request_llms.bridge_all import model_info
    enc = model_info["gpt-3.5-turbo"]['tokenizer']
    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
    paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
        txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
    page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
        txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
    from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
    paper_fragments = breakdown_text_to_satisfy_token_limit(txt=file_content, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])
    page_one_fragments = breakdown_text_to_satisfy_token_limit(txt=str(page_one), limit=TOKEN_LIMIT_PER_FRAGMENT//4, llm_model=llm_kwargs['llm_model'])
    # 为了更好的效果,我们剥离Introduction之后的部分(如果有)
    paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]

@@ -45,7 +40,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
    for i in range(n_fragment):
        NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment
        i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i]}"
        i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]}"
        i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]} ...."
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问
                                                                           llm_kwargs, chatbot,
                                                                           history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果

@@ -12,13 +12,6 @@ class PaperFileGroup():
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        from request_llms.bridge_all import model_info
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
        def get_token_num(txt): return len(
            enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num

    def run_file_split(self, max_token_limit=1900):
        """
        将长文本分离开来
@@ -29,9 +22,8 @@ class PaperFileGroup():
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(
                    file_content, self.get_token_num, max_token_limit)
                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)

@@ -27,3 +27,45 @@ def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say);history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新


PROMPT = """
请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,mermaid语法举例:
```mermaid
graph TD
    P(编程) --> L1(Python)
    P(编程) --> L2(C)
    P(编程) --> L3(C++)
    P(编程) --> L4(JavaScript)
    P(编程) --> L5(PHP)
```
"""
@CatchException
def 测试图表渲染(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
    llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
    plugin_kwargs   插件模型的参数,用于灵活调整复杂功能的各种参数
    chatbot         聊天显示框的句柄,用于显示给用户
    history         聊天历史,前情提要
    system_prompt   给gpt的静默提醒
    web_port        当前软件运行的端口号
    """
    history = [] # 清空历史,以免输入溢出
    chatbot.append(("这是什么功能?", "一个测试mermaid绘制图表的功能,您可以在输入框中输入一些关键词,然后使用mermaid+llm绘制图表。"))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新

    if txt == "": txt = "空白的输入栏" # 调皮一下

    i_say_show_user = f'请绘制有关“{txt}”的逻辑关系图。'
    i_say = PROMPT.format(subject=txt)
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say,
        inputs_show_user=i_say_show_user,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
        sys_prompt=""
    )
    history.append(i_say); history.append(gpt_say)
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
@@ -229,4 +229,3 @@ services:
# 不使用代理网络拉取最新代码
command: >
bash -c "python3 -u main.py"

@@ -1,2 +1 @@
# 此Dockerfile不再维护,请前往docs/GithubAction+ChatGLM+Moss
@@ -0,0 +1,53 @@
# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacity --network=host --build-arg http_proxy=http://localhost:10881 --build-arg https_proxy=http://localhost:10881 .
# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacityBeta --network=host .
# docker run -it --net=host gpt-academic-all-capacity bash

# 从NVIDIA源,从而支持显卡(检查宿主的nvidia-smi中的cuda版本必须>=11.3)
FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest

# use python3 as the system default python
WORKDIR /gpt
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8

# # 非必要步骤,更换pip源 (以下三行,可以删除)
# RUN echo '[global]' > /etc/pip.conf && \
# echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
# echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf

# 下载pytorch
RUN python3 -m pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
# 准备pip依赖
RUN python3 -m pip install openai numpy arxiv rich
RUN python3 -m pip install colorama Markdown pygments pymupdf
RUN python3 -m pip install python-docx moviepy pdfminer
RUN python3 -m pip install zh_langchain==0.2.1 pypinyin
RUN python3 -m pip install rarfile py7zr
RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
# 下载分支
WORKDIR /gpt
RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
WORKDIR /gpt/gpt_academic
RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss

RUN python3 -m pip install -r requirements.txt
RUN python3 -m pip install -r request_llms/requirements_moss.txt
RUN python3 -m pip install -r request_llms/requirements_qwen.txt
RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
RUN python3 -m pip install -r request_llms/requirements_newbing.txt
RUN python3 -m pip install nougat-ocr

# 预热Tiktoken模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

# 安装知识库插件的额外依赖
RUN apt-get update && apt-get install libgl1 -y
RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
RUN pip3 install unstructured[all-docs] --upgrade
RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'
RUN rm -rf /usr/local/lib/python3.8/dist-packages/tests

# COPY .cache /root/.cache
# COPY config_private.py config_private.py
# 启动
CMD ["python3", "-u", "main.py"]
@@ -17,10 +17,10 @@ RUN apt-get update && apt-get install libgl1 -y
RUN pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cpu
RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
RUN pip3 install unstructured[all-docs] --upgrade
RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'

# 可选步骤,用于预热模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'

# 启动
CMD ["python3", "-u", "main.py"]
@@ -341,4 +341,3 @@ https://github.com/oobabooga/one-click-installers
# المزيد:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

@@ -355,4 +355,3 @@ https://github.com/oobabooga/one-click-installers
# More:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

@@ -354,4 +354,3 @@ https://github.com/oobabooga/one-click-installers
# Plus:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

@@ -361,4 +361,3 @@ https://github.com/oobabooga/one-click-installers
# Weitere:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

@@ -358,4 +358,3 @@ https://github.com/oobabooga/one-click-installers
# Altre risorse:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

@@ -342,4 +342,3 @@ https://github.com/oobabooga/one-click-installers
# その他:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

@@ -361,4 +361,3 @@ https://github.com/oobabooga/one-click-installers
# 더보기:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

@@ -355,4 +355,3 @@ https://github.com/oobabooga/instaladores-de-um-clique
# Mais:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

@@ -358,4 +358,3 @@ https://github.com/oobabooga/one-click-installers
# Больше:
https://github.com/gradio-app/gradio
https://github.com/fghrsh/live2d_demo

Binary file not shown.
@@ -7,13 +7,27 @@ sample = """
"""
import re


def preprocess_newbing_out(s):
pattern = r'\^(\d+)\^' # 匹配^数字^
pattern2 = r'\[(\d+)\]' # 匹配[数字]
sub = lambda m: '\['+m.group(1)+'\]' # 将匹配到的数字作为替换值
pattern = r"\^(\d+)\^" # 匹配^数字^
pattern2 = r"\[(\d+)\]" # 匹配[数字]

def sub(m):
return "\\[" + m.group(1) + "\\]" # 将匹配到的数字作为替换值

result = re.sub(pattern, sub, s) # 替换操作
if '[1]' in result:
result += '<br/><hr style="border-top: dotted 1px #44ac5c;"><br/><small>' + "<br/>".join([re.sub(pattern2, sub, r) for r in result.split('\n') if r.startswith('[')]) + '</small>'
if "[1]" in result:
result += (
'<br/><hr style="border-top: dotted 1px #44ac5c;"><br/><small>'
+ "<br/>".join(
[
re.sub(pattern2, sub, r)
for r in result.split("\n")
if r.startswith("[")
]
)
+ "</small>"
)
return result


@@ -28,37 +42,39 @@ def close_up_code_segment_during_stream(gpt_reply):
str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。

"""
if '```' not in gpt_reply:
if "```" not in gpt_reply:
return gpt_reply
if gpt_reply.endswith('```'):
if gpt_reply.endswith("```"):
return gpt_reply

# 排除了以上两个情况,我们
segments = gpt_reply.split('```')
segments = gpt_reply.split("```")
n_mark = len(segments) - 1
if n_mark % 2 == 1:
# print('输出代码片段中!')
return gpt_reply+'\n```'
return gpt_reply + "\n```"
else:
return gpt_reply

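The closing rule is easiest to see on a concrete stream fragment. This is a self-contained copy of the function from the hunk above, exercised on two inputs:

```python
# An odd number of ``` marks means a code fence is still open mid-stream,
# so one closing fence is appended; otherwise the reply is left alone.
def close_up_code_segment_during_stream(gpt_reply):
    if "```" not in gpt_reply:
        return gpt_reply
    if gpt_reply.endswith("```"):
        return gpt_reply
    n_mark = len(gpt_reply.split("```")) - 1  # number of ``` marks seen so far
    return gpt_reply + "\n```" if n_mark % 2 == 1 else gpt_reply

print(close_up_code_segment_during_stream("text ```python\nprint(1)"))        # fence gets closed
print(close_up_code_segment_during_stream("text ```python\nprint(1)\n``` ok"))  # unchanged
```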
import markdown
from latex2mathml.converter import convert as tex2mathml
from functools import wraps, lru_cache


def markdown_convertion(txt):
"""
将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
"""
pre = '<div class="markdown-body">'
suf = '</div>'
suf = "</div>"
if txt.startswith(pre) and txt.endswith(suf):
# print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
return txt # 已经被转化过,不需要再次转化

markdown_extension_configs = {
'mdx_math': {
'enable_dollar_delimiter': True,
'use_gitlab_delimiters': False,
"mdx_math": {
"enable_dollar_delimiter": True,
"use_gitlab_delimiters": False,
},
}
find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
@@ -72,19 +88,19 @@ def markdown_convertion(txt):

def replace_math_no_render(match):
content = match.group(1)
if 'mode=display' in match.group(0):
content = content.replace('\n', '</br>')
return f"<font color=\"#00FF00\">$$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$$</font>"
if "mode=display" in match.group(0):
content = content.replace("\n", "</br>")
return f'<font color="#00FF00">$$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$$</font>'
else:
return f"<font color=\"#00FF00\">$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$</font>"
return f'<font color="#00FF00">$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$</font>'

def replace_math_render(match):
content = match.group(1)
if 'mode=display' in match.group(0):
if '\\begin{aligned}' in content:
content = content.replace('\\begin{aligned}', '\\begin{array}')
content = content.replace('\\end{aligned}', '\\end{array}')
content = content.replace('&', ' ')
if "mode=display" in match.group(0):
if "\\begin{aligned}" in content:
content = content.replace("\\begin{aligned}", "\\begin{array}")
content = content.replace("\\end{aligned}", "\\end{array}")
content = content.replace("&", " ")
content = tex2mathml_catch_exception(content, display="block")
return content
else:
@@ -94,37 +110,58 @@ def markdown_convertion(txt):
"""
解决一个mdx_math的bug(单$包裹begin命令时多余<script>)
"""
content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">', '<script type="math/tex; mode=display">')
content = content.replace('</script>\n</script>', '</script>')
content = content.replace(
'<script type="math/tex">\n<script type="math/tex; mode=display">',
'<script type="math/tex; mode=display">',
)
content = content.replace("</script>\n</script>", "</script>")
return content


if ('$' in txt) and ('```' not in txt): # 有$标识的公式符号,且没有代码段```的标识
if ("$" in txt) and ("```" not in txt): # 有$标识的公式符号,且没有代码段```的标识
# convert everything to html format
split = markdown.markdown(text='---')
convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
split = markdown.markdown(text="---")
convert_stage_1 = markdown.markdown(
text=txt,
extensions=["mdx_math", "fenced_code", "tables", "sane_lists"],
extension_configs=markdown_extension_configs,
)
convert_stage_1 = markdown_bug_hunt(convert_stage_1)
# re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
# 1. convert to easy-to-copy tex (do not render math)
convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
convert_stage_2_1, n = re.subn(
find_equation_pattern,
replace_math_no_render,
convert_stage_1,
flags=re.DOTALL,
)
# 2. convert to rendered equation
convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
convert_stage_2_2, n = re.subn(
find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL
)
# cat them together
return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
return pre + convert_stage_2_1 + f"{split}" + convert_stage_2_2 + suf
else:
return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
return (
pre
+ markdown.markdown(
txt, extensions=["fenced_code", "codehilite", "tables", "sane_lists"]
)
+ suf
)


sample = preprocess_newbing_out(sample)
sample = close_up_code_segment_during_stream(sample)
sample = markdown_convertion(sample)
with open('tmp.html', 'w', encoding='utf8') as f:
f.write("""
with open("tmp.html", "w", encoding="utf8") as f:
f.write(
"""

<head>
<title>My Website</title>
<link rel="stylesheet" type="text/css" href="style.css">
</head>

""")
"""
)
f.write(sample)

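The two-stage substitution above can be replayed on a hand-written stage-1 fragment instead of real mdx_math output. A minimal sketch, assuming only the regex from the hunk:

```python
import re

# Demonstrate the "easy-to-copy TeX" pass: find <script type="math/tex">
# blocks and wrap their raw contents in colored <font> tags instead of
# rendering them.
find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
stage_1 = 'Euler: <script type="math/tex">e^{i\\pi}+1=0</script>'

no_render, n = re.subn(
    find_equation_pattern,
    lambda m: f'<font color="#FF00FF">{m.group(1)}</font>',  # copyable TeX
    stage_1,
    flags=re.DOTALL,
)
print(n, no_render)  # 1 substitution performed
```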
@@ -2863,7 +2863,7 @@
"加载API_KEY": "Loading API_KEY",
"协助您编写代码": "Assist you in writing code",
"我可以为您提供以下服务": "I can provide you with the following services",
"排队中请稍后 ...": "Please wait in line ...",
"排队中请稍候 ...": "Please wait in line ...",
"建议您使用英文提示词": "It is recommended to use English prompts",
"不能支撑AutoGen运行": "Cannot support AutoGen operation",
"帮助您解决编程问题": "Help you solve programming problems",

@@ -61,4 +61,3 @@ VI 两种音频监听模式切换时,需要刷新页面才有效。
VII 非localhost运行+非https情况下无法打开录音功能的坑:https://blog.csdn.net/weixin_39461487/article/details/109594434

## 5.点击函数插件区“实时音频采集” 或者其他音频交互功能

main.py
@@ -1,14 +1,25 @@
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
import pickle
import base64

help_menu_description = \
"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors).
</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki),
如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues).
</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交
</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮
</br></br>函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮
</br></br>虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端
</br></br>如何保存对话: 点击保存当前的对话按钮
</br></br>如何语音对话: 请阅读Wiki
</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""

def main():
import gradio as gr
if gr.__version__ not in ['3.32.6']:
if gr.__version__ not in ['3.32.6', '3.32.7']:
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
from request_llms.bridge_all import predict
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME')
@@ -18,20 +29,10 @@ def main():
# 如果WEB_PORT是-1, 则随机选取WEB端口
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
from check_proxy import get_current_version
from themes.theme import adjust_theme, advanced_css, theme_declaration, load_dynamic_theme

from themes.theme import adjust_theme, advanced_css, theme_declaration
from themes.theme import js_code_for_css_changing, js_code_for_darkmode_init, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
description = "Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic), "
description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)."
description += "</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki), "
description += "如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues)."
description += "</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交"
description += "</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮"
description += "</br></br>函数插件区使用说明: 1. 输入路径/问题, 或者上传文件; 2. 点击任意函数插件区按钮"
description += "</br></br>虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端"
description += "</br></br>如何保存对话: 点击保存当前的对话按钮"
description += "</br></br>如何语音对话: 请阅读Wiki"
description += "</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"

# 问询记录, python 版本建议3.9+(越新越好)
import logging, uuid
@@ -138,17 +139,17 @@ def main():
with gr.Row():
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
with gr.Row():
with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用。", open=False) as area_file_up:
with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")


with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden"):
with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"):
with gr.Row():
with gr.Tab("上传文件", elem_id="interact-panel"):
gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。")
file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float")

with gr.Tab("更换模型 & Prompt", elem_id="interact-panel"):
with gr.Tab("更换模型", elem_id="interact-panel"):
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
@@ -160,18 +161,11 @@ def main():
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
checkboxes_2 = gr.CheckboxGroup(["自定义菜单"],
value=[], label="显示/隐藏自定义菜单", elem_id='cbs').style(container=False)
value=[], label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
dark_mode_btn.click(None, None, None, _js="""() => {
if (document.querySelectorAll('.dark').length) {
document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
} else {
document.querySelector('body').classList.add('dark');
}
}""",
)
dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
with gr.Tab("帮助", elem_id="interact-panel"):
gr.Markdown(description)
gr.Markdown(help_menu_description)

with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_input_secondary:
with gr.Accordion("浮动输入区", open=True, elem_id="input-panel2"):
@@ -186,16 +180,6 @@ def main():
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")

def to_cookie_str(d):
# Pickle the dictionary and encode it as a string
pickled_dict = pickle.dumps(d)
cookie_value = base64.b64encode(pickled_dict).decode('utf-8')
return cookie_value

def from_cookie_str(c):
# Decode the base64-encoded string and unpickle it into a dictionary
pickled_dict = base64.b64decode(c.encode('utf-8'))
return pickle.loads(pickled_dict)

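The two cookie helpers removed above (now imported from themes.theme) form a simple pickle/base64 round trip. A self-contained replay:

```python
import base64, pickle

# A dict is pickled, base64-encoded into a cookie-safe string, then decoded
# back on the next page load.
def to_cookie_str(d):
    return base64.b64encode(pickle.dumps(d)).decode('utf-8')

def from_cookie_str(c):
    return pickle.loads(base64.b64decode(c.encode('utf-8')))

state = {'theme': 'dark', 'buttons': ['翻译', '润色']}
assert from_cookie_str(to_cookie_str(state)) == state
```

Because the value round-trips through pickle, the decode side should only ever be fed values the app itself set; unpickling attacker-controlled cookie bytes can execute arbitrary code.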
with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
|
||||
with gr.Accordion("自定义菜单", open=True, elem_id="edit-panel"):
|
||||
@@ -255,7 +239,8 @@ def main():
|
||||
basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()])
|
||||
h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
|
||||
[persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
|
||||
h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""") # save persistent cookie
|
||||
# save persistent cookie
|
||||
h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""")
|
||||
|
||||
# 功能区显示开关与功能区的互动
|
||||
def fn_area_visibility(a):
|
||||
@@ -305,8 +290,8 @@ def main():
|
||||
click_handle = btn.click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(btn.value)], outputs=output_combo)
|
||||
cancel_handles.append(click_handle)
|
||||
# 文件上传区,接收文件后与chatbot的互动
|
||||
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
|
||||
file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
|
||||
file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
|
||||
file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies]).then(None, None, None, _js=r"()=>{toast_push('上传完毕 ...'); cancel_loading_status();}")
|
||||
# 函数插件-固定按钮区
|
||||
for k in plugins:
|
||||
if not plugins[k].get("AsButton", True): continue
|
||||
@@ -342,18 +327,7 @@ def main():
|
||||
None,
|
||||
[secret_css],
|
||||
None,
|
||||
_js="""(css) => {
|
||||
var existingStyles = document.querySelectorAll("style[data-loaded-css]");
|
||||
for (var i = 0; i < existingStyles.length; i++) {
|
||||
var style = existingStyles[i];
|
||||
style.parentNode.removeChild(style);
|
||||
}
|
||||
var styleElement = document.createElement('style');
|
||||
styleElement.setAttribute('data-loaded-css', css);
|
||||
styleElement.innerHTML = css;
|
||||
document.head.appendChild(styleElement);
|
||||
}
|
||||
"""
|
||||
_js=js_code_for_css_changing
|
||||
)
|
||||
# 随变按钮的回调函数注册
|
||||
def route(request: gr.Request, k, *args, **kwargs):
|
||||
@@ -385,27 +359,10 @@ def main():
|
||||
rad.feed(cookies['uuid'].hex, audio)
|
||||
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
|
||||
|
||||
def init_cookie(cookies, chatbot):
|
||||
# 为每一位访问的用户赋予一个独一无二的uuid编码
|
||||
cookies.update({'uuid': uuid.uuid4()})
|
||||
return cookies
|
||||
|
||||
demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
|
||||
darkmode_js = """(dark) => {
|
||||
dark = dark == "True";
|
||||
if (document.querySelectorAll('.dark').length) {
|
||||
if (!dark){
|
||||
document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
|
||||
}
|
||||
} else {
|
||||
if (dark){
|
||||
document.querySelector('body').classList.add('dark');
|
||||
}
|
||||
}
|
||||
}"""
|
||||
load_cookie_js = """(persistent_cookie) => {
|
||||
return getCookie("persistent_cookie");
|
||||
}"""
|
||||
demo.load(None, inputs=None, outputs=[persistent_cookie], _js=load_cookie_js)
|
||||
darkmode_js = js_code_for_darkmode_init
|
||||
demo.load(None, inputs=None, outputs=[persistent_cookie], _js=js_code_for_persistent_cookie_init)
|
||||
demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js) # 配置暗色主题或亮色主题
|
||||
demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')
|
||||
|
||||
@@ -418,7 +375,7 @@ def main():
|
||||
|
||||
def auto_updates(): time.sleep(0); auto_update()
|
||||
def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}")
|
||||
def warm_up_mods(): time.sleep(4); warm_up_modules()
|
||||
def warm_up_mods(): time.sleep(6); warm_up_modules()
|
||||
|
||||
threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新
|
||||
threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
|
||||
|
||||
@@ -352,9 +352,9 @@ def step_1_core_key_translate():
chinese_core_keys_norepeat_mapping.update({k:cached_translation[k]})
chinese_core_keys_norepeat_mapping = dict(sorted(chinese_core_keys_norepeat_mapping.items(), key=lambda x: -len(x[0])))

# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# copy
# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def copy_source_code():

from toolbox import get_conf
@@ -367,9 +367,9 @@ def step_1_core_key_translate():
shutil.copytree('./', backup_dir, ignore=lambda x, y: blacklist)
copy_source_code()

# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# primary key replace
# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
directory_path = f'./multi-language/{LANG}/'
for root, dirs, files in os.walk(directory_path):
for file in files:
@@ -389,9 +389,9 @@ def step_1_core_key_translate():

def step_2_core_key_translate():

# =================================================================================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# step2
# =================================================================================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

def load_string(strings, string_input):
string_ = string_input.strip().strip(',').strip().strip('.').strip()
@@ -492,9 +492,9 @@ def step_2_core_key_translate():
cached_translation.update(read_map_from_json(language=LANG_STD))
cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0])))

# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# literal key replace
# ===============================================
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
directory_path = f'./multi-language/{LANG}/'
for root, dirs, files in os.walk(directory_path):
for file in files:

@@ -28,6 +28,9 @@ from .bridge_chatglm3 import predict as chatglm3_ui
from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
from .bridge_qianfan import predict as qianfan_ui

from .bridge_google_gemini import predict as genai_ui
from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui

colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']

class LazyloadTiktoken(object):
@@ -246,6 +249,22 @@ model_info = {
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gemini-pro": {
"fn_with_ui": genai_ui,
"fn_without_ui": genai_noui,
"endpoint": None,
"max_token": 1024 * 32,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"gemini-pro-vision": {
"fn_with_ui": genai_ui,
"fn_without_ui": genai_noui,
"endpoint": None,
"max_token": 1024 * 32,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
}

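Each model_info entry bundles the UI and non-UI predict functions with tokenizer metadata, so callers dispatch purely by model name. A minimal sketch with illustrative stand-in values, not the real bridge functions:

```python
# Stand-in registry showing how a model_info entry is consumed elsewhere.
model_info = {
    "gemini-pro": {
        "fn_with_ui": lambda *a, **kw: "...",     # stand-in for genai_ui
        "fn_without_ui": lambda *a, **kw: "...",  # stand-in for genai_noui
        "endpoint": None,
        "max_token": 1024 * 32,
    },
}

llm_model = "gemini-pro"
predict_fn = model_info[llm_model]["fn_with_ui"]  # look up by name, then call
budget = model_info[llm_model]["max_token"] // 2  # e.g. reserve half for output
print(predict_fn(), budget)
```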
# -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=-
@@ -431,14 +450,14 @@ if "chatglm_onnx" in AVAIL_LLM_MODELS:
})
except:
print(trimmed_format_exc())
if "qwen" in AVAIL_LLM_MODELS:
if "qwen-local" in AVAIL_LLM_MODELS:
try:
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
from .bridge_qwen import predict as qwen_ui
from .bridge_qwen_local import predict_no_ui_long_connection as qwen_local_noui
from .bridge_qwen_local import predict as qwen_local_ui
model_info.update({
"qwen": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"qwen-local": {
"fn_with_ui": qwen_local_ui,
"fn_without_ui": qwen_local_noui,
"endpoint": None,
"max_token": 4096,
"tokenizer": tokenizer_gpt35,
@@ -447,16 +466,32 @@ if "qwen" in AVAIL_LLM_MODELS:
})
except:
print(trimmed_format_exc())
if "chatgpt_website" in AVAIL_LLM_MODELS: # 接入一些逆向工程https://github.com/acheong08/ChatGPT-to-API/
if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai
try:
from .bridge_chatgpt_website import predict_no_ui_long_connection as chatgpt_website_noui
from .bridge_chatgpt_website import predict as chatgpt_website_ui
from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
from .bridge_qwen import predict as qwen_ui
model_info.update({
"chatgpt_website": {
"fn_with_ui": chatgpt_website_ui,
"fn_without_ui": chatgpt_website_noui,
"endpoint": openai_endpoint,
"max_token": 4096,
"qwen-turbo": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"endpoint": None,
"max_token": 6144,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"qwen-plus": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"endpoint": None,
"max_token": 30720,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"qwen-max": {
"fn_with_ui": qwen_ui,
"fn_without_ui": qwen_noui,
"endpoint": None,
"max_token": 28672,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
}
@@ -559,6 +594,23 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
})
except:
print(trimmed_format_exc())
# if "skylark" in AVAIL_LLM_MODELS:
#     try:
#         from .bridge_skylark2 import predict_no_ui_long_connection as skylark_noui
#         from .bridge_skylark2 import predict as skylark_ui
#         model_info.update({
#             "skylark": {
#                 "fn_with_ui": skylark_ui,
#                 "fn_without_ui": skylark_noui,
#                 "endpoint": None,
#                 "max_token": 4096,
#                 "tokenizer": tokenizer_gpt35,
#                 "token_cnt": get_token_num_gpt35,
#             }
#         })
#     except:
#         print(trimmed_format_exc())


# <-- 用于定义和切换多个azure模型 -->
AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")

@@ -51,7 +51,8 @@ def decode_chunk(chunk):
chunkjson = json.loads(chunk_decoded[6:])
has_choices = 'choices' in chunkjson
if has_choices: choice_valid = (len(chunkjson['choices']) > 0)
if has_choices and choice_valid: has_content = "content" in chunkjson['choices'][0]["delta"]
if has_choices and choice_valid: has_content = ("content" in chunkjson['choices'][0]["delta"])
if has_content: has_content = (chunkjson['choices'][0]["delta"]["content"] is not None)
if has_choices and choice_valid: has_role = "role" in chunkjson['choices'][0]["delta"]
except:
pass
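The flag extraction above can be replayed end to end on one fake SSE line. A self-contained sketch following the OpenAI chat streaming layout, using the same statements as the hunk:

```python
import json

# Parse one "data: {...}" line and derive the validity flags the streaming
# loop relies on; any parse failure simply leaves the flags False.
def decode_chunk_sketch(chunk: bytes):
    chunk_decoded = chunk.decode()
    chunkjson, has_choices, choice_valid, has_content, has_role = None, False, False, False, False
    try:
        chunkjson = json.loads(chunk_decoded[6:])  # strip the "data: " prefix
        has_choices = 'choices' in chunkjson
        if has_choices: choice_valid = (len(chunkjson['choices']) > 0)
        if has_choices and choice_valid: has_content = ("content" in chunkjson['choices'][0]["delta"])
        if has_content: has_content = (chunkjson['choices'][0]["delta"]["content"] is not None)
        if has_choices and choice_valid: has_role = "role" in chunkjson['choices'][0]["delta"]
    except:
        pass
    return chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role

demo = b'data: {"choices": [{"delta": {"content": "Hi"}}]}'
print(decode_chunk_sketch(demo)[2:])  # (True, True, True, False)
```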
@@ -101,20 +102,25 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
result = ''
json_data = None
while True:
try: chunk = next(stream_response).decode()
try: chunk = next(stream_response)
except StopIteration:
break
except requests.exceptions.ConnectionError:
chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。
if len(chunk)==0: continue
if not chunk.startswith('data:'):
error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)
if len(chunk_decoded)==0: continue
if not chunk_decoded.startswith('data:'):
error_msg = get_full_error(chunk, stream_response).decode()
if "reduce the length" in error_msg:
raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
else:
raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
if ('data: [DONE]' in chunk): break # api2d 正常完成
json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
if ('data: [DONE]' in chunk_decoded): break # api2d 正常完成
# 提前读取一些信息 (用于判断异常)
if has_choices and not choice_valid:
# 一些垃圾第三方接口的出现这样的错误
continue
json_data = chunkjson['choices'][0]
delta = json_data["delta"]
if len(delta) == 0: break
if "role" in delta: continue
@@ -238,6 +244,9 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
if has_choices and not choice_valid:
# 一些垃圾第三方接口的出现这样的错误
continue
if ('data: [DONE]' not in chunk_decoded) and len(chunk_decoded) > 0 and (chunkjson is None):
# 传递进来一些奇怪的东西
raise ValueError(f'无法读取以下数据,请检查配置。\n\n{chunk_decoded}')
# 前者是API2D的结束条件,后者是OPENAI的结束条件
if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
# 判定为数据流的结束,gpt_replying_buffer也写完了

request_llms/bridge_google_gemini.py (new file)
@@ -0,0 +1,114 @@
# encoding: utf-8
# @Time : 2023/12/21
# @Author : Spike
# @Descr :
import json
import re
import os
import time
from request_llms.com_google import GoogleChatInit
from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc

proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')
timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
'网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'


def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None,
console_slience=False):
# 检查API_KEY
if get_conf("GEMINI_API_KEY") == "":
raise ValueError(f"请配置 GEMINI_API_KEY。")

genai = GoogleChatInit()
watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
gpt_replying_buffer = ''
stream_response = genai.generate_chat(inputs, llm_kwargs, history, sys_prompt)
for response in stream_response:
results = response.decode()
match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
error_match = re.search(r'\"message\":\s*\"(.*?)\"', results, flags=re.DOTALL)
if match:
try:
paraphrase = json.loads('{"text": "%s"}' % match.group(1))
except:
raise ValueError(f"解析GEMINI消息出错。")
buffer = paraphrase['text']
gpt_replying_buffer += buffer
if len(observe_window) >= 1:
observe_window[0] = gpt_replying_buffer
if len(observe_window) >= 2:
if (time.time() - observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
if error_match:
raise RuntimeError(f'{gpt_replying_buffer} 对话错误')
return gpt_replying_buffer


def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
# 检查API_KEY
if get_conf("GEMINI_API_KEY") == "":
yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
return

# 适配润色区域
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

if "vision" in llm_kwargs["llm_model"]:
have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
def make_media_input(inputs, image_paths):
for image_path in image_paths:
inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
return inputs
if have_recent_file:
inputs = make_media_input(inputs, image_paths)

chatbot.append((inputs, ""))
yield from update_ui(chatbot=chatbot, history=history)
genai = GoogleChatInit()
retry = 0
while True:
try:
stream_response = genai.generate_chat(inputs, llm_kwargs, history, system_prompt)
break
except Exception as e:
retry += 1
chatbot[-1] = ((chatbot[-1][0], trimmed_format_exc()))
yield from update_ui(chatbot=chatbot, history=history, msg="请求失败") # 刷新界面
return
gpt_replying_buffer = ""
gpt_security_policy = ""
history.extend([inputs, ''])
for response in stream_response:
results = response.decode("utf-8") # 被这个解码给耍了。。
gpt_security_policy += results
match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
error_match = re.search(r'\"message\":\s*\"(.*)\"', results, flags=re.DOTALL)
if match:
try:
paraphrase = json.loads('{"text": "%s"}' % match.group(1))
except:
raise ValueError(f"解析GEMINI消息出错。")
gpt_replying_buffer += paraphrase['text'] # 使用 json 解析库进行处理
chatbot[-1] = (inputs, gpt_replying_buffer)
history[-1] = gpt_replying_buffer
yield from update_ui(chatbot=chatbot, history=history)
if error_match:
history = history[-2] # 错误的不纳入对话
chatbot[-1] = (inputs, gpt_replying_buffer + f"对话错误,请查看message\n\n```\n{error_match.group(1)}\n```")
yield from update_ui(chatbot=chatbot, history=history)
raise RuntimeError('对话错误')
if not gpt_replying_buffer:
history = history[-2] # 错误的不纳入对话
chatbot[-1] = (inputs, gpt_replying_buffer + f"触发了Google的安全访问策略,没有回答\n\n```\n{gpt_security_policy}\n```")
yield from update_ui(chatbot=chatbot, history=history)



if __name__ == '__main__':
import sys
llm_kwargs = {'llm_model': 'gemini-pro'}
result = predict('Write long a story about a magic backpack.', llm_kwargs, llm_kwargs, [])
for i in result:
print(i)

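The bridge scrapes "text" fields out of the raw streaming bytes with a regex, then round-trips the still-escaped fragment through json.loads to unescape it. A self-contained replay on a fabricated chunk shaped loosely like a Gemini streaming reply:

```python
import json, re

# Fake chunk for illustration only; the real bytes come from
# GoogleChatInit.generate_chat.
chunk = b'{"candidates": [{"content": {"parts": [{"text": "Hello\\nworld"}]}}]}'
results = chunk.decode("utf-8")
match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
if match:
    # re-wrap the escaped fragment so json.loads resolves \n, \" and friends
    paraphrase = json.loads('{"text": "%s"}' % match.group(1))
    print(paraphrase["text"])  # prints Hello and world on separate lines
```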
@@ -1,16 +1,17 @@
"""
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
第一部分:来自EdgeGPT.py
https://github.com/acheong08/EdgeGPT
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
from .edge_gpt_free import Chatbot as NewbingChatbot

load_message = "等待NewBing响应。"

"""
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
第二部分:子进程Worker(调用主体)
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
import time
import json
@@ -22,19 +23,30 @@ import threading
from toolbox import update_ui, get_conf, trimmed_format_exc
from multiprocessing import Process, Pipe


def preprocess_newbing_out(s):
pattern = r'\^(\d+)\^' # 匹配^数字^
sub = lambda m: '('+m.group(1)+')' # 将匹配到的数字作为替换值
pattern = r"\^(\d+)\^" # 匹配^数字^
sub = lambda m: "(" + m.group(1) + ")" # 将匹配到的数字作为替换值
result = re.sub(pattern, sub, s) # 替换操作
if '[1]' in result:
result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
if "[1]" in result:
result += (
"\n\n```reference\n"
+ "\n".join([r for r in result.split("\n") if r.startswith("[")])
+ "\n```\n"
)
return result


def preprocess_newbing_out_simple(result):
if '[1]' in result:
result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
if "[1]" in result:
result += (
"\n\n```reference\n"
+ "\n".join([r for r in result.split("\n") if r.startswith("[")])
+ "\n```\n"
)
return result

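The citation rewrite is easy to verify on a small literal. This is the function from the hunk above, self-contained, run on one fake NewBing reply:

```python
import re

# ^1^ becomes (1), and any [n] reference lines are collected into a trailing
# ```reference fence for display.
def preprocess_newbing_out(s):
    result = re.sub(r"\^(\d+)\^", lambda m: "(" + m.group(1) + ")", s)
    if "[1]" in result:
        result += (
            "\n\n```reference\n"
            + "\n".join([r for r in result.split("\n") if r.startswith("[")])
            + "\n```\n"
        )
    return result

print(preprocess_newbing_out("Bing says hi^1^\n[1]: https://example.com"))
```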
class NewBingHandle(Process):
def __init__(self):
super().__init__(daemon=True)
@@ -51,6 +63,7 @@ class NewBingHandle(Process):
try:
self.success = False
import certifi, httpx, rich

self.info = "依赖检测通过,等待NewBing响应。注意目前不能多人同时调用NewBing接口(有线程锁),否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时,会自动使用已配置的代理。"
self.success = True
except:
@@ -62,15 +75,16 @@ class NewBingHandle(Process):

async def async_run(self):
# 读取配置
NEWBING_STYLE = get_conf('NEWBING_STYLE')
NEWBING_STYLE = get_conf("NEWBING_STYLE")
from request_llms.bridge_all import model_info
endpoint = model_info['newbing']['endpoint']

endpoint = model_info["newbing"]["endpoint"]
while True:
# 等待
kwargs = self.child.recv()
question=kwargs['query']
history=kwargs['history']
system_prompt=kwargs['system_prompt']
question = kwargs["query"]
history = kwargs["history"]
system_prompt = kwargs["system_prompt"]

# 是否重置
if len(self.local_history) > 0 and len(history) == 0:
@@ -81,19 +95,19 @@ class NewBingHandle(Process):
prompt = ""
if system_prompt not in self.local_history:
self.local_history.append(system_prompt)
prompt += system_prompt + '\n'
prompt += system_prompt + "\n"

# 追加历史
for ab in history:
a, b = ab
if a not in self.local_history:
self.local_history.append(a)
prompt += a + '\n'
prompt += a + "\n"

# 问题
prompt += question
self.local_history.append(question)
print('question:', prompt)
print("question:", prompt)
# 提交
async for final, response in self.newbing_model.ask_stream(
prompt=question,
@@ -104,11 +118,10 @@ class NewBingHandle(Process):
print(response)
self.child.send(str(response))
else:
print('-------- receive final ---------')
self.child.send('[Finish]')
print("-------- receive final ---------")
self.child.send("[Finish]")
# self.local_history.append(response)


def run(self):
"""
这个函数运行在子进程
@@ -118,32 +131,37 @@ class NewBingHandle(Process):
self.local_history = []
if (self.newbing_model is None) or (not self.success):
# 代理设置
proxies, NEWBING_COOKIES = get_conf('proxies', 'NEWBING_COOKIES')
proxies, NEWBING_COOKIES = get_conf("proxies", "NEWBING_COOKIES")
if proxies is None:
self.proxies_https = None
else:
self.proxies_https = proxies['https']
self.proxies_https = proxies["https"]

if (NEWBING_COOKIES is not None) and len(NEWBING_COOKIES) > 100:
try:
cookies = json.loads(NEWBING_COOKIES)
except:
self.success = False
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] NEWBING_COOKIES未填写或有格式错误。')
self.child.send('[Fail]'); self.child.send('[Finish]')
tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
self.child.send(f"[Local Message] NEWBING_COOKIES未填写或有格式错误。")
self.child.send("[Fail]")
self.child.send("[Finish]")
raise RuntimeError(f"NEWBING_COOKIES未填写或有格式错误。")
else:
cookies = None

try:
self.newbing_model = NewbingChatbot(proxy=self.proxies_https, cookies=cookies)
self.newbing_model = NewbingChatbot(
proxy=self.proxies_https, cookies=cookies
)
except:
self.success = False
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] 不能加载Newbing组件,请注意Newbing组件已不再维护。{tb_str}')
self.child.send('[Fail]')
self.child.send('[Finish]')
tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
self.child.send(
f"[Local Message] 不能加载Newbing组件,请注意Newbing组件已不再维护。{tb_str}"
)
self.child.send("[Fail]")
self.child.send("[Finish]")
raise RuntimeError(f"不能加载Newbing组件,请注意Newbing组件已不再维护。")

self.success = True
@@ -151,10 +169,12 @@ class NewBingHandle(Process):
# 进入任务等待状态
asyncio.run(self.async_run())
except Exception:
tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
self.child.send(f'[Local Message] Newbing 请求失败,报错信息如下. 如果是与网络相关的问题,建议更换代理协议(推荐http)或代理节点 {tb_str}.')
self.child.send('[Fail]')
self.child.send('[Finish]')
tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
self.child.send(
f"[Local Message] Newbing 请求失败,报错信息如下. 如果是与网络相关的问题,建议更换代理协议(推荐http)或代理节点 {tb_str}."
)
self.child.send("[Fail]")
self.child.send("[Finish]")

def stream_chat(self, **kwargs):
"""
@@ -164,21 +184,33 @@ class NewBingHandle(Process):
self.parent.send(kwargs) # 请求子进程
while True:
res = self.parent.recv() # 等待newbing回复的片段
if res == '[Finish]': break # 结束
elif res == '[Fail]': self.success = False; break # 失败
else: yield res # newbing回复的片段
if res == "[Finish]":
break # 结束
elif res == "[Fail]":
self.success = False
break # 失败
else:
yield res # newbing回复的片段
self.threadLock.release() # 释放线程锁


"""
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
第三部分:主进程统一调用函数接口
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
global newbingfree_handle
newbingfree_handle = None

def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):

def predict_no_ui_long_connection(
inputs,
llm_kwargs,
history=[],
sys_prompt="",
observe_window=[],
console_slience=False,
):
"""
多线程方法
函数的说明请见 request_llms/bridge_all.py
@@ -186,7 +218,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
global newbingfree_handle
if (newbingfree_handle is None) or (not newbingfree_handle.success):
newbingfree_handle = NewBingHandle()
if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + newbingfree_handle.info
if len(observe_window) >= 1:
observe_window[0] = load_message + "\n\n" + newbingfree_handle.info
if not newbingfree_handle.success:
error = newbingfree_handle.info
newbingfree_handle = None
@@ -199,15 +232,34 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",

watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
response = ""
if len(observe_window) >= 1: observe_window[0] = "[Local Message] 等待NewBing响应中 ..."
for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
if len(observe_window) >= 1: observe_window[0] = preprocess_newbing_out_simple(response)
if len(observe_window) >= 1:
observe_window[0] = "[Local Message] 等待NewBing响应中 ..."
for response in newbingfree_handle.stream_chat(
query=inputs,
history=history_feedin,
system_prompt=sys_prompt,
max_length=llm_kwargs["max_length"],
top_p=llm_kwargs["top_p"],
temperature=llm_kwargs["temperature"],
):
if len(observe_window) >= 1:
observe_window[0] = preprocess_newbing_out_simple(response)
if len(observe_window) >= 2:
if (time.time() - observe_window[1]) > watch_dog_patience:
raise RuntimeError("程序终止。")
return preprocess_newbing_out_simple(response)

def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):

def predict(
inputs,
llm_kwargs,
plugin_kwargs,
chatbot,
history=[],
system_prompt="",
stream=True,
additional_fn=None,
):
"""
单线程方法
函数的说明请见 request_llms/bridge_all.py
@@ -225,7 +277,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

inputs, history = handle_core_functionality(
additional_fn, inputs, history, chatbot
)

history_feedin = []
for i in range(len(history) // 2):
@@ -233,13 +288,24 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

chatbot[-1] = (inputs, "[Local Message] 等待NewBing响应中 ...")
response = "[Local Message] 等待NewBing响应中 ..."
yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
yield from update_ui(
chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
)
for response in newbingfree_handle.stream_chat(
query=inputs,
history=history_feedin,
system_prompt=system_prompt,
max_length=llm_kwargs["max_length"],
top_p=llm_kwargs["top_p"],
temperature=llm_kwargs["temperature"],
):
chatbot[-1] = (inputs, preprocess_newbing_out(response))
yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
if response == "[Local Message] 等待NewBing响应中 ...": response = "[Local Message] NewBing响应异常,请刷新界面重试 ..."
yield from update_ui(
chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
)
if response == "[Local Message] 等待NewBing响应中 ...":
response = "[Local Message] NewBing响应异常,请刷新界面重试 ..."
history.extend([inputs, response])
logging.info(f'[raw_input] {inputs}')
logging.info(f'[response] {response}')
logging.info(f"[raw_input] {inputs}")
logging.info(f"[response] {response}")
yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")

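The worker pattern underneath NewBingHandle (a subprocess streaming fragments over a Pipe, terminated by "[Finish]"/"[Fail]" sentinels) can be reduced to a runnable skeleton. A minimal sketch, with the network call replaced by canned fragments:

```python
from multiprocessing import Process, Pipe

# Skeleton of the Process + Pipe streaming pattern: the child sends reply
# fragments, then a "[Finish]" sentinel that stream_chat watches for.
class WorkerSketch(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()

    def run(self):  # runs in the child process
        query = self.child.recv()
        for fragment in (query + " ...", query + " ... done"):
            self.child.send(fragment)
        self.child.send("[Finish]")

    def stream_chat(self, query):  # runs in the main process
        self.parent.send(query)
        while True:
            res = self.parent.recv()
            if res == "[Finish]":
                break
            yield res

if __name__ == "__main__":
    w = WorkerSketch(); w.start()
    for piece in w.stream_chat("hello"):
        print(piece)
```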
@@ -1,59 +1,62 @@
|
||||
model_name = "Qwen"
|
||||
cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`"
|
||||
import time
|
||||
import os
|
||||
from toolbox import update_ui, get_conf, update_ui_lastest_msg
|
||||
from toolbox import check_packages, report_exception
|
||||
|
||||
from toolbox import ProxyNetworkActivate, get_conf
|
||||
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
|
||||
model_name = 'Qwen'
|
||||
|
||||
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
|
||||
"""
|
||||
⭐多线程方法
|
||||
函数的说明请见 request_llms/bridge_all.py
|
||||
"""
|
||||
watch_dog_patience = 5
|
||||
response = ""
|
||||
|
||||
from .com_qwenapi import QwenRequestInstance
|
||||
sri = QwenRequestInstance()
|
||||
for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
|
||||
if len(observe_window) >= 1:
|
||||
observe_window[0] = response
|
||||
if len(observe_window) >= 2:
|
||||
if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
|
||||
return response
|
||||
|
||||
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetQwenLMHandle(LocalLLMHandle):

    def load_model_info(self):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
        self.model_name = model_name
        self.cmd_to_install = cmd_to_install

    def load_model_and_tokenizer(self):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
        # from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
        from transformers import AutoModelForCausalLM, AutoTokenizer
        from transformers.generation import GenerationConfig
        with ProxyNetworkActivate('Download_LLM'):
            model_id = get_conf('QWEN_MODEL_SELECTION')
            self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
            # use fp16
            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
            model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # 可指定不同的生成长度、top_p等相关超参
            self._model = model

        return self._model, self._tokenizer

    def llm_stream_generator(self, **kwargs):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
        def adaptor(kwargs):
            query = kwargs['query']
            max_length = kwargs['max_length']
            top_p = kwargs['top_p']
            temperature = kwargs['temperature']
            history = kwargs['history']
            return query, max_length, top_p, temperature, history

        query, max_length, top_p, temperature, history = adaptor(kwargs)

        for response in self._model.chat_stream(self._tokenizer, query, history=history):
            yield response

    def try_to_import_special_deps(self, **kwargs):
        # import something that will raise error if the user does not install requirement_*.txt
        # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行
        import importlib
        importlib.import_module('modelscope')


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)


def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
    """
    ⭐ 单线程方法
    函数的说明请见 request_llms/bridge_all.py
    """
    chatbot.append((inputs, ""))
    yield from update_ui(chatbot=chatbot, history=history)

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
        check_packages(["dashscope"])
    except:
        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade dashscope```。",
                                         chatbot=chatbot, history=history, delay=0)
        return

    # 检查DASHSCOPE_API_KEY
    if get_conf("DASHSCOPE_API_KEY") == "":
        yield from update_ui_lastest_msg(f"请配置 DASHSCOPE_API_KEY。",
                                         chatbot=chatbot, history=history, delay=0)
        return

    if additional_fn is not None:
        from core_functional import handle_core_functionality
        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    # 开始接收回复
    from .com_qwenapi import QwenRequestInstance
    sri = QwenRequestInstance()
    for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
        chatbot[-1] = (inputs, response)
        yield from update_ui(chatbot=chatbot, history=history)

    # 总结输出
    if response == f"[Local Message] 等待{model_name}响应中 ...":
        response = f"[Local Message] {model_name}响应异常 ..."
    history.extend([inputs, response])
    yield from update_ui(chatbot=chatbot, history=history)
@@ -0,0 +1,59 @@
model_name = "Qwen_Local"
cmd_to_install = "`pip install -r request_llms/requirements_qwen_local.txt`"

from toolbox import ProxyNetworkActivate, get_conf
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetQwenLMHandle(LocalLLMHandle):

    def load_model_info(self):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
        self.model_name = model_name
        self.cmd_to_install = cmd_to_install

    def load_model_and_tokenizer(self):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
        # from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
        from transformers import AutoModelForCausalLM, AutoTokenizer
        from transformers.generation import GenerationConfig
        with ProxyNetworkActivate('Download_LLM'):
            model_id = get_conf('QWEN_LOCAL_MODEL_SELECTION')
            self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
            # use fp16
            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
            model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # 可指定不同的生成长度、top_p等相关超参
            self._model = model

        return self._model, self._tokenizer

    def llm_stream_generator(self, **kwargs):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
        def adaptor(kwargs):
            query = kwargs['query']
            max_length = kwargs['max_length']
            top_p = kwargs['top_p']
            temperature = kwargs['temperature']
            history = kwargs['history']
            return query, max_length, top_p, temperature, history

        query, max_length, top_p, temperature, history = adaptor(kwargs)

        for response in self._model.chat_stream(self._tokenizer, query, history=history):
            yield response

    def try_to_import_special_deps(self, **kwargs):
        # import something that will raise error if the user does not install requirement_*.txt
        # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行
        import importlib
        importlib.import_module('modelscope')


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)
67  request_llms/bridge_skylark2.py  普通文件
@@ -0,0 +1,67 @@
import time
from toolbox import update_ui, get_conf, update_ui_lastest_msg
from toolbox import check_packages, report_exception

model_name = '云雀大模型'


def validate_key():
    YUNQUE_SECRET_KEY = get_conf("YUNQUE_SECRET_KEY")
    if YUNQUE_SECRET_KEY == '': return False
    return True


def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
    """
    ⭐ 多线程方法
    函数的说明请见 request_llms/bridge_all.py
    """
    watch_dog_patience = 5
    response = ""

    if validate_key() is False:
        raise RuntimeError('请配置YUNQUE_SECRET_KEY')

    from .com_skylark2api import YUNQUERequestInstance
    sri = YUNQUERequestInstance()
    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
        if len(observe_window) >= 1:
            observe_window[0] = response
        if len(observe_window) >= 2:
            if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
    return response


def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
    """
    ⭐ 单线程方法
    函数的说明请见 request_llms/bridge_all.py
    """
    chatbot.append((inputs, ""))
    yield from update_ui(chatbot=chatbot, history=history)

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
        check_packages(["volcengine"])
    except:
        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade volcengine```。",
                                         chatbot=chatbot, history=history, delay=0)
        return

    if validate_key() is False:
        yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置YUNQUE_SECRET_KEY", chatbot=chatbot, history=history, delay=0)
        return

    if additional_fn is not None:
        from core_functional import handle_core_functionality
        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    # 开始接收回复
    from .com_skylark2api import YUNQUERequestInstance
    sri = YUNQUERequestInstance()
    for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
        chatbot[-1] = (inputs, response)
        yield from update_ui(chatbot=chatbot, history=history)

    # 总结输出
    if response == f"[Local Message] 等待{model_name}响应中 ...":
        response = f"[Local Message] {model_name}响应异常 ..."
    history.extend([inputs, response])
    yield from update_ui(chatbot=chatbot, history=history)
@@ -26,7 +26,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",

    from .com_sparkapi import SparkRequestInstance
    sri = SparkRequestInstance()
    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt, use_image_api=False):
        if len(observe_window) >= 1:
            observe_window[0] = response
        if len(observe_window) >= 2:

@@ -52,7 +52,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
    # 开始接收回复
    from .com_sparkapi import SparkRequestInstance
    sri = SparkRequestInstance()
    for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
    for response in sri.generate(inputs, llm_kwargs, history, system_prompt, use_image_api=True):
        chatbot[-1] = (inputs, response)
        yield from update_ui(chatbot=chatbot, history=history)

@@ -7,14 +7,15 @@ import logging
import time
from toolbox import get_conf
import asyncio

load_message = "正在加载Claude组件,请稍候..."

try:
    """
    ========================================================================
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    第一部分:Slack API Client
    https://github.com/yokonsan/claude-in-slack-api
    ========================================================================
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    """

    from slack_sdk.errors import SlackApiError

@@ -33,10 +34,13 @@ try:
        - get_reply():异步方法。循环监听已打开频道的消息,如果收到"Typing…_"结尾的消息说明Claude还在继续输出,否则结束循环。

        """

        CHANNEL_ID = None

        async def open_channel(self):
            response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID'))
            response = await self.conversations_open(
                users=get_conf("SLACK_CLAUDE_BOT_ID")
            )
            self.CHANNEL_ID = response["channel"]["id"]

        async def chat(self, text):

@@ -49,9 +53,14 @@ try:
        async def get_slack_messages(self):
            try:
                # TODO:暂时不支持历史消息,因为在同一个频道里存在多人使用时历史消息渗透问题
                resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1)
                msg = [msg for msg in resp["messages"]
                       if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')]
                resp = await self.conversations_history(
                    channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1
                )
                msg = [
                    msg
                    for msg in resp["messages"]
                    if msg.get("user") == get_conf("SLACK_CLAUDE_BOT_ID")
                ]
                return msg
            except (SlackApiError, KeyError) as e:
                raise RuntimeError(f"获取Slack消息失败。")

@@ -69,13 +78,14 @@ try:
                else:
                    yield True, msg["text"]
                    break

            except:
                pass

    """
    ========================================================================
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    第二部分:子进程Worker(调用主体)
    ========================================================================
    =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    """


@@ -96,6 +106,7 @@ class ClaudeHandle(Process):
        try:
            self.success = False
            import slack_sdk

            self.info = "依赖检测通过,等待Claude响应。注意目前不能多人同时调用Claude接口(有线程锁),否则将导致每个人的Claude问询历史互相渗透。调用Claude时,会自动使用已配置的代理。"
            self.success = True
        except:

@@ -110,15 +121,15 @@ class ClaudeHandle(Process):
        while True:
            # 等待
            kwargs = self.child.recv()
            question = kwargs['query']
            history = kwargs['history']
            question = kwargs["query"]
            history = kwargs["history"]

            # 开始问问题
            prompt = ""

            # 问题
            prompt += question
            print('question:', prompt)
            print("question:", prompt)

            # 提交
            await self.claude_model.chat(prompt)

@@ -131,11 +142,15 @@ class ClaudeHandle(Process):
            else:
                # 防止丢失最后一条消息
                slack_msgs = await self.claude_model.get_slack_messages()
                last_msg = slack_msgs[-1]["text"] if slack_msgs and len(slack_msgs) > 0 else ""
                last_msg = (
                    slack_msgs[-1]["text"]
                    if slack_msgs and len(slack_msgs) > 0
                    else ""
                )
                if last_msg:
                    self.child.send(last_msg)
            print('-------- receive final ---------')
            self.child.send('[Finish]')
            print("-------- receive final ---------")
            self.child.send("[Finish]")

    def run(self):
        """

@@ -146,22 +161,24 @@ class ClaudeHandle(Process):
        self.local_history = []
        if (self.claude_model is None) or (not self.success):
            # 代理设置
            proxies = get_conf('proxies')
            proxies = get_conf("proxies")
            if proxies is None:
                self.proxies_https = None
            else:
                self.proxies_https = proxies['https']
                self.proxies_https = proxies["https"]

            try:
                SLACK_CLAUDE_USER_TOKEN = get_conf('SLACK_CLAUDE_USER_TOKEN')
                self.claude_model = SlackClient(token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https)
                print('Claude组件初始化成功。')
                SLACK_CLAUDE_USER_TOKEN = get_conf("SLACK_CLAUDE_USER_TOKEN")
                self.claude_model = SlackClient(
                    token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https
                )
                print("Claude组件初始化成功。")
            except:
                self.success = False
                tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
                self.child.send(f'[Local Message] 不能加载Claude组件。{tb_str}')
                self.child.send('[Fail]')
                self.child.send('[Finish]')
                tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
                self.child.send(f"[Local Message] 不能加载Claude组件。{tb_str}")
                self.child.send("[Fail]")
                self.child.send("[Finish]")
                raise RuntimeError(f"不能加载Claude组件。")

        self.success = True

@@ -169,10 +186,10 @@ class ClaudeHandle(Process):
            # 进入任务等待状态
            asyncio.run(self.async_run())
        except Exception:
            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
            self.child.send(f'[Local Message] Claude失败 {tb_str}.')
            self.child.send('[Fail]')
            self.child.send('[Finish]')
            tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
            self.child.send(f"[Local Message] Claude失败 {tb_str}.")
            self.child.send("[Fail]")
            self.child.send("[Finish]")

    def stream_chat(self, **kwargs):
        """

@@ -182,9 +199,9 @@ class ClaudeHandle(Process):
        self.parent.send(kwargs)  # 发送请求到子进程
        while True:
            res = self.parent.recv()  # 等待Claude回复的片段
            if res == '[Finish]':
            if res == "[Finish]":
                break  # 结束
            elif res == '[Fail]':
            elif res == "[Fail]":
                self.success = False
                break
            else:

@@ -193,15 +210,22 @@ class ClaudeHandle(Process):


"""
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
第三部分:主进程统一调用函数接口
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
global claude_handle
claude_handle = None


def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
def predict_no_ui_long_connection(
    inputs,
    llm_kwargs,
    history=[],
    sys_prompt="",
    observe_window=None,
    console_slience=False,
):
    """
    多线程方法
    函数的说明请见 request_llms/bridge_all.py

@@ -223,7 +247,14 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
    watch_dog_patience = 5  # 看门狗 (watchdog) 的耐心, 设置5秒即可
    response = ""
    observe_window[0] = "[Local Message] 等待Claude响应中 ..."
    for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
    for response in claude_handle.stream_chat(
        query=inputs,
        history=history_feedin,
        system_prompt=sys_prompt,
        max_length=llm_kwargs["max_length"],
        top_p=llm_kwargs["top_p"],
        temperature=llm_kwargs["temperature"],
    ):
        observe_window[0] = preprocess_newbing_out_simple(response)
        if len(observe_window) >= 2:
            if (time.time() - observe_window[1]) > watch_dog_patience:

@@ -231,7 +262,16 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
    return preprocess_newbing_out_simple(response)


def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
def predict(
    inputs,
    llm_kwargs,
    plugin_kwargs,
    chatbot,
    history=[],
    system_prompt="",
    stream=True,
    additional_fn=None,
):
    """
    单线程方法
    函数的说明请见 request_llms/bridge_all.py

@@ -249,7 +289,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    if additional_fn is not None:
        from core_functional import handle_core_functionality
        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

        inputs, history = handle_core_functionality(
            additional_fn, inputs, history, chatbot
        )

    history_feedin = []
    for i in range(len(history) // 2):

@@ -257,13 +300,19 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp

    chatbot[-1] = (inputs, "[Local Message] 等待Claude响应中 ...")
    response = "[Local Message] 等待Claude响应中 ..."
    yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
    for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt):
    yield from update_ui(
        chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
    )
    for response in claude_handle.stream_chat(
        query=inputs, history=history_feedin, system_prompt=system_prompt
    ):
        chatbot[-1] = (inputs, preprocess_newbing_out(response))
        yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
        yield from update_ui(
            chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
        )
    if response == "[Local Message] 等待Claude响应中 ...":
        response = "[Local Message] Claude响应异常,请刷新界面重试 ..."
    history.extend([inputs, response])
    logging.info(f'[raw_input] {inputs}')
    logging.info(f'[response] {response}')
    logging.info(f"[raw_input] {inputs}")
    logging.info(f"[response] {response}")
    yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")

@@ -42,7 +42,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
    try:
        check_packages(["zhipuai"])
    except:
        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
        yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install zhipuai==1.0.7```。",
                                         chatbot=chatbot, history=history, delay=0)
        return

229  request_llms/com_google.py  普通文件
@@ -0,0 +1,229 @@
# encoding: utf-8
# @Time   : 2023/12/25
# @Author : Spike
# @Descr  :
import json
import os
import re
import requests
from typing import List, Dict, Tuple
from toolbox import get_conf, encode_image, get_pictures_list

proxies, TIMEOUT_SECONDS = get_conf("proxies", "TIMEOUT_SECONDS")

"""
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
第五部分 一些文件处理方法
files_filter_handler 根据type过滤文件
input_encode_handler 提取input中的文件,并解析
file_manifest_filter_html 根据type过滤文件, 并解析为html or md 文本
link_mtime_to_md 文件增加本地时间参数,避免下载到缓存文件
html_view_blank 超链接
html_local_file 本地文件取相对路径
to_markdown_tabs 文件list 转换为 md tab
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""


def files_filter_handler(file_list):
    new_list = []
    filter_ = [
        "png",
        "jpg",
        "jpeg",
        "bmp",
        "svg",
        "webp",
        "ico",
        "tif",
        "tiff",
        "raw",
        "eps",
    ]
    for file in file_list:
        file = str(file).replace("file=", "")
        if os.path.exists(file):
            if str(os.path.basename(file)).split(".")[-1] in filter_:
                new_list.append(file)
    return new_list


def input_encode_handler(inputs, llm_kwargs):
    if llm_kwargs["most_recent_uploaded"].get("path"):
        image_paths = get_pictures_list(llm_kwargs["most_recent_uploaded"]["path"])
    md_encode = []
    for md_path in image_paths:
        type_ = os.path.splitext(md_path)[1].replace(".", "")
        type_ = "jpeg" if type_ == "jpg" else type_
        md_encode.append({"data": encode_image(md_path), "type": type_})
    return inputs, md_encode


def file_manifest_filter_html(file_list, filter_: list = None, md_type=False):
    new_list = []
    if not filter_:
        filter_ = [
            "png",
            "jpg",
            "jpeg",
            "bmp",
            "svg",
            "webp",
            "ico",
            "tif",
            "tiff",
            "raw",
            "eps",
        ]
    for file in file_list:
        if str(os.path.basename(file)).split(".")[-1] in filter_:
            new_list.append(html_local_img(file, md=md_type))
        elif os.path.exists(file):
            new_list.append(link_mtime_to_md(file))
        else:
            new_list.append(file)
    return new_list


def link_mtime_to_md(file):
    link_local = html_local_file(file)
    link_name = os.path.basename(file)
    a = f"[{link_name}]({link_local}?{os.path.getmtime(file)})"
    return a


def html_local_file(file):
    base_path = os.path.dirname(__file__)  # 项目目录
    if os.path.exists(str(file)):
        file = f'file={file.replace(base_path, ".")}'
    return file


def html_local_img(__file, layout="left", max_width=None, max_height=None, md=True):
    style = ""
    if max_width is not None:
        style += f"max-width: {max_width};"
    if max_height is not None:
        style += f"max-height: {max_height};"
    __file = html_local_file(__file)
    a = f'<div align="{layout}"><img src="{__file}" style="{style}"></div>'
    if md:
        a = f"![]({__file})"
    return a


def to_markdown_tabs(head: list, tabs: list, alignment=":---:", column=False):
    """
    Args:
        head: 表头:[]
        tabs: 表值:[[列1], [列2], [列3], [列4]]
        alignment: :--- 左对齐, :---: 居中对齐, ---: 右对齐
        column: True to keep data in columns, False to keep data in rows (default).
    Returns:
        A string representation of the markdown table.
    """
    if column:
        transposed_tabs = list(map(list, zip(*tabs)))
    else:
        transposed_tabs = tabs
    # Find the maximum length among the columns
    max_len = max(len(column) for column in transposed_tabs)

    tab_format = "| %s "
    tabs_list = "".join([tab_format % i for i in head]) + "|\n"
    tabs_list += "".join([tab_format % alignment for i in head]) + "|\n"

    for i in range(max_len):
        row_data = [tab[i] if i < len(tab) else "" for tab in transposed_tabs]
        row_data = file_manifest_filter_html(row_data, filter_=None)
        tabs_list += "".join([tab_format % i for i in row_data]) + "|\n"

    return tabs_list

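A minimal usage sketch of to_markdown_tabs (the head/tabs values below are invented for illustration; note each cell passes through file_manifest_filter_html, so plain strings that are not file paths come back unchanged):

# tabs is a list of columns by default (column=False):
head = ["name", "count"]
tabs = [["alpha", "beta"], ["1", "2"]]
print(to_markdown_tabs(head, tabs))
# | name | count |
# | :---: | :---: |
# | alpha | 1 |
# | beta | 2 |
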
class GoogleChatInit:
    def __init__(self):
        self.url_gemini = "https://generativelanguage.googleapis.com/v1beta/models/%m:streamGenerateContent?key=%k"

    def generate_chat(self, inputs, llm_kwargs, history, system_prompt):
        headers, payload = self.generate_message_payload(
            inputs, llm_kwargs, history, system_prompt
        )
        response = requests.post(
            url=self.url_gemini,
            headers=headers,
            data=json.dumps(payload),
            stream=True,
            proxies=proxies,
            timeout=TIMEOUT_SECONDS,
        )
        return response.iter_lines()

    def __conversation_user(self, user_input, llm_kwargs):
        what_i_have_asked = {"role": "user", "parts": []}
        if "vision" not in self.url_gemini:
            input_ = user_input
            encode_img = []
        else:
            input_, encode_img = input_encode_handler(user_input, llm_kwargs=llm_kwargs)
        what_i_have_asked["parts"].append({"text": input_})
        if encode_img:
            for data in encode_img:
                what_i_have_asked["parts"].append(
                    {
                        "inline_data": {
                            "mime_type": f"image/{data['type']}",
                            "data": data["data"],
                        }
                    }
                )
        return what_i_have_asked

    def __conversation_history(self, history, llm_kwargs):
        messages = []
        conversation_cnt = len(history) // 2
        if conversation_cnt:
            for index in range(0, 2 * conversation_cnt, 2):
                what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
                what_gpt_answer = {
                    "role": "model",
                    "parts": [{"text": history[index + 1]}],
                }
                messages.append(what_i_have_asked)
                messages.append(what_gpt_answer)
        return messages

    def generate_message_payload(
        self, inputs, llm_kwargs, history, system_prompt
    ) -> Tuple[Dict, Dict]:
        messages = [
            # {"role": "system", "parts": [{"text": system_prompt}]},  # gemini 不允许对话轮次为偶数,所以这个没有用,看后续支持吧。。。
            # {"role": "user", "parts": [{"text": ""}]},
            # {"role": "model", "parts": [{"text": ""}]}
        ]
        self.url_gemini = self.url_gemini.replace(
            "%m", llm_kwargs["llm_model"]
        ).replace("%k", get_conf("GEMINI_API_KEY"))
        header = {"Content-Type": "application/json"}
        if "vision" not in self.url_gemini:  # 不是vision 才处理history
            messages.extend(
                self.__conversation_history(history, llm_kwargs)
            )  # 处理 history
        messages.append(self.__conversation_user(inputs, llm_kwargs))  # 处理用户对话
        payload = {
            "contents": messages,
            "generationConfig": {
                # "maxOutputTokens": 800,
                "stopSequences": str(llm_kwargs.get("stop", "")).split(" "),
                "temperature": llm_kwargs.get("temperature", 1),
                "topP": llm_kwargs.get("top_p", 0.8),
                "topK": 10,
            },
        }
        return header, payload


if __name__ == "__main__":
    google = GoogleChatInit()
    # print(google.generate_message_payload('你好呀', {}, ['123123', '3123123'], ''))
    # google.input_encode_handle('123123[123123](./123123), ')

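For orientation, a sketch of the JSON body that generate_message_payload assembles for the non-vision endpoint (the conversation text is illustrative, not from a real session):

# Illustrative shape of the payload posted to Gemini's streamGenerateContent:
example_payload = {
    "contents": [
        {"role": "user", "parts": [{"text": "你好"}]},
        {"role": "model", "parts": [{"text": "你好,有什么可以帮你?"}]},
        {"role": "user", "parts": [{"text": "什么是质子?"}]},
    ],
    "generationConfig": {
        "stopSequences": [""],   # from str(llm_kwargs.get("stop", "")).split(" ")
        "temperature": 1,
        "topP": 0.8,
        "topK": 10,
    },
}
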
94  request_llms/com_qwenapi.py  普通文件
@@ -0,0 +1,94 @@
from http import HTTPStatus
from toolbox import get_conf
import threading
import logging

timeout_bot_msg = '[Local Message] Request timeout. Network error.'

class QwenRequestInstance():
    def __init__(self):
        import dashscope
        self.time_to_yield_event = threading.Event()
        self.time_to_exit_event = threading.Event()
        self.result_buf = ""

        def validate_key():
            DASHSCOPE_API_KEY = get_conf("DASHSCOPE_API_KEY")
            if DASHSCOPE_API_KEY == '': return False
            return True

        if not validate_key():
            raise RuntimeError('请配置 DASHSCOPE_API_KEY')
        dashscope.api_key = get_conf("DASHSCOPE_API_KEY")

    def generate(self, inputs, llm_kwargs, history, system_prompt):
        # import _thread as thread
        from dashscope import Generation
        QWEN_MODEL = {
            'qwen-turbo': Generation.Models.qwen_turbo,
            'qwen-plus': Generation.Models.qwen_plus,
            'qwen-max': Generation.Models.qwen_max,
        }[llm_kwargs['llm_model']]
        top_p = llm_kwargs.get('top_p', 0.8)
        if top_p == 0: top_p += 1e-5
        if top_p == 1: top_p -= 1e-5

        self.result_buf = ""
        responses = Generation.call(
            model=QWEN_MODEL,
            messages=generate_message_payload(inputs, llm_kwargs, history, system_prompt),
            top_p=top_p,
            temperature=llm_kwargs.get('temperature', 1.0),
            result_format='message',
            stream=True,
            incremental_output=True
        )

        for response in responses:
            if response.status_code == HTTPStatus.OK:
                if response.output.choices[0].finish_reason == 'stop':
                    yield self.result_buf
                    break
                elif response.output.choices[0].finish_reason == 'length':
                    self.result_buf += "[Local Message] 生成长度过长,后续输出被截断"
                    yield self.result_buf
                    break
                else:
                    self.result_buf += response.output.choices[0].message.content
                    yield self.result_buf
            else:
                self.result_buf += f"[Local Message] 请求错误:状态码:{response.status_code},错误码:{response.code},消息:{response.message}"
                yield self.result_buf
                break
        logging.info(f'[raw_input] {inputs}')
        logging.info(f'[response] {self.result_buf}')
        return self.result_buf


def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
    conversation_cnt = len(history) // 2
    if system_prompt == '': system_prompt = 'Hello!'
    messages = [{"role": "user", "content": system_prompt}, {"role": "assistant", "content": "Certainly!"}]
    if conversation_cnt:
        for index in range(0, 2*conversation_cnt, 2):
            what_i_have_asked = {}
            what_i_have_asked["role"] = "user"
            what_i_have_asked["content"] = history[index]
            what_gpt_answer = {}
            what_gpt_answer["role"] = "assistant"
            what_gpt_answer["content"] = history[index+1]
            if what_i_have_asked["content"] != "":
                if what_gpt_answer["content"] == "":
                    continue
                if what_gpt_answer["content"] == timeout_bot_msg:
                    continue
                messages.append(what_i_have_asked)
                messages.append(what_gpt_answer)
            else:
                messages[-1]['content'] = what_gpt_answer['content']
    what_i_ask_now = {}
    what_i_ask_now["role"] = "user"
    what_i_ask_now["content"] = inputs
    messages.append(what_i_ask_now)
    return messages

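To make the system-prompt handshake at the head of the list concrete, here is what the payload builder returns for a small hypothetical call (inputs invented for illustration):

msgs = generate_message_payload(
    inputs="什么是质子?", llm_kwargs={}, history=["你好", "你好,有什么可以帮你?"], system_prompt=""
)
# msgs == [
#     {"role": "user", "content": "Hello!"},           # empty system_prompt falls back to 'Hello!'
#     {"role": "assistant", "content": "Certainly!"},  # fixed handshake reply
#     {"role": "user", "content": "你好"},
#     {"role": "assistant", "content": "你好,有什么可以帮你?"},
#     {"role": "user", "content": "什么是质子?"},
# ]
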
95  request_llms/com_skylark2api.py  普通文件
@@ -0,0 +1,95 @@
from toolbox import get_conf
import threading
import logging
import os

timeout_bot_msg = '[Local Message] Request timeout. Network error.'
#os.environ['VOLC_ACCESSKEY'] = ''
#os.environ['VOLC_SECRETKEY'] = ''

class YUNQUERequestInstance():
    def __init__(self):
        self.time_to_yield_event = threading.Event()
        self.time_to_exit_event = threading.Event()
        self.result_buf = ""

    def generate(self, inputs, llm_kwargs, history, system_prompt):
        # import _thread as thread
        from volcengine.maas import MaasService, MaasException

        maas = MaasService('maas-api.ml-platform-cn-beijing.volces.com', 'cn-beijing')

        YUNQUE_SECRET_KEY, YUNQUE_ACCESS_KEY, YUNQUE_MODEL = get_conf("YUNQUE_SECRET_KEY", "YUNQUE_ACCESS_KEY", "YUNQUE_MODEL")
        maas.set_ak(YUNQUE_ACCESS_KEY)  # 填写 VOLC_ACCESSKEY
        maas.set_sk(YUNQUE_SECRET_KEY)  # 填写 VOLC_SECRETKEY

        self.result_buf = ""

        req = {
            "model": {
                "name": YUNQUE_MODEL,
                "version": "1.0",  # use default version if not specified.
            },
            "parameters": {
                "max_new_tokens": 4000,  # 输出文本的最大tokens限制
                "min_new_tokens": 1,  # 输出文本的最小tokens限制
                "temperature": llm_kwargs['temperature'],  # 用于控制生成文本的随机性和创造性,Temperature值越大随机性越大,取值范围0~1
                "top_p": llm_kwargs['top_p'],  # 用于控制输出tokens的多样性,TopP值越大输出的tokens类型越丰富,取值范围0~1
                "top_k": 0,  # 选择预测值最大的k个token进行采样,取值范围0-1000,0表示不生效
                "max_prompt_tokens": 4000,  # 最大输入 token 数,如果给出的 prompt 的 token 长度超过此限制,取最后 max_prompt_tokens 个 token 输入模型。
            },
            "messages": self.generate_message_payload(inputs, llm_kwargs, history, system_prompt)
        }

        response = maas.stream_chat(req)

        for resp in response:
            self.result_buf += resp.choice.message.content
            yield self.result_buf
        '''
        for event in response.events():
            if event.event == "add":
                self.result_buf += event.data
                yield self.result_buf
            elif event.event == "error" or event.event == "interrupted":
                raise RuntimeError("Unknown error:" + event.data)
            elif event.event == "finish":
                yield self.result_buf
                break
            else:
                raise RuntimeError("Unknown error:" + str(event))

        logging.info(f'[raw_input] {inputs}')
        logging.info(f'[response] {self.result_buf}')
        '''
        return self.result_buf

    def generate_message_payload(self, inputs, llm_kwargs, history, system_prompt):
        from volcengine.maas import ChatRole
        conversation_cnt = len(history) // 2
        messages = [{"role": ChatRole.USER, "content": system_prompt},
                    {"role": ChatRole.ASSISTANT, "content": "Certainly!"}]
        if conversation_cnt:
            for index in range(0, 2 * conversation_cnt, 2):
                what_i_have_asked = {}
                what_i_have_asked["role"] = ChatRole.USER
                what_i_have_asked["content"] = history[index]
                what_gpt_answer = {}
                what_gpt_answer["role"] = ChatRole.ASSISTANT
                what_gpt_answer["content"] = history[index + 1]
                if what_i_have_asked["content"] != "":
                    if what_gpt_answer["content"] == "":
                        continue
                    if what_gpt_answer["content"] == timeout_bot_msg:
                        continue
                    messages.append(what_i_have_asked)
                    messages.append(what_gpt_answer)
                else:
                    messages[-1]['content'] = what_gpt_answer['content']
        what_i_ask_now = {}
        what_i_ask_now["role"] = ChatRole.USER
        what_i_ask_now["content"] = inputs
        messages.append(what_i_ask_now)
        return messages
@@ -72,12 +72,12 @@ class SparkRequestInstance():

        self.result_buf = ""

    def generate(self, inputs, llm_kwargs, history, system_prompt):
    def generate(self, inputs, llm_kwargs, history, system_prompt, use_image_api=False):
        llm_kwargs = llm_kwargs
        history = history
        system_prompt = system_prompt
        import _thread as thread
        thread.start_new_thread(self.create_blocking_request, (inputs, llm_kwargs, history, system_prompt))
        thread.start_new_thread(self.create_blocking_request, (inputs, llm_kwargs, history, system_prompt, use_image_api))
        while True:
            self.time_to_yield_event.wait(timeout=1)
            if self.time_to_yield_event.is_set():

@@ -86,7 +86,7 @@ class SparkRequestInstance():
        return self.result_buf


    def create_blocking_request(self, inputs, llm_kwargs, history, system_prompt):
    def create_blocking_request(self, inputs, llm_kwargs, history, system_prompt, use_image_api):
        if llm_kwargs['llm_model'] == 'sparkv2':
            gpt_url = self.gpt_url_v2
        elif llm_kwargs['llm_model'] == 'sparkv3':

@@ -94,9 +94,11 @@ class SparkRequestInstance():
        else:
            gpt_url = self.gpt_url
        file_manifest = []
        if llm_kwargs.get('most_recent_uploaded'):
        if use_image_api and llm_kwargs.get('most_recent_uploaded'):
            if llm_kwargs['most_recent_uploaded'].get('path'):
                file_manifest = get_pictures_list(llm_kwargs['most_recent_uploaded']['path'])
                if len(file_manifest) > 0:
                    print('正在使用讯飞图片理解API')
                    gpt_url = self.gpt_url_img
        wsParam = Ws_Param(self.appid, self.api_key, self.api_secret, gpt_url)
        websocket.enableTrace(False)

@@ -26,6 +26,8 @@ class ZhipuRequestInstance():
        )
        for event in response.events():
            if event.event == "add":
                # if self.result_buf == "" and event.data.startswith(" "):
                #     event.data = event.data.lstrip(" ")  # 每次智谱为啥都要带个空格开头呢?
                self.result_buf += event.data
                yield self.result_buf
            elif event.event == "error" or event.event == "interrupted":

@@ -35,7 +37,8 @@ class ZhipuRequestInstance():
                break
            else:
                raise RuntimeError("Unknown error:" + str(event))

        if self.result_buf == "":
            yield "智谱没有返回任何数据, 请检查ZHIPUAI_API_KEY和ZHIPUAI_MODEL是否填写正确."
        logging.info(f'[raw_input] {inputs}')
        logging.info(f'[response] {self.result_buf}')
        return self.result_buf

@@ -1,8 +1,8 @@
"""
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
第一部分:来自EdgeGPT.py
https://github.com/acheong08/EdgeGPT
========================================================================
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""
"""
Main.py

@@ -452,9 +452,11 @@ class _ChatHub:
        ws_cookies = []
        for cookie in self.cookies:
            ws_cookies.append(f"{cookie['name']}={cookie['value']}")
        req_header.update({
            'Cookie': ';'.join(ws_cookies),
        })
        req_header.update(
            {
                "Cookie": ";".join(ws_cookies),
            }
        )

        timeout = aiohttp.ClientTimeout(total=30)
        self.session = aiohttp.ClientSession(timeout=timeout)

@@ -183,11 +183,11 @@ class LocalLLMHandle(Process):
    def stream_chat(self, **kwargs):
        # ⭐run in main process
        if self.get_state() == "`准备就绪`":
            yield "`正在等待线程锁,排队中请稍后 ...`"
            yield "`正在等待线程锁,排队中请稍候 ...`"

        with self.threadLock:
            if self.parent.poll():
                yield "`排队中请稍后 ...`"
                yield "`排队中请稍候 ...`"
                self.clear_pending_messages()
            self.parent.send(kwargs)
            std_out = ""

@@ -6,5 +6,3 @@ sentencepiece
numpy
onnxruntime
sentencepiece
streamlit
streamlit-chat

@@ -5,5 +5,3 @@ accelerate
matplotlib
huggingface_hub
triton
streamlit

@@ -1,4 +1 @@
modelscope
transformers_stream_generator
auto-gptq
optimum
dashscope

@@ -0,0 +1,5 @@
modelscope
transformers_stream_generator
auto-gptq
optimum
urllib3<2
@@ -1,11 +1,14 @@
./docs/gradio-3.32.6-py3-none-any.whl
https://fastly.jsdelivr.net/gh/binary-husky/gradio-fix@gpt-academic/release/gradio-3.32.7-py3-none-any.whl
pypdf2==2.12.1
zhipuai<2
tiktoken>=0.3.3
requests[socks]
pydantic==1.10.11
protobuf==3.18
transformers>=4.27.1
scipdf_parser>=0.52
python-markdown-math
pymdown-extensions
websocket-client
beautifulsoup4
prompt_toolkit

@@ -0,0 +1,287 @@
import markdown
import re
import os
import math
from textwrap import dedent
from functools import lru_cache
from pymdownx.superfences import fence_div_format, fence_code_format
from latex2mathml.converter import convert as tex2mathml
from shared_utils.config_loader import get_conf as get_conf

pj = os.path.join
default_user_name = 'default_user'

markdown_extension_configs = {
    'mdx_math': {
        'enable_dollar_delimiter': True,
        'use_gitlab_delimiters': False,
    },
}

code_highlight_configs = {
    "pymdownx.superfences": {
        'css_class': 'codehilite',
        "custom_fences": [
            {
                'name': 'mermaid',
                'class': 'mermaid',
                'format': fence_code_format
            }
        ]
    },
    "pymdownx.highlight": {
        'css_class': 'codehilite',
        'guess_lang': True,
        # 'auto_title': True,
        # 'linenums': True
    }
}


def text_divide_paragraph(text):
    """
    将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。
    """
    pre = '<div class="markdown-body">'
    suf = '</div>'
    if text.startswith(pre) and text.endswith(suf):
        return text

    if '```' in text:
        # careful input
        return text
    elif '</div>' in text:
        # careful input
        return text
    else:
        # whatever input
        lines = text.split("\n")
        for i, line in enumerate(lines):
            lines[i] = lines[i].replace(" ", "&nbsp;")
        text = "</br>".join(lines)
        return pre + text + suf


def tex2mathml_catch_exception(content, *args, **kwargs):
    try:
        content = tex2mathml(content, *args, **kwargs)
    except:
        content = content
    return content


def replace_math_no_render(match):
    content = match.group(1)
    if 'mode=display' in match.group(0):
        content = content.replace('\n', '</br>')
        return f"<font color=\"#00FF00\">$$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$$</font>"
    else:
        return f"<font color=\"#00FF00\">$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$</font>"


def replace_math_render(match):
    content = match.group(1)
    if 'mode=display' in match.group(0):
        if '\\begin{aligned}' in content:
            content = content.replace('\\begin{aligned}', '\\begin{array}')
            content = content.replace('\\end{aligned}', '\\end{array}')
            content = content.replace('&', ' ')
        content = tex2mathml_catch_exception(content, display="block")
        return content
    else:
        return tex2mathml_catch_exception(content)


def markdown_bug_hunt(content):
    """
    解决一个mdx_math的bug(单$包裹begin命令时多余<script>)
    """
    content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">',
                              '<script type="math/tex; mode=display">')
    content = content.replace('</script>\n</script>', '</script>')
    return content


def is_equation(txt):
    """
    判定是否为公式 | 测试1 写出洛伦兹定律,使用tex格式公式 测试2 给出柯西不等式,使用latex格式 测试3 写出麦克斯韦方程组
    """
    if '```' in txt and '```reference' not in txt: return False
    if '$' not in txt and '\\[' not in txt: return False
    mathpatterns = {
        r'(?<!\\|\$)(\$)([^\$]+)(\$)': {'allow_multi_lines': False},  # $...$
        r'(?<!\\)(\$\$)([^\$]+)(\$\$)': {'allow_multi_lines': True},  # $$...$$
        r'(?<!\\)(\\\[)(.+?)(\\\])': {'allow_multi_lines': False},  # \[...\]
        # r'(?<!\\)(\\\()(.+?)(\\\))': {'allow_multi_lines': False},  # \(...\)
        # r'(?<!\\)(\\begin{([a-z]+?\*?)})(.+?)(\\end{\2})': {'allow_multi_lines': True},  # \begin...\end
        # r'(?<!\\)(\$`)([^`]+)(`\$)': {'allow_multi_lines': False},  # $`...`$
    }
    matches = []
    for pattern, property in mathpatterns.items():
        flags = re.ASCII | re.DOTALL if property['allow_multi_lines'] else re.ASCII
        matches.extend(re.findall(pattern, txt, flags))
    if len(matches) == 0: return False
    contain_any_eq = False
    illegal_pattern = re.compile(r'[^\x00-\x7F]|echo')
    for match in matches:
        if len(match) != 3: return False
        eq_canidate = match[1]
        if illegal_pattern.search(eq_canidate):
            return False
        else:
            contain_any_eq = True
    return contain_any_eq
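A few quick checks showing how this heuristic behaves (sample strings invented for illustration):

# Pure-ASCII TeX wrapped in $...$ is accepted; code fences and non-ASCII equation bodies are rejected:
assert is_equation(r"洛伦兹力:$\mathbf{F} = q(\mathbf{E} + \mathbf{v} \times \mathbf{B})$")
assert not is_equation("```python\nprint(1)\n```")   # code fences short-circuit to False
assert not is_equation("价格是 $100 和 $200")          # matched span contains non-ASCII text
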

def fix_markdown_indent(txt):
    # fix markdown indent
    if (' - ' not in txt) or ('. ' not in txt):
        # do not need to fix, fast escape
        return txt
    # walk through the lines and fix non-standard indentation
    lines = txt.split("\n")
    pattern = re.compile(r'^\s+-')
    activated = False
    for i, line in enumerate(lines):
        if line.startswith('- ') or line.startswith('1. '):
            activated = True
        if activated and pattern.match(line):
            stripped_string = line.lstrip()
            num_spaces = len(line) - len(stripped_string)
            if (num_spaces % 4) == 3:
                num_spaces_should_be = math.ceil(num_spaces / 4) * 4
                lines[i] = ' ' * num_spaces_should_be + stripped_string
    return '\n'.join(lines)


FENCED_BLOCK_RE = re.compile(
    dedent(r'''
        (?P<fence>^[ \t]*(?:~{3,}|`{3,}))[ ]*      # opening fence
        ((\{(?P<attrs>[^\}\n]*)\})|                # (optional {attrs} or
        (\.?(?P<lang>[\w#.+-]*)[ ]*)?              # optional (.)lang
        (hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot)[ ]*)?)  # optional hl_lines)
        \n                                         # newline (end of opening fence)
        (?P<code>.*?)(?<=\n)                       # the code block
        (?P=fence)[ ]*$                            # closing fence
    '''),
    re.MULTILINE | re.DOTALL | re.VERBOSE
)


def get_line_range(re_match_obj, txt):
    start_pos, end_pos = re_match_obj.regs[0]
    num_newlines_before = txt[:start_pos+1].count('\n')
    line_start = num_newlines_before
    line_end = num_newlines_before + txt[start_pos:end_pos].count('\n')+1
    return line_start, line_end


def fix_code_segment_indent(txt):
    lines = []
    change_any = False
    txt_tmp = txt
    while True:
        re_match_obj = FENCED_BLOCK_RE.search(txt_tmp)
        if not re_match_obj: break
        if len(lines) == 0: lines = txt.split("\n")

        # 清空 txt_tmp 对应的位置方便下次搜索
        start_pos, end_pos = re_match_obj.regs[0]
        txt_tmp = txt_tmp[:start_pos] + ' '*(end_pos-start_pos) + txt_tmp[end_pos:]
        line_start, line_end = get_line_range(re_match_obj, txt)

        # 获取公共缩进
        shared_indent_cnt = 1e5
        for i in range(line_start, line_end):
            stripped_string = lines[i].lstrip()
            num_spaces = len(lines[i]) - len(stripped_string)
            if num_spaces < shared_indent_cnt:
                shared_indent_cnt = num_spaces

        # 修复缩进
        if (shared_indent_cnt < 1e5) and (shared_indent_cnt % 4) == 3:
            num_spaces_should_be = math.ceil(shared_indent_cnt / 4) * 4
            for i in range(line_start, line_end):
                add_n = num_spaces_should_be - shared_indent_cnt
                lines[i] = ' ' * add_n + lines[i]
            if not change_any:  # 遇到第一个
                change_any = True

    if change_any:
        return '\n'.join(lines)
    else:
        return txt


@lru_cache(maxsize=128)  # 使用 lru缓存 加快转换速度
def markdown_convertion(txt):
    """
    将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
    """
    pre = '<div class="markdown-body">'
    suf = '</div>'
    if txt.startswith(pre) and txt.endswith(suf):
        # print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
        return txt  # 已经被转化过,不需要再次转化

    find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'

    txt = fix_markdown_indent(txt)
    # txt = fix_code_segment_indent(txt)
    if is_equation(txt):  # 有$标识的公式符号,且没有代码段```的标识
        # convert everything to html format
        split = markdown.markdown(text='---')
        convert_stage_1 = markdown.markdown(text=txt, extensions=['sane_lists', 'tables', 'mdx_math', 'pymdownx.superfences', 'pymdownx.highlight'],
                                            extension_configs={**markdown_extension_configs, **code_highlight_configs})
        convert_stage_1 = markdown_bug_hunt(convert_stage_1)
        # 1. convert to easy-to-copy tex (do not render math)
        convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
        # 2. convert to rendered equation
        convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
        # cat them together
        return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
    else:
        return pre + markdown.markdown(txt, extensions=['sane_lists', 'tables', 'pymdownx.superfences', 'pymdownx.highlight'], extension_configs=code_highlight_configs) + suf


def close_up_code_segment_during_stream(gpt_reply):
    """
    在gpt输出代码的中途(输出了前面的```,但还没输出完后面的```),补上后面的```

    Args:
        gpt_reply (str): GPT模型返回的回复字符串。

    Returns:
        str: 返回一个新的字符串,将输出代码片段的"后面的```"补上。

    """
    if '```' not in gpt_reply:
        return gpt_reply
    if gpt_reply.endswith('```'):
        return gpt_reply

    # 排除了以上两个情况后,检查剩余```标记的数量是否为奇数
    segments = gpt_reply.split('```')
    n_mark = len(segments) - 1
    if n_mark % 2 == 1:
        return gpt_reply + '\n```'  # 输出代码片段中!
    else:
        return gpt_reply

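A quick illustration of the closing-fence repair (the streamed text is made up for the example):

# Streaming reply cut off inside a code fence — one opening ``` without a closing one:
partial = "下面是示例代码:\n```python\nprint('hi')"
print(close_up_code_segment_during_stream(partial))
# 末尾被补上 "\n```",代码块在前端得以正常渲染
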
def format_io(self, y):
    """
    将输入和输出解析为HTML格式。将y中最后一项的输入部分段落化,并将输出部分的Markdown和数学公式转换为HTML格式。
    """
    if y is None or y == []:
        return []
    i_ask, gpt_reply = y[-1]
    # 输入部分太自由,预处理一波
    if i_ask is not None: i_ask = text_divide_paragraph(i_ask)
    # 当代码输出半截的时候,试着补上后面的```
    if gpt_reply is not None: gpt_reply = close_up_code_segment_during_stream(gpt_reply)
    # process
    y[-1] = (
        None if i_ask is None else markdown.markdown(i_ask, extensions=['pymdownx.superfences', 'tables', 'pymdownx.highlight'], extension_configs=code_highlight_configs),
        None if gpt_reply is None else markdown_convertion(gpt_reply)
    )
    return y
131  shared_utils/config_loader.py  普通文件
@@ -0,0 +1,131 @@
import importlib
import time
import os
from functools import lru_cache
from colorful import print亮红, print亮绿, print亮蓝

pj = os.path.join
default_user_name = 'default_user'

def read_env_variable(arg, default_value):
    """
    环境变量可以是 `GPT_ACADEMIC_CONFIG`(优先),也可以直接是`CONFIG`
    例如在windows cmd中,既可以写:
        set USE_PROXY=True
        set API_KEY=sk-j7caBpkRoxxxxxxxxxxxxxxxxxxxxxxxxxxxx
        set proxies={"http":"http://127.0.0.1:10085", "https":"http://127.0.0.1:10085",}
        set AVAIL_LLM_MODELS=["gpt-3.5-turbo", "chatglm"]
        set AUTHENTICATION=[("username", "password"), ("username2", "password2")]
    也可以写:
        set GPT_ACADEMIC_USE_PROXY=True
        set GPT_ACADEMIC_API_KEY=sk-j7caBpkRoxxxxxxxxxxxxxxxxxxxxxxxxxxxx
        set GPT_ACADEMIC_proxies={"http":"http://127.0.0.1:10085", "https":"http://127.0.0.1:10085",}
        set GPT_ACADEMIC_AVAIL_LLM_MODELS=["gpt-3.5-turbo", "chatglm"]
        set GPT_ACADEMIC_AUTHENTICATION=[("username", "password"), ("username2", "password2")]
    """
    arg_with_prefix = "GPT_ACADEMIC_" + arg
    if arg_with_prefix in os.environ:
        env_arg = os.environ[arg_with_prefix]
    elif arg in os.environ:
        env_arg = os.environ[arg]
    else:
        raise KeyError
    print(f"[ENV_VAR] 尝试加载{arg},默认值:{default_value} --> 修正值:{env_arg}")
    try:
        if isinstance(default_value, bool):
            env_arg = env_arg.strip()
            if env_arg == 'True': r = True
            elif env_arg == 'False': r = False
            else: print('Enter True or False, but have:', env_arg); r = default_value
        elif isinstance(default_value, int):
            r = int(env_arg)
        elif isinstance(default_value, float):
            r = float(env_arg)
        elif isinstance(default_value, str):
            r = env_arg.strip()
        elif isinstance(default_value, dict):
            r = eval(env_arg)
        elif isinstance(default_value, list):
            r = eval(env_arg)
        elif default_value is None:
            assert arg == "proxies"
            r = eval(env_arg)
        else:
            print亮红(f"[ENV_VAR] 环境变量{arg}不支持通过环境变量设置! ")
            raise KeyError
    except:
        print亮红(f"[ENV_VAR] 环境变量{arg}加载失败! ")
        raise KeyError(f"[ENV_VAR] 环境变量{arg}加载失败! ")

    print亮绿(f"[ENV_VAR] 成功读取环境变量{arg}")
    return r


@lru_cache(maxsize=128)
def read_single_conf_with_lru_cache(arg):
    from shared_utils.key_pattern_manager import is_any_api_key
    try:
        # 优先级1. 获取环境变量作为配置
        default_ref = getattr(importlib.import_module('config'), arg)  # 读取默认值作为数据类型转换的参考
        r = read_env_variable(arg, default_ref)
    except:
        try:
            # 优先级2. 获取config_private中的配置
            r = getattr(importlib.import_module('config_private'), arg)
        except:
            # 优先级3. 获取config中的配置
            r = getattr(importlib.import_module('config'), arg)

    # 在读取API_KEY时,检查一下是不是忘了改config
    if arg == 'API_URL_REDIRECT':
        oai_rd = r.get("https://api.openai.com/v1/chat/completions", None)  # API_URL_REDIRECT填写格式是错误的,请阅读`https://github.com/binary-husky/gpt_academic/wiki/项目配置说明`
        if oai_rd and not oai_rd.endswith('/completions'):
            print亮红("\n\n[API_URL_REDIRECT] API_URL_REDIRECT填错了。请阅读`https://github.com/binary-husky/gpt_academic/wiki/项目配置说明`。如果您确信自己没填错,无视此消息即可。")
            time.sleep(5)
    if arg == 'API_KEY':
        print亮蓝(f"[API_KEY] 本项目现已支持OpenAI和Azure的api-key。也支持同时填写多个api-key,如API_KEY=\"openai-key1,openai-key2,azure-key3\"")
        print亮蓝(f"[API_KEY] 您既可以在config.py中修改api-key(s),也可以在问题输入区输入临时的api-key(s),然后回车键提交后即可生效。")
        if is_any_api_key(r):
            print亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
        else:
            print亮红("[API_KEY] 您的 API_KEY 不满足任何一种已知的密钥格式,请在config文件中修改API密钥之后再运行。")
    if arg == 'proxies':
        if not read_single_conf_with_lru_cache('USE_PROXY'): r = None  # 检查USE_PROXY,防止proxies单独起作用
        if r is None:
            print亮红('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问OpenAI家族的模型。建议:检查USE_PROXY选项是否修改。')
        else:
            print亮绿('[PROXY] 网络代理状态:已配置。配置信息如下:', r)
            assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。'
    return r


@lru_cache(maxsize=128)
def get_conf(*args):
    """
    本项目的所有配置都集中在config.py中。修改配置有三种方法,您只需要选择其中一种即可:
        - 直接修改config.py
        - 创建并修改config_private.py
        - 修改环境变量(修改docker-compose.yml等价于修改容器内部的环境变量)

    注意:如果您使用docker-compose部署,请修改docker-compose(等价于修改容器内部的环境变量)
    """
    res = []
    for arg in args:
        r = read_single_conf_with_lru_cache(arg)
        res.append(r)
    if len(res) == 1: return res[0]
    return res


def set_conf(key, value):
    from toolbox import read_single_conf_with_lru_cache
    read_single_conf_with_lru_cache.cache_clear()
    get_conf.cache_clear()
    os.environ[key] = str(value)
    altered = get_conf(key)
    return altered


def set_multi_conf(dic):
    for k, v in dic.items(): set_conf(k, v)
    return
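A small usage sketch of the loader (API_KEY and LLM_MODEL are standard keys in this project's config.py; treat the exact values as illustrative):

# Single key returns the value itself; multiple keys return a list:
proxies = get_conf('proxies')
api_key, llm_model = get_conf('API_KEY', 'LLM_MODEL')

# set_conf writes through the environment, then clears both lru_cache layers
# so the next get_conf call picks the new value up:
set_conf('LLM_MODEL', 'gpt-3.5-turbo')
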
@@ -0,0 +1,91 @@
import os

"""
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
接驳void-terminal:
    - set_conf: 在运行过程中动态地修改配置
    - set_multi_conf: 在运行过程中动态地修改多个配置
    - get_plugin_handle: 获取插件的句柄
    - get_plugin_default_kwargs: 获取插件的默认参数
    - get_chat_handle: 获取简单聊天的句柄
    - get_chat_default_kwargs: 获取简单聊天的默认参数
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
"""


def get_plugin_handle(plugin_name):
    """
    e.g. plugin_name = 'crazy_functions.批量Markdown翻译->Markdown翻译指定语言'
    """
    import importlib

    assert (
        "->" in plugin_name
    ), "Example of plugin_name: crazy_functions.批量Markdown翻译->Markdown翻译指定语言"
    module, fn_name = plugin_name.split("->")
    f_hot_reload = getattr(importlib.import_module(module, fn_name), fn_name)
    return f_hot_reload


def get_chat_handle():
    """
    Get chat function
    """
    from request_llms.bridge_all import predict_no_ui_long_connection

    return predict_no_ui_long_connection


def get_plugin_default_kwargs():
    """
    Get Plugin Default Arguments
    """
    from toolbox import ChatBotWithCookies, load_chat_cookies

    cookies = load_chat_cookies()
    llm_kwargs = {
        "api_key": cookies["api_key"],
        "llm_model": cookies["llm_model"],
        "top_p": 1.0,
        "max_length": None,
        "temperature": 1.0,
    }
    chatbot = ChatBotWithCookies(llm_kwargs)

    # txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port
    DEFAULT_FN_GROUPS_kwargs = {
        "main_input": "./README.md",
        "llm_kwargs": llm_kwargs,
        "plugin_kwargs": {},
        "chatbot_with_cookie": chatbot,
        "history": [],
        "system_prompt": "You are a good AI.",
        "web_port": None,
    }
    return DEFAULT_FN_GROUPS_kwargs


def get_chat_default_kwargs():
    """
    Get Chat Default Arguments
    """
    from toolbox import load_chat_cookies

    cookies = load_chat_cookies()
    llm_kwargs = {
        "api_key": cookies["api_key"],
        "llm_model": cookies["llm_model"],
        "top_p": 1.0,
        "max_length": None,
        "temperature": 1.0,
    }
    default_chat_kwargs = {
        "inputs": "Hello there, are you ready?",
        "llm_kwargs": llm_kwargs,
        "history": [],
        "sys_prompt": "You are AI assistant",
        "observe_window": None,
        "console_slience": False,
    }

    return default_chat_kwargs
@@ -0,0 +1,81 @@
import re
import os
from functools import wraps, lru_cache
from shared_utils.advanced_markdown_format import format_io
from shared_utils.config_loader import get_conf as get_conf


pj = os.path.join
default_user_name = 'default_user'


def is_openai_api_key(key):
    CUSTOM_API_KEY_PATTERN = get_conf('CUSTOM_API_KEY_PATTERN')
    if len(CUSTOM_API_KEY_PATTERN) != 0:
        API_MATCH_ORIGINAL = re.match(CUSTOM_API_KEY_PATTERN, key)
    else:
        API_MATCH_ORIGINAL = re.match(r"sk-[a-zA-Z0-9]{48}$", key)
    return bool(API_MATCH_ORIGINAL)


def is_azure_api_key(key):
    API_MATCH_AZURE = re.match(r"[a-zA-Z0-9]{32}$", key)
    return bool(API_MATCH_AZURE)


def is_api2d_key(key):
    API_MATCH_API2D = re.match(r"fk[a-zA-Z0-9]{6}-[a-zA-Z0-9]{32}$", key)
    return bool(API_MATCH_API2D)


def is_any_api_key(key):
    if ',' in key:
        keys = key.split(',')
        for k in keys:
            if is_any_api_key(k): return True
        return False
    else:
        return is_openai_api_key(key) or is_api2d_key(key) or is_azure_api_key(key)


def what_keys(keys):
    avail_key_list = {'OpenAI Key': 0, "Azure Key": 0, "API2D Key": 0}
    key_list = keys.split(',')

    for k in key_list:
        if is_openai_api_key(k):
            avail_key_list['OpenAI Key'] += 1

    for k in key_list:
        if is_api2d_key(k):
            avail_key_list['API2D Key'] += 1

    for k in key_list:
        if is_azure_api_key(k):
            avail_key_list['Azure Key'] += 1

    return f"检测到: OpenAI Key {avail_key_list['OpenAI Key']} 个, Azure Key {avail_key_list['Azure Key']} 个, API2D Key {avail_key_list['API2D Key']} 个"


def select_api_key(keys, llm_model):
    import random
    avail_key_list = []
    key_list = keys.split(',')

    if llm_model.startswith('gpt-'):
        for k in key_list:
            if is_openai_api_key(k): avail_key_list.append(k)

    if llm_model.startswith('api2d-'):
        for k in key_list:
            if is_api2d_key(k): avail_key_list.append(k)

    if llm_model.startswith('azure-'):
        for k in key_list:
            if is_azure_api_key(k): avail_key_list.append(k)

    if len(avail_key_list) == 0:
        raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源(右下角更换模型菜单中可切换openai,azure,claude,api2d等请求源)。")

    api_key = random.choice(avail_key_list)  # random load balancing
    return api_key
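A quick sketch of these matchers in action (the key strings below are dummies shaped to fit the regexes above, not real credentials; it assumes `CUSTOM_API_KEY_PATTERN` is left empty in the config):

```python
# Sketch only: dummy keys shaped to match each pattern (not real credentials).
fake_openai = "sk-" + "a" * 48              # sk-[a-zA-Z0-9]{48}
fake_azure = "b" * 32                       # [a-zA-Z0-9]{32}
fake_api2d = "fkabc123-" + "c" * 32         # fk[a-zA-Z0-9]{6}-[a-zA-Z0-9]{32}

combined = ",".join([fake_openai, fake_azure, fake_api2d])
assert is_any_api_key(combined)
print(what_keys(combined))                  # one key of each kind detected
print(select_api_key(combined, llm_model="gpt-3.5-turbo"))  # picks the sk- key
```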
@@ -3,11 +3,13 @@
# """
def validate_path():
    import os, sys
    dir_name = os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')

    os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)


validate_path()  # validate path so you can run from base directory
if __name__ == "__main__":
    # from request_llms.bridge_newbingfree import predict_no_ui_long_connection
@@ -18,19 +20,19 @@ if __name__ == "__main__":
    # from request_llms.bridge_internlm import predict_no_ui_long_connection
    # from request_llms.bridge_deepseekcoder import predict_no_ui_long_connection
    # from request_llms.bridge_qwen_7B import predict_no_ui_long_connection
    from request_llms.bridge_qwen import predict_no_ui_long_connection
    from request_llms.bridge_qwen_local import predict_no_ui_long_connection

    # from request_llms.bridge_spark import predict_no_ui_long_connection
    # from request_llms.bridge_zhipu import predict_no_ui_long_connection
    # from request_llms.bridge_chatglm3 import predict_no_ui_long_connection

    llm_kwargs = {
        'max_length': 4096,
        'top_p': 1,
        'temperature': 1,
        "max_length": 4096,
        "top_p": 1,
        "temperature": 1,
    }

    result = predict_no_ui_long_connection(inputs="请问什么是质子?",
                                           llm_kwargs=llm_kwargs,
                                           history=["你好", "我好!"],
                                           sys_prompt="")
    print('final result:', result)
    result = predict_no_ui_long_connection(
        inputs="请问什么是质子?", llm_kwargs=llm_kwargs, history=["你好", "我好!"], sys_prompt=""
    )
    print("final result:", result)
@@ -1,44 +1,53 @@
md = """
作为您的写作和编程助手,我可以为您提供以下服务:
You can use the following Python script to rename files matching the pattern '* - 副本.tex' to '* - wushiguang.tex' in a directory:

1. 写作:
 - 帮助您撰写文章、报告、散文、故事等。
 - 提供写作建议和技巧。
 - 协助您进行文案策划和内容创作。
```python
import os

2. 编程:
 - 帮助您解决编程问题,提供编程思路和建议。
 - 协助您编写代码,包括但不限于 Python、Java、C++ 等。
 - 为您解释复杂的技术概念,让您更容易理解。
# Directory containing the files
directory = 'Tex/'

3. 项目支持:
 - 协助您规划项目进度和任务分配。
 - 提供项目管理和协作建议。
 - 在项目实施过程中提供支持,确保项目顺利进行。
for filename in os.listdir(directory):
    if filename.endswith(' - 副本.tex'):
        new_filename = filename.replace(' - 副本.tex', ' - wushiguang.tex')
        os.rename(os.path.join(directory, filename), os.path.join(directory, new_filename))
```

4. 学习辅导:
 - 帮助您巩固编程基础,提高编程能力。
 - 提供计算机科学、数据科学、人工智能等相关领域的学习资源和建议。
 - 解答您在学习过程中遇到的问题,让您更好地掌握知识。

5. 行业动态和趋势分析:
 - 为您提供业界最新的新闻和技术趋势。
 - 分析行业动态,帮助您了解市场发展和竞争态势。
 - 为您制定技术战略提供参考和建议。

请随时告诉我您的需求,我会尽力提供帮助。如果您有任何问题或需要解答的议题,请随时提问。
Replace 'Tex/' with the actual directory path where your files are located before running the script.
"""


md = """
Following code including wrapper

```mermaid
graph TD
    A[Enter Chart Definition] --> B(Preview)
    B --> C{decide}
    C --> D[Keep]
    C --> E[Edit Definition]
    E --> B
    D --> F[Save Image and Code]
    F --> B
```

"""
def validate_path():
    import os, sys
    dir_name = os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')

    os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)


validate_path()  # validate path so you can run from base directory
from toolbox import markdown_convertion

html = markdown_convertion(md)
print(html)
with open('test.html', 'w', encoding='utf-8') as f:
# print(html)
with open("test.html", "w", encoding="utf-8") as f:
    f.write(html)


# TODO: list 10 classic literary works
@@ -4,16 +4,28 @@


import os, sys
def validate_path(): dir_name = os.path.dirname(__file__); root_dir_assume = os.path.abspath(dir_name + '/..'); os.chdir(root_dir_assume); sys.path.append(root_dir_assume)


def validate_path():
    dir_name = os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(dir_name + "/..")
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)


validate_path()  # switch to the project root

if __name__ == "__main__":
    from tests.test_utils import plugin_test

    # plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})

    # plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2307.07522")

    plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix")
    plugin_test(
        plugin="crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF",
        main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix",
    )

    # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep')

@@ -61,4 +73,3 @@ if __name__ == "__main__":

    # advanced_arg = {"advanced_arg":"--pre_seq_len=128 --learning_rate=2e-2 --num_gpus=1 --json_dataset='t_code.json' --ptuning_directory='/home/hmp/ChatGLM2-6B/ptuning' " }
    # plugin_test(plugin='crazy_functions.chatglm微调工具->启动微调', main_input='build/dev.json', advanced_arg=advanced_arg)
@@ -9,45 +9,52 @@ from functools import wraps
import sys
import os


def chat_to_markdown_str(chat):
    result = ""
    for i, cc in enumerate(chat):
        result += f'\n\n{cc[0]}\n\n{cc[1]}'
        result += f"\n\n{cc[0]}\n\n{cc[1]}"
        if i != len(chat) - 1:
            result += '\n\n---'
            result += "\n\n---"
    return result


def silence_stdout(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        _original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        sys.stdout.reconfigure(encoding='utf-8')
        sys.stdout = open(os.devnull, "w")
        sys.stdout.reconfigure(encoding="utf-8")
        for q in func(*args, **kwargs):
            sys.stdout = _original_stdout
            yield q
            sys.stdout = open(os.devnull, 'w')
            sys.stdout.reconfigure(encoding='utf-8')
            sys.stdout = open(os.devnull, "w")
            sys.stdout.reconfigure(encoding="utf-8")
        sys.stdout.close()
        sys.stdout = _original_stdout

    return wrapper


def silence_stdout_fn(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        _original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        sys.stdout.reconfigure(encoding='utf-8')
        sys.stdout = open(os.devnull, "w")
        sys.stdout.reconfigure(encoding="utf-8")
        result = func(*args, **kwargs)
        sys.stdout.close()
        sys.stdout = _original_stdout
        return result

    return wrapper


class VoidTerminal():

class VoidTerminal:
    def __init__(self) -> None:
        pass


vt = VoidTerminal()
vt.get_conf = silence_stdout_fn(get_conf)
vt.set_conf = silence_stdout_fn(set_conf)
@@ -56,9 +63,27 @@ vt.get_plugin_handle = silence_stdout_fn(get_plugin_handle)
vt.get_plugin_default_kwargs = silence_stdout_fn(get_plugin_default_kwargs)
vt.get_chat_handle = silence_stdout_fn(get_chat_handle)
vt.get_chat_default_kwargs = silence_stdout_fn(get_chat_default_kwargs)
vt.chat_to_markdown_str = (chat_to_markdown_str)
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
    vt.get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
vt.chat_to_markdown_str = chat_to_markdown_str
(
    proxies,
    WEB_PORT,
    LLM_MODEL,
    CONCURRENT_COUNT,
    AUTHENTICATION,
    CHATBOT_HEIGHT,
    LAYOUT,
    API_KEY,
) = vt.get_conf(
    "proxies",
    "WEB_PORT",
    "LLM_MODEL",
    "CONCURRENT_COUNT",
    "AUTHENTICATION",
    "CHATBOT_HEIGHT",
    "LAYOUT",
    "API_KEY",
)


def plugin_test(main_input, plugin, advanced_arg=None, debug=True):
    from rich.live import Live
@@ -69,9 +94,9 @@ def plugin_test(main_input, plugin, advanced_arg=None, debug=True):

    plugin = vt.get_plugin_handle(plugin)
    plugin_kwargs = vt.get_plugin_default_kwargs()
    plugin_kwargs['main_input'] = main_input
    plugin_kwargs["main_input"] = main_input
    if advanced_arg is not None:
        plugin_kwargs['plugin_kwargs'] = advanced_arg
        plugin_kwargs["plugin_kwargs"] = advanced_arg
    if debug:
        my_working_plugin = (plugin)(**plugin_kwargs)
    else:
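As a side note, the two silencing wrappers differ in what they expect: `silence_stdout` wraps generator functions and restores stdout around each yield, while `silence_stdout_fn` wraps plain calls. A toy sketch (hypothetical functions, not repo code):

```python
# Toy sketch of the two wrappers above.
@silence_stdout
def noisy_plugin():
    print("swallowed: stdout points at os.devnull here")
    yield "update"  # stdout is restored just before each yield reaches the caller

@silence_stdout_fn
def noisy_call():
    print("swallowed")
    return 42

for update in noisy_plugin():
    print(update)        # visible: stdout has been restored at the yield point
assert noisy_call() == 42
```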
@@ -4,14 +4,25 @@


import os, sys
def validate_path(): dir_name = os.path.dirname(__file__); root_dir_assume = os.path.abspath(dir_name + '/..'); os.chdir(root_dir_assume); sys.path.append(root_dir_assume)


def validate_path():
    dir_name = os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(dir_name + "/..")
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)


validate_path()  # switch to the project root

if __name__ == "__main__":
    from tests.test_utils import plugin_test

    plugin_test(plugin='crazy_functions.知识库问答->知识库文件注入', main_input="./README.md")
    plugin_test(plugin="crazy_functions.知识库问答->知识库文件注入", main_input="./README.md")

    plugin_test(plugin='crazy_functions.知识库问答->读取知识库作答', main_input="What is the installation method?")
    plugin_test(
        plugin="crazy_functions.知识库问答->读取知识库作答",
        main_input="What is the installation method?",
    )

    plugin_test(plugin='crazy_functions.知识库问答->读取知识库作答', main_input="远程云服务器部署?")
    plugin_test(plugin="crazy_functions.知识库问答->读取知识库作答", main_input="远程云服务器部署?")
@@ -94,6 +94,10 @@
    background-color: var(--block-background-fill) !important;
}

#cbsc {
    background-color: var(--block-background-fill) !important;
}

#interact-panel .form {
    border: hidden
}

themes/common.js (610 lines changed)
@@ -1,3 +1,7 @@
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 1: utility functions
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

function gradioApp() {
    // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
    const elems = document.getElementsByTagName('gradio-app');
@@ -37,6 +41,143 @@ function getCookie(name) {
    return null;
}

let toastCount = 0;
function toast_push(msg, duration) {
    duration = isNaN(duration) ? 3000 : duration;
    const existingToasts = document.querySelectorAll('.toast');
    existingToasts.forEach(toast => {
        toast.style.top = `${parseInt(toast.style.top, 10) - 70}px`;
    });
    const m = document.createElement('div');
    m.innerHTML = msg;
    m.classList.add('toast');
    m.style.cssText = `font-size: var(--text-md) !important; color: rgb(255, 255, 255); background-color: rgba(0, 0, 0, 0.6); padding: 10px 15px; border-radius: 4px; position: fixed; top: ${50 + toastCount * 70}%; left: 50%; transform: translateX(-50%); width: auto; text-align: center; transition: top 0.3s;`;
    document.body.appendChild(m);
    setTimeout(function () {
        m.style.opacity = '0';
        setTimeout(function () {
            document.body.removeChild(m);
            toastCount--;
        }, 500);
    }, duration);
    toastCount++;
}

function toast_up(msg) {
    var m = document.getElementById('toast_up');
    if (m) {
        document.body.removeChild(m); // remove the loader from the body
    }
    m = document.createElement('div');
    m.id = 'toast_up';
    m.innerHTML = msg;
    m.style.cssText = "font-size: var(--text-md) !important; color: rgb(255, 255, 255); background-color: rgba(0, 0, 100, 0.6); padding: 10px 15px; margin: 0 0 0 -60px; border-radius: 4px; position: fixed; top: 50%; left: 50%; width: auto; text-align: center;";
    document.body.appendChild(m);
}

function toast_down() {
    var m = document.getElementById('toast_up');
    if (m) {
        document.body.removeChild(m); // remove the loader from the body
    }
}

function begin_loading_status() {
    // Create the loader div and add styling
    var loader = document.createElement('div');
    loader.id = 'Js_File_Loading';
    var C1 = document.createElement('div');
    var C2 = document.createElement('div');
    // var C3 = document.createElement('span');
    // C3.textContent = '上传中...'
    // C3.style.position = "fixed";
    // C3.style.top = "50%";
    // C3.style.left = "50%";
    // C3.style.width = "80px";
    // C3.style.height = "80px";
    // C3.style.margin = "-40px 0 0 -40px";

    C1.style.position = "fixed";
    C1.style.top = "50%";
    C1.style.left = "50%";
    C1.style.width = "80px";
    C1.style.height = "80px";
    C1.style.borderLeft = "12px solid #00f3f300";
    C1.style.borderRight = "12px solid #00f3f300";
    C1.style.borderTop = "12px solid #82aaff";
    C1.style.borderBottom = "12px solid #82aaff"; // Added for effect
    C1.style.borderRadius = "50%";
    C1.style.margin = "-40px 0 0 -40px";
    C1.style.animation = "spinAndPulse 2s linear infinite";

    C2.style.position = "fixed";
    C2.style.top = "50%";
    C2.style.left = "50%";
    C2.style.width = "40px";
    C2.style.height = "40px";
    C2.style.borderLeft = "12px solid #00f3f300";
    C2.style.borderRight = "12px solid #00f3f300";
    C2.style.borderTop = "12px solid #33c9db";
    C2.style.borderBottom = "12px solid #33c9db"; // Added for effect
    C2.style.borderRadius = "50%";
    C2.style.margin = "-20px 0 0 -20px";
    C2.style.animation = "spinAndPulse2 2s linear infinite";

    loader.appendChild(C1);
    loader.appendChild(C2);
    // loader.appendChild(C3);
    document.body.appendChild(loader); // Add the loader to the body

    // Set the CSS animation keyframes for spin and pulse to be synchronized
    var styleSheet = document.createElement('style');
    styleSheet.id = 'Js_File_Loading_Style';
    styleSheet.textContent = `
    @keyframes spinAndPulse {
        0% { transform: rotate(0deg) scale(1); }
        25% { transform: rotate(90deg) scale(1.1); }
        50% { transform: rotate(180deg) scale(1); }
        75% { transform: rotate(270deg) scale(0.9); }
        100% { transform: rotate(360deg) scale(1); }
    }

    @keyframes spinAndPulse2 {
        0% { transform: rotate(-90deg);}
        25% { transform: rotate(-180deg);}
        50% { transform: rotate(-270deg);}
        75% { transform: rotate(-360deg);}
        100% { transform: rotate(-450deg);}
    }
    `;
    document.head.appendChild(styleSheet);
}

function cancel_loading_status() {
    // remove the loader from the body
    var loadingElement = document.getElementById('Js_File_Loading');
    if (loadingElement) {
        document.body.removeChild(loadingElement);
    }
    var loadingStyle = document.getElementById('Js_File_Loading_Style');
    if (loadingStyle) {
        document.head.removeChild(loadingStyle);
    }
    // create new listen event
    let clearButton = document.querySelectorAll('div[id*="elem_upload"] button[aria-label="Clear"]');
    for (let button of clearButton) {
        button.addEventListener('click', function () {
            setTimeout(function () {
                register_upload_event();
            }, 50);
        });
    }
}

// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 2: copy button
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

function addCopyButton(botElement) {
    // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
    // Copy bot button
@@ -45,8 +186,7 @@ function addCopyButton(botElement) {

    const messageBtnColumnElement = botElement.querySelector('.message-btn-row');
    if (messageBtnColumnElement) {
        // Do something if .message-btn-column exists, for example, remove it
        // messageBtnColumnElement.remove();
        // if .message-btn-column exists
        return;
    }

@@ -89,6 +229,33 @@ function addCopyButton(botElement) {
    botElement.appendChild(messageBtnColumn);
}

let timeoutID = null;
let lastInvocationTime = 0;
let lastArgs = null;
function do_something_but_not_too_frequently(min_interval, func) {
    return function (...args) {
        lastArgs = args;
        const now = Date.now();
        if (!lastInvocationTime || (now - lastInvocationTime) >= min_interval) {
            lastInvocationTime = now;
            // run immediately
            setTimeout(() => {
                func.apply(this, lastArgs);
            }, 0);
        } else if (!timeoutID) {
            // run once, a little later
            timeoutID = setTimeout(() => {
                timeoutID = null;
                lastInvocationTime = Date.now();
                func.apply(this, lastArgs);
            }, min_interval - (now - lastInvocationTime));
        } else {
            // drop this invocation entirely
        }
    }
}
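For readers more at home in Python, the same leading-call-plus-one-trailing-call throttle can be sketched like this (illustration only, not part of the repo):

```python
# Throttle sketch: run at most once per min_interval, keeping one trailing call.
import threading
import time

def throttle(min_interval):
    def deco(func):
        state = {"last": 0.0, "timer": None, "args": ()}
        def wrapper(*args):
            state["args"] = args
            now = time.monotonic()
            if now - state["last"] >= min_interval:
                state["last"] = now
                func(*state["args"])                       # run immediately
            elif state["timer"] is None:
                def fire():
                    state["timer"] = None
                    state["last"] = time.monotonic()
                    func(*state["args"])                   # run once, later
                state["timer"] = threading.Timer(
                    min_interval - (now - state["last"]), fire)
                state["timer"].start()
            # else: a trailing call is already scheduled; drop this one
        return wrapper
    return deco
```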
function chatbotContentChanged(attempt = 1, force = false) {
    // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
    for (var i = 0; i < attempt; i++) {
@@ -96,46 +263,69 @@ function chatbotContentChanged(attempt = 1, force = false) {
            gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton);
        }, i === 0 ? 0 : 200);
    }

    const run_mermaid_render = do_something_but_not_too_frequently(1000, function () {
        const blocks = document.querySelectorAll(`pre.mermaid, diagram-div`);
        if (blocks.length == 0) { return; }
        uml("mermaid");
    });
    run_mermaid_render();
}


// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 3: dynamic chatbot height adjustment
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

function chatbotAutoHeight() {
    // auto-adjust height
    // auto-adjust height: immediately
    function update_height() {
        var { panel_height_target, chatbot_height, chatbot } = get_elements(true);
        if (panel_height_target != chatbot_height) {
            var pixelString = panel_height_target.toString() + 'px';
        var { height_target, chatbot_height, chatbot } = get_elements(true);
        if (height_target != chatbot_height) {
            var pixelString = height_target.toString() + 'px';
            chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
        }
    }

    // auto-adjust height: gradually
    function update_height_slow() {
        var { panel_height_target, chatbot_height, chatbot } = get_elements();
        if (panel_height_target != chatbot_height) {
            new_panel_height = (panel_height_target - chatbot_height) * 0.5 + chatbot_height;
            if (Math.abs(new_panel_height - panel_height_target) < 10) {
                new_panel_height = panel_height_target;
        var { height_target, chatbot_height, chatbot } = get_elements();
        if (height_target != chatbot_height) {
            // sign = (height_target - chatbot_height)/Math.abs(height_target - chatbot_height);
            // speed = Math.max(Math.abs(height_target - chatbot_height), 1);
            new_panel_height = (height_target - chatbot_height) * 0.5 + chatbot_height;
            if (Math.abs(new_panel_height - height_target) < 10) {
                new_panel_height = height_target;
            }
            // console.log(chatbot_height, panel_height_target, new_panel_height);
            var pixelString = new_panel_height.toString() + 'px';
            chatbot.style.maxHeight = pixelString; chatbot.style.height = pixelString;
        }
    }
    monitoring_input_box()
    update_height();
    setInterval(function () {
        update_height_slow()
    }, 50); // run every 100 ms
    window.addEventListener('resize', function () { update_height(); });
    window.addEventListener('scroll', function () { update_height_slow(); });
    setInterval(function () { update_height_slow() }, 50); // run every 50 ms
}

function GptAcademicJavaScriptInit(LAYOUT = "LEFT-RIGHT") {
    chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
    var chatbotObserver = new MutationObserver(() => {
        chatbotContentChanged(1);
    });
    chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
    if (LAYOUT === "LEFT-RIGHT") {chatbotAutoHeight();}
swapped = false;
function swap_input_area() {
    // Get the elements to be swapped
    var element1 = document.querySelector("#input-panel");
    var element2 = document.querySelector("#basic-panel");

    // Get the parent of the elements
    var parent = element1.parentNode;

    // Get the next sibling of element2
    var nextSibling = element2.nextSibling;

    // Swap the elements
    parent.insertBefore(element2, element1);
    parent.insertBefore(element1, nextSibling);
    if (swapped) { swapped = false; }
    else { swapped = true; }
}

function get_elements(consider_state_panel = false) {
@@ -147,23 +337,95 @@ function get_elements(consider_state_panel=false) {
    const panel2 = document.querySelector('#basic-panel').getBoundingClientRect()
    const panel3 = document.querySelector('#plugin-panel').getBoundingClientRect();
    // const panel4 = document.querySelector('#interact-panel').getBoundingClientRect();
    const panel5 = document.querySelector('#input-panel2').getBoundingClientRect();
    const panel_active = document.querySelector('#state-panel').getBoundingClientRect();
    if (consider_state_panel || panel_active.height < 25) {
        document.state_panel_height = panel_active.height;
    }
    // 25 is the chatbot label height, 16 is the right-side gap
    var panel_height_target = panel1.height + panel2.height + panel3.height + 0 + 0 - 25 + 16*2;
    var height_target = panel1.height + panel2.height + panel3.height + 0 + 0 - 25 + 16 * 2;
    // cancel out the dynamic state-panel height
    panel_height_target = panel_height_target + (document.state_panel_height - panel_active.height)
    var panel_height_target = parseInt(panel_height_target);
    height_target = height_target + (document.state_panel_height - panel_active.height)
    var height_target = parseInt(height_target);
    var chatbot_height = chatbot.style.height;
    // swap the input area position so it stays usable
    if (!swapped) {
        if (panel1.top != 0 && (panel1.bottom + panel1.top) / 2 < 0) { swap_input_area(); }
    }
    else if (swapped) {
        if (panel2.top != 0 && panel2.top > 0) { swap_input_area(); }
    }
    // adjust height
    const err_tor = 5;
    if (Math.abs(panel1.left - chatbot.getBoundingClientRect().left) < err_tor) {
        // narrow-screen mode
        height_target = window.innerHeight * 0.6;
    } else {
        // adjust height
        const chatbot_height_exceed = 15;
        const chatbot_height_exceed_m = 10;
        b_panel = Math.max(panel1.bottom, panel2.bottom, panel3.bottom)
        if (b_panel >= window.innerHeight - chatbot_height_exceed) {
            height_target = window.innerHeight - chatbot.getBoundingClientRect().top - chatbot_height_exceed_m;
        }
        else if (b_panel < window.innerHeight * 0.75) {
            height_target = window.innerHeight * 0.8;
        }
    }
    var chatbot_height = parseInt(chatbot_height);
    return { panel_height_target, chatbot_height, chatbot };
    return { height_target, chatbot_height, chatbot };
}


function add_func_paste(input) {

// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 4: paste and drag-and-drop file upload
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

var elem_upload = null;
var elem_upload_float = null;
var elem_input_main = null;
var elem_input_float = null;
var elem_chatbot = null;
var elem_upload_component_float = null;
var elem_upload_component = null;
var exist_file_msg = '⚠️请先删除上传区(左上方)中的历史文件,再尝试上传。'

function locate_upload_elems() {
    elem_upload = document.getElementById('elem_upload')
    elem_upload_float = document.getElementById('elem_upload_float')
    elem_input_main = document.getElementById('user_input_main')
    elem_input_float = document.getElementById('user_input_float')
    elem_chatbot = document.getElementById('gpt-chatbot')
    elem_upload_component_float = elem_upload_float.querySelector("input[type=file]");
    elem_upload_component = elem_upload.querySelector("input[type=file]");
}

async function upload_files(files) {
    let totalSizeMb = 0
    elem_upload_component_float = elem_upload_float.querySelector("input[type=file]");
    if (files && files.length > 0) {
        // perform the actual upload
        if (elem_upload_component_float) {
            for (let i = 0; i < files.length; i++) {
                // convert each file size from bytes to MB
                totalSizeMb += files[i].size / 1024 / 1024;
            }
            // check whether the total size exceeds 20MB
            if (totalSizeMb > 20) {
                toast_push('⚠️文件夹大于 20MB 🚀上传文件中', 3000);
            }
            let event = new Event("change");
            Object.defineProperty(event, "target", { value: elem_upload_component_float, enumerable: true });
            Object.defineProperty(event, "currentTarget", { value: elem_upload_component_float, enumerable: true });
            Object.defineProperty(elem_upload_component_float, "files", { value: files, enumerable: true });
            elem_upload_component_float.dispatchEvent(event);
        } else {
            toast_push(exist_file_msg, 3000);
        }
    }
}

function register_func_paste(input) {
    let paste_files = [];
    if (input) {
        input.addEventListener("paste", async function (e) {
@@ -180,7 +442,7 @@ function add_func_paste(input) {
            }
            if (paste_files.length > 0) {
                // batch-upload the collected file list
                await paste_upload_files(paste_files);
                await upload_files(paste_files);
                paste_files = []

            }
@@ -189,72 +451,110 @@ function add_func_paste(input) {
        }
    }
}

function register_func_drag(elem) {
    if (elem) {
        const dragEvents = ["dragover"];
        const leaveEvents = ["dragleave", "dragend", "drop"];

async function paste_upload_files(files) {
    const uploadInputElement = elem_upload_float.querySelector("input[type=file]");
    let totalSizeMb = 0
    if (files && files.length > 0) {
        // perform the actual upload
        if (uploadInputElement) {
            for (let i = 0; i < files.length; i++) {
                // convert each file size from bytes to MB
                totalSizeMb += files[i].size / 1024 / 1024;
            }
            // check whether the total size exceeds 20MB
            if (totalSizeMb > 20) {
                toast_push('⚠️文件夹大于20MB 🚀上传文件中', 2000)
                // return; // optionally abort the upload if the size limit is exceeded
            }
            // listen for the change event, which native Gradio supports
            // uploadInputElement.addEventListener('change', function(){replace_input_string()});
            let event = new Event("change");
            Object.defineProperty(event, "target", {value: uploadInputElement, enumerable: true});
            Object.defineProperty(event, "currentTarget", {value: uploadInputElement, enumerable: true});
            Object.defineProperty(uploadInputElement, "files", {value: files, enumerable: true});
            uploadInputElement.dispatchEvent(event);
            // toast_push('🎉上传文件成功', 2000)
        const onDrag = function (e) {
            e.preventDefault();
            e.stopPropagation();
            if (elem_upload_float.querySelector("input[type=file]")) {
                toast_up('⚠️释放以上传文件')
            } else {
                toast_push('⚠️请先删除上传区中的历史文件,再尝试粘贴。', 2000)
                toast_up(exist_file_msg)
            }
        };

        const onLeave = function (e) {
            toast_down();
            e.preventDefault();
            e.stopPropagation();
        };

        dragEvents.forEach(event => {
            elem.addEventListener(event, onDrag);
        });

        leaveEvents.forEach(event => {
            elem.addEventListener(event, onLeave);
        });

        elem.addEventListener("drop", async function (e) {
            const files = e.dataTransfer.files;
            await upload_files(files);
        });
    }
}
// toast message helper
function toast_push(msg, duration) {
    duration = isNaN(duration) ? 3000 : duration;
    const m = document.createElement('div');
    m.innerHTML = msg;
    m.style.cssText = "font-size: var(--text-md) !important; color: rgb(255, 255, 255);background-color: rgba(0, 0, 0, 0.6);padding: 10px 15px;margin: 0 0 0 -60px;border-radius: 4px;position: fixed; top: 50%;left: 50%;width: auto; text-align: center;";
    document.body.appendChild(m);
    setTimeout(function () {
        var d = 0.5;
        m.style.opacity = '0';
        setTimeout(function () {
            document.body.removeChild(m)
        }, d * 1000);
    }, duration);
}

var elem_upload = null;
var elem_upload_float = null;
var elem_input_main = null;
var elem_input_float = null;
function elem_upload_component_pop_message(elem) {
    if (elem) {
        const dragEvents = ["dragover"];
        const leaveEvents = ["dragleave", "dragend", "drop"];
        dragEvents.forEach(event => {
            elem.addEventListener(event, function (e) {
                e.preventDefault();
                e.stopPropagation();
                if (elem_upload_float.querySelector("input[type=file]")) {
                    toast_up('⚠️释放以上传文件')
                } else {
                    toast_up(exist_file_msg)
                }
            });
        });
        leaveEvents.forEach(event => {
            elem.addEventListener(event, function (e) {
                toast_down();
                e.preventDefault();
                e.stopPropagation();
            });
        });
        elem.addEventListener("drop", async function (e) {
            toast_push('正在上传中,请稍等。', 2000);
            begin_loading_status();
        });
    }
}

function register_upload_event() {
    locate_upload_elems();
    if (elem_upload_float) {
        _upload = document.querySelector("#elem_upload_float div.center.boundedheight.flex")
        elem_upload_component_pop_message(_upload);
    }
    if (elem_upload_component_float) {
        elem_upload_component_float.addEventListener('change', function (event) {
            toast_push('正在上传中,请稍等。', 2000);
            begin_loading_status();
        });
    }
    if (elem_upload_component) {
        elem_upload_component.addEventListener('change', function (event) {
            toast_push('正在上传中,请稍等。', 2000);
            begin_loading_status();
        });
    } else {
        toast_push("oppps", 3000);
    }
}

function monitoring_input_box() {
    elem_upload = document.getElementById('elem_upload')
    elem_upload_float = document.getElementById('elem_upload_float')
    elem_input_main = document.getElementById('user_input_main')
    elem_input_float = document.getElementById('user_input_float')
    register_upload_event();

    if (elem_input_main) {
        if (elem_input_main.querySelector("textarea")) {
            add_func_paste(elem_input_main.querySelector("textarea"))
            register_func_paste(elem_input_main.querySelector("textarea"))
        }
    }
    if (elem_input_float) {
        if (elem_input_float.querySelector("textarea")) {
            add_func_paste(elem_input_float.querySelector("textarea"))
            register_func_paste(elem_input_float.querySelector("textarea"))
        }
    }
    if (elem_chatbot) {
        register_func_drag(elem_chatbot)
    }

}


@@ -263,3 +563,153 @@ window.addEventListener("DOMContentLoaded", function () {
    // const ga = document.getElementsByTagName("gradio-app");
    gradioApp().addEventListener("render", monitoring_input_box);
});


// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 5: audio button styling
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

function audio_fn_init() {
    let audio_component = document.getElementById('elem_audio');
    if (audio_component) {
        let buttonElement = audio_component.querySelector('button');
        let specificElement = audio_component.querySelector('.hide.sr-only');
        specificElement.remove();

        buttonElement.childNodes[1].nodeValue = '启动麦克风';
        buttonElement.addEventListener('click', function (event) {
            event.stopPropagation();
            toast_push('您启动了麦克风!下一步请点击“实时语音对话”启动语音对话。');
        });

        // find the voice plugin button
        let buttons = document.querySelectorAll('button');
        let audio_button = null;
        for (let button of buttons) {
            if (button.textContent.includes('语音')) {
                audio_button = button;
                break;
            }
        }
        if (audio_button) {
            audio_button.addEventListener('click', function () {
                toast_push('您点击了“实时语音对话”启动语音对话。');
            });
            let parent_element = audio_component.parentElement; // move buttonElement inside audio_button
            audio_button.appendChild(audio_component);
            buttonElement.style.cssText = 'border-color: #00ffe0;border-width: 2px; height: 25px;'
            parent_element.remove();
            audio_component.style.cssText = 'width: 250px;right: 0px;display: inline-flex;flex-flow: row-reverse wrap;place-content: stretch space-between;align-items: center;background-color: #ffffff00;';
        }

    }
}

function minor_ui_adjustment() {
    let cbsc_area = document.getElementById('cbsc');
    cbsc_area.style.paddingTop = '15px';
    var bar_btn_width = [];
    // auto-hide toolbar buttons that overflow
    function auto_hide_toolbar() {
        var qq = document.getElementById('tooltip');
        var tab_nav = qq.getElementsByClassName('tab-nav');
        if (tab_nav.length == 0) { return; }
        var btn_list = tab_nav[0].getElementsByTagName('button')
        if (btn_list.length == 0) { return; }
        // get the page width
        var page_width = document.documentElement.clientWidth;
        // number of buttons that are always kept visible
        const always_preserve = 2;
        // right edge of the last always-kept button
        var cur_right = btn_list[always_preserve - 1].getBoundingClientRect().right;
        if (bar_btn_width.length == 0) {
            // on first run, record each button's width
            for (var i = 0; i < btn_list.length; i++) {
                bar_btn_width.push(btn_list[i].getBoundingClientRect().width);
            }
        }
        // process each button
        for (var i = always_preserve; i < btn_list.length; i++) {
            var element = btn_list[i];
            var element_right = element.getBoundingClientRect().right;
            if (element_right != 0) { cur_right = element_right; }
            if (element.style.display === 'none') {
                if ((cur_right + bar_btn_width[i]) < (page_width * 0.37)) {
                    // show this button again
                    element.style.display = 'block';
                    return;
                } else {
                    return;
                }
            } else {
                if (cur_right > (page_width * 0.38)) {
                    // hide this button and all buttons to its right
                    for (var j = i; j < btn_list.length; j++) {
                        if (btn_list[j].style.display !== 'none') {
                            btn_list[j].style.display = 'none';
                        }
                    }
                    return;
                }
            }
        }
    }

    setInterval(function () {
        auto_hide_toolbar()
    }, 200); // run every 200 ms
}


// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 6: scroll containment
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
let prevented_offset = 0;
function limit_scroll_position() {
    let scrollableDiv = document.querySelector('#gpt-chatbot > div.wrap');
    scrollableDiv.addEventListener('wheel', function (e) {
        let preventScroll = false;
        if (e.deltaX != 0) { prevented_offset = 0; return; }
        if (this.scrollHeight == this.clientHeight) { prevented_offset = 0; return; }
        if (e.deltaY < 0) { prevented_offset = 0; return; }
        if (e.deltaY > 0 && this.scrollHeight - this.clientHeight - this.scrollTop <= 1) { preventScroll = true; }

        if (preventScroll) {
            prevented_offset += e.deltaY;
            if (Math.abs(prevented_offset) > 499) {
                if (prevented_offset > 500) { prevented_offset = 500; }
                if (prevented_offset < -500) { prevented_offset = -500; }
                preventScroll = false;
            }
        } else {
            prevented_offset = 0;
        }
        if (preventScroll) {
            e.preventDefault();
            return;
        }
    }, { passive: false }); // Passive event listener option should be false
}


// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
// Part 7: JS init function
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

function GptAcademicJavaScriptInit(LAYOUT = "LEFT-RIGHT") {
    audio_fn_init();
    minor_ui_adjustment();
    chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
    var chatbotObserver = new MutationObserver(() => {
        chatbotContentChanged(1);
    });
    chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
    if (LAYOUT === "LEFT-RIGHT") { chatbotAutoHeight(); }
    if (LAYOUT === "LEFT-RIGHT") { limit_scroll_position(); }
    // setInterval(function () { uml("mermaid") }, 5000); // run every 5 s

}
@@ -479,4 +479,3 @@
.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */
@@ -1,18 +1,26 @@
import os
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')

CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
theme_dir = os.path.dirname(__file__)

def adjust_theme():


def adjust_theme():
    try:
        color_er = gr.themes.utils.colors.fuchsia
        set_theme = gr.themes.Default(
            primary_hue=gr.themes.utils.colors.orange,
            neutral_hue=gr.themes.utils.colors.gray,
            font=["Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"],
            font_mono=["ui-monospace", "Consolas", "monospace"])
            font=[
                "Helvetica",
                "Microsoft YaHei",
                "ui-sans-serif",
                "sans-serif",
                "system-ui",
            ],
            font_mono=["ui-monospace", "Consolas", "monospace"],
        )
        set_theme.set(
            # Colors
            input_background_fill_dark="*neutral_800",
@@ -59,8 +67,14 @@ def adjust_theme():
            button_cancel_text_color_dark="white",
        )

        with open(os.path.join(theme_dir, 'common.js'), 'r', encoding='utf8') as f:
            js = f"<script>{f.read()}</script>"
        js = ""
        for jsf in [
            os.path.join(theme_dir, "common.js"),
            os.path.join(theme_dir, "mermaid.min.js"),
            os.path.join(theme_dir, "mermaid_loader.js"),
        ]:
            with open(jsf, "r", encoding="utf8") as f:
                js += f"<script>{f.read()}</script>"

        # Add a cute Live2D mascot
        if ADD_WAIFU:
@@ -69,21 +83,26 @@ def adjust_theme():
            <script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
            <script src="file=docs/waifu_plugin/autoload.js"></script>
            """
        if not hasattr(gr, 'RawTemplateResponse'):
        if not hasattr(gr, "RawTemplateResponse"):
            gr.RawTemplateResponse = gr.routes.templates.TemplateResponse
        gradio_original_template_fn = gr.RawTemplateResponse

        def gradio_new_template_fn(*args, **kwargs):
            res = gradio_original_template_fn(*args, **kwargs)
            res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
            res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
            res.init_headers()
            return res
        gr.routes.templates.TemplateResponse = gradio_new_template_fn  # override gradio template

        gr.routes.templates.TemplateResponse = (
            gradio_new_template_fn  # override gradio template
        )
    except:
        set_theme = None
        print('gradio版本较旧, 不能自定义字体和颜色')
        print("gradio版本较旧, 不能自定义字体和颜色")
    return set_theme

with open(os.path.join(theme_dir, 'contrast.css'), "r", encoding="utf-8") as f:

with open(os.path.join(theme_dir, "contrast.css"), "r", encoding="utf-8") as f:
    advanced_css = f.read()
with open(os.path.join(theme_dir, 'common.css'), "r", encoding="utf-8") as f:
with open(os.path.join(theme_dir, "common.css"), "r", encoding="utf-8") as f:
    advanced_css += f.read()
themes/cookies.py (0 lines changed, regular file)

@@ -303,4 +303,3 @@
.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */
@@ -1,17 +1,26 @@
import os
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')
theme_dir = os.path.dirname(__file__)
def adjust_theme():

CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
theme_dir = os.path.dirname(__file__)


def adjust_theme():
    try:
        color_er = gr.themes.utils.colors.fuchsia
        set_theme = gr.themes.Default(
            primary_hue=gr.themes.utils.colors.orange,
            neutral_hue=gr.themes.utils.colors.gray,
            font=["Helvetica", "Microsoft YaHei", "ui-sans-serif", "sans-serif", "system-ui"],
            font_mono=["ui-monospace", "Consolas", "monospace"])
            font=[
                "Helvetica",
                "Microsoft YaHei",
                "ui-sans-serif",
                "sans-serif",
                "system-ui",
            ],
            font_mono=["ui-monospace", "Consolas", "monospace"],
        )
        set_theme.set(
            # Colors
            input_background_fill_dark="*neutral_800",
@@ -58,8 +67,14 @@ def adjust_theme():
            button_cancel_text_color_dark="white",
        )

        with open(os.path.join(theme_dir, 'common.js'), 'r', encoding='utf8') as f:
            js = f"<script>{f.read()}</script>"
        js = ""
        for jsf in [
            os.path.join(theme_dir, "common.js"),
            os.path.join(theme_dir, "mermaid.min.js"),
            os.path.join(theme_dir, "mermaid_loader.js"),
        ]:
            with open(jsf, "r", encoding="utf8") as f:
                js += f"<script>{f.read()}</script>"

        # Add a cute Live2D mascot
        if ADD_WAIFU:
@@ -68,21 +83,26 @@ def adjust_theme():
            <script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
            <script src="file=docs/waifu_plugin/autoload.js"></script>
            """
        if not hasattr(gr, 'RawTemplateResponse'):
        if not hasattr(gr, "RawTemplateResponse"):
            gr.RawTemplateResponse = gr.routes.templates.TemplateResponse
        gradio_original_template_fn = gr.RawTemplateResponse

        def gradio_new_template_fn(*args, **kwargs):
            res = gradio_original_template_fn(*args, **kwargs)
            res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
            res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
            res.init_headers()
            return res
        gr.routes.templates.TemplateResponse = gradio_new_template_fn  # override gradio template

        gr.routes.templates.TemplateResponse = (
            gradio_new_template_fn  # override gradio template
        )
    except:
        set_theme = None
        print('gradio版本较旧, 不能自定义字体和颜色')
        print("gradio版本较旧, 不能自定义字体和颜色")
    return set_theme

with open(os.path.join(theme_dir, 'default.css'), "r", encoding="utf-8") as f:

with open(os.path.join(theme_dir, "default.css"), "r", encoding="utf-8") as f:
    advanced_css = f.read()
with open(os.path.join(theme_dir, 'common.css'), "r", encoding="utf-8") as f:
with open(os.path.join(theme_dir, "common.css"), "r", encoding="utf-8") as f:
    advanced_css += f.read()
@@ -2,30 +2,44 @@ import logging
import os
import gradio as gr
from toolbox import get_conf, ProxyNetworkActivate
CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')

CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
theme_dir = os.path.dirname(__file__)


def dynamic_set_theme(THEME):
    set_theme = gr.themes.ThemeClass()
    with ProxyNetworkActivate('Download_Gradio_Theme'):
        logging.info('正在下载Gradio主题,请稍等。')
        if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-')
        if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-')
    with ProxyNetworkActivate("Download_Gradio_Theme"):
        logging.info("正在下载Gradio主题,请稍等。")
        if THEME.startswith("Huggingface-"):
            THEME = THEME.lstrip("Huggingface-")
        if THEME.startswith("huggingface-"):
            THEME = THEME.lstrip("huggingface-")
        set_theme = set_theme.from_hub(THEME.lower())
    return set_theme


def adjust_theme():
    try:
        set_theme = gr.themes.ThemeClass()
        with ProxyNetworkActivate('Download_Gradio_Theme'):
            logging.info('正在下载Gradio主题,请稍等。')
            THEME = get_conf('THEME')
            if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-')
            if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-')
        with ProxyNetworkActivate("Download_Gradio_Theme"):
            logging.info("正在下载Gradio主题,请稍等。")
            THEME = get_conf("THEME")
            if THEME.startswith("Huggingface-"):
                THEME = THEME.lstrip("Huggingface-")
            if THEME.startswith("huggingface-"):
                THEME = THEME.lstrip("huggingface-")
            set_theme = set_theme.from_hub(THEME.lower())

        with open(os.path.join(theme_dir, 'common.js'), 'r', encoding='utf8') as f:
            js = f"<script>{f.read()}</script>"
        js = ""
        for jsf in [
            os.path.join(theme_dir, "common.js"),
            os.path.join(theme_dir, "mermaid.min.js"),
            os.path.join(theme_dir, "mermaid_loader.js"),
        ]:
            with open(jsf, "r", encoding="utf8") as f:
                js += f"<script>{f.read()}</script>"


        # Add a cute Live2D mascot
        if ADD_WAIFU:
@@ -34,20 +48,26 @@ def adjust_theme():
            <script src="file=docs/waifu_plugin/jquery-ui.min.js"></script>
            <script src="file=docs/waifu_plugin/autoload.js"></script>
            """
        if not hasattr(gr, 'RawTemplateResponse'):
        if not hasattr(gr, "RawTemplateResponse"):
            gr.RawTemplateResponse = gr.routes.templates.TemplateResponse
        gradio_original_template_fn = gr.RawTemplateResponse

        def gradio_new_template_fn(*args, **kwargs):
            res = gradio_original_template_fn(*args, **kwargs)
            res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
            res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
            res.init_headers()
            return res
        gr.routes.templates.TemplateResponse = gradio_new_template_fn  # override gradio template
    except Exception as e:

        gr.routes.templates.TemplateResponse = (
            gradio_new_template_fn  # override gradio template
        )
    except Exception:
        set_theme = None
        from toolbox import trimmed_format_exc
        logging.error('gradio版本较旧, 不能自定义字体和颜色:', trimmed_format_exc())

        logging.error("gradio版本较旧, 不能自定义字体和颜色:", trimmed_format_exc())
    return set_theme

with open(os.path.join(theme_dir, 'common.css'), "r", encoding="utf-8") as f:

with open(os.path.join(theme_dir, "common.css"), "r", encoding="utf-8") as f:
    advanced_css = f.read()
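A hedged usage sketch of `dynamic_set_theme` (the hub theme name below is an assumption, standing in for any theme published on the Hugging Face hub):

```python
# Sketch only: load a hub theme and hand it to a Gradio app.
import gradio as gr

theme = dynamic_set_theme("gradio/soft")   # bare hub name: the prefix branch is skipped
demo = gr.Blocks(theme=theme)

# Caveat if a prefixed name is used: THEME.lstrip("Huggingface-") removes
# *characters* (any of H, u, g, i, n, f, a, c, e, -), not the literal prefix,
# so "Huggingface-gradio/soft" ends up as "radio/soft" because the leading 'g'
# is also stripped; str.removeprefix (Python 3.9+) would strip only the prefix.
```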
@@ -197,12 +197,12 @@ footer {
}
textarea.svelte-1pie7s6 {
    background: #e7e6e6 !important;
    width: 96% !important;
    width: 100% !important;
}

.dark textarea.svelte-1pie7s6 {
    background: var(--input-background-fill) !important;
    width: 96% !important;
    width: 100% !important;
}

.dark input[type=number].svelte-1cl284s {
@@ -256,13 +256,13 @@ textarea.svelte-1pie7s6 {
    max-height: 95% !important;
    overflow-y: auto !important;
}*/
.app.svelte-1mya07g.svelte-1mya07g {
/* .app.svelte-1mya07g.svelte-1mya07g {
    max-width: 100%;
    position: relative;
    padding: var(--size-4);
    width: 100%;
    height: 100%;
}
} */

.gradio-container-3-32-2 h1 {
    font-weight: 700 !important;
@@ -508,12 +508,14 @@ ol:not(.options), ul:not(.options) {
[data-testid = "bot"] {
    max-width: 85%;
    border-bottom-left-radius: 0 !important;
    box-shadow: 2px 2px 0px 1px rgba(0, 0, 0, 0.06);
    background-color: var(--message-bot-background-color-light) !important;
}
[data-testid = "user"] {
    max-width: 85%;
    width: auto !important;
    border-bottom-right-radius: 0 !important;
    box-shadow: 2px 2px 0px 1px rgba(0, 0, 0, 0.06);
    background-color: var(--message-user-background-color-light) !important;
}
.dark [data-testid = "bot"] {
@@ -1,9 +1,11 @@
import os
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAYOUT')

CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
theme_dir = os.path.dirname(__file__)


def adjust_theme():
    try:
        set_theme = gr.themes.Soft(
@@ -50,7 +52,6 @@ def adjust_theme():
            c900="#2B2B2B",
            c950="#171717",
        ),

        radius_size=gr.themes.sizes.radius_sm,
        ).set(
        button_primary_background_fill="*primary_500",
@@ -75,8 +76,14 @@ def adjust_theme():
        chatbot_code_background_color_dark="*neutral_950",
        )

        with open(os.path.join(theme_dir, 'common.js'), 'r', encoding='utf8') as f:
            js = f"<script>{f.read()}</script>"
        js = ""
        for jsf in [
            os.path.join(theme_dir, "common.js"),
            os.path.join(theme_dir, "mermaid.min.js"),
            os.path.join(theme_dir, "mermaid_loader.js"),
        ]:
            with open(jsf, "r", encoding="utf8") as f:
                js += f"<script>{f.read()}</script>"

        # Add a cute Live2D mascot
        if ADD_WAIFU:
@@ -86,24 +93,29 @@ def adjust_theme():
            <script src="file=docs/waifu_plugin/autoload.js"></script>
            """

        with open(os.path.join(theme_dir, 'green.js'), 'r', encoding='utf8') as f:
        with open(os.path.join(theme_dir, "green.js"), "r", encoding="utf8") as f:
            js += f"<script>{f.read()}</script>"

        if not hasattr(gr, 'RawTemplateResponse'):
        if not hasattr(gr, "RawTemplateResponse"):
            gr.RawTemplateResponse = gr.routes.templates.TemplateResponse
        gradio_original_template_fn = gr.RawTemplateResponse

        def gradio_new_template_fn(*args, **kwargs):
            res = gradio_original_template_fn(*args, **kwargs)
            res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
            res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
            res.init_headers()
            return res
        gr.routes.templates.TemplateResponse = gradio_new_template_fn  # override gradio template

        gr.routes.templates.TemplateResponse = (
            gradio_new_template_fn  # override gradio template
        )
    except:
        set_theme = None
        print('gradio版本较旧, 不能自定义字体和颜色')
        print("gradio版本较旧, 不能自定义字体和颜色")
    return set_theme

with open(os.path.join(theme_dir, 'green.css'), "r", encoding="utf-8") as f:

with open(os.path.join(theme_dir, "green.css"), "r", encoding="utf-8") as f:
    advanced_css = f.read()
with open(os.path.join(theme_dir, 'common.css'), "r", encoding="utf-8") as f:
with open(os.path.join(theme_dir, "common.css"), "r", encoding="utf-8") as f:
    advanced_css += f.read()
themes/mermaid.min.js (1589 lines changed, vendored, regular file; diff hidden because one or more lines are too long)

themes/mermaid_editor.js (55 lines changed, regular file)
@@ -0,0 +1,55 @@
import { deflate, inflate } from 'https://fastly.jsdelivr.net/gh/nodeca/pako@master/dist/pako.esm.mjs';
import { toUint8Array, fromUint8Array, toBase64, fromBase64 } from 'https://cdn.jsdelivr.net/npm/js-base64@3.7.2/base64.mjs';

const base64Serde = {
  serialize: (state) => {
    return toBase64(state, true);
  },
  deserialize: (state) => {
    return fromBase64(state);
  }
};

const pakoSerde = {
  serialize: (state) => {
    const data = new TextEncoder().encode(state);
    const compressed = deflate(data, { level: 9 });
    return fromUint8Array(compressed, true);
  },
  deserialize: (state) => {
    const data = toUint8Array(state);
    return inflate(data, { to: 'string' });
  }
};

const serdes = {
  base64: base64Serde,
  pako: pakoSerde
};

export const serializeState = (state, serde = 'pako') => {
  if (!(serde in serdes)) {
    throw new Error(`Unknown serde type: ${serde}`);
  }
  const json = JSON.stringify(state);
  const serialized = serdes[serde].serialize(json);
  return `${serde}:${serialized}`;
};

const deserializeState = (state) => {
  let type, serialized;
  if (state.includes(':')) {
    let tempType;
    [tempType, serialized] = state.split(':');
    if (tempType in serdes) {
      type = tempType;
    } else {
      throw new Error(`Unknown serde type: ${tempType}`);
    }
  } else {
    type = 'base64';
    serialized = state;
  }
  const json = serdes[type].deserialize(serialized);
  return JSON.parse(json);
};
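serializeState is what turns a diagram's state into a mermaid.live URL fragment. As a cross-check, a hedged Python sketch (not part of the repository): pako's deflate output is assumed zlib-compatible, and fromUint8Array(..., true) is assumed to be unpadded URL-safe base64, so the same "pako:" state strings can be built server-side.

import base64
import json
import zlib

def serialize_state(state: dict) -> str:
    raw = json.dumps(state).encode("utf-8")
    compressed = zlib.compress(raw, level=9)  # mirrors deflate(data, { level: 9 })
    encoded = base64.urlsafe_b64encode(compressed).decode("ascii").rstrip("=")
    return f"pako:{encoded}"

def deserialize_state(state: str) -> dict:
    serde, _, payload = state.partition(":")
    assert serde == "pako"
    payload += "=" * (-len(payload) % 4)  # restore the stripped base64 padding
    return json.loads(zlib.decompress(base64.urlsafe_b64decode(payload)))

# Round trip and build an edit link the way mermaid_loader.js does:
link = "https://mermaid.live/edit#" + serialize_state({
    "code": "graph TD; A-->B;",
    "mermaid": "{\n \"theme\": \"default\"\n}",
    "autoSync": True,
    "updateDiagram": False,
})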
189  themes/mermaid_loader.js  (normal file)
@@ -0,0 +1,189 @@
const uml = async className => {

  // Custom element to encapsulate Mermaid content.
  class MermaidDiv extends HTMLElement {

    /**
     * Creates a special Mermaid div shadow DOM.
     * Works around issues of shared IDs.
     * @return {void}
     */
    constructor() {
      super()

      // Create the Shadow DOM and attach style
      const shadow = this.attachShadow({ mode: "open" })
      const style = document.createElement("style")
      style.textContent = `
      :host {
        display: block;
        line-height: initial;
        font-size: 16px;
      }
      div.diagram {
        margin: 0;
        overflow: visible;
      }`
      shadow.appendChild(style)
    }
  }

  if (typeof customElements.get("diagram-div") === "undefined") {
    customElements.define("diagram-div", MermaidDiv)
  }

  const getFromCode = parent => {
    // Handles <pre><code> text extraction.
    let text = ""
    for (let j = 0; j < parent.childNodes.length; j++) {
      const subEl = parent.childNodes[j]
      if (subEl.tagName.toLowerCase() === "code") {
        for (let k = 0; k < subEl.childNodes.length; k++) {
          const child = subEl.childNodes[k]
          const whitespace = /^\s*$/
          if (child.nodeName === "#text" && !(whitespace.test(child.nodeValue))) {
            text = child.nodeValue
            break
          }
        }
      }
    }
    return text
  }

  function createOrUpdateHyperlink(parentElement, linkText, linkHref) {
    // Search for an existing anchor element within the parentElement
    let existingAnchor = parentElement.querySelector("a");

    // Check if an anchor element already exists
    if (existingAnchor) {
      // Update the hyperlink reference if it's different from the current one
      if (existingAnchor.href !== linkHref) {
        existingAnchor.href = linkHref;
      }
      // Update the target attribute to ensure it opens in a new tab
      existingAnchor.target = '_blank';

      // If the text must be dynamic, uncomment and use the following line:
      // existingAnchor.textContent = linkText;
    } else {
      // If no anchor exists, create one and append it to the parentElement
      let anchorElement = document.createElement("a");
      anchorElement.href = linkHref; // Set hyperlink reference
      anchorElement.textContent = linkText; // Set text displayed
      anchorElement.target = '_blank'; // Ensure it opens in a new tab
      parentElement.appendChild(anchorElement); // Append the new anchor element to the parent
    }
  }

  function removeLastLine(str) {
    // Split the string into an array of lines
    var lines = str.split('\n');
    lines.pop();
    // Join the remaining lines back into a single string
    var result = lines.join('\n');
    return result;
  }

  // Provide a default config in case one is not specified
  const defaultConfig = {
    startOnLoad: false,
    theme: "default",
    flowchart: {
      htmlLabels: false
    },
    er: {
      useMaxWidth: false
    },
    sequence: {
      useMaxWidth: false,
      noteFontWeight: "14px",
      actorFontSize: "14px",
      messageFontSize: "16px"
    }
  }
  if (document.body.classList.contains("dark")) {
    defaultConfig.theme = "dark"
  }

  const Module = await import('./file=themes/mermaid_editor.js');

  function do_render(block, code, codeContent, cnt) {
    var rendered_content = mermaid.render(`_diagram_${cnt}`, code);
    ////////////////////////////// Track which code has already been rendered ///////////////////////////////////
    let codeFinishRenderElement = block.querySelector("code_finish_render"); // Fetch the code_finish_render element if the block already has one
    if (codeFinishRenderElement) { // The block already has one
      codeFinishRenderElement.style.display = "none";
    } else {
      // Otherwise create a hidden code_finish_render element and attach it to the block
      let codeFinishRenderElementNew = document.createElement("code_finish_render"); // Create a new code_finish_render element
      codeFinishRenderElementNew.style.display = "none";
      codeFinishRenderElementNew.textContent = "";
      block.appendChild(codeFinishRenderElementNew); // Attach the newly created element to the block
      codeFinishRenderElement = codeFinishRenderElementNew;
    }

    ////////////////////////////// Create a container for the rendered diagram ///////////////////////////////////
    let mermaidRender = block.querySelector(".mermaid_render"); // Try to reuse an existing <div class='mermaid_render'>
    if (!mermaidRender) {
      mermaidRender = document.createElement("div"); // None exists; create a new <div class='mermaid_render'>
      mermaidRender.classList.add("mermaid_render");
      block.appendChild(mermaidRender); // Attach the new element to the block
    }
    mermaidRender.innerHTML = rendered_content
    codeFinishRenderElement.textContent = code // Mark this code as rendered

    ////////////////////////////// Create a "click here to edit the diagram" link ///////////////////////////////
    let pako_encode = Module.serializeState({
      "code": codeContent,
      "mermaid": "{\n \"theme\": \"default\"\n}",
      "autoSync": true,
      "updateDiagram": false
    });
    createOrUpdateHyperlink(block, "点击这里编辑脑图", "https://mermaid.live/edit#" + pako_encode)
  }

  // Load up the config
  mermaid.mermaidAPI.globalReset() // Global reset
  const config = (typeof mermaidConfig === "undefined") ? defaultConfig : mermaidConfig
  mermaid.initialize(config)
  // Find all of our Mermaid sources and render them.
  const blocks = document.querySelectorAll(`pre.mermaid`);

  for (let i = 0; i < blocks.length; i++) {
    var block = blocks[i]
    ////////////////////////////// Skip rendering when the code has not changed ///////////////////////////////////
    var code = getFromCode(block);
    let codeContent = block.querySelector("code").textContent; // Text content of the code element
    let codePendingRenderElement = block.querySelector("code_pending_render"); // Fetch the code_pending_render element if the block already has one
    if (codePendingRenderElement) { // The block already has one
      codePendingRenderElement.style.display = "none";
      if (codePendingRenderElement.textContent !== codeContent) {
        codePendingRenderElement.textContent = codeContent; // Pending content differs from the current code: update it
      }
      else {
        continue; // Identical content, nothing to do
      }
    } else { // Otherwise store the code content in a newly created code_pending_render element
      let codePendingRenderElementNew = document.createElement("code_pending_render"); // Create a new code_pending_render element
      codePendingRenderElementNew.style.display = "none";
      codePendingRenderElementNew.textContent = codeContent;
      block.appendChild(codePendingRenderElementNew); // Attach it to the block
      codePendingRenderElement = codePendingRenderElementNew;
    }

    ////////////////////////////// The actual rendering happens here ///////////////////////////////////
    try {
      do_render(block, code, codeContent, i);
      // console.log("rendering", codeContent);
    } catch (err) {
      try {
        var lines = code.split('\n'); if (lines.length < 2) { continue; }
        do_render(block, removeLastLine(code), codeContent, i);
        // console.log("rendering", codeContent);
      } catch (err) {
        console.log("以下代码不能渲染", code, removeLastLine(code), err);
      }
    }
  }
}
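The nested try/catch at the end of the loop handles partially streamed blocks: while the model is still typing, the final line of a mermaid fence is usually incomplete, so the loader retries once with that line dropped. A hedged Python sketch of the same strategy (function names hypothetical):

# Hedged sketch of the retry strategy above (names hypothetical): render,
# and if that throws, retry once with the likely half-streamed last line dropped.
def render_with_retry(render, code: str):
    try:
        return render(code)
    except Exception:
        lines = code.split("\n")
        if len(lines) < 2:
            return None  # a single incomplete line: nothing to salvage
        try:
            return render("\n".join(lines[:-1]))  # drop the partial tail line
        except Exception as err:
            print("cannot render:", err)
            return None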
112  themes/theme.py
@@ -1,23 +1,121 @@
 import gradio as gr
+import pickle
+import base64
+import uuid
 from toolbox import get_conf
-THEME = get_conf('THEME')
+
+"""
+-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+Part 1
+Utility functions for loading themes
+-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+"""
+
+
 def load_dynamic_theme(THEME):
     adjust_dynamic_theme = None
-    if THEME == 'Chuanhu-Small-and-Beautiful':
+    if THEME == "Chuanhu-Small-and-Beautiful":
         from .green import adjust_theme, advanced_css
-        theme_declaration = "<h2 align=\"center\" class=\"small\">[Chuanhu-Small-and-Beautiful主题]</h2>"
-    elif THEME == 'High-Contrast':
+
+        theme_declaration = (
+            '<h2 align="center" class="small">[Chuanhu-Small-and-Beautiful主题]</h2>'
+        )
+    elif THEME == "High-Contrast":
         from .contrast import adjust_theme, advanced_css
+
         theme_declaration = ""
-    elif '/' in THEME:
+    elif "/" in THEME:
         from .gradios import adjust_theme, advanced_css
         from .gradios import dynamic_set_theme
+
         adjust_dynamic_theme = dynamic_set_theme(THEME)
         theme_declaration = ""
     else:
         from .default import adjust_theme, advanced_css
+
         theme_declaration = ""
     return adjust_theme, advanced_css, theme_declaration, adjust_dynamic_theme

-adjust_theme, advanced_css, theme_declaration, _ = load_dynamic_theme(THEME)
+
+adjust_theme, advanced_css, theme_declaration, _ = load_dynamic_theme(get_conf("THEME"))
+
+
+"""
+-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+Part 2
+Cookie utility functions
+-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+"""
+
+
+def init_cookie(cookies, chatbot):
+    # Assign a unique uuid to every visiting user
+    cookies.update({"uuid": uuid.uuid4()})
+    return cookies
+
+
+def to_cookie_str(d):
+    # Pickle the dictionary and encode it as a string
+    pickled_dict = pickle.dumps(d)
+    cookie_value = base64.b64encode(pickled_dict).decode("utf-8")
+    return cookie_value
+
+
+def from_cookie_str(c):
+    # Decode the base64-encoded string and unpickle it into a dictionary
+    pickled_dict = base64.b64decode(c.encode("utf-8"))
+    return pickle.loads(pickled_dict)
+
+
+"""
+-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+Part 3
+Embedded javascript code
+-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+"""
+
+js_code_for_css_changing = """(css) => {
+    var existingStyles = document.querySelectorAll("body > gradio-app > div > style")
+    for (var i = 0; i < existingStyles.length; i++) {
+        var style = existingStyles[i];
+        style.parentNode.removeChild(style);
+    }
+    var existingStyles = document.querySelectorAll("style[data-loaded-css]");
+    for (var i = 0; i < existingStyles.length; i++) {
+        var style = existingStyles[i];
+        style.parentNode.removeChild(style);
+    }
+    var styleElement = document.createElement('style');
+    styleElement.setAttribute('data-loaded-css', 'placeholder');
+    styleElement.innerHTML = css;
+    document.body.appendChild(styleElement);
+}
+"""
+
+js_code_for_darkmode_init = """(dark) => {
+    dark = dark == "True";
+    if (document.querySelectorAll('.dark').length) {
+        if (!dark){
+            document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
+        }
+    } else {
+        if (dark){
+            document.querySelector('body').classList.add('dark');
+        }
+    }
+}
+"""
+
+js_code_for_toggle_darkmode = """() => {
+    if (document.querySelectorAll('.dark').length) {
+        document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
+    } else {
+        document.querySelector('body').classList.add('dark');
+    }
+    document.querySelectorAll('code_pending_render').forEach(code => {code.remove();})
+}"""
+
+
+js_code_for_persistent_cookie_init = """(persistent_cookie) => {
+    return getCookie("persistent_cookie");
+}
+"""
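A hedged usage sketch for the cookie helpers added above (the sample dict is made up): a dictionary survives the pickle → base64 round trip intact, including the uuid stamped by init_cookie.

# Hedged usage sketch of to_cookie_str / from_cookie_str (sample data hypothetical)
cookies = init_cookie({"theme": "default"}, chatbot=None)
cookie_str = to_cookie_str(cookies)  # plain str, storable in a browser cookie
assert from_cookie_str(cookie_str) == cookies  # round trip is lossless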
939  toolbox.py
File diff is too large to display
4  version
@@ -1,5 +1,5 @@
 {
-    "version": 3.62,
+    "version": 3.70,
     "show_feature": true,
-    "new_feature": "修复若干隐蔽的内存BUG <-> 修复多用户冲突问题 <-> 接入Deepseek Coder <-> AutoGen多智能体插件测试版 <-> 修复本地模型在Windows下的加载BUG <-> 支持文心一言v4和星火v3 <-> 支持GLM3和智谱的API <-> 解决本地模型并发BUG <-> 支持动态追加基础功能按钮"
+    "new_feature": "支持Mermaid绘图库(让大模型绘制脑图) <-> 支持Gemini-pro <-> 支持直接拖拽文件到上传区 <-> 支持将图片粘贴到输入区 <-> 修复若干隐蔽的内存BUG <-> 修复多用户冲突问题 <-> 接入Deepseek Coder <-> AutoGen多智能体插件测试版"
 }
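The version file doubles as an update notice. A hedged sketch of how a client might consume it (the reader logic is hypothetical; the field names come from the diff):

# Hedged sketch: surface the release notes from the version file
import json

with open("version", "r", encoding="utf-8") as f:
    meta = json.load(f)

if meta["show_feature"] and meta["version"] > 3.62:
    print(f"v{meta['version']} new features:")
    for feat in meta["new_feature"].split(" <-> "):
        print(" -", feat)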